From 6049cdca299ed754971c860ecb3db467d6cc76fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ros=C3=A1rio=20Pereira=20Fernandes?= Date: Fri, 7 Sep 2018 03:57:39 +0200 Subject: [PATCH 1/7] feat: ML Kit Quickstart Kotlin --- mlkit/app/build.gradle | 5 + mlkit/app/src/main/AndroidManifest.xml | 21 +- .../app/src/main/java/EntryChoiceActivity.kt | 25 + .../apps/mlkit/{ => java}/CameraSource.java | 2 +- .../mlkit/{ => java}/CameraSourcePreview.java | 2 +- .../mlkit/{ => java}/ChooserActivity.java | 2 +- .../apps/mlkit/{ => java}/FrameMetadata.java | 2 +- .../apps/mlkit/{ => java}/GraphicOverlay.java | 3 +- .../mlkit/{ => java}/LivePreviewActivity.java | 12 +- .../mlkit/{ => java}/StillImageActivity.java | 10 +- .../{ => java}/VisionImageProcessor.java | 2 +- .../mlkit/{ => java}/VisionProcessorBase.java | 2 +- .../barcodescanning/BarcodeGraphic.java | 6 +- .../BarcodeScanningProcessor.java | 8 +- .../CloudImageLabelingProcessor.java | 8 +- .../cloudimagelabeling/CloudLabelGraphic.java | 6 +- .../CloudLandmarkGraphic.java | 6 +- .../CloudLandmarkRecognitionProcessor.java | 8 +- .../CloudDocumentTextGraphic.java | 7 +- ...CloudDocumentTextRecognitionProcessor.java | 9 +- .../CloudTextGraphic.java | 6 +- .../CloudTextRecognitionProcessor.java | 8 +- .../custommodel/CustomImageClassifier.java | 2 +- .../CustomImageClassifierProcessor.java | 8 +- .../{ => java}/custommodel/LabelGraphic.java | 6 +- .../facedetection/FaceDetectionProcessor.java | 8 +- .../{ => java}/facedetection/FaceGraphic.java | 6 +- .../imagelabeling/ImageLabelingProcessor.java | 8 +- .../imagelabeling/LabelGraphic.java | 6 +- .../textrecognition/TextGraphic.java | 6 +- .../TextRecognitionProcessor.java | 8 +- .../samples/apps/mlkit/kotlin/CameraSource.kt | 701 ++++++++++++++++++ .../apps/mlkit/kotlin/ChooserActivity.kt | 132 ++++ .../apps/mlkit/kotlin/FrameMetadata.kt | 58 ++ .../apps/mlkit/kotlin/GraphicOverlay.kt | 152 ++++ .../apps/mlkit/kotlin/LivePreviewActivity.kt | 252 +++++++ 
.../apps/mlkit/kotlin/StillImageActivity.kt | 321 ++++++++ .../apps/mlkit/kotlin/VisionImageProcessor.kt | 23 + .../apps/mlkit/kotlin/VisionProcessorBase.kt | 101 +++ .../kotlin/barcodescanning/BarcodeGraphic.kt | 54 ++ .../BarcodeScanningProcessor.kt | 60 ++ .../CloudImageLabellingProcessor.kt | 64 ++ .../cloudimagelabeling/CloudLabelGraphic.kt | 36 + .../CloudLandmarkGraphic.kt | 66 ++ .../CloudLandmarkRecognitionProcessor.kt | 54 ++ .../CloudDocumentTextGraphic.kt | 51 ++ .../CloudDocumentTextRecognitionProcessor.kt | 57 ++ .../cloudtextrecognition/CloudTextGraphic.kt | 51 ++ .../CloudTextRecognitionProcessor.kt | 58 ++ .../custommodel/CustomImageClassifier.kt | 201 +++++ .../CustomImageClassifierProcessor.kt | 48 ++ .../mlkit/kotlin/custommodel/LabelGraphic.kt | 37 + .../facedetection/FaceDetectionProcessor.kt | 64 ++ .../mlkit/kotlin/facedetection/FaceGraphic.kt | 135 ++++ .../imagelabeling/ImageLabelingProcessor.kt | 53 ++ .../kotlin/imagelabeling/LabelGraphic.kt | 34 + .../textrecognition/CameraSourcePreview.kt | 167 +++++ .../kotlin/textrecognition/TextGraphic.kt | 59 ++ .../TextRecognitionProcessor.kt | 63 ++ .../res/layout-land/activity_live_preview.xml | 6 +- .../main/res/layout/activity_live_preview.xml | 6 +- .../main/res/layout/activity_still_image.xml | 2 +- 62 files changed, 3289 insertions(+), 95 deletions(-) create mode 100644 mlkit/app/src/main/java/EntryChoiceActivity.kt rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/CameraSource.java (99%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/CameraSourcePreview.java (98%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/ChooserActivity.java (99%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/FrameMetadata.java (97%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/GraphicOverlay.java (98%) rename 
mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/LivePreviewActivity.java (94%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/StillImageActivity.java (96%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/VisionImageProcessor.java (96%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/VisionProcessorBase.java (98%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/barcodescanning/BarcodeGraphic.java (91%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/barcodescanning/BarcodeScanningProcessor.java (90%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudimagelabeling/CloudImageLabelingProcessor.java (90%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudimagelabeling/CloudLabelGraphic.java (87%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudlandmarkrecognition/CloudLandmarkGraphic.java (92%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java (90%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudtextrecognition/CloudDocumentTextGraphic.java (89%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java (89%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudtextrecognition/CloudTextGraphic.java (91%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/cloudtextrecognition/CloudTextRecognitionProcessor.java (90%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/custommodel/CustomImageClassifier.java (99%) rename 
mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/custommodel/CustomImageClassifierProcessor.java (88%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/custommodel/LabelGraphic.java (87%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/facedetection/FaceDetectionProcessor.java (90%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/facedetection/FaceGraphic.java (96%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/imagelabeling/ImageLabelingProcessor.java (88%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/imagelabeling/LabelGraphic.java (88%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/textrecognition/TextGraphic.java (91%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{ => java}/textrecognition/TextRecognitionProcessor.java (90%) create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt create 
mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudLabelGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextRecognitionProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt 
create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt create mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt diff --git a/mlkit/app/build.gradle b/mlkit/app/build.gradle index 7843b3beef..4eebd02e76 100644 --- a/mlkit/app/build.gradle +++ b/mlkit/app/build.gradle @@ -1,4 +1,6 @@ apply plugin: 'com.android.application' +apply plugin: 'kotlin-android' +apply plugin: 'kotlin-android-extensions' android { compileSdkVersion 27 @@ -22,6 +24,9 @@ android { } dependencies { + implementation project(':internal') + implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.2.50" + implementation 'com.android.support:appcompat-v7:27.1.1' implementation 'com.android.support:design:27.1.1' implementation 'com.android.support.constraint:constraint-layout:1.1.3' diff --git a/mlkit/app/src/main/AndroidManifest.xml b/mlkit/app/src/main/AndroidManifest.xml index 12a78f45e1..13a242865b 100644 --- a/mlkit/app/src/main/AndroidManifest.xml +++ b/mlkit/app/src/main/AndroidManifest.xml @@ -17,8 +17,16 @@ + + + + + + @@ -27,12 +35,19 @@ - + + - + + + + + \ No newline at end of file diff --git a/mlkit/app/src/main/java/EntryChoiceActivity.kt b/mlkit/app/src/main/java/EntryChoiceActivity.kt new file mode 100644 index 0000000000..245f4e1723 --- /dev/null +++ b/mlkit/app/src/main/java/EntryChoiceActivity.kt @@ -0,0 +1,25 @@ +package com.google.firebase.samples.apps.mlkit + +import android.content.Intent +import com.firebase.example.internal.BaseEntryChoiceActivity +import com.firebase.example.internal.Choice +import com.google.firebase.samples.apps.mlkit.java.ChooserActivity + +class EntryChoiceActivity : 
BaseEntryChoiceActivity() { + + override fun getChoices(): List { + return listOf( + Choice( + "Java", + "Run the Firebase ML Kit quickstart written in Java.", + Intent(this, ChooserActivity::class.java)), + Choice( + "Kotlin", + "Run the Firebase ML Kit quickstart written in Kotlin.", + Intent( + this, + com.google.firebase.samples.apps.mlkit.kotlin.ChooserActivity::class.java)) + ) + } + +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/CameraSource.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSource.java similarity index 99% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/CameraSource.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSource.java index 98922014e0..f29d79c7fe 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/CameraSource.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSource.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.Manifest; import android.annotation.SuppressLint; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/CameraSourcePreview.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSourcePreview.java similarity index 98% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/CameraSourcePreview.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSourcePreview.java index a45858076c..73cc0e595c 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/CameraSourcePreview.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSourcePreview.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.annotation.SuppressLint; import android.content.Context; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/ChooserActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java similarity index 99% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/ChooserActivity.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java index e4a816e201..7a497a4eda 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/ChooserActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.content.Context; import android.content.Intent; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/FrameMetadata.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/FrameMetadata.java similarity index 97% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/FrameMetadata.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/FrameMetadata.java index c562fc27f8..46fa5b2b07 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/FrameMetadata.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/FrameMetadata.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; /** Describing a frame info. */ public class FrameMetadata { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/GraphicOverlay.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/GraphicOverlay.java similarity index 98% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/GraphicOverlay.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/GraphicOverlay.java index 450e848224..50a93148fb 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/GraphicOverlay.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/GraphicOverlay.java @@ -11,11 +11,10 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.content.Context; import android.graphics.Canvas; -import android.hardware.camera2.CameraCharacteristics; import android.util.AttributeSet; import android.view.View; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/LivePreviewActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java similarity index 94% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/LivePreviewActivity.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java index 8b4dbd0945..c9ad555073 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/LivePreviewActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.content.Context; import android.content.pm.PackageInfo; @@ -32,11 +32,11 @@ import com.google.android.gms.common.annotation.KeepName; import com.google.firebase.ml.common.FirebaseMLException; -import com.google.firebase.samples.apps.mlkit.barcodescanning.BarcodeScanningProcessor; -import com.google.firebase.samples.apps.mlkit.custommodel.CustomImageClassifierProcessor; -import com.google.firebase.samples.apps.mlkit.facedetection.FaceDetectionProcessor; -import com.google.firebase.samples.apps.mlkit.imagelabeling.ImageLabelingProcessor; -import com.google.firebase.samples.apps.mlkit.textrecognition.TextRecognitionProcessor; +import com.google.firebase.samples.apps.mlkit.java.barcodescanning.BarcodeScanningProcessor; +import com.google.firebase.samples.apps.mlkit.java.custommodel.CustomImageClassifierProcessor; +import com.google.firebase.samples.apps.mlkit.java.facedetection.FaceDetectionProcessor; +import com.google.firebase.samples.apps.mlkit.java.imagelabeling.ImageLabelingProcessor; +import com.google.firebase.samples.apps.mlkit.java.textrecognition.TextRecognitionProcessor; import java.io.IOException; import java.util.ArrayList; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/StillImageActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java similarity index 96% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/StillImageActivity.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java index 252195c3da..63e3844509 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/StillImageActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. // See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.content.ContentValues; import android.content.Intent; @@ -37,12 +37,12 @@ import android.widget.Spinner; import com.google.android.gms.common.annotation.KeepName; -import com.google.firebase.samples.apps.mlkit.cloudimagelabeling.CloudImageLabelingProcessor; -import com.google.firebase.samples.apps.mlkit.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor; -import com.google.firebase.samples.apps.mlkit.cloudtextrecognition.CloudDocumentTextRecognitionProcessor; +import com.google.firebase.samples.apps.mlkit.java.cloudimagelabeling.CloudImageLabelingProcessor; +import com.google.firebase.samples.apps.mlkit.java.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor; +import com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition.CloudDocumentTextRecognitionProcessor; -import com.google.firebase.samples.apps.mlkit.cloudtextrecognition.CloudTextRecognitionProcessor; +import com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition.CloudTextRecognitionProcessor; import java.io.IOException; import java.util.ArrayList; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/VisionImageProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionImageProcessor.java similarity index 96% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/VisionImageProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionImageProcessor.java index f9e0d82597..036b738c2e 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/VisionImageProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionImageProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.graphics.Bitmap; import android.media.Image; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/VisionProcessorBase.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java similarity index 98% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/VisionProcessorBase.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java index 87ca938ce5..296c61a2dd 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/VisionProcessorBase.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit; +package com.google.firebase.samples.apps.mlkit.java; import android.graphics.Bitmap; import android.media.Image; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/barcodescanning/BarcodeGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java similarity index 91% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/barcodescanning/BarcodeGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java index 2e32e83f3c..9902b5ce5f 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/barcodescanning/BarcodeGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit.barcodescanning; +package com.google.firebase.samples.apps.mlkit.java.barcodescanning; import android.graphics.Canvas; import android.graphics.Color; @@ -19,8 +19,8 @@ import android.graphics.RectF; import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; /** Graphic instance for rendering Barcode position and content information in an overlay view. 
*/ public class BarcodeGraphic extends Graphic { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/barcodescanning/BarcodeScanningProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java similarity index 90% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/barcodescanning/BarcodeScanningProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java index 1008daf80b..b33e85d0fd 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/barcodescanning/BarcodeScanningProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.barcodescanning; +package com.google.firebase.samples.apps.mlkit.java.barcodescanning; import android.support.annotation.NonNull; import android.util.Log; @@ -21,9 +21,9 @@ import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode; import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetector; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudimagelabeling/CloudImageLabelingProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java similarity index 90% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudimagelabeling/CloudImageLabelingProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java index 71fa75eb7f..43e3ce03f1 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudimagelabeling/CloudImageLabelingProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudimagelabeling; +package com.google.firebase.samples.apps.mlkit.java.cloudimagelabeling; import android.support.annotation.NonNull; import android.util.Log; @@ -22,9 +22,9 @@ import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabel; import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabelDetector; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.ArrayList; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudimagelabeling/CloudLabelGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java similarity index 87% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudimagelabeling/CloudLabelGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java index d091349ec4..2598b3a6c8 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudimagelabeling/CloudLabelGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java @@ -11,14 +11,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudimagelabeling; +package com.google.firebase.samples.apps.mlkit.java.cloudimagelabeling; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudlandmarkrecognition/CloudLandmarkGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java similarity index 92% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudlandmarkrecognition/CloudLandmarkGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java index e5d6a4e3f5..d23cf9438f 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudlandmarkrecognition/CloudLandmarkGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudlandmarkrecognition; +package com.google.firebase.samples.apps.mlkit.java.cloudlandmarkrecognition; import android.graphics.Canvas; import android.graphics.Color; @@ -19,8 +19,8 @@ import android.graphics.RectF; import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; /** Graphic instance for rendering detected landmark. */ public class CloudLandmarkGraphic extends Graphic { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java similarity index 90% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java index c5678fe4fe..9526afe697 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudlandmarkrecognition; +package com.google.firebase.samples.apps.mlkit.java.cloudlandmarkrecognition; import android.support.annotation.NonNull; import android.util.Log; @@ -22,9 +22,9 @@ import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark; import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmarkDetector; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudDocumentTextGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java similarity index 89% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudDocumentTextGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java index 701a07911a..0305125451 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudDocumentTextGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudtextrecognition; +package com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition; import android.graphics.Canvas; import android.graphics.Color; @@ -19,9 +19,8 @@ import android.graphics.Rect; import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText; -import com.google.firebase.ml.vision.text.FirebaseVisionText; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java similarity index 89% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java index aab488197e..07324231e9 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudtextrecognition; +package com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition; import android.support.annotation.NonNull; import android.util.Log; @@ -21,10 +21,9 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage; import com.google.firebase.ml.vision.document.FirebaseVisionDocumentTextRecognizer; import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText; -import com.google.firebase.ml.vision.text.FirebaseVisionText; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudTextGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java similarity index 91% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudTextGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java index 5151cbff9c..15f51a4ed6 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudTextGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudtextrecognition; +package com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition; import android.graphics.Canvas; import android.graphics.Color; @@ -19,8 +19,8 @@ import android.graphics.Rect; import com.google.firebase.ml.vision.text.FirebaseVisionText; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudTextRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java similarity index 90% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudTextRecognitionProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java index 51f074b7aa..c999896afe 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/cloudtextrecognition/CloudTextRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.cloudtextrecognition; +package com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition; import android.support.annotation.NonNull; import android.util.Log; @@ -21,9 +21,9 @@ import com.google.firebase.ml.vision.text.FirebaseVisionText; import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/CustomImageClassifier.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifier.java similarity index 99% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/CustomImageClassifier.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifier.java index 58b8aa9757..d46011f45a 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/CustomImageClassifier.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifier.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.custommodel; +package com.google.firebase.samples.apps.mlkit.java.custommodel; import android.app.Activity; import android.graphics.Bitmap; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/CustomImageClassifierProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java similarity index 88% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/CustomImageClassifierProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java index 6506fc44c6..03efc207cb 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/CustomImageClassifierProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.custommodel; +package com.google.firebase.samples.apps.mlkit.java.custommodel; import android.app.Activity; import android.graphics.Bitmap; @@ -19,9 +19,9 @@ import com.google.android.gms.tasks.OnSuccessListener; import com.google.firebase.ml.common.FirebaseMLException; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionImageProcessor; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionImageProcessor; import java.nio.ByteBuffer; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/LabelGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java similarity index 87% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/LabelGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java index 7ec29e2735..5b34f42ac5 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/custommodel/LabelGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java @@ -11,15 +11,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.custommodel; +package com.google.firebase.samples.apps.mlkit.java.custommodel; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/facedetection/FaceDetectionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java similarity index 90% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/facedetection/FaceDetectionProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java index d386e8a246..ab7f0e1bc5 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/facedetection/FaceDetectionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.facedetection; +package com.google.firebase.samples.apps.mlkit.java.facedetection; import android.support.annotation.NonNull; import android.util.Log; @@ -22,9 +22,9 @@ import com.google.firebase.ml.vision.face.FirebaseVisionFace; import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector; import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/facedetection/FaceGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java similarity index 96% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/facedetection/FaceGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java index f7b63c5d27..ddede79cdc 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/facedetection/FaceGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.facedetection; +package com.google.firebase.samples.apps.mlkit.java.facedetection; import android.graphics.Canvas; import android.graphics.Color; @@ -22,8 +22,8 @@ import com.google.firebase.ml.vision.common.FirebaseVisionPoint; import com.google.firebase.ml.vision.face.FirebaseVisionFace; import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; /** * Graphic instance for rendering face position, orientation, and landmarks within an associated diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/imagelabeling/ImageLabelingProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java similarity index 88% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/imagelabeling/ImageLabelingProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java index e952aa0541..b108845bb5 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/imagelabeling/ImageLabelingProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.imagelabeling; +package com.google.firebase.samples.apps.mlkit.java.imagelabeling; import android.support.annotation.NonNull; import android.util.Log; @@ -21,9 +21,9 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage; import com.google.firebase.ml.vision.label.FirebaseVisionLabel; import com.google.firebase.ml.vision.label.FirebaseVisionLabelDetector; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/imagelabeling/LabelGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java similarity index 88% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/imagelabeling/LabelGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java index 444dcdc94f..83e92bff53 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/imagelabeling/LabelGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java @@ -11,15 +11,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.imagelabeling; +package com.google.firebase.samples.apps.mlkit.java.imagelabeling; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; import com.google.firebase.ml.vision.label.FirebaseVisionLabel; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java similarity index 91% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextGraphic.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java index 0d8a9fcf76..8f5ae4ad27 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.textrecognition; +package com.google.firebase.samples.apps.mlkit.java.textrecognition; import android.graphics.Canvas; import android.graphics.Color; @@ -19,8 +19,8 @@ import android.graphics.RectF; import com.google.firebase.ml.vision.text.FirebaseVisionText; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java similarity index 90% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextRecognitionProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java index cb48266362..49fe3c8fa4 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.textrecognition; +package com.google.firebase.samples.apps.mlkit.java.textrecognition; import android.support.annotation.NonNull; import android.util.Log; @@ -21,9 +21,9 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage; import com.google.firebase.ml.vision.text.FirebaseVisionText; import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer; -import com.google.firebase.samples.apps.mlkit.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.VisionProcessorBase; +import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt new file mode 100644 index 0000000000..44ffd10e23 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt @@ -0,0 +1,701 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.Manifest +import android.annotation.SuppressLint +import android.app.Activity +import android.content.Context +import android.graphics.ImageFormat +import android.graphics.SurfaceTexture +import android.hardware.Camera +import android.support.annotation.RequiresPermission +import android.util.Log +import android.view.Surface +import android.view.SurfaceHolder +import android.view.WindowManager +import com.google.android.gms.common.images.Size +import java.io.IOException +import java.nio.ByteBuffer +import java.util.* +import java.util.concurrent.locks.ReentrantLock + +/** + * Manages the camera and allows UI updates on top of it (e.g. 
overlaying extra Graphics or + * displaying extra information). This receives preview frames from the camera at a specified rate, + * sending those frames to child classes' detectors / classifiers as fast as it is able to process. + */ +@SuppressLint("MissingPermission") +class CameraSource(protected var activity: Activity, private val graphicOverlay: GraphicOverlay) { + + private var camera: Camera? = null + + /** + * Returns the selected camera; one of [.CAMERA_FACING_BACK] or [ ][.CAMERA_FACING_FRONT]. + */ + var cameraFacing = CAMERA_FACING_BACK + protected set + + /** + * Rotation of the device, and thus the associated preview images captured from the device. See + * Frame.Metadata#getRotation(). + */ + private var rotation: Int = 0 + + /** Returns the preview size that is currently in use by the underlying camera. */ + var previewSize: Size? = null + private set + + // These values may be requested by the caller. Due to hardware limitations, we may need to + // select close, but not exactly the same values for these. + private val requestedFps = 20.0f + private val requestedPreviewWidth = 1280 + private val requestedPreviewHeight = 960 + private val requestedAutoFocus = true + + // These instances need to be held onto to avoid GC of their underlying resources. Even though + // these aren't used outside of the method that creates them, they still must have hard + // references maintained to them. + private var dummySurfaceTexture: SurfaceTexture? = null + + // True if a SurfaceTexture is being used for the preview, false if a SurfaceHolder is being + // used for the preview. We want to be compatible back to Gingerbread, but SurfaceTexture + // wasn't introduced until Honeycomb. Since the interface cannot use a SurfaceTexture, if the + // developer wants to display a preview we must use a SurfaceHolder. If the developer doesn't + // want to display a preview we use a SurfaceTexture if we are running at least Honeycomb. 
+ private var usingSurfaceTexture: Boolean = false + + /** + * Dedicated thread and associated runnable for calling into the detector with frames, as the + * frames become available from the camera. + */ + private var processingThread: Thread? = null + + private val processingRunnable: FrameProcessingRunnable + + private val processorLock = Any() + // @GuardedBy("processorLock") + private var frameProcessor: VisionImageProcessor? = null + + /** + * Map to convert between a byte array, received from the camera, and its associated byte buffer. + * We use byte buffers internally because this is a more efficient way to call into native code + * later (avoids a potential copy). + * + * + * **Note:** uses IdentityHashMap here instead of HashMap because the behavior of an array's + * equals, hashCode and toString methods is both useless and unexpected. IdentityHashMap enforces + * identity ('==') check on the keys. + */ + private val bytesToByteBuffer = IdentityHashMap() + + init { + graphicOverlay.clear() + processingRunnable = FrameProcessingRunnable() + } + + // ============================================================================================== + // Public + // ============================================================================================== + + /** Stops the camera and releases the resources of the camera and underlying detector. */ + fun release() { + synchronized(processorLock) { + stop() + processingRunnable.release() + cleanScreen() + + if (frameProcessor != null) { + frameProcessor!!.stop() + } + } + } + + /** + * Opens the camera and starts sending preview frames to the underlying detector. The preview + * frames are not displayed. 
+ * + * @throws IOException if the camera's preview texture or display could not be initialized + */ + @SuppressLint("MissingPermission") + @RequiresPermission(Manifest.permission.CAMERA) + @Synchronized + @Throws(IOException::class) + fun start(): CameraSource { + if (camera != null) { + return this + } + + camera = createCamera() + dummySurfaceTexture = SurfaceTexture(DUMMY_TEXTURE_NAME) + camera!!.setPreviewTexture(dummySurfaceTexture) + usingSurfaceTexture = true + camera!!.startPreview() + + processingThread = Thread(processingRunnable) + processingRunnable.setActive(true) + processingThread!!.start() + return this + } + + /** + * Opens the camera and starts sending preview frames to the underlying detector. The supplied + * surface holder is used for the preview so frames can be displayed to the user. + * + * @param surfaceHolder the surface holder to use for the preview frames + * @throws IOException if the supplied surface holder could not be used as the preview display + */ + @RequiresPermission(Manifest.permission.CAMERA) + @Synchronized + @Throws(IOException::class) + fun start(surfaceHolder: SurfaceHolder): CameraSource { + if (camera != null) { + return this + } + + camera = createCamera() + camera!!.setPreviewDisplay(surfaceHolder) + camera!!.startPreview() + + processingThread = Thread(processingRunnable) + processingRunnable.setActive(true) + processingThread!!.start() + + usingSurfaceTexture = false + return this + } + + /** + * Closes the camera and stops sending frames to the underlying frame detector. + * + * + * This camera source may be restarted again by calling [.start] or [ ][.start]. + * + * + * Call [.release] instead to completely shut down this camera source and release the + * resources of the underlying detector. 
+ */ + @Synchronized + fun stop() { + processingRunnable.setActive(false) + if (processingThread != null) { + try { + // Wait for the thread to complete to ensure that we can't have multiple threads + // executing at the same time (i.e., which would happen if we called start too + // quickly after stop). + processingThread!!.join() + } catch (e: InterruptedException) { + Log.d(TAG, "Frame processing thread interrupted on release.") + } + + processingThread = null + } + + if (camera != null) { + camera!!.stopPreview() + camera!!.setPreviewCallbackWithBuffer(null) + try { + if (usingSurfaceTexture) { + camera!!.setPreviewTexture(null) + } else { + camera!!.setPreviewDisplay(null) + } + } catch (e: Exception) { + Log.e(TAG, "Failed to clear camera preview: $e") + } + + camera!!.release() + camera = null + } + + // Release the reference to any image buffers, since these will no longer be in use. + bytesToByteBuffer.clear() + } + + /** Changes the facing of the camera. */ + @Synchronized + fun setFacing(facing: Int) { + if (facing != CAMERA_FACING_BACK && facing != CAMERA_FACING_FRONT) { + throw IllegalArgumentException("Invalid camera: $facing") + } + this.cameraFacing = facing + } + + /** + * Opens the camera and applies the user settings. 
+ * + * @throws IOException if camera cannot be found or preview cannot be processed + */ + @SuppressLint("InlinedApi") + @Throws(IOException::class) + private fun createCamera(): Camera { + val requestedCameraId = getIdForRequestedCamera(cameraFacing) + if (requestedCameraId == -1) { + throw IOException("Could not find requested camera.") + } + val camera = Camera.open(requestedCameraId) + + val sizePair = selectSizePair(camera, requestedPreviewWidth, requestedPreviewHeight) + ?: throw IOException("Could not find suitable preview size.") + val pictureSize = sizePair.pictureSize() + previewSize = sizePair.previewSize() + + val previewFpsRange = selectPreviewFpsRange(camera, requestedFps) + ?: throw IOException("Could not find suitable preview frames per second range.") + + val parameters = camera.parameters + + if (pictureSize != null) { + parameters.setPictureSize(pictureSize.width, pictureSize.height) + } + parameters.setPreviewSize(previewSize!!.width, previewSize!!.height) + parameters.setPreviewFpsRange( + previewFpsRange[Camera.Parameters.PREVIEW_FPS_MIN_INDEX], + previewFpsRange[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]) + parameters.previewFormat = ImageFormat.NV21 + + setRotation(camera, parameters, requestedCameraId) + + if (requestedAutoFocus) { + if (parameters + .supportedFocusModes + .contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) { + parameters.focusMode = Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO + } else { + Log.i(TAG, "Camera auto focus is not supported on this device.") + } + } + + camera.parameters = parameters + + // Four frame buffers are needed for working with the camera: + // + // one for the frame that is currently being executed upon in doing detection + // one for the next pending frame to process immediately upon completing detection + // two for the frames that the camera uses to populate future preview images + // + // Through trial and error it appears that two free buffers, in addition to the two buffers + // used in 
this code, are needed for the camera to work properly. Perhaps the camera has + // one thread for acquiring images, and another thread for calling into user code. If only + // three buffers are used, then the camera will spew thousands of warning messages when + // detection takes a non-trivial amount of time. + camera.setPreviewCallbackWithBuffer(CameraPreviewCallback()) + camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) + camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) + camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) + camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) + + return camera + } + + /** + * Stores a preview size and a corresponding same-aspect-ratio picture size. To avoid distorted + * preview images on some devices, the picture size must be set to a size that is the same aspect + * ratio as the preview size or the preview may end up being distorted. If the picture size is + * null, then there is no picture size with the same aspect ratio as the preview size. + */ + private class SizePair internal constructor( + previewSize: android.hardware.Camera.Size, + pictureSize: android.hardware.Camera.Size?) { + private val preview: Size + private var picture: Size? = null + + init { + preview = Size(previewSize.width, previewSize.height) + if (pictureSize != null) { + picture = Size(pictureSize.width, pictureSize.height) + } + } + + internal fun previewSize(): Size { + return preview + } + + internal fun pictureSize(): Size? { + return picture + } + } + + /** + * Calculates the correct rotation for the given camera id and sets the rotation in the + * parameters. It also sets the camera's display orientation and rotation. 
+ * + * @param parameters the camera parameters for which to set the rotation + * @param cameraId the camera id to set rotation based on + */ + private fun setRotation(camera: Camera, parameters: Camera.Parameters, cameraId: Int) { + val windowManager = activity.getSystemService(Context.WINDOW_SERVICE) as WindowManager + var degrees = 0 + val rotation = windowManager.defaultDisplay.rotation + when (rotation) { + Surface.ROTATION_0 -> degrees = 0 + Surface.ROTATION_90 -> degrees = 90 + Surface.ROTATION_180 -> degrees = 180 + Surface.ROTATION_270 -> degrees = 270 + else -> Log.e(TAG, "Bad rotation value: $rotation") + } + + val cameraInfo = Camera.CameraInfo() + Camera.getCameraInfo(cameraId, cameraInfo) + + val angle: Int + val displayAngle: Int + if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) { + angle = (cameraInfo.orientation + degrees) % 360 + displayAngle = (360 - angle) % 360 // compensate for it being mirrored + } else { // back-facing + angle = (cameraInfo.orientation - degrees + 360) % 360 + displayAngle = angle + } + + // This corresponds to the rotation constants. + this.rotation = angle / 90 + + camera.setDisplayOrientation(displayAngle) + parameters.setRotation(angle) + } + + /** + * Creates one buffer for the camera preview callback. The size of the buffer is based off of the + * camera preview size and the format of the camera image. + * + * @return a new preview buffer of the appropriate size for the current camera settings + */ + @SuppressLint("InlinedApi") + private fun createPreviewBuffer(previewSize: Size): ByteArray { + val bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21) + val sizeInBits = previewSize.height.toLong() * previewSize.width.toLong() * bitsPerPixel.toLong() + val bufferSize = Math.ceil(sizeInBits / 8.0).toInt() + 1 + + // Creating the byte array this way and wrapping it, as opposed to using .allocate(), + // should guarantee that there will be an array to work with. 
+ val byteArray = ByteArray(bufferSize) + val buffer = ByteBuffer.wrap(byteArray) + if (!buffer.hasArray() || buffer.array() != byteArray) { + // I don't think that this will ever happen. But if it does, then we wouldn't be + // passing the preview content to the underlying detector later. + throw IllegalStateException("Failed to create valid buffer for camera source.") + } + + bytesToByteBuffer[byteArray] = buffer + return byteArray + } + + // ============================================================================================== + // Frame processing + // ============================================================================================== + + /** Called when the camera has a new preview frame. */ + private inner class CameraPreviewCallback : Camera.PreviewCallback { + override fun onPreviewFrame(data: ByteArray, camera: Camera) { + processingRunnable.setNextFrame(data, camera) + } + } + + internal fun setMachineLearningFrameProcessor(processor: VisionImageProcessor) { + synchronized(processorLock) { + cleanScreen() + if (frameProcessor != null) { + frameProcessor!!.stop() + } + frameProcessor = processor + } + } + + /** + * This runnable controls access to the underlying receiver, calling it to process frames when + * available from the camera. This is designed to run detection on frames as fast as possible + * (i.e., without unnecessary context switching or waiting on the next frame). + * + * + * While detection is running on a frame, new frames may be received from the camera. As these + * frames come in, the most recent frame is held onto as pending. As soon as detection and its + * associated processing is done for the previous frame, detection on the mostly recently received + * frame will immediately start on the same thread. + */ + private inner class FrameProcessingRunnable internal constructor() : Runnable { + + // This lock guards all of the member variables below. 
+ private val lock = ReentrantLock() + private var active = true + + // These pending variables hold the state associated with the new frame awaiting processing. + private var pendingFrameData: ByteBuffer? = null + + /** + * Releases the underlying receiver. This is only safe to do after the associated thread has + * completed, which is managed in camera source's release method above. + */ + @SuppressLint("Assert") + internal fun release() { + assert(processingThread!!.state == Thread.State.TERMINATED) + } + + /** Marks the runnable as active/not active. Signals any blocked threads to continue. */ + internal fun setActive(active: Boolean) { + synchronized(lock) { + this.active = active + val condition = lock.newCondition() + lock.lock() + condition.signalAll() + } + } + + /** + * Sets the frame data received from the camera. This adds the previous unused frame buffer (if + * present) back to the camera, and keeps a pending reference to the frame data for future use. + */ + internal fun setNextFrame(data: ByteArray, camera: Camera) { + synchronized(lock) { + if (pendingFrameData != null) { + camera.addCallbackBuffer(pendingFrameData!!.array()) + pendingFrameData = null + } + + if (!bytesToByteBuffer.containsKey(data)) { + Log.d( + TAG, + "Skipping frame. Could not find ByteBuffer associated with the image " + "data from the camera.") + return + } + + pendingFrameData = bytesToByteBuffer[data] + + // Notify the processor thread if it is waiting on the next frame (see below). + val condition = lock.newCondition() + lock.lock() + condition.signalAll() + } + } + + /** + * As long as the processing thread is active, this executes detection on frames continuously. + * The next pending frame is either immediately available or hasn't been received yet. Once it + * is available, we transfer the frame info to local variables and run detection on that frame. + * It immediately loops back for the next frame without pausing. 
+ * + * + * If detection takes longer than the time in between new frames from the camera, this will + * mean that this loop will run without ever waiting on a frame, avoiding any context switching + * or frame acquisition time latency. + * + * + * If you find that this is using more CPU than you'd like, you should probably decrease the + * FPS setting above to allow for some idle time in between frames. + */ + @SuppressLint("InlinedApi") + override fun run() { + lateinit var data: ByteBuffer + + while (true) { + synchronized(lock) { + while (active && pendingFrameData == null) { + try { + // Wait for the next frame to be received from the camera, since we + // don't have it yet. + val condition = lock.newCondition() + lock.lock() + condition.await() + } catch (e: InterruptedException) { + Log.d(TAG, "Frame processing loop terminated.", e) + return + } + + } + + if (!active) { + // Exit the loop once this camera source is stopped or released. We check + // this here, immediately after the wait() above, to handle the case where + // setActive(false) had been called, triggering the termination of this + // loop. + return + } + + // Hold onto the frame data locally, so that we can use this for detection + // below. We need to clear pendingFrameData to ensure that this buffer isn't + // recycled back to the camera before we are done using that data. + data = pendingFrameData!! + pendingFrameData = null + } + + // The code below needs to run outside of synchronization, because this will allow + // the camera to add pending frame(s) while we are running detection on the current + // frame. 
+ + try { + synchronized(processorLock) { + Log.d(TAG, "Process an image") + frameProcessor!!.process( + data, + FrameMetadata.Builder() + .setWidth(previewSize!!.width) + .setHeight(previewSize!!.height) + .setRotation(rotation) + .setCameraFacing(cameraFacing) + .build(), + graphicOverlay) + } + } catch (t: Throwable) { + Log.e(TAG, "Exception thrown from receiver.", t) + } finally { + camera!!.addCallbackBuffer(data.array()) + } + } + } + } + + /** Cleans up graphicOverlay and child classes can do their cleanups as well . */ + private fun cleanScreen() { + graphicOverlay.clear() + } + + companion object { + @SuppressLint("InlinedApi") + val CAMERA_FACING_BACK = Camera.CameraInfo.CAMERA_FACING_BACK + + @SuppressLint("InlinedApi") + val CAMERA_FACING_FRONT = Camera.CameraInfo.CAMERA_FACING_FRONT + + private const val TAG = "MIDemoApp:CameraSource" + + /** + * The dummy surface texture must be assigned a chosen name. Since we never use an OpenGL context, + * we can choose any ID we want here. The dummy surface texture is not a crazy hack - it is + * actually how the camera team recommends using the camera without a preview. + */ + private val DUMMY_TEXTURE_NAME = 100 + + /** + * If the absolute difference between a preview size aspect ratio and a picture size aspect ratio + * is less than this tolerance, they are considered to be the same aspect ratio. + */ + private val ASPECT_RATIO_TOLERANCE = 0.01f + + /** + * Gets the id for the camera specified by the direction it is facing. Returns -1 if no such + * camera was found. + * + * @param facing the desired camera (front-facing or rear-facing) + */ + private fun getIdForRequestedCamera(facing: Int): Int { + val cameraInfo = Camera.CameraInfo() + for (i in 0 until Camera.getNumberOfCameras()) { + Camera.getCameraInfo(i, cameraInfo) + if (cameraInfo.facing == facing) { + return i + } + } + return -1 + } + + /** + * Selects the most suitable preview and picture size, given the desired width and height. 
+ * + * + * Even though we only need to find the preview size, it's necessary to find both the preview + * size and the picture size of the camera together, because these need to have the same aspect + * ratio. On some hardware, if you would only set the preview size, you will get a distorted + * image. + * + * @param camera the camera to select a preview size from + * @param desiredWidth the desired width of the camera preview frames + * @param desiredHeight the desired height of the camera preview frames + * @return the selected preview and picture size pair + */ + private fun selectSizePair(camera: Camera, desiredWidth: Int, desiredHeight: Int): SizePair? { + val validPreviewSizes = generateValidPreviewSizeList(camera) + + // The method for selecting the best size is to minimize the sum of the differences between + // the desired values and the actual values for width and height. This is certainly not the + // only way to select the best size, but it provides a decent tradeoff between using the + // closest aspect ratio vs. using the closest pixel area. + var selectedPair: SizePair? = null + var minDiff = Integer.MAX_VALUE + for (sizePair in validPreviewSizes) { + val size = sizePair.previewSize() + val diff = Math.abs(size.width - desiredWidth) + Math.abs(size.height - desiredHeight) + if (diff < minDiff) { + selectedPair = sizePair + minDiff = diff + } + } + + return selectedPair + } + + /** + * Generates a list of acceptable preview sizes. Preview sizes are not acceptable if there is not + * a corresponding picture size of the same aspect ratio. If there is a corresponding picture size + * of the same aspect ratio, the picture size is paired up with the preview size. + * + * + * This is necessary because even if we don't use still pictures, the still picture size must + * be set to a size that is the same aspect ratio as the preview size we choose. Otherwise, the + * preview images may be distorted on some devices. 
+ */ + private fun generateValidPreviewSizeList(camera: Camera): List { + val parameters = camera.parameters + val supportedPreviewSizes = parameters.supportedPreviewSizes + val supportedPictureSizes = parameters.supportedPictureSizes + val validPreviewSizes = ArrayList() + for (previewSize in supportedPreviewSizes) { + val previewAspectRatio = previewSize.width.toFloat() / previewSize.height.toFloat() + + // By looping through the picture sizes in order, we favor the higher resolutions. + // We choose the highest resolution in order to support taking the full resolution + // picture later. + for (pictureSize in supportedPictureSizes) { + val pictureAspectRatio = pictureSize.width.toFloat() / pictureSize.height.toFloat() + if (Math.abs(previewAspectRatio - pictureAspectRatio) < ASPECT_RATIO_TOLERANCE) { + validPreviewSizes.add(SizePair(previewSize, pictureSize)) + break + } + } + } + + // If there are no picture sizes with the same aspect ratio as any preview sizes, allow all + // of the preview sizes and hope that the camera can handle it. Probably unlikely, but we + // still account for it. + if (validPreviewSizes.size == 0) { + Log.w(TAG, "No preview sizes have a corresponding same-aspect-ratio picture size") + for (previewSize in supportedPreviewSizes) { + // The null picture size will let us know that we shouldn't set a picture size. + validPreviewSizes.add(SizePair(previewSize, null)) + } + } + + return validPreviewSizes + } + + /** + * Selects the most suitable preview frames per second range, given the desired frames per second. + * + * @param camera the camera to select a frames per second range from + * @param desiredPreviewFps the desired frames per second for the camera preview frames + * @return the selected preview frames per second range + */ + @SuppressLint("InlinedApi") + private fun selectPreviewFpsRange(camera: Camera, desiredPreviewFps: Float): IntArray? 
{ + // The camera API uses integers scaled by a factor of 1000 instead of floating-point frame + // rates. + val desiredPreviewFpsScaled = (desiredPreviewFps * 1000.0f).toInt() + + // The method for selecting the best range is to minimize the sum of the differences between + // the desired value and the upper and lower bounds of the range. This may select a range + // that the desired value is outside of, but this is often preferred. For example, if the + // desired frame rate is 29.97, the range (30, 30) is probably more desirable than the + // range (15, 30). + var selectedFpsRange: IntArray? = null + var minDiff = Integer.MAX_VALUE + val previewFpsRangeList = camera.parameters.supportedPreviewFpsRange + for (range in previewFpsRangeList) { + val deltaMin = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX] + val deltaMax = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX] + val diff = Math.abs(deltaMin) + Math.abs(deltaMax) + if (diff < minDiff) { + selectedFpsRange = range + minDiff = diff + } + } + return selectedFpsRange + } + } +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt new file mode 100644 index 0000000000..7de4526a7a --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt @@ -0,0 +1,132 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.content.Context +import android.content.Intent +import android.content.pm.PackageManager +import android.os.Bundle +import android.support.v4.app.ActivityCompat +import android.support.v4.content.ContextCompat +import android.support.v7.app.AppCompatActivity +import android.util.Log +import android.view.LayoutInflater +import android.view.View +import android.view.ViewGroup +import android.widget.AdapterView +import android.widget.ArrayAdapter +import 
android.widget.TextView
import com.google.firebase.samples.apps.mlkit.java.LivePreviewActivity
import com.google.firebase.samples.apps.mlkit.java.StillImageActivity
import java.util.ArrayList
import kotlinx.android.synthetic.main.activity_chooser.*

/**
 * Demo app chooser which takes care of runtime permission requesting and allows you to pick from
 * all available testing Activities.
 */
class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissionsResultCallback,
    AdapterView.OnItemClickListener {

    private val TAG = "ChooserActivity"
    private val PERMISSION_REQUESTS = 1

    // NOTE(review): generic arguments were stripped in this paste ("arrayOf>", "Array",
    // "ArrayList()"); restored below (Class<*>, Array<String?>, ArrayList<String>) so this
    // compiles.
    private val CLASSES = arrayOf<Class<*>>(
        LivePreviewActivity::class.java,
        StillImageActivity::class.java
    )

    private val DESCRIPTION_IDS = intArrayOf(
        R.string.desc_camera_source_activity,
        R.string.desc_still_image_activity
    )

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        Log.d(TAG, "onCreate")

        setContentView(R.layout.activity_chooser)

        // Set up ListView and Adapter
        val adapter = MyArrayAdapter(this, android.R.layout.simple_list_item_2, CLASSES)
        adapter.setDescriptionIds(DESCRIPTION_IDS)

        test_activity_list_view.adapter = adapter
        test_activity_list_view.onItemClickListener = this

        if (!allPermissionsGranted()) {
            getRuntimePermissions()
        }
    }

    override fun onItemClick(parent: AdapterView<*>, view: View, position: Int, id: Long) {
        startActivity(Intent(this, CLASSES[position]))
    }

    /** Returns the permissions declared in the manifest, or an empty array on any failure. */
    private fun getRequiredPermissions(): Array<String?> {
        return try {
            val info = this.packageManager
                .getPackageInfo(this.packageName, PackageManager.GET_PERMISSIONS)
            val ps = info.requestedPermissions
            if (ps != null && ps.isNotEmpty()) ps else arrayOfNulls<String>(0)
        } catch (e: Exception) {
            arrayOfNulls<String>(0)
        }
    }

    private fun allPermissionsGranted(): Boolean {
        for (permission in getRequiredPermissions()) {
            if (!isPermissionGranted(this, permission!!)) {
                return false
            }
        }
        return true
    }

    /** Requests every manifest-declared permission that has not yet been granted. */
    private fun getRuntimePermissions() {
        val allNeededPermissions = ArrayList<String>()
        for (permission in getRequiredPermissions()) {
            if (!isPermissionGranted(this, permission!!)) {
                allNeededPermissions.add(permission)
            }
        }

        if (!allNeededPermissions.isEmpty()) {
            ActivityCompat.requestPermissions(
                this, allNeededPermissions.toTypedArray(), PERMISSION_REQUESTS)
        }
    }

    private fun isPermissionGranted(context: Context, permission: String): Boolean {
        if (ContextCompat.checkSelfPermission(context, permission) == PackageManager.PERMISSION_GRANTED) {
            Log.i(TAG, "Permission granted: $permission")
            return true
        }
        Log.i(TAG, "Permission NOT granted: $permission")
        return false
    }

    /** Adapter that shows each demo Activity's simple name and a description line. */
    private class MyArrayAdapter(
        private val ctx: Context,
        resource: Int,
        private val classes: Array<Class<*>>
    ) : ArrayAdapter<Class<*>>(ctx, resource, classes) {
        private var descriptionIds: IntArray? = null

        override fun getView(position: Int, convertView: View?, parent: ViewGroup): View {
            var view = convertView

            if (convertView == null) {
                val inflater = ctx.getSystemService(Context.LAYOUT_INFLATER_SERVICE) as LayoutInflater
                view = inflater.inflate(android.R.layout.simple_list_item_2, null)
            }

            (view!!.findViewById(android.R.id.text1) as TextView).text = classes[position].simpleName
            (view.findViewById(android.R.id.text2) as TextView).setText(descriptionIds!![position])

            return view
        }

        fun setDescriptionIds(descriptionIds: IntArray) {
            this.descriptionIds = descriptionIds
        }
    }
}
\ No newline at end of file
diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt
new file mode 100644
index 0000000000..f18378b21e
--- /dev/null
+++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt
@@ -0,0 +1,58 @@
package com.google.firebase.samples.apps.mlkit.kotlin

/**
Describing a frame info. */ +data class FrameMetadata( + private val width: Int, + private val height: Int, + private val rotation: Int, + private val cameraFacing: Int +) { + fun getWidth(): Int { + return width + } + + fun getHeight(): Int { + return height + } + + fun getRotation(): Int { + return rotation + } + + fun getCameraFacing(): Int { + return cameraFacing + } + + /** Builder of [FrameMetadata]. */ + class Builder { + + private var width: Int = 0 + private var height: Int = 0 + private var rotation: Int = 0 + private var cameraFacing: Int = 0 + + fun setWidth(width: Int): Builder { + this.width = width + return this + } + + fun setHeight(height: Int): Builder { + this.height = height + return this + } + + fun setRotation(rotation: Int): Builder { + this.rotation = rotation + return this + } + + fun setCameraFacing(facing: Int): Builder { + cameraFacing = facing + return this + } + + fun build(): FrameMetadata { + return FrameMetadata(width, height, rotation, cameraFacing) + } + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt new file mode 100644 index 0000000000..0663874609 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt @@ -0,0 +1,152 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.content.Context +import android.graphics.Canvas +import android.util.AttributeSet +import android.view.View +import com.google.android.gms.vision.CameraSource +import java.util.HashSet + +/** + * A view which renders a series of custom graphics to be overlayed on top of an associated preview + * (i.e., the camera preview). The creator can add graphics objects, update the objects, and remove + * them, triggering the appropriate drawing and invalidation within the view. + * + *

Supports scaling and mirroring of the graphics relative the camera's preview properties. The + * idea is that detection items are expressed in terms of a preview size, but need to be scaled up + * to the full view size, and also mirrored in the case of the front-facing camera. + * + *

Associated {@link Graphic} items should use the following methods to convert to view + * coordinates for the graphics that are drawn: + * + *

    + *
  1. {@link Graphic#scaleX(float)} and {@link Graphic#scaleY(float)} adjust the size of the + * supplied value from the preview scale to the view scale. + *
  2. {@link Graphic#translateX(float)} and {@link Graphic#translateY(float)} adjust the + * coordinate from the preview's coordinate system to the view coordinate system. + *
+ */ +class GraphicOverlay(context: Context, attrs: AttributeSet) : View(context, attrs) { + private val lock = Any() + private var previewWidth = 0 + private var previewHeight = 0 + private val graphics = HashSet() + private var widthScaleFactor = 1.0f + private var heightScaleFactor = 1.0f + private var facing = CameraSource.CAMERA_FACING_BACK + + /** + * Base class for a custom graphics object to be rendered within the graphic overlay. Subclass + * this and implement the [Graphic.draw] method to define the graphics element. Add + * instances to the overlay using [GraphicOverlay.add]. + */ + abstract class Graphic(private val overlay: GraphicOverlay) { + + /** + * Draw the graphic on the supplied canvas. Drawing should use the following methods to convert + * to view coordinates for the graphics that are drawn: + * + * + * 1. [Graphic.scaleX] and [Graphic.scaleY] adjust the size of the + * supplied value from the preview scale to the view scale. + * 1. [Graphic.translateX] and [Graphic.translateY] adjust the + * coordinate from the preview's coordinate system to the view coordinate system. + * + * + * @param canvas drawing canvas + */ + abstract fun draw(canvas: Canvas) + + /** + * Adjusts a horizontal value of the supplied value from the preview scale to the view scale. + */ + fun scaleX(horizontal: Float): Float { + return horizontal * overlay.widthScaleFactor + } + + /** Adjusts a vertical value of the supplied value from the preview scale to the view scale. */ + fun scaleY(vertical: Float): Float { + return vertical * overlay.heightScaleFactor + } + + /** Returns the application context of the app. */ + val applicationContext: Context + get() = overlay.context.applicationContext + + /** + * Adjusts the x coordinate from the preview's coordinate system to the view coordinate system. 
+ */ + fun translateX(x: Float): Float { + return if (overlay.facing == CameraSource.CAMERA_FACING_FRONT) { + overlay.width - scaleX(x) + } else { + scaleX(x) + } + } + + /** + * Adjusts the y coordinate from the preview's coordinate system to the view coordinate system. + */ + fun translateY(y: Float): Float { + return scaleY(y) + } + + fun postInvalidate() { + overlay.postInvalidate() + } + } + + /** Removes all graphics from the overlay. */ + fun clear() { + synchronized (lock) { + graphics.clear() + } + postInvalidate() + } + + /** Adds a graphic to the overlay. */ + fun add(graphic: Graphic) { + synchronized (lock) { + graphics.add(graphic) + } + postInvalidate() + } + + /** Removes a graphic from the overlay. */ + fun remove(graphic: Graphic) { + synchronized (lock) { + graphics.remove(graphic) + } + postInvalidate() + } + + /** + * Sets the camera attributes for size and facing direction, which informs how to transform image + * coordinates later. + */ + fun setCameraInfo(previewWidth: Int, previewHeight: Int, facing: Int) { + synchronized (lock) { + this.previewWidth = previewWidth + this.previewHeight = previewHeight + this.facing = facing + } + postInvalidate() + } + + /** Draws the overlay with its associated graphic objects. 
*/ + override fun onDraw(canvas: Canvas) { + super.onDraw(canvas) + + synchronized (lock) { + if (previewWidth != 0 && previewHeight != 0) { + widthScaleFactor = canvas.width.toFloat() / previewWidth.toFloat() + heightScaleFactor = canvas.height.toFloat() / previewHeight.toFloat() + } + + for (graphic in graphics) { + graphic.draw(canvas) + } + } + } + +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt new file mode 100644 index 0000000000..3cd22f4964 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt @@ -0,0 +1,252 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.content.Context +import android.content.pm.PackageManager +import android.os.Bundle +import android.support.v4.app.ActivityCompat +import android.support.v4.content.ContextCompat +import android.support.v7.app.AppCompatActivity +import android.util.Log +import android.view.View +import android.widget.* +import com.google.android.gms.common.annotation.KeepName +import com.google.firebase.ml.common.FirebaseMLException +import com.google.firebase.samples.apps.mlkit.kotlin.barcodescanning.BarcodeScanningProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.custommodel.CustomImageClassifierProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.facedetection.FaceDetectionProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.imagelabeling.ImageLabelingProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.textrecognition.CameraSourcePreview +import com.google.firebase.samples.apps.mlkit.kotlin.textrecognition.TextRecognitionProcessor +import java.io.IOException +import java.util.* +import kotlinx.android.synthetic.main.activity_live_preview.* + +/** Demo app showing the various features of ML Kit for Firebase. 
This class is used to
 * set up continuous frame processing on frames from a camera source. */
@KeepName
class LivePreviewActivity :
    AppCompatActivity(),
    ActivityCompat.OnRequestPermissionsResultCallback,
    AdapterView.OnItemSelectedListener,
    CompoundButton.OnCheckedChangeListener {

    private var cameraSource: CameraSource? = null
    private var preview: CameraSourcePreview? = null
    private var graphicOverlay: GraphicOverlay? = null
    private var selectedModel = FACE_DETECTION

    // NOTE(review): generic arguments were stripped in this paste ("Array", "ArrayList()");
    // restored below (Array<String?>, ArrayList<String>) so this compiles.
    private val requiredPermissions: Array<String?>
        get() {
            return try {
                val info = this.packageManager
                    .getPackageInfo(this.packageName, PackageManager.GET_PERMISSIONS)
                val ps = info.requestedPermissions
                if (ps != null && ps.isNotEmpty()) ps else arrayOfNulls<String>(0)
            } catch (e: Exception) {
                arrayOfNulls<String>(0)
            }
        }

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        Log.d(TAG, "onCreate")

        setContentView(R.layout.activity_live_preview)

        preview = findViewById<CameraSourcePreview>(R.id.firePreview)
        if (preview == null) {
            Log.d(TAG, "Preview is null")
        }
        graphicOverlay = findViewById<GraphicOverlay>(R.id.fireFaceOverlay)
        if (graphicOverlay == null) {
            Log.d(TAG, "graphicOverlay is null")
        }

        val spinner = findViewById<Spinner>(R.id.spinner)
        val options = ArrayList<String>()
        options.add(FACE_DETECTION)
        options.add(TEXT_DETECTION)
        options.add(BARCODE_DETECTION)
        options.add(IMAGE_LABEL_DETECTION)
        options.add(CLASSIFICATION)
        // Creating adapter for spinner
        val dataAdapter = ArrayAdapter(this, R.layout.spinner_style, options)
        // Drop down layout style - list view with radio button
        dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
        // attaching data adapter to spinner
        spinner.adapter = dataAdapter
        spinner.onItemSelectedListener = this

        val facingSwitch = findViewById<ToggleButton>(R.id.facingswitch)
        facingSwitch.setOnCheckedChangeListener(this)

        if (allPermissionsGranted()) {
            createCameraSource(selectedModel)
        } else {
            getRuntimePermissions()
        }
    }

    @Synchronized
    override fun onItemSelected(parent: AdapterView<*>, view: View, pos: Int, id: Long) {
        // An item was selected. You can retrieve the selected item using
        // parent.getItemAtPosition(pos)
        selectedModel = parent.getItemAtPosition(pos).toString()
        Log.d(TAG, "Selected model: $selectedModel")
        preview!!.stop()
        if (allPermissionsGranted()) {
            createCameraSource(selectedModel)
            startCameraSource()
        } else {
            getRuntimePermissions()
        }
    }

    override fun onNothingSelected(parent: AdapterView<*>) {
        // Do nothing.
    }

    override fun onCheckedChanged(buttonView: CompoundButton, isChecked: Boolean) {
        Log.d(TAG, "Set facing")
        cameraSource?.setFacing(
            if (isChecked) CameraSource.CAMERA_FACING_FRONT else CameraSource.CAMERA_FACING_BACK)
        preview!!.stop()
        startCameraSource()
    }

    /** Creates the camera source (if needed) and installs the processor for the given model. */
    private fun createCameraSource(model: String) {
        // If there's no existing cameraSource, create one.
        if (cameraSource == null) {
            cameraSource = CameraSource(this, graphicOverlay!!)
        }

        try {
            when (model) {
                CLASSIFICATION -> {
                    Log.i(TAG, "Using Custom Image Classifier Processor")
                    cameraSource!!.setMachineLearningFrameProcessor(CustomImageClassifierProcessor(this))
                }
                TEXT_DETECTION -> {
                    Log.i(TAG, "Using Text Detector Processor")
                    cameraSource!!.setMachineLearningFrameProcessor(TextRecognitionProcessor())
                }
                FACE_DETECTION -> {
                    Log.i(TAG, "Using Face Detector Processor")
                    cameraSource!!.setMachineLearningFrameProcessor(FaceDetectionProcessor())
                }
                BARCODE_DETECTION -> {
                    Log.i(TAG, "Using Barcode Detector Processor")
                    cameraSource!!.setMachineLearningFrameProcessor(BarcodeScanningProcessor())
                }
                IMAGE_LABEL_DETECTION -> {
                    Log.i(TAG, "Using Image Label Detector Processor")
                    cameraSource!!.setMachineLearningFrameProcessor(ImageLabelingProcessor())
                }
                else -> Log.e(TAG, "Unknown model: $model")
            }
        } catch (e: FirebaseMLException) {
            Log.e(TAG, "can not create camera source: $model")
        }
    }

    /**
     * Starts or restarts the camera source, if it exists. If the camera source doesn't exist yet
     * (e.g., because onResume was called before the camera source was created), this will be
     * called again when the camera source is created.
     */
    private fun startCameraSource() {
        cameraSource?.let { source ->
            try {
                if (preview == null) {
                    Log.d(TAG, "resume: Preview is null")
                }
                if (graphicOverlay == null) {
                    Log.d(TAG, "resume: graphOverlay is null")
                }
                preview!!.start(source, graphicOverlay!!)
            } catch (e: IOException) {
                Log.e(TAG, "Unable to start camera source.", e)
                source.release()
                cameraSource = null
            }
        }
    }

    public override fun onResume() {
        super.onResume()
        Log.d(TAG, "onResume")
        startCameraSource()
    }

    /** Stops the camera. */
    override fun onPause() {
        super.onPause()
        preview!!.stop()
    }

    public override fun onDestroy() {
        super.onDestroy()
        cameraSource?.release()
    }

    private fun allPermissionsGranted(): Boolean {
        for (permission in requiredPermissions) {
            if (!isPermissionGranted(this, permission!!)) {
                return false
            }
        }
        return true
    }

    /** Requests every manifest-declared permission that has not yet been granted. */
    private fun getRuntimePermissions() {
        val allNeededPermissions = ArrayList<String>()
        for (permission in requiredPermissions) {
            if (!isPermissionGranted(this, permission!!)) {
                allNeededPermissions.add(permission)
            }
        }

        if (!allNeededPermissions.isEmpty()) {
            ActivityCompat.requestPermissions(
                this, allNeededPermissions.toTypedArray(), PERMISSION_REQUESTS)
        }
    }

    override fun onRequestPermissionsResult(
        requestCode: Int,
        permissions: Array<String>,
        grantResults: IntArray
    ) {
        Log.i(TAG, "Permission granted!")
        if (allPermissionsGranted()) {
            createCameraSource(selectedModel)
        }
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
    }

    companion object {
        private const val FACE_DETECTION = "Face Detection"
        private const val TEXT_DETECTION = "Text Detection"
        private const val BARCODE_DETECTION = "Barcode Detection"
        private const val IMAGE_LABEL_DETECTION = "Label Detection"
        private const val CLASSIFICATION = "Classification"
        private const val TAG = "LivePreviewActivity"
        private const val PERMISSION_REQUESTS = 1

        private fun isPermissionGranted(context: Context, permission: String): Boolean {
            if (ContextCompat.checkSelfPermission(context, permission) == PackageManager.PERMISSION_GRANTED) {
                Log.i(TAG, "Permission granted: $permission")
                return true
            }
            Log.i(TAG, "Permission NOT granted: $permission")
            return false
        }
    }
}
\ No newline at end of file
diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt new file mode 100644 index 0000000000..01185b38fb --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt @@ -0,0 +1,321 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.app.Activity +import android.content.ContentValues +import android.content.Intent +import android.content.res.Configuration +import android.graphics.Bitmap +import android.net.Uri +import android.os.Bundle +import android.provider.MediaStore +import android.support.v7.app.AppCompatActivity +import android.util.Log +import android.util.Pair +import android.view.View +import android.widget.* +import com.google.android.gms.common.annotation.KeepName +import com.google.firebase.samples.apps.mlkit.kotlin.cloudimagelabeling.CloudImageLabelingProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition.CloudDocumentTextRecognitionProcessor +import com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition.CloudTextRecognitionProcessor +import java.util.ArrayList +import java.io.IOException + +import kotlinx.android.synthetic.main.activity_still_image.* + +/** Activity demonstrating different image detector features with a still image from camera. */ +@KeepName +class StillImageActivity: AppCompatActivity() { + + private val TAG = "StillImageActivity" + + private val CLOUD_LABEL_DETECTION = "Cloud Label" + private val CLOUD_LANDMARK_DETECTION = "Landmark" + private val CLOUD_TEXT_DETECTION = "Cloud Text" + private val CLOUD_DOCUMENT_TEXT_DETECTION = "Doc Text" + + private val SIZE_PREVIEW = "w:max" // Available on-screen width. 
+ private val SIZE_1024_768 = "w:1024" // ~1024*768 in a normal ratio + private val SIZE_640_480 = "w:640" // ~640*480 in a normal ratio + + private val KEY_IMAGE_URI = "com.googletest.firebase.ml.demo.KEY_IMAGE_URI" + private val KEY_IMAGE_MAX_WIDTH = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_WIDTH" + private val KEY_IMAGE_MAX_HEIGHT = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_HEIGHT" + private val KEY_SELECTED_SIZE = "com.googletest.firebase.ml.demo.KEY_SELECTED_SIZE" + + private val REQUEST_IMAGE_CAPTURE = 1001 + private val REQUEST_CHOOSE_IMAGE = 1002 + + private var preview: ImageView? = null + private var graphicOverlay: GraphicOverlay? = null + private var selectedMode = CLOUD_LABEL_DETECTION + private var selectedSize: String? = SIZE_PREVIEW + + internal var isLandScape: Boolean = false + + private var imageUri: Uri? = null + // Max width (portrait mode) + private var imageMaxWidth: Int? = null + // Max height (portrait mode) + private var imageMaxHeight: Int? = null + private var bitmapForDetection: Bitmap? = null + private var imageProcessor: VisionImageProcessor? = null + + override fun onCreate(savedInstanceState: Bundle?) 
{ + super.onCreate(savedInstanceState) + + setContentView(R.layout.activity_still_image) + + getImageButton.setOnClickListener( + View.OnClickListener { view -> + // Menu for selecting either: a) take new photo b) select from existing + val popup = PopupMenu(this@StillImageActivity, view) + popup.setOnMenuItemClickListener { menuItem -> + when (menuItem.itemId) { + R.id.select_images_from_local -> { + startChooseImageIntentForResult() + true + } + R.id.take_photo_using_camera -> { + startCameraIntentForResult() + true + } + else -> false + } + } + + val inflater = popup.menuInflater + inflater.inflate(R.menu.camera_button_menu, popup.menu) + popup.show() + }) + preview = findViewById(R.id.previewPane) as ImageView + if (preview == null) { + Log.d(TAG, "Preview is null") + } + graphicOverlay = findViewById(R.id.previewOverlay) as GraphicOverlay + if (graphicOverlay == null) { + Log.d(TAG, "graphicOverlay is null") + } + + populateFeatureSelector() + populateSizeSelector() + + createImageProcessor() + + isLandScape = resources.configuration.orientation == Configuration.ORIENTATION_LANDSCAPE + + if (savedInstanceState != null) { + imageUri = savedInstanceState.getParcelable(KEY_IMAGE_URI) + imageMaxWidth = savedInstanceState.getInt(KEY_IMAGE_MAX_WIDTH) + imageMaxHeight = savedInstanceState.getInt(KEY_IMAGE_MAX_HEIGHT) + selectedSize = savedInstanceState.getString(KEY_SELECTED_SIZE) + + if (imageUri != null) { + tryReloadAndDetectInImage() + } + } + } + + + private fun populateFeatureSelector() { + val options = ArrayList() + options.add(CLOUD_LABEL_DETECTION) + options.add(CLOUD_LANDMARK_DETECTION) + options.add(CLOUD_TEXT_DETECTION) + options.add(CLOUD_DOCUMENT_TEXT_DETECTION) + // Creating adapter for featureSpinner + val dataAdapter = ArrayAdapter(this, R.layout.spinner_style, options) + // Drop down layout style - list view with radio button + dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item) + // attaching data adapter to spinner 
+ featureSelector.adapter = dataAdapter + featureSelector.onItemSelectedListener = object : AdapterView.OnItemSelectedListener { + + override fun onItemSelected( + parentView: AdapterView<*>, selectedItemView: View, pos: Int, id: Long) { + selectedMode = parentView.getItemAtPosition(pos).toString() + createImageProcessor() + tryReloadAndDetectInImage() + } + + override fun onNothingSelected(arg0: AdapterView<*>) {} + } + } + + private fun populateSizeSelector() { + val options = ArrayList() + options.add(SIZE_PREVIEW) + options.add(SIZE_1024_768) + options.add(SIZE_640_480) + + // Creating adapter for featureSpinner + val dataAdapter = ArrayAdapter(this, R.layout.spinner_style, options) + // Drop down layout style - list view with radio button + dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item) + // attaching data adapter to spinner + sizeSelector.adapter = dataAdapter + sizeSelector.onItemSelectedListener = object : AdapterView.OnItemSelectedListener { + + override fun onItemSelected( + parentView: AdapterView<*>, selectedItemView: View, pos: Int, id: Long) { + selectedSize = parentView.getItemAtPosition(pos).toString() + tryReloadAndDetectInImage() + } + + override fun onNothingSelected(arg0: AdapterView<*>) {} + } + } + + public override fun onSaveInstanceState(outState: Bundle) { + super.onSaveInstanceState(outState) + + outState.putParcelable(KEY_IMAGE_URI, imageUri) + if (imageMaxWidth != null) { + outState.putInt(KEY_IMAGE_MAX_WIDTH, imageMaxWidth!!) + } + if (imageMaxHeight != null) { + outState.putInt(KEY_IMAGE_MAX_HEIGHT, imageMaxHeight!!) 
+ } + outState.putString(KEY_SELECTED_SIZE, selectedSize) + } + + private fun startCameraIntentForResult() { + // Clean up last time's image + imageUri = null + preview?.setImageBitmap(null) + + val takePictureIntent = Intent(MediaStore.ACTION_IMAGE_CAPTURE) + if (takePictureIntent.resolveActivity(packageManager) != null) { + val values = ContentValues() + values.put(MediaStore.Images.Media.TITLE, "New Picture") + values.put(MediaStore.Images.Media.DESCRIPTION, "From Camera") + imageUri = contentResolver.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, values) + takePictureIntent.putExtra(MediaStore.EXTRA_OUTPUT, imageUri) + startActivityForResult(takePictureIntent, REQUEST_IMAGE_CAPTURE) + } + } + + private fun startChooseImageIntentForResult() { + val intent = Intent() + intent.type = "image/*" + intent.action = Intent.ACTION_GET_CONTENT + startActivityForResult(Intent.createChooser(intent, "Select Picture"), REQUEST_CHOOSE_IMAGE) + } + + override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent) { + if (requestCode == REQUEST_IMAGE_CAPTURE && resultCode == Activity.RESULT_OK) { + tryReloadAndDetectInImage() + } else if (requestCode == REQUEST_CHOOSE_IMAGE && resultCode == Activity.RESULT_OK) { + // In this case, imageUri is returned by the chooser, save it. 
+ imageUri = data.data + tryReloadAndDetectInImage() + } + } + + private fun tryReloadAndDetectInImage() { + try { + if (imageUri == null) { + return + } + + // Clear the overlay first + graphicOverlay!!.clear() + + val imageBitmap = MediaStore.Images.Media.getBitmap(contentResolver, imageUri) + + // Get the dimensions of the View + val targetedSize = getTargetedWidthHeight() + + val targetWidth = targetedSize.first + val maxHeight = targetedSize.second + + // Determine how much to scale down the image + val scaleFactor = Math.max( + imageBitmap.width.toFloat() / targetWidth.toFloat(), + imageBitmap.height.toFloat() / maxHeight.toFloat()) + + val resizedBitmap = Bitmap.createScaledBitmap( + imageBitmap, + (imageBitmap.width / scaleFactor).toInt(), + (imageBitmap.height / scaleFactor).toInt(), + true) + + preview!!.setImageBitmap(resizedBitmap) + bitmapForDetection = resizedBitmap + + imageProcessor!!.process(bitmapForDetection, graphicOverlay) + } catch (e: IOException) { + Log.e(TAG, "Error retrieving saved image") + } + + } + + // Returns max image width, always for portrait mode. Caller needs to swap width / height for + // landscape mode. + private fun getImageMaxWidth(): Int? { + if (imageMaxWidth == null) { + // Calculate the max width in portrait mode. This is done lazily since we need to wait for + // a UI layout pass to get the right values. So delay it to first time image rendering time. + if (isLandScape) { + imageMaxWidth = (preview!!.parent as View).height - controlPanel.height + } else { + imageMaxWidth = (preview!!.parent as View).width + } + } + + return imageMaxWidth + } + + // Returns max image height, always for portrait mode. Caller needs to swap width / height for + // landscape mode. + private fun getImageMaxHeight(): Int? { + if (imageMaxHeight == null) { + // Calculate the max width in portrait mode. This is done lazily since we need to wait for + // a UI layout pass to get the right values. So delay it to first time image rendering time. 
+ if (isLandScape) { + imageMaxHeight = (preview!!.parent as View).width + } else { + imageMaxHeight = (preview!!.parent as View).height - controlPanel.height + } + } + + return imageMaxHeight + } + + // Gets the targeted width / height. + private fun getTargetedWidthHeight(): Pair { + val targetWidth: Int + val targetHeight: Int + + when (selectedSize) { + SIZE_PREVIEW -> { + val maxWidthForPortraitMode = getImageMaxWidth()!! + val maxHeightForPortraitMode = getImageMaxHeight()!! + targetWidth = if (isLandScape) maxHeightForPortraitMode else maxWidthForPortraitMode + targetHeight = if (isLandScape) maxWidthForPortraitMode else maxHeightForPortraitMode + } + SIZE_640_480 -> { + targetWidth = if (isLandScape) 640 else 480 + targetHeight = if (isLandScape) 480 else 640 + } + SIZE_1024_768 -> { + targetWidth = if (isLandScape) 1024 else 768 + targetHeight = if (isLandScape) 768 else 1024 + } + else -> throw IllegalStateException("Unknown size") + } + + return Pair(targetWidth, targetHeight) + } + + private fun createImageProcessor() { + when (selectedMode) { + CLOUD_LABEL_DETECTION -> imageProcessor = CloudImageLabelingProcessor() + CLOUD_LANDMARK_DETECTION -> imageProcessor = CloudLandmarkRecognitionProcessor() + CLOUD_TEXT_DETECTION -> imageProcessor = CloudTextRecognitionProcessor() + CLOUD_DOCUMENT_TEXT_DETECTION -> imageProcessor = CloudDocumentTextRecognitionProcessor() + else -> throw IllegalStateException("Unknown selectedMode: $selectedMode") + } + } + +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt new file mode 100644 index 0000000000..ea9c511de4 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt @@ -0,0 +1,23 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.graphics.Bitmap +import 
android.media.Image +import com.google.firebase.ml.common.FirebaseMLException +import java.nio.ByteBuffer + +/** An interface to process the images with different ML Kit detectors and custom image models. */ +interface VisionImageProcessor { + + /** Processes the images with the underlying machine learning models. */ + @Throws(FirebaseMLException::class) + fun process(data: ByteBuffer, frameMetadata: FrameMetadata, graphicOverlay: GraphicOverlay) + + /** Processes the bitmap images. */ + fun process(bitmap: Bitmap, graphicOverlay: GraphicOverlay) + + /** Processes the images. */ + fun process(bitmap: Image, rotation: Int, graphicOverlay: GraphicOverlay) + + /** Stops the underlying machine learning model and releases resources. */ + fun stop() +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt new file mode 100644 index 0000000000..e46c783808 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt @@ -0,0 +1,101 @@ +package com.google.firebase.samples.apps.mlkit.kotlin + +import android.graphics.Bitmap +import android.media.Image +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata +import java.nio.ByteBuffer +import java.util.concurrent.atomic.AtomicBoolean + + +/** + * Abstract base class for ML Kit frame processors. Subclasses need to implement {@link + * #onSuccess(T, FrameMetadata, GraphicOverlay)} to define what they want to do with the detection + * results and {@link #detectInImage(FirebaseVisionImage)} to specify the detector object. + * + * @param The type of the detected feature. + */ +abstract class VisionProcessorBase: VisionImageProcessor { + + // Whether we should ignore process(). 
This is usually caused by feeding input data faster than + // the model can handle. + private val shouldThrottle = AtomicBoolean(false) + + override fun process( + data: ByteBuffer, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + if (shouldThrottle.get()) { + return + } + + val metadata = FirebaseVisionImageMetadata.Builder() + .setFormat(FirebaseVisionImageMetadata.IMAGE_FORMAT_NV21) + .setWidth(frameMetadata.getWidth()) + .setHeight(frameMetadata.getHeight()) + .setRotation(frameMetadata.getRotation()) + .build() + + detectInVisionImage( + FirebaseVisionImage.fromByteBuffer(data, metadata), frameMetadata, graphicOverlay) + } + + // Bitmap version + override fun process(bitmap: Bitmap, graphicOverlay: GraphicOverlay) { + if (shouldThrottle.get()) { + return + } + detectInVisionImage(FirebaseVisionImage.fromBitmap(bitmap), null, graphicOverlay) + } + + /** + * Detects feature from given media.Image + * + * @return created FirebaseVisionImage + */ + override fun process(image: Image, rotation:Int, graphicOverlay: GraphicOverlay) { + if (shouldThrottle.get()) { + return + } + // This is for overlay display's usage + val frameMetadata = FrameMetadata.Builder() + .setWidth(image.width) + .setHeight(image.height) + .build() + val fbVisionImage = FirebaseVisionImage.fromMediaImage(image, rotation) + detectInVisionImage(fbVisionImage, frameMetadata, graphicOverlay) + } + + private fun detectInVisionImage( + image: FirebaseVisionImage, + metadata: FrameMetadata?, + graphicOverlay: GraphicOverlay) { + detectInImage(image) + .addOnSuccessListener { results -> + shouldThrottle.set(false) + this@VisionProcessorBase.onSuccess(results, metadata!!, + graphicOverlay) + } + .addOnFailureListener { e -> + shouldThrottle.set(false) + this@VisionProcessorBase.onFailure(e) + } + // Begin throttling until this frame of input has been processed, either in onSuccess or + // onFailure. 
+ shouldThrottle.set(true) + } + + override fun stop() { + } + + protected abstract fun detectInImage(image: FirebaseVisionImage): Task + + protected abstract fun onSuccess( + results:T, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) + + protected abstract fun onFailure(e:Exception) + +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt new file mode 100644 index 0000000000..1678a33356 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt @@ -0,0 +1,54 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.barcodescanning + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import android.graphics.RectF +import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +class BarcodeGraphic(overlay: GraphicOverlay, barcode: FirebaseVisionBarcode) : GraphicOverlay.Graphic(overlay) { + + private val TEXT_COLOR = Color.WHITE + private val TEXT_SIZE = 54.0f + private val STROKE_WIDTH = 4.0f + + private var rectPaint: Paint + private var barcodePaint: Paint + private val barcode: FirebaseVisionBarcode? + + init { + this.barcode = barcode + + rectPaint = Paint() + rectPaint.color = TEXT_COLOR + rectPaint.style = Paint.Style.STROKE + rectPaint.strokeWidth = STROKE_WIDTH + + barcodePaint = Paint() + barcodePaint.color = TEXT_COLOR + barcodePaint.textSize = TEXT_SIZE + // Redraw the overlay, as this graphic has been added. + postInvalidate() + } + + /** + * Draws the barcode block annotations for position, size, and raw value on the supplied canvas. 
+ */ + override fun draw(canvas: Canvas) { + if (barcode == null) { + throw IllegalStateException("Attempting to draw a null barcode.") + } + + // Draws the bounding box around the BarcodeBlock. + val rect = RectF(barcode.boundingBox) + rect.left = translateX(rect.left) + rect.top = translateY(rect.top) + rect.right = translateX(rect.right) + rect.bottom = translateY(rect.bottom) + canvas.drawRect(rect, rectPaint) + + // Renders the barcode at the bottom of the box. + canvas.drawText(barcode.rawValue!!, rect.left, rect.bottom, barcodePaint) + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt new file mode 100644 index 0000000000..8ab5aca992 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt @@ -0,0 +1,60 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.barcodescanning + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode +import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetector +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase +import java.io.IOException + +/** Barcode Detector Demo. */ +class BarcodeScanningProcessor : VisionProcessorBase>() { + + private val detector: FirebaseVisionBarcodeDetector + + init { + // Note that if you know which format of barcode your app is dealing with, detection will be + // faster to specify the supported barcode formats one by one, e.g. 
+ // FirebaseVisionBarcodeDetectorOptions.Builder() + // .setBarcodeFormats(FirebaseVisionBarcode.FORMAT_QR_CODE) + // .build() + detector = FirebaseVision.getInstance().visionBarcodeDetector + } + + override fun stop() { + try { + detector.close() + } catch (e: IOException) { + Log.e(TAG, "Exception thrown while trying to close Barcode Detector: $e") + } + + } + + override fun detectInImage(image: FirebaseVisionImage): Task> { + return detector.detectInImage(image) + } + + override fun onSuccess( + barcodes: List, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + for (i in barcodes.indices) { + val barcode = barcodes[i] + val barcodeGraphic = BarcodeGraphic(graphicOverlay, barcode) + graphicOverlay.add(barcodeGraphic) + } + } + + override fun onFailure(e: Exception) { + Log.e(TAG, "Barcode detection failed $e") + } + + companion object { + private const val TAG = "BarcodeScanProc" + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt new file mode 100644 index 0000000000..57cdfcb373 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt @@ -0,0 +1,64 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudimagelabeling + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions +import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabel +import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabelDetector +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import 
com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase + +/** Cloud Label Detector Demo. */ +class CloudImageLabelingProcessor : VisionProcessorBase>() { + + private val detector: FirebaseVisionCloudLabelDetector + + init { + val options = FirebaseVisionCloudDetectorOptions.Builder() + .setMaxResults(10) + .setModelType(FirebaseVisionCloudDetectorOptions.STABLE_MODEL) + .build() + + detector = FirebaseVision.getInstance().getVisionCloudLabelDetector(options) + } + + override fun detectInImage(image: FirebaseVisionImage): Task> { + return detector.detectInImage(image) + } + + override fun onSuccess( + labels: List, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + + graphicOverlay.clear() + + Log.d(TAG, "cloud label size: " + labels.size) + + val labelsStr = ArrayList() + for (i in labels.indices) { + val label = labels[i] + + Log.d(TAG, "cloud label: $label") + + if (label.label != null) { + labelsStr.add(label.label) + } + } + + val cloudLabelGraphic = CloudLabelGraphic(graphicOverlay) + graphicOverlay.add(cloudLabelGraphic) + cloudLabelGraphic.updateLabel(labelsStr) + } + + override fun onFailure(e: Exception) { + Log.e(TAG, "Cloud Label detection failed $e") + } + + companion object { + private const val TAG = "CloudImgLabelProc" + } +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudLabelGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudLabelGraphic.kt new file mode 100644 index 0000000000..0f5332a30f --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudLabelGraphic.kt @@ -0,0 +1,36 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudimagelabeling + +import android.graphics.Canvas +import android.graphics.Color 
+import android.graphics.Paint +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** Graphic instance for rendering detected label. */ +class CloudLabelGraphic(private val overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { + private val textPaint: Paint + + private var labels: List? = null + + init { + textPaint = Paint() + textPaint.color = Color.WHITE + textPaint.textSize = 60.0f + } + + @Synchronized + internal fun updateLabel(labels: List) { + this.labels = labels + postInvalidate() + } + + @Synchronized + override fun draw(canvas: Canvas) { + val x = overlay.width / 4.0f + var y = overlay.height / 4.0f + + for (label in labels!!) { + canvas.drawText(label, x, y, textPaint) + y -= 62.0f + } + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt new file mode 100644 index 0000000000..99c3792f9e --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt @@ -0,0 +1,66 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudlandmarkrecognition + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import android.graphics.RectF +import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** Graphic instance for rendering detected landmark. */ +class CloudLandmarkGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { + + private val rectPaint: Paint + private val landmarkPaint: Paint + private var landmark: FirebaseVisionCloudLandmark? 
= null + + init { + + rectPaint = Paint() + rectPaint.color = TEXT_COLOR + rectPaint.style = Paint.Style.STROKE + rectPaint.strokeWidth = STROKE_WIDTH + + landmarkPaint = Paint() + landmarkPaint.color = TEXT_COLOR + landmarkPaint.textSize = TEXT_SIZE + } + + /** + * Updates the landmark instance from the detection of the most recent frame. Invalidates the + * relevant portions of the overlay to trigger a redraw. + */ + internal fun updateLandmark(landmark: FirebaseVisionCloudLandmark) { + this.landmark = landmark + postInvalidate() + } + + /** + * Draws the landmark block annotations for position, size, and raw value on the supplied canvas. + */ + override fun draw(canvas: Canvas) { + if (landmark == null) { + throw IllegalStateException("Attempting to draw a null landmark.") + } + if (landmark!!.landmark == null || landmark!!.boundingBox == null) { + return + } + + // Draws the bounding box around the LandmarkBlock. + val rect = RectF(landmark!!.boundingBox) + rect.left = translateX(rect.left) + rect.top = translateY(rect.top) + rect.right = translateX(rect.right) + rect.bottom = translateY(rect.bottom) + canvas.drawRect(rect, rectPaint) + + // Renders the landmark at the bottom of the box. 
+ canvas.drawText(landmark!!.landmark, rect.left, rect.bottom, landmarkPaint) + } + + companion object { + private const val TEXT_COLOR = Color.WHITE + private const val TEXT_SIZE = 54.0f + private const val STROKE_WIDTH = 4.0f + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt new file mode 100644 index 0000000000..7a292b95ae --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt @@ -0,0 +1,54 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudlandmarkrecognition + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions +import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark +import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmarkDetector +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase + +/** Cloud Landmark Detector Demo. 
*/ +class CloudLandmarkRecognitionProcessor : VisionProcessorBase>() { + + private val detector: FirebaseVisionCloudLandmarkDetector + + init { + val options = FirebaseVisionCloudDetectorOptions.Builder() + .setMaxResults(10) + .setModelType(FirebaseVisionCloudDetectorOptions.STABLE_MODEL) + .build() + + detector = FirebaseVision.getInstance().getVisionCloudLandmarkDetector(options) + } + + override fun detectInImage(image: FirebaseVisionImage): Task> { + return detector.detectInImage(image) + } + + override fun onSuccess( + landmarks: List, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + Log.d(TAG, "cloud landmark size: " + landmarks.size) + for (i in landmarks.indices) { + val landmark = landmarks[i] + Log.d(TAG, "cloud landmark: $landmark") + val cloudLandmarkGraphic = CloudLandmarkGraphic(graphicOverlay) + graphicOverlay.add(cloudLandmarkGraphic) + cloudLandmarkGraphic.updateLandmark(landmark) + } + } + + override fun onFailure(e: Exception) { + Log.e(TAG, "Cloud Landmark detection failed $e") + } + + companion object { + private const val TAG = "CloudLmkRecProc" + } +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextGraphic.kt new file mode 100644 index 0000000000..11b5e66727 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextGraphic.kt @@ -0,0 +1,51 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** + * Graphic instance for rendering TextBlock position, size, and ID within an 
associated graphic + * overlay view. + */ +class CloudDocumentTextGraphic( + private val overlay: GraphicOverlay, + private val symbol: FirebaseVisionDocumentText.Symbol? +) : GraphicOverlay.Graphic(overlay) { + + private val rectPaint: Paint + private val textPaint: Paint + + init { + + rectPaint = Paint() + rectPaint.color = TEXT_COLOR + rectPaint.style = Paint.Style.STROKE + rectPaint.strokeWidth = STROKE_WIDTH + + textPaint = Paint() + textPaint.color = TEXT_COLOR + textPaint.textSize = TEXT_SIZE + // Redraw the overlay, as this graphic has been added. + postInvalidate() + } + + /** Draws the text block annotations for position, size, and raw value on the supplied canvas. */ + override fun draw(canvas: Canvas) { + if (symbol == null) { + throw IllegalStateException("Attempting to draw a null text.") + } + + val rect = symbol.boundingBox + canvas.drawRect(rect!!, rectPaint) + canvas.drawText(symbol.text, rect.left.toFloat(), rect.bottom.toFloat(), textPaint) + } + + companion object { + private const val TEXT_COLOR = Color.WHITE + private const val TEXT_SIZE = 54.0f + private const val STROKE_WIDTH = 4.0f + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.kt new file mode 100644 index 0000000000..8c9fbf27c0 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.kt @@ -0,0 +1,57 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText 
+import com.google.firebase.ml.vision.document.FirebaseVisionDocumentTextRecognizer +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase + +/** Processor for the cloud document text detector demo. */ +class CloudDocumentTextRecognitionProcessor : VisionProcessorBase() { + + private val detector: FirebaseVisionDocumentTextRecognizer + + init { + detector = FirebaseVision.getInstance().cloudDocumentTextRecognizer + } + + override fun detectInImage(image: FirebaseVisionImage): Task { + return detector.processImage(image) + } + + override fun onSuccess( + text: FirebaseVisionDocumentText, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + Log.d(TAG, "detected text is: " + text.text) + val blocks = text.blocks + for (i in blocks.indices) { + val paragraphs = blocks[i].paragraphs + for (j in paragraphs.indices) { + val words = paragraphs[j].words + for (l in words.indices) { + val symbols = words[l].symbols + for (m in symbols.indices) { + val cloudDocumentTextGraphic = CloudDocumentTextGraphic(graphicOverlay, + symbols[m]) + graphicOverlay.add(cloudDocumentTextGraphic) + } + } + } + } + } + + override fun onFailure(e: Exception) { + Log.w(TAG, "Cloud Document Text detection failed.$e") + } + + companion object { + + private const val TAG = "CloudDocTextRecProc" + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextGraphic.kt new file mode 100644 index 0000000000..a12477ed3e --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextGraphic.kt @@ -0,0 +1,51 @@ +package 
com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import com.google.firebase.ml.vision.text.FirebaseVisionText +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** + * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic + * overlay view. + */ +class CloudTextGraphic( + private val overlay: GraphicOverlay, + private val element: FirebaseVisionText.Element? +) : GraphicOverlay.Graphic(overlay) { + + private val rectPaint: Paint + private val textPaint: Paint + + init { + + rectPaint = Paint() + rectPaint.color = TEXT_COLOR + rectPaint.style = Paint.Style.STROKE + rectPaint.strokeWidth = STROKE_WIDTH + + textPaint = Paint() + textPaint.color = TEXT_COLOR + textPaint.textSize = TEXT_SIZE + // Redraw the overlay, as this graphic has been added. + postInvalidate() + } + + /** Draws the text block annotations for position, size, and raw value on the supplied canvas. 
*/ + override fun draw(canvas: Canvas) { + if (element == null) { + throw IllegalStateException("Attempting to draw a null text.") + } + + val rect = element.boundingBox + canvas.drawRect(rect!!, rectPaint) + canvas.drawText(element.text, rect.left.toFloat(), rect.bottom.toFloat(), textPaint) + } + + companion object { + private const val TEXT_COLOR = Color.WHITE + private const val TEXT_SIZE = 54.0f + private const val STROKE_WIDTH = 4.0f + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextRecognitionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextRecognitionProcessor.kt new file mode 100644 index 0000000000..a02b35fa88 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudtextrecognition/CloudTextRecognitionProcessor.kt @@ -0,0 +1,58 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.ml.vision.text.FirebaseVisionText +import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase + +/** + * Processor for the cloud text detector demo. 
+ */ +class CloudTextRecognitionProcessor : VisionProcessorBase() { + + private val detector: FirebaseVisionTextRecognizer + + init { + detector = FirebaseVision.getInstance().cloudTextRecognizer + } + + override fun detectInImage(image: FirebaseVisionImage): Task { + return detector.processImage(image) + } + + override fun onSuccess( + text: FirebaseVisionText, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + if (text == null) { + return // TODO: investigate why this is needed + } + val blocks = text.textBlocks + for (i in blocks.indices) { + val lines = blocks[i].lines + for (j in lines.indices) { + val elements = lines[j].elements + for (l in elements.indices) { + val cloudTextGraphic = CloudTextGraphic(graphicOverlay, + elements[l]) + graphicOverlay.add(cloudTextGraphic) + } + } + } + } + + override fun onFailure(e: Exception) { + Log.w(TAG, "Cloud Text detection failed.$e") + } + + companion object { + + private val TAG = "CloudTextRecProc" + } +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt new file mode 100644 index 0000000000..1acf30a1a0 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt @@ -0,0 +1,201 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.custommodel + +import android.app.Activity +import android.graphics.* +import android.os.SystemClock +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.android.gms.tasks.Tasks +import com.google.firebase.ml.common.FirebaseMLException +import com.google.firebase.ml.custom.* +import com.google.firebase.ml.custom.model.FirebaseCloudModelSource +import com.google.firebase.ml.custom.model.FirebaseLocalModelSource +import 
com.google.firebase.ml.custom.model.FirebaseModelDownloadConditions +import java.io.BufferedReader +import java.io.ByteArrayOutputStream +import java.io.IOException +import java.io.InputStreamReader +import java.nio.ByteBuffer +import java.nio.ByteOrder +import java.util.* +import kotlin.experimental.and + +/** A `FirebaseModelInterpreter` based image classifier. */ +class CustomImageClassifier +/** Initializes an `CustomImageClassifier`. */ +@Throws(FirebaseMLException::class) +constructor(activity: Activity) { + + /* Preallocated buffers for storing image data in. */ + private val intValues = IntArray(DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y) + + /** An instance of the driver class to run model inference with Firebase. */ + private val interpreter: FirebaseModelInterpreter? + + /** Data configuration of input & output data of model. */ + private val dataOptions: FirebaseModelInputOutputOptions + + /** Labels corresponding to the output of the vision model. */ + private val labelList: List + + private val sortedLabels = PriorityQueue>( + RESULTS_TO_SHOW, + Comparator> { o1, o2 -> + o1.value.compareTo(o2.value) + }) + + init { + val modelOptions = FirebaseModelOptions.Builder() + .setCloudModelName(HOSTED_MODEL_NAME) + .setLocalModelName(LOCAL_MODEL_NAME) + .build() + val conditions = FirebaseModelDownloadConditions.Builder() + .requireWifi() + .build() + val localModelSource = FirebaseLocalModelSource.Builder(LOCAL_MODEL_NAME) + .setAssetFilePath(LOCAL_MODEL_PATH).build() + val cloudSource = FirebaseCloudModelSource.Builder(HOSTED_MODEL_NAME) + .enableModelUpdates(true) + .setInitialDownloadConditions(conditions) + .setUpdatesDownloadConditions(conditions) // You could also specify different + // conditions for updates. 
+ .build() + val manager = FirebaseModelManager.getInstance() + manager.registerLocalModelSource(localModelSource) + manager.registerCloudModelSource(cloudSource) + interpreter = FirebaseModelInterpreter.getInstance(modelOptions) + labelList = loadLabelList(activity) + Log.d(TAG, "Created a Custom Image Classifier.") + val inputDims = intArrayOf(DIM_BATCH_SIZE, DIM_IMG_SIZE_X, DIM_IMG_SIZE_Y, DIM_PIXEL_SIZE) + val outputDims = intArrayOf(1, labelList.size) + dataOptions = FirebaseModelInputOutputOptions.Builder() + .setInputFormat(0, FirebaseModelDataType.BYTE, inputDims) + .setOutputFormat(0, FirebaseModelDataType.BYTE, outputDims) + .build() + Log.d(TAG, "Configured input & output data for the custom image classifier.") + } + + /** Classifies a frame from the preview stream. */ + @Throws(FirebaseMLException::class) + internal fun classifyFrame(buffer: ByteBuffer, width: Int, height: Int): Task> { + if (interpreter == null) { + Log.e(TAG, "Image classifier has not been initialized; Skipped.") + val uninitialized = ArrayList() + uninitialized.add("Uninitialized Classifier.") + Tasks.forResult>(uninitialized) + } + // Create input data. + val imgData = convertBitmapToByteBuffer(buffer, width, height) + + val inputs = FirebaseModelInputs.Builder().add(imgData).build() + // Here's where the magic happens!! + return interpreter!! + .run(inputs, dataOptions) + .continueWith { task -> + val labelProbArray = task.result.getOutput>(0) + printTopKLabels(labelProbArray) + } + } + + /** Reads label list from Assets. */ + private fun loadLabelList(activity: Activity): List { + val labelList = ArrayList() + try { + BufferedReader(InputStreamReader(activity.assets.open(LABEL_PATH))).use { reader -> + var line = reader.readLine() + while (line != null) { + labelList.add(line) + line = reader.readLine() + } + } + } catch (e: IOException) { + Log.e(TAG, "Failed to read label list.", e) + } + + return labelList + } + + /** Writes Image data into a `ByteBuffer`. 
*/ + @Synchronized + private fun convertBitmapToByteBuffer( + buffer: ByteBuffer, width: Int, height: Int): ByteBuffer { + val imgData = ByteBuffer.allocateDirect( + DIM_BATCH_SIZE * DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y * DIM_PIXEL_SIZE) + imgData.order(ByteOrder.nativeOrder()) + val bitmap = createResizedBitmap(buffer, width, height) + imgData.rewind() + bitmap.getPixels(intValues, 0, bitmap.width, 0, 0, bitmap.width, bitmap.height) + // Convert the image to int points. + var pixel = 0 + val startTime = SystemClock.uptimeMillis() + for (i in 0 until DIM_IMG_SIZE_X) { + for (j in 0 until DIM_IMG_SIZE_Y) { + val `val` = intValues[pixel++] + imgData.put((`val` shr 16 and 0xFF).toByte()) + imgData.put((`val` shr 8 and 0xFF).toByte()) + imgData.put((`val` and 0xFF).toByte()) + } + } + val endTime = SystemClock.uptimeMillis() + Log.d(TAG, "Timecost to put values into ByteBuffer: " + (endTime - startTime)) + return imgData + } + + /** Resizes image data from `ByteBuffer`. */ + private fun createResizedBitmap(buffer: ByteBuffer, width: Int, height: Int): Bitmap { + val img = YuvImage(buffer.array(), ImageFormat.NV21, width, height, null) + val out = ByteArrayOutputStream() + img.compressToJpeg(Rect(0, 0, img.width, img.height), 50, out) + val imageBytes = out.toByteArray() + val bitmap = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size) + return Bitmap.createScaledBitmap(bitmap, DIM_IMG_SIZE_X, DIM_IMG_SIZE_Y, true) + } + + /** Prints top-K labels, to be shown in UI as the results. 
*/ + @Synchronized + private fun printTopKLabels(labelProbArray: Array): List { + for (i in labelList.indices) { + sortedLabels.add( + AbstractMap.SimpleEntry(labelList[i], (labelProbArray[0][i] and 0xff.toByte()) / 255.0f)) + if (sortedLabels.size > RESULTS_TO_SHOW) { + sortedLabels.poll() + } + } + val result = ArrayList() + val size = sortedLabels.size + for (i in 0 until size) { + val label = sortedLabels.poll() + result.add(label.key + ":" + label.value) + } + return result + } + + companion object { + + /** Tag for the [Log]. */ + private const val TAG = "MLKitDemoApp:Classifier" + + /** Name of the model file. */ + private const val LOCAL_MODEL_NAME = "mobilenet_quant_v1" + + /** Path of the model file stored in Assets. */ + private const val LOCAL_MODEL_PATH = "mobilenet_quant_v1_224.tflite" + + /** Name of the model uploaded to the Firebase console. */ + private const val HOSTED_MODEL_NAME = "mobilenet_v1" + + /** Name of the label file stored in Assets. */ + private const val LABEL_PATH = "labels.txt" + + /** Number of results to show in the UI. */ + private const val RESULTS_TO_SHOW = 3 + + /** Dimensions of inputs. 
*/ + private const val DIM_BATCH_SIZE = 1 + + private const val DIM_PIXEL_SIZE = 3 + + private const val DIM_IMG_SIZE_X = 224 + private const val DIM_IMG_SIZE_Y = 224 + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt new file mode 100644 index 0000000000..b68aa0d5a0 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt @@ -0,0 +1,48 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.custommodel + +import android.app.Activity +import android.graphics.Bitmap +import android.media.Image +import com.google.firebase.ml.common.FirebaseMLException +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionImageProcessor +import java.nio.ByteBuffer + +/** Custom Image Classifier Demo. 
*/ +class CustomImageClassifierProcessor @Throws(FirebaseMLException::class) +constructor(private val activity: Activity): VisionImageProcessor { + + private val classifier: CustomImageClassifier + + init{ + classifier = + com.google.firebase.samples.apps.mlkit.kotlin.custommodel.CustomImageClassifier(activity) + } + + @Throws(FirebaseMLException::class) + override fun process( + data: ByteBuffer, frameMetadata: FrameMetadata, graphicOverlay: GraphicOverlay) { + classifier + .classifyFrame(data, frameMetadata.getWidth(), frameMetadata.getHeight()) + .addOnSuccessListener( + activity + ) { result -> + val labelGraphic = LabelGraphic(graphicOverlay) + graphicOverlay.clear() + graphicOverlay.add(labelGraphic) + labelGraphic.updateLabel(result) + } + } + + override fun process(bitmap: Bitmap, graphicOverlay: GraphicOverlay) { + // nop + } + + override fun process(bitmap: Image, rotation:Int, graphicOverlay: GraphicOverlay) { + // nop + + } + + override fun stop() {} +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt new file mode 100644 index 0000000000..f67d9b5eec --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt @@ -0,0 +1,37 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.custommodel + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** Graphic instance for rendering image labels. */ +class LabelGraphic(private val overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { + + private val textPaint: Paint + + private var labels: List? 
= null + + init { + textPaint = Paint() + textPaint.color = Color.WHITE + textPaint.textSize = 60.0f + } + + @Synchronized + internal fun updateLabel(labels: List) { + this.labels = labels + postInvalidate() + } + + @Synchronized + override fun draw(canvas: Canvas) { + val x = overlay.width / 4.0f + var y = overlay.height / 4.0f + + for (label in labels!!) { + canvas.drawText(label, x, y, textPaint) + y -= 62.0f + } + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt new file mode 100644 index 0000000000..e20511d5ac --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt @@ -0,0 +1,64 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.facedetection + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.ml.vision.face.FirebaseVisionFace +import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector +import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase +import java.io.IOException + +/** Face Detector Demo. 
*/ +class FaceDetectionProcessor : VisionProcessorBase>() { + + private val detector: FirebaseVisionFaceDetector + + init { + val options = FirebaseVisionFaceDetectorOptions.Builder() + .setClassificationType(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS) + .setLandmarkType(FirebaseVisionFaceDetectorOptions.ALL_LANDMARKS) + .setTrackingEnabled(true) + .build() + + detector = FirebaseVision.getInstance().getVisionFaceDetector(options) + } + + override fun stop() { + try { + detector.close() + } catch (e: IOException) { + Log.e(TAG, "Exception thrown while trying to close Face Detector: $e") + } + + } + + override fun detectInImage(image: FirebaseVisionImage): Task> { + return detector.detectInImage(image) + } + + override fun onSuccess( + faces: List, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + for (i in faces.indices) { + val face = faces[i] + val faceGraphic = FaceGraphic(graphicOverlay) + graphicOverlay.add(faceGraphic) + faceGraphic.updateFace(face, frameMetadata.getCameraFacing()) + } + } + + override fun onFailure(e: Exception) { + Log.e(TAG, "Face detection failed $e") + } + + companion object { + + private const val TAG = "FaceDetectionProcessor" + } +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt new file mode 100644 index 0000000000..6b32f75039 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt @@ -0,0 +1,135 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.facedetection + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import com.google.android.gms.vision.CameraSource +import com.google.firebase.ml.vision.face.FirebaseVisionFace +import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark +import 
com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** + * Graphic instance for rendering face position, orientation, and landmarks within an associated + * graphic overlay view. + */ +class FaceGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { + + private var facing: Int = 0 + + private val facePositionPaint: Paint + private val idPaint: Paint + private val boxPaint: Paint + + @Volatile + private var firebaseVisionFace: FirebaseVisionFace? = null + + init { + + currentColorIndex = (currentColorIndex + 1) % COLOR_CHOICES.size + val selectedColor = COLOR_CHOICES[currentColorIndex] + + facePositionPaint = Paint() + facePositionPaint.color = selectedColor + + idPaint = Paint() + idPaint.color = selectedColor + idPaint.textSize = ID_TEXT_SIZE + + boxPaint = Paint() + boxPaint.color = selectedColor + boxPaint.style = Paint.Style.STROKE + boxPaint.strokeWidth = BOX_STROKE_WIDTH + } + + /** + * Updates the face instance from the detection of the most recent frame. Invalidates the relevant + * portions of the overlay to trigger a redraw. + */ + fun updateFace(face: FirebaseVisionFace, facing: Int) { + firebaseVisionFace = face + this.facing = facing + postInvalidate() + } + + /** Draws the face annotations for position on the supplied canvas. */ + override fun draw(canvas: Canvas) { + val face = firebaseVisionFace ?: return + + // Draws a circle at the position of the detected face, with the face's track id below. 
+ val x = translateX(face.boundingBox.centerX().toFloat()) + val y = translateY(face.boundingBox.centerY().toFloat()) + canvas.drawCircle(x, y, FACE_POSITION_RADIUS, facePositionPaint) + canvas.drawText("id: " + face.trackingId, x + ID_X_OFFSET, y + ID_Y_OFFSET, idPaint) + canvas.drawText( + "happiness: " + String.format("%.2f", face.smilingProbability), + x + ID_X_OFFSET * 3, + y - ID_Y_OFFSET, + idPaint) + if (facing == CameraSource.CAMERA_FACING_FRONT) { + canvas.drawText( + "right eye: " + String.format("%.2f", face.rightEyeOpenProbability), + x - ID_X_OFFSET, + y, + idPaint) + canvas.drawText( + "left eye: " + String.format("%.2f", face.leftEyeOpenProbability), + x + ID_X_OFFSET * 6, + y, + idPaint) + } else { + canvas.drawText( + "left eye: " + String.format("%.2f", face.leftEyeOpenProbability), + x - ID_X_OFFSET, + y, + idPaint) + canvas.drawText( + "right eye: " + String.format("%.2f", face.rightEyeOpenProbability), + x + ID_X_OFFSET * 6, + y, + idPaint) + } + + // Draws a bounding box around the face. 
+ val xOffset = scaleX(face.boundingBox.width() / 2.0f) + val yOffset = scaleY(face.boundingBox.height() / 2.0f) + val left = x - xOffset + val top = y - yOffset + val right = x + xOffset + val bottom = y + yOffset + canvas.drawRect(left, top, right, bottom, boxPaint) + + // draw landmarks + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.BOTTOM_MOUTH) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_CHEEK) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_EAR) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_MOUTH) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_EYE) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.NOSE_BASE) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_CHEEK) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_EAR) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_EYE) + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_MOUTH) + } + + private fun drawLandmarkPosition(canvas: Canvas, face: FirebaseVisionFace, landmarkID: Int) { + val landmark = face.getLandmark(landmarkID) + if (landmark != null) { + val point = landmark.position + canvas.drawCircle( + translateX(point.x!!), + translateY(point.y!!), + 10f, idPaint) + } + } + + companion object { + private const val FACE_POSITION_RADIUS = 10.0f + private const val ID_TEXT_SIZE = 40.0f + private const val ID_Y_OFFSET = 50.0f + private const val ID_X_OFFSET = -50.0f + private const val BOX_STROKE_WIDTH = 5.0f + + private val COLOR_CHOICES = intArrayOf(Color.BLUE, Color.CYAN, Color.GREEN, Color.MAGENTA, Color.RED, Color.WHITE, Color.YELLOW) + private var currentColorIndex = 0 + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt 
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt new file mode 100644 index 0000000000..f5be5b6df8 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt @@ -0,0 +1,53 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.imagelabeling + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.ml.vision.label.FirebaseVisionLabel +import com.google.firebase.ml.vision.label.FirebaseVisionLabelDetector +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase +import java.io.IOException + +/** Custom Image Classifier Demo. */ +class ImageLabelingProcessor : VisionProcessorBase>() { + + private val detector: FirebaseVisionLabelDetector + + init { + detector = FirebaseVision.getInstance().visionLabelDetector + } + + override fun stop() { + try { + detector.close() + } catch (e: IOException) { + Log.e(TAG, "Exception thrown while trying to close Text Detector: $e") + } + + } + + override fun detectInImage(image: FirebaseVisionImage): Task> { + return detector.detectInImage(image) + } + + override fun onSuccess( + labels: List, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + val labelGraphic = LabelGraphic(graphicOverlay, labels) + graphicOverlay.add(labelGraphic) + } + + override fun onFailure(e: Exception) { + Log.w(TAG, "Label detection failed.$e") + } + + companion object { + + private val TAG = "ImageLabelingProcessor" + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt 
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt new file mode 100644 index 0000000000..8d2530f281 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt @@ -0,0 +1,34 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.imagelabeling + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import com.google.firebase.ml.vision.label.FirebaseVisionLabel +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** Graphic instance for rendering a label within an associated graphic overlay view. */ +class LabelGraphic ( + private val overlay: GraphicOverlay, + private val labels: List +) : GraphicOverlay.Graphic(overlay) { + + private val textPaint: Paint + + init { + textPaint = Paint() + textPaint.color = Color.WHITE + textPaint.textSize = 60.0f + postInvalidate() + } + + @Synchronized + override fun draw(canvas: Canvas) { + val x = overlay.width / 4.0f + var y = overlay.height / 2.0f + + for (label in labels) { + canvas.drawText(label.label, x, y, textPaint) + y -= 62.0f + } + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt new file mode 100644 index 0000000000..6333dac45c --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt @@ -0,0 +1,167 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.textrecognition + +import android.annotation.SuppressLint +import android.content.Context +import android.content.res.Configuration +import android.util.AttributeSet +import android.util.Log +import android.view.SurfaceHolder +import android.view.SurfaceView +import android.view.ViewGroup +import 
com.google.firebase.samples.apps.mlkit.kotlin.CameraSource +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import java.io.IOException + +/** Preview the camera image in the screen. */ +class CameraSourcePreview(private val ctx: Context, attrs: AttributeSet) : ViewGroup(ctx, attrs) { + private val surfaceView: SurfaceView + private var startRequested: Boolean = false + private var surfaceAvailable: Boolean = false + private var cameraSource: CameraSource? = null + + private var overlay: GraphicOverlay? = null + + private val isPortraitMode: Boolean + get() { + val orientation = ctx.resources.configuration.orientation + if (orientation == Configuration.ORIENTATION_LANDSCAPE) { + return false + } + if (orientation == Configuration.ORIENTATION_PORTRAIT) { + return true + } + + Log.d(TAG, "isPortraitMode returning false by default") + return false + } + + init { + startRequested = false + surfaceAvailable = false + + surfaceView = SurfaceView(ctx) + surfaceView.holder.addCallback(SurfaceCallback()) + addView(surfaceView) + } + + @Throws(IOException::class) + fun start(cameraSource: CameraSource?) { + if (cameraSource == null) { + stop() + } + + this.cameraSource = cameraSource + + if (this.cameraSource != null) { + startRequested = true + startIfReady() + } + } + + @Throws(IOException::class) + fun start(cameraSource: CameraSource, overlay: GraphicOverlay) { + this.overlay = overlay + start(cameraSource) + } + + fun stop() { + if (cameraSource != null) { + cameraSource!!.stop() + } + } + + fun release() { + if (cameraSource != null) { + cameraSource!!.release() + cameraSource = null + } + } + + @SuppressLint("MissingPermission") + @Throws(IOException::class) + private fun startIfReady() { + if (startRequested && surfaceAvailable) { + cameraSource!!.start(surfaceView.holder) + if (overlay != null) { + val size = cameraSource!!.previewSize!! 
+ val min = Math.min(size.width, size.height) + val max = Math.max(size.width, size.height) + if (isPortraitMode) { + // Swap width and height sizes when in portrait, since it will be rotated by + // 90 degrees + overlay!!.setCameraInfo(min, max, cameraSource!!.cameraFacing) + } else { + overlay!!.setCameraInfo(max, min, cameraSource!!.cameraFacing) + } + overlay!!.clear() + } + startRequested = false + } + } + + private inner class SurfaceCallback : SurfaceHolder.Callback { + override fun surfaceCreated(surface: SurfaceHolder) { + surfaceAvailable = true + try { + startIfReady() + } catch (e: IOException) { + Log.e(TAG, "Could not start camera source.", e) + } + + } + + override fun surfaceDestroyed(surface: SurfaceHolder) { + surfaceAvailable = false + } + + override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {} + } + + override fun onLayout(changed: Boolean, left: Int, top: Int, right: Int, bottom: Int) { + var width = 320 + var height = 240 + if (cameraSource != null) { + val size = cameraSource!!.previewSize + if (size != null) { + width = size.width + height = size.height + } + } + + // Swap width and height sizes when in portrait, since it will be rotated 90 degrees + if (isPortraitMode) { + val tmp = width + width = height + height = tmp + } + + val layoutWidth = right - left + val layoutHeight = bottom - top + + // Computes height and width for potentially doing fit width. + var childWidth = layoutWidth + var childHeight = (layoutWidth.toFloat() / width.toFloat() * height).toInt() + + // If height is too tall using fit width, does fit height instead. 
+ if (childHeight > layoutHeight) { + childHeight = layoutHeight + childWidth = (layoutHeight.toFloat() / height.toFloat() * width).toInt() + } + + for (i in 0 until childCount) { + getChildAt(i).layout(0, 0, childWidth, childHeight) + Log.d(TAG, "Assigned view: $i") + } + + try { + startIfReady() + } catch (e: IOException) { + Log.e(TAG, "Could not start camera source.", e) + } + + } + + companion object { + private val TAG = "MIDemoApp:Preview" + } +} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt new file mode 100644 index 0000000000..6a5a783218 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt @@ -0,0 +1,59 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.textrecognition + +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.Paint +import android.graphics.RectF +import com.google.firebase.ml.vision.text.FirebaseVisionText +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay + +/** + * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic + * overlay view. + */ +class TextGraphic(overlay: GraphicOverlay, + private val text: FirebaseVisionText.Element? +) : GraphicOverlay.Graphic(overlay) { + + private val rectPaint: Paint + private val textPaint: Paint + + init { + + rectPaint = Paint() + rectPaint.color = TEXT_COLOR + rectPaint.style = Paint.Style.STROKE + rectPaint.strokeWidth = STROKE_WIDTH + + textPaint = Paint() + textPaint.color = TEXT_COLOR + textPaint.textSize = TEXT_SIZE + // Redraw the overlay, as this graphic has been added. + postInvalidate() + } + + /** Draws the text block annotations for position, size, and raw value on the supplied canvas. 
*/ + override fun draw(canvas: Canvas) { + if (text == null) { + throw IllegalStateException("Attempting to draw a null text.") + } + + // Draws the bounding box around the TextBlock. + val rect = RectF(text.boundingBox) + rect.left = translateX(rect.left) + rect.top = translateY(rect.top) + rect.right = translateX(rect.right) + rect.bottom = translateY(rect.bottom) + canvas.drawRect(rect, rectPaint) + + // Renders the text at the bottom of the box. + canvas.drawText(text.text, rect.left, rect.bottom, textPaint) + } + + companion object { + + private const val TEXT_COLOR = Color.WHITE + private const val TEXT_SIZE = 54.0f + private const val STROKE_WIDTH = 4.0f + } +} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt new file mode 100644 index 0000000000..2d234fe048 --- /dev/null +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt @@ -0,0 +1,63 @@ +package com.google.firebase.samples.apps.mlkit.kotlin.textrecognition + +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.firebase.ml.vision.FirebaseVision +import com.google.firebase.ml.vision.common.FirebaseVisionImage +import com.google.firebase.ml.vision.text.FirebaseVisionText +import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer +import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata +import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase +import java.io.IOException + +/** Processor for the text recognition demo. 
*/ +class TextRecognitionProcessor : VisionProcessorBase<FirebaseVisionText>() { + + private val detector: FirebaseVisionTextRecognizer + + init { + detector = FirebaseVision.getInstance().onDeviceTextRecognizer + } + + override fun stop() { + try { + detector.close() + } catch (e: IOException) { + Log.e(TAG, "Exception thrown while trying to close Text Detector: $e") + } + + } + + override fun detectInImage(image: FirebaseVisionImage): Task<FirebaseVisionText> { + return detector.processImage(image) + } + + override fun onSuccess( + results: FirebaseVisionText, + frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { + graphicOverlay.clear() + val blocks = results.textBlocks + for (i in blocks.indices) { + val lines = blocks[i].lines + for (j in lines.indices) { + val elements = lines[j].elements + for (k in elements.indices) { + val textGraphic = TextGraphic(graphicOverlay, elements[k]) + graphicOverlay.add(textGraphic) + + } + } + } + } + + override fun onFailure(e: Exception) { + Log.w(TAG, "Text detection failed.$e") + } + + companion object { + + private const val TAG = "TextRecProc" + } +} diff --git a/mlkit/app/src/main/res/layout-land/activity_live_preview.xml b/mlkit/app/src/main/res/layout-land/activity_live_preview.xml index 1c2df065a9..48df1c0d16 100644 --- a/mlkit/app/src/main/res/layout-land/activity_live_preview.xml +++ b/mlkit/app/src/main/res/layout-land/activity_live_preview.xml @@ -8,17 +8,17 @@ android:background="#000" android:keepScreenOn="true"> - - - + - - - + - Date: Fri, 7 Sep 2018 04:33:48 +0200 Subject: [PATCH 2/7] fix: fix build --- .../firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt | 1 + .../firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt | 1 + .../firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt index 7de4526a7a..4a82fb3784 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt @@ -14,6 +14,7 @@ import android.view.ViewGroup import android.widget.AdapterView import android.widget.ArrayAdapter import android.widget.TextView +import com.google.firebase.samples.apps.mlkit.R import com.google.firebase.samples.apps.mlkit.java.LivePreviewActivity import com.google.firebase.samples.apps.mlkit.java.StillImageActivity import java.util.ArrayList diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt index 3cd22f4964..96d769ddf3 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt @@ -11,6 +11,7 @@ import android.view.View import android.widget.* import com.google.android.gms.common.annotation.KeepName import com.google.firebase.ml.common.FirebaseMLException +import com.google.firebase.samples.apps.mlkit.R import com.google.firebase.samples.apps.mlkit.kotlin.barcodescanning.BarcodeScanningProcessor import com.google.firebase.samples.apps.mlkit.kotlin.custommodel.CustomImageClassifierProcessor import com.google.firebase.samples.apps.mlkit.kotlin.facedetection.FaceDetectionProcessor diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt index 01185b38fb..568d922471 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt +++ 
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt @@ -14,6 +14,7 @@ import android.util.Pair import android.view.View import android.widget.* import com.google.android.gms.common.annotation.KeepName +import com.google.firebase.samples.apps.mlkit.R import com.google.firebase.samples.apps.mlkit.kotlin.cloudimagelabeling.CloudImageLabelingProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition.CloudDocumentTextRecognitionProcessor @@ -243,7 +244,7 @@ class StillImageActivity: AppCompatActivity() { preview!!.setImageBitmap(resizedBitmap) bitmapForDetection = resizedBitmap - imageProcessor!!.process(bitmapForDetection, graphicOverlay) + imageProcessor!!.process(bitmapForDetection!!, graphicOverlay!!) } catch (e: IOException) { Log.e(TAG, "Error retrieving saved image") } From 6c8f007cf07c51926d3afb8cea5b7fd106a9acc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ros=C3=A1rio=20Pereira=20Fernandes?= Date: Thu, 13 Sep 2018 23:33:00 +0200 Subject: [PATCH 3/7] refactor: Create .common package --- .../mlkit/{java => common}/CameraSource.java | 4 +- .../{java => common}/CameraSourcePreview.java | 2 +- .../mlkit/{java => common}/FrameMetadata.java | 2 +- .../{java => common}/GraphicOverlay.java | 2 +- .../VisionImageProcessor.java | 2 +- .../apps/mlkit/java/ChooserActivity.java | 4 +- .../apps/mlkit/java/LivePreviewActivity.java | 6 +- .../apps/mlkit/java/StillImageActivity.java | 2 + .../apps/mlkit/java/VisionProcessorBase.java | 3 + .../java/barcodescanning/BarcodeGraphic.java | 4 +- .../BarcodeScanningProcessor.java | 4 +- .../CloudImageLabelingProcessor.java | 4 +- .../cloudimagelabeling/CloudLabelGraphic.java | 4 +- .../CloudLandmarkGraphic.java | 4 +- .../CloudLandmarkRecognitionProcessor.java | 4 +- .../CloudDocumentTextGraphic.java | 4 +- ...CloudDocumentTextRecognitionProcessor.java | 4 +- 
.../CloudTextGraphic.java | 4 +- .../CloudTextRecognitionProcessor.java | 4 +- .../CustomImageClassifierProcessor.java | 6 +- .../mlkit/java/custommodel/LabelGraphic.java | 4 +- .../facedetection/FaceDetectionProcessor.java | 4 +- .../mlkit/java/facedetection/FaceGraphic.java | 4 +- .../imagelabeling/ImageLabelingProcessor.java | 4 +- .../java/imagelabeling/LabelGraphic.java | 4 +- .../java/textrecognition/TextGraphic.java | 4 +- .../TextRecognitionProcessor.java | 4 +- .../samples/apps/mlkit/kotlin/CameraSource.kt | 701 ------------------ .../apps/mlkit/kotlin/ChooserActivity.kt | 4 +- .../apps/mlkit/kotlin/FrameMetadata.kt | 58 -- .../apps/mlkit/kotlin/GraphicOverlay.kt | 152 ---- .../apps/mlkit/kotlin/LivePreviewActivity.kt | 6 +- .../apps/mlkit/kotlin/StillImageActivity.kt | 2 + .../apps/mlkit/kotlin/VisionImageProcessor.kt | 23 - .../apps/mlkit/kotlin/VisionProcessorBase.kt | 25 +- .../kotlin/barcodescanning/BarcodeGraphic.kt | 12 +- .../BarcodeScanningProcessor.kt | 4 +- .../CloudImageLabellingProcessor.kt | 10 +- .../cloudimagelabeling/CloudLabelGraphic.kt | 6 +- .../CloudLandmarkGraphic.kt | 10 +- .../CloudLandmarkRecognitionProcessor.kt | 6 +- .../CloudDocumentTextGraphic.kt | 10 +- .../CloudDocumentTextRecognitionProcessor.kt | 6 +- .../cloudtextrecognition/CloudTextGraphic.kt | 10 +- .../CloudTextRecognitionProcessor.kt | 6 +- .../custommodel/CustomImageClassifier.kt | 13 +- .../CustomImageClassifierProcessor.kt | 14 +- .../mlkit/kotlin/custommodel/LabelGraphic.kt | 6 +- .../facedetection/FaceDetectionProcessor.kt | 6 +- .../mlkit/kotlin/facedetection/FaceGraphic.kt | 25 +- .../imagelabeling/ImageLabelingProcessor.kt | 6 +- .../kotlin/imagelabeling/LabelGraphic.kt | 2 +- .../textrecognition/CameraSourcePreview.kt | 167 ----- .../kotlin/textrecognition/TextGraphic.kt | 2 +- .../TextRecognitionProcessor.kt | 4 +- .../res/layout-land/activity_live_preview.xml | 8 +- .../src/main/res/layout/activity_chooser.xml | 2 +- 
.../main/res/layout/activity_live_preview.xml | 8 +- .../main/res/layout/activity_still_image.xml | 2 +- 59 files changed, 168 insertions(+), 1249 deletions(-) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{java => common}/CameraSource.java (99%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{java => common}/CameraSourcePreview.java (98%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{java => common}/FrameMetadata.java (97%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{java => common}/GraphicOverlay.java (99%) rename mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/{java => common}/VisionImageProcessor.java (96%) delete mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt delete mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt delete mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt delete mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt delete mode 100644 mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSource.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/CameraSource.java similarity index 99% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSource.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/CameraSource.java index f29d79c7fe..79816f4937 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSource.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/CameraSource.java @@ -12,7 +12,7 @@ // See the License for the specific language governing 
permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit.java; +package com.google.firebase.samples.apps.mlkit.common; import android.Manifest; import android.annotation.SuppressLint; @@ -577,7 +577,7 @@ public void onPreviewFrame(byte[] data, Camera camera) { } } - void setMachineLearningFrameProcessor(VisionImageProcessor processor) { + public void setMachineLearningFrameProcessor(VisionImageProcessor processor) { synchronized (processorLock) { cleanScreen(); if (frameProcessor != null) { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSourcePreview.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/CameraSourcePreview.java similarity index 98% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSourcePreview.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/CameraSourcePreview.java index 73cc0e595c..af8d9f239c 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/CameraSourcePreview.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/CameraSourcePreview.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.java; +package com.google.firebase.samples.apps.mlkit.common; import android.annotation.SuppressLint; import android.content.Context; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/FrameMetadata.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/FrameMetadata.java similarity index 97% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/FrameMetadata.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/FrameMetadata.java index 46fa5b2b07..74270b386c 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/FrameMetadata.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/FrameMetadata.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit.java; +package com.google.firebase.samples.apps.mlkit.common; /** Describing a frame info. */ public class FrameMetadata { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/GraphicOverlay.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/GraphicOverlay.java similarity index 99% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/GraphicOverlay.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/GraphicOverlay.java index 50a93148fb..1f73d7a7e2 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/GraphicOverlay.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/GraphicOverlay.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. -package com.google.firebase.samples.apps.mlkit.java; +package com.google.firebase.samples.apps.mlkit.common; import android.content.Context; import android.graphics.Canvas; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionImageProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/VisionImageProcessor.java similarity index 96% rename from mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionImageProcessor.java rename to mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/VisionImageProcessor.java index 036b738c2e..a0586eaeaa 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionImageProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/common/VisionImageProcessor.java @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.firebase.samples.apps.mlkit.java; +package com.google.firebase.samples.apps.mlkit.common; import android.graphics.Bitmap; import android.media.Image; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java index 7a497a4eda..b5383ad2b9 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/ChooserActivity.java @@ -31,6 +31,8 @@ import android.widget.ListView; import android.widget.TextView; +import com.google.firebase.samples.apps.mlkit.R; + import java.util.ArrayList; import java.util.List; @@ -61,7 +63,7 @@ protected void onCreate(Bundle savedInstanceState) { setContentView(R.layout.activity_chooser); // Set up ListView and Adapter - ListView listView = (ListView) findViewById(R.id.test_activity_list_view); + ListView listView = (ListView) findViewById(R.id.testActivityListView); MyArrayAdapter adapter = new MyArrayAdapter(this, android.R.layout.simple_list_item_2, CLASSES); adapter.setDescriptionIds(DESCRIPTION_IDS); diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java index c9ad555073..84bf6dd09e 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/LivePreviewActivity.java @@ -32,6 +32,10 @@ import com.google.android.gms.common.annotation.KeepName; import com.google.firebase.ml.common.FirebaseMLException; +import com.google.firebase.samples.apps.mlkit.R; +import com.google.firebase.samples.apps.mlkit.common.CameraSource; +import com.google.firebase.samples.apps.mlkit.common.CameraSourcePreview; +import 
com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.barcodescanning.BarcodeScanningProcessor; import com.google.firebase.samples.apps.mlkit.java.custommodel.CustomImageClassifierProcessor; import com.google.firebase.samples.apps.mlkit.java.facedetection.FaceDetectionProcessor; @@ -93,7 +97,7 @@ protected void onCreate(Bundle savedInstanceState) { spinner.setAdapter(dataAdapter); spinner.setOnItemSelectedListener(this); - ToggleButton facingSwitch = (ToggleButton) findViewById(R.id.facingswitch); + ToggleButton facingSwitch = (ToggleButton) findViewById(R.id.facingSwitch); facingSwitch.setOnCheckedChangeListener(this); if (allPermissionsGranted()) { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java index 63e3844509..bbaf6daff8 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java @@ -37,6 +37,8 @@ import android.widget.Spinner; import com.google.android.gms.common.annotation.KeepName; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor; import com.google.firebase.samples.apps.mlkit.java.cloudimagelabeling.CloudImageLabelingProcessor; import com.google.firebase.samples.apps.mlkit.java.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor; import com.google.firebase.samples.apps.mlkit.java.cloudtextrecognition.CloudDocumentTextRecognitionProcessor; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java index 296c61a2dd..f0fd97ac71 100644 --- 
a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/VisionProcessorBase.java @@ -22,6 +22,9 @@ import com.google.android.gms.tasks.Task; import com.google.firebase.ml.vision.common.FirebaseVisionImage; import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor; import java.nio.ByteBuffer; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java index 9902b5ce5f..48aea82345 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeGraphic.java @@ -19,8 +19,8 @@ import android.graphics.RectF; import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; /** Graphic instance for rendering Barcode position and content information in an overlay view. 
*/ public class BarcodeGraphic extends Graphic { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java index b33e85d0fd..2c1290b27f 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/barcodescanning/BarcodeScanningProcessor.java @@ -21,8 +21,8 @@ import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode; import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetector; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java index 43e3ce03f1..a2a7856b92 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudImageLabelingProcessor.java @@ -22,8 +22,8 @@ import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabel; import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabelDetector; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import 
com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.ArrayList; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java index 2598b3a6c8..07c6bffa78 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudimagelabeling/CloudLabelGraphic.java @@ -17,8 +17,8 @@ import android.graphics.Color; import android.graphics.Paint; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java index d23cf9438f..a2a13b9e01 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkGraphic.java @@ -19,8 +19,8 @@ import android.graphics.RectF; import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark; -import 
com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; /** Graphic instance for rendering detected landmark. */ public class CloudLandmarkGraphic extends Graphic { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java index 9526afe697..6950cde8a8 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.java @@ -22,8 +22,8 @@ import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark; import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmarkDetector; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java index 0305125451..7ed6f89b24 100644 --- 
a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextGraphic.java @@ -19,8 +19,8 @@ import android.graphics.Rect; import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java index 07324231e9..99e577707a 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudDocumentTextRecognitionProcessor.java @@ -21,8 +21,8 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage; import com.google.firebase.ml.vision.document.FirebaseVisionDocumentTextRecognizer; import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.List; diff --git 
a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java index 15f51a4ed6..13eebe7ada 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextGraphic.java @@ -19,8 +19,8 @@ import android.graphics.Rect; import com.google.firebase.ml.vision.text.FirebaseVisionText; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java index c999896afe..32636bd8a0 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/cloudtextrecognition/CloudTextRecognitionProcessor.java @@ -21,8 +21,8 @@ import com.google.firebase.ml.vision.text.FirebaseVisionText; import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer; import com.google.firebase.ml.vision.common.FirebaseVisionImage; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import 
com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java index 03efc207cb..4c41ec51e8 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/CustomImageClassifierProcessor.java @@ -19,9 +19,9 @@ import com.google.android.gms.tasks.OnSuccessListener; import com.google.firebase.ml.common.FirebaseMLException; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.VisionImageProcessor; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor; import java.nio.ByteBuffer; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java index 5b34f42ac5..e574656bab 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/custommodel/LabelGraphic.java @@ -18,8 +18,8 @@ import android.graphics.Paint; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import 
com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java index ab7f0e1bc5..993735b831 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceDetectionProcessor.java @@ -22,8 +22,8 @@ import com.google.firebase.ml.vision.face.FirebaseVisionFace; import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector; import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java index ddede79cdc..8ab6f08cd6 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/facedetection/FaceGraphic.java @@ -22,8 +22,8 @@ import com.google.firebase.ml.vision.common.FirebaseVisionPoint; import com.google.firebase.ml.vision.face.FirebaseVisionFace; import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark; -import 
com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; /** * Graphic instance for rendering face position, orientation, and landmarks within an associated diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java index b108845bb5..d2a3a21d08 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/ImageLabelingProcessor.java @@ -21,8 +21,8 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage; import com.google.firebase.ml.vision.label.FirebaseVisionLabel; import com.google.firebase.ml.vision.label.FirebaseVisionLabelDetector; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java index 83e92bff53..442fc4a74d 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/imagelabeling/LabelGraphic.java @@ -18,8 +18,8 @@ import android.graphics.Paint; import 
com.google.firebase.ml.vision.label.FirebaseVisionLabel; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; import java.util.List; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java index 8f5ae4ad27..78e084c20a 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextGraphic.java @@ -19,8 +19,8 @@ import android.graphics.RectF; import com.google.firebase.ml.vision.text.FirebaseVisionText; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay.Graphic; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay.Graphic; /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java index 49fe3c8fa4..4538160a6a 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/textrecognition/TextRecognitionProcessor.java @@ -21,8 +21,8 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage; import 
com.google.firebase.ml.vision.text.FirebaseVisionText; import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer; -import com.google.firebase.samples.apps.mlkit.java.FrameMetadata; -import com.google.firebase.samples.apps.mlkit.java.GraphicOverlay; +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata; +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.java.VisionProcessorBase; import java.io.IOException; diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt deleted file mode 100644 index 44ffd10e23..0000000000 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/CameraSource.kt +++ /dev/null @@ -1,701 +0,0 @@ -package com.google.firebase.samples.apps.mlkit.kotlin - -import android.Manifest -import android.annotation.SuppressLint -import android.app.Activity -import android.content.Context -import android.graphics.ImageFormat -import android.graphics.SurfaceTexture -import android.hardware.Camera -import android.support.annotation.RequiresPermission -import android.util.Log -import android.view.Surface -import android.view.SurfaceHolder -import android.view.WindowManager -import com.google.android.gms.common.images.Size -import java.io.IOException -import java.nio.ByteBuffer -import java.util.* -import java.util.concurrent.locks.ReentrantLock - -/** - * Manages the camera and allows UI updates on top of it (e.g. overlaying extra Graphics or - * displaying extra information). This receives preview frames from the camera at a specified rate, - * sending those frames to child classes' detectors / classifiers as fast as it is able to process. - */ -@SuppressLint("MissingPermission") -class CameraSource(protected var activity: Activity, private val graphicOverlay: GraphicOverlay) { - - private var camera: Camera? 
= null - - /** - * Returns the selected camera; one of [.CAMERA_FACING_BACK] or [ ][.CAMERA_FACING_FRONT]. - */ - var cameraFacing = CAMERA_FACING_BACK - protected set - - /** - * Rotation of the device, and thus the associated preview images captured from the device. See - * Frame.Metadata#getRotation(). - */ - private var rotation: Int = 0 - - /** Returns the preview size that is currently in use by the underlying camera. */ - var previewSize: Size? = null - private set - - // These values may be requested by the caller. Due to hardware limitations, we may need to - // select close, but not exactly the same values for these. - private val requestedFps = 20.0f - private val requestedPreviewWidth = 1280 - private val requestedPreviewHeight = 960 - private val requestedAutoFocus = true - - // These instances need to be held onto to avoid GC of their underlying resources. Even though - // these aren't used outside of the method that creates them, they still must have hard - // references maintained to them. - private var dummySurfaceTexture: SurfaceTexture? = null - - // True if a SurfaceTexture is being used for the preview, false if a SurfaceHolder is being - // used for the preview. We want to be compatible back to Gingerbread, but SurfaceTexture - // wasn't introduced until Honeycomb. Since the interface cannot use a SurfaceTexture, if the - // developer wants to display a preview we must use a SurfaceHolder. If the developer doesn't - // want to display a preview we use a SurfaceTexture if we are running at least Honeycomb. - private var usingSurfaceTexture: Boolean = false - - /** - * Dedicated thread and associated runnable for calling into the detector with frames, as the - * frames become available from the camera. - */ - private var processingThread: Thread? = null - - private val processingRunnable: FrameProcessingRunnable - - private val processorLock = Any() - // @GuardedBy("processorLock") - private var frameProcessor: VisionImageProcessor? 
= null - - /** - * Map to convert between a byte array, received from the camera, and its associated byte buffer. - * We use byte buffers internally because this is a more efficient way to call into native code - * later (avoids a potential copy). - * - * - * **Note:** uses IdentityHashMap here instead of HashMap because the behavior of an array's - * equals, hashCode and toString methods is both useless and unexpected. IdentityHashMap enforces - * identity ('==') check on the keys. - */ - private val bytesToByteBuffer = IdentityHashMap() - - init { - graphicOverlay.clear() - processingRunnable = FrameProcessingRunnable() - } - - // ============================================================================================== - // Public - // ============================================================================================== - - /** Stops the camera and releases the resources of the camera and underlying detector. */ - fun release() { - synchronized(processorLock) { - stop() - processingRunnable.release() - cleanScreen() - - if (frameProcessor != null) { - frameProcessor!!.stop() - } - } - } - - /** - * Opens the camera and starts sending preview frames to the underlying detector. The preview - * frames are not displayed. - * - * @throws IOException if the camera's preview texture or display could not be initialized - */ - @SuppressLint("MissingPermission") - @RequiresPermission(Manifest.permission.CAMERA) - @Synchronized - @Throws(IOException::class) - fun start(): CameraSource { - if (camera != null) { - return this - } - - camera = createCamera() - dummySurfaceTexture = SurfaceTexture(DUMMY_TEXTURE_NAME) - camera!!.setPreviewTexture(dummySurfaceTexture) - usingSurfaceTexture = true - camera!!.startPreview() - - processingThread = Thread(processingRunnable) - processingRunnable.setActive(true) - processingThread!!.start() - return this - } - - /** - * Opens the camera and starts sending preview frames to the underlying detector. 
The supplied - * surface holder is used for the preview so frames can be displayed to the user. - * - * @param surfaceHolder the surface holder to use for the preview frames - * @throws IOException if the supplied surface holder could not be used as the preview display - */ - @RequiresPermission(Manifest.permission.CAMERA) - @Synchronized - @Throws(IOException::class) - fun start(surfaceHolder: SurfaceHolder): CameraSource { - if (camera != null) { - return this - } - - camera = createCamera() - camera!!.setPreviewDisplay(surfaceHolder) - camera!!.startPreview() - - processingThread = Thread(processingRunnable) - processingRunnable.setActive(true) - processingThread!!.start() - - usingSurfaceTexture = false - return this - } - - /** - * Closes the camera and stops sending frames to the underlying frame detector. - * - * - * This camera source may be restarted again by calling [.start] or [ ][.start]. - * - * - * Call [.release] instead to completely shut down this camera source and release the - * resources of the underlying detector. - */ - @Synchronized - fun stop() { - processingRunnable.setActive(false) - if (processingThread != null) { - try { - // Wait for the thread to complete to ensure that we can't have multiple threads - // executing at the same time (i.e., which would happen if we called start too - // quickly after stop). - processingThread!!.join() - } catch (e: InterruptedException) { - Log.d(TAG, "Frame processing thread interrupted on release.") - } - - processingThread = null - } - - if (camera != null) { - camera!!.stopPreview() - camera!!.setPreviewCallbackWithBuffer(null) - try { - if (usingSurfaceTexture) { - camera!!.setPreviewTexture(null) - } else { - camera!!.setPreviewDisplay(null) - } - } catch (e: Exception) { - Log.e(TAG, "Failed to clear camera preview: $e") - } - - camera!!.release() - camera = null - } - - // Release the reference to any image buffers, since these will no longer be in use. 
- bytesToByteBuffer.clear() - } - - /** Changes the facing of the camera. */ - @Synchronized - fun setFacing(facing: Int) { - if (facing != CAMERA_FACING_BACK && facing != CAMERA_FACING_FRONT) { - throw IllegalArgumentException("Invalid camera: $facing") - } - this.cameraFacing = facing - } - - /** - * Opens the camera and applies the user settings. - * - * @throws IOException if camera cannot be found or preview cannot be processed - */ - @SuppressLint("InlinedApi") - @Throws(IOException::class) - private fun createCamera(): Camera { - val requestedCameraId = getIdForRequestedCamera(cameraFacing) - if (requestedCameraId == -1) { - throw IOException("Could not find requested camera.") - } - val camera = Camera.open(requestedCameraId) - - val sizePair = selectSizePair(camera, requestedPreviewWidth, requestedPreviewHeight) - ?: throw IOException("Could not find suitable preview size.") - val pictureSize = sizePair.pictureSize() - previewSize = sizePair.previewSize() - - val previewFpsRange = selectPreviewFpsRange(camera, requestedFps) - ?: throw IOException("Could not find suitable preview frames per second range.") - - val parameters = camera.parameters - - if (pictureSize != null) { - parameters.setPictureSize(pictureSize.width, pictureSize.height) - } - parameters.setPreviewSize(previewSize!!.width, previewSize!!.height) - parameters.setPreviewFpsRange( - previewFpsRange[Camera.Parameters.PREVIEW_FPS_MIN_INDEX], - previewFpsRange[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]) - parameters.previewFormat = ImageFormat.NV21 - - setRotation(camera, parameters, requestedCameraId) - - if (requestedAutoFocus) { - if (parameters - .supportedFocusModes - .contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) { - parameters.focusMode = Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO - } else { - Log.i(TAG, "Camera auto focus is not supported on this device.") - } - } - - camera.parameters = parameters - - // Four frame buffers are needed for working with the camera: - // - // 
one for the frame that is currently being executed upon in doing detection - // one for the next pending frame to process immediately upon completing detection - // two for the frames that the camera uses to populate future preview images - // - // Through trial and error it appears that two free buffers, in addition to the two buffers - // used in this code, are needed for the camera to work properly. Perhaps the camera has - // one thread for acquiring images, and another thread for calling into user code. If only - // three buffers are used, then the camera will spew thousands of warning messages when - // detection takes a non-trivial amount of time. - camera.setPreviewCallbackWithBuffer(CameraPreviewCallback()) - camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) - camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) - camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) - camera.addCallbackBuffer(createPreviewBuffer(previewSize!!)) - - return camera - } - - /** - * Stores a preview size and a corresponding same-aspect-ratio picture size. To avoid distorted - * preview images on some devices, the picture size must be set to a size that is the same aspect - * ratio as the preview size or the preview may end up being distorted. If the picture size is - * null, then there is no picture size with the same aspect ratio as the preview size. - */ - private class SizePair internal constructor( - previewSize: android.hardware.Camera.Size, - pictureSize: android.hardware.Camera.Size?) { - private val preview: Size - private var picture: Size? = null - - init { - preview = Size(previewSize.width, previewSize.height) - if (pictureSize != null) { - picture = Size(pictureSize.width, pictureSize.height) - } - } - - internal fun previewSize(): Size { - return preview - } - - internal fun pictureSize(): Size? { - return picture - } - } - - /** - * Calculates the correct rotation for the given camera id and sets the rotation in the - * parameters. 
It also sets the camera's display orientation and rotation. - * - * @param parameters the camera parameters for which to set the rotation - * @param cameraId the camera id to set rotation based on - */ - private fun setRotation(camera: Camera, parameters: Camera.Parameters, cameraId: Int) { - val windowManager = activity.getSystemService(Context.WINDOW_SERVICE) as WindowManager - var degrees = 0 - val rotation = windowManager.defaultDisplay.rotation - when (rotation) { - Surface.ROTATION_0 -> degrees = 0 - Surface.ROTATION_90 -> degrees = 90 - Surface.ROTATION_180 -> degrees = 180 - Surface.ROTATION_270 -> degrees = 270 - else -> Log.e(TAG, "Bad rotation value: $rotation") - } - - val cameraInfo = Camera.CameraInfo() - Camera.getCameraInfo(cameraId, cameraInfo) - - val angle: Int - val displayAngle: Int - if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) { - angle = (cameraInfo.orientation + degrees) % 360 - displayAngle = (360 - angle) % 360 // compensate for it being mirrored - } else { // back-facing - angle = (cameraInfo.orientation - degrees + 360) % 360 - displayAngle = angle - } - - // This corresponds to the rotation constants. - this.rotation = angle / 90 - - camera.setDisplayOrientation(displayAngle) - parameters.setRotation(angle) - } - - /** - * Creates one buffer for the camera preview callback. The size of the buffer is based off of the - * camera preview size and the format of the camera image. 
- * - * @return a new preview buffer of the appropriate size for the current camera settings - */ - @SuppressLint("InlinedApi") - private fun createPreviewBuffer(previewSize: Size): ByteArray { - val bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21) - val sizeInBits = previewSize.height.toLong() * previewSize.width.toLong() * bitsPerPixel.toLong() - val bufferSize = Math.ceil(sizeInBits / 8.0).toInt() + 1 - - // Creating the byte array this way and wrapping it, as opposed to using .allocate(), - // should guarantee that there will be an array to work with. - val byteArray = ByteArray(bufferSize) - val buffer = ByteBuffer.wrap(byteArray) - if (!buffer.hasArray() || buffer.array() != byteArray) { - // I don't think that this will ever happen. But if it does, then we wouldn't be - // passing the preview content to the underlying detector later. - throw IllegalStateException("Failed to create valid buffer for camera source.") - } - - bytesToByteBuffer[byteArray] = buffer - return byteArray - } - - // ============================================================================================== - // Frame processing - // ============================================================================================== - - /** Called when the camera has a new preview frame. */ - private inner class CameraPreviewCallback : Camera.PreviewCallback { - override fun onPreviewFrame(data: ByteArray, camera: Camera) { - processingRunnable.setNextFrame(data, camera) - } - } - - internal fun setMachineLearningFrameProcessor(processor: VisionImageProcessor) { - synchronized(processorLock) { - cleanScreen() - if (frameProcessor != null) { - frameProcessor!!.stop() - } - frameProcessor = processor - } - } - - /** - * This runnable controls access to the underlying receiver, calling it to process frames when - * available from the camera. 
This is designed to run detection on frames as fast as possible - * (i.e., without unnecessary context switching or waiting on the next frame). - * - * - * While detection is running on a frame, new frames may be received from the camera. As these - * frames come in, the most recent frame is held onto as pending. As soon as detection and its - * associated processing is done for the previous frame, detection on the mostly recently received - * frame will immediately start on the same thread. - */ - private inner class FrameProcessingRunnable internal constructor() : Runnable { - - // This lock guards all of the member variables below. - private val lock = ReentrantLock() - private var active = true - - // These pending variables hold the state associated with the new frame awaiting processing. - private var pendingFrameData: ByteBuffer? = null - - /** - * Releases the underlying receiver. This is only safe to do after the associated thread has - * completed, which is managed in camera source's release method above. - */ - @SuppressLint("Assert") - internal fun release() { - assert(processingThread!!.state == Thread.State.TERMINATED) - } - - /** Marks the runnable as active/not active. Signals any blocked threads to continue. */ - internal fun setActive(active: Boolean) { - synchronized(lock) { - this.active = active - val condition = lock.newCondition() - lock.lock() - condition.signalAll() - } - } - - /** - * Sets the frame data received from the camera. This adds the previous unused frame buffer (if - * present) back to the camera, and keeps a pending reference to the frame data for future use. - */ - internal fun setNextFrame(data: ByteArray, camera: Camera) { - synchronized(lock) { - if (pendingFrameData != null) { - camera.addCallbackBuffer(pendingFrameData!!.array()) - pendingFrameData = null - } - - if (!bytesToByteBuffer.containsKey(data)) { - Log.d( - TAG, - "Skipping frame. 
Could not find ByteBuffer associated with the image " + "data from the camera.") - return - } - - pendingFrameData = bytesToByteBuffer[data] - - // Notify the processor thread if it is waiting on the next frame (see below). - val condition = lock.newCondition() - lock.lock() - condition.signalAll() - } - } - - /** - * As long as the processing thread is active, this executes detection on frames continuously. - * The next pending frame is either immediately available or hasn't been received yet. Once it - * is available, we transfer the frame info to local variables and run detection on that frame. - * It immediately loops back for the next frame without pausing. - * - * - * If detection takes longer than the time in between new frames from the camera, this will - * mean that this loop will run without ever waiting on a frame, avoiding any context switching - * or frame acquisition time latency. - * - * - * If you find that this is using more CPU than you'd like, you should probably decrease the - * FPS setting above to allow for some idle time in between frames. - */ - @SuppressLint("InlinedApi") - override fun run() { - lateinit var data: ByteBuffer - - while (true) { - synchronized(lock) { - while (active && pendingFrameData == null) { - try { - // Wait for the next frame to be received from the camera, since we - // don't have it yet. - val condition = lock.newCondition() - lock.lock() - condition.await() - } catch (e: InterruptedException) { - Log.d(TAG, "Frame processing loop terminated.", e) - return - } - - } - - if (!active) { - // Exit the loop once this camera source is stopped or released. We check - // this here, immediately after the wait() above, to handle the case where - // setActive(false) had been called, triggering the termination of this - // loop. - return - } - - // Hold onto the frame data locally, so that we can use this for detection - // below. 
We need to clear pendingFrameData to ensure that this buffer isn't - // recycled back to the camera before we are done using that data. - data = pendingFrameData!! - pendingFrameData = null - } - - // The code below needs to run outside of synchronization, because this will allow - // the camera to add pending frame(s) while we are running detection on the current - // frame. - - try { - synchronized(processorLock) { - Log.d(TAG, "Process an image") - frameProcessor!!.process( - data, - FrameMetadata.Builder() - .setWidth(previewSize!!.width) - .setHeight(previewSize!!.height) - .setRotation(rotation) - .setCameraFacing(cameraFacing) - .build(), - graphicOverlay) - } - } catch (t: Throwable) { - Log.e(TAG, "Exception thrown from receiver.", t) - } finally { - camera!!.addCallbackBuffer(data.array()) - } - } - } - } - - /** Cleans up graphicOverlay and child classes can do their cleanups as well . */ - private fun cleanScreen() { - graphicOverlay.clear() - } - - companion object { - @SuppressLint("InlinedApi") - val CAMERA_FACING_BACK = Camera.CameraInfo.CAMERA_FACING_BACK - - @SuppressLint("InlinedApi") - val CAMERA_FACING_FRONT = Camera.CameraInfo.CAMERA_FACING_FRONT - - private const val TAG = "MIDemoApp:CameraSource" - - /** - * The dummy surface texture must be assigned a chosen name. Since we never use an OpenGL context, - * we can choose any ID we want here. The dummy surface texture is not a crazy hack - it is - * actually how the camera team recommends using the camera without a preview. - */ - private val DUMMY_TEXTURE_NAME = 100 - - /** - * If the absolute difference between a preview size aspect ratio and a picture size aspect ratio - * is less than this tolerance, they are considered to be the same aspect ratio. - */ - private val ASPECT_RATIO_TOLERANCE = 0.01f - - /** - * Gets the id for the camera specified by the direction it is facing. Returns -1 if no such - * camera was found. 
- * - * @param facing the desired camera (front-facing or rear-facing) - */ - private fun getIdForRequestedCamera(facing: Int): Int { - val cameraInfo = Camera.CameraInfo() - for (i in 0 until Camera.getNumberOfCameras()) { - Camera.getCameraInfo(i, cameraInfo) - if (cameraInfo.facing == facing) { - return i - } - } - return -1 - } - - /** - * Selects the most suitable preview and picture size, given the desired width and height. - * - * - * Even though we only need to find the preview size, it's necessary to find both the preview - * size and the picture size of the camera together, because these need to have the same aspect - * ratio. On some hardware, if you would only set the preview size, you will get a distorted - * image. - * - * @param camera the camera to select a preview size from - * @param desiredWidth the desired width of the camera preview frames - * @param desiredHeight the desired height of the camera preview frames - * @return the selected preview and picture size pair - */ - private fun selectSizePair(camera: Camera, desiredWidth: Int, desiredHeight: Int): SizePair? { - val validPreviewSizes = generateValidPreviewSizeList(camera) - - // The method for selecting the best size is to minimize the sum of the differences between - // the desired values and the actual values for width and height. This is certainly not the - // only way to select the best size, but it provides a decent tradeoff between using the - // closest aspect ratio vs. using the closest pixel area. - var selectedPair: SizePair? = null - var minDiff = Integer.MAX_VALUE - for (sizePair in validPreviewSizes) { - val size = sizePair.previewSize() - val diff = Math.abs(size.width - desiredWidth) + Math.abs(size.height - desiredHeight) - if (diff < minDiff) { - selectedPair = sizePair - minDiff = diff - } - } - - return selectedPair - } - - /** - * Generates a list of acceptable preview sizes. 
Preview sizes are not acceptable if there is not - * a corresponding picture size of the same aspect ratio. If there is a corresponding picture size - * of the same aspect ratio, the picture size is paired up with the preview size. - * - * - * This is necessary because even if we don't use still pictures, the still picture size must - * be set to a size that is the same aspect ratio as the preview size we choose. Otherwise, the - * preview images may be distorted on some devices. - */ - private fun generateValidPreviewSizeList(camera: Camera): List { - val parameters = camera.parameters - val supportedPreviewSizes = parameters.supportedPreviewSizes - val supportedPictureSizes = parameters.supportedPictureSizes - val validPreviewSizes = ArrayList() - for (previewSize in supportedPreviewSizes) { - val previewAspectRatio = previewSize.width.toFloat() / previewSize.height.toFloat() - - // By looping through the picture sizes in order, we favor the higher resolutions. - // We choose the highest resolution in order to support taking the full resolution - // picture later. - for (pictureSize in supportedPictureSizes) { - val pictureAspectRatio = pictureSize.width.toFloat() / pictureSize.height.toFloat() - if (Math.abs(previewAspectRatio - pictureAspectRatio) < ASPECT_RATIO_TOLERANCE) { - validPreviewSizes.add(SizePair(previewSize, pictureSize)) - break - } - } - } - - // If there are no picture sizes with the same aspect ratio as any preview sizes, allow all - // of the preview sizes and hope that the camera can handle it. Probably unlikely, but we - // still account for it. - if (validPreviewSizes.size == 0) { - Log.w(TAG, "No preview sizes have a corresponding same-aspect-ratio picture size") - for (previewSize in supportedPreviewSizes) { - // The null picture size will let us know that we shouldn't set a picture size. 
- validPreviewSizes.add(SizePair(previewSize, null)) - } - } - - return validPreviewSizes - } - - /** - * Selects the most suitable preview frames per second range, given the desired frames per second. - * - * @param camera the camera to select a frames per second range from - * @param desiredPreviewFps the desired frames per second for the camera preview frames - * @return the selected preview frames per second range - */ - @SuppressLint("InlinedApi") - private fun selectPreviewFpsRange(camera: Camera, desiredPreviewFps: Float): IntArray? { - // The camera API uses integers scaled by a factor of 1000 instead of floating-point frame - // rates. - val desiredPreviewFpsScaled = (desiredPreviewFps * 1000.0f).toInt() - - // The method for selecting the best range is to minimize the sum of the differences between - // the desired value and the upper and lower bounds of the range. This may select a range - // that the desired value is outside of, but this is often preferred. For example, if the - // desired frame rate is 29.97, the range (30, 30) is probably more desirable than the - // range (15, 30). - var selectedFpsRange: IntArray? 
= null - var minDiff = Integer.MAX_VALUE - val previewFpsRangeList = camera.parameters.supportedPreviewFpsRange - for (range in previewFpsRangeList) { - val deltaMin = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX] - val deltaMax = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX] - val diff = Math.abs(deltaMin) + Math.abs(deltaMax) - if (diff < minDiff) { - selectedFpsRange = range - minDiff = diff - } - } - return selectedFpsRange - } - } -} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt index 4a82fb3784..3b62bafe21 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt @@ -44,8 +44,8 @@ class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissions val adapter = MyArrayAdapter(this, android.R.layout.simple_list_item_2, CLASSES) adapter.setDescriptionIds(DESCRIPTION_IDS) - test_activity_list_view.adapter = adapter - test_activity_list_view.onItemClickListener = this + testActivityListView.adapter = adapter + testActivityListView.onItemClickListener = this if (!allPermissionsGranted()) { getRuntimePermissions() diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt deleted file mode 100644 index f18378b21e..0000000000 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/FrameMetadata.kt +++ /dev/null @@ -1,58 +0,0 @@ -package com.google.firebase.samples.apps.mlkit.kotlin - -/** Describing a frame info. 
*/ -data class FrameMetadata( - private val width: Int, - private val height: Int, - private val rotation: Int, - private val cameraFacing: Int -) { - fun getWidth(): Int { - return width - } - - fun getHeight(): Int { - return height - } - - fun getRotation(): Int { - return rotation - } - - fun getCameraFacing(): Int { - return cameraFacing - } - - /** Builder of [FrameMetadata]. */ - class Builder { - - private var width: Int = 0 - private var height: Int = 0 - private var rotation: Int = 0 - private var cameraFacing: Int = 0 - - fun setWidth(width: Int): Builder { - this.width = width - return this - } - - fun setHeight(height: Int): Builder { - this.height = height - return this - } - - fun setRotation(rotation: Int): Builder { - this.rotation = rotation - return this - } - - fun setCameraFacing(facing: Int): Builder { - cameraFacing = facing - return this - } - - fun build(): FrameMetadata { - return FrameMetadata(width, height, rotation, cameraFacing) - } - } -} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt deleted file mode 100644 index 0663874609..0000000000 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/GraphicOverlay.kt +++ /dev/null @@ -1,152 +0,0 @@ -package com.google.firebase.samples.apps.mlkit.kotlin - -import android.content.Context -import android.graphics.Canvas -import android.util.AttributeSet -import android.view.View -import com.google.android.gms.vision.CameraSource -import java.util.HashSet - -/** - * A view which renders a series of custom graphics to be overlayed on top of an associated preview - * (i.e., the camera preview). The creator can add graphics objects, update the objects, and remove - * them, triggering the appropriate drawing and invalidation within the view. - * - *

Supports scaling and mirroring of the graphics relative the camera's preview properties. The - * idea is that detection items are expressed in terms of a preview size, but need to be scaled up - * to the full view size, and also mirrored in the case of the front-facing camera. - * - *

Associated {@link Graphic} items should use the following methods to convert to view - * coordinates for the graphics that are drawn: - * - *

    - *
  1. {@link Graphic#scaleX(float)} and {@link Graphic#scaleY(float)} adjust the size of the - * supplied value from the preview scale to the view scale. - *
  2. {@link Graphic#translateX(float)} and {@link Graphic#translateY(float)} adjust the - * coordinate from the preview's coordinate system to the view coordinate system. - *
- */ -class GraphicOverlay(context: Context, attrs: AttributeSet) : View(context, attrs) { - private val lock = Any() - private var previewWidth = 0 - private var previewHeight = 0 - private val graphics = HashSet() - private var widthScaleFactor = 1.0f - private var heightScaleFactor = 1.0f - private var facing = CameraSource.CAMERA_FACING_BACK - - /** - * Base class for a custom graphics object to be rendered within the graphic overlay. Subclass - * this and implement the [Graphic.draw] method to define the graphics element. Add - * instances to the overlay using [GraphicOverlay.add]. - */ - abstract class Graphic(private val overlay: GraphicOverlay) { - - /** - * Draw the graphic on the supplied canvas. Drawing should use the following methods to convert - * to view coordinates for the graphics that are drawn: - * - * - * 1. [Graphic.scaleX] and [Graphic.scaleY] adjust the size of the - * supplied value from the preview scale to the view scale. - * 1. [Graphic.translateX] and [Graphic.translateY] adjust the - * coordinate from the preview's coordinate system to the view coordinate system. - * - * - * @param canvas drawing canvas - */ - abstract fun draw(canvas: Canvas) - - /** - * Adjusts a horizontal value of the supplied value from the preview scale to the view scale. - */ - fun scaleX(horizontal: Float): Float { - return horizontal * overlay.widthScaleFactor - } - - /** Adjusts a vertical value of the supplied value from the preview scale to the view scale. */ - fun scaleY(vertical: Float): Float { - return vertical * overlay.heightScaleFactor - } - - /** Returns the application context of the app. */ - val applicationContext: Context - get() = overlay.context.applicationContext - - /** - * Adjusts the x coordinate from the preview's coordinate system to the view coordinate system. 
- */ - fun translateX(x: Float): Float { - return if (overlay.facing == CameraSource.CAMERA_FACING_FRONT) { - overlay.width - scaleX(x) - } else { - scaleX(x) - } - } - - /** - * Adjusts the y coordinate from the preview's coordinate system to the view coordinate system. - */ - fun translateY(y: Float): Float { - return scaleY(y) - } - - fun postInvalidate() { - overlay.postInvalidate() - } - } - - /** Removes all graphics from the overlay. */ - fun clear() { - synchronized (lock) { - graphics.clear() - } - postInvalidate() - } - - /** Adds a graphic to the overlay. */ - fun add(graphic: Graphic) { - synchronized (lock) { - graphics.add(graphic) - } - postInvalidate() - } - - /** Removes a graphic from the overlay. */ - fun remove(graphic: Graphic) { - synchronized (lock) { - graphics.remove(graphic) - } - postInvalidate() - } - - /** - * Sets the camera attributes for size and facing direction, which informs how to transform image - * coordinates later. - */ - fun setCameraInfo(previewWidth: Int, previewHeight: Int, facing: Int) { - synchronized (lock) { - this.previewWidth = previewWidth - this.previewHeight = previewHeight - this.facing = facing - } - postInvalidate() - } - - /** Draws the overlay with its associated graphic objects. 
*/ - override fun onDraw(canvas: Canvas) { - super.onDraw(canvas) - - synchronized (lock) { - if (previewWidth != 0 && previewHeight != 0) { - widthScaleFactor = canvas.width.toFloat() / previewWidth.toFloat() - heightScaleFactor = canvas.height.toFloat() / previewHeight.toFloat() - } - - for (graphic in graphics) { - graphic.draw(canvas) - } - } - } - -} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt index 96d769ddf3..4c484a2321 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt @@ -12,11 +12,13 @@ import android.widget.* import com.google.android.gms.common.annotation.KeepName import com.google.firebase.ml.common.FirebaseMLException import com.google.firebase.samples.apps.mlkit.R +import com.google.firebase.samples.apps.mlkit.common.CameraSource +import com.google.firebase.samples.apps.mlkit.common.CameraSourcePreview +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.kotlin.barcodescanning.BarcodeScanningProcessor import com.google.firebase.samples.apps.mlkit.kotlin.custommodel.CustomImageClassifierProcessor import com.google.firebase.samples.apps.mlkit.kotlin.facedetection.FaceDetectionProcessor import com.google.firebase.samples.apps.mlkit.kotlin.imagelabeling.ImageLabelingProcessor -import com.google.firebase.samples.apps.mlkit.kotlin.textrecognition.CameraSourcePreview import com.google.firebase.samples.apps.mlkit.kotlin.textrecognition.TextRecognitionProcessor import java.io.IOException import java.util.* @@ -79,7 +81,7 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss spinner.adapter = dataAdapter spinner.onItemSelectedListener = 
this - val facingSwitch = findViewById(R.id.facingswitch) as ToggleButton + val facingSwitch = findViewById(R.id.facingSwitch) as ToggleButton facingSwitch.setOnCheckedChangeListener(this) if (allPermissionsGranted()) { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt index 568d922471..3fb15a1dd5 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt @@ -15,6 +15,8 @@ import android.view.View import android.widget.* import com.google.android.gms.common.annotation.KeepName import com.google.firebase.samples.apps.mlkit.R +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudimagelabeling.CloudImageLabelingProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudtextrecognition.CloudDocumentTextRecognitionProcessor diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt deleted file mode 100644 index ea9c511de4..0000000000 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionImageProcessor.kt +++ /dev/null @@ -1,23 +0,0 @@ -package com.google.firebase.samples.apps.mlkit.kotlin - -import android.graphics.Bitmap -import android.media.Image -import com.google.firebase.ml.common.FirebaseMLException -import java.nio.ByteBuffer - -/** An inferface to process the images with different ML Kit detectors and custom image models. 
*/ -interface VisionImageProcessor { - - /** Processes the images with the underlying machine learning models. */ - @Throws(FirebaseMLException::class) - fun process(data: ByteBuffer, frameMetadata: FrameMetadata, graphicOverlay: GraphicOverlay) - - /** Processes the bitmap images. */ - fun process(bitmap: Bitmap, graphicOverlay: GraphicOverlay) - - /** Processes the images. */ - fun process(bitmap: Image, rotation: Int, graphicOverlay: GraphicOverlay) - - /** Stops the underlying machine learning model and release resources. */ - fun stop() -} \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt index e46c783808..ca577af926 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/VisionProcessorBase.kt @@ -5,10 +5,12 @@ import android.media.Image import com.google.android.gms.tasks.Task import com.google.firebase.ml.vision.common.FirebaseVisionImage import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor import java.nio.ByteBuffer import java.util.concurrent.atomic.AtomicBoolean - /** * Abstract base class for ML Kit frame processors. Subclasses need to implement {@link * #onSuccess(T, FrameMetadata, GraphicOverlay)} to define what they want to with the detection @@ -16,25 +18,22 @@ import java.util.concurrent.atomic.AtomicBoolean * * @param The type of the detected feature. */ -abstract class VisionProcessorBase: VisionImageProcessor { +abstract class VisionProcessorBase : VisionImageProcessor { // Whether we should ignore process(). 
This is usually caused by feeding input data faster than // the model can handle. private val shouldThrottle = AtomicBoolean(false) override fun process( - data: ByteBuffer, - frameMetadata: FrameMetadata, - graphicOverlay: GraphicOverlay) { + data: ByteBuffer, frameMetadata: FrameMetadata, graphicOverlay: GraphicOverlay) { if (shouldThrottle.get()) { return } - val metadata = FirebaseVisionImageMetadata.Builder() .setFormat(FirebaseVisionImageMetadata.IMAGE_FORMAT_NV21) - .setWidth(frameMetadata.getWidth()) - .setHeight(frameMetadata.getHeight()) - .setRotation(frameMetadata.getRotation()) + .setWidth(frameMetadata.width) + .setHeight(frameMetadata.height) + .setRotation(frameMetadata.rotation) .build() detectInVisionImage( @@ -74,8 +73,9 @@ abstract class VisionProcessorBase: VisionImageProcessor { detectInImage(image) .addOnSuccessListener { results -> shouldThrottle.set(false) - this@VisionProcessorBase.onSuccess(results, metadata!!, - graphicOverlay) + metadata?.let { + onSuccess(results, it, graphicOverlay) + } } .addOnFailureListener { e -> shouldThrottle.set(false) @@ -86,8 +86,7 @@ abstract class VisionProcessorBase: VisionImageProcessor { shouldThrottle.set(true) } - override fun stop() { - } + override fun stop() {} protected abstract fun detectInImage(image: FirebaseVisionImage): Task diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt index 1678a33356..ba569b5794 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeGraphic.kt @@ -5,13 +5,15 @@ import android.graphics.Color import android.graphics.Paint import android.graphics.RectF import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode -import 
com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay class BarcodeGraphic(overlay: GraphicOverlay, barcode: FirebaseVisionBarcode) : GraphicOverlay.Graphic(overlay) { - private val TEXT_COLOR = Color.WHITE - private val TEXT_SIZE = 54.0f - private val STROKE_WIDTH = 4.0f + companion object { + private const val TEXT_COLOR = Color.WHITE + private const val TEXT_SIZE = 54.0f + private const val STROKE_WIDTH = 4.0f + } private var rectPaint: Paint private var barcodePaint: Paint @@ -49,6 +51,6 @@ class BarcodeGraphic(overlay: GraphicOverlay, barcode: FirebaseVisionBarcode) : canvas.drawRect(rect, rectPaint) // Renders the barcode at the bottom of the box. - canvas.drawText(barcode.rawValue!!, rect.left, rect.bottom, barcodePaint) + canvas.drawText(barcode.rawValue, rect.left, rect.bottom, barcodePaint) } } \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt index 8ab5aca992..5b7dd19cdc 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt @@ -6,8 +6,8 @@ import com.google.firebase.ml.vision.FirebaseVision import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetector import com.google.firebase.ml.vision.common.FirebaseVisionImage -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import 
com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase import java.io.IOException diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt index 57cdfcb373..76f060b019 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudimagelabeling/CloudImageLabellingProcessor.kt @@ -7,8 +7,8 @@ import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabel import com.google.firebase.ml.vision.cloud.label.FirebaseVisionCloudLabelDetector import com.google.firebase.ml.vision.common.FirebaseVisionImage -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase /** Cloud Label Detector Demo. */ @@ -36,7 +36,7 @@ class CloudImageLabelingProcessor : VisionProcessorBase() for (i in labels.indices) { @@ -44,8 +44,8 @@ class CloudImageLabelingProcessor : VisionProcessorBase? = null + private lateinit var labels: List init { textPaint = Paint() @@ -28,7 +28,7 @@ class CloudLabelGraphic(private val overlay: GraphicOverlay) : GraphicOverlay.Gr val x = overlay.width / 4.0f var y = overlay.height / 4.0f - for (label in labels!!) 
{ + for (label in labels) { canvas.drawText(label, x, y, textPaint) y -= 62.0f } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt index 99c3792f9e..cba7b69715 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt @@ -5,14 +5,14 @@ import android.graphics.Color import android.graphics.Paint import android.graphics.RectF import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay /** Graphic instance for rendering detected landmark. */ class CloudLandmarkGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { private val rectPaint: Paint private val landmarkPaint: Paint - private var landmark: FirebaseVisionCloudLandmark? = null + private lateinit var landmark: FirebaseVisionCloudLandmark init { @@ -42,12 +42,12 @@ class CloudLandmarkGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(ove if (landmark == null) { throw IllegalStateException("Attempting to draw a null landmark.") } - if (landmark!!.landmark == null || landmark!!.boundingBox == null) { + if (landmark.landmark == null || landmark.boundingBox == null) { return } // Draws the bounding box around the LandmarkBlock. 
- val rect = RectF(landmark!!.boundingBox) + val rect = RectF(landmark.boundingBox) rect.left = translateX(rect.left) rect.top = translateY(rect.top) rect.right = translateX(rect.right) @@ -55,7 +55,7 @@ class CloudLandmarkGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(ove canvas.drawRect(rect, rectPaint) // Renders the landmark at the bottom of the box. - canvas.drawText(landmark!!.landmark, rect.left, rect.bottom, landmarkPaint) + canvas.drawText(landmark.landmark, rect.left, rect.bottom, landmarkPaint) } companion object { diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt index 7a292b95ae..4dc58498c4 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkRecognitionProcessor.kt @@ -7,8 +7,8 @@ import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmark import com.google.firebase.ml.vision.cloud.landmark.FirebaseVisionCloudLandmarkDetector import com.google.firebase.ml.vision.common.FirebaseVisionImage -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase /** Cloud Landmark Detector Demo. 
*/ @@ -34,7 +34,7 @@ class CloudLandmarkRecognitionProcessor : VisionProcessorBase() companion object { - private val TAG = "CloudTextRecProc" + private const val TAG = "CloudTextRecProc" } } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt index 1acf30a1a0..927fe3e5d0 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifier.kt @@ -130,14 +130,15 @@ constructor(activity: Activity) { val startTime = SystemClock.uptimeMillis() for (i in 0 until DIM_IMG_SIZE_X) { for (j in 0 until DIM_IMG_SIZE_Y) { - val `val` = intValues[pixel++] - imgData.put((`val` shr 16 and 0xFF).toByte()) - imgData.put((`val` shr 8 and 0xFF).toByte()) - imgData.put((`val` and 0xFF).toByte()) + val value = intValues[pixel++] + imgData.put((value shr 16 and 0xFF).toByte()) + imgData.put((value shr 8 and 0xFF).toByte()) + imgData.put((value and 0xFF).toByte()) } } val endTime = SystemClock.uptimeMillis() - Log.d(TAG, "Timecost to put values into ByteBuffer: " + (endTime - startTime)) + val timeCost = endTime - startTime + Log.d(TAG, "Timecost to put values into ByteBuffer: $timeCost") return imgData } @@ -165,7 +166,7 @@ constructor(activity: Activity) { val size = sortedLabels.size for (i in 0 until size) { val label = sortedLabels.poll() - result.add(label.key + ":" + label.value) + result.add("${label.key}:${label.value}") } return result } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt index b68aa0d5a0..a61a068059 100644 --- 
a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/CustomImageClassifierProcessor.kt @@ -4,9 +4,9 @@ import android.app.Activity import android.graphics.Bitmap import android.media.Image import com.google.firebase.ml.common.FirebaseMLException -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay -import com.google.firebase.samples.apps.mlkit.kotlin.VisionImageProcessor +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor import java.nio.ByteBuffer /** Custom Image Classifier Demo. */ @@ -16,15 +16,13 @@ constructor(private val activity: Activity): VisionImageProcessor { private val classifier: CustomImageClassifier init{ - classifier = - com.google.firebase.samples.apps.mlkit.kotlin.custommodel.CustomImageClassifier(activity) + classifier = CustomImageClassifier(activity) } @Throws(FirebaseMLException::class) - override fun process( - data: ByteBuffer, frameMetadata: FrameMetadata, graphicOverlay: GraphicOverlay) { + override fun process(data: ByteBuffer, frameMetadata: FrameMetadata, graphicOverlay: GraphicOverlay) { classifier - .classifyFrame(data, frameMetadata.getWidth(), frameMetadata.getHeight()) + .classifyFrame(data, frameMetadata.width, frameMetadata.height) .addOnSuccessListener( activity ) { result -> diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt index f67d9b5eec..ad839a128d 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt +++ 
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/custommodel/LabelGraphic.kt @@ -3,14 +3,14 @@ package com.google.firebase.samples.apps.mlkit.kotlin.custommodel import android.graphics.Canvas import android.graphics.Color import android.graphics.Paint -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay /** Graphic instance for rendering image labels. */ class LabelGraphic(private val overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { private val textPaint: Paint - private var labels: List? = null + private lateinit var labels: List init { textPaint = Paint() @@ -29,7 +29,7 @@ class LabelGraphic(private val overlay: GraphicOverlay) : GraphicOverlay.Graphic val x = overlay.width / 4.0f var y = overlay.height / 4.0f - for (label in labels!!) { + for (label in labels) { canvas.drawText(label, x, y, textPaint) y -= 62.0f } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt index e20511d5ac..4b76540057 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceDetectionProcessor.kt @@ -7,8 +7,8 @@ import com.google.firebase.ml.vision.common.FirebaseVisionImage import com.google.firebase.ml.vision.face.FirebaseVisionFace import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import 
com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase import java.io.IOException @@ -49,7 +49,7 @@ class FaceDetectionProcessor : VisionProcessorBase>() { val face = faces[i] val faceGraphic = FaceGraphic(graphicOverlay) graphicOverlay.add(faceGraphic) - faceGraphic.updateFace(face, frameMetadata.getCameraFacing()) + faceGraphic.updateFace(face, frameMetadata.cameraFacing) } } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt index 6b32f75039..5f1a208dcc 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/facedetection/FaceGraphic.kt @@ -6,7 +6,7 @@ import android.graphics.Paint import com.google.android.gms.vision.CameraSource import com.google.firebase.ml.vision.face.FirebaseVisionFace import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay /** * Graphic instance for rendering face position, orientation, and landmarks within an associated @@ -21,7 +21,7 @@ class FaceGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { private val boxPaint: Paint @Volatile - private var firebaseVisionFace: FirebaseVisionFace? 
= null + private lateinit var firebaseVisionFace: FirebaseVisionFace init { @@ -59,31 +59,31 @@ class FaceGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { val x = translateX(face.boundingBox.centerX().toFloat()) val y = translateY(face.boundingBox.centerY().toFloat()) canvas.drawCircle(x, y, FACE_POSITION_RADIUS, facePositionPaint) - canvas.drawText("id: " + face.trackingId, x + ID_X_OFFSET, y + ID_Y_OFFSET, idPaint) + canvas.drawText("id: ${face.trackingId}" , x + ID_X_OFFSET, y + ID_Y_OFFSET, idPaint) canvas.drawText( - "happiness: " + String.format("%.2f", face.smilingProbability), + "happiness: ${String.format("%.2f", face.smilingProbability)}", x + ID_X_OFFSET * 3, y - ID_Y_OFFSET, idPaint) if (facing == CameraSource.CAMERA_FACING_FRONT) { canvas.drawText( - "right eye: " + String.format("%.2f", face.rightEyeOpenProbability), + "right eye: ${String.format("%.2f", face.rightEyeOpenProbability)}", x - ID_X_OFFSET, y, idPaint) canvas.drawText( - "left eye: " + String.format("%.2f", face.leftEyeOpenProbability), + "left eye: ${String.format("%.2f", face.leftEyeOpenProbability)}", x + ID_X_OFFSET * 6, y, idPaint) } else { canvas.drawText( - "left eye: " + String.format("%.2f", face.leftEyeOpenProbability), + "left eye: ${String.format("%.2f", face.leftEyeOpenProbability)}", x - ID_X_OFFSET, y, idPaint) canvas.drawText( - "right eye: " + String.format("%.2f", face.rightEyeOpenProbability), + "right eye: ${String.format("%.2f", face.rightEyeOpenProbability)}", x + ID_X_OFFSET * 6, y, idPaint) @@ -113,11 +113,11 @@ class FaceGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { private fun drawLandmarkPosition(canvas: Canvas, face: FirebaseVisionFace, landmarkID: Int) { val landmark = face.getLandmark(landmarkID) - if (landmark != null) { + landmark?.let { val point = landmark.position canvas.drawCircle( - translateX(point.x!!), - translateY(point.y!!), + translateX(point.x), + translateY(point.y), 10f, idPaint) } } @@ -129,7 +129,8 
@@ class FaceGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(overlay) { private const val ID_X_OFFSET = -50.0f private const val BOX_STROKE_WIDTH = 5.0f - private val COLOR_CHOICES = intArrayOf(Color.BLUE, Color.CYAN, Color.GREEN, Color.MAGENTA, Color.RED, Color.WHITE, Color.YELLOW) + private val COLOR_CHOICES = intArrayOf(Color.BLUE, Color.CYAN, Color.GREEN, Color.MAGENTA, + Color.RED, Color.WHITE, Color.YELLOW) private var currentColorIndex = 0 } } \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt index f5be5b6df8..24e2d20923 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/ImageLabelingProcessor.kt @@ -6,8 +6,8 @@ import com.google.firebase.ml.vision.FirebaseVision import com.google.firebase.ml.vision.common.FirebaseVisionImage import com.google.firebase.ml.vision.label.FirebaseVisionLabel import com.google.firebase.ml.vision.label.FirebaseVisionLabelDetector -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase import java.io.IOException @@ -48,6 +48,6 @@ class ImageLabelingProcessor : VisionProcessorBase>() companion object { - private val TAG = "ImageLabelingProcessor" + private const val TAG = "ImageLabelingProcessor" } } \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt 
b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt index 8d2530f281..475484884e 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/imagelabeling/LabelGraphic.kt @@ -4,7 +4,7 @@ import android.graphics.Canvas import android.graphics.Color import android.graphics.Paint import com.google.firebase.ml.vision.label.FirebaseVisionLabel -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay /** Graphic instance for rendering a label within an associated graphic overlay view. */ class LabelGraphic ( diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt deleted file mode 100644 index 6333dac45c..0000000000 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/CameraSourcePreview.kt +++ /dev/null @@ -1,167 +0,0 @@ -package com.google.firebase.samples.apps.mlkit.kotlin.textrecognition - -import android.annotation.SuppressLint -import android.content.Context -import android.content.res.Configuration -import android.util.AttributeSet -import android.util.Log -import android.view.SurfaceHolder -import android.view.SurfaceView -import android.view.ViewGroup -import com.google.firebase.samples.apps.mlkit.kotlin.CameraSource -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay -import java.io.IOException - -/** Preview the camera image in the screen. 
*/ -class CameraSourcePreview(private val ctx: Context, attrs: AttributeSet) : ViewGroup(ctx, attrs) { - private val surfaceView: SurfaceView - private var startRequested: Boolean = false - private var surfaceAvailable: Boolean = false - private var cameraSource: CameraSource? = null - - private var overlay: GraphicOverlay? = null - - private val isPortraitMode: Boolean - get() { - val orientation = ctx.resources.configuration.orientation - if (orientation == Configuration.ORIENTATION_LANDSCAPE) { - return false - } - if (orientation == Configuration.ORIENTATION_PORTRAIT) { - return true - } - - Log.d(TAG, "isPortraitMode returning false by default") - return false - } - - init { - startRequested = false - surfaceAvailable = false - - surfaceView = SurfaceView(ctx) - surfaceView.holder.addCallback(SurfaceCallback()) - addView(surfaceView) - } - - @Throws(IOException::class) - fun start(cameraSource: CameraSource?) { - if (cameraSource == null) { - stop() - } - - this.cameraSource = cameraSource - - if (this.cameraSource != null) { - startRequested = true - startIfReady() - } - } - - @Throws(IOException::class) - fun start(cameraSource: CameraSource, overlay: GraphicOverlay) { - this.overlay = overlay - start(cameraSource) - } - - fun stop() { - if (cameraSource != null) { - cameraSource!!.stop() - } - } - - fun release() { - if (cameraSource != null) { - cameraSource!!.release() - cameraSource = null - } - } - - @SuppressLint("MissingPermission") - @Throws(IOException::class) - private fun startIfReady() { - if (startRequested && surfaceAvailable) { - cameraSource!!.start(surfaceView.holder) - if (overlay != null) { - val size = cameraSource!!.previewSize!! 
- val min = Math.min(size.width, size.height) - val max = Math.max(size.width, size.height) - if (isPortraitMode) { - // Swap width and height sizes when in portrait, since it will be rotated by - // 90 degrees - overlay!!.setCameraInfo(min, max, cameraSource!!.cameraFacing) - } else { - overlay!!.setCameraInfo(max, min, cameraSource!!.cameraFacing) - } - overlay!!.clear() - } - startRequested = false - } - } - - private inner class SurfaceCallback : SurfaceHolder.Callback { - override fun surfaceCreated(surface: SurfaceHolder) { - surfaceAvailable = true - try { - startIfReady() - } catch (e: IOException) { - Log.e(TAG, "Could not start camera source.", e) - } - - } - - override fun surfaceDestroyed(surface: SurfaceHolder) { - surfaceAvailable = false - } - - override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {} - } - - override fun onLayout(changed: Boolean, left: Int, top: Int, right: Int, bottom: Int) { - var width = 320 - var height = 240 - if (cameraSource != null) { - val size = cameraSource!!.previewSize - if (size != null) { - width = size.width - height = size.height - } - } - - // Swap width and height sizes when in portrait, since it will be rotated 90 degrees - if (isPortraitMode) { - val tmp = width - width = height - height = tmp - } - - val layoutWidth = right - left - val layoutHeight = bottom - top - - // Computes height and width for potentially doing fit width. - var childWidth = layoutWidth - var childHeight = (layoutWidth.toFloat() / width.toFloat() * height).toInt() - - // If height is too tall using fit width, does fit height instead. 
- if (childHeight > layoutHeight) { - childHeight = layoutHeight - childWidth = (layoutHeight.toFloat() / height.toFloat() * width).toInt() - } - - for (i in 0 until childCount) { - getChildAt(i).layout(0, 0, childWidth, childHeight) - Log.d(TAG, "Assigned view: $i") - } - - try { - startIfReady() - } catch (e: IOException) { - Log.e(TAG, "Could not start camera source.", e) - } - - } - - companion object { - private val TAG = "MIDemoApp:Preview" - } -} diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt index 6a5a783218..b53e32218a 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextGraphic.kt @@ -5,7 +5,7 @@ import android.graphics.Color import android.graphics.Paint import android.graphics.RectF import com.google.firebase.ml.vision.text.FirebaseVisionText -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay /** * Graphic instance for rendering TextBlock position, size, and ID within an associated graphic diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt index 2d234fe048..19d7df9023 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/textrecognition/TextRecognitionProcessor.kt @@ -6,8 +6,8 @@ import com.google.firebase.ml.vision.FirebaseVision import com.google.firebase.ml.vision.common.FirebaseVisionImage import 
com.google.firebase.ml.vision.text.FirebaseVisionText import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer -import com.google.firebase.samples.apps.mlkit.kotlin.FrameMetadata -import com.google.firebase.samples.apps.mlkit.kotlin.GraphicOverlay +import com.google.firebase.samples.apps.mlkit.common.FrameMetadata +import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.kotlin.VisionProcessorBase import java.io.IOException diff --git a/mlkit/app/src/main/res/layout-land/activity_live_preview.xml b/mlkit/app/src/main/res/layout-land/activity_live_preview.xml index 48df1c0d16..2e78a070fc 100644 --- a/mlkit/app/src/main/res/layout-land/activity_live_preview.xml +++ b/mlkit/app/src/main/res/layout-land/activity_live_preview.xml @@ -8,17 +8,17 @@ android:background="#000" android:keepScreenOn="true"> - - - + diff --git a/mlkit/app/src/main/res/layout/activity_live_preview.xml b/mlkit/app/src/main/res/layout/activity_live_preview.xml index dca2164e49..eb040e459c 100644 --- a/mlkit/app/src/main/res/layout/activity_live_preview.xml +++ b/mlkit/app/src/main/res/layout/activity_live_preview.xml @@ -9,21 +9,21 @@ android:background="#000" android:keepScreenOn="true"> - - - + - Date: Thu, 13 Sep 2018 23:41:38 +0200 Subject: [PATCH 4/7] chore: minor import changes --- mlkit/app/src/main/java/EntryChoiceActivity.kt | 4 ++-- .../firebase/samples/apps/mlkit/java/StillImageActivity.java | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mlkit/app/src/main/java/EntryChoiceActivity.kt b/mlkit/app/src/main/java/EntryChoiceActivity.kt index 245f4e1723..351d4a6e22 100644 --- a/mlkit/app/src/main/java/EntryChoiceActivity.kt +++ b/mlkit/app/src/main/java/EntryChoiceActivity.kt @@ -3,7 +3,6 @@ package com.google.firebase.samples.apps.mlkit import android.content.Intent import com.firebase.example.internal.BaseEntryChoiceActivity import com.firebase.example.internal.Choice -import 
com.google.firebase.samples.apps.mlkit.java.ChooserActivity class EntryChoiceActivity : BaseEntryChoiceActivity() { @@ -12,7 +11,8 @@ class EntryChoiceActivity : BaseEntryChoiceActivity() { Choice( "Java", "Run the Firebase ML Kit quickstart written in Java.", - Intent(this, ChooserActivity::class.java)), + Intent(this, + com.google.firebase.samples.apps.mlkit.java.ChooserActivity::class.java)), Choice( "Kotlin", "Run the Firebase ML Kit quickstart written in Kotlin.", diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java index bbaf6daff8..d3b3037d69 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/java/StillImageActivity.java @@ -37,6 +37,7 @@ import android.widget.Spinner; import com.google.android.gms.common.annotation.KeepName; +import com.google.firebase.samples.apps.mlkit.R; import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay; import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor; import com.google.firebase.samples.apps.mlkit.java.cloudimagelabeling.CloudImageLabelingProcessor; From 2d774d19c60374616e69b9ded5ded7ae3da1e37a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ros=C3=A1rio=20Pereira=20Fernandes?= Date: Fri, 14 Sep 2018 00:03:52 +0200 Subject: [PATCH 5/7] chore: Add Lint --- mlkit/app/build.gradle | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mlkit/app/build.gradle b/mlkit/app/build.gradle index 4eebd02e76..8396a3b800 100644 --- a/mlkit/app/build.gradle +++ b/mlkit/app/build.gradle @@ -24,8 +24,9 @@ android { } dependencies { - implementation project(':internal') - implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.2.50" + implementation project(":internal:lintchecks") + implementation project(':internal:chooser') + 
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.2.70" implementation 'com.android.support:appcompat-v7:27.1.1' implementation 'com.android.support:design:27.1.1' From 81213d9860d0710118e2bbf015b2dc0751ea292d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ros=C3=A1rio=20Pereira=20Fernandes?= Date: Fri, 14 Sep 2018 00:49:41 +0200 Subject: [PATCH 6/7] style: final Kotlin improvements --- .../apps/mlkit/kotlin/ChooserActivity.kt | 31 +++-- .../apps/mlkit/kotlin/LivePreviewActivity.kt | 50 ++++---- .../apps/mlkit/kotlin/StillImageActivity.kt | 120 +++++++++--------- 3 files changed, 105 insertions(+), 96 deletions(-) diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt index 3b62bafe21..e0ce0b209b 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt @@ -15,8 +15,6 @@ import android.widget.AdapterView import android.widget.ArrayAdapter import android.widget.TextView import com.google.firebase.samples.apps.mlkit.R -import com.google.firebase.samples.apps.mlkit.java.LivePreviewActivity -import com.google.firebase.samples.apps.mlkit.java.StillImageActivity import java.util.ArrayList import kotlinx.android.synthetic.main.activity_chooser.* @@ -27,12 +25,17 @@ import kotlinx.android.synthetic.main.activity_chooser.* class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissionsResultCallback, AdapterView.OnItemClickListener { - private val TAG = "ChooserActivity" - private val PERMISSION_REQUESTS = 1 + companion object { + private const val TAG = "ChooserActivity" + private const val PERMISSION_REQUESTS = 1 - private val CLASSES = arrayOf>(LivePreviewActivity::class.java, StillImageActivity::class.java) + private val CLASSES = + arrayOf>(LivePreviewActivity::class.java, 
StillImageActivity::class.java) + + private val DESCRIPTION_IDS = + intArrayOf(R.string.desc_camera_source_activity, R.string.desc_still_image_activity) + } - private val DESCRIPTION_IDS = intArrayOf(R.string.desc_camera_source_activity, R.string.desc_still_image_activity) override fun onCreate(savedInstanceState: Bundle?) { super.onCreate(savedInstanceState) @@ -76,8 +79,10 @@ class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissions private fun allPermissionsGranted(): Boolean { for (permission in getRequiredPermissions()) { - if (!isPermissionGranted(this, permission!!)) { - return false + permission?.let { + if (!isPermissionGranted(this, it)) { + return false + } } } return true @@ -86,8 +91,10 @@ class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissions private fun getRuntimePermissions() { val allNeededPermissions = ArrayList() for (permission in getRequiredPermissions()) { - if (!isPermissionGranted(this, permission!!)) { - allNeededPermissions.add(permission) + permission?.let { + if (!isPermissionGranted(this, it)) { + allNeededPermissions.add(permission) + } } } @@ -121,7 +128,9 @@ class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissions } (view!!.findViewById(android.R.id.text1) as TextView).text = classes[position].simpleName - (view.findViewById(android.R.id.text2) as TextView).setText(descriptionIds!![position]) + descriptionIds?.let { + (view.findViewById(android.R.id.text2) as TextView).setText(it[position]) + } return view } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt index 4c484a2321..01e32389c4 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt @@ -30,8 +30,6 @@ import 
kotlinx.android.synthetic.main.activity_live_preview.* class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissionsResultCallback, AdapterView.OnItemSelectedListener, CompoundButton.OnCheckedChangeListener { private var cameraSource: CameraSource? = null - private var preview: CameraSourcePreview? = null - private var graphicOverlay: GraphicOverlay? = null private var selectedModel = FACE_DETECTION private val requiredPermissions: Array @@ -57,16 +55,13 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss setContentView(R.layout.activity_live_preview) - preview = findViewById(R.id.firePreview) as CameraSourcePreview - if (preview == null) { + if (firePreview == null) { Log.d(TAG, "Preview is null") } - graphicOverlay = findViewById(R.id.fireFaceOverlay) as GraphicOverlay - if (graphicOverlay == null) { + if (fireFaceOverlay == null) { Log.d(TAG, "graphicOverlay is null") } - val spinner = findViewById(R.id.spinner) as Spinner val options = ArrayList() options.add(FACE_DETECTION) options.add(TEXT_DETECTION) @@ -81,7 +76,6 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss spinner.adapter = dataAdapter spinner.onItemSelectedListener = this - val facingSwitch = findViewById(R.id.facingSwitch) as ToggleButton facingSwitch.setOnCheckedChangeListener(this) if (allPermissionsGranted()) { @@ -97,7 +91,7 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss // parent.getItemAtPosition(pos) selectedModel = parent.getItemAtPosition(pos).toString() Log.d(TAG, "Selected model: $selectedModel") - preview!!.stop() + firePreview.stop() if (allPermissionsGranted()) { createCameraSource(selectedModel) startCameraSource() @@ -112,21 +106,21 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss override fun onCheckedChanged(buttonView: CompoundButton, isChecked: Boolean) { Log.d(TAG, "Set facing") - if (cameraSource != null) { + 
cameraSource?.let { if (isChecked) { - cameraSource!!.setFacing(CameraSource.CAMERA_FACING_FRONT) + it.setFacing(CameraSource.CAMERA_FACING_FRONT) } else { - cameraSource!!.setFacing(CameraSource.CAMERA_FACING_BACK) + it.setFacing(CameraSource.CAMERA_FACING_BACK) } } - preview!!.stop() + firePreview.stop() startCameraSource() } private fun createCameraSource(model: String) { // If there's no existing cameraSource, create one. if (cameraSource == null) { - cameraSource = CameraSource(this, graphicOverlay!!) + cameraSource = CameraSource(this, fireFaceOverlay) } try { @@ -165,18 +159,18 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss * again when the camera source is created. */ private fun startCameraSource() { - if (cameraSource != null) { + cameraSource.let { try { - if (preview == null) { + if (firePreview == null) { Log.d(TAG, "resume: Preview is null") } - if (graphicOverlay == null) { + if (fireFaceOverlay == null) { Log.d(TAG, "resume: graphOverlay is null") } - preview!!.start(cameraSource!!, graphicOverlay!!) + firePreview.start(it, fireFaceOverlay) } catch (e: IOException) { Log.e(TAG, "Unable to start camera source.", e) - cameraSource!!.release() + it?.release() cameraSource = null } @@ -192,20 +186,22 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss /** Stops the camera. 
*/ override fun onPause() { super.onPause() - preview!!.stop() + firePreview.stop() } public override fun onDestroy() { super.onDestroy() - if (cameraSource != null) { - cameraSource!!.release() + cameraSource?.let { + it?.release() } } private fun allPermissionsGranted(): Boolean { for (permission in requiredPermissions) { - if (!isPermissionGranted(this, permission!!)) { - return false + permission?.let { + if (!isPermissionGranted(this, it)) { + return false + } } } return true @@ -214,8 +210,10 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss private fun getRuntimePermissions() { val allNeededPermissions = ArrayList() for (permission in requiredPermissions) { - if (!isPermissionGranted(this, permission!!)) { - allNeededPermissions.add(permission) + permission?.let { + if (!isPermissionGranted(this, it)) { + allNeededPermissions.add(it) + } } } diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt index 3fb15a1dd5..7f69f36db8 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt @@ -15,7 +15,6 @@ import android.view.View import android.widget.* import com.google.android.gms.common.annotation.KeepName import com.google.firebase.samples.apps.mlkit.R -import com.google.firebase.samples.apps.mlkit.common.GraphicOverlay import com.google.firebase.samples.apps.mlkit.common.VisionImageProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudimagelabeling.CloudImageLabelingProcessor import com.google.firebase.samples.apps.mlkit.kotlin.cloudlandmarkrecognition.CloudLandmarkRecognitionProcessor @@ -30,31 +29,32 @@ import kotlinx.android.synthetic.main.activity_still_image.* @KeepName class StillImageActivity: AppCompatActivity() { - 
private val TAG = "StillImageActivity" + companion object { - private val CLOUD_LABEL_DETECTION = "Cloud Label" - private val CLOUD_LANDMARK_DETECTION = "Landmark" - private val CLOUD_TEXT_DETECTION = "Cloud Text" - private val CLOUD_DOCUMENT_TEXT_DETECTION = "Doc Text" + private const val TAG = "StillImageActivity" - private val SIZE_PREVIEW = "w:max" // Available on-screen width. - private val SIZE_1024_768 = "w:1024" // ~1024*768 in a normal ratio - private val SIZE_640_480 = "w:640" // ~640*480 in a normal ratio + private const val CLOUD_LABEL_DETECTION = "Cloud Label" + private const val CLOUD_LANDMARK_DETECTION = "Landmark" + private const val CLOUD_TEXT_DETECTION = "Cloud Text" + private const val CLOUD_DOCUMENT_TEXT_DETECTION = "Doc Text" - private val KEY_IMAGE_URI = "com.googletest.firebase.ml.demo.KEY_IMAGE_URI" - private val KEY_IMAGE_MAX_WIDTH = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_WIDTH" - private val KEY_IMAGE_MAX_HEIGHT = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_HEIGHT" - private val KEY_SELECTED_SIZE = "com.googletest.firebase.ml.demo.KEY_SELECTED_SIZE" + private const val SIZE_PREVIEW = "w:max" // Available on-screen width. + private const val SIZE_1024_768 = "w:1024" // ~1024*768 in a normal ratio + private const val SIZE_640_480 = "w:640" // ~640*480 in a normal ratio - private val REQUEST_IMAGE_CAPTURE = 1001 - private val REQUEST_CHOOSE_IMAGE = 1002 + private const val KEY_IMAGE_URI = "com.googletest.firebase.ml.demo.KEY_IMAGE_URI" + private const val KEY_IMAGE_MAX_WIDTH = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_WIDTH" + private const val KEY_IMAGE_MAX_HEIGHT = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_HEIGHT" + private const val KEY_SELECTED_SIZE = "com.googletest.firebase.ml.demo.KEY_SELECTED_SIZE" + + private const val REQUEST_IMAGE_CAPTURE = 1001 + private const val REQUEST_CHOOSE_IMAGE = 1002 + } - private var preview: ImageView? = null - private var graphicOverlay: GraphicOverlay? 
= null private var selectedMode = CLOUD_LABEL_DETECTION private var selectedSize: String? = SIZE_PREVIEW - internal var isLandScape: Boolean = false + private var isLandScape: Boolean = false private var imageUri: Uri? = null // Max width (portrait mode) @@ -69,10 +69,9 @@ class StillImageActivity: AppCompatActivity() { setContentView(R.layout.activity_still_image) - getImageButton.setOnClickListener( - View.OnClickListener { view -> + getImageButton.setOnClickListener{ view -> // Menu for selecting either: a) take new photo b) select from existing - val popup = PopupMenu(this@StillImageActivity, view) + val popup = PopupMenu(this, view) popup.setOnMenuItemClickListener { menuItem -> when (menuItem.itemId) { R.id.select_images_from_local -> { @@ -90,13 +89,11 @@ class StillImageActivity: AppCompatActivity() { val inflater = popup.menuInflater inflater.inflate(R.menu.camera_button_menu, popup.menu) popup.show() - }) - preview = findViewById(R.id.previewPane) as ImageView - if (preview == null) { + } + if (previewPane == null) { Log.d(TAG, "Preview is null") } - graphicOverlay = findViewById(R.id.previewOverlay) as GraphicOverlay - if (graphicOverlay == null) { + if (previewOverlay == null) { Log.d(TAG, "graphicOverlay is null") } @@ -107,13 +104,13 @@ class StillImageActivity: AppCompatActivity() { isLandScape = resources.configuration.orientation == Configuration.ORIENTATION_LANDSCAPE - if (savedInstanceState != null) { - imageUri = savedInstanceState.getParcelable(KEY_IMAGE_URI) - imageMaxWidth = savedInstanceState.getInt(KEY_IMAGE_MAX_WIDTH) - imageMaxHeight = savedInstanceState.getInt(KEY_IMAGE_MAX_HEIGHT) - selectedSize = savedInstanceState.getString(KEY_SELECTED_SIZE) + savedInstanceState?.let { + imageUri = it.getParcelable(KEY_IMAGE_URI) + imageMaxWidth = it.getInt(KEY_IMAGE_MAX_WIDTH) + imageMaxHeight = it.getInt(KEY_IMAGE_MAX_HEIGHT) + selectedSize = it.getString(KEY_SELECTED_SIZE) - if (imageUri != null) { + imageUri?.let { _ -> 
tryReloadAndDetectInImage() } } @@ -173,11 +170,11 @@ class StillImageActivity: AppCompatActivity() { super.onSaveInstanceState(outState) outState.putParcelable(KEY_IMAGE_URI, imageUri) - if (imageMaxWidth != null) { - outState.putInt(KEY_IMAGE_MAX_WIDTH, imageMaxWidth!!) + imageMaxWidth?.let { + outState.putInt(KEY_IMAGE_MAX_WIDTH, it) } - if (imageMaxHeight != null) { - outState.putInt(KEY_IMAGE_MAX_HEIGHT, imageMaxHeight!!) + imageMaxHeight?.let { + outState.putInt(KEY_IMAGE_MAX_HEIGHT, it) } outState.putString(KEY_SELECTED_SIZE, selectedSize) } @@ -185,10 +182,10 @@ class StillImageActivity: AppCompatActivity() { private fun startCameraIntentForResult() { // Clean up last time's image imageUri = null - preview?.setImageBitmap(null) + previewPane?.setImageBitmap(null) val takePictureIntent = Intent(MediaStore.ACTION_IMAGE_CAPTURE) - if (takePictureIntent.resolveActivity(packageManager) != null) { + takePictureIntent.resolveActivity(packageManager)?.let { val values = ContentValues() values.put(MediaStore.Images.Media.TITLE, "New Picture") values.put(MediaStore.Images.Media.DESCRIPTION, "From Camera") @@ -222,7 +219,7 @@ class StillImageActivity: AppCompatActivity() { } // Clear the overlay first - graphicOverlay!!.clear() + previewOverlay?.clear() val imageBitmap = MediaStore.Images.Media.getBitmap(contentResolver, imageUri) @@ -243,10 +240,11 @@ class StillImageActivity: AppCompatActivity() { (imageBitmap.height / scaleFactor).toInt(), true) - preview!!.setImageBitmap(resizedBitmap) + previewPane?.setImageBitmap(resizedBitmap) bitmapForDetection = resizedBitmap - - imageProcessor!!.process(bitmapForDetection!!, graphicOverlay!!) + bitmapForDetection?.let { + imageProcessor?.process(it, previewOverlay) + } } catch (e: IOException) { Log.e(TAG, "Error retrieving saved image") } @@ -259,10 +257,10 @@ class StillImageActivity: AppCompatActivity() { if (imageMaxWidth == null) { // Calculate the max width in portrait mode. 
This is done lazily since we need to wait for // a UI layout pass to get the right values. So delay it to first time image rendering time. - if (isLandScape) { - imageMaxWidth = (preview!!.parent as View).height - controlPanel.height + imageMaxWidth = if (isLandScape) { + (previewPane.parent as View).height - controlPanel.height } else { - imageMaxWidth = (preview!!.parent as View).width + (previewPane.parent as View).width } } @@ -275,10 +273,10 @@ class StillImageActivity: AppCompatActivity() { if (imageMaxHeight == null) { // Calculate the max width in portrait mode. This is done lazily since we need to wait for // a UI layout pass to get the right values. So delay it to first time image rendering time. - if (isLandScape) { - imageMaxHeight = (preview!!.parent as View).width + imageMaxHeight = if (isLandScape) { + (previewPane.parent as View).width } else { - imageMaxHeight = (preview!!.parent as View).height - controlPanel.height + (previewPane.parent as View).height - controlPanel.height } } @@ -287,15 +285,19 @@ class StillImageActivity: AppCompatActivity() { // Gets the targeted width / height. private fun getTargetedWidthHeight(): Pair { - val targetWidth: Int - val targetHeight: Int + var targetWidth = 0 + var targetHeight = 0 when (selectedSize) { SIZE_PREVIEW -> { - val maxWidthForPortraitMode = getImageMaxWidth()!! - val maxHeightForPortraitMode = getImageMaxHeight()!! 
- targetWidth = if (isLandScape) maxHeightForPortraitMode else maxWidthForPortraitMode - targetHeight = if (isLandScape) maxWidthForPortraitMode else maxHeightForPortraitMode + val maxWidthForPortraitMode = getImageMaxWidth() + val maxHeightForPortraitMode = getImageMaxHeight() + maxWidthForPortraitMode?.let { width -> + maxHeightForPortraitMode?.let { height -> + targetWidth = if (isLandScape) height else width + targetHeight = if (isLandScape) width else height + } + } } SIZE_640_480 -> { targetWidth = if (isLandScape) 640 else 480 @@ -312,11 +314,11 @@ class StillImageActivity: AppCompatActivity() { } private fun createImageProcessor() { - when (selectedMode) { - CLOUD_LABEL_DETECTION -> imageProcessor = CloudImageLabelingProcessor() - CLOUD_LANDMARK_DETECTION -> imageProcessor = CloudLandmarkRecognitionProcessor() - CLOUD_TEXT_DETECTION -> imageProcessor = CloudTextRecognitionProcessor() - CLOUD_DOCUMENT_TEXT_DETECTION -> imageProcessor = CloudDocumentTextRecognitionProcessor() + imageProcessor = when (selectedMode) { + CLOUD_LABEL_DETECTION -> CloudImageLabelingProcessor() + CLOUD_LANDMARK_DETECTION -> CloudLandmarkRecognitionProcessor() + CLOUD_TEXT_DETECTION -> CloudTextRecognitionProcessor() + CLOUD_DOCUMENT_TEXT_DETECTION -> CloudDocumentTextRecognitionProcessor() else -> throw IllegalStateException("Unknown selectedMode: $selectedMode") } } From a1d96b6b1164bfdf28293a9be83fa8e073cabbbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ros=C3=A1rio=20Pereira=20Fernandes?= Date: Fri, 14 Sep 2018 01:45:18 +0200 Subject: [PATCH 7/7] style: changes after feedback from review --- .../apps/mlkit/kotlin/ChooserActivity.kt | 22 +++--- .../apps/mlkit/kotlin/LivePreviewActivity.kt | 56 ++++++------- .../apps/mlkit/kotlin/StillImageActivity.kt | 78 +++++++++---------- .../BarcodeScanningProcessor.kt | 6 +- .../CloudLandmarkGraphic.kt | 17 ++-- 5 files changed, 88 insertions(+), 91 deletions(-) diff --git 
a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt index e0ce0b209b..ca8b3d5cd8 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/ChooserActivity.kt @@ -25,17 +25,6 @@ import kotlinx.android.synthetic.main.activity_chooser.* class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissionsResultCallback, AdapterView.OnItemClickListener { - companion object { - private const val TAG = "ChooserActivity" - private const val PERMISSION_REQUESTS = 1 - - private val CLASSES = - arrayOf>(LivePreviewActivity::class.java, StillImageActivity::class.java) - - private val DESCRIPTION_IDS = - intArrayOf(R.string.desc_camera_source_activity, R.string.desc_still_image_activity) - } - override fun onCreate(savedInstanceState: Bundle?) 
{ super.onCreate(savedInstanceState) @@ -139,4 +128,15 @@ class ChooserActivity : AppCompatActivity(), ActivityCompat.OnRequestPermissions this.descriptionIds = descriptionIds } } + + companion object { + private const val TAG = "ChooserActivity" + private const val PERMISSION_REQUESTS = 1 + + private val CLASSES = + arrayOf>(LivePreviewActivity::class.java, StillImageActivity::class.java) + + private val DESCRIPTION_IDS = + intArrayOf(R.string.desc_camera_source_activity, R.string.desc_still_image_activity) + } } \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt index 01e32389c4..77c1b756af 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/LivePreviewActivity.kt @@ -62,12 +62,12 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss Log.d(TAG, "graphicOverlay is null") } - val options = ArrayList() - options.add(FACE_DETECTION) - options.add(TEXT_DETECTION) - options.add(BARCODE_DETECTION) - options.add(IMAGE_LABEL_DETECTION) - options.add(CLASSIFICATION) + val options = arrayListOf( + FACE_DETECTION, + TEXT_DETECTION, + BARCODE_DETECTION, + IMAGE_LABEL_DETECTION, + CLASSIFICATION) // Creating adapter for spinner val dataAdapter = ArrayAdapter(this, R.layout.spinner_style, options) // Drop down layout style - list view with radio button @@ -124,28 +124,30 @@ class LivePreviewActivity : AppCompatActivity(), ActivityCompat.OnRequestPermiss } try { - when (model) { - CLASSIFICATION -> { - Log.i(TAG, "Using Custom Image Classifier Processor") - cameraSource!!.setMachineLearningFrameProcessor(CustomImageClassifierProcessor(this)) + cameraSource?.let { + when (model) { + CLASSIFICATION -> { + Log.i(TAG, "Using Custom Image Classifier 
Processor") + it.setMachineLearningFrameProcessor(CustomImageClassifierProcessor(this)) + } + TEXT_DETECTION -> { + Log.i(TAG, "Using Text Detector Processor") + it.setMachineLearningFrameProcessor(TextRecognitionProcessor()) + } + FACE_DETECTION -> { + Log.i(TAG, "Using Face Detector Processor") + it.setMachineLearningFrameProcessor(FaceDetectionProcessor()) + } + BARCODE_DETECTION -> { + Log.i(TAG, "Using Barcode Detector Processor") + it.setMachineLearningFrameProcessor(BarcodeScanningProcessor()) + } + IMAGE_LABEL_DETECTION -> { + Log.i(TAG, "Using Image Label Detector Processor") + it.setMachineLearningFrameProcessor(ImageLabelingProcessor()) + } + else -> Log.e(TAG, "Unknown model: $model") } - TEXT_DETECTION -> { - Log.i(TAG, "Using Text Detector Processor") - cameraSource!!.setMachineLearningFrameProcessor(TextRecognitionProcessor()) - } - FACE_DETECTION -> { - Log.i(TAG, "Using Face Detector Processor") - cameraSource!!.setMachineLearningFrameProcessor(FaceDetectionProcessor()) - } - BARCODE_DETECTION -> { - Log.i(TAG, "Using Barcode Detector Processor") - cameraSource!!.setMachineLearningFrameProcessor(BarcodeScanningProcessor()) - } - IMAGE_LABEL_DETECTION -> { - Log.i(TAG, "Using Image Label Detector Processor") - cameraSource!!.setMachineLearningFrameProcessor(ImageLabelingProcessor()) - } - else -> Log.e(TAG, "Unknown model: $model") } } catch (e: FirebaseMLException) { Log.e(TAG, "can not create camera source: $model") diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt index 7f69f36db8..8955e5b757 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/StillImageActivity.kt @@ -29,38 +29,16 @@ import kotlinx.android.synthetic.main.activity_still_image.* @KeepName class 
StillImageActivity: AppCompatActivity() { - companion object { - - private const val TAG = "StillImageActivity" - - private const val CLOUD_LABEL_DETECTION = "Cloud Label" - private const val CLOUD_LANDMARK_DETECTION = "Landmark" - private const val CLOUD_TEXT_DETECTION = "Cloud Text" - private const val CLOUD_DOCUMENT_TEXT_DETECTION = "Doc Text" - - private const val SIZE_PREVIEW = "w:max" // Available on-screen width. - private const val SIZE_1024_768 = "w:1024" // ~1024*768 in a normal ratio - private const val SIZE_640_480 = "w:640" // ~640*480 in a normal ratio - - private const val KEY_IMAGE_URI = "com.googletest.firebase.ml.demo.KEY_IMAGE_URI" - private const val KEY_IMAGE_MAX_WIDTH = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_WIDTH" - private const val KEY_IMAGE_MAX_HEIGHT = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_HEIGHT" - private const val KEY_SELECTED_SIZE = "com.googletest.firebase.ml.demo.KEY_SELECTED_SIZE" - - private const val REQUEST_IMAGE_CAPTURE = 1001 - private const val REQUEST_CHOOSE_IMAGE = 1002 - } - private var selectedMode = CLOUD_LABEL_DETECTION - private var selectedSize: String? = SIZE_PREVIEW + private var selectedSize: String = SIZE_PREVIEW private var isLandScape: Boolean = false private var imageUri: Uri? = null // Max width (portrait mode) - private var imageMaxWidth: Int? = null + private var imageMaxWidth = 0 // Max height (portrait mode) - private var imageMaxHeight: Int? = null + private var imageMaxHeight = 0 private var bitmapForDetection: Bitmap? = null private var imageProcessor: VisionImageProcessor? 
= null @@ -169,14 +147,12 @@ class StillImageActivity: AppCompatActivity() { public override fun onSaveInstanceState(outState: Bundle) { super.onSaveInstanceState(outState) - outState.putParcelable(KEY_IMAGE_URI, imageUri) - imageMaxWidth?.let { - outState.putInt(KEY_IMAGE_MAX_WIDTH, it) - } - imageMaxHeight?.let { - outState.putInt(KEY_IMAGE_MAX_HEIGHT, it) + with(outState) { + putParcelable(KEY_IMAGE_URI, imageUri) + putInt(KEY_IMAGE_MAX_WIDTH, imageMaxWidth) + putInt(KEY_IMAGE_MAX_HEIGHT, imageMaxHeight) + putString(KEY_SELECTED_SIZE, selectedSize) } - outState.putString(KEY_SELECTED_SIZE, selectedSize) } private fun startCameraIntentForResult() { @@ -253,8 +229,8 @@ class StillImageActivity: AppCompatActivity() { // Returns max image width, always for portrait mode. Caller needs to swap width / height for // landscape mode. - private fun getImageMaxWidth(): Int? { - if (imageMaxWidth == null) { + private fun getImageMaxWidth(): Int { + if (imageMaxWidth == 0) { // Calculate the max width in portrait mode. This is done lazily since we need to wait for // a UI layout pass to get the right values. So delay it to first time image rendering time. imageMaxWidth = if (isLandScape) { @@ -269,8 +245,8 @@ class StillImageActivity: AppCompatActivity() { // Returns max image height, always for portrait mode. Caller needs to swap width / height for // landscape mode. - private fun getImageMaxHeight(): Int? { - if (imageMaxHeight == null) { + private fun getImageMaxHeight(): Int { + if (imageMaxHeight == 0) { // Calculate the max width in portrait mode. This is done lazily since we need to wait for // a UI layout pass to get the right values. So delay it to first time image rendering time. 
imageMaxHeight = if (isLandScape) { @@ -292,12 +268,8 @@ class StillImageActivity: AppCompatActivity() { SIZE_PREVIEW -> { val maxWidthForPortraitMode = getImageMaxWidth() val maxHeightForPortraitMode = getImageMaxHeight() - maxWidthForPortraitMode?.let { width -> - maxHeightForPortraitMode?.let { height -> - targetWidth = if (isLandScape) height else width - targetHeight = if (isLandScape) width else height - } - } + targetWidth = if (isLandScape) maxHeightForPortraitMode else maxWidthForPortraitMode + targetHeight = if (isLandScape) maxWidthForPortraitMode else maxHeightForPortraitMode } SIZE_640_480 -> { targetWidth = if (isLandScape) 640 else 480 @@ -323,4 +295,26 @@ class StillImageActivity: AppCompatActivity() { } } + companion object { + + private const val TAG = "StillImageActivity" + + private const val CLOUD_LABEL_DETECTION = "Cloud Label" + private const val CLOUD_LANDMARK_DETECTION = "Landmark" + private const val CLOUD_TEXT_DETECTION = "Cloud Text" + private const val CLOUD_DOCUMENT_TEXT_DETECTION = "Doc Text" + + private const val SIZE_PREVIEW = "w:max" // Available on-screen width. 
+ private const val SIZE_1024_768 = "w:1024" // ~1024*768 in a normal ratio + private const val SIZE_640_480 = "w:640" // ~640*480 in a normal ratio + + private const val KEY_IMAGE_URI = "com.googletest.firebase.ml.demo.KEY_IMAGE_URI" + private const val KEY_IMAGE_MAX_WIDTH = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_WIDTH" + private const val KEY_IMAGE_MAX_HEIGHT = "com.googletest.firebase.ml.demo.KEY_IMAGE_MAX_HEIGHT" + private const val KEY_SELECTED_SIZE = "com.googletest.firebase.ml.demo.KEY_SELECTED_SIZE" + + private const val REQUEST_IMAGE_CAPTURE = 1001 + private const val REQUEST_CHOOSE_IMAGE = 1002 + } + } \ No newline at end of file diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt index 5b7dd19cdc..3ca0b5b1c9 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt +++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/barcodescanning/BarcodeScanningProcessor.kt @@ -38,10 +38,8 @@ class BarcodeScanningProcessor : VisionProcessorBase return detector.detectInImage(image) } - override fun onSuccess( - barcodes: List, - frameMetadata: FrameMetadata, - graphicOverlay: GraphicOverlay) { + override fun onSuccess(barcodes: List, frameMetadata: FrameMetadata, + graphicOverlay: GraphicOverlay) { graphicOverlay.clear() for (i in barcodes.indices) { val barcode = barcodes[i] diff --git a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt index cba7b69715..6716fce53d 100644 --- a/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt 
+++ b/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/kotlin/cloudlandmarkrecognition/CloudLandmarkGraphic.kt @@ -48,14 +48,17 @@ class CloudLandmarkGraphic(overlay: GraphicOverlay) : GraphicOverlay.Graphic(ove // Draws the bounding box around the LandmarkBlock. val rect = RectF(landmark.boundingBox) - rect.left = translateX(rect.left) - rect.top = translateY(rect.top) - rect.right = translateX(rect.right) - rect.bottom = translateY(rect.bottom) - canvas.drawRect(rect, rectPaint) + with(rect) { + left = translateX(left) + top = translateY(top) + right = translateX(right) + bottom = translateY(bottom) + canvas.drawRect(this, rectPaint) + + // Renders the landmark at the bottom of the box. + canvas.drawText(landmark.landmark, left, bottom, landmarkPaint) + } - // Renders the landmark at the bottom of the box. - canvas.drawText(landmark.landmark, rect.left, rect.bottom, landmarkPaint) } companion object {