diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..351ec37b --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ + +/Android/.idea +Android/.idea/misc.xml +Android/.idea/gradle.xml diff --git a/Android/.gitignore b/Android/.gitignore new file mode 100644 index 00000000..aa724b77 --- /dev/null +++ b/Android/.gitignore @@ -0,0 +1,15 @@ +*.iml +.gradle +/local.properties +/.idea/caches +/.idea/libraries +/.idea/modules.xml +/.idea/workspace.xml +/.idea/navEditor.xml +/.idea/assetWizardSettings.xml +.DS_Store +/build +/captures +.externalNativeBuild +.cxx +local.properties diff --git a/Android/.idea/.gitignore b/Android/.idea/.gitignore new file mode 100644 index 00000000..26d33521 --- /dev/null +++ b/Android/.idea/.gitignore @@ -0,0 +1,3 @@ +# Default ignored files +/shelf/ +/workspace.xml diff --git a/Android/.idea/.name b/Android/.idea/.name new file mode 100644 index 00000000..b3405b3b --- /dev/null +++ b/Android/.idea/.name @@ -0,0 +1 @@ +My Application \ No newline at end of file diff --git a/Android/.idea/compiler.xml b/Android/.idea/compiler.xml new file mode 100644 index 00000000..b589d56e --- /dev/null +++ b/Android/.idea/compiler.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/Android/.idea/deploymentTargetDropDown.xml b/Android/.idea/deploymentTargetDropDown.xml new file mode 100644 index 00000000..0c0c3383 --- /dev/null +++ b/Android/.idea/deploymentTargetDropDown.xml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/Android/.idea/gradle.xml b/Android/.idea/gradle.xml new file mode 100644 index 00000000..0897082f --- /dev/null +++ b/Android/.idea/gradle.xml @@ -0,0 +1,19 @@ + + + + + + + \ No newline at end of file diff --git a/Android/.idea/kotlinc.xml b/Android/.idea/kotlinc.xml new file mode 100644 index 00000000..f8467b45 --- /dev/null +++ b/Android/.idea/kotlinc.xml @@ -0,0 +1,6 @@ + + + + + \ No newline at end of file diff --git a/Android/.idea/migrations.xml b/Android/.idea/migrations.xml new file mode 100644 index 00000000..f8051a6f --- /dev/null +++ b/Android/.idea/migrations.xml @@ -0,0 +1,10 @@ + + + + + + \ No newline at end of file diff --git a/Android/.idea/misc.xml b/Android/.idea/misc.xml new file mode 100644 index 00000000..8978d23d --- /dev/null +++ b/Android/.idea/misc.xml @@ -0,0 +1,9 @@ + + + + + + + + \ No newline at end of file diff --git a/Android/.idea/vcs.xml b/Android/.idea/vcs.xml new file mode 100644 index 00000000..6c0b8635 --- /dev/null +++ b/Android/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/Train_TFLite2_Object_Detction_Model.ipynb b/Android/Colab Notebook/Train_TFLite_Object_Detction_Model.ipynb similarity index 88% rename from Train_TFLite2_Object_Detction_Model.ipynb rename to Android/Colab Notebook/Train_TFLite_Object_Detction_Model.ipynb index 21ddd170..baa99aae 100644 --- a/Train_TFLite2_Object_Detction_Model.ipynb +++ b/Android/Colab Notebook/Train_TFLite_Object_Detction_Model.ipynb @@ -3,8 +3,8 @@ { "cell_type": "markdown", "metadata": { - "id": "view-in-github", - "colab_type": "text" + "colab_type": "text", + "id": "view-in-github" }, "source": [ "\"Open" @@ -54,12 +54,12 @@ }, { "cell_type": "markdown", - "source": [ - "# 1. Gather and Label Training Images" - ], "metadata": { "id": "4VAvZo8qE4u5" - } + }, + "source": [ + "# 1. Gather and Label Training Images" + ] }, { "cell_type": "markdown", @@ -83,12 +83,12 @@ }, { "cell_type": "markdown", - "source": [ - "#2. 
Install TensorFlow Object Detection Dependencies" - ], "metadata": { "id": "sxb8_h-QFErO" - } + }, + "source": [ + "#2. Install TensorFlow Object Detection Dependencies" + ] }, { "cell_type": "markdown", @@ -132,6 +132,11 @@ }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NRBnuCKjM4Bd" + }, + "outputs": [], "source": [ "# Modify setup.py file to install the tf-models-official repository targeted at TF v2.8.0\n", "import re\n", @@ -143,12 +148,7 @@ " s = re.sub('tf-models-official>=2.5.1',\n", " 'tf-models-official==2.8.0', s)\n", " f.write(s)" - ], - "metadata": { - "id": "NRBnuCKjM4Bd" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", @@ -203,12 +203,12 @@ }, { "cell_type": "markdown", - "source": [ - "# 3. Upload Image Dataset and Prepare Training Data" - ], "metadata": { "id": "eydREUsMGUUR" - } + }, + "source": [ + "# 3. Upload Image Dataset and Prepare Training Data" + ] }, { "cell_type": "markdown", @@ -232,39 +232,39 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "LE1MtX4HGQA4" + }, "source": [ "### 3.1 Upload images\n", "There are three options for moving the image files to this Colab instance." - ], - "metadata": { - "id": "LE1MtX4HGQA4" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "sFSJoDEnJotN" + }, "source": [ "**Option 1. Upload through Google Colab**\n", "\n", "Upload the \"images.zip\" file to the Google Colab instance by clicking the \"Files\" icon on the left hand side of the browser, and then the \"Upload to session storage\" icon. Select the zip folder to upload it.\n", "\n", "![](https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/doc/colab_upload_button.png)" - ], - "metadata": { - "id": "sFSJoDEnJotN" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "hGsPlloAGIXB" + }, "source": [ "**Option 2. Copy from Google Drive**\n", "\n", "You can also upload your images to your personal Google Drive, mount the drive on this Colab session, and copy them over to the Colab filesystem. This option works well if you want to upload the images beforehand so you don't have to wait for them to upload each time you restart this Colab. If you have more than 50MB worth of images, I recommend using this option.\n", "\n", "First, upload the \"images.zip\" file to your Google Drive, and make note of the folder you uploaded them to. Replace `MyDrive/path/to/images.zip` with the path to your zip file. (For example, I uploaded the zip file to folder called \"change-counter1\", so I would use `MyDrive/change-counter1/images.zip` for the path). Then, run the following block of code to mount your Google Drive to this Colab session and copy the folder to this filesystem." - ], - "metadata": { - "id": "hGsPlloAGIXB" - } + ] }, { "cell_type": "code", @@ -282,25 +282,25 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "9xAJMKwpFilm" + }, "source": [ "**Option 3. Use coin detection dataset**\n", "\n", "If you don't have a dataset and just want to try training a model, you can download my coin image dataset to use as an example. I've uploaded a dataset containing 750 labeled images of pennies, nickels, dimes, and quarters. Run the following code block to download the dataset." 
- ], - "metadata": { - "id": "9xAJMKwpFilm" - } + ] }, { "cell_type": "code", - "source": [ - "!wget -O /content/images.zip https://www.dropbox.com/s/gk57ec3v8dfuwcp/CoinPics_11NOV22.zip?dl=0 # United States coin images" - ], + "execution_count": null, "metadata": { "id": "suu_xPVZIEcH" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!wget -O /content/images.zip https://www.dropbox.com/s/gk57ec3v8dfuwcp/CoinPics_11NOV22.zip?dl=0 # United States coin images" + ] }, { "cell_type": "markdown", @@ -348,15 +348,15 @@ }, { "cell_type": "code", - "source": [ - "!wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/train_val_test_split.py\n", - "!python train_val_test_split.py" - ], + "execution_count": null, "metadata": { "id": "PfuZpmdBLjh-" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/train_val_test_split.py\n", + "!python train_val_test_split.py" + ] }, { "cell_type": "markdown", @@ -398,16 +398,16 @@ }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "laZZE0TlEeUF" + }, + "outputs": [], "source": [ "# Download data conversion scripts\n", "! wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/create_csv.py\n", "! wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/create_tfrecord.py" - ], - "metadata": { - "id": "laZZE0TlEeUF" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", @@ -447,12 +447,12 @@ }, { "cell_type": "markdown", - "source": [ - "# 4. Set Up Training Configuration" - ], "metadata": { "id": "eGEUZYAMEZ6f" - } + }, + "source": [ + "# 4. Set Up Training Configuration" + ] }, { "cell_type": "markdown", @@ -511,12 +511,12 @@ }, { "cell_type": "markdown", - "source": [ - "Download the pretrained model file and configuration file by clicking Play on the following section." - ], "metadata": { "id": "JMG3EEPqPggV" - } + }, + "source": [ + "Download the pretrained model file and configuration file by clicking Play on the following section." + ] }, { "cell_type": "code", @@ -717,12 +717,12 @@ }, { "cell_type": "markdown", - "source": [ - "# 5. Train Custom TFLite Detection Model" - ], "metadata": { "id": "-19zML6oEO7l" - } + }, + "source": [ + "# 5. Train Custom TFLite Detection Model" + ] }, { "cell_type": "markdown", @@ -778,21 +778,21 @@ }, { "cell_type": "markdown", - "source": [ - "If you want to stop training early, just click Stop a couple times or right-click on the code block and select \"Interrupt Execution\". Otherwise, training will stop by itself once it reaches the specified number of training steps.\n" - ], "metadata": { "id": "WHxbX4ZpzXIv" - } + }, + "source": [ + "If you want to stop training early, just click Stop a couple times or right-click on the code block and select \"Interrupt Execution\". Otherwise, training will stop by itself once it reaches the specified number of training steps.\n" + ] }, { "cell_type": "markdown", - "source": [ - "# 6. Convert Model to TensorFlow Lite" - ], "metadata": { "id": "kPg8oMnQDYKl" - } + }, + "source": [ + "# 6. 
Convert Model to TensorFlow Lite" + ] }, { "cell_type": "markdown", @@ -853,12 +853,12 @@ }, { "cell_type": "markdown", - "source": [ - "# 7. Test TensorFlow Lite Model and Calculate mAP" - ], "metadata": { "id": "RDQrtQhvC3oG" - } + }, + "source": [ + "# 7. Test TensorFlow Lite Model and Calculate mAP" + ] }, { "cell_type": "markdown", @@ -1005,15 +1005,20 @@ }, { "cell_type": "markdown", - "source": [ - "The next block sets the paths to the test images and models and then runs the inferencing function. If you want to use more than 10 images, change the `images_to_test` variable. Click play to run inferencing!" - ], "metadata": { "id": "-CJI4A0f_zqz" - } + }, + "source": [ + "The next block sets the paths to the test images and models and then runs the inferencing function. If you want to use more than 10 images, change the `images_to_test` variable. Click play to run inferencing!" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6t8CMarqBqP9" + }, + "outputs": [], "source": [ "# Set up variables for running user's model\n", "PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n", @@ -1024,15 +1029,13 @@ "\n", "# Run inferencing function!\n", "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test)" - ], - "metadata": { - "id": "6t8CMarqBqP9" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "N_ckqeWqBF0P" + }, "source": [ "### 7.2 Calculate mAP\n", "Now we have a visual sense of how our model performs on test images, but how can we quantitatively measure its accuracy?\n", @@ -1040,13 +1043,15 @@ "One popular methord for measuring object detection model accuracy is \"mean average precision\" (mAP). Basically, the higher the mAP score, the better your model is at detecting objects in images. To learn more about mAP, read through this [article from Roboflow](https://blog.roboflow.com/mean-average-precision/).\n", "\n", "We'll use the mAP calculator tool at https://github.com/Cartucho/mAP to determine our model's mAP score. First, we need to clone the repository and remove its existing example data. We'll also download a script I wrote for interfacing with the calculator." - ], - "metadata": { - "id": "N_ckqeWqBF0P" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JlWarXEZDUqS" + }, + "outputs": [], "source": [ "%%bash\n", "git clone https://github.com/Cartucho/mAP /content/mAP\n", @@ -1055,70 +1060,70 @@ "rm input/ground-truth/*\n", "rm input/images-optional/*\n", "wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/calculate_map_cartucho.py" - ], - "metadata": { - "id": "JlWarXEZDUqS" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Next, we'll copy the images and annotation data from the **test** folder to the appropriate folders inside the cloned repository. These will be used as the \"ground truth data\" that our model's detection results will be compared to.\n" - ], "metadata": { "id": "qn22nGGqH5T6" - } + }, + "source": [ + "Next, we'll copy the images and annotation data from the **test** folder to the appropriate folders inside the cloned repository. 
These will be used as the \"ground truth data\" that our model's detection results will be compared to.\n" + ] }, { "cell_type": "code", - "source": [ - "!cp /content/images/test/* /content/mAP/input/images-optional # Copy images and xml files\n", - "!mv /content/mAP/input/images-optional/*.xml /content/mAP/input/ground-truth/ # Move xml files to the appropriate folder" - ], + "execution_count": null, "metadata": { "id": "5szFfVxwI3wT" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!cp /content/images/test/* /content/mAP/input/images-optional # Copy images and xml files\n", + "!mv /content/mAP/input/images-optional/*.xml /content/mAP/input/ground-truth/ # Move xml files to the appropriate folder" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "u6aro817DGzx" + }, "source": [ "The calculator tool expects annotation data in a format that's different from the Pascal VOC .xml file format we're using. Fortunately, it provides an easy script, `convert_gt_xml.py`, for converting to the expected .txt format.\n", "\n" - ], - "metadata": { - "id": "u6aro817DGzx" - } + ] }, { "cell_type": "code", - "source": [ - "!python /content/mAP/scripts/extra/convert_gt_xml.py" - ], + "execution_count": null, "metadata": { "id": "qdjtOUDnK2AA" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!python /content/mAP/scripts/extra/convert_gt_xml.py" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "mnIUacAlLP0B" + }, "source": [ "Okay, we've set up the ground truth data, but now we need actual detection results from our model. The detection results will be compared to the ground truth data to calculate the model's accuracy in mAP.\n", "\n", "The inference function we defined in Step 7.1 can be used to generate detection data for all the images in the **test** folder. We'll use it the same as before, except this time we'll tell it to save detection results into the `detection-results` folder.\n", "\n", "Click Play to run the following code block!" - ], - "metadata": { - "id": "mnIUacAlLP0B" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "szzHFAhsMNFF" + }, + "outputs": [], "source": [ "# Set up variables for running inference, this time to get detection results saved as .txt files\n", "PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n", @@ -1138,53 +1143,48 @@ "print('Starting inference on %d images...' % images_to_test)\n", "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test, PATH_TO_RESULTS, txt_only)\n", "print('Finished inferencing!')" - ], - "metadata": { - "id": "szzHFAhsMNFF" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "e_QRnTqNPX4z" + }, "source": [ "Finally, let's calculate mAP! One popular style for reporting mAP is the COCO metric for mAP @ 0.50:0.95. Basically, this means that mAP is calculated at several IoU thresholds between 0.50 and 0.95, and then the result from each threshold is averaged to get a final mAP score. [Learn more here!](https://blog.roboflow.com/mean-average-precision/)\n", "\n", "I wrote a script to run the calculator tool at each IoU threshold, average the results, and report the final accuracy score. It reports mAP for each class and overall mAP. Click Play on the following two blocks to calculate mAP!" 
- ], - "metadata": { - "id": "e_QRnTqNPX4z" - } + ] }, { "cell_type": "code", - "source": [ - "%cd /content/mAP\n", - "!python calculate_map_cartucho.py --labels=/content/labelmap.txt" - ], + "execution_count": null, "metadata": { "id": "3DkjpIBARTQ7" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "%cd /content/mAP\n", + "!python calculate_map_cartucho.py --labels=/content/labelmap.txt" + ] }, { "cell_type": "markdown", - "source": [ - "The score reported at the end is your model's overall mAP score. Ideally, it should be above 50% (0.50). If it isn't, you can increase your model's accuracy by adding more images to your dataset. See my [dataset video](https://www.youtube.com/watch?v=v0ssiOY6cfg) for tips on how to capture good training images and improve accuracy." - ], "metadata": { "id": "R9HPoOBVKvxU" - } + }, + "source": [ + "The score reported at the end is your model's overall mAP score. Ideally, it should be above 50% (0.50). If it isn't, you can increase your model's accuracy by adding more images to your dataset. See my [dataset video](https://www.youtube.com/watch?v=v0ssiOY6cfg) for tips on how to capture good training images and improve accuracy." + ] }, { "cell_type": "markdown", - "source": [ - "# 8. Deploy TensorFlow Lite Model" - ], "metadata": { "id": "5i40ve0SCLaE" - } + }, + "source": [ + "# 8. Deploy TensorFlow Lite Model" + ] }, { "cell_type": "markdown", @@ -1197,14 +1197,14 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "zq3L2IoP4VHp" + }, "source": [ "## 8.1. Download TFLite model\n", "\n", "Run the two following cells to copy the labelmap files into the model folder, compress it into a zip folder, and then download it. The zip folder contains the `detect.tflite` model and `labelmap.txt` labelmap files that are needed to run the model in your application." - ], - "metadata": { - "id": "zq3L2IoP4VHp" - } + ] }, { "cell_type": "code", @@ -1238,12 +1238,12 @@ }, { "cell_type": "markdown", - "source": [ - "The `custom_model_lite.zip` file containing the model will download into your Downloads folder. It's ready to be deployed on your device!" - ], "metadata": { "id": "9Kb3ZBsMq95l" - } + }, + "source": [ + "The `custom_model_lite.zip` file containing the model will download into your Downloads folder. It's ready to be deployed on your device!" + ] }, { "cell_type": "markdown", @@ -1287,8 +1287,7 @@ "### 8.2.3. Deploy on other Linux-based edge devices\n", "Instructions to be added! 🐧\n", "\n", - "### 8.2.4. Deploy on Android\n", - "Instructions to be added! 🤖\n", + "\n", "\n", "\n", "\n" @@ -1296,12 +1295,180 @@ }, { "cell_type": "markdown", + "metadata": {}, "source": [ - "# 9. (Optional) Post-Training Quantization" - ], + "### 8.2.4. Deploy on Android" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 8.2.4.1. Add metadata to your TensorFlow Lite model\n", + "This step is required to load the TFLite model on an Android device or you will get an error!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install tflite_support==0.4.4\n", + "\n", + "%cd /content/custom_model_lite\n", + "\n", + "from tflite_support.metadata_writers import object_detector\n", + "from tflite_support.metadata_writers import writer_utils\n", + "from tflite_support import metadata\n", + "import flatbuffers\n", + "import os\n", + "from tensorflow_lite_support.metadata import metadata_schema_py_generated as _metadata_fb\n", + "from tensorflow_lite_support.metadata.python import metadata as _metadata\n", + "from tensorflow_lite_support.metadata.python.metadata_writers import metadata_info\n", + "from tensorflow_lite_support.metadata.python.metadata_writers import metadata_writer\n", + "from tensorflow_lite_support.metadata.python.metadata_writers import writer_utils\n", + "\n", + "ObjectDetectorWriter = object_detector.MetadataWriter\n", + "\n", + "_MODEL_PATH = \"/content/custom_model_lite/detect.tflite\"\n", + "_LABEL_FILE = \"/content/labelmap.txt\"\n", + "_SAVE_TO_PATH = \"/content/custom_model_lite/detect_with_metadata.tflite\"\n", + "\n", + "writer = ObjectDetectorWriter.create_for_inference(\n", + " writer_utils.load_file(_MODEL_PATH), [127.5], [127.5], [_LABEL_FILE])\n", + "writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)\n", + "\n", + "# Verify the populated metadata and associated files.\n", + "displayer = metadata.MetadataDisplayer.with_model_file(_SAVE_TO_PATH)\n", + "print(\"Metadata populated:\")\n", + "print(displayer.get_metadata_json())\n", + "print(\"Associated file(s) populated:\")\n", + "print(displayer.get_packed_associated_file_list())\n", + "\n", + "model_meta = _metadata_fb.ModelMetadataT()\n", + "model_meta.name = \"SSD_Detector\"\n", + "model_meta.description = (\n", + " \"Identify which of a known set of objects might be present and provide \"\n", + " \"information about their positions within the given image or a video \"\n", + " \"stream.\")\n", + "\n", + "# Creates input info.\n", + "input_meta = _metadata_fb.TensorMetadataT()\n", + "input_meta.name = \"image\"\n", + "input_meta.content = _metadata_fb.ContentT()\n", + "input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()\n", + "input_meta.content.contentProperties.colorSpace = (\n", + " _metadata_fb.ColorSpaceType.RGB)\n", + "input_meta.content.contentPropertiesType = (\n", + " _metadata_fb.ContentProperties.ImageProperties)\n", + "input_normalization = _metadata_fb.ProcessUnitT()\n", + "input_normalization.optionsType = (\n", + " _metadata_fb.ProcessUnitOptions.NormalizationOptions)\n", + "input_normalization.options = _metadata_fb.NormalizationOptionsT()\n", + "input_normalization.options.mean = [127.5]\n", + "input_normalization.options.std = [127.5]\n", + "input_meta.processUnits = [input_normalization]\n", + "input_stats = _metadata_fb.StatsT()\n", + "input_stats.max = [255]\n", + "input_stats.min = [0]\n", + "input_meta.stats = input_stats\n", + "\n", + "# Creates outputs info.\n", + "output_location_meta = _metadata_fb.TensorMetadataT()\n", + "output_location_meta.name = \"location\"\n", + "output_location_meta.description = \"The locations of the detected boxes.\"\n", + "output_location_meta.content = _metadata_fb.ContentT()\n", + "output_location_meta.content.contentPropertiesType = (\n", + " _metadata_fb.ContentProperties.BoundingBoxProperties)\n", + "output_location_meta.content.contentProperties = (\n", + " _metadata_fb.BoundingBoxPropertiesT())\n", + 
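+    "# Per the TFLite metadata schema, index [1, 0, 3, 2] declares that this model emits boxes as [ymin, xmin, ymax, xmax] rather than the default BOUNDARIES order [xmin, ymin, xmax, ymax]\n",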
"output_location_meta.content.contentProperties.index = [1, 0, 3, 2]\n", + "output_location_meta.content.contentProperties.type = (\n", + " _metadata_fb.BoundingBoxType.BOUNDARIES)\n", + "output_location_meta.content.contentProperties.coordinateType = (\n", + " _metadata_fb.CoordinateType.RATIO)\n", + "output_location_meta.content.range = _metadata_fb.ValueRangeT()\n", + "output_location_meta.content.range.min = 2\n", + "output_location_meta.content.range.max = 2\n", + "\n", + "output_class_meta = _metadata_fb.TensorMetadataT()\n", + "output_class_meta.name = \"category\"\n", + "output_class_meta.description = \"The categories of the detected boxes.\"\n", + "output_class_meta.content = _metadata_fb.ContentT()\n", + "output_class_meta.content.contentPropertiesType = (\n", + " _metadata_fb.ContentProperties.FeatureProperties)\n", + "output_class_meta.content.contentProperties = (\n", + " _metadata_fb.FeaturePropertiesT())\n", + "output_class_meta.content.range = _metadata_fb.ValueRangeT()\n", + "output_class_meta.content.range.min = 2\n", + "output_class_meta.content.range.max = 2\n", + "label_file = _metadata_fb.AssociatedFileT()\n", + "label_file.name = os.path.basename(\"labelmap.txt\")\n", + "label_file.description = \"Label of objects that this model can recognize.\"\n", + "label_file.type = _metadata_fb.AssociatedFileType.TENSOR_VALUE_LABELS\n", + "output_class_meta.associatedFiles = [label_file]\n", + "\n", + "output_score_meta = _metadata_fb.TensorMetadataT()\n", + "output_score_meta.name = \"score\"\n", + "output_score_meta.description = \"The scores of the detected boxes.\"\n", + "output_score_meta.content = _metadata_fb.ContentT()\n", + "output_score_meta.content.contentPropertiesType = (\n", + " _metadata_fb.ContentProperties.FeatureProperties)\n", + "output_score_meta.content.contentProperties = (\n", + " _metadata_fb.FeaturePropertiesT())\n", + "output_score_meta.content.range = _metadata_fb.ValueRangeT()\n", + "output_score_meta.content.range.min = 2\n", + "output_score_meta.content.range.max = 2\n", + "\n", + "output_number_meta = _metadata_fb.TensorMetadataT()\n", + "output_number_meta.name = \"number of detections\"\n", + "output_number_meta.description = \"The number of the detected boxes.\"\n", + "output_number_meta.content = _metadata_fb.ContentT()\n", + "output_number_meta.content.contentPropertiesType = (\n", + " _metadata_fb.ContentProperties.FeatureProperties)\n", + "output_number_meta.content.contentProperties = (\n", + " _metadata_fb.FeaturePropertiesT())\n", + "\n", + "# Creates subgraph info.\n", + "group = _metadata_fb.TensorGroupT()\n", + "group.name = \"detection result\"\n", + "group.tensorNames = [\n", + " output_location_meta.name, output_class_meta.name,\n", + " output_score_meta.name\n", + "]\n", + "subgraph = _metadata_fb.SubGraphMetadataT()\n", + "subgraph.inputTensorMetadata = [input_meta]\n", + "subgraph.outputTensorMetadata = [\n", + " output_location_meta, output_class_meta, output_score_meta,\n", + " output_number_meta\n", + "]\n", + "subgraph.outputTensorGroups = [group]\n", + "model_meta.subgraphMetadata = [subgraph]\n", + "\n", + "b = flatbuffers.Builder(0)\n", + "b.Finish(\n", + " model_meta.Pack(b),\n", + " _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n", + "metadata_buf = b.Output()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 8.2.4.2. 
Continue by following the Android deployment guide\n", + "FILL" + ] + }, + { + "cell_type": "markdown", "metadata": { "id": "WoptFnAhCSrR" - } + }, + "source": [ + "# 9. (Optional) Post-Training Quantization" + ] }, { "cell_type": "markdown", @@ -1316,13 +1483,13 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "VTyqlXFTJ0Uv" + }, "source": [ "## 9.1. Quantize model\n", "We'll use the \"TFLiteConverter\" module to perform [post-training quantization](https://www.tensorflow.org/lite/performance/post_training_quantization) on the model. To quantize the model, we need to provide a representative dataset, which is a set of images that represent what the model will see when deployed in the field. First, we'll create a list of images to include in the representative dataset (we'll just use the images in the `train` folder).\n" - ], - "metadata": { - "id": "VTyqlXFTJ0Uv" - } + ] }, { "cell_type": "code", @@ -1345,12 +1512,12 @@ }, { "cell_type": "markdown", - "source": [ - "Next, we'll define a function to yield images from our representative dataset. Refer to [TensorFlow's sample quantization code](https://colab.research.google.com/github/google-coral/tutorials/blob/master/retrain_classification_ptq_tf2.ipynb#scrollTo=kRDabW_u1wnv) to get a better understanding of what this is doing!" - ], "metadata": { "id": "cqbH1VlEgiuy" - } + }, + "source": [ + "Next, we'll define a function to yield images from our representative dataset. Refer to [TensorFlow's sample quantization code](https://colab.research.google.com/github/google-coral/tutorials/blob/master/retrain_classification_ptq_tf2.ipynb#scrollTo=kRDabW_u1wnv) to get a better understanding of what this is doing!" + ] }, { "cell_type": "code", @@ -1432,18 +1599,23 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "dYVVlv5QUUZF" + }, "source": [ "## 9.2. Test quantized model\n", "The model has been quantized and exported as `detect_quant.tflite`. Let's test it out! We'll re-use the function from Section 7 for running the model on test images and display the results, except this time we'll point it at the quantized model.\n", "\n", "Click Play on the code block below to test the `detect_quant.tflite` model." - ], - "metadata": { - "id": "dYVVlv5QUUZF" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6OoirJuOtdOG" + }, + "outputs": [], "source": [ "# Set up parameters for inferencing function (using detect_quant.tflite instead of detect.tflite)\n", "PATH_TO_IMAGES='/content/images/test' #Path to test images folder\n", @@ -1454,39 +1626,39 @@ "\n", "# Run inferencing function!\n", "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test)" - ], - "metadata": { - "id": "6OoirJuOtdOG" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "cKo7ZtfOyoxG" + }, "source": [ "If your quantized model isn't performing very well, try using my TensorFlow Lite 1 notebook *(link to be added)* to train a SSD-MobileNet model with your dataset. 
In my experience, the `ssd-mobilenet-v2-quantized` model from the [TF1 Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md) has the best quantized performance out of any other TensorFlow Lite model.\n", "\n", "TFLite models created with TensorFlow 1 are still compatible with the TensorFlow Lite 2 runtime, so your TFLite 1 model will still work with my [TensorFlow setup guide for the Raspberry Pi](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Raspberry_Pi_Guide.md)." - ], - "metadata": { - "id": "cKo7ZtfOyoxG" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "vWdVxs6LUjbR" + }, "source": [ "## 9.3 Calculate quantized model mAP\n", "\n", "Let's calculate the quantize model's mAP using the calculator tool we set up in Step 7.2. We just need to perform inference with our quantized model (`detect_quant.tflite`) to get a new set of detection results.\n", "\n", "Run the following block to run inference on the test images and save the detection results." - ], - "metadata": { - "id": "vWdVxs6LUjbR" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZMaumV-11Et0" + }, + "outputs": [], "source": [ "# Need to remove existing detection results first\n", "!rm /content/mAP/input/detection-results/*\n", @@ -1509,43 +1681,38 @@ "print('Starting inference on %d images...' % images_to_test)\n", "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test, PATH_TO_RESULTS, txt_only)\n", "print('Finished inferencing!')" - ], - "metadata": { - "id": "ZMaumV-11Et0" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Now we can run the mAP calculation script to determine our quantized model's mAP." - ], "metadata": { "id": "QgcmdLQf1Et1" - } + }, + "source": [ + "Now we can run the mAP calculation script to determine our quantized model's mAP." 
+ ] }, { "cell_type": "code", - "source": [ - "cd /content/mAP" - ], + "execution_count": null, "metadata": { "id": "ZIRNp0Af1Et1" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "cd /content/mAP" + ] }, { "cell_type": "code", - "source": [ - "!python calculate_map_cartucho.py --labels=/content/labelmap.txt" - ], + "execution_count": null, "metadata": { "id": "4TDgMBw_1Et1" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!python calculate_map_cartucho.py --labels=/content/labelmap.txt" + ] }, { "cell_type": "markdown", @@ -1642,12 +1809,12 @@ }, { "cell_type": "markdown", - "source": [ - "# Appendix: Common Errors" - ], "metadata": { "id": "5VI_Gh5dCd7w" - } + }, + "source": [ + "# Appendix: Common Errors" + ] }, { "cell_type": "markdown", @@ -1668,8 +1835,7 @@ "metadata": { "accelerator": "GPU", "colab": { - "provenance": [], - "toc_visible": true, + "authorship_tag": "ABX9TyPJwANaBtFGhp/i5NVhWkIW", "collapsed_sections": [ "4VAvZo8qE4u5", "sxb8_h-QFErO", @@ -1682,8 +1848,9 @@ "WoptFnAhCSrR", "5VI_Gh5dCd7w" ], - "authorship_tag": "ABX9TyPJwANaBtFGhp/i5NVhWkIW", - "include_colab_link": true + "include_colab_link": true, + "provenance": [], + "toc_visible": true }, "gpuClass": "standard", "kernelspec": { @@ -1696,4 +1863,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/Android/app/.gitignore b/Android/app/.gitignore new file mode 100644 index 00000000..42afabfd --- /dev/null +++ b/Android/app/.gitignore @@ -0,0 +1 @@ +/build \ No newline at end of file diff --git a/Android/app/build.gradle.kts b/Android/app/build.gradle.kts new file mode 100644 index 00000000..25092087 --- /dev/null +++ b/Android/app/build.gradle.kts @@ -0,0 +1,97 @@ +plugins { + alias(libs.plugins.androidApplication) + alias(libs.plugins.jetbrainsKotlinAndroid) + alias(libs.plugins.hiltAndroid) + alias(libs.plugins.ksp) +} + +android { + namespace = "com.example.myapplication" + compileSdk = 34 + + defaultConfig { + applicationId = "com.example.myapplication" + minSdk = 24 + targetSdk = 34 + versionCode = 1 + versionName = "1.0" + + testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner" + vectorDrawables { + useSupportLibrary = true + } + } + + buildTypes { + release { + isMinifyEnabled = false + proguardFiles( + getDefaultProguardFile("proguard-android-optimize.txt"), + "proguard-rules.pro" + ) + } + } + compileOptions { + sourceCompatibility = JavaVersion.VERSION_1_8 + targetCompatibility = JavaVersion.VERSION_1_8 + } + kotlinOptions { + jvmTarget = "1.8" + } + buildFeatures { + compose = true + } + composeOptions { + kotlinCompilerExtensionVersion = "1.5.3" + } + packaging { + resources { + excludes += "/META-INF/{AL2.0,LGPL2.1}" + } + } +} + +dependencies { + + implementation(libs.androidx.core.ktx) + implementation(libs.androidx.lifecycle.runtime.ktx) + implementation(libs.androidx.activity.compose) + implementation(platform(libs.androidx.compose.bom)) + implementation(libs.androidx.ui) + implementation(libs.androidx.ui.graphics) + implementation(libs.androidx.ui.tooling.preview) + implementation(libs.androidx.material3) + + implementation(libs.hilt.android) + implementation(libs.hilt.navigation.compose) + + ksp(libs.dagger.compiler) + ksp(libs.hilt.android.compiler) + //ksp(libs.android.hilt.compiler) + //ksp("com.google.dagger:dagger-compiler:2.48.1") + //ksp("com.google.dagger:hilt-android-compiler:2.48.1") + + + implementation(libs.androidx.camera) + implementation(libs.androidx.camera.lifecycle) + 
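+    // CameraX PreviewView dependency (assumed alias for androidx.camera:camera-view), used by the AndroidView camera preview in DetectionScreen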
implementation(libs.androidx.camera.view) + + implementation(libs.android.lifecycle.viewmodel.ktx) + implementation(libs.android.lifecycle.runtime.ktx) + implementation(libs.android.lifecycle.runtime.compose) + implementation(libs.android.lifecycle.lifecycle.viewmodel.compose) + + implementation(libs.kotlin.coroutines.core) + implementation(libs.kotlin.coroutines.android) + + implementation(libs.tflite.task.vision) + implementation(libs.tflite.gpu) + + testImplementation(libs.junit) + androidTestImplementation(libs.androidx.junit) + androidTestImplementation(libs.androidx.espresso.core) + androidTestImplementation(platform(libs.androidx.compose.bom)) + androidTestImplementation(libs.androidx.ui.test.junit4) + debugImplementation(libs.androidx.ui.tooling) + debugImplementation(libs.androidx.ui.test.manifest) +} \ No newline at end of file diff --git a/Android/app/proguard-rules.pro b/Android/app/proguard-rules.pro new file mode 100644 index 00000000..481bb434 --- /dev/null +++ b/Android/app/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# You can control the set of applied configuration files using the +# proguardFiles setting in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} + +# Uncomment this to preserve the line number information for +# debugging stack traces. +#-keepattributes SourceFile,LineNumberTable + +# If you keep the line number information, uncomment this to +# hide the original source file name. +#-renamesourcefileattribute SourceFile \ No newline at end of file diff --git a/Android/app/src/androidTest/java/io/ejtech/tflite/ExampleInstrumentedTest.kt b/Android/app/src/androidTest/java/io/ejtech/tflite/ExampleInstrumentedTest.kt new file mode 100644 index 00000000..06fc020d --- /dev/null +++ b/Android/app/src/androidTest/java/io/ejtech/tflite/ExampleInstrumentedTest.kt @@ -0,0 +1,24 @@ +package io.ejtech.tflite + +import androidx.test.platform.app.InstrumentationRegistry +import androidx.test.ext.junit.runners.AndroidJUnit4 + +import org.junit.Test +import org.junit.runner.RunWith + +import org.junit.Assert.* + +/** + * Instrumented test, which will execute on an Android device. + * + * See [testing documentation](http://d.android.com/tools/testing). + */ +@RunWith(AndroidJUnit4::class) +class ExampleInstrumentedTest { + @Test + fun useAppContext() { + // Context of the app under test. 
+ val appContext = InstrumentationRegistry.getInstrumentation().targetContext + assertEquals("com.example.myapplication", appContext.packageName) + } +} \ No newline at end of file diff --git a/Android/app/src/main/AndroidManifest.xml b/Android/app/src/main/AndroidManifest.xml new file mode 100644 index 00000000..14363554 --- /dev/null +++ b/Android/app/src/main/AndroidManifest.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Android/app/src/main/assets/detect_coin.tflite b/Android/app/src/main/assets/detect_coin.tflite new file mode 100644 index 00000000..eeb3dcef Binary files /dev/null and b/Android/app/src/main/assets/detect_coin.tflite differ diff --git a/Android/app/src/main/assets/ssd_mobilenet_v1.tflite b/Android/app/src/main/assets/ssd_mobilenet_v1.tflite new file mode 100644 index 00000000..be3b96b7 Binary files /dev/null and b/Android/app/src/main/assets/ssd_mobilenet_v1.tflite differ diff --git a/Android/app/src/main/java/io/ejtech/tflite/MainActivity.kt b/Android/app/src/main/java/io/ejtech/tflite/MainActivity.kt new file mode 100644 index 00000000..eb60a76d --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/MainActivity.kt @@ -0,0 +1,45 @@ +package io.ejtech.tflite + +import android.os.Bundle +import android.view.WindowManager +import androidx.activity.ComponentActivity +import androidx.activity.compose.setContent +import androidx.compose.foundation.layout.fillMaxSize +import androidx.compose.material3.MaterialTheme +import androidx.compose.material3.Surface +import androidx.compose.ui.Modifier +import androidx.hilt.navigation.compose.hiltViewModel +import dagger.hilt.android.AndroidEntryPoint +import io.ejtech.tflite.ui.detection.DetectionScreen +import io.ejtech.tflite.ui.detection.DetectionViewModel +import io.ejtech.tflite.ui.theme.MyApplicationTheme + +@AndroidEntryPoint +class MainActivity : ComponentActivity() { + override fun onCreate(savedInstanceState: Bundle?) 
{ + super.onCreate(savedInstanceState) + keepScreenOn() + setContent { + MyApplicationTheme { + //How DetectionScreen is displayed + Surface( + modifier = Modifier.fillMaxSize(), + color = MaterialTheme.colorScheme.background + ) { + val detectionViewModel = hiltViewModel() + DetectionScreen( + detectionViewModel = detectionViewModel, + detectionState = detectionViewModel.detectionState.value + ) + } + } + } + } + + /** + * Prevents screen from sleeping + */ + private fun keepScreenOn() { + window.addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON) + } +} \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/TfliteApplication.kt b/Android/app/src/main/java/io/ejtech/tflite/TfliteApplication.kt new file mode 100644 index 00000000..137e025d --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/TfliteApplication.kt @@ -0,0 +1,11 @@ +package io.ejtech.tflite + +import android.app.Application +import dagger.hilt.android.HiltAndroidApp + +/** + * Boilerplate for Dagger-Hilt dependency injection + */ +@HiltAndroidApp +class TfliteApplication : Application() { +} \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/data/di/AppModule.kt b/Android/app/src/main/java/io/ejtech/tflite/data/di/AppModule.kt new file mode 100644 index 00000000..d4b19508 --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/data/di/AppModule.kt @@ -0,0 +1,26 @@ +package io.ejtech.tflite.data.di + +import android.app.Application +import android.content.Context +import dagger.Module +import dagger.Provides +import dagger.hilt.InstallIn +import dagger.hilt.components.SingletonComponent +import io.ejtech.tflite.ui.detection.ObjectDetectorHelper +import javax.inject.Singleton + +@Module +@InstallIn(SingletonComponent::class) +object AppModule { + @Provides + @Singleton + fun provideContext(application: Application): Context { + return application.applicationContext + } + + @Provides + @Singleton + fun provideObjectDetectorHelper(context: Context): ObjectDetectorHelper { + return ObjectDetectorHelper(context = context) + } +} \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/CategoryMut.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/CategoryMut.kt new file mode 100644 index 00000000..120de915 --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/CategoryMut.kt @@ -0,0 +1,11 @@ +package io.ejtech.tflite.ui.detection + +/** + * @param label + * Name of the detection + * @param confidence_score + */ +data class CategoryMut ( + var label: String = "", + var confidence_score: Float = 0.0f, +) \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionMut.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionMut.kt new file mode 100644 index 00000000..70702a9e --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionMut.kt @@ -0,0 +1,8 @@ +package io.ejtech.tflite.ui.detection + +import android.graphics.RectF + +data class DetectionMut( + var boundingBox: RectF, + var category: CategoryMut +) diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionScreen.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionScreen.kt new file mode 100644 index 00000000..3275f02f --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionScreen.kt @@ -0,0 +1,249 @@ +package io.ejtech.tflite.ui.detection + +import android.Manifest 
+import android.app.Activity +import android.content.pm.ActivityInfo +import android.graphics.Bitmap +import android.graphics.Paint +import android.graphics.Rect +import android.graphics.RectF +import android.widget.Toast +import androidx.activity.compose.rememberLauncherForActivityResult +import androidx.activity.result.contract.ActivityResultContracts +import androidx.camera.core.AspectRatio +import androidx.camera.core.Camera +import androidx.camera.core.CameraSelector +import androidx.camera.core.ImageAnalysis +import androidx.camera.core.Preview +import androidx.camera.lifecycle.ProcessCameraProvider +import androidx.camera.view.PreviewView +import androidx.compose.foundation.Canvas +import androidx.compose.foundation.layout.Box +import androidx.compose.foundation.layout.fillMaxSize +import androidx.compose.material3.Text +import androidx.compose.runtime.Composable +import androidx.compose.runtime.DisposableEffect +import androidx.compose.runtime.LaunchedEffect +import androidx.compose.runtime.getValue +import androidx.compose.runtime.mutableStateOf +import androidx.compose.runtime.remember +import androidx.compose.runtime.setValue +import androidx.compose.ui.Alignment +import androidx.compose.ui.Modifier +import androidx.compose.ui.geometry.Offset +import androidx.compose.ui.geometry.Size +import androidx.compose.ui.graphics.Color +import androidx.compose.ui.graphics.drawscope.Stroke +import androidx.compose.ui.graphics.nativeCanvas +import androidx.compose.ui.layout.onGloballyPositioned +import androidx.compose.ui.platform.LocalContext +import androidx.compose.ui.platform.LocalLifecycleOwner +import androidx.compose.ui.text.TextStyle +import androidx.compose.ui.text.drawText +import androidx.compose.ui.text.font.FontWeight +import androidx.compose.ui.text.rememberTextMeasurer + +import androidx.compose.ui.unit.dp +import androidx.compose.ui.unit.sp +import androidx.compose.ui.unit.toSize +import androidx.compose.ui.viewinterop.AndroidView +import androidx.core.content.ContextCompat +import androidx.core.graphics.toRect +import androidx.lifecycle.Lifecycle +import androidx.lifecycle.LifecycleEventObserver +import androidx.navigation.NavGraph +import com.google.android.gms.tflite.gpu.R +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.withContext +import java.util.concurrent.ExecutorService +import java.util.concurrent.Executors +import kotlin.math.max + +/** + * Main screen that displays the camera view and visible detections + * + * @param detectionViewModel + * Receives images from the camera feed and returns detections + * @param detectionState + * Holds the detections returned from detectionViewModel + */ +@Composable +fun DetectionScreen( + detectionViewModel: DetectionViewModel, + detectionState: DetectionState +) { + val context = LocalContext.current + val lifecycleOwner = LocalLifecycleOwner.current + val activity = (LocalContext.current as Activity) + // Set the screen to remain in Landscape mode no matter how the device is held + activity.requestedOrientation = ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE + + // Runs camera on a separate thread and observes this screen to end the thread when the screen is destroyed + var cameraExecutor: ExecutorService = Executors.newSingleThreadExecutor() + DisposableEffect(lifecycleOwner) { + val observer = LifecycleEventObserver { _, event -> + if (event == Lifecycle.Event.ON_DESTROY) { + cameraExecutor.shutdown() + detectionViewModel.destroy() + } + } + lifecycleOwner.lifecycle.addObserver(observer) + onDispose { + 
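+            // Detach the lifecycle observer when this composable leaves the composition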
lifecycleOwner.lifecycle.removeObserver(observer) + } + } + + // Camera state + val camera = remember { mutableStateOf(null) } + val cameraProviderFuture = remember { + ProcessCameraProvider.getInstance(context) + } + + //Requests permission from user to gain access to the camera + val permissionsLauncher = rememberLauncherForActivityResult( + contract = ActivityResultContracts.RequestMultiplePermissions(), + onResult = { granted -> } + ) + LaunchedEffect(key1 = true) { + permissionsLauncher.launch( + arrayOf( + Manifest.permission.CAMERA + ) + ) + } + + //Listed for a one-time initialization event of Tensorflow + LaunchedEffect(key1 = context) { + detectionViewModel.tensorflowInitializationEvent.collect { event -> + when (event) { + is Resource.Success -> { + withContext(Dispatchers.Main) { + Toast.makeText(context, event.data, Toast.LENGTH_SHORT).show() + } + } + is Resource.Loading -> { + + } + is Resource.Error -> { + withContext(Dispatchers.Main) { + Toast.makeText(context, event.message, Toast.LENGTH_SHORT).show() + } + } + } + } + } + + var boxsize by remember { mutableStateOf(Size.Zero)} + //Used to size the text label on detections + val textMeasurer = rememberTextMeasurer() + // Stores the image for each camera frame + var imageBitmap: Bitmap? = null + Box( + modifier = Modifier + .fillMaxSize() + .onGloballyPositioned { coordinates -> + boxsize = coordinates.size.toSize() + } + ){ + AndroidView( + modifier = Modifier + .fillMaxSize(), + factory = { context -> + //PreviewView is the camera preview + PreviewView(context).also{ + //Fill the camera view to the entire screen + it.scaleType = PreviewView.ScaleType.FILL_START + //Ratio that best matches our model image format + val preview = Preview.Builder() + .setTargetAspectRatio(AspectRatio.RATIO_4_3) + .build() + //Use the rear camera + val selector = CameraSelector.Builder() + .requireLensFacing(CameraSelector.LENS_FACING_BACK) + .build() + preview.setSurfaceProvider(it.surfaceProvider) + + //Passes each camera frame to the viewmodel to detect objects + var imageAnalyzer: ImageAnalysis = ImageAnalysis.Builder() + .setTargetAspectRatio(AspectRatio.RATIO_4_3) + .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST) + .setOutputImageFormat(ImageAnalysis.OUTPUT_IMAGE_FORMAT_RGBA_8888) + .build() + .also{ + it.setAnalyzer(cameraExecutor) { image -> + if (imageBitmap == null) { + imageBitmap = Bitmap.createBitmap( + image.width, + image.height, + Bitmap.Config.ARGB_8888 + ) + } + detectionViewModel.detectObjects(image, imageBitmap!!, boxsize) + } + } + + //Assigns our imageAnalyzer to the camera + try{ + cameraProviderFuture.get().unbindAll() + camera.value = cameraProviderFuture.get().bindToLifecycle( + lifecycleOwner, + selector, + preview, + imageAnalyzer + ) + } catch(e: Exception){ + e.printStackTrace() + } + } + } + ) + Text(text = detectionState.inferenceTimeCurr.toFloat().toString() + " ms", modifier = Modifier.align(Alignment.BottomCenter)) + + //Where detections are drawn on screen if the model successfully load and the screen has detections + if(detectionState.tensorflowEnabled && detectionState.tensorflowDetections.isNotEmpty()) { + Canvas( + modifier = Modifier + .fillMaxSize() + ) { + //Images are resized before being passed to the ObjectDetector + for (detection in detectionState.tensorflowDetections) { + val boundingBox = detection.boundingBox + val label = detection.category.label + + //Draws the bounding box + drawRect( + topLeft = Offset(boundingBox.left, boundingBox.top), + color = Color.Green, + style = 
Stroke(width = 3.dp.toPx()), + size = Size(boundingBox.width(), boundingBox.height()) + ) + + val textBounds = Rect() + val textPaint = Paint().apply { + textSize = 14.sp.toPx() + color = ContextCompat.getColor(context, com.example.myapplication.R.color.white) + } + textPaint.getTextBounds(label, 0, label.length, textBounds) + + val backgroundRect = androidx.compose.ui.geometry.Rect( + 0f, + 0f, + textBounds.width().toFloat(), + textBounds.height().toFloat() + ) + drawRect( + color = Color.Black, + topLeft = Offset(x = boundingBox.left, y = boundingBox.top - 50), + size = backgroundRect.size + ) + drawContext.canvas.nativeCanvas.drawText( + label, + boundingBox.left, + boundingBox.top - 60 + textBounds.height(), + textPaint + ) + } + } + } + } +} \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionState.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionState.kt new file mode 100644 index 00000000..590236a7 --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionState.kt @@ -0,0 +1,15 @@ +package io.ejtech.tflite.ui.detection + +import org.tensorflow.lite.task.gms.vision.detector.Detection + +/** + * Used by DetectionScreen to update itself from DetectionViewMdeol + */ +data class DetectionState( + var tensorflowEnabled: Boolean = false, + var tensorflowDetections: MutableList = mutableListOf(), + var tensorflowImageHeight: Int = 0, + var tensorflowImageWidth: Int = 0, + var inferenceTimeAvg: Double = 0.0, + var inferenceTimeCurr: Long = 0L +) diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionViewModel.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionViewModel.kt new file mode 100644 index 00000000..18c15e24 --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/DetectionViewModel.kt @@ -0,0 +1,192 @@ +package io.ejtech.tflite.ui.detection + +import android.graphics.Bitmap +import android.graphics.RectF +import android.os.SystemClock +import androidx.camera.core.ImageProxy +import androidx.compose.runtime.State +import androidx.compose.runtime.mutableStateOf +import androidx.compose.ui.geometry.Size +import androidx.lifecycle.ViewModel +import androidx.lifecycle.viewModelScope +import dagger.hilt.android.lifecycle.HiltViewModel +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.channels.Channel +import kotlinx.coroutines.flow.receiveAsFlow +import kotlinx.coroutines.launch +import kotlinx.coroutines.withContext +import javax.inject.Inject +import kotlin.math.abs +import kotlin.math.max + +@HiltViewModel +class DetectionViewModel @Inject constructor( + private val objectDetectorHelper: ObjectDetectorHelper +): ViewModel() { + + // State of our screen + private var _detectionState = mutableStateOf(DetectionState()) + val detectionState: State = _detectionState + + //One time event to notify the user if initialization was successful or not + private val _tensorflowInitializationEvent = Channel>() + val tensorflowInitializationEvent = _tensorflowInitializationEvent.receiveAsFlow() + + private var avgInference: MutableList = mutableListOf() + + // On Screen creation, the ViewModel will attempt to initialize the Tflite model + init { + if(!detectionState.value.tensorflowEnabled){ + viewModelScope.launch(Dispatchers.IO) { + objectDetectorHelper.initialize().collect { detectionData: Resource -> + when(detectionData){ + is Resource.Success -> { + detectionData.data?.let{ + withContext(Dispatchers.Main){ + 
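+                            // Publish the enabled flag on the main thread, then emit the one-time success event shown as a Toast in DetectionScreen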
_detectionState.value = _detectionState.value.copy( + tensorflowEnabled = it.tensorflowEnabled + ) + _tensorflowInitializationEvent.send(Resource.Success("Tensorflow successfully initialized")) + } + } + } + is Resource.Loading -> {} + is Resource.Error -> { + detectionData.data?.let{ + withContext(Dispatchers.Main){ + _detectionState.value = _detectionState.value.copy( + tensorflowEnabled = it.tensorflowEnabled + ) + _tensorflowInitializationEvent.send(Resource.Error("Tensorflow failed to initialize. Error: " + detectionData.message)) + } + } + } + } + } + } + } + } + + /** + * @param image + * Frame from the camera + * @param bitmapBuffer + * Bitmap to be passed to the ObjectDetector + */ + fun detectObjects(image: ImageProxy, bitmapBuffer: Bitmap, boxsize: Size) { + var inferenceTime = SystemClock.uptimeMillis() + image.use { bitmapBuffer.copyPixelsFromBuffer(image.planes[0].buffer) } + + val imageRotation = image.imageInfo.rotationDegrees + //Passes the bitmapBuffer with the current device rotation + val resultState = objectDetectorHelper.detect(bitmapBuffer, imageRotation) + //Updates the state with any new detections to be used by DeviceScreen + + //Remove underscores + resultState.tensorflowDetections.forEach { it.category.label = it.category.label.replace("_", " ") } + //Filter vehicle + resultState.tensorflowDetections.removeAll{it.category.label == "vehicle"} + //Update wheel names based on closeness to other parts + val wheels = resultState.tensorflowDetections.filter {it.category.label == "wheel"} + if(wheels.isNotEmpty()){ + wheels.forEach{ part -> + val idx = resultState.tensorflowDetections.indexOf(part) + val wheelleft = part.boundingBox.left + var partclosesttowheelleft = DetectionMut(RectF(99999f, 99999f, 99999f, 99999f), CategoryMut()) + var diff = abs(wheelleft - partclosesttowheelleft.boundingBox.left) + resultState.tensorflowDetections.forEach { + if (it.category.label != "wheel") { + if (abs(wheelleft - it.boundingBox.left) < diff) { + partclosesttowheelleft = it + diff = abs(wheelleft - it.boundingBox.left) + } + } + } + //Just to check that we found detections first + if(partclosesttowheelleft.category.label != "") { + val partclosestowheelleftlabel = partclosesttowheelleft.category.label + if (partclosestowheelleftlabel == "left headlamp" || + partclosestowheelleftlabel == "left fender" || + partclosestowheelleftlabel == "left front door" + ) { + resultState.tensorflowDetections[idx].category.label = "left front wheel" + } else if (partclosestowheelleftlabel == "right headlamp" || + partclosestowheelleftlabel == "right fender" || + partclosestowheelleftlabel == "right front door" + ) { + resultState.tensorflowDetections[idx].category.label = "right front wheel" + } else if (partclosestowheelleftlabel == "left rear door" || + partclosestowheelleftlabel == "left quarter panel" || + partclosestowheelleftlabel == "left tail lamp" + ) { + resultState.tensorflowDetections[idx].category.label = "left rear wheel" + } else if (partclosestowheelleftlabel == "right rear door" || + partclosestowheelleftlabel == "right quarter panel" || + partclosestowheelleftlabel == "right tail lamp" + ) { + resultState.tensorflowDetections[idx].category.label = "right rear wheel" + } else if (partclosestowheelleftlabel == "front bumper cover") { + if (partclosesttowheelleft.boundingBox.left < wheelleft) { + resultState.tensorflowDetections[idx].category.label = "left front wheel" + } else { + resultState.tensorflowDetections[idx].category.label = "right front wheel" + } + } else if 
(partclosestowheelleftlabel == "rear bumper cover") { + if (partclosesttowheelleft.boundingBox.left < wheelleft) { + resultState.tensorflowDetections[idx].category.label = "right rear wheel" + } else { + resultState.tensorflowDetections[idx].category.label = "left rear wheel" + } + } + } + } + } + //Remove duplicates and save the one with the highest score + resultState.tensorflowDetections = resultState.tensorflowDetections.groupBy{ + it.category.label + }.mapValues { (_, sameNameDetections) -> + sameNameDetections.maxBy {it.category.confidence_score} + }.values.toMutableList() + + val scaleFactor = max(boxsize.width / resultState.tensorflowImageWidth, boxsize.height / resultState.tensorflowImageHeight) + resultState.tensorflowDetections.forEachIndexed { index, detection -> + val boundingBox = detection.boundingBox + + //Once returned, the bounding boxes and coordinates need to be scaled back up to + //be correctly displayed on screen + var top = boundingBox.top * scaleFactor + var left = boundingBox.left * scaleFactor + var bottom = boundingBox.bottom * scaleFactor + var right = boundingBox.right * scaleFactor + + var centerX = (left + right) / 2 + var centerY = (top + bottom) / 2 + top = centerY - 25f + bottom = centerY + 25f + left = centerX - 25f + right = centerX + 25f + + resultState.tensorflowDetections[index].boundingBox.set(left, top, right, bottom) + } + + inferenceTime = SystemClock.uptimeMillis() - inferenceTime + avgInference.add(inferenceTime) + val avg = avgInference.average() + if(avgInference.size > 500) { + avgInference.clear() + } + viewModelScope.launch(Dispatchers.Main){ + _detectionState.value = _detectionState.value.copy( + inferenceTimeCurr = inferenceTime, + inferenceTimeAvg = avg, + tensorflowDetections = resultState.tensorflowDetections, + tensorflowImageHeight = resultState.tensorflowImageHeight, + tensorflowImageWidth = resultState.tensorflowImageWidth + ) + } + } + + fun destroy(){ + objectDetectorHelper.destroy() + } +} \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/ObjectDetectorHelper.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/ObjectDetectorHelper.kt new file mode 100644 index 00000000..f5b43709 --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/ObjectDetectorHelper.kt @@ -0,0 +1,126 @@ +/* + * Copyright 2022 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.ejtech.tflite.ui.detection + +import android.content.Context +import android.graphics.Bitmap +import androidx.compose.runtime.mutableStateOf +import com.google.android.gms.tflite.client.TfLiteInitializationOptions +import com.google.android.gms.tflite.gpu.support.TfLiteGpu +import kotlinx.coroutines.channels.awaitClose +import kotlinx.coroutines.flow.callbackFlow +import kotlinx.coroutines.launch +import org.tensorflow.lite.support.image.ImageProcessor +import org.tensorflow.lite.support.image.TensorImage +import org.tensorflow.lite.support.image.ops.Rot90Op +import org.tensorflow.lite.task.core.BaseOptions +import org.tensorflow.lite.task.gms.vision.TfLiteVision +import org.tensorflow.lite.task.gms.vision.detector.ObjectDetector + +/** + * @param threshold + * Minimum confidence value for results + * @param numThreads + * Number of CPU threads used for inference + * @param maxResults + * Maximum results to detect at a time + * @param modelName + * Filename of the TFLite model in the assets folder + * @param context + * Used to initialize TfLiteVision and load the model + */ +class ObjectDetectorHelper( + //Modify the following 4 parameters as you wish + var threshold: Float = 0.8f, //Between 0 and 1 + var numThreads: Int = 4, //Between 1 and 4 + var maxResults: Int = 10, //1+, though you may notice frame rate drops with too many detections at once + var modelName: String = "detect_coin.tflite", //Update with your filename in assets folder + val context: Context +) { + //Single source of truth for our State + private var _detectorState = mutableStateOf(DetectionState()) + private var objectDetectorHelper: ObjectDetector? = null + + + //Called by DetectionViewModel on app startup to initialize the model + fun initialize() = callbackFlow<Resource<DetectionState>> { + if(objectDetectorHelper == null){ + TfLiteGpu.isGpuDelegateAvailable(context).onSuccessTask { gpuAvailable: Boolean -> + val optionsBuilder = TfLiteInitializationOptions.builder() + TfLiteVision.initialize(context, optionsBuilder.build()) + }.addOnSuccessListener { + val optionsBuilder = ObjectDetector.ObjectDetectorOptions.builder() + .setScoreThreshold(threshold) + .setMaxResults(maxResults) + val baseOptionsBuilder = BaseOptions.builder().setNumThreads(numThreads) + optionsBuilder.setBaseOptions(baseOptionsBuilder.build()) + + try { + objectDetectorHelper = ObjectDetector.createFromFileAndOptions(context, modelName, optionsBuilder.build()) + _detectorState.value = _detectorState.value.copy(tensorflowEnabled = true) + launch{ + send(Resource.Success(_detectorState.value)) + } + } catch (e: Exception) { + _detectorState.value = _detectorState.value.copy(tensorflowEnabled = false) + launch{ + send(Resource.Error(e.message!!, _detectorState.value)) + } + } + }.addOnFailureListener{ + _detectorState.value = _detectorState.value.copy(tensorflowEnabled = false) + launch{ + send(Resource.Error(it.message!!, _detectorState.value)) + } + } + } + else{ + _detectorState.value = _detectorState.value.copy(tensorflowEnabled = true) + launch{ + send(Resource.Success(_detectorState.value)) + } + } + awaitClose { } + } + + //Called for every camera frame + fun detect(image: Bitmap, imageRotation: Int): DetectionState { + val imageProcessor = ImageProcessor.Builder().add(Rot90Op(-imageRotation / 90)).build() + val tensorImage = imageProcessor.process(TensorImage.fromBitmap(image)) + val results = objectDetectorHelper?.detect(tensorImage) + + var detections = mutableListOf<DetectionMut>() + results?.forEach { + detections.add(DetectionMut(it.boundingBox, CategoryMut(it.categories[0].label, it.categories[0].score))) + } + + results?.let{
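+ // Only publish new results when the detector actually returned something; the processed image dimensions are kept so the ViewModel can scale the boxes back to screen coordinates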
_detectorState.value = _detectorState.value.copy( + tensorflowDetections = detections, + tensorflowImageHeight = tensorImage.height, + tensorflowImageWidth = tensorImage.width + ) + } + return _detectorState.value + } + + fun destroy(): DetectionState { + _detectorState.value = DetectionState() + objectDetectorHelper?.close() + objectDetectorHelper = null + return _detectorState.value + } +} diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/detection/Resource.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/Resource.kt new file mode 100644 index 00000000..dc8b8fed --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/detection/Resource.kt @@ -0,0 +1,10 @@ +package io.ejtech.tflite.ui.detection + +/** + * Used for passing state between the ViewModel and Screen + */ +sealed class Resource<T>(val data: T? = null, val message: String? = null) { + class Success<T>(data: T) : Resource<T>(data) + class Error<T>(message: String, data: T? = null) : Resource<T>(data, message) + class Loading<T>(data: T? = null) : Resource<T>(data) +} diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Color.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Color.kt new file mode 100644 index 00000000..5d5e3c33 --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Color.kt @@ -0,0 +1,11 @@ +package io.ejtech.tflite.ui.theme + +import androidx.compose.ui.graphics.Color + +val Purple80 = Color(0xFFD0BCFF) +val PurpleGrey80 = Color(0xFFCCC2DC) +val Pink80 = Color(0xFFEFB8C8) + +val Purple40 = Color(0xFF6650a4) +val PurpleGrey40 = Color(0xFF625b71) +val Pink40 = Color(0xFF7D5260) \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Theme.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Theme.kt new file mode 100644 index 00000000..c705f78b --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Theme.kt @@ -0,0 +1,70 @@ +package io.ejtech.tflite.ui.theme + +import android.app.Activity +import android.os.Build +import androidx.compose.foundation.isSystemInDarkTheme +import androidx.compose.material3.MaterialTheme +import androidx.compose.material3.darkColorScheme +import androidx.compose.material3.dynamicDarkColorScheme +import androidx.compose.material3.dynamicLightColorScheme +import androidx.compose.material3.lightColorScheme +import androidx.compose.runtime.Composable +import androidx.compose.runtime.SideEffect +import androidx.compose.ui.graphics.toArgb +import androidx.compose.ui.platform.LocalContext +import androidx.compose.ui.platform.LocalView +import androidx.core.view.WindowCompat + +private val DarkColorScheme = darkColorScheme( + primary = Purple80, + secondary = PurpleGrey80, + tertiary = Pink80 +) + +private val LightColorScheme = lightColorScheme( + primary = Purple40, + secondary = PurpleGrey40, + tertiary = Pink40 + + /* Other default colors to override + background = Color(0xFFFFFBFE), + surface = Color(0xFFFFFBFE), + onPrimary = Color.White, + onSecondary = Color.White, + onTertiary = Color.White, + onBackground = Color(0xFF1C1B1F), + onSurface = Color(0xFF1C1B1F), + */ +) + +@Composable +fun MyApplicationTheme( + darkTheme: Boolean = isSystemInDarkTheme(), + // Dynamic color is available on Android 12+ + dynamicColor: Boolean = true, + content: @Composable () -> Unit +) { + val colorScheme = when { + dynamicColor && Build.VERSION.SDK_INT >= Build.VERSION_CODES.S -> { + val context = LocalContext.current + if (darkTheme) dynamicDarkColorScheme(context) else dynamicLightColorScheme(context)
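+ // On Android 12+ the dynamic schemes are derived from the user's wallpaper; older devices fall through to the static DarkColorScheme/LightColorScheme branches below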
+ } + + darkTheme -> DarkColorScheme + else -> LightColorScheme + } + val view = LocalView.current + if (!view.isInEditMode) { + SideEffect { + val window = (view.context as Activity).window + window.statusBarColor = colorScheme.primary.toArgb() + WindowCompat.getInsetsController(window, view).isAppearanceLightStatusBars = darkTheme + } + } + + MaterialTheme( + colorScheme = colorScheme, + typography = Typography, + content = content + ) +} \ No newline at end of file diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Type.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Type.kt new file mode 100644 index 00000000..c16c21fa --- /dev/null +++ b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Type.kt @@ -0,0 +1,34 @@ +package io.ejtech.tflite.ui.theme + +import androidx.compose.material3.Typography +import androidx.compose.ui.text.TextStyle +import androidx.compose.ui.text.font.FontFamily +import androidx.compose.ui.text.font.FontWeight +import androidx.compose.ui.unit.sp + +// Set of Material typography styles to start with +val Typography = Typography( + bodyLarge = TextStyle( + fontFamily = FontFamily.Default, + fontWeight = FontWeight.Normal, + fontSize = 16.sp, + lineHeight = 24.sp, + letterSpacing = 0.5.sp + ) + /* Other default text styles to override + titleLarge = TextStyle( + fontFamily = FontFamily.Default, + fontWeight = FontWeight.Normal, + fontSize = 22.sp, + lineHeight = 28.sp, + letterSpacing = 0.sp + ), + labelSmall = TextStyle( + fontFamily = FontFamily.Default, + fontWeight = FontWeight.Medium, + fontSize = 11.sp, + lineHeight = 16.sp, + letterSpacing = 0.5.sp + ) + */ +) \ No newline at end of file diff --git a/Android/app/src/main/res/drawable/ic_launcher_background.xml b/Android/app/src/main/res/drawable/ic_launcher_background.xml new file mode 100644 index 00000000..07d5da9c --- /dev/null +++ b/Android/app/src/main/res/drawable/ic_launcher_background.xml @@ -0,0 +1,170 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Android/app/src/main/res/drawable/ic_launcher_foreground.xml b/Android/app/src/main/res/drawable/ic_launcher_foreground.xml new file mode 100644 index 00000000..2b068d11 --- /dev/null +++ b/Android/app/src/main/res/drawable/ic_launcher_foreground.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml new file mode 100644 index 00000000..6f3b755b --- /dev/null +++ b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml new file mode 100644 index 00000000..6f3b755b --- /dev/null +++ b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/Android/app/src/main/res/mipmap-hdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-hdpi/ic_launcher.webp new file mode 100644 index 00000000..c209e78e Binary files /dev/null and b/Android/app/src/main/res/mipmap-hdpi/ic_launcher.webp differ diff --git a/Android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp new file mode 100644 index 00000000..b2dfe3d1 Binary files /dev/null and b/Android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp differ diff --git 
a/Android/app/src/main/res/mipmap-mdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-mdpi/ic_launcher.webp new file mode 100644 index 00000000..4f0f1d64 Binary files /dev/null and b/Android/app/src/main/res/mipmap-mdpi/ic_launcher.webp differ diff --git a/Android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp new file mode 100644 index 00000000..62b611da Binary files /dev/null and b/Android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp differ diff --git a/Android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp new file mode 100644 index 00000000..948a3070 Binary files /dev/null and b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp differ diff --git a/Android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp new file mode 100644 index 00000000..1b9a6956 Binary files /dev/null and b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp differ diff --git a/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp new file mode 100644 index 00000000..28d4b77f Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp differ diff --git a/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp new file mode 100644 index 00000000..9287f508 Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp differ diff --git a/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp new file mode 100644 index 00000000..aa7d6427 Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp differ diff --git a/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp new file mode 100644 index 00000000..9126ae37 Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp differ diff --git a/Android/app/src/main/res/values/colors.xml b/Android/app/src/main/res/values/colors.xml new file mode 100644 index 00000000..f8c6127d --- /dev/null +++ b/Android/app/src/main/res/values/colors.xml @@ -0,0 +1,10 @@ + + + #FFBB86FC + #FF6200EE + #FF3700B3 + #FF03DAC5 + #FF018786 + #FF000000 + #FFFFFFFF + \ No newline at end of file diff --git a/Android/app/src/main/res/values/strings.xml b/Android/app/src/main/res/values/strings.xml new file mode 100644 index 00000000..dc4486a7 --- /dev/null +++ b/Android/app/src/main/res/values/strings.xml @@ -0,0 +1,3 @@ + + EJ Tech Object Detection + \ No newline at end of file diff --git a/Android/app/src/main/res/values/themes.xml b/Android/app/src/main/res/values/themes.xml new file mode 100644 index 00000000..e48770ab --- /dev/null +++ b/Android/app/src/main/res/values/themes.xml @@ -0,0 +1,5 @@ + + + +