(data)
+}
diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Color.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Color.kt
new file mode 100644
index 00000000..5d5e3c33
--- /dev/null
+++ b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Color.kt
@@ -0,0 +1,11 @@
+package io.ejtech.tflite.ui.theme
+
+import androidx.compose.ui.graphics.Color
+
+val Purple80 = Color(0xFFD0BCFF)
+val PurpleGrey80 = Color(0xFFCCC2DC)
+val Pink80 = Color(0xFFEFB8C8)
+
+val Purple40 = Color(0xFF6650a4)
+val PurpleGrey40 = Color(0xFF625b71)
+val Pink40 = Color(0xFF7D5260)
\ No newline at end of file
diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Theme.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Theme.kt
new file mode 100644
index 00000000..c705f78b
--- /dev/null
+++ b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Theme.kt
@@ -0,0 +1,70 @@
+package io.ejtech.tflite.ui.theme
+
+import android.app.Activity
+import android.os.Build
+import androidx.compose.foundation.isSystemInDarkTheme
+import androidx.compose.material3.MaterialTheme
+import androidx.compose.material3.darkColorScheme
+import androidx.compose.material3.dynamicDarkColorScheme
+import androidx.compose.material3.dynamicLightColorScheme
+import androidx.compose.material3.lightColorScheme
+import androidx.compose.runtime.Composable
+import androidx.compose.runtime.SideEffect
+import androidx.compose.ui.graphics.toArgb
+import androidx.compose.ui.platform.LocalContext
+import androidx.compose.ui.platform.LocalView
+import androidx.core.view.WindowCompat
+
+private val DarkColorScheme = darkColorScheme(
+ primary = Purple80,
+ secondary = PurpleGrey80,
+ tertiary = Pink80
+)
+
+private val LightColorScheme = lightColorScheme(
+ primary = Purple40,
+ secondary = PurpleGrey40,
+ tertiary = Pink40
+
+ /* Other default colors to override
+ background = Color(0xFFFFFBFE),
+ surface = Color(0xFFFFFBFE),
+ onPrimary = Color.White,
+ onSecondary = Color.White,
+ onTertiary = Color.White,
+ onBackground = Color(0xFF1C1B1F),
+ onSurface = Color(0xFF1C1B1F),
+ */
+)
+
+@Composable
+fun MyApplicationTheme(
+ darkTheme: Boolean = isSystemInDarkTheme(),
+ // Dynamic color is available on Android 12+
+ dynamicColor: Boolean = true,
+ content: @Composable () -> Unit
+) {
+ val colorScheme = when {
+ dynamicColor && Build.VERSION.SDK_INT >= Build.VERSION_CODES.S -> {
+ val context = LocalContext.current
+ if (darkTheme) dynamicDarkColorScheme(context) else dynamicLightColorScheme(context)
+ }
+
+ darkTheme -> DarkColorScheme
+ else -> LightColorScheme
+ }
+ val view = LocalView.current
+ if (!view.isInEditMode) {
+ SideEffect {
+ val window = (view.context as Activity).window
+ window.statusBarColor = colorScheme.primary.toArgb()
+ WindowCompat.getInsetsController(window, view).isAppearanceLightStatusBars = darkTheme
+ }
+ }
+
+ MaterialTheme(
+ colorScheme = colorScheme,
+ typography = Typography,
+ content = content
+ )
+}
\ No newline at end of file
diff --git a/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Type.kt b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Type.kt
new file mode 100644
index 00000000..c16c21fa
--- /dev/null
+++ b/Android/app/src/main/java/io/ejtech/tflite/ui/theme/Type.kt
@@ -0,0 +1,34 @@
+package io.ejtech.tflite.ui.theme
+
+import androidx.compose.material3.Typography
+import androidx.compose.ui.text.TextStyle
+import androidx.compose.ui.text.font.FontFamily
+import androidx.compose.ui.text.font.FontWeight
+import androidx.compose.ui.unit.sp
+
+// Set of Material typography styles to start with
+val Typography = Typography(
+ bodyLarge = TextStyle(
+ fontFamily = FontFamily.Default,
+ fontWeight = FontWeight.Normal,
+ fontSize = 16.sp,
+ lineHeight = 24.sp,
+ letterSpacing = 0.5.sp
+ )
+ /* Other default text styles to override
+ titleLarge = TextStyle(
+ fontFamily = FontFamily.Default,
+ fontWeight = FontWeight.Normal,
+ fontSize = 22.sp,
+ lineHeight = 28.sp,
+ letterSpacing = 0.sp
+ ),
+ labelSmall = TextStyle(
+ fontFamily = FontFamily.Default,
+ fontWeight = FontWeight.Medium,
+ fontSize = 11.sp,
+ lineHeight = 16.sp,
+ letterSpacing = 0.5.sp
+ )
+ */
+)
\ No newline at end of file
diff --git a/Android/app/src/main/res/drawable/ic_launcher_background.xml b/Android/app/src/main/res/drawable/ic_launcher_background.xml
new file mode 100644
index 00000000..07d5da9c
--- /dev/null
+++ b/Android/app/src/main/res/drawable/ic_launcher_background.xml
@@ -0,0 +1,170 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Android/app/src/main/res/drawable/ic_launcher_foreground.xml b/Android/app/src/main/res/drawable/ic_launcher_foreground.xml
new file mode 100644
index 00000000..2b068d11
--- /dev/null
+++ b/Android/app/src/main/res/drawable/ic_launcher_foreground.xml
@@ -0,0 +1,30 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
new file mode 100644
index 00000000..6f3b755b
--- /dev/null
+++ b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
+    <background android:drawable="@drawable/ic_launcher_background" />
+    <foreground android:drawable="@drawable/ic_launcher_foreground" />
+    <monochrome android:drawable="@drawable/ic_launcher_foreground" />
+</adaptive-icon>
\ No newline at end of file
diff --git a/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml
new file mode 100644
index 00000000..6f3b755b
--- /dev/null
+++ b/Android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
+    <background android:drawable="@drawable/ic_launcher_background" />
+    <foreground android:drawable="@drawable/ic_launcher_foreground" />
+    <monochrome android:drawable="@drawable/ic_launcher_foreground" />
+</adaptive-icon>
\ No newline at end of file
diff --git a/Android/app/src/main/res/mipmap-hdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-hdpi/ic_launcher.webp
new file mode 100644
index 00000000..c209e78e
Binary files /dev/null and b/Android/app/src/main/res/mipmap-hdpi/ic_launcher.webp differ
diff --git a/Android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp
new file mode 100644
index 00000000..b2dfe3d1
Binary files /dev/null and b/Android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp differ
diff --git a/Android/app/src/main/res/mipmap-mdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-mdpi/ic_launcher.webp
new file mode 100644
index 00000000..4f0f1d64
Binary files /dev/null and b/Android/app/src/main/res/mipmap-mdpi/ic_launcher.webp differ
diff --git a/Android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp
new file mode 100644
index 00000000..62b611da
Binary files /dev/null and b/Android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp differ
diff --git a/Android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp
new file mode 100644
index 00000000..948a3070
Binary files /dev/null and b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp differ
diff --git a/Android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp
new file mode 100644
index 00000000..1b9a6956
Binary files /dev/null and b/Android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp differ
diff --git a/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp
new file mode 100644
index 00000000..28d4b77f
Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp differ
diff --git a/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp
new file mode 100644
index 00000000..9287f508
Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp differ
diff --git a/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp
new file mode 100644
index 00000000..aa7d6427
Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp differ
diff --git a/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp
new file mode 100644
index 00000000..9126ae37
Binary files /dev/null and b/Android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp differ
diff --git a/Android/app/src/main/res/values/colors.xml b/Android/app/src/main/res/values/colors.xml
new file mode 100644
index 00000000..f8c6127d
--- /dev/null
+++ b/Android/app/src/main/res/values/colors.xml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+    <color name="purple_200">#FFBB86FC</color>
+    <color name="purple_500">#FF6200EE</color>
+    <color name="purple_700">#FF3700B3</color>
+    <color name="teal_200">#FF03DAC5</color>
+    <color name="teal_700">#FF018786</color>
+    <color name="black">#FF000000</color>
+    <color name="white">#FFFFFFFF</color>
+</resources>
\ No newline at end of file
diff --git a/Android/app/src/main/res/values/strings.xml b/Android/app/src/main/res/values/strings.xml
new file mode 100644
index 00000000..dc4486a7
--- /dev/null
+++ b/Android/app/src/main/res/values/strings.xml
@@ -0,0 +1,3 @@
+<resources>
+    <string name="app_name">EJ Tech Object Detection</string>
+</resources>
\ No newline at end of file
diff --git a/Android/app/src/main/res/values/themes.xml b/Android/app/src/main/res/values/themes.xml
new file mode 100644
index 00000000..e48770ab
--- /dev/null
+++ b/Android/app/src/main/res/values/themes.xml
@@ -0,0 +1,5 @@
+
+
+
+
+
\ No newline at end of file
diff --git a/Android/app/src/main/res/xml/backup_rules.xml b/Android/app/src/main/res/xml/backup_rules.xml
new file mode 100644
index 00000000..fa0f996d
--- /dev/null
+++ b/Android/app/src/main/res/xml/backup_rules.xml
@@ -0,0 +1,13 @@
+
+
+
+
\ No newline at end of file
diff --git a/Android/app/src/main/res/xml/data_extraction_rules.xml b/Android/app/src/main/res/xml/data_extraction_rules.xml
new file mode 100644
index 00000000..9ee9997b
--- /dev/null
+++ b/Android/app/src/main/res/xml/data_extraction_rules.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Android/app/src/test/java/io/ejtech/tflite/ExampleUnitTest.kt b/Android/app/src/test/java/io/ejtech/tflite/ExampleUnitTest.kt
new file mode 100644
index 00000000..676b74fe
--- /dev/null
+++ b/Android/app/src/test/java/io/ejtech/tflite/ExampleUnitTest.kt
@@ -0,0 +1,17 @@
+package io.ejtech.tflite
+
+import org.junit.Test
+
+import org.junit.Assert.*
+
+/**
+ * Example local unit test, which will execute on the development machine (host).
+ *
+ * See [testing documentation](http://d.android.com/tools/testing).
+ */
+class ExampleUnitTest {
+ @Test
+ fun addition_isCorrect() {
+ assertEquals(4, 2 + 2)
+ }
+}
\ No newline at end of file
diff --git a/Android/build.gradle.kts b/Android/build.gradle.kts
new file mode 100644
index 00000000..b0fdbcd5
--- /dev/null
+++ b/Android/build.gradle.kts
@@ -0,0 +1,9 @@
+// Top-level build file where you can add configuration options common to all sub-projects/modules.
+plugins {
+ alias(libs.plugins.androidApplication) apply false
+ alias(libs.plugins.jetbrainsKotlinAndroid) apply false
+ alias(libs.plugins.hiltAndroid) apply false
+ alias(libs.plugins.ksp) apply false
+ //alias(libs.plugins.ksp) apply false
+ //alias(libs.plugins.hilt) apply false
+}
\ No newline at end of file
diff --git a/Android/gradle.properties b/Android/gradle.properties
new file mode 100644
index 00000000..20e2a015
--- /dev/null
+++ b/Android/gradle.properties
@@ -0,0 +1,23 @@
+# Project-wide Gradle settings.
+# IDE (e.g. Android Studio) users:
+# Gradle settings configured through the IDE *will override*
+# any settings specified in this file.
+# For more details on how to configure your build environment visit
+# http://www.gradle.org/docs/current/userguide/build_environment.html
+# Specifies the JVM arguments used for the daemon process.
+# The setting is particularly useful for tweaking memory settings.
+org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8
+# When configured, Gradle will run in incubating parallel mode.
+# This option should only be used with decoupled projects. For more details, visit
+# https://developer.android.com/r/tools/gradle-multi-project-decoupled-projects
+# org.gradle.parallel=true
+# AndroidX package structure to make it clearer which packages are bundled with the
+# Android operating system, and which are packaged with your app's APK
+# https://developer.android.com/topic/libraries/support-library/androidx-rn
+android.useAndroidX=true
+# Kotlin code style for this project: "official" or "obsolete":
+kotlin.code.style=official
+# Enables namespacing of each library's R class so that its R class includes only the
+# resources declared in the library itself and none from the library's dependencies,
+# thereby reducing the size of the R class for that library
+android.nonTransitiveRClass=true
\ No newline at end of file
diff --git a/Android/gradle/libs.versions.toml b/Android/gradle/libs.versions.toml
new file mode 100644
index 00000000..4b4f801e
--- /dev/null
+++ b/Android/gradle/libs.versions.toml
@@ -0,0 +1,67 @@
+[versions]
+agp = "8.3.1"
+kotlin = "1.9.10"
+coreKtx = "1.12.0"
+junit = "4.13.2"
+junitVersion = "1.1.5"
+espressoCore = "3.5.1"
+lifecycleRuntimeKtx = "2.6.1"
+activityCompose = "1.8.2"
+composeBom = "2024.03.00"
+
+hilt = "2.48.1"
+androidxHilt = "1.2.0"
+
+androidxCamera = "1.3.0-alpha07"
+
+tfliteTaskvision = "0.4.2"
+tfliteGpu = "16.0.0"
+
+lifecycle = "2.6.1"
+coroutines = "1.6.4"
+
+ksp = "1.9.10-1.0.13"
+
+[libraries]
+androidx-core-ktx = { group = "androidx.core", name = "core-ktx", version.ref = "coreKtx" }
+junit = { group = "junit", name = "junit", version.ref = "junit" }
+androidx-junit = { group = "androidx.test.ext", name = "junit", version.ref = "junitVersion" }
+androidx-espresso-core = { group = "androidx.test.espresso", name = "espresso-core", version.ref = "espressoCore" }
+androidx-lifecycle-runtime-ktx = { group = "androidx.lifecycle", name = "lifecycle-runtime-ktx", version.ref = "lifecycleRuntimeKtx" }
+androidx-activity-compose = { group = "androidx.activity", name = "activity-compose", version.ref = "activityCompose" }
+androidx-compose-bom = { group = "androidx.compose", name = "compose-bom", version.ref = "composeBom" }
+androidx-ui = { group = "androidx.compose.ui", name = "ui" }
+androidx-ui-graphics = { group = "androidx.compose.ui", name = "ui-graphics" }
+androidx-ui-tooling = { group = "androidx.compose.ui", name = "ui-tooling" }
+androidx-ui-tooling-preview = { group = "androidx.compose.ui", name = "ui-tooling-preview" }
+androidx-ui-test-manifest = { group = "androidx.compose.ui", name = "ui-test-manifest" }
+androidx-ui-test-junit4 = { group = "androidx.compose.ui", name = "ui-test-junit4" }
+androidx-material3 = { group = "androidx.compose.material3", name = "material3" }
+
+hilt-android = { group = "com.google.dagger", name = "hilt-android", version.ref = "hilt"}
+hilt-compiler = { group = "com.google.dagger", name = "hilt-compiler", version.ref = "hilt" }
+dagger-compiler = { group = "com.google.dagger", name = "dagger-compiler", version.ref = "hilt"}
+hilt-android-compiler = { group = "com.google.dagger", name = "hilt-android-compiler", version.ref = "hilt" }
+hilt-navigation-compose = { group = "androidx.hilt", name = "hilt-navigation-compose", version.ref = "androidxHilt"}
+android-hilt-compiler = { group = "androidx.hilt", name = "hilt-compiler", version.ref = "androidxHilt"}
+
+androidx-camera = { group = "androidx.camera", name = "camera-camera2", version.ref = "androidxCamera"}
+androidx-camera-lifecycle = { group = "androidx.camera", name = "camera-lifecycle", version.ref = "androidxCamera"}
+androidx-camera-view = { group = "androidx.camera", name = "camera-view", version.ref = "androidxCamera"}
+
+tflite-task-vision = { group = "org.tensorflow", name = "tensorflow-lite-task-vision-play-services", version.ref = "tfliteTaskvision"}
+tflite-gpu = { group = "com.google.android.gms", name = "play-services-tflite-gpu", version.ref = "tfliteGpu"}
+
+android-lifecycle-viewmodel-ktx = { group = "androidx.lifecycle", name = "lifecycle-viewmodel-ktx", version.ref = "lifecycle"}
+android-lifecycle-runtime-ktx = { group = "androidx.lifecycle", name = "lifecycle-runtime-ktx", version.ref = "lifecycle"}
+android-lifecycle-runtime-compose = { group = "androidx.lifecycle", name = "lifecycle-runtime-compose", version.ref = "lifecycle"}
+android-lifecycle-lifecycle-viewmodel-compose = { group = "androidx.lifecycle", name = "lifecycle-viewmodel-compose", version.ref = "lifecycle"}
+
+kotlin-coroutines-core = { group = "org.jetbrains.kotlinx", name = "kotlinx-coroutines-core", version.ref = "coroutines"}
+kotlin-coroutines-android = { group = "org.jetbrains.kotlinx", name = "kotlinx-coroutines-android", version.ref = "coroutines"}
+
+[plugins]
+androidApplication = { id = "com.android.application", version.ref = "agp" }
+jetbrainsKotlinAndroid = { id = "org.jetbrains.kotlin.android", version.ref = "kotlin" }
+hiltAndroid = { id = "com.google.dagger.hilt.android", version.ref = "hilt" }
+ksp = { id = "com.google.devtools.ksp", version.ref = "ksp" }
\ No newline at end of file
diff --git a/Android/gradle/wrapper/gradle-wrapper.jar b/Android/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 00000000..e708b1c0
Binary files /dev/null and b/Android/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/Android/gradle/wrapper/gradle-wrapper.properties b/Android/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 00000000..20aee8af
--- /dev/null
+++ b/Android/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,6 @@
+#Wed Mar 20 16:22:16 EDT 2024
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/Android/gradlew b/Android/gradlew
new file mode 100644
index 00000000..4f906e0c
--- /dev/null
+++ b/Android/gradlew
@@ -0,0 +1,185 @@
+#!/usr/bin/env sh
+
+#
+# Copyright 2015 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##############################################################################
+##
+## Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "$PRG"`"/$link"
+ fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+ echo "$*"
+}
+
+die () {
+ echo
+ echo "$*"
+ echo
+ exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+ CYGWIN* )
+ cygwin=true
+ ;;
+ Darwin* )
+ darwin=true
+ ;;
+ MINGW* )
+ msys=true
+ ;;
+ NONSTOP* )
+ nonstop=true
+ ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ if [ ! -x "$JAVACMD" ] ; then
+ die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+ fi
+else
+ JAVACMD="java"
+ which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+ MAX_FD_LIMIT=`ulimit -H -n`
+ if [ $? -eq 0 ] ; then
+ if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+ MAX_FD="$MAX_FD_LIMIT"
+ fi
+ ulimit -n $MAX_FD
+ if [ $? -ne 0 ] ; then
+ warn "Could not set maximum file descriptor limit: $MAX_FD"
+ fi
+ else
+ warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+ fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+ GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
+ APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+ CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+
+ JAVACMD=`cygpath --unix "$JAVACMD"`
+
+ # We build the pattern for arguments to be converted via cygpath
+ ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+ SEP=""
+ for dir in $ROOTDIRSRAW ; do
+ ROOTDIRS="$ROOTDIRS$SEP$dir"
+ SEP="|"
+ done
+ OURCYGPATTERN="(^($ROOTDIRS))"
+ # Add a user-defined pattern to the cygpath arguments
+ if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+ OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+ fi
+ # Now convert the arguments - kludge to limit ourselves to /bin/sh
+ i=0
+ for arg in "$@" ; do
+ CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+ CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
+
+ if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
+ eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+ else
+ eval `echo args$i`="\"$arg\""
+ fi
+ i=`expr $i + 1`
+ done
+ case $i in
+ 0) set -- ;;
+ 1) set -- "$args0" ;;
+ 2) set -- "$args0" "$args1" ;;
+ 3) set -- "$args0" "$args1" "$args2" ;;
+ 4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+ 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+ 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+ 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+ 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+ 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+ esac
+fi
+
+# Escape application args
+save () {
+ for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+ echo " "
+}
+APP_ARGS=`save "$@"`
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+exec "$JAVACMD" "$@"
diff --git a/Android/gradlew.bat b/Android/gradlew.bat
new file mode 100644
index 00000000..107acd32
--- /dev/null
+++ b/Android/gradlew.bat
@@ -0,0 +1,89 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/Android/placeholder.txt b/Android/placeholder.txt
deleted file mode 100644
index c42a1fdd..00000000
--- a/Android/placeholder.txt
+++ /dev/null
@@ -1 +0,0 @@
-This is a placeholder... the Android content will come eventually!
diff --git a/Android/settings.gradle.kts b/Android/settings.gradle.kts
new file mode 100644
index 00000000..7e677746
--- /dev/null
+++ b/Android/settings.gradle.kts
@@ -0,0 +1,24 @@
+pluginManagement {
+ repositories {
+ google {
+ content {
+ includeGroupByRegex("com\\.android.*")
+ includeGroupByRegex("com\\.google.*")
+ includeGroupByRegex("androidx.*")
+ }
+ }
+ mavenCentral()
+ gradlePluginPortal()
+ }
+}
+dependencyResolutionManagement {
+ repositoriesMode.set(RepositoriesMode.FAIL_ON_PROJECT_REPOS)
+ repositories {
+ google()
+ mavenCentral()
+ }
+}
+
+rootProject.name = "My Application"
+include(":app")
+
\ No newline at end of file
diff --git a/Train_TFLite2_Object_Detection_Model.ipynb b/Train_TFLite2_Object_Detection_Model.ipynb
new file mode 100644
index 00000000..baa99aae
--- /dev/null
+++ b/Train_TFLite2_Object_Detection_Model.ipynb
@@ -0,0 +1,1866 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+    ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "fF8ysCfYKgTP"
+ },
+ "source": [
+ "# TensorFlow Lite Object Detection API in Colab\n",
+ "**Author:** Evan Juras, [EJ Technology Consultants](https://ejtech.io)\n",
+ "\n",
+ "**Last updated:** 1/28/23\n",
+ "\n",
+ "**GitHub:** [TensorFlow Lite Object Detection](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi)\n",
+ "\n",
+ "# Introduction\n",
+ "\n",
+ "This notebook uses [the TensorFlow 2 Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to train an SSD-MobileNet model or EfficientDet model with a custom dataset and convert it to TensorFlow Lite format. By working through this Colab, you'll be able to create and download a TFLite model that you can run on your PC, an Android phone, or an edge device like the Raspberry Pi.\n",
+ "\n",
+ "\n",
+    "\n",
+ "Custom SSD-MobileNet-FPNLite model in action!\n",
+    "\n",
+ "\n",
+ "I also made a YouTube video that walks through this guide step by step. I use a coin detection model as an example for the video. I recommend following along with the video while working through this notebook.\n",
+ "\n",
+ "\n",
+    "\n",
+ "Click here to go to the video!\n",
+    "\n",
+ "\n",
+ "**Important note: This notebook will be continuously updated to make sure it works with newer versions of TensorFlow. If you see any differences between the YouTube video and this notebook, always follow the notebook!**\n",
+ "\n",
+ "### Working in Colab\n",
+ "Colab provides a virtual machine in your browser complete with a Linux OS, filesystem, Python environment, and best of all, a free GPU. It comes with most TensorFlow backend requirements (like CUDA and cuDNN) pre-installed. Simply click the play button on sections of code in this notebook to execute them on the virtual machine.\n",
+ "\n",
+ "> *Note: Make sure you're using a GPU-equipped machine by going to \"Runtime\" -> \"Change runtime type\" in the top menu bar, and then selecting \"GPU\" from the Hardware accelerator dropdown.*\n",
+ "\n",
+ "This Colab notebook uses TensorFlow 2. If you'd like to use TensorFlow 1, please see my [TF1 Colab notebook](https://colab.research.google.com/github/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Train_TFLite1_Object_Detection_Model.ipynb).\n",
+ "\n",
+ "### Navigation\n",
+ "This is a long notebook! Each step of the training process has its own section. Click the arrow next to the heading for each section to expand it. You can use the table of contents in the left sidebar to jump from section to section."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "4VAvZo8qE4u5"
+ },
+ "source": [
+ "# 1. Gather and Label Training Images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ag0qD4XiBDcz"
+ },
+ "source": [
+    "Before we start training, we need to gather and label the images that will be used to train the object detection model. A good starting point for a proof-of-concept model is 200 images. The training images should include other random objects alongside the objects you want to detect, and they should cover a variety of backgrounds and lighting conditions.\n",
+ "\n",
+ "Watch the YouTube video below for instructions and tips on how to gather and label images for training an object detection model.\n",
+ "\n",
+ "\n",
+    "\n",
+ "Watch this video to learn how to capture and label images.\n",
+    "\n",
+ "\n",
+    "When you've finished gathering and labeling images, you should have a folder full of images with a corresponding .xml annotation file for each image. An example of a labeled image and the image folder for my coin detector model are shown below.\n",
+ "\n",
+ ""
+ ]
+ },
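+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "(Optional) If you'd like to spot-check a label file, the snippet below is a rough sketch that parses a single Pascal VOC-style .xml annotation (the format produced by common labeling tools such as LabelImg) and prints each labeled box. You can run it on your local PC, or later in this Colab once your images are uploaded in Section 3. Replace the example path with one of your own annotation files."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Rough sketch: inspect one Pascal VOC-style .xml annotation file (example path, change it to one of your files)\n",
+    "import xml.etree.ElementTree as ET\n",
+    "\n",
+    "tree = ET.parse('img1.xml')  # path to a single annotation file\n",
+    "root = tree.getroot()\n",
+    "print('Image:', root.find('filename').text)\n",
+    "for obj in root.findall('object'):\n",
+    "    name = obj.find('name').text\n",
+    "    box = obj.find('bndbox')\n",
+    "    print(name, box.find('xmin').text, box.find('ymin').text, box.find('xmax').text, box.find('ymax').text)"
+   ]
+  },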
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "sxb8_h-QFErO"
+ },
+ "source": [
+    "# 2. Install TensorFlow Object Detection Dependencies"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "l7EOtpvlLeS0"
+ },
+ "source": [
+ "First, we'll install the TensorFlow Object Detection API in this Google Colab instance. This requires cloning the [TensorFlow models repository](https://github.com/tensorflow/models) and running a couple installation commands. Click the play button to run the following sections of code.\n",
+ "\n",
+ "The latest version of TensorFlow this Colab has been verified to work with is TF v2.8.0.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ypWGYdPlLRUN"
+ },
+ "outputs": [],
+ "source": [
+ "# Clone the tensorflow models repository from GitHub\n",
+ "!pip uninstall Cython -y # Temporary fix for \"No module named 'object_detection'\" error\n",
+ "!git clone --depth 1 https://github.com/tensorflow/models"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "6QPmVBSlLTzM"
+ },
+ "outputs": [],
+ "source": [
+ "# Copy setup files into models/research folder\n",
+ "%%bash\n",
+ "cd models/research/\n",
+ "protoc object_detection/protos/*.proto --python_out=.\n",
+ "#cp object_detection/packages/tf2/setup.py ."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "NRBnuCKjM4Bd"
+ },
+ "outputs": [],
+ "source": [
+ "# Modify setup.py file to install the tf-models-official repository targeted at TF v2.8.0\n",
+ "import re\n",
+ "with open('/content/models/research/object_detection/packages/tf2/setup.py') as f:\n",
+ " s = f.read()\n",
+ "\n",
+ "with open('/content/models/research/setup.py', 'w') as f:\n",
+ " # Set fine_tune_checkpoint path\n",
+ " s = re.sub('tf-models-official>=2.5.1',\n",
+ " 'tf-models-official==2.8.0', s)\n",
+ " f.write(s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "OLDnCkLLwLr6"
+ },
+ "outputs": [],
+ "source": [
+ "# Install the Object Detection API (NOTE: This block takes about 10 minutes to finish executing)\n",
+ "\n",
+ "# Need to do a temporary fix with PyYAML because Colab isn't able to install PyYAML v5.4.1\n",
+ "!pip install pyyaml==5.3\n",
+ "!pip install /content/models/research/\n",
+ "\n",
+ "# Need to downgrade to TF v2.8.0 due to Colab compatibility bug with TF v2.10 (as of 10/03/22)\n",
+ "!pip install tensorflow==2.8.0\n",
+ "\n",
+ "# Install CUDA version 11.0 (to maintain compatibility with TF v2.8.0)\n",
+ "!pip install tensorflow_io==0.23.1\n",
+ "!wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin\n",
+ "!mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600\n",
+ "!wget http://developer.download.nvidia.com/compute/cuda/11.0.2/local_installers/cuda-repo-ubuntu1804-11-0-local_11.0.2-450.51.05-1_amd64.deb\n",
+ "!dpkg -i cuda-repo-ubuntu1804-11-0-local_11.0.2-450.51.05-1_amd64.deb\n",
+ "!apt-key add /var/cuda-repo-ubuntu1804-11-0-local/7fa2af80.pub\n",
+ "!apt-get update && sudo apt-get install cuda-toolkit-11-0\n",
+ "!export LD_LIBRARY_PATH=/usr/local/cuda-11.0/lib64:$LD_LIBRARY_PATH"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "6V7TrfUos-9E"
+ },
+ "source": [
+ "You may get warnings or errors related to package dependencies in the previous code block, but you can ignore them for now.\n",
+ "\n",
+ "Let's test our installation by running `model_builder_tf2_test.py` to make sure everything is working as expected. Run the following code block and confirm that it finishes without errors. If you get errors, try Googling them or checking the FAQ at the end of this Colab."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "wh_HPMOqWH9z"
+ },
+ "outputs": [],
+ "source": [
+    "# Run the model builder test file, just to verify everything's working properly\n",
+ "!python /content/models/research/object_detection/builders/model_builder_tf2_test.py\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "eydREUsMGUUR"
+ },
+ "source": [
+ "# 3. Upload Image Dataset and Prepare Training Data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mSZVCxE4nSVI"
+ },
+ "source": [
+ "In this section, we'll upload our data and prepare it for training with TensorFlow. We'll upload our images, split them into train, validation, and test folders, and then run scripts for creating TFRecords from our data.\n",
+ "\n",
+    "First, on your local PC, zip all your training images and XML files into a single archive called \"images.zip\". The files can sit directly inside the zip file, or in a nested folder as shown below:\n",
+ "```\n",
+ "images.zip\n",
+ "-- images\n",
+ " -- img1.jpg\n",
+ " -- img1.xml\n",
+ " -- img2.jpg\n",
+ " -- img2.xml\n",
+ " ...\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "LE1MtX4HGQA4"
+ },
+ "source": [
+ "### 3.1 Upload images\n",
+ "There are three options for moving the image files to this Colab instance."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "sFSJoDEnJotN"
+ },
+ "source": [
+ "**Option 1. Upload through Google Colab**\n",
+ "\n",
+ "Upload the \"images.zip\" file to the Google Colab instance by clicking the \"Files\" icon on the left hand side of the browser, and then the \"Upload to session storage\" icon. Select the zip folder to upload it.\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "hGsPlloAGIXB"
+ },
+ "source": [
+ "**Option 2. Copy from Google Drive**\n",
+ "\n",
+ "You can also upload your images to your personal Google Drive, mount the drive on this Colab session, and copy them over to the Colab filesystem. This option works well if you want to upload the images beforehand so you don't have to wait for them to upload each time you restart this Colab. If you have more than 50MB worth of images, I recommend using this option.\n",
+ "\n",
+ "First, upload the \"images.zip\" file to your Google Drive, and make note of the folder you uploaded them to. Replace `MyDrive/path/to/images.zip` with the path to your zip file. (For example, I uploaded the zip file to folder called \"change-counter1\", so I would use `MyDrive/change-counter1/images.zip` for the path). Then, run the following block of code to mount your Google Drive to this Colab session and copy the folder to this filesystem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "tLgAPsQsfTLs"
+ },
+ "outputs": [],
+ "source": [
+ "from google.colab import drive\n",
+ "drive.mount('/content/gdrive')\n",
+ "\n",
+ "!cp /content/gdrive/MyDrive/path/to/images.zip /content"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9xAJMKwpFilm"
+ },
+ "source": [
+ "**Option 3. Use coin detection dataset**\n",
+ "\n",
+ "If you don't have a dataset and just want to try training a model, you can download my coin image dataset to use as an example. I've uploaded a dataset containing 750 labeled images of pennies, nickels, dimes, and quarters. Run the following code block to download the dataset."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "suu_xPVZIEcH"
+ },
+ "outputs": [],
+ "source": [
+ "!wget -O /content/images.zip https://www.dropbox.com/s/gk57ec3v8dfuwcp/CoinPics_11NOV22.zip?dl=0 # United States coin images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "CHjOhoSGYwT7"
+ },
+ "source": [
+ "## 3.2 Split images into train, validation, and test folders\n",
+ "At this point, whether you used Option 1, 2, or 3, you should be able to click the folder icon on the left and see your \"images.zip\" file in the list of files. Now that the dataset is uploaded, let's unzip it and create some folders to hold the images. These directories are created in the /content folder in this instance's filesystem. You can browse the filesystem by clicking the \"Files\" icon on the left."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "mGvoHH-unSVO"
+ },
+ "outputs": [],
+ "source": [
+ "!mkdir /content/images\n",
+ "!unzip -q images.zip -d /content/images/all\n",
+ "!mkdir /content/images/train; mkdir /content/images/validation; mkdir /content/images/test"
+ ]
+ },
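+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "(Optional) Before splitting the data, you can run the quick check below to confirm that every image in the unzipped dataset has a matching .xml annotation file. This is just a rough sanity-check sketch; it assumes .jpg and .png images under /content/images/all."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sanity check: count images that are missing a matching .xml annotation file\n",
+    "import glob, os\n",
+    "\n",
+    "image_files = glob.glob('/content/images/all/**/*.jpg', recursive=True) + glob.glob('/content/images/all/**/*.png', recursive=True)\n",
+    "missing = [f for f in image_files if not os.path.exists(os.path.splitext(f)[0] + '.xml')]\n",
+    "print(len(image_files), 'images found;', len(missing), 'missing annotation files')"
+   ]
+  },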
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "n-6RIcrwbQMh"
+ },
+ "source": [
+ "Next, we'll split the images into train, validation, and test sets. Here's what each set is used for:\n",
+ "\n",
+ "\n",
+ "\n",
+ "* **Train**: These are the actual images used to train the model. In each step of training, a batch of images from the \"train\" set is passed into the neural network. The network predicts classes and locations of objects in the images. The training algorithm calculates the loss (i.e. how \"wrong\" the predictions were) and adjusts the network weights through backpropagation.\n",
+ "\n",
+ "\n",
+ "* **Validation**: Images from the \"validation\" set can be used by the training algorithm to check the progress of training and adjust hyperparameters (like learning rate). Unlike \"train\" images, these images are only used periodically during training (i.e. once every certain number of training steps).\n",
+ "\n",
+ "\n",
+ "* **Test**: These images are never seen by the neural network during training. They are intended to be used by a human to perform final testing of the model to check how accurate the model is.\n",
+ "\n",
+ "I wrote a Python script to randomly move 80% of the images to the \"train\" folder, 10% to the \"validation\" folder, and 10% to the \"test\" folder. Click play on the following block to download the script and execute it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "PfuZpmdBLjh-"
+ },
+ "outputs": [],
+ "source": [
+ "!wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/train_val_test_split.py\n",
+ "!python train_val_test_split.py"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "p--K1PJXEgNo"
+ },
+ "source": [
+ "## 3.3 Create Labelmap and TFRecords\n",
+ "Finally, we need to create a labelmap for the detector and convert the images into a data file format called TFRecords, which are used by TensorFlow for training. We'll use Python scripts to automatically convert the data into TFRecord format. Before running them, we need to define a labelmap for our classes.\n",
+ "\n",
+ "The code section below will create a \"labelmap.txt\" file that contains a list of classes. Replace the `class1`, `class2`, `class3` text with your own classes (for example, `penny`, `nickel`, `dime`, `quarter`), adding a new line for each class. Then, click play to execute the code."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "_DE_r4MKY7ln"
+ },
+ "outputs": [],
+ "source": [
+    "### This creates a \"labelmap.txt\" file with a list of classes the object detection model will detect.\n",
+ "%%bash\n",
+    "cat <<EOF >> /content/labelmap.txt\n",
+ "class1\n",
+ "class2\n",
+ "class3\n",
+ "EOF"
+ ]
+ },
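+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For reference, the conversion scripts in the next step will also generate a \"labelmap.pbtxt\" file from this class list. The snippet below is only a sketch that illustrates the .pbtxt format the TensorFlow Object Detection API expects; the scripts handle this for you, so you don't need to run it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustration only: print the labelmap.pbtxt format that gets generated from labelmap.txt\n",
+    "with open('/content/labelmap.txt') as f:\n",
+    "    classes = [line.strip() for line in f if line.strip()]\n",
+    "\n",
+    "for class_id, class_name in enumerate(classes, start=1):\n",
+    "    print('item {')\n",
+    "    print('  id: ' + str(class_id))\n",
+    "    print('  name: ' + repr(class_name))\n",
+    "    print('}')"
+   ]
+  },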
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5pa2VYhTIT1l"
+ },
+ "source": [
+ "Download and run the data conversion scripts from the [GitHub repository](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi) by clicking play on the following three sections of code. They will create TFRecord files for the train and validation datasets, as well as a `labelmap.pbtxt` file which contains the labelmap in a different format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "laZZE0TlEeUF"
+ },
+ "outputs": [],
+ "source": [
+ "# Download data conversion scripts\n",
+ "! wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/create_csv.py\n",
+ "! wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/create_tfrecord.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "5tdDbTmHYwu-"
+ },
+ "outputs": [],
+ "source": [
+ "# Create CSV data files and TFRecord files\n",
+ "!python3 create_csv.py\n",
+ "!python3 create_tfrecord.py --csv_input=images/train_labels.csv --labelmap=labelmap.txt --image_dir=images/train --output_path=train.tfrecord\n",
+ "!python3 create_tfrecord.py --csv_input=images/validation_labels.csv --labelmap=labelmap.txt --image_dir=images/validation --output_path=val.tfrecord"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "RNyv_YyDXwMs"
+ },
+ "source": [
+ "We'll store the locations of the TFRecord and labelmap files as variables so we can reference them later in this Colab session."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "YUd2wtfrqedy"
+ },
+ "outputs": [],
+ "source": [
+ "train_record_fname = '/content/train.tfrecord'\n",
+ "val_record_fname = '/content/val.tfrecord'\n",
+ "label_map_pbtxt_fname = '/content/labelmap.pbtxt'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "eGEUZYAMEZ6f"
+ },
+ "source": [
+ "# 4. Set Up Training Configuration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "I2MAcgJ53STW"
+ },
+ "source": [
+    "In this section, we'll set up the model and training configuration. We'll specify which pretrained TensorFlow model we want to use from the [TensorFlow 2 Object Detection Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md). Each model also comes with a configuration file that points to file locations, sets training parameters (such as learning rate and total number of training steps), and more. We'll modify the configuration file for our custom training job.\n",
+ "\n",
+    "The first section of code lists some models available in the TF2 Model Zoo and defines the filenames that will be used later to download each model and its config file. This makes it easy to manage which model you're using and to add other models to the list later.\n",
+ "\n",
+    "Set the \"chosen_model\" variable to match the name of the model you'd like to train with. It's currently set to use the \"ssd-mobilenet-v2-fpnlite-320\" model. Click play on the next block once the chosen model has been set.\n",
+ "\n",
+ "Not sure which model to pick? [Check out my blog post comparing each model's speed and accuracy.](https://ejtech.io/learn/tflite-object-detection-model-comparison)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "gN0EUEa3e5Un"
+ },
+ "outputs": [],
+ "source": [
+ "# Change the chosen_model variable to deploy different models available in the TF2 object detection zoo\n",
+ "chosen_model = 'ssd-mobilenet-v2-fpnlite-320'\n",
+ "\n",
+ "MODELS_CONFIG = {\n",
+ " 'ssd-mobilenet-v2': {\n",
+ " 'model_name': 'ssd_mobilenet_v2_320x320_coco17_tpu-8',\n",
+ " 'base_pipeline_file': 'ssd_mobilenet_v2_320x320_coco17_tpu-8.config',\n",
+ " 'pretrained_checkpoint': 'ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz',\n",
+ " },\n",
+ " 'efficientdet-d0': {\n",
+ " 'model_name': 'efficientdet_d0_coco17_tpu-32',\n",
+ " 'base_pipeline_file': 'ssd_efficientdet_d0_512x512_coco17_tpu-8.config',\n",
+ " 'pretrained_checkpoint': 'efficientdet_d0_coco17_tpu-32.tar.gz',\n",
+ " },\n",
+ " 'ssd-mobilenet-v2-fpnlite-320': {\n",
+ " 'model_name': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8',\n",
+ " 'base_pipeline_file': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config',\n",
+ " 'pretrained_checkpoint': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz',\n",
+ " },\n",
+ " # The centernet model isn't working as of 9/10/22\n",
+ " #'centernet-mobilenet-v2': {\n",
+ " # 'model_name': 'centernet_mobilenetv2fpn_512x512_coco17_od',\n",
+ " # 'base_pipeline_file': 'pipeline.config',\n",
+ " # 'pretrained_checkpoint': 'centernet_mobilenetv2fpn_512x512_coco17_od.tar.gz',\n",
+ " #}\n",
+ "}\n",
+ "\n",
+ "model_name = MODELS_CONFIG[chosen_model]['model_name']\n",
+ "pretrained_checkpoint = MODELS_CONFIG[chosen_model]['pretrained_checkpoint']\n",
+ "base_pipeline_file = MODELS_CONFIG[chosen_model]['base_pipeline_file']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "JMG3EEPqPggV"
+ },
+ "source": [
+ "Download the pretrained model file and configuration file by clicking Play on the following section."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "kG4TmJUVrYQ7"
+ },
+ "outputs": [],
+ "source": [
+ "# Create \"mymodel\" folder for holding pre-trained weights and configuration files\n",
+ "%mkdir /content/models/mymodel/\n",
+ "%cd /content/models/mymodel/\n",
+ "\n",
+ "# Download pre-trained model weights\n",
+ "import tarfile\n",
+ "download_tar = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/' + pretrained_checkpoint\n",
+ "!wget {download_tar}\n",
+ "tar = tarfile.open(pretrained_checkpoint)\n",
+ "tar.extractall()\n",
+ "tar.close()\n",
+ "\n",
+ "# Download training configuration file for model\n",
+ "download_config = 'https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/configs/tf2/' + base_pipeline_file\n",
+ "!wget {download_config}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BFAlqNrPn5y3"
+ },
+ "source": [
+ "Now that we've downloaded our model and config file, we need to modify the configuration file with some high-level training parameters. The following variables are used to control training steps:\n",
+ "\n",
+    "* **num_steps**: The total number of training steps to run. A good starting point is 40,000 steps. You can use more steps if you notice the loss metrics are still decreasing when training finishes. The more steps, the longer training will take. Training can also be stopped early if the loss flattens out before reaching the specified number of steps.\n",
+ "* **batch_size**: The number of images to use per training step. A larger batch size allows a model to be trained in fewer steps, but the size is limited by the GPU memory available for training. With the GPUs used in Colab instances, 16 is a good number for SSD models and 4 is good for EfficientDet models.\n",
+ "\n",
+ "Other training information, like the location of the pretrained model file, the config file, and total number of classes are also assigned in this step. To learn more about training configuration with the TensorFlow Object Detection API, read this [article from Neptune](https://neptune.ai/blog/tensorflow-object-detection-api-best-practices-to-training-evaluation-deployment)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "1lYDvJN-n69v"
+ },
+ "outputs": [],
+ "source": [
+ "# Set training parameters for the model\n",
+ "num_steps = 40000\n",
+ "\n",
+ "if chosen_model == 'efficientdet-d0':\n",
+ " batch_size = 4\n",
+ "else:\n",
+ " batch_size = 16"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "b_ki9jOqxn7V"
+ },
+ "outputs": [],
+ "source": [
+ "# Set file locations and get number of classes for config file\n",
+ "pipeline_fname = '/content/models/mymodel/' + base_pipeline_file\n",
+ "fine_tune_checkpoint = '/content/models/mymodel/' + model_name + '/checkpoint/ckpt-0'\n",
+ "\n",
+ "def get_num_classes(pbtxt_fname):\n",
+ " from object_detection.utils import label_map_util\n",
+ " label_map = label_map_util.load_labelmap(pbtxt_fname)\n",
+ " categories = label_map_util.convert_label_map_to_categories(\n",
+ " label_map, max_num_classes=90, use_display_name=True)\n",
+ " category_index = label_map_util.create_category_index(categories)\n",
+ " return len(category_index.keys())\n",
+ "num_classes = get_num_classes(label_map_pbtxt_fname)\n",
+ "print('Total classes:', num_classes)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cwPyaIAXxyKu"
+ },
+ "source": [
+ "Next, we'll rewrite the config file to use the training parameters we just specified. The following section of code will automatically replace the necessary parameters in the downloaded .config file and save it as our custom \"pipeline_file.config\" file."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "5eA5ht3_yukT"
+ },
+ "outputs": [],
+ "source": [
+ "# Create custom configuration file by writing the dataset, model checkpoint, and training parameters into the base pipeline file\n",
+ "import re\n",
+ "\n",
+ "%cd /content/models/mymodel\n",
+ "print('writing custom configuration file')\n",
+ "\n",
+ "with open(pipeline_fname) as f:\n",
+ " s = f.read()\n",
+ "with open('pipeline_file.config', 'w') as f:\n",
+ "\n",
+ " # Set fine_tune_checkpoint path\n",
+ " s = re.sub('fine_tune_checkpoint: \".*?\"',\n",
+ " 'fine_tune_checkpoint: \"{}\"'.format(fine_tune_checkpoint), s)\n",
+ "\n",
+ " # Set tfrecord files for train and test datasets\n",
+ " s = re.sub(\n",
+ " '(input_path: \".*?)(PATH_TO_BE_CONFIGURED/train)(.*?\")', 'input_path: \"{}\"'.format(train_record_fname), s)\n",
+ " s = re.sub(\n",
+ " '(input_path: \".*?)(PATH_TO_BE_CONFIGURED/val)(.*?\")', 'input_path: \"{}\"'.format(val_record_fname), s)\n",
+ "\n",
+ " # Set label_map_path\n",
+ " s = re.sub(\n",
+ " 'label_map_path: \".*?\"', 'label_map_path: \"{}\"'.format(label_map_pbtxt_fname), s)\n",
+ "\n",
+ " # Set batch_size\n",
+ " s = re.sub('batch_size: [0-9]+',\n",
+ " 'batch_size: {}'.format(batch_size), s)\n",
+ "\n",
+ " # Set training steps, num_steps\n",
+ " s = re.sub('num_steps: [0-9]+',\n",
+ " 'num_steps: {}'.format(num_steps), s)\n",
+ "\n",
+ " # Set number of classes num_classes\n",
+ " s = re.sub('num_classes: [0-9]+',\n",
+ " 'num_classes: {}'.format(num_classes), s)\n",
+ "\n",
+ " # Change fine-tune checkpoint type from \"classification\" to \"detection\"\n",
+ " s = re.sub(\n",
+ " 'fine_tune_checkpoint_type: \"classification\"', 'fine_tune_checkpoint_type: \"{}\"'.format('detection'), s)\n",
+ "\n",
+ " # If using ssd-mobilenet-v2, reduce learning rate (because it's too high in the default config file)\n",
+ " if chosen_model == 'ssd-mobilenet-v2':\n",
+ " s = re.sub('learning_rate_base: .8',\n",
+ " 'learning_rate_base: .08', s)\n",
+ "\n",
+ " s = re.sub('warmup_learning_rate: 0.13333',\n",
+ " 'warmup_learning_rate: .026666', s)\n",
+ "\n",
+ " # If using efficientdet-d0, use fixed_shape_resizer instead of keep_aspect_ratio_resizer (because it isn't supported by TFLite)\n",
+ " if chosen_model == 'efficientdet-d0':\n",
+ " s = re.sub('keep_aspect_ratio_resizer', 'fixed_shape_resizer', s)\n",
+ " s = re.sub('pad_to_max_dimension: true', '', s)\n",
+ " s = re.sub('min_dimension', 'height', s)\n",
+ " s = re.sub('max_dimension', 'width', s)\n",
+ "\n",
+ " f.write(s)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "GDySP7TLzdCM"
+ },
+ "source": [
+ "(Optional) If you're curious, you can display the configuration file's contents here in the browser by running the line of code below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "HEsOLOMHzBqF"
+ },
+ "outputs": [],
+ "source": [
+ "# (Optional) Display the custom configuration file's contents\n",
+ "!cat /content/models/mymodel/pipeline_file.config"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "UXpnXYC908Zl"
+ },
+ "source": [
+ "Finally, let's set the locations of the configuration file and model output directory as variables so we can reference them when we call the training command."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "GMlaN3rs3zLe"
+ },
+ "outputs": [],
+ "source": [
+ "# Set the path to the custom config file and the directory to store training checkpoints in\n",
+ "pipeline_file = '/content/models/mymodel/pipeline_file.config'\n",
+ "model_dir = '/content/training/'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-19zML6oEO7l"
+ },
+ "source": [
+ "# 5. Train Custom TFLite Detection Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XxPj_QV43qD5"
+ },
+ "source": [
+ "We're ready to train our object detection model! Before we start training, let's load up a TensorBoard session to monitor training progress. Run the following section of code, and a TensorBoard session will appear in the browser. It won't show anything yet, because we haven't started training. Once training starts, come back and click the refresh button to see the model's overall loss.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "TI9iCCxoNlAL"
+ },
+ "outputs": [],
+ "source": [
+ "%load_ext tensorboard\n",
+ "%tensorboard --logdir '/content/training/train'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5cuQpPJL2pUq"
+ },
+ "source": [
+ "Model training is performed using the \"model_main_tf2.py\" script from the TF Object Detection API. Training will take anywhere from 2 to 6 hours, depending on the model, batch size, and number of training steps. We've already defined all the parameters and arguments used by `model_main_tf2.py` in previous sections of this Colab. Just click Play on the following block to begin training!\n",
+ "\n",
+ "\n",
+ "\n",
+ "> *Note: It takes a few minutes for the program to display any training messages, because it only displays logs once every 100 steps. If it seems like nothing is happening, just wait a couple minutes.*"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "tQTfZChVzzpZ"
+ },
+ "outputs": [],
+ "source": [
+ "# Run training!\n",
+ "!python /content/models/research/object_detection/model_main_tf2.py \\\n",
+ " --pipeline_config_path={pipeline_file} \\\n",
+ " --model_dir={model_dir} \\\n",
+ " --alsologtostderr \\\n",
+ " --num_train_steps={num_steps} \\\n",
+ " --sample_1_of_n_eval_examples=1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WHxbX4ZpzXIv"
+ },
+ "source": [
+ "If you want to stop training early, just click Stop a couple times or right-click on the code block and select \"Interrupt Execution\". Otherwise, training will stop by itself once it reaches the specified number of training steps.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kPg8oMnQDYKl"
+ },
+ "source": [
+ "# 6. Convert Model to TensorFlow Lite"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "spQXdq8Y63pj"
+ },
+ "source": [
+ "Alright! Our model is all trained up and ready to be used for detecting objects. First, we need to export the model graph (a file that contains information about the architecture and weights) to a TensorFlow Lite-compatible format. We'll do this using the `export_tflite_graph_tf2.py` script."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "RaUU8tBlHifd"
+ },
+ "outputs": [],
+ "source": [
+ "# Make a directory to store the trained TFLite model\n",
+ "!mkdir /content/custom_model_lite\n",
+ "output_directory = '/content/custom_model_lite'\n",
+ "\n",
+ "# Path to training directory (the conversion script automatically chooses the highest checkpoint file)\n",
+ "last_model_path = '/content/training'\n",
+ "\n",
+ "!python /content/models/research/object_detection/export_tflite_graph_tf2.py \\\n",
+ " --trained_checkpoint_dir {last_model_path} \\\n",
+ " --output_directory {output_directory} \\\n",
+ " --pipeline_config_path {pipeline_file}\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "z_NuapO2VROu"
+ },
+ "source": [
+ "Next, we'll take the exported graph and use the `TFLiteConverter` module to convert it to `.tflite` FlatBuffer format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "TsE_uVjlsz3u"
+ },
+ "outputs": [],
+ "source": [
+ "# Convert exported graph file into TFLite model file\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "converter = tf.lite.TFLiteConverter.from_saved_model('/content/custom_model_lite/saved_model')\n",
+ "tflite_model = converter.convert()\n",
+ "\n",
+ "with open('/content/custom_model_lite/detect.tflite', 'wb') as f:\n",
+ " f.write(tflite_model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "RDQrtQhvC3oG"
+ },
+ "source": [
+ "# 7. Test TensorFlow Lite Model and Calculate mAP"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "vtSmUZcxIAvt"
+ },
+ "source": [
+ "We've trained our custom model and converted it to TFLite format. But how well does it actually perform at detecting objects in images? This is where the images we set aside in the **test** folder come in. The model never saw any test images during training, so its performance on these images should be representative of how it will perform on new images from the field.\n",
+ "\n",
+ "### 7.1 Inference test images\n",
+ "The following code defines a function to run inference on test images. It loads the images, loads the model and labelmap, runs the model on each image, and displays the result. It also optionally saves detection results as text files so we can use them to calculate model mAP score.\n",
+ "\n",
+ "This code is based off the [TFLite_detection_image.py](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/TFLite_detection_image.py) script from my [TensorFlow Lite Object Detection repository on GitHub](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi); feel free to use it as a starting point for your own application."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "e4WtI8i5K96w"
+ },
+ "outputs": [],
+ "source": [
+ "# Script to run custom TFLite model on test images to detect objects\n",
+ "# Source: https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/TFLite_detection_image.py\n",
+ "\n",
+ "# Import packages\n",
+ "import os\n",
+ "import cv2\n",
+ "import numpy as np\n",
+ "import sys\n",
+ "import glob\n",
+ "import random\n",
+ "import importlib.util\n",
+ "from tensorflow.lite.python.interpreter import Interpreter\n",
+ "\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "%matplotlib inline\n",
+ "\n",
+ "### Define function for inferencing with TFLite model and displaying results\n",
+ "\n",
+ "def tflite_detect_images(modelpath, imgpath, lblpath, min_conf=0.5, num_test_images=10, savepath='/content/results', txt_only=False):\n",
+ "\n",
+ " # Grab filenames of all images in test folder\n",
+ " images = glob.glob(imgpath + '/*.jpg') + glob.glob(imgpath + '/*.JPG') + glob.glob(imgpath + '/*.png') + glob.glob(imgpath + '/*.bmp')\n",
+ "\n",
+ " # Load the label map into memory\n",
+ " with open(lblpath, 'r') as f:\n",
+ " labels = [line.strip() for line in f.readlines()]\n",
+ "\n",
+ " # Load the Tensorflow Lite model into memory\n",
+ " interpreter = Interpreter(model_path=modelpath)\n",
+ " interpreter.allocate_tensors()\n",
+ "\n",
+ " # Get model details\n",
+ " input_details = interpreter.get_input_details()\n",
+ " output_details = interpreter.get_output_details()\n",
+ " height = input_details[0]['shape'][1]\n",
+ " width = input_details[0]['shape'][2]\n",
+ "\n",
+ " float_input = (input_details[0]['dtype'] == np.float32)\n",
+ "\n",
+ " input_mean = 127.5\n",
+ " input_std = 127.5\n",
+ "\n",
+ " # Randomly select test images\n",
+ " images_to_test = random.sample(images, num_test_images)\n",
+ "\n",
+ " # Loop over every image and perform detection\n",
+ " for image_path in images_to_test:\n",
+ "\n",
+ " # Load image and resize to expected shape [1xHxWx3]\n",
+ " image = cv2.imread(image_path)\n",
+ " image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
+ " imH, imW, _ = image.shape\n",
+ " image_resized = cv2.resize(image_rgb, (width, height))\n",
+ " input_data = np.expand_dims(image_resized, axis=0)\n",
+ "\n",
+ " # Normalize pixel values if using a floating model (i.e. if model is non-quantized)\n",
+ " if float_input:\n",
+ " input_data = (np.float32(input_data) - input_mean) / input_std\n",
+ "\n",
+ " # Perform the actual detection by running the model with the image as input\n",
+ " interpreter.set_tensor(input_details[0]['index'],input_data)\n",
+ " interpreter.invoke()\n",
+ "\n",
+ " # Retrieve detection results\n",
+ " boxes = interpreter.get_tensor(output_details[1]['index'])[0] # Bounding box coordinates of detected objects\n",
+ " classes = interpreter.get_tensor(output_details[3]['index'])[0] # Class index of detected objects\n",
+ " scores = interpreter.get_tensor(output_details[0]['index'])[0] # Confidence of detected objects\n",
+ "\n",
+ " detections = []\n",
+ "\n",
+ " # Loop over all detections and draw detection box if confidence is above minimum threshold\n",
+ " for i in range(len(scores)):\n",
+ " if ((scores[i] > min_conf) and (scores[i] <= 1.0)):\n",
+ "\n",
+ " # Get bounding box coordinates and draw box\n",
+ " # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()\n",
+ " ymin = int(max(1,(boxes[i][0] * imH)))\n",
+ " xmin = int(max(1,(boxes[i][1] * imW)))\n",
+ " ymax = int(min(imH,(boxes[i][2] * imH)))\n",
+ " xmax = int(min(imW,(boxes[i][3] * imW)))\n",
+ "\n",
+ " cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n",
+ "\n",
+ " # Draw label\n",
+ " object_name = labels[int(classes[i])] # Look up object name from \"labels\" array using class index\n",
+ " label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'\n",
+ " labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n",
+ " label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n",
+ " cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n",
+ " cv2.putText(image, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n",
+ "\n",
+ " detections.append([object_name, scores[i], xmin, ymin, xmax, ymax])\n",
+ "\n",
+ "\n",
+ " # All the results have been drawn on the image, now display the image\n",
+ " if txt_only == False: # \"text_only\" controls whether we want to display the image results or just save them in .txt files\n",
+ " image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n",
+ " plt.figure(figsize=(12,16))\n",
+ " plt.imshow(image)\n",
+ " plt.show()\n",
+ "\n",
+ " # Save detection results in .txt files (for calculating mAP)\n",
+ " elif txt_only == True:\n",
+ "\n",
+ " # Get filenames and paths\n",
+ " image_fn = os.path.basename(image_path)\n",
+ " base_fn, ext = os.path.splitext(image_fn)\n",
+ " txt_result_fn = base_fn +'.txt'\n",
+ " txt_savepath = os.path.join(savepath, txt_result_fn)\n",
+ "\n",
+ " # Write results to text file\n",
+ " # (Using format defined by https://github.com/Cartucho/mAP, which will make it easy to calculate mAP)\n",
+ " with open(txt_savepath,'w') as f:\n",
+ " for detection in detections:\n",
+ " f.write('%s %.4f %d %d %d %d\\n' % (detection[0], detection[1], detection[2], detection[3], detection[4], detection[5]))\n",
+ "\n",
+ " return"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-CJI4A0f_zqz"
+ },
+ "source": [
+ "The next block sets the paths to the test images and models and then runs the inferencing function. If you want to use more than 10 images, change the `images_to_test` variable. Click play to run inferencing!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "6t8CMarqBqP9"
+ },
+ "outputs": [],
+ "source": [
+ "# Set up variables for running user's model\n",
+ "PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n",
+ "PATH_TO_MODEL='/content/custom_model_lite/detect.tflite' # Path to .tflite model file\n",
+ "PATH_TO_LABELS='/content/labelmap.txt' # Path to labelmap.txt file\n",
+ "min_conf_threshold=0.5 # Confidence threshold (try changing this to 0.01 if you don't see any detection results)\n",
+ "images_to_test = 10 # Number of images to run detection on\n",
+ "\n",
+ "# Run inferencing function!\n",
+ "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "N_ckqeWqBF0P"
+ },
+ "source": [
+ "### 7.2 Calculate mAP\n",
+ "Now we have a visual sense of how our model performs on test images, but how can we quantitatively measure its accuracy?\n",
+ "\n",
+ "One popular methord for measuring object detection model accuracy is \"mean average precision\" (mAP). Basically, the higher the mAP score, the better your model is at detecting objects in images. To learn more about mAP, read through this [article from Roboflow](https://blog.roboflow.com/mean-average-precision/).\n",
+ "\n",
+ "We'll use the mAP calculator tool at https://github.com/Cartucho/mAP to determine our model's mAP score. First, we need to clone the repository and remove its existing example data. We'll also download a script I wrote for interfacing with the calculator."
+ ]
+ },
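+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(Optional) Before setting up the calculator, the next cell gives a quick, self-contained illustration of IoU (intersection over union), the box-overlap measure that mAP is built on. The box coordinates are made up purely for intuition, and the cell has no effect on the actual mAP calculation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# (Optional) Illustrative IoU calculation with made-up boxes\n",
+ "# The Cartucho mAP tool below handles the real comparison between ground truth and detections\n",
+ "def iou(box_a, box_b):\n",
+ "  # Boxes are [xmin, ymin, xmax, ymax] in pixels\n",
+ "  x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])\n",
+ "  x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])\n",
+ "  intersection = max(0, x2 - x1) * max(0, y2 - y1)\n",
+ "  area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])\n",
+ "  area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])\n",
+ "  return intersection / float(area_a + area_b - intersection)\n",
+ "\n",
+ "ground_truth_box = [50, 50, 200, 200] # Hypothetical labeled box\n",
+ "detected_box = [60, 55, 210, 190] # Hypothetical model detection\n",
+ "print('IoU = %.2f' % iou(ground_truth_box, detected_box)) # Counts as a true positive if IoU >= the chosen threshold (e.g. 0.50)"
+ ]
+ },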
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "JlWarXEZDUqS"
+ },
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "git clone https://github.com/Cartucho/mAP /content/mAP\n",
+ "cd /content/mAP\n",
+ "rm input/detection-results/*\n",
+ "rm input/ground-truth/*\n",
+ "rm input/images-optional/*\n",
+ "wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/calculate_map_cartucho.py"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "qn22nGGqH5T6"
+ },
+ "source": [
+ "Next, we'll copy the images and annotation data from the **test** folder to the appropriate folders inside the cloned repository. These will be used as the \"ground truth data\" that our model's detection results will be compared to.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "5szFfVxwI3wT"
+ },
+ "outputs": [],
+ "source": [
+ "!cp /content/images/test/* /content/mAP/input/images-optional # Copy images and xml files\n",
+ "!mv /content/mAP/input/images-optional/*.xml /content/mAP/input/ground-truth/ # Move xml files to the appropriate folder"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "u6aro817DGzx"
+ },
+ "source": [
+ "The calculator tool expects annotation data in a format that's different from the Pascal VOC .xml file format we're using. Fortunately, it provides an easy script, `convert_gt_xml.py`, for converting to the expected .txt format.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "qdjtOUDnK2AA"
+ },
+ "outputs": [],
+ "source": [
+ "!python /content/mAP/scripts/extra/convert_gt_xml.py"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mnIUacAlLP0B"
+ },
+ "source": [
+ "Okay, we've set up the ground truth data, but now we need actual detection results from our model. The detection results will be compared to the ground truth data to calculate the model's accuracy in mAP.\n",
+ "\n",
+ "The inference function we defined in Step 7.1 can be used to generate detection data for all the images in the **test** folder. We'll use it the same as before, except this time we'll tell it to save detection results into the `detection-results` folder.\n",
+ "\n",
+ "Click Play to run the following code block!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "szzHFAhsMNFF"
+ },
+ "outputs": [],
+ "source": [
+ "# Set up variables for running inference, this time to get detection results saved as .txt files\n",
+ "PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n",
+ "PATH_TO_MODEL='/content/custom_model_lite/detect.tflite' # Path to .tflite model file\n",
+ "PATH_TO_LABELS='/content/labelmap.txt' # Path to labelmap.txt file\n",
+ "PATH_TO_RESULTS='/content/mAP/input/detection-results' # Folder to save detection results in\n",
+ "min_conf_threshold=0.1 # Confidence threshold\n",
+ "\n",
+ "# Use all the images in the test folder\n",
+ "image_list = glob.glob(PATH_TO_IMAGES + '/*.jpg') + glob.glob(PATH_TO_IMAGES + '/*.JPG') + glob.glob(PATH_TO_IMAGES + '/*.png') + glob.glob(PATH_TO_IMAGES + '/*.bmp')\n",
+ "images_to_test = min(500, len(image_list)) # If there are more than 500 images in the folder, just use 500\n",
+ "\n",
+ "# Tell function to just save results and not display images\n",
+ "txt_only = True\n",
+ "\n",
+ "# Run inferencing function!\n",
+ "print('Starting inference on %d images...' % images_to_test)\n",
+ "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test, PATH_TO_RESULTS, txt_only)\n",
+ "print('Finished inferencing!')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "e_QRnTqNPX4z"
+ },
+ "source": [
+ "Finally, let's calculate mAP! One popular style for reporting mAP is the COCO metric for mAP @ 0.50:0.95. Basically, this means that mAP is calculated at several IoU thresholds between 0.50 and 0.95, and then the result from each threshold is averaged to get a final mAP score. [Learn more here!](https://blog.roboflow.com/mean-average-precision/)\n",
+ "\n",
+ "I wrote a script to run the calculator tool at each IoU threshold, average the results, and report the final accuracy score. It reports mAP for each class and overall mAP. Click Play on the following two blocks to calculate mAP!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "3DkjpIBARTQ7"
+ },
+ "outputs": [],
+ "source": [
+ "%cd /content/mAP\n",
+ "!python calculate_map_cartucho.py --labels=/content/labelmap.txt"
+ ]
+ },
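+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(Optional) As mentioned above, the final score is mAP averaged over IoU thresholds from 0.50 to 0.95. If you are curious about that last averaging step, the next cell shows the arithmetic on made-up per-threshold values; the real per-threshold values come from the script output above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# (Optional) Toy illustration of COCO-style mAP @ 0.50:0.95 averaging\n",
+ "# The per-threshold scores here are made up; calculate_map_cartucho.py computes the real ones\n",
+ "import numpy as np\n",
+ "\n",
+ "iou_thresholds = np.arange(0.50, 1.00, 0.05) # 0.50, 0.55, ..., 0.95\n",
+ "example_map_per_threshold = np.linspace(0.85, 0.40, len(iou_thresholds)) # Hypothetical mAP at each threshold\n",
+ "\n",
+ "for thresh, val in zip(iou_thresholds, example_map_per_threshold):\n",
+ "  print('mAP @ %.2f IoU: %.3f' % (thresh, val))\n",
+ "print('Final mAP @ 0.50:0.95 = %.3f' % example_map_per_threshold.mean())"
+ ]
+ },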
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "R9HPoOBVKvxU"
+ },
+ "source": [
+ "The score reported at the end is your model's overall mAP score. Ideally, it should be above 50% (0.50). If it isn't, you can increase your model's accuracy by adding more images to your dataset. See my [dataset video](https://www.youtube.com/watch?v=v0ssiOY6cfg) for tips on how to capture good training images and improve accuracy."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5i40ve0SCLaE"
+ },
+ "source": [
+ "# 8. Deploy TensorFlow Lite Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "phT8vvzriqQp"
+ },
+ "source": [
+ "Now that your custom model has been trained and converted to TFLite format, it's ready to be downloaded and deployed in an application! This section shows how to download the model and provides links to instructions for deploying it on the Raspberry Pi, your PC, or other edge devices."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "zq3L2IoP4VHp"
+ },
+ "source": [
+ "## 8.1. Download TFLite model\n",
+ "\n",
+ "Run the two following cells to copy the labelmap files into the model folder, compress it into a zip folder, and then download it. The zip folder contains the `detect.tflite` model and `labelmap.txt` labelmap files that are needed to run the model in your application."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "awZMQGVqMpVL"
+ },
+ "outputs": [],
+ "source": [
+ "# Move labelmap and pipeline config files into TFLite model folder and zip it up\n",
+ "!cp /content/labelmap.txt /content/custom_model_lite\n",
+ "!cp /content/labelmap.pbtxt /content/custom_model_lite\n",
+ "!cp /content/models/mymodel/pipeline_file.config /content/custom_model_lite\n",
+ "\n",
+ "%cd /content\n",
+ "!zip -r custom_model_lite.zip custom_model_lite"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "FVPfAGbNPV56"
+ },
+ "outputs": [],
+ "source": [
+ "from google.colab import files\n",
+ "\n",
+ "files.download('/content/custom_model_lite.zip')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9Kb3ZBsMq95l"
+ },
+ "source": [
+ "The `custom_model_lite.zip` file containing the model will download into your Downloads folder. It's ready to be deployed on your device!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "GSJ2wgGCixy2"
+ },
+ "source": [
+ "## 8.2. Deploy model\n",
+ "TensorFlow Lite models can run on a wide variety of hardware, including PCs, embedded systems, and phones. This section provides instructions showing how to deploy your TFLite model on various devices.\n",
+ "\n",
+ "### 8.2.1. Deploy on Raspberry Pi\n",
+ "TFLite models are great for running on the Raspberry Pi, because they require less processing power than regular TensorFlow vision models. The Pi can run TFLite models in near real-time.\n",
+ "\n",
+ "To run your new model on the Raspberry Pi, you'll have to install TensorFlow Lite and prepare a Python environment for your application. I provide step-by-step instructions on how to set up TFLite on the Pi in my video, [How To Run TensorFlow Lite on Raspberry Pi for Object Detection](https://youtu.be/aimSGOAUI8Y).\n",
+ "\n",
+ "[](https://www.youtube.com/watch?v=aimSGOAUI8Y)\n",
+ "\n",
+ "Once you've completed all the steps in the video, move the `custom_model_lite.zip` file downloaded from this Colab session over to your Raspberry Pi into the `~/tflite1` folder. Move into the folder and unzip it by issuing:\n",
+ "\n",
+ "```\n",
+ "cd ~/tflite1\n",
+ "unzip custom_model_lite.zip\n",
+ "```\n",
+ "\n",
+ "Then, run the image, video, or webcam TFLite detection program with the `--modeldir=fine_tuned_model_lite` argument. For example, to run the webcam detection program, issue:\n",
+ "\n",
+ "```\n",
+ "python TFLite_detection_webcam.py --modeldir=custom_model_lite\n",
+ "```\n",
+ "\n",
+ "A window will appear showing a live feed from your webcam with boxes drawn around detected objects in each frame.\n",
+ "\n",
+ "### 8.2.2. Deploy on Windows, Linux, or macOS\n",
+ "Follow the instructions linked below to quickly set up your Windows, Linux, or macOS computer to run TFLite models. It only takes a few minutes! Running a model on your PC is good for quickly testing your model with a webcam. However, keep in mind that the TFLite Runtime is optimized for lower-power processors, and it won't utilize the full capability of your PC's processor.\n",
+ "\n",
+ "Here are links to the deployment guides for Windows, Linux, and macOS:\n",
+ "* [How to Run TensorFlow Lite Models on Windows](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/deploy_guides/Windows_TFLite_Guide.md)\n",
+ "* *link to Linux guide to be added (but really it's the same as Raspberry Pi)*\n",
+ "* *link to macOS guide to be added*\n",
+ "\n",
+ "### 8.2.3. Deploy on other Linux-based edge devices\n",
+ "Instructions to be added! 🐧\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 8.2.4. Deploy on Android"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### 8.2.4.1. Add metadata to your TensorFlow Lite model\n",
+ "This step is required to load the TFLite model on an Android device or you will get an error!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install tflite_support==0.4.4\n",
+ "\n",
+ "%cd /content/custom_model_lite\n",
+ "\n",
+ "from tflite_support.metadata_writers import object_detector\n",
+ "from tflite_support.metadata_writers import writer_utils\n",
+ "from tflite_support import metadata\n",
+ "import flatbuffers\n",
+ "import os\n",
+ "from tensorflow_lite_support.metadata import metadata_schema_py_generated as _metadata_fb\n",
+ "from tensorflow_lite_support.metadata.python import metadata as _metadata\n",
+ "from tensorflow_lite_support.metadata.python.metadata_writers import metadata_info\n",
+ "from tensorflow_lite_support.metadata.python.metadata_writers import metadata_writer\n",
+ "from tensorflow_lite_support.metadata.python.metadata_writers import writer_utils\n",
+ "\n",
+ "ObjectDetectorWriter = object_detector.MetadataWriter\n",
+ "\n",
+ "_MODEL_PATH = \"/content/custom_model_lite/detect.tflite\"\n",
+ "_LABEL_FILE = \"/content/labelmap.txt\"\n",
+ "_SAVE_TO_PATH = \"/content/custom_model_lite/detect_with_metadata.tflite\"\n",
+ "\n",
+ "writer = ObjectDetectorWriter.create_for_inference(\n",
+ " writer_utils.load_file(_MODEL_PATH), [127.5], [127.5], [_LABEL_FILE])\n",
+ "writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)\n",
+ "\n",
+ "# Verify the populated metadata and associated files.\n",
+ "displayer = metadata.MetadataDisplayer.with_model_file(_SAVE_TO_PATH)\n",
+ "print(\"Metadata populated:\")\n",
+ "print(displayer.get_metadata_json())\n",
+ "print(\"Associated file(s) populated:\")\n",
+ "print(displayer.get_packed_associated_file_list())\n",
+ "\n",
+ "model_meta = _metadata_fb.ModelMetadataT()\n",
+ "model_meta.name = \"SSD_Detector\"\n",
+ "model_meta.description = (\n",
+ " \"Identify which of a known set of objects might be present and provide \"\n",
+ " \"information about their positions within the given image or a video \"\n",
+ " \"stream.\")\n",
+ "\n",
+ "# Creates input info.\n",
+ "input_meta = _metadata_fb.TensorMetadataT()\n",
+ "input_meta.name = \"image\"\n",
+ "input_meta.content = _metadata_fb.ContentT()\n",
+ "input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()\n",
+ "input_meta.content.contentProperties.colorSpace = (\n",
+ " _metadata_fb.ColorSpaceType.RGB)\n",
+ "input_meta.content.contentPropertiesType = (\n",
+ " _metadata_fb.ContentProperties.ImageProperties)\n",
+ "input_normalization = _metadata_fb.ProcessUnitT()\n",
+ "input_normalization.optionsType = (\n",
+ " _metadata_fb.ProcessUnitOptions.NormalizationOptions)\n",
+ "input_normalization.options = _metadata_fb.NormalizationOptionsT()\n",
+ "input_normalization.options.mean = [127.5]\n",
+ "input_normalization.options.std = [127.5]\n",
+ "input_meta.processUnits = [input_normalization]\n",
+ "input_stats = _metadata_fb.StatsT()\n",
+ "input_stats.max = [255]\n",
+ "input_stats.min = [0]\n",
+ "input_meta.stats = input_stats\n",
+ "\n",
+ "# Creates outputs info.\n",
+ "output_location_meta = _metadata_fb.TensorMetadataT()\n",
+ "output_location_meta.name = \"location\"\n",
+ "output_location_meta.description = \"The locations of the detected boxes.\"\n",
+ "output_location_meta.content = _metadata_fb.ContentT()\n",
+ "output_location_meta.content.contentPropertiesType = (\n",
+ " _metadata_fb.ContentProperties.BoundingBoxProperties)\n",
+ "output_location_meta.content.contentProperties = (\n",
+ " _metadata_fb.BoundingBoxPropertiesT())\n",
+ "output_location_meta.content.contentProperties.index = [1, 0, 3, 2]\n",
+ "output_location_meta.content.contentProperties.type = (\n",
+ " _metadata_fb.BoundingBoxType.BOUNDARIES)\n",
+ "output_location_meta.content.contentProperties.coordinateType = (\n",
+ " _metadata_fb.CoordinateType.RATIO)\n",
+ "output_location_meta.content.range = _metadata_fb.ValueRangeT()\n",
+ "output_location_meta.content.range.min = 2\n",
+ "output_location_meta.content.range.max = 2\n",
+ "\n",
+ "output_class_meta = _metadata_fb.TensorMetadataT()\n",
+ "output_class_meta.name = \"category\"\n",
+ "output_class_meta.description = \"The categories of the detected boxes.\"\n",
+ "output_class_meta.content = _metadata_fb.ContentT()\n",
+ "output_class_meta.content.contentPropertiesType = (\n",
+ " _metadata_fb.ContentProperties.FeatureProperties)\n",
+ "output_class_meta.content.contentProperties = (\n",
+ " _metadata_fb.FeaturePropertiesT())\n",
+ "output_class_meta.content.range = _metadata_fb.ValueRangeT()\n",
+ "output_class_meta.content.range.min = 2\n",
+ "output_class_meta.content.range.max = 2\n",
+ "label_file = _metadata_fb.AssociatedFileT()\n",
+ "label_file.name = os.path.basename(\"labelmap.txt\")\n",
+ "label_file.description = \"Label of objects that this model can recognize.\"\n",
+ "label_file.type = _metadata_fb.AssociatedFileType.TENSOR_VALUE_LABELS\n",
+ "output_class_meta.associatedFiles = [label_file]\n",
+ "\n",
+ "output_score_meta = _metadata_fb.TensorMetadataT()\n",
+ "output_score_meta.name = \"score\"\n",
+ "output_score_meta.description = \"The scores of the detected boxes.\"\n",
+ "output_score_meta.content = _metadata_fb.ContentT()\n",
+ "output_score_meta.content.contentPropertiesType = (\n",
+ " _metadata_fb.ContentProperties.FeatureProperties)\n",
+ "output_score_meta.content.contentProperties = (\n",
+ " _metadata_fb.FeaturePropertiesT())\n",
+ "output_score_meta.content.range = _metadata_fb.ValueRangeT()\n",
+ "output_score_meta.content.range.min = 2\n",
+ "output_score_meta.content.range.max = 2\n",
+ "\n",
+ "output_number_meta = _metadata_fb.TensorMetadataT()\n",
+ "output_number_meta.name = \"number of detections\"\n",
+ "output_number_meta.description = \"The number of the detected boxes.\"\n",
+ "output_number_meta.content = _metadata_fb.ContentT()\n",
+ "output_number_meta.content.contentPropertiesType = (\n",
+ " _metadata_fb.ContentProperties.FeatureProperties)\n",
+ "output_number_meta.content.contentProperties = (\n",
+ " _metadata_fb.FeaturePropertiesT())\n",
+ "\n",
+ "# Creates subgraph info.\n",
+ "group = _metadata_fb.TensorGroupT()\n",
+ "group.name = \"detection result\"\n",
+ "group.tensorNames = [\n",
+ " output_location_meta.name, output_class_meta.name,\n",
+ " output_score_meta.name\n",
+ "]\n",
+ "subgraph = _metadata_fb.SubGraphMetadataT()\n",
+ "subgraph.inputTensorMetadata = [input_meta]\n",
+ "subgraph.outputTensorMetadata = [\n",
+ " output_location_meta, output_class_meta, output_score_meta,\n",
+ " output_number_meta\n",
+ "]\n",
+ "subgraph.outputTensorGroups = [group]\n",
+ "model_meta.subgraphMetadata = [subgraph]\n",
+ "\n",
+ "b = flatbuffers.Builder(0)\n",
+ "b.Finish(\n",
+ " model_meta.Pack(b),\n",
+ " _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n",
+ "metadata_buf = b.Output()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### 8.2.4.2. Continue by following the Android deployment guide\n",
+ "FILL"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WoptFnAhCSrR"
+ },
+ "source": [
+ "# 9. (Optional) Post-Training Quantization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "I54paUm8dUCr"
+ },
+ "source": [
+ "Want to make your TFLite model run even faster? Quantize it! Quantizating a model converts its weights from 32-bit floating-point values to 8-bit integer values. This allows the quantized model to run faster and occupy less memory without too much reduction in accuracy.\n",
+ "\n",
+ "> Note: If you observe an obvious decrease in detection accuracy when quantizing your model with TF2, I recommend using TensorFlow 1 to quantize your model instead. TF1 supports quantization-aware training, which helps improve the accuracy of quantized models. The ssd-mobilenet-v2-quantized model from the TF1 Model Zoo has fast and accurate performance when trained with a custom dataset. Visit my [TFLite v1 Colab notebook](https://colab.research.google.com/github/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Train_TFLite1_Object_Detection_Model.ipynb) for step-by-step instructions on how to train and quantize a model with TensorFlow 1."
+ ]
+ },
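+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(Optional) The next cell is a stand-alone sketch of the idea behind int8 quantization: each float value is approximated as `scale * (q - zero_point)`, where `q` is an 8-bit integer. The data is random and purely for illustration; the actual conversion in Section 9.1 is handled by TFLiteConverter."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# (Optional) Illustration of the affine int8 quantization idea on random data\n",
+ "# This is separate from the real conversion below, which TFLiteConverter handles internally\n",
+ "import numpy as np\n",
+ "\n",
+ "weights = np.random.uniform(-1.0, 1.0, 1000).astype(np.float32) # Pretend these are model weights\n",
+ "\n",
+ "# Map the float range onto 256 integer levels\n",
+ "scale = (weights.max() - weights.min()) / 255.0\n",
+ "zero_point = int(round(-weights.min() / scale))\n",
+ "quantized = np.clip(np.round(weights / scale) + zero_point, 0, 255).astype(np.uint8)\n",
+ "\n",
+ "# Dequantize to see how much precision was lost\n",
+ "dequantized = scale * (quantized.astype(np.float32) - zero_point)\n",
+ "\n",
+ "print('Max quantization error: %.4f' % np.abs(weights - dequantized).max())\n",
+ "print('Memory: %d bytes as float32 vs %d bytes as uint8' % (weights.nbytes, quantized.nbytes))"
+ ]
+ },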
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "VTyqlXFTJ0Uv"
+ },
+ "source": [
+ "## 9.1. Quantize model\n",
+ "We'll use the \"TFLiteConverter\" module to perform [post-training quantization](https://www.tensorflow.org/lite/performance/post_training_quantization) on the model. To quantize the model, we need to provide a representative dataset, which is a set of images that represent what the model will see when deployed in the field. First, we'll create a list of images to include in the representative dataset (we'll just use the images in the `train` folder).\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "XSNZtfj_k3NP"
+ },
+ "outputs": [],
+ "source": [
+ "# Get list of all images in train directory\n",
+ "image_path = '/content/images/train'\n",
+ "\n",
+ "jpg_file_list = glob.glob(image_path + '/*.jpg')\n",
+ "JPG_file_list = glob.glob(image_path + '/*.JPG')\n",
+ "png_file_list = glob.glob(image_path + '/*.png')\n",
+ "bmp_file_list = glob.glob(image_path + '/*.bmp')\n",
+ "\n",
+ "quant_image_list = jpg_file_list + JPG_file_list + png_file_list + bmp_file_list"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cqbH1VlEgiuy"
+ },
+ "source": [
+ "Next, we'll define a function to yield images from our representative dataset. Refer to [TensorFlow's sample quantization code](https://colab.research.google.com/github/google-coral/tutorials/blob/master/retrain_classification_ptq_tf2.ipynb#scrollTo=kRDabW_u1wnv) to get a better understanding of what this is doing!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ORzx0XRErSLV"
+ },
+ "outputs": [],
+ "source": [
+ "# A generator that provides a representative dataset\n",
+ "# Code modified from https://colab.research.google.com/github/google-coral/tutorials/blob/master/retrain_classification_ptq_tf2.ipynb\n",
+ "\n",
+ "# First, get input details for model so we know how to preprocess images\n",
+ "interpreter = Interpreter(model_path=PATH_TO_MODEL) # PATH_TO_MODEL is defined in Step 7 above\n",
+ "interpreter.allocate_tensors()\n",
+ "input_details = interpreter.get_input_details()\n",
+ "output_details = interpreter.get_output_details()\n",
+ "height = input_details[0]['shape'][1]\n",
+ "width = input_details[0]['shape'][2]\n",
+ "\n",
+ "import random\n",
+ "\n",
+ "def representative_data_gen():\n",
+ " dataset_list = quant_image_list\n",
+ " quant_num = 300\n",
+ " for i in range(quant_num):\n",
+ " pick_me = random.choice(dataset_list)\n",
+ " image = tf.io.read_file(pick_me)\n",
+ "\n",
+ " if pick_me.endswith('.jpg') or pick_me.endswith('.JPG'):\n",
+ " image = tf.io.decode_jpeg(image, channels=3)\n",
+ " elif pick_me.endswith('.png'):\n",
+ " image = tf.io.decode_png(image, channels=3)\n",
+ " elif pick_me.endswith('.bmp'):\n",
+ " image = tf.io.decode_bmp(image, channels=3)\n",
+ "\n",
+ " image = tf.image.resize(image, [width, height]) # TO DO: Replace 300s with an automatic way of reading network input size\n",
+ " image = tf.cast(image / 255., tf.float32)\n",
+ " image = tf.expand_dims(image, 0)\n",
+ " yield [image]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "wqtu98mzebEj"
+ },
+ "source": [
+ "Finally, we'll initialize the TFLiteConverter module, point it at the TFLite graph we generated in Step 6, and provide it with the representative dataset generator function we created in the previous code block. We'll configure the converter to quantize the model's weight values to INT8 format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Ox0bGDWds_Ce"
+ },
+ "outputs": [],
+ "source": [
+ "# Initialize converter module\n",
+ "converter = tf.lite.TFLiteConverter.from_saved_model('/content/custom_model_lite/saved_model')\n",
+ "\n",
+ "# This enables quantization\n",
+ "converter.optimizations = [tf.lite.Optimize.DEFAULT]\n",
+ "# This sets the representative dataset for quantization\n",
+ "converter.representative_dataset = representative_data_gen\n",
+ "# This ensures that if any ops can't be quantized, the converter throws an error\n",
+ "converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n",
+ "# For full integer quantization, though supported types defaults to int8 only, we explicitly declare it for clarity.\n",
+ "converter.target_spec.supported_types = [tf.int8]\n",
+ "# These set the input tensors to uint8 and output tensors to float32\n",
+ "converter.inference_input_type = tf.uint8\n",
+ "converter.inference_output_type = tf.float32\n",
+ "tflite_model = converter.convert()\n",
+ "\n",
+ "with open('/content/custom_model_lite/detect_quant.tflite', 'wb') as f:\n",
+ " f.write(tflite_model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "dYVVlv5QUUZF"
+ },
+ "source": [
+ "## 9.2. Test quantized model\n",
+ "The model has been quantized and exported as `detect_quant.tflite`. Let's test it out! We'll re-use the function from Section 7 for running the model on test images and display the results, except this time we'll point it at the quantized model.\n",
+ "\n",
+ "Click Play on the code block below to test the `detect_quant.tflite` model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "6OoirJuOtdOG"
+ },
+ "outputs": [],
+ "source": [
+ "# Set up parameters for inferencing function (using detect_quant.tflite instead of detect.tflite)\n",
+ "PATH_TO_IMAGES='/content/images/test' #Path to test images folder\n",
+ "PATH_TO_MODEL='/content/custom_model_lite/detect_quant.tflite' #Path to .tflite model file\n",
+ "PATH_TO_LABELS='/content/labelmap.txt' #Path to labelmap.txt file\n",
+ "min_conf_threshold=0.5 #Confidence threshold (try changing this to 0.01 if you don't see any detection results)\n",
+ "images_to_test = 10 #Number of images to run detection on\n",
+ "\n",
+ "# Run inferencing function!\n",
+ "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cKo7ZtfOyoxG"
+ },
+ "source": [
+ "If your quantized model isn't performing very well, try using my TensorFlow Lite 1 notebook *(link to be added)* to train a SSD-MobileNet model with your dataset. In my experience, the `ssd-mobilenet-v2-quantized` model from the [TF1 Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md) has the best quantized performance out of any other TensorFlow Lite model.\n",
+ "\n",
+ "TFLite models created with TensorFlow 1 are still compatible with the TensorFlow Lite 2 runtime, so your TFLite 1 model will still work with my [TensorFlow setup guide for the Raspberry Pi](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Raspberry_Pi_Guide.md)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "vWdVxs6LUjbR"
+ },
+ "source": [
+ "## 9.3 Calculate quantized model mAP\n",
+ "\n",
+ "Let's calculate the quantize model's mAP using the calculator tool we set up in Step 7.2. We just need to perform inference with our quantized model (`detect_quant.tflite`) to get a new set of detection results.\n",
+ "\n",
+ "Run the following block to run inference on the test images and save the detection results."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ZMaumV-11Et0"
+ },
+ "outputs": [],
+ "source": [
+ "# Need to remove existing detection results first\n",
+ "!rm /content/mAP/input/detection-results/*\n",
+ "\n",
+ "# Set up variables for running inference, this time to get detection results saved as .txt files\n",
+ "PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n",
+ "PATH_TO_MODEL='/content/custom_model_lite/detect_quant.tflite' # Path to quantized .tflite model file\n",
+ "PATH_TO_LABELS='/content/labelmap.txt' # Path to labelmap.txt file\n",
+ "PATH_TO_RESULTS='/content/mAP/input/detection-results' # Folder to save detection results in\n",
+ "min_conf_threshold=0.1 # Confidence threshold\n",
+ "\n",
+ "# Use all the images in the test folder\n",
+ "image_list = glob.glob(PATH_TO_IMAGES + '/*.jpg') + glob.glob(PATH_TO_IMAGES + '/*.JPG') + glob.glob(PATH_TO_IMAGES + '/*.png') + glob.glob(PATH_TO_IMAGES + '/*.bmp')\n",
+ "images_to_test = min(500, len(image_list)) # If there are more than 500 images in the folder, just use 500\n",
+ "\n",
+ "# Tell function to just save results and not display images\n",
+ "txt_only = True\n",
+ "\n",
+ "# Run inferencing function!\n",
+ "print('Starting inference on %d images...' % images_to_test)\n",
+ "tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test, PATH_TO_RESULTS, txt_only)\n",
+ "print('Finished inferencing!')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QgcmdLQf1Et1"
+ },
+ "source": [
+ "Now we can run the mAP calculation script to determine our quantized model's mAP."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ZIRNp0Af1Et1"
+ },
+ "outputs": [],
+ "source": [
+ "cd /content/mAP"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "4TDgMBw_1Et1"
+ },
+ "outputs": [],
+ "source": [
+ "!python calculate_map_cartucho.py --labels=/content/labelmap.txt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XFsuasvxFHo8"
+ },
+ "source": [
+ "## 9.4. Compile model for Edge TPU\n",
+ "\n",
+ "Now that the model has been converted to TFLite and quantized, we can compile it to run on Edge TPU devices like the [Coral USB Accelerator](https://coral.ai/products/accelerator/) or the [Coral Dev Board](https://coral.ai/products/dev-board/). This allows the model to run much faster! For information on how to set up the USB Accelerator, my [TensorFlow Lite repository on GitHub](https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/deploy_guides/Raspberry_Pi_Guide.md#section-2---run-edge-tpu-object-detection-models-on-the-raspberry-pi-using-the-coral-usb-accelerator).\n",
+ "\n",
+ "First, install the Edge TPU Compiler package inside this Colab instance."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "mUd_SNC0JSq0"
+ },
+ "outputs": [],
+ "source": [
+ "! curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\n",
+ "! echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list\n",
+ "! sudo apt-get update\n",
+ "! sudo apt-get install edgetpu-compiler"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "usfmdtSiJuuC"
+ },
+ "source": [
+ "Next, compile the quantize TFLite model. (If your model has a different filename than \"detect_quant.tflite\", use that instead.)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "mULCY0nb0ahH"
+ },
+ "outputs": [],
+ "source": [
+ "%cd /content/custom_model_lite\n",
+ "!edgetpu_compiler detect_quant.tflite\n",
+ "!mv detect_quant_edgetpu.tflite edgetpu.tflite\n",
+ "!rm detect_quant_edgetpu.log"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "oqGy2FgzKomN"
+ },
+ "source": [
+ "The compiled model will be output in the `custom_model_lite` folder as \"detect__quant_edgetpu.tflite\". It gets renamed to \"edgetpu.tflite\" to be consistent with my code. Zip the `custom_model_lite` folder and download it by running the two code blocks below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "8nCdUouYJjQM"
+ },
+ "outputs": [],
+ "source": [
+ "%cd /content\n",
+ "!zip -r custom_model_lite.zip custom_model_lite"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "AmjqvKuuK8ZR"
+ },
+ "outputs": [],
+ "source": [
+ "from google.colab import files\n",
+ "\n",
+ "files.download('custom_model_lite.zip')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ptwpBBEWLfuJ"
+ },
+ "source": [
+ "Now you're all set to use the Coral model! For instructions on how to run an object detection model on the Raspberry Pi using the Coral USB Acclerator, please see my video, [\"How to Use the Coral USB Accelerator with the Raspberry Pi\"](https://www.youtube.com/watch?v=qJMwNHQNOVU)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5VI_Gh5dCd7w"
+ },
+ "source": [
+ "# Appendix: Common Errors"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "sEbd9cO7I_o3"
+ },
+ "source": [
+ "Here are solutions to common errors that can occur while stepping through this notebook.\n",
+ "\n",
+ "**1. Training suddenly stops with ^C output**\n",
+ "\n",
+ "If your training randomly stops without any error messages except a `^C`, that means the virtual machine has run out of memory. To resolve the issue, try reducing the `batch_size` variable in Step 4 to a lower value like `batch_size = 4`. The value must be a power of 2. (e.g. 2, 4, 8 ...)\n",
+ "\n",
+ "Source: https://stackoverflow.com/questions/75901898/why-my-model-training-automatically-stopped-during-training"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "authorship_tag": "ABX9TyPJwANaBtFGhp/i5NVhWkIW",
+ "collapsed_sections": [
+ "4VAvZo8qE4u5",
+ "sxb8_h-QFErO",
+ "eydREUsMGUUR",
+ "eGEUZYAMEZ6f",
+ "-19zML6oEO7l",
+ "kPg8oMnQDYKl",
+ "RDQrtQhvC3oG",
+ "5i40ve0SCLaE",
+ "WoptFnAhCSrR",
+ "5VI_Gh5dCd7w"
+ ],
+ "include_colab_link": true,
+ "provenance": [],
+ "toc_visible": true
+ },
+ "gpuClass": "standard",
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/deploy_guides/Android_TFLite_Guide.md b/deploy_guides/Android_TFLite_Guide.md
new file mode 100644
index 00000000..fe21ef41
--- /dev/null
+++ b/deploy_guides/Android_TFLite_Guide.md
@@ -0,0 +1,96 @@
+# How to Run TensorFlow Lite Models on Android
+This guide shows how to load a TensorFlow Lite model on an Android device to detect objects using the rear camera. It walks through the process of setting up a project in Android Studio, adding your custom TFLite model to the project, and deploying it to your Android phone.
+
+
+
+
+
+## Requirements
+Running TFLite models requires an Android device with an API level of 24 or higher (Android 7.0 or higher). This covers 97% of all active Android devices. For more information on API levels, see the [Android SDK Platform release notes page.](https://developer.android.com/tools/releases/platforms)
+For this guide, using a phone is highly recommended: Android tablets typically have worse performance and camera quality than phones.
+
+## Step 1. Train your TensorFlow Lite model
+If you haven't already, train and export your custom TFLite model by working through the [Google Colab Notebook](https://colab.research.google.com/github/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Train_TFLite2_Object_Detction_Model.ipynb).
+
+
+## Step 2. Set Up Your device for Use With Android Studio
+Next, you'll need to enable Developer Options on your Android device. This allows it to load and run custom apps, like the one you'll be creating in this guide. The following steps and images show the process for a typical Android phone. If your device has a different Settings menu, visit the [Run apps on a hardware device](https://developer.android.com/studio/run/device) page for specific instructions for your device.
+
+1. Navigate to Settings -> About Phone -> Software Information.
+
+
+
+
+
+2. Tap the "Build number" item 7 times, then return to Settings. Developer Options will be revealed under About Phone.
+
+
+
+
+
+3. Enter Developer Options and enable it by toggling the top option to "On". Also enable the USB Debugging option.
+
+
+
+
+## Step 3. Download and Install Android Studio
+Now we need to download and install Android Studio, which will be used to write and build the application. Visit the [Android Studio page](https://developer.android.com/studio) and click the Download Android Studio button. Once it's downloaded, run the installer and use the default install options.
+
+## Step 4. Set Up Project in Android Studio
+The Android folder in this repository contains all the files for an Android Studio project with code to build an object detection app. Open a terminal and clone the repository using the following command.
+
+`git clone https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi.git --depth=1`
+
+Note: If you're on Windows and don't have `git` installed, you can just visit the main page of this repository, and click Code -> Download ZIP to download a zipped repository folder.
+Once the repository is cloned, launch Android Studio. The first time you open Android Studio, it will go through a Setup Wizard, where you can again use the default options. You'll need to accept each License Agreement and download some extra components.
+
+When it reaches the Welcome to Android Studio screen, click Open, navigate to the folder where the repository was cloned, and select the Android folder. Click "Trust Project" when asked.
+
+
+
+
+
+
+
+
+
+## Step 5. Add Your TFLite Model to the Project
+
+Click the Project dropdown in the upper left corner and select Android. Click the `app` folder, then expand every subfolder by clicking the Expand All button (or pressing Ctrl + NumPad+ on the keyboard).
+
+
+
+
+Right-click on the `assets` folder and select Open In -> Explorer. Move the `android_model.tflite` file you downloaded from the Colab Notebook into this folder.
+
+
+
+
+
+Find the ObjectDetectorHelper file in the `kotlin+java/io.ejtech.tflite/ui/detection` folder in the navigation panel on the left. Double-click it to open it. This file contains adjustable parameters for the app. For example, the `var threshold` variable sets the minimum confidence threshold for displaying detected objects.
+
+If your model name is different than `android_model.tflite`, change the `modelName` variable to match the name of the model file.
+
+
+
+
+## Step 6. Launch App on Your Device!
+
+Download a few tools from Android Studio to complete device setup. Open the Settings window (Ctrl+Alt+S or Command+comma) and select Languages & Frameworks. From there you can choose Android SDK. SDK Platforms will be selected by default, but click on SDK Tools to find the software you need. Scroll down and select Android Emulator, Android SDK Platform-Tools, and Google USB Driver. Click Apply. Android Studio will begin downloading and installing these tools and notify you when finished. Close the Settings window when ready.
+
+
+
+
+
+Connect your Android device to the PC using a USB cable. You should see the name of your phone appear at the top of Android Studio. Once connected, press the green arrow button to build and launch the app on your phone. Depending on the speed of your computer and the number of dependencies that need to be downloaded on the first launch, it may take 3 to 5 minutes.
+
+
+
+
+
+If everything worked successfully, your phone should show a live view of your camera with detected objects drawn on each camera frame.
+
+Congratulations! You've successfully built and run an Android app that uses your custom TFLite model to detect objects. If you ran into any errors, first try a quick Google search to see if you can find a solution. If that doesn't reveal a fix, feel free to submit the problem on the Issues page for this repository.
+
diff --git a/doc/AndroidStudio_ConnectDeviceAndLaunch.png b/doc/AndroidStudio_ConnectDeviceAndLaunch.png
new file mode 100644
index 00000000..06753472
Binary files /dev/null and b/doc/AndroidStudio_ConnectDeviceAndLaunch.png differ
diff --git a/doc/AndroidStudio_ImportModelAsset.png b/doc/AndroidStudio_ImportModelAsset.png
new file mode 100644
index 00000000..aa87a40b
Binary files /dev/null and b/doc/AndroidStudio_ImportModelAsset.png differ
diff --git a/doc/AndroidStudio_ImportModelAsset_2.png b/doc/AndroidStudio_ImportModelAsset_2.png
new file mode 100644
index 00000000..dc91e8b7
Binary files /dev/null and b/doc/AndroidStudio_ImportModelAsset_2.png differ
diff --git a/doc/AndroidStudio_ObjectDetectorHelper.png b/doc/AndroidStudio_ObjectDetectorHelper.png
new file mode 100644
index 00000000..d34399e4
Binary files /dev/null and b/doc/AndroidStudio_ObjectDetectorHelper.png differ
diff --git a/doc/AndroidStudio_OpenProject.png b/doc/AndroidStudio_OpenProject.png
new file mode 100644
index 00000000..1579086b
Binary files /dev/null and b/doc/AndroidStudio_OpenProject.png differ
diff --git a/doc/AndroidStudio_ProjectDescription.png b/doc/AndroidStudio_ProjectDescription.png
new file mode 100644
index 00000000..877cc00d
Binary files /dev/null and b/doc/AndroidStudio_ProjectDescription.png differ
diff --git a/doc/AndroidStudio_RevealProject.png b/doc/AndroidStudio_RevealProject.png
new file mode 100644
index 00000000..8df04a14
Binary files /dev/null and b/doc/AndroidStudio_RevealProject.png differ
diff --git a/doc/AndroidStudio_SDKTools.png b/doc/AndroidStudio_SDKTools.png
new file mode 100644
index 00000000..24189aa9
Binary files /dev/null and b/doc/AndroidStudio_SDKTools.png differ
diff --git a/doc/Device_Setup_Step1.jpg b/doc/Device_Setup_Step1.jpg
new file mode 100644
index 00000000..a90e5d1a
Binary files /dev/null and b/doc/Device_Setup_Step1.jpg differ
diff --git a/doc/Device_Setup_Step2.jpg b/doc/Device_Setup_Step2.jpg
new file mode 100644
index 00000000..48e9035e
Binary files /dev/null and b/doc/Device_Setup_Step2.jpg differ
diff --git a/doc/Device_Setup_Step3.jpg b/doc/Device_Setup_Step3.jpg
new file mode 100644
index 00000000..422af4a4
Binary files /dev/null and b/doc/Device_Setup_Step3.jpg differ
diff --git a/doc/Device_Setup_Step4.jpg b/doc/Device_Setup_Step4.jpg
new file mode 100644
index 00000000..eb4daef9
Binary files /dev/null and b/doc/Device_Setup_Step4.jpg differ
diff --git a/doc/Device_Setup_Step5.jpg b/doc/Device_Setup_Step5.jpg
new file mode 100644
index 00000000..0810adc9
Binary files /dev/null and b/doc/Device_Setup_Step5.jpg differ
diff --git a/doc/coin_detection.jpg b/doc/coin_detection.jpg
new file mode 100644
index 00000000..26c90466
Binary files /dev/null and b/doc/coin_detection.jpg differ