diff --git a/app/src/main/AndroidManifest.xml b/app/src/main/AndroidManifest.xml
index f0886146..1de0a976 100644
--- a/app/src/main/AndroidManifest.xml
+++ b/app/src/main/AndroidManifest.xml
@@ -34,6 +34,12 @@
+
+
+
+
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index a710f81c..f62c3af7 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -39,7 +39,7 @@ androidxTestExtTruth = "1.5.0"
androidxTestRules = "1.5.0"
androidxTestRunner = "1.5.2"
androidxUiAutomator = "2.2.0"
-media3 = "1.4.0-rc01"
+media3 = "1.5.0"
appcompat = "1.6.1"
material = "1.12.0-beta01"
constraintlayout = "2.1.4"
diff --git a/samples/README.md b/samples/README.md
index b0148535..6a8611b2 100644
--- a/samples/README.md
+++ b/samples/README.md
@@ -117,7 +117,7 @@ A sample showcasing how to handle calls with the Jetpack Telecom API
- [TextSpan](user-interface/text/src/main/java/com/example/platform/ui/text/TextSpan.kt):
buildSpannedString is useful for quickly building a rich text.
- [Transformer and TFLite](media/video/src/main/java/com/example/platform/media/video/TransformerTFLite.kt):
-This sample demonstrates using Transformer with TFLite by applying a selected art style to a video.
+This sample demonstrates using Transformer with TFLite/LiteRT by applying a selected art style to a video.
- [UltraHDR Image Capture](camera/camera2/src/main/java/com/example/platform/camera/imagecapture/Camera2UltraHDRCapture.kt):
This sample demonstrates how to capture a 10-bit compressed still image and
- [UltraHDR to HDR Video](media/ultrahdr/src/main/java/com/example/platform/media/ultrahdr/video/UltraHDRToHDRVideo.kt):
diff --git a/samples/media/video/download_model.gradle b/samples/media/video/download_model.gradle
index 1ba5f602..da3c3e4b 100644
--- a/samples/media/video/download_model.gradle
+++ b/samples/media/video/download_model.gradle
@@ -13,16 +13,39 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-tasks.register('downloadModelFile1', Download) {
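+// Download the Magenta arbitrary-image-stylization TFLite models (int8 and float16 variants) into
+// the sample's asset directory, and copy the float16 models into the test assets directory.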
+task downloadModelFile(type: Download) {
+ src 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/task_library/style_transfer/android/magenta_arbitrary-image-stylization-v1-256_int8_prediction_1.tflite'
+ dest project.ext.ASSET_DIR + '/predict_int8.tflite'
+ overwrite false
+}
+
+task downloadModelFile0(type: Download) {
+ src 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/task_library/style_transfer/android/magenta_arbitrary-image-stylization-v1-256_int8_transfer_1.tflite'
+ dest project.ext.ASSET_DIR + '/transfer_int8.tflite'
+ overwrite false
+}
+
+task downloadModelFile1(type: Download) {
src 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/task_library/style_transfer/android/magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite'
dest project.ext.ASSET_DIR + '/predict_float16.tflite'
overwrite false
}
-tasks.register('downloadModelFile2', Download) {
+task downloadModelFile2(type: Download) {
src 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/task_library/style_transfer/android/magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite'
dest project.ext.ASSET_DIR + '/transfer_float16.tflite'
overwrite false
}
-preBuild.dependsOn downloadModelFile1, downloadModelFile2
\ No newline at end of file
+task copyTestModel(type: Copy, dependsOn: downloadModelFile1) {
+ from project.ext.ASSET_DIR + '/predict_float16.tflite'
+ into project.ext.TEST_ASSETS_DIR
+}
+
+task copyTestModel0(type: Copy, dependsOn: downloadModelFile2) {
+ from project.ext.ASSET_DIR + '/transfer_float16.tflite'
+ into project.ext.TEST_ASSETS_DIR
+}
+
+preBuild.dependsOn downloadModelFile, downloadModelFile0, downloadModelFile1, downloadModelFile2,
+        copyTestModel, copyTestModel0
diff --git a/samples/media/video/src/main/java/com/example/platform/media/video/StyleTransferEffect.kt b/samples/media/video/src/main/java/com/example/platform/media/video/StyleTransferEffect.kt
new file mode 100644
index 00000000..85474570
--- /dev/null
+++ b/samples/media/video/src/main/java/com/example/platform/media/video/StyleTransferEffect.kt
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.platform.media.video
+
+import android.content.Context
+import android.graphics.Bitmap
+import android.graphics.BitmapFactory
+import android.graphics.Matrix
+import androidx.media3.common.GlTextureInfo
+import androidx.media3.common.VideoFrameProcessingException
+import androidx.media3.common.util.GlRect
+import androidx.media3.common.util.GlUtil
+import androidx.media3.common.util.Size
+import androidx.media3.common.util.UnstableApi
+import androidx.media3.common.util.Util
+import androidx.media3.effect.ByteBufferGlEffect
+import com.google.common.collect.ImmutableMap
+import com.google.common.util.concurrent.ListenableFuture
+import com.google.common.util.concurrent.ListeningExecutorService
+import com.google.common.util.concurrent.MoreExecutors
+import org.tensorflow.lite.DataType
+import org.tensorflow.lite.Interpreter
+import org.tensorflow.lite.InterpreterApi
+import org.tensorflow.lite.gpu.CompatibilityList
+import org.tensorflow.lite.gpu.GpuDelegate
+import org.tensorflow.lite.support.common.FileUtil
+import org.tensorflow.lite.support.common.ops.DequantizeOp
+import org.tensorflow.lite.support.common.ops.NormalizeOp
+import org.tensorflow.lite.support.image.ImageProcessor
+import org.tensorflow.lite.support.image.TensorImage
+import org.tensorflow.lite.support.image.ops.ResizeOp
+import org.tensorflow.lite.support.image.ops.ResizeWithCropOrPadOp
+import org.tensorflow.lite.support.tensorbuffer.TensorBuffer
+import java.util.concurrent.Future
+
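+/**
+ * A [ByteBufferGlEffect.Processor] that applies TensorFlow Lite arbitrary style transfer to each
+ * decoded video frame. The style-prediction model runs once on the selected style image, and the
+ * style-transfer model then runs per frame on background executors.
+ */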
+@UnstableApi
+class StyleTransferEffect(context: Context, styleAssetFileName: String) : ByteBufferGlEffect.Processor<Bitmap> {
+
+ private val transformInterpreter: InterpreterApi
+ private val inputTransformTargetHeight: Int
+ private val inputTransformTargetWidth: Int
+ private val outputTransformShape: IntArray
+
+ private var preProcess: ListeningExecutorService = MoreExecutors.listeningDecorator(
+ Util.newSingleThreadExecutor("preProcess"))
+ private var postProcess: ListeningExecutorService = MoreExecutors.listeningDecorator(
+ Util.newSingleThreadExecutor("postProcess"))
+ private var tfRun: ListeningExecutorService = MoreExecutors.listeningDecorator(
+ Util.newSingleThreadExecutor("tfRun"))
+
+ private val predictOutput: TensorBuffer
+
+ private var inputWidth: Int = 0
+ private var inputHeight: Int = 0
+
+
+    init {
+        val options = Interpreter.Options()
+        val compatibilityList = CompatibilityList()
+        // Use the GPU delegate when the device supports it; otherwise fall back to CPU inference.
+        if (compatibilityList.isDelegateSupportedOnThisDevice) {
+            options.addDelegate(GpuDelegate(compatibilityList.bestOptionsForThisDevice))
+        } else {
+            options.setNumThreads(6)
+        }
+ val predictModel = "predict_float16.tflite"
+ val transferModel = "transfer_float16.tflite"
+ val predictInterpreter = Interpreter(FileUtil.loadMappedFile(context, predictModel), options)
+ transformInterpreter = InterpreterApi.create(FileUtil.loadMappedFile(context, transferModel), options)
+ val inputPredictTargetHeight = predictInterpreter.getInputTensor(0).shape()[1]
+ val inputPredictTargetWidth = predictInterpreter.getInputTensor(0).shape()[2]
+ val outputPredictShape = predictInterpreter.getOutputTensor(0).shape()
+
+ inputTransformTargetHeight = transformInterpreter.getInputTensor(0).shape()[1]
+ inputTransformTargetWidth = transformInterpreter.getInputTensor(0).shape()[2]
+ outputTransformShape = transformInterpreter.getOutputTensor(0).shape()
+
+ val inputStream = context.assets.open(styleAssetFileName)
+ val styleImage = BitmapFactory.decodeStream(inputStream)
+ inputStream.close()
+ val styleTensorImage = getScaledTensorImage(styleImage, inputPredictTargetWidth, inputPredictTargetHeight)
+ predictOutput = TensorBuffer.createFixedSize(outputPredictShape, DataType.FLOAT32)
+ predictInterpreter.run(styleTensorImage.buffer, predictOutput.buffer)
+ }
+
+ override fun configure(inputWidth: Int, inputHeight: Int): Size {
+ this.inputWidth = inputWidth
+ this.inputHeight = inputHeight
+ return Size(inputTransformTargetWidth, inputTransformTargetHeight)
+ }
+
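+    // Only a square region of the frame (side = min(inputWidth, inputHeight)) is read back and
+    // scaled down, matching the square input expected by the style-transfer model.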
+ override fun getScaledRegion(presentationTimeUs: Long): GlRect {
+ val minSide = minOf(inputWidth, inputHeight)
+ return GlRect(0, 0, minSide, minSide)
+ }
+
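+    // Pre-processing, TFLite inference, and post-processing each run on their own single-thread
+    // executor so that per-frame work stays off the video frame processing thread.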
+ override fun processImage(
+ image: ByteBufferGlEffect.Image,
+ presentationTimeUs: Long,
+    ): ListenableFuture<Bitmap> {
+ val tensorImageFuture = preProcess(image)
+ val tensorBufferFuture = tfRun(tensorImageFuture)
+ return postProcess(tensorBufferFuture)
+ }
+
+ override fun release() {}
+
+ override fun finishProcessingAndBlend(
+ outputFrame: GlTextureInfo,
+ presentationTimeUs: Long,
+ result: Bitmap,
+ ) {
+ try {
+ copyBitmapToFbo(result, outputFrame, getScaledRegion(presentationTimeUs))
+ } catch (e: GlUtil.GlException) {
+ throw VideoFrameProcessingException.from(e)
+ }
+ }
+
+    private fun preProcess(image: ByteBufferGlEffect.Image): ListenableFuture<TensorImage> {
+ return preProcess.submit {
+ val bitmap = image.copyToBitmap()
+ getScaledTensorImage(bitmap, inputTransformTargetWidth, inputTransformTargetHeight)
+ }
+ }
+
+    private fun tfRun(tensorImageFuture: Future<TensorImage>): ListenableFuture<TensorBuffer> {
+ return tfRun.submit {
+ val tensorImage = tensorImageFuture.get()
+ val outputImage = TensorBuffer.createFixedSize(outputTransformShape, DataType.FLOAT32)
+
+ transformInterpreter.runForMultipleInputsOutputs(
+ arrayOf(tensorImage.buffer, predictOutput.buffer),
+                ImmutableMap.builder<Int, Any>().put(0, outputImage.buffer).build()
+ )
+ outputImage
+ }
+ }
+
+    private fun postProcess(futureOutputImage: ListenableFuture<TensorBuffer>): ListenableFuture<Bitmap> {
+ return postProcess.submit {
+ val outputImage = futureOutputImage.get()
+ val imagePostProcessor = ImageProcessor.Builder()
+ .add(DequantizeOp(0f, 255f))
+ .build()
+ val outputTensorImage = TensorImage(DataType.FLOAT32)
+ outputTensorImage.load(outputImage)
+ imagePostProcessor.process(outputTensorImage).bitmap
+ }
+ }
+
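+    // Center-crops to a square, resizes to the model input size, and normalizes pixels to [0, 1].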
+ private fun getScaledTensorImage(bitmap: Bitmap, targetWidth: Int, targetHeight: Int): TensorImage {
+ val cropSize = minOf(bitmap.width, bitmap.height)
+ val imageProcessor = ImageProcessor.Builder()
+ .add(ResizeWithCropOrPadOp(cropSize, cropSize))
+ .add(ResizeOp(targetHeight, targetWidth, ResizeOp.ResizeMethod.BILINEAR))
+ .add(NormalizeOp(0f, 255f))
+ .build()
+ val tensorImage = TensorImage(DataType.FLOAT32)
+ tensorImage.load(bitmap)
+ return imageProcessor.process(tensorImage)
+ }
+
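+    // Uploads the stylized bitmap into a temporary GL texture (flipped vertically to match GL's
+    // coordinate system) and blits it into the output frame's framebuffer over the scaled region.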
+ private fun copyBitmapToFbo(bitmap: Bitmap, textureInfo: GlTextureInfo, rect: GlRect) {
+ val bitmapToGl = Matrix().apply { setScale(1f, -1f) }
+ val texId = GlUtil.createTexture(bitmap.width, bitmap.height, false)
+ val fboId = GlUtil.createFboForTexture(texId)
+ GlUtil.setTexture(texId,
+ Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, bitmapToGl, true))
+ GlUtil.blitFrameBuffer(fboId, GlRect(0, 0, bitmap.width, bitmap.height), textureInfo.fboId, rect)
+ GlUtil.deleteTexture(texId)
+ GlUtil.deleteFbo(fboId)
+ }
+}
\ No newline at end of file
diff --git a/samples/media/video/src/main/java/com/example/platform/media/video/StyleTransferShaderProgram.java b/samples/media/video/src/main/java/com/example/platform/media/video/StyleTransferShaderProgram.java
deleted file mode 100644
index 9dbef7e9..00000000
--- a/samples/media/video/src/main/java/com/example/platform/media/video/StyleTransferShaderProgram.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright 2024 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.example.platform.media.video;
-
-import android.content.Context;
-import android.graphics.Bitmap;
-import android.graphics.BitmapFactory;
-import android.opengl.GLES20;
-import android.opengl.GLUtils;
-import android.util.Log;
-
-import androidx.media3.common.VideoFrameProcessingException;
-import androidx.media3.common.util.GlProgram;
-import androidx.media3.common.util.GlUtil;
-import androidx.media3.common.util.Size;
-import androidx.media3.common.util.UnstableApi;
-import androidx.media3.effect.BaseGlShaderProgram;
-
-import com.google.common.collect.ImmutableMap;
-
-import org.tensorflow.lite.DataType;
-import org.tensorflow.lite.Interpreter;
-import org.tensorflow.lite.InterpreterApi;
-import org.tensorflow.lite.gpu.CompatibilityList;
-import org.tensorflow.lite.gpu.GpuDelegate;
-import org.tensorflow.lite.support.common.FileUtil;
-import org.tensorflow.lite.support.common.ops.DequantizeOp;
-import org.tensorflow.lite.support.common.ops.NormalizeOp;
-import org.tensorflow.lite.support.image.ImageProcessor;
-import org.tensorflow.lite.support.image.TensorImage;
-import org.tensorflow.lite.support.image.ops.ResizeOp;
-import org.tensorflow.lite.support.image.ops.ResizeWithCropOrPadOp;
-import org.tensorflow.lite.support.tensorbuffer.TensorBuffer;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-
-import javax.microedition.khronos.opengles.GL10;
-
-// TODO: Migrate this class to Kotlin
-@UnstableApi
-final class StyleTransferShaderProgram extends BaseGlShaderProgram {
-
- private static final String TAG = "StyleTransferSP";
- private static final String VERTEX_SHADER_PATH = "shaders/vertex_shader_transformation_es2.glsl";
- private static final String FRAGMENT_SHADER_PATH = "shaders/fragment_shader_copy_es2.glsl";
-
- private final GlProgram glProgram;
- private final InterpreterApi transformInterpreter;
- private final int inputTransformTargetHeight;
- private final int inputTransformTargetWidth;
- private final int[] outputTransformShape;
-
- private final TensorBuffer predictOutput;
-
- private int width;
- private int height;
-
- public StyleTransferShaderProgram(Context context, String styleAssetFileName)
- throws VideoFrameProcessingException {
- super(/* useHighPrecisionColorComponents= */ false, /* texturePoolCapacity= */ 1);
-
- try {
- glProgram = new GlProgram(context, VERTEX_SHADER_PATH, FRAGMENT_SHADER_PATH);
-
- Interpreter.Options options = new Interpreter.Options();
-
- CompatibilityList compatibilityList = new CompatibilityList();
- if (compatibilityList.isDelegateSupportedOnThisDevice()) {
- GpuDelegate.Options gpuDelegateOptions = compatibilityList.getBestOptionsForThisDevice();
- GpuDelegate gpuDelegate = new GpuDelegate(gpuDelegateOptions);
- options.addDelegate(gpuDelegate);
- } else {
- options.setNumThreads(6);
- }
- String predictModel = "predict_float16.tflite";
- String transferModel = "transfer_float16.tflite";
- Interpreter predictInterpeter =
- new Interpreter(FileUtil.loadMappedFile(context, predictModel), options);
- transformInterpreter =
- InterpreterApi.create(FileUtil.loadMappedFile(context, transferModel), options);
- int inputPredictTargetHeight = predictInterpeter.getInputTensor(0).shape()[1];
- int inputPredictTargetWidth = predictInterpeter.getInputTensor(0).shape()[2];
- int[] outputPredictShape = predictInterpeter.getOutputTensor(0).shape();
-
- inputTransformTargetHeight = transformInterpreter.getInputTensor(0).shape()[1];
- inputTransformTargetWidth = transformInterpreter.getInputTensor(0).shape()[2];
- outputTransformShape = transformInterpreter.getOutputTensor(0).shape();
-
- InputStream inputStream = context.getAssets().open(styleAssetFileName);
- Bitmap styleImage = BitmapFactory.decodeStream(inputStream);
- inputStream.close();
- TensorImage styleTensorImage =
- getScaledTensorImage(styleImage, inputPredictTargetWidth, inputPredictTargetHeight);
- predictOutput = TensorBuffer.createFixedSize(outputPredictShape, DataType.FLOAT32);
- predictInterpeter.run(styleTensorImage.getBuffer(), predictOutput.getBuffer());
- } catch (IOException | GlUtil.GlException e) {
- Log.w(TAG, "Error setting up TfShaderProgram", e);
- throw new VideoFrameProcessingException(e);
- }
- }
-
- @Override
- public Size configure(int inputWidth, int inputHeight) {
- width = inputWidth;
- height = inputHeight;
- return new Size(inputWidth, inputHeight);
- }
-
- @Override
- public void drawFrame(int inputTexId, long presentationTimeUs)
- throws VideoFrameProcessingException {
- ByteBuffer pixelBuffer = ByteBuffer.allocateDirect(width * height * 4);
-
- Bitmap bitmap;
- int texId;
- try {
- int[] boundFramebuffer = new int[1];
- GLES20.glGetIntegerv(GLES20.GL_FRAMEBUFFER_BINDING, boundFramebuffer, /* offset= */ 0);
-
- int fboId = GlUtil.createFboForTexture(inputTexId);
- GlUtil.focusFramebufferUsingCurrentContext(fboId, width, height);
- GLES20.glReadPixels(
- /* x= */ 0,
- /* y= */ 0,
- width,
- height,
- GLES20.GL_RGBA,
- GLES20.GL_UNSIGNED_BYTE,
- pixelBuffer);
- GlUtil.checkGlError();
- bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
- bitmap.copyPixelsFromBuffer(pixelBuffer);
-
- Log.w(TAG, "Process frame at " + (presentationTimeUs / 1000) + " ms");
- long before = System.currentTimeMillis();
- TensorImage tensorImage =
- getScaledTensorImage(bitmap, inputTransformTargetWidth, inputTransformTargetHeight);
- Log.w(TAG, "- Scale " + (System.currentTimeMillis() - before) + " ms");
- TensorBuffer outputImage =
- TensorBuffer.createFixedSize(outputTransformShape, DataType.FLOAT32);
-
- before = System.currentTimeMillis();
- transformInterpreter.runForMultipleInputsOutputs(
- new Object[] {tensorImage.getBuffer(), predictOutput.getBuffer()},
- ImmutableMap.builder().put(0, outputImage.getBuffer()).build());
-
- Log.w(TAG, "- Run " + (System.currentTimeMillis() - before) + " ms");
-
- before = System.currentTimeMillis();
- ImageProcessor imagePostProcessor =
- new ImageProcessor.Builder()
- .add(new DequantizeOp(/* zeroPoint= */ 0f, /* scale= */ 255f))
- .build();
- TensorImage outputTensorImage = new TensorImage(DataType.FLOAT32);
- outputTensorImage.load(outputImage);
- Log.w(TAG, "- Load output " + (System.currentTimeMillis() - before) + " ms");
-
- before = System.currentTimeMillis();
- Bitmap outputBitmap = imagePostProcessor.process(outputTensorImage).getBitmap();
- Log.w(TAG, "- Post process output " + (System.currentTimeMillis() - before) + " ms");
-
- texId =
- GlUtil.createTexture(
- outputBitmap.getWidth(),
- outputBitmap.getHeight(),
- /* useHighPrecisionColorComponents= */ false);
- GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texId);
- GLES20.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
- GLES20.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
- GLES20.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_S, GL10.GL_REPEAT);
- GLES20.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_T, GL10.GL_REPEAT);
- GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, /* level= */ 0, outputBitmap, /* border= */ 0);
- GlUtil.checkGlError();
-
- GlUtil.focusFramebufferUsingCurrentContext(boundFramebuffer[0], width, height);
-
- glProgram.use();
- glProgram.setSamplerTexIdUniform("uTexSampler", texId, /* texUnitIndex= */ 0);
- float[] identityMatrix = GlUtil.create4x4IdentityMatrix();
- glProgram.setFloatsUniform("uTexTransformationMatrix", identityMatrix);
- glProgram.setFloatsUniform("uTransformationMatrix", identityMatrix);
- glProgram.setBufferAttribute(
- "aFramePosition",
- GlUtil.getNormalizedCoordinateBounds(),
- GlUtil.HOMOGENEOUS_COORDINATE_VECTOR_SIZE);
- glProgram.bindAttributesAndUniforms();
-
- GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, /* first= */ 0, /* count= */ 4);
- GlUtil.checkGlError();
-
- GlUtil.deleteTexture(texId);
- } catch (GlUtil.GlException e) {
- throw VideoFrameProcessingException.from(e);
- }
- }
-
- private static TensorImage getScaledTensorImage(
- Bitmap bitmap, int targetWidth, int targetHeight) {
- int cropSize = Math.min(bitmap.getWidth(), bitmap.getHeight());
- ImageProcessor imageProcessor =
- new ImageProcessor.Builder()
- .add(new ResizeWithCropOrPadOp(cropSize, cropSize))
- .add(
- new ResizeOp(
- targetHeight,
- targetWidth,
- ResizeOp.ResizeMethod.BILINEAR)) // TODO: Not sure why they are swapped?
- .add(new NormalizeOp(/* mean= */ 0f, /* stddev= */ 255f))
- .build();
- TensorImage tensorImage = new TensorImage(DataType.FLOAT32);
- tensorImage.load(bitmap);
- return imageProcessor.process(tensorImage);
- }
-}
diff --git a/samples/media/video/src/main/java/com/example/platform/media/video/TransformerTFLite.kt b/samples/media/video/src/main/java/com/example/platform/media/video/TransformerTFLite.kt
index 3fa0519d..713cf300 100644
--- a/samples/media/video/src/main/java/com/example/platform/media/video/TransformerTFLite.kt
+++ b/samples/media/video/src/main/java/com/example/platform/media/video/TransformerTFLite.kt
@@ -16,6 +16,7 @@
package com.example.platform.media.video
+import android.graphics.Bitmap
import android.os.Bundle
import android.os.Handler
import android.os.Looper
@@ -29,7 +30,7 @@ import androidx.lifecycle.lifecycleScope
import androidx.media3.common.Effect
import androidx.media3.common.MediaItem
import androidx.media3.common.util.UnstableApi
-import androidx.media3.effect.GlEffect
+import androidx.media3.effect.ByteBufferGlEffect
import androidx.media3.exoplayer.ExoPlayer
import androidx.media3.transformer.Composition
import androidx.media3.transformer.EditedMediaItem
@@ -52,7 +53,7 @@ import java.util.concurrent.TimeUnit
@UnstableApi
@Sample(
name = "Transformer and TFLite",
- description = "This sample demonstrates using Transformer with TFLite by applying a selected art style to a video.",
+    description = "This sample demonstrates using Transformer with TFLite/LiteRT by applying a selected art style to a video.",
documentation = "https://developer.android.com/guide/topics/media/transformer",
tags = ["Transformer"],
)
@@ -197,12 +198,7 @@ class TransformerTFLite : Fragment() {
}
selectedEffects.add(
- GlEffect { context, _ ->
- StyleTransferShaderProgram(
- context,
- selectedStyleAsset,
- )
- },
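+            // ByteBufferGlEffect reads each frame back from the GPU so the TFLite-based processor
+            // can work on a Bitmap, then blends the stylized result back into the GL pipeline.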
+            ByteBufferGlEffect<Bitmap>(StyleTransferEffect(requireContext(), selectedStyleAsset)),
)
return Effects(
@@ -216,11 +212,10 @@ class TransformerTFLite : Fragment() {
*/
private suspend fun playbackUsingExoPlayer() = withContext(Dispatchers.Main) {
binding.mediaPlayer.useController = true
-
val player = ExoPlayer.Builder(requireContext()).build()
player.setMediaItem(MediaItem.fromUri("file://" + externalCacheFile!!.absolutePath))
- player.prepare()
+ player.prepare()
// Attaching player to player view
binding.mediaPlayer.player = player