diff --git a/notebooks/flux.1-image-generation/flux.1-image-generation.ipynb b/notebooks/flux.1-image-generation/flux.1-image-generation.ipynb index 9a99ca8f067..f4f0010b58d 100644 --- a/notebooks/flux.1-image-generation/flux.1-image-generation.ipynb +++ b/notebooks/flux.1-image-generation/flux.1-image-generation.ipynb @@ -403,7 +403,8 @@ "id": "cab6790e", "metadata": {}, "source": [ - "`openvino_genai.Text2ImagePipeline` represents inference pipeline for text-to-image generation. For creation pipeline instance, you should provide directory with converted to OpenVINO model and inference device." + "`openvino_genai.Text2ImagePipeline` represents an inference pipeline for text-to-image generation. To create a pipeline instance, you should provide a directory with a model converted to OpenVINO format and an inference device.\n", + "> **Note**: When using a GPU, the default inference precision is FP16, which can cause image quality degradation (blurry or noisy images) with quantized models." ] }, { @@ -425,7 +426,11 @@ "\n", "model_dir = model_base_dir / \"INT4\" if use_quantized_models.value else model_base_dir / \"FP16\"\n", "\n", - "ov_pipe = ov_genai.Text2ImagePipeline(model_dir, device=device.value)" + "if \"GPU\" in device.value:\n", + " ov_pipe = ov_genai.Text2ImagePipeline(model_dir)\n", + " ov_pipe.compile(device.value, INFERENCE_PRECISION_HINT=\"f32\")\n", + "else:\n", + " ov_pipe = ov_genai.Text2ImagePipeline(model_dir, device=device.value)" ] }, {