From 6cdda31d26faaf23d077c146ca7f86e32472aa24 Mon Sep 17 00:00:00 2001 From: EscapedGibbon Date: Tue, 30 Sep 2025 13:25:55 +0200 Subject: [PATCH 1/5] docs: update files with react components --- docs/basics/{basics.md => basics.mdx} | 0 ...g-with-masks.md => working-with-masks.mdx} | 8 +++---- docs/basics/working-with-rois.md | 2 +- docs/features/comparison/addition.md | 2 +- docs/features/{features.md => features.mdx} | 0 docs/features/filters/{blur.md => blur.mdx} | 4 ++-- .../filters/{derivative.md => derivative.mdx} | 0 docs/features/filters/filters.md | 22 +++++++++---------- .../{gaussian-blur.md => gaussian-blur.mdx} | 4 ++-- .../filters/{gradient.md => gradient.mdx} | 6 ++--- .../filters/{grayscale.md => grayscale.mdx} | 0 .../filters/{invert.md => invert.mdx} | 2 +- docs/features/filters/{level.md => level.mdx} | 0 .../filters/{median.md => median.mdx} | 2 +- .../filters/{pixelate.md => pixelate.mdx} | 0 docs/features/geometry/{flip.md => flip.mdx} | 0 docs/features/geometry/geometry.md | 10 ++++----- .../geometry/{resize.md => resize.mdx} | 0 .../geometry/{rotate.md => rotate.mdx} | 0 ...and-rotate.md => transform-and-rotate.mdx} | 4 ++-- .../geometry/{transform.md => transform.mdx} | 0 .../{bottom-hat.md => bottom-hat.mdx} | 4 ++-- ...ge-detector.md => canny-edge-detector.mdx} | 2 +- .../morphology/{closing.md => closing.mdx} | 4 ++-- .../morphology/{dilation.md => dilation.mdx} | 2 +- .../morphology/{erosion.md => erosion.mdx} | 2 +- ...gradient.md => morphological-gradient.mdx} | 4 ++-- docs/features/morphology/morphology.md | 20 ++++++++--------- .../morphology/{opening.md => opening.mdx} | 4 ++-- .../morphology/{top-hat.md => top-hat.mdx} | 4 ++-- docs/features/operations/operations.md | 2 +- .../{threshold.md => threshold.mdx} | 0 docs/features/operations/watershed.md | 2 +- docs/getting-started.mdx | 6 ++--- .../applying-transform-function-on-images.md | 6 ++--- docs/tutorials/extracting-metadata.md | 2 +- .../image-segmentation-with-watershed.md | 18 +++++++-------- docs/tutorials/image-stack-analysis.md | 2 +- .../tutorials/{tutorials.md => tutorials.mdx} | 0 ...urring-techniques-and-their-differences.md | 4 ++-- .../{useful-tips.md => useful-tips.mdx} | 0 41 files changed, 77 insertions(+), 77 deletions(-) rename docs/basics/{basics.md => basics.mdx} (100%) rename docs/basics/{working-with-masks.md => working-with-masks.mdx} (89%) rename docs/features/{features.md => features.mdx} (100%) rename docs/features/filters/{blur.md => blur.mdx} (92%) rename docs/features/filters/{derivative.md => derivative.mdx} (100%) rename docs/features/filters/{gaussian-blur.md => gaussian-blur.mdx} (92%) rename docs/features/filters/{gradient.md => gradient.mdx} (89%) rename docs/features/filters/{grayscale.md => grayscale.mdx} (100%) rename docs/features/filters/{invert.md => invert.mdx} (97%) rename docs/features/filters/{level.md => level.mdx} (100%) rename docs/features/filters/{median.md => median.mdx} (98%) rename docs/features/filters/{pixelate.md => pixelate.mdx} (100%) rename docs/features/geometry/{flip.md => flip.mdx} (100%) rename docs/features/geometry/{resize.md => resize.mdx} (100%) rename docs/features/geometry/{rotate.md => rotate.mdx} (100%) rename docs/features/geometry/{transform-and-rotate.md => transform-and-rotate.mdx} (93%) rename docs/features/geometry/{transform.md => transform.mdx} (100%) rename docs/features/morphology/{bottom-hat.md => bottom-hat.mdx} (79%) rename docs/features/morphology/{canny-edge-detector.md => canny-edge-detector.mdx} (98%) rename 
docs/features/morphology/{closing.md => closing.mdx} (81%) rename docs/features/morphology/{dilation.md => dilation.mdx} (97%) rename docs/features/morphology/{erosion.md => erosion.mdx} (97%) rename docs/features/morphology/{morphological-gradient.md => morphological-gradient.mdx} (93%) rename docs/features/morphology/{opening.md => opening.mdx} (87%) rename docs/features/morphology/{top-hat.md => top-hat.mdx} (93%) rename docs/features/operations/{threshold.md => threshold.mdx} (100%) rename docs/tutorials/{tutorials.md => tutorials.mdx} (100%) rename docs/useful-tips/{useful-tips.md => useful-tips.mdx} (100%) diff --git a/docs/basics/basics.md b/docs/basics/basics.mdx similarity index 100% rename from docs/basics/basics.md rename to docs/basics/basics.mdx diff --git a/docs/basics/working-with-masks.md b/docs/basics/working-with-masks.mdx similarity index 89% rename from docs/basics/working-with-masks.md rename to docs/basics/working-with-masks.mdx index d8e256a..9a981d3 100644 --- a/docs/basics/working-with-masks.md +++ b/docs/basics/working-with-masks.mdx @@ -1,5 +1,5 @@ -import ThresholdMaskDemo from './demos/thresholdMask.demo.tsx' -import CannyMaskDemo from './demos/cannyEdgeMask.demo.tsx' +import ThresholdMaskDemo from './demos/thresholdMask.demo.tsx'; +import CannyMaskDemo from './demos/cannyEdgeMask.demo.tsx'; # Working with Masks @@ -23,7 +23,7 @@ const mask = new Mask(500, 500); // Creates a simple mask filled with 0s of size ### Use `threshold()` method -Another approach is to obtain a mask by using [`threshold` method](../features/operations/threshold.md 'internal link on threshold') on an image. +Another approach is to obtain a mask by using [`threshold` method](../features/operations/threshold.mdx 'internal link on threshold') on an image. ```ts const mask = image.threshold(); // returns a mask @@ -39,7 +39,7 @@ In most cases, thresholding is your go-to method to get a mask from an image. ### Use `cannyEdgeDetector()` method -There is also a third way to get a mask. It is to use [`cannyEdgeDetector` method](../features/morphology/canny-edge-detector.md 'internal link on canny edge detector'). +There is also a third way to get a mask. It is to use [`cannyEdgeDetector` method](../features/morphology/canny-edge-detector.mdx 'internal link on canny edge detector'). ```ts const mask = image.cannyEdgeDetector(); // returns a mask diff --git a/docs/basics/working-with-rois.md b/docs/basics/working-with-rois.md index c9f6984..38d14fe 100644 --- a/docs/basics/working-with-rois.md +++ b/docs/basics/working-with-rois.md @@ -4,7 +4,7 @@ _A region of interest (ROI) represents an area of contiguous pixels within the d There are currently two ways ROIs can be generated in ImageJS: -- From [masks](./working-with-masks.md 'internal link on working with mask') by identifying contiguous black or white pixels within it. +- From [masks](./working-with-masks.mdx 'internal link on working with mask') by identifying contiguous black or white pixels within it. - By identifying starting points of interest (for example by finding and filtering local extrema) and running the [watershed algorithm](../features/operations/watershed.md 'internal link on watershed') on them. - By identifying starting points of interest (for example by finding and filtering local extrema) and running the [watershed algorithm](../features/operations/watershed.md 'internal link on watershed') on them. 
diff --git a/docs/features/comparison/addition.md b/docs/features/comparison/addition.md index a7fd40e..847dc49 100644 --- a/docs/features/comparison/addition.md +++ b/docs/features/comparison/addition.md @@ -8,7 +8,7 @@ _Add two images together._ [🖼️ Image options and parameters of `add` method](https://api.image-js.org/classes/index.Image.html#add) -`add` method, opposed to [subtraction](./subtraction.md 'internal link on subtract'), takes another Image and makes an addition between each respective pixel value. +`add` method, opposed to [subtraction](./subtraction 'internal link on subtract'), takes another Image and makes an addition between each respective pixel value. It works like this: ```ts diff --git a/docs/features/features.md b/docs/features/features.mdx similarity index 100% rename from docs/features/features.md rename to docs/features/features.mdx diff --git a/docs/features/filters/blur.md b/docs/features/filters/blur.mdx similarity index 92% rename from docs/features/filters/blur.md rename to docs/features/filters/blur.mdx index 4eacb92..5a8bfae 100644 --- a/docs/features/filters/blur.md +++ b/docs/features/filters/blur.mdx @@ -2,7 +2,7 @@ sidebar_position: 30 --- -import BlurDemo from './demos/blur.demo.tsx' +import BlurDemo from './demos/blur.demo.tsx'; # Blur @@ -15,7 +15,7 @@ Blur, also known as average blur or box blur, is a simple image processing techn Box blur is particularly effective in reducing [salt-and-pepper](https://en.wikipedia.org/wiki/Salt-and-pepper_noise 'wikipedia link on salt and pepper noise') noise (random black and white pixels) and minor imperfections in an image. However, it also leads to loss of finer details, so the choice of [kernel](../../glossary.md#kernel 'glossary link on kernel') size is important. -More advanced blurring techniques, such as [Gaussian blur](./gaussian-blur.md 'internal link to gaussian blur') or [bilateral filter](https://en.wikipedia.org/wiki/Bilateral_filter 'wikipedia link on bilateral filters'), are often used for better results in various applications. +More advanced blurring techniques, such as [Gaussian blur](./gaussian-blur 'internal link to gaussian blur') or [bilateral filter](https://en.wikipedia.org/wiki/Bilateral_filter 'wikipedia link on bilateral filters'), are often used for better results in various applications. 
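+As a minimal illustration (assuming default options apart from the kernel size, which is an arbitrary 3×3 here; the full parameter list is in the API link above):
+
+```ts
+// Average each pixel with its 3x3 neighborhood (width and height must be odd).
+const blurredImage = image.blur({ width: 3, height: 3 });
+```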
### Kinds of images compatible with algorithm diff --git a/docs/features/filters/derivative.md b/docs/features/filters/derivative.mdx similarity index 100% rename from docs/features/filters/derivative.md rename to docs/features/filters/derivative.mdx diff --git a/docs/features/filters/filters.md b/docs/features/filters/filters.md index 8b59ea2..7fd227d 100644 --- a/docs/features/filters/filters.md +++ b/docs/features/filters/filters.md @@ -8,14 +8,14 @@ sidebar_position: 0 ### Methods -| Can be applied on | Images | Masks | -| ------------------------------------------------------------------------------ | ------- | -------- | -| [Invert(`invert`)](./invert.md 'internal link on invert') | ✅ | ✅ | -| [Grayscale(`grey`)](./grayscale.md 'internal link on grayscale') | ✅ | ❌ | -| [Gradient(`gradient`)](./gradient.md 'internal link on gradient') | ✅ | ❌ | -| [Derivative(`derivative`)](./derivative.md 'internal link on derivative') | ✅ | ❌ | -| [Median(`median`)](./median.md 'internal link on median') | ✅ | ❌ | -| [Pixelate(`pixelate`)](./pixelate.md 'internal link on pixelate') | ✅ | ❌ | -| [Blur(`blur`)](./blur.md 'internal link on blur') | ✅ | ❌ | -| [Gaussian(`gaussianBlur`)](./gaussian-blur.md 'internal link on gaussianBlur') | ✅ | ❌ | -| [Level(`level`)](./level.md 'internal link on level') | ✅ | ❌ | +| Can be applied on | Images | Masks | +| ------------------------------------------------------------------------------- | ------- | -------- | +| [Invert(`invert`)](./invert.mdx 'internal link on invert') | ✅ | ✅ | +| [Grayscale(`grey`)](./grayscale.mdx 'internal link on grayscale') | ✅ | ❌ | +| [Gradient(`gradient`)](./gradient.mdx 'internal link on gradient') | ✅ | ❌ | +| [Derivative(`derivative`)](./derivative.mdx 'internal link on derivative') | ✅ | ❌ | +| [Median(`median`)](./median.mdx 'internal link on median') | ✅ | ❌ | +| [Pixelate(`pixelate`)](./pixelate.mdx 'internal link on pixelate') | ✅ | ❌ | +| [Blur(`blur`)](./blur.mdx 'internal link on blur') | ✅ | ❌ | +| [Gaussian(`gaussianBlur`)](./gaussian-blur.mdx 'internal link on gaussianBlur') | ✅ | ❌ | +| [Level(`level`)](./level.mdx 'internal link on level') | ✅ | ❌ | diff --git a/docs/features/filters/gaussian-blur.md b/docs/features/filters/gaussian-blur.mdx similarity index 92% rename from docs/features/filters/gaussian-blur.md rename to docs/features/filters/gaussian-blur.mdx index 5c2d2e9..3231a2f 100644 --- a/docs/features/filters/gaussian-blur.md +++ b/docs/features/filters/gaussian-blur.mdx @@ -2,7 +2,7 @@ sidebar_position: 40 --- -import GaussianBlurDemo from './demos/gaussianBlur.demo.tsx' +import GaussianBlurDemo from './demos/gaussianBlur.demo.tsx'; # Gaussian Blur @@ -61,7 +61,7 @@ The size of the Gaussian kernel and the standard deviation parameter (which cont Here's how Gaussian blur is implemented in ImageJS: -_Kernel Definition_: The core concept of Gaussian blur involves [convolving](../../glossary.md#convolution 'glossary link on convolution') the image with a Gaussian [kernel](../../glossary.md#kernel 'glossary link on kernel'), also known as a Gaussian filter or mask. This kernel's values are arranged in a way that creates a symmetric, bell-shaped pattern around the center of the kernel to approximate Gaussian function. +_Kernel Definition_: The core concept of Gaussian blur involves [convolving](../../glossary#convolution 'glossary link on convolution') the image with a Gaussian [kernel](../../glossary.md#kernel 'glossary link on kernel'), also known as a Gaussian filter or mask. 
This kernel's values are arranged in a way that creates a symmetric, bell-shaped pattern around the center of the kernel to approximate Gaussian function. _Convolution Operation_: The Gaussian kernel is applied to the image using a convolution operation. This involves placing the kernel's center over each pixel in the image and performing element-wise multiplication of the kernel's values with the corresponding pixel values in the neighborhood. The results of these multiplications are summed up to compute the new value for the central pixel. diff --git a/docs/features/filters/gradient.md b/docs/features/filters/gradient.mdx similarity index 89% rename from docs/features/filters/gradient.md rename to docs/features/filters/gradient.mdx index 6fb7291..5dd33f9 100644 --- a/docs/features/filters/gradient.md +++ b/docs/features/filters/gradient.mdx @@ -2,7 +2,7 @@ sidebar_position: 60 --- -import GradientDemo from './demos/gradient.demo.tsx' +import GradientDemo from './demos/gradient.demo.tsx'; # Gradient @@ -49,7 +49,7 @@ Keep in mind that gradient filters can be sensitive to noise and might result in Here's how gradient filter is implemented in ImageJS: -_Grayscale Conversion_: Before applying a gradient filter, the color image is converted into [grayscale](./grayscale.md 'internal link on grayscale filter'). This simplifies the processing by reducing the image to a single channel representing pixel intensities. +_Grayscale Conversion_: Before applying a gradient filter, the color image is converted into [grayscale](./grayscale.mdx 'internal link on grayscale filter'). This simplifies the processing by reducing the image to a single channel representing pixel intensities. _Kernel Operators_: Gradient filter consists of small convolution [kernels](../../glossary.md#kernel 'glossary link on kernel'). Normally, one for detecting horizontal changes and another for vertical changes, however user might indicate only one kernel to check only one of directions. These kernels are usually 3x3 matrices of numerical weights. @@ -60,7 +60,7 @@ _Gradient Magnitude and Direction_: For each pixel, the gradient magnitude is ca _Edge Detection_: The gradient magnitude values are used to identify regions of rapid intensity change, which correspond to edges in the image. Higher gradient magnitude values indicate stronger edges. :::tip -_Thresholding_: To further refine the edges detected, a [thresholding](../operations/threshold.md 'internal link on threshold filter') step is often applied. Pixels with gradient magnitudes below a certain threshold are considered as non-edges, while those above the threshold are considered edges. This helps in reducing noise and emphasizing significant edges. +_Thresholding_: To further refine the edges detected, a [thresholding](../operations/threshold.mdx 'internal link on threshold filter') step is often applied. Pixels with gradient magnitudes below a certain threshold are considered as non-edges, while those above the threshold are considered edges. This helps in reducing noise and emphasizing significant edges. 
::: diff --git a/docs/features/filters/grayscale.md b/docs/features/filters/grayscale.mdx similarity index 100% rename from docs/features/filters/grayscale.md rename to docs/features/filters/grayscale.mdx diff --git a/docs/features/filters/invert.md b/docs/features/filters/invert.mdx similarity index 97% rename from docs/features/filters/invert.md rename to docs/features/filters/invert.mdx index 41cbb13..08f3750 100644 --- a/docs/features/filters/invert.md +++ b/docs/features/filters/invert.mdx @@ -2,7 +2,7 @@ sidebar_position: 10 --- -import InvertDemo from './demos/invert.demo.tsx' +import InvertDemo from './demos/invert.demo.tsx'; # Invert diff --git a/docs/features/filters/level.md b/docs/features/filters/level.mdx similarity index 100% rename from docs/features/filters/level.md rename to docs/features/filters/level.mdx diff --git a/docs/features/filters/median.md b/docs/features/filters/median.mdx similarity index 98% rename from docs/features/filters/median.md rename to docs/features/filters/median.mdx index ee795b6..abb75d7 100644 --- a/docs/features/filters/median.md +++ b/docs/features/filters/median.mdx @@ -2,7 +2,7 @@ sidebar_position: 50 --- -import MedianDemo from './demos/median.demo.tsx' +import MedianDemo from './demos/median.demo.tsx'; # Median diff --git a/docs/features/filters/pixelate.md b/docs/features/filters/pixelate.mdx similarity index 100% rename from docs/features/filters/pixelate.md rename to docs/features/filters/pixelate.mdx diff --git a/docs/features/geometry/flip.md b/docs/features/geometry/flip.mdx similarity index 100% rename from docs/features/geometry/flip.md rename to docs/features/geometry/flip.mdx diff --git a/docs/features/geometry/geometry.md b/docs/features/geometry/geometry.md index 0aa7e3a..ba694dd 100644 --- a/docs/features/geometry/geometry.md +++ b/docs/features/geometry/geometry.md @@ -10,9 +10,9 @@ Geometric operations in image processing involve transforming the spatial coordi | Can be applied on | Images | Masks | | --------------------------------------------------------------------------------------------------------------------------- | ------- | -------- | -| [Flip(`flip`)](./flip.md 'internal link on flip') | ✅ | ❌ | -| [Resize(`resize`)](./resize.md 'internal link on resize') | ✅ | ❌ | -| [Rotate(`rotate`)](./rotate.md 'internal link on rotate') | ✅ | ❌ | -| [Transform(`transform`)](./transform.md 'internal link on transform') | ✅ | ❌ | -| [Transform and rotate(`transformRotate`)](./transform-and-rotate 'internal link on transformRotate') | ✅ | ❌ | +| [Flip(`flip`)](./flip.mdx 'internal link on flip') | ✅ | ❌ | +| [Resize(`resize`)](./resize.mdx 'internal link on resize') | ✅ | ❌ | +| [Rotate(`rotate`)](./rotate.mdx 'internal link on rotate') | ✅ | ❌ | +| [Transform(`transform`)](./transform.mdx 'internal link on transform') | ✅ | ❌ | +| [Transform and rotate(`transformRotate`)](./transform-and-rotate.mdx 'internal link on transformRotate') | ✅ | ❌ | | [Get perspective warp matrix(`getPerspectiveWarp`)](./get-perspective-warp-matrix.md 'internal link on getPerspectiveWarp') | - | - | diff --git a/docs/features/geometry/resize.md b/docs/features/geometry/resize.mdx similarity index 100% rename from docs/features/geometry/resize.md rename to docs/features/geometry/resize.mdx diff --git a/docs/features/geometry/rotate.md b/docs/features/geometry/rotate.mdx similarity index 100% rename from docs/features/geometry/rotate.md rename to docs/features/geometry/rotate.mdx diff --git a/docs/features/geometry/transform-and-rotate.md 
b/docs/features/geometry/transform-and-rotate.mdx similarity index 93% rename from docs/features/geometry/transform-and-rotate.md rename to docs/features/geometry/transform-and-rotate.mdx index 687ae7a..88acf81 100644 --- a/docs/features/geometry/transform-and-rotate.md +++ b/docs/features/geometry/transform-and-rotate.mdx @@ -2,7 +2,7 @@ sidebar_position: 50 --- -import TransRotateDemo from './demos/transformRotate.demo.tsx' +import TransRotateDemo from './demos/transformRotate.demo.tsx'; # Transform and Rotate @@ -10,7 +10,7 @@ _Rotates an image by any angle._ [🖼️ Image options and parameters of `transformRotate` method](https://api.image-js.org/classes/index.Image.html#transformRotate) -`transformRotate` method rotates image anti-clockwise at any angle that user sets. It applies the same principle as [transform](./transform.md 'internal link on transform demo') method, but user only needs to pass a rotation angle as a parameter instead of the whole matrix. +`transformRotate` method rotates image anti-clockwise at any angle that user sets. It applies the same principle as [transform](./transform.mdx 'internal link on transform demo') method, but user only needs to pass a rotation angle as a parameter instead of the whole matrix. diff --git a/docs/features/geometry/transform.md b/docs/features/geometry/transform.mdx similarity index 100% rename from docs/features/geometry/transform.md rename to docs/features/geometry/transform.mdx diff --git a/docs/features/morphology/bottom-hat.md b/docs/features/morphology/bottom-hat.mdx similarity index 79% rename from docs/features/morphology/bottom-hat.md rename to docs/features/morphology/bottom-hat.mdx index 8f75965..31a3e49 100644 --- a/docs/features/morphology/bottom-hat.md +++ b/docs/features/morphology/bottom-hat.mdx @@ -2,7 +2,7 @@ sidebar_position: 60 --- -import BottomHatDemo from './demos/bottomHat.mask.demo.tsx' +import BottomHatDemo from './demos/bottomHat.mask.demo.tsx'; # Bottom Hat @@ -11,7 +11,7 @@ _Enhances the fine details or small objects within an image by subtracting an op [🖼️ Image options and parameters of `bottomHat` method](https://api.image-js.org/classes/index.Image.html#bottomHat) [🎭 Mask options and parameters of `bottomHat` method](https://api.image-js.org/classes/index.Mask.html#bottomHat) -Similarly to [top hat](./top-hat.md 'internal link to top hat'), [bottom hat](https://en.wikipedia.org/wiki/Top-hat_transform 'wikipedia link to top hat') operation computes the difference between two images. However, if top hat was using [opening method](./opening.md 'internal link on open method'), bottom hat is using [closing method](./closing.md 'internal link on close method'). +Similarly to [top hat](./top-hat 'internal link to top hat'), [bottom hat](https://en.wikipedia.org/wiki/Top-hat_transform 'wikipedia link to top hat') operation computes the difference between two images. However, if top hat was using [opening method](./opening 'internal link on open method'), bottom hat is using [closing method](./closing 'internal link on close method'). The purpose of bottom hat(or, as it is also called, _black-hat_) is to enhance and extract **darker** regions of the image. 
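+A minimal usage sketch, assuming the default kernel and number of iterations (the API links above list every option):
+
+```ts
+// Subtracts the original image from its closing to highlight small dark details.
+const darkDetails = image.bottomHat();
+```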
diff --git a/docs/features/morphology/canny-edge-detector.md b/docs/features/morphology/canny-edge-detector.mdx similarity index 98% rename from docs/features/morphology/canny-edge-detector.md rename to docs/features/morphology/canny-edge-detector.mdx index cde3219..b69e61b 100644 --- a/docs/features/morphology/canny-edge-detector.md +++ b/docs/features/morphology/canny-edge-detector.mdx @@ -2,7 +2,7 @@ sidebar_position: 80 --- -import CannyEdgeDemo from './demos/cannyEdgeDetector.demo.tsx' +import CannyEdgeDemo from './demos/cannyEdgeDetector.demo.tsx'; # Canny Edge Detector diff --git a/docs/features/morphology/closing.md b/docs/features/morphology/closing.mdx similarity index 81% rename from docs/features/morphology/closing.md rename to docs/features/morphology/closing.mdx index a4f93d2..0faa735 100644 --- a/docs/features/morphology/closing.md +++ b/docs/features/morphology/closing.mdx @@ -2,7 +2,7 @@ sidebar_position: 40 --- -import CloseDemo from './demos/close.mask.demo.tsx' +import CloseDemo from './demos/close.mask.demo.tsx'; # Closing @@ -11,7 +11,7 @@ _Combines a dilation filter followed by an erosion filter._ [🖼️ Image options and parameters of `close` method](https://api.image-js.org/classes/index.Image.html#close) [🎭 Mask options and parameters of `close` method](https://api.image-js.org/classes/index.Mask.html#close) -Opposed to [opening](./opening.md 'internal link to open method'), [closing process]( 'wikipedia link on closing') first [erodes](./erosion.md 'internal link to erode method') an image and only then [dilates](./dilation.md 'internal link to dilate method') it. +Opposed to [opening](./opening.mdx 'internal link to open method'), [closing process]( 'wikipedia link on closing') first [erodes](./erosion 'internal link to erode method') an image and only then [dilates](./dilation 'internal link to dilate method') it. It is a useful process for filling small holes in the image, while preserving the shape and size of large holes and objects. 
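+For example, with default options (kernel and iteration settings are described in the API links above):
+
+```ts
+// Fills small holes and gaps while keeping larger structures intact.
+const closedImage = image.close();
+```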
diff --git a/docs/features/morphology/dilation.md b/docs/features/morphology/dilation.mdx similarity index 97% rename from docs/features/morphology/dilation.md rename to docs/features/morphology/dilation.mdx index 2c8028c..ba075f3 100644 --- a/docs/features/morphology/dilation.md +++ b/docs/features/morphology/dilation.mdx @@ -2,7 +2,7 @@ sidebar_position: 20 --- -import DilateDemo from './demos/dilate.mask.demo.tsx' +import DilateDemo from './demos/dilate.mask.demo.tsx'; # Dilation diff --git a/docs/features/morphology/erosion.md b/docs/features/morphology/erosion.mdx similarity index 97% rename from docs/features/morphology/erosion.md rename to docs/features/morphology/erosion.mdx index c0a2db2..b856dff 100644 --- a/docs/features/morphology/erosion.md +++ b/docs/features/morphology/erosion.mdx @@ -2,7 +2,7 @@ sidebar_position: 10 --- -import ErodeDemo from './demos/erode.mask.demo.tsx' +import ErodeDemo from './demos/erode.mask.demo.tsx'; # Erosion diff --git a/docs/features/morphology/morphological-gradient.md b/docs/features/morphology/morphological-gradient.mdx similarity index 93% rename from docs/features/morphology/morphological-gradient.md rename to docs/features/morphology/morphological-gradient.mdx index 0b40557..0dac5a9 100644 --- a/docs/features/morphology/morphological-gradient.md +++ b/docs/features/morphology/morphological-gradient.mdx @@ -2,7 +2,7 @@ sidebar_position: 70 --- -import MorphGradientDemo from './demos/morphologicalGradient.mask.demo.tsx' +import MorphGradientDemo from './demos/morphologicalGradient.mask.demo.tsx'; # Morphological Gradient @@ -13,7 +13,7 @@ _Emphasizes the boundaries of objects in a binary or grayscale image by calculat [The morphological gradient](https://en.wikipedia.org/wiki/Morphological_gradient 'wikipedia link on morphological gradient') is a mathematical operation used in image processing and mathematical morphology to highlight the boundaries of objects or regions within an image. It is a fundamental concept in morphological image analysis and is often used for tasks such as edge detection and image segmentation. -The morphological gradient is based on the difference between an image after [dilation](./dilation.md 'internal link on dilation') and the same image after [erosion](./erosion.md 'internal link on erosion'). +The morphological gradient is based on the difference between an image after [dilation](./dilation 'internal link on dilation') and the same image after [erosion](./erosion 'internal link on erosion'). 
### Applying morphological gradient on Images: diff --git a/docs/features/morphology/morphology.md b/docs/features/morphology/morphology.md index 7be8cab..973077f 100644 --- a/docs/features/morphology/morphology.md +++ b/docs/features/morphology/morphology.md @@ -10,13 +10,13 @@ Morphological operations are simple yet powerful tools that play a significant r ### Methods -| Can be applied on | Images | Masks | -| ----------------------------------------------------------------------------------------------------------------------- | ------- | -------- | -| [Morphological gradient(`morphologicalGradient`)](./morphological-gradient.md 'internal link on morphologicalGradient') | ✅ | ✅ | -| [Canny edge detector(`cannyEdgeDetector`)](./canny-edge-detector.md 'internal link on cannyEdgeDetector') | ✅ | ❌ | -| [Erosion(`erode`)](./erosion.md 'internal link on erode') | ✅ | ✅ | -| [Dilation(`dilate`)](./dilation.md 'internal link on dilate') | ✅ | ✅ | -| [Opening(`open`)](./opening.md 'internal link on open') | ✅ | ✅ | -| [Closing(`close`)](./closing.md 'internal link on close') | ✅ | ✅ | -| [Top Hat(`topHat`)](./top-hat 'internal link on topHat') | ✅ | ✅ | -| [Bottom Hat(`bottomHat`)](./bottom-hat 'internal link on bottomHat') | ✅ | ✅ | +| Can be applied on | Images | Masks | +| ------------------------------------------------------------------------------------------------------------------------ | ------- | -------- | +| [Morphological gradient(`morphologicalGradient`)](./morphological-gradient.mdx 'internal link on morphologicalGradient') | ✅ | ✅ | +| [Canny edge detector(`cannyEdgeDetector`)](./canny-edge-detector.mdx 'internal link on cannyEdgeDetector') | ✅ | ❌ | +| [Erosion(`erode`)](./erosion.mdx 'internal link on erode') | ✅ | ✅ | +| [Dilation(`dilate`)](./dilation.mdx 'internal link on dilate') | ✅ | ✅ | +| [Opening(`open`)](./opening.mdx 'internal link on open') | ✅ | ✅ | +| [Closing(`close`)](./closing.mdx 'internal link on close') | ✅ | ✅ | +| [Top Hat(`topHat`)](./top-hat.mdx 'internal link on topHat') | ✅ | ✅ | +| [Bottom Hat(`bottomHat`)](./bottom-hat.mdx 'internal link on bottomHat') | ✅ | ✅ | diff --git a/docs/features/morphology/opening.md b/docs/features/morphology/opening.mdx similarity index 87% rename from docs/features/morphology/opening.md rename to docs/features/morphology/opening.mdx index c63aa89..ca0bf93 100644 --- a/docs/features/morphology/opening.md +++ b/docs/features/morphology/opening.mdx @@ -2,7 +2,7 @@ sidebar_position: 30 --- -import OpenDemo from './demos/open.mask.demo.tsx' +import OpenDemo from './demos/open.mask.demo.tsx'; # Opening @@ -11,7 +11,7 @@ _Combines an erosion filter followed by a dilation filter._ [🖼️ Image options and parameters of `open` method](https://api.image-js.org/classes/index.Image.html#open) [🎭 Mask options and parameters of `open` method](https://api.image-js.org/classes/index.Mask.html#open) -[Opening]() process in morphology involves a dilation of an image, followed by its erosion. +[Opening]() process in morphology involves a [dilation](./dilation 'internal link to dilate method') of an image, followed by its [erosion](./erosion 'internal link to erode method'). This process allows removing small objects and thin lines while preserving the shape and size of larger objects. 
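+A short example using the default options (see the API links above for kernel and iteration parameters):
+
+```ts
+// Removes small specks and thin lines while preserving larger objects.
+const openedImage = image.open();
+```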
diff --git a/docs/features/morphology/top-hat.md b/docs/features/morphology/top-hat.mdx similarity index 93% rename from docs/features/morphology/top-hat.md rename to docs/features/morphology/top-hat.mdx index 3239636..d3d5137 100644 --- a/docs/features/morphology/top-hat.md +++ b/docs/features/morphology/top-hat.mdx @@ -2,7 +2,7 @@ sidebar_position: 50 --- -import TopHatDemo from './demos/topHat.mask.demo.tsx' +import TopHatDemo from './demos/topHat.mask.demo.tsx'; # Top Hat @@ -12,7 +12,7 @@ _Enhances the fine details or small objects within an image by subtracting an op [🎭 Mask options and parameters of `topHat` method](https://api.image-js.org/classes/index.Mask.html#topHat) In morphology and image processing, [Top Hat](https://en.wikipedia.org/wiki/Top-hat_transform 'wikipedia link on top hat') is an operation used to enhance or extract small bright regions or details from an image while suppressing the larger surrounding structures. -It is the result of subtraction between the result of input image [opening](./opening.md 'internal link on open method') and the input image itself. +It is the result of subtraction between the result of input image [opening](./opening 'internal link on open method') and the input image itself. The purpose of bottom hat(or as it is also called _black-hat_) is to enhance and extract **brighter** regions of the image. diff --git a/docs/features/operations/operations.md b/docs/features/operations/operations.md index 568f75e..3e0b1a2 100644 --- a/docs/features/operations/operations.md +++ b/docs/features/operations/operations.md @@ -13,7 +13,7 @@ For instance, threshold is used to go from Image to Mask, while watershed is use | ----------------------------------------------------------------------------------- | -------- | -------- | | [Get extrema(`getExtrema`)](./get-extrema.md 'internal link on getExtrema') | ✅ | ❌ | | [Filter points(`filterPoints`)](./remove-points.md 'internal link on filterPoints') | ✅ | ❌ | -| [Threshold(`threshold`)](./threshold.md 'internal link on threshold') | ✅ | ❌ | +| [Threshold(`threshold`)](./threshold.mdx 'internal link on threshold') | ✅ | ❌ | | [Watershed(`waterShed`)](./watershed.md 'internal link on watershed') | ✅ | ❌ | | [Clear border(`clearBorder`)](./clear-border.md 'internal link on clear border') | ❌ | ✅ | | [Paint mask(`paintMask`)](./paint-mask.md 'internal link on paint mask') | ✅ | ✅ | diff --git a/docs/features/operations/threshold.md b/docs/features/operations/threshold.mdx similarity index 100% rename from docs/features/operations/threshold.md rename to docs/features/operations/threshold.mdx diff --git a/docs/features/operations/watershed.md b/docs/features/operations/watershed.md index 3149f3e..60a1e4f 100644 --- a/docs/features/operations/watershed.md +++ b/docs/features/operations/watershed.md @@ -10,7 +10,7 @@ _Separates and identifies distinct regions or objects within an image through gr [Watershed filter]( 'wikipedia link on watershed') is a way of identifying objects by finding image's extreme points (minima or maxima) in terms of intensity and filling these spaces with color (label). The process reminds geological [watershed](https://en.wikipedia.org/wiki/Drainage_divide 'wikipedia link on drainage divide'), which is the origin of algorithm's name. In order for the "water" not to go overboard and stay within the limits of the region, these limits must be set. -There are two ways to do so. 
One way is to limit the [intensity](../../glossary.md#intensity 'glossary link on intensity') by threshold value. Another way is to apply a mask which can set the area where watershed will be implemented. +There are two ways to do so. One way is to limit the [intensity](../../glossary#intensity 'glossary link on intensity') by threshold value. Another way is to apply a mask which can set the area where watershed will be implemented. The watershed algorithm is particularly useful for segmenting objects in images, especially when objects are close to each other. diff --git a/docs/getting-started.mdx b/docs/getting-started.mdx index b50bf2f..55ee6a9 100644 --- a/docs/getting-started.mdx +++ b/docs/getting-started.mdx @@ -97,12 +97,12 @@ image = image.grey(); ``` :::info -To see more methods, visit the ["Features"](./features/features.md) category. +To see more methods, visit the ["Features"](./features/features.mdx) category. ::: ## What's next? Now that you know how images are loaded and saved, you can deepen your understanding by going through the [Basics](./basics) category and seeing how different basic elements of ImageJS work. -You can also broaden your horizons by looking at available [Features](./features). +You can also broaden your horizons by looking at available [Features](./features/features.mdx). -If you want to see how ImageJS works in practice, we suggest you visit the [Tutorials](./tutorials) segment and see for yourself its practical applications. +If you want to see how ImageJS works in practice, we suggest you visit the [Tutorials](./tutorials/tutorials.mdx) segment and see for yourself its practical applications. diff --git a/docs/tutorials/applying-transform-function-on-images.md b/docs/tutorials/applying-transform-function-on-images.md index 329e1a6..f83577f 100644 --- a/docs/tutorials/applying-transform-function-on-images.md +++ b/docs/tutorials/applying-transform-function-on-images.md @@ -123,7 +123,7 @@ const stretchedImage = image.transform(transformationMatrix); ![Stretched image](./images/transformations/lennaStretched.png) :::note -ImageJS also has [`resize`](../features/geometry/resize.md) function that allows to scale an image. +ImageJS also has [`resize`](../features/geometry/resize.mdx) function that allows to scale an image. The current tutorial just demonstrates the basic principle behind transformation of such kind. ::: @@ -162,7 +162,7 @@ const flippedImage = image.transform(flipMatrix); ![Flipped image](./images/transformations/lennaFlipped.png) :::note -ImageJS also has [`flip`](../features/geometry/flip.md) function that allows to flip an image. +ImageJS also has [`flip`](../features/geometry/flip.mdx) function that allows to flip an image. Current tutorial just demonstrates the basic principle behind transformation of such kind. ::: @@ -302,7 +302,7 @@ const rotateAroundCenterImage = image.transform( ![Rotated by center image](./images/transformations/lennaRotatedCenter.png) :::note -Image-js also has [`rotate()`](../features/geometry/rotate.md) and [`transformRotate()`](../features/geometry/transform-and-rotate.md) functions. `rotate()` function allows rotating an image by multiple of 90 degrees. +Image-js also has [`rotate()`](../features/geometry/rotate.mdx) and [`transformRotate()`](../features/geometry/transform-and-rotate.mdx) functions. `rotate()` function allows rotating an image by multiple of 90 degrees. `transformRotate()` allows rotating an image by any degree. It also allows choosing the axe of rotation. 
So, for rotation, you have other functions that allow you to perform it. ::: diff --git a/docs/tutorials/extracting-metadata.md b/docs/tutorials/extracting-metadata.md index 45e315f..cf2411b 100644 --- a/docs/tutorials/extracting-metadata.md +++ b/docs/tutorials/extracting-metadata.md @@ -4,7 +4,7 @@ In this tutorial we will discuss metadata extraction with an image that we alrea ![Particles](./images/roiAnalysis/particles.jpg) -[Metadata](../glossary.md#metadata 'glossary link on metadata') represents information about various aspects of an image itself. It can be something basic such as the date when an image was taken, or something more specific like the name of the camera that the image was taken by. You can extract metadata tags that can provide additional information about an image by using this command: +[Metadata](../glossary#metadata 'glossary link on metadata') represents information about various aspects of an image itself. It can be something basic such as the date when an image was taken, or something more specific like the name of the camera that the image was taken by. You can extract metadata tags that can provide additional information about an image by using this command: ```ts const meta = image.meta; diff --git a/docs/tutorials/image-segmentation-with-watershed.md b/docs/tutorials/image-segmentation-with-watershed.md index 1fafdfe..0058f81 100644 --- a/docs/tutorials/image-segmentation-with-watershed.md +++ b/docs/tutorials/image-segmentation-with-watershed.md @@ -9,7 +9,7 @@ Watershed algorithm is an advanced image segmentation technique to identify obje ![Input image](./images/watershed/input.jpg) -First you must have a grayscale image. If this is not the case, use [`grey()`](../features/filters/grayscale.md 'internal link on grayscale') method to grayscale it. Then blur the image. The choice of a blurring technique depends on what kind of image is to blur, but regular blur will do. Be careful while setting the kernel size. If it gets too big, objects' edges and minor details start to deteriorate. +First you must have a grayscale image. If this is not the case, use [`grey()`](../features/filters/grayscale.mdx 'internal link on grayscale') method to grayscale it. Then blur the image. The choice of a blurring technique depends on what kind of image is to blur, but regular blur will do. Be careful while setting the kernel size. If it gets too big, objects' edges and minor details start to deteriorate. After that, a threshold needs to be defined. It can be defined as an arbitrary value, but we recommend to compute a threshold mask from the image of interest. Result can vary from one threshold algorithm to another so take a look at a few of them to see which one fits your needs. @@ -48,7 +48,7 @@ Below you will find a detailed review of all the steps. ## Why is watershed necessary? -[Threshold](../features/operations/threshold.md 'internal link on threshold') is a great segmentation tool for finding objects, but it works only if objects are clearly separated from each other. +[Threshold](../features/operations/threshold.mdx 'internal link on threshold') is a great segmentation tool for finding objects, but it works only if objects are clearly separated from each other. Sometimes objects can be too close to each other and the binary image takes it as a giant region of interest, which is not the desired result. 
@@ -95,7 +95,7 @@ Before starting, check the [color model](../glossary.md#color-model 'glossary li let image = image.grey(); ``` -You can take a look at different types of grayscale algorithm on [grayscale page](../features/filters/grayscale.md 'internal link on grayscale') in our "Features" section, but a default grayscale should be enough, since the important aspect is for an image to have only one channel. +You can take a look at different types of grayscale algorithm on [grayscale page](../features/filters/grayscale.mdx 'internal link on grayscale') in our "Features" section, but a default grayscale should be enough, since the important aspect is for an image to have only one channel. ::: ## Blurring @@ -104,11 +104,11 @@ First thing that you possibly need to do is to remove [image noise](https://en.w ImageJS has several kinds of blurring: -- [blur filter](../features/filters/blur.md 'internal link on blur') +- [blur filter](../features/filters/blur.mdx'internal link on blur') -- [gaussian blur filter](../features/filters/gaussian-blur.md 'internal link on gaussian blur') +- [gaussian blur filter](../features/filters/gaussian-blur.mdx 'internal link on gaussian blur') -- [median filter](../features/filters/median.md 'internal link on median') +- [median filter](../features/filters/median.mdx 'internal link on median') Each filter serves its own purpose, which we will briefly explain. @@ -121,7 +121,7 @@ To use it you need to specify width and height of the kernel: let blurredImage = image.blur({ width: 3, height: 3 }); ``` -To discover more options you can visit our ["Features"](../features/features.md 'internal link on features main page') section about [blur](../features/filters/blur.md 'internal link on blur'). +To discover more options you can visit our ["Features"](../features/features.mdx 'internal link on features main page') section about [blur](../features/filters/blur.mdx 'internal link on blur'). #### Gaussian blur @@ -132,7 +132,7 @@ To use it you need to specify the size of the kernel. This is one of the ways of let blurredImage = image.gaussianBlur({ sigma: 3 }); ``` -To discover more options you can visit our ["Features"](../features/features.md 'internal link on features main page') section about [gaussian blur](../features/filters/gaussian-blur.md 'internal link on gaussian blur'). +To discover more options you can visit our ["Features"](../features/features.mdx 'internal link on features main page') section about [gaussian blur](../features/filters/gaussian-blur.mdx 'internal link on gaussian blur'). #### Median @@ -146,7 +146,7 @@ let blurredImage = image.medianFilter({ }); ``` -To discover more options you can visit our ["Features"](../features/features.md 'internal link on features main page') section about [median filter](../features/filters/median.md 'internal link on median'). +To discover more options you can visit our ["Features"](../features/features.mdx 'internal link on features main page') section about [median filter](../features/filters/median.mdx 'internal link on median'). :::caution For each technique, kernel size must be an odd number in order for algorithm to find the center correctly! diff --git a/docs/tutorials/image-stack-analysis.md b/docs/tutorials/image-stack-analysis.md index daca0c9..5e50767 100644 --- a/docs/tutorials/image-stack-analysis.md +++ b/docs/tutorials/image-stack-analysis.md @@ -61,7 +61,7 @@ const stack = decodeStack(buffer); ``` :::warning -`Stack` class works only with images that share same properties. 
Particularly, values for [bit depth](../glossary.md#bit-depth 'internal link on bit depth'), [color model](../glossary.md#color-model 'internal link on color model'), width and height must be the same. +`Stack` class works only with images that share same properties. Particularly, values for [bit depth](../glossary#bit-depth 'internal link on bit depth'), [color model](../glossary#color-model 'internal link on color model'), width and height must be the same. ::: ## Find the image with maximum values: diff --git a/docs/tutorials/tutorials.md b/docs/tutorials/tutorials.mdx similarity index 100% rename from docs/tutorials/tutorials.md rename to docs/tutorials/tutorials.mdx diff --git a/docs/useful-tips/blurring-techniques-and-their-differences.md b/docs/useful-tips/blurring-techniques-and-their-differences.md index 2ac507c..d39eb52 100644 --- a/docs/useful-tips/blurring-techniques-and-their-differences.md +++ b/docs/useful-tips/blurring-techniques-and-their-differences.md @@ -4,7 +4,7 @@ If you looked at some of our tutorials, you might have noticed that we apply a b ## Blur -To be precise blur is a general term that refers to a reduction in the sharpness or clarity of an image. It also works to reduce some of the noise, such as [gaussian noise](https://en.wikipedia.org/wiki/Gaussian_noise#:~:text=In%20signal%20processing%20theory%2C%20Gaussian,can%20take%20are%20Gaussian%2Ddistributed. 'wikipedia link on gaussian noise') for example. In ImageJS blur is actually a box blur or mean blur. It is a filter that uses convolution matrix to calculate an average among the surrounding pixels which are within the transformation matrix ([kernel](../glossary.md#kernel 'glossary link on kernel')) and then applies this value. +To be precise blur is a general term that refers to a reduction in the sharpness or clarity of an image. It also works to reduce some of the noise, such as [gaussian noise](https://en.wikipedia.org/wiki/Gaussian_noise#:~:text=In%20signal%20processing%20theory%2C%20Gaussian,can%20take%20are%20Gaussian%2Ddistributed. 'wikipedia link on gaussian noise') for example. In ImageJS blur is actually a box blur or mean blur. It is a filter that uses convolution matrix to calculate an average among the surrounding pixels which are within the transformation matrix ([kernel](../glossary#kernel 'glossary link on kernel')) and then applies this value. ![Convolution process](./images/blurring/2D_Convolution_Animation.gif) @@ -23,7 +23,7 @@ The idea is that the closer you are to the pixel in check, the more weight it wi The main parameter of gaussian blur is called "sigma" and it is responsible for the width of the gaussian bell curve, therefore it controls the overall smoothness of the end result. -Gaussian blur is a good preparatory tool for edge detection. Edge detection's algorithms are sensitive to noise and small details so blur smoothens them. For instance here is the example of a [Canny Edge detector](../features/morphology/canny-edge-detector.md 'internal link on canny edge detector') with and without gaussian blur: +Gaussian blur is a good preparatory tool for edge detection. Edge detection's algorithms are sensitive to noise and small details so blur smoothens them. 
For instance here is the example of a [Canny Edge detector](../features/morphology/canny-edge-detector 'internal link on canny edge detector') with and without gaussian blur: ![Edge detection with gaussian](./images/blurring/edgesWithBlurs.png) diff --git a/docs/useful-tips/useful-tips.md b/docs/useful-tips/useful-tips.mdx similarity index 100% rename from docs/useful-tips/useful-tips.md rename to docs/useful-tips/useful-tips.mdx From 426c60b3357ac1ba7dc237c9f201decc9d2cae6c Mon Sep 17 00:00:00 2001 From: EscapedGibbon Date: Tue, 30 Sep 2025 13:34:53 +0200 Subject: [PATCH 2/5] Merge remote-tracking branch 'origin' into 151-mdx-files-should-have-mdx-extension --- docs/basics/working-with-images.md | 21 +++++++++++++-------- project-words.txt | 3 ++- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/docs/basics/working-with-images.md b/docs/basics/working-with-images.md index 6aea935..cce6a9d 100644 --- a/docs/basics/working-with-images.md +++ b/docs/basics/working-with-images.md @@ -8,14 +8,19 @@ In the context of digital technology and computing, images are represented as a - Currently ImageJS supports images with these characteristics: -| | TIFF | JPEG | PNG | BMP | -| -------------------------------- | ---------------- | ------- | ---------------- | -------- | -| **Bits per channel** | 8 or 16 bits | 8 bits | 8 or 16 bits | 1 bit | -| **Alpha channel** | yes | no | yes | no | -| **Lossy compression** | can be either | yes | no | no | -| **Color Model** | RGB or grayscale | RGB | RGB or grayscale | N/A | -| **Can be loaded in this format** | ✅ | ✅ | ✅ | ❌ | -| **Can be saved in this format** | ❌ | ✅ | ✅ | ✅ | +| | TIFF | JPEG | PNG[^1] | BMP | +| -------------------------------- | --------------------------- | -------- | --------------------------- | --------------------------- | +| **Can be loaded in this format** | ✅ | ✅ | ✅ | ✅ | +| **Can be saved in this format** | ❌ | ✅ | ✅ | ✅ | +| **Bits per channel** | 1, 8 or 16 bits | 8 bits | 1, 2, 4, 8 or 16 bits | 1 or 8 bits | +| **Alpha channel** | ✅ | ❌ | ✅ | ✅ | +| **Palette images** | ✅ | ❌ | ✅ | ❌ | +| **Lossy compression** | can be either | ✅ | ❌ | ❌ | +| **Color Model** | Binary[^2],RGB or grayscale | RGB | Binary[^2],RGB or grayscale | Binary[^2],RGB or grayscale | + +[^1]: ImageJS can also **decode** [APNG images](https://en.wikipedia.org/wiki/APNG). + +[^2]: While binary images can be decoded, for technical reasons image is decoded as a grayscale image. 
### Image coordinates diff --git a/project-words.txt b/project-words.txt index ddc22c6..5404a8d 100644 --- a/project-words.txt +++ b/project-words.txt @@ -38,4 +38,5 @@ Dssim mssim GREYA Polylines -anonymization \ No newline at end of file +anonymization +APNG \ No newline at end of file From 356afc1d2808596eeb04cdd3cfd53aaf592eb817 Mon Sep 17 00:00:00 2001 From: EscapedGibbon Date: Tue, 30 Sep 2025 14:08:16 +0200 Subject: [PATCH 3/5] docs: finalize change of extension --- docs/tutorials/extracting-metadata.md | 2 +- docs/tutorials/image-segmentation-with-watershed.md | 2 +- docs/tutorials/image-stack-analysis.md | 2 +- docs/useful-tips/blurring-techniques-and-their-differences.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/tutorials/extracting-metadata.md b/docs/tutorials/extracting-metadata.md index cf2411b..45e315f 100644 --- a/docs/tutorials/extracting-metadata.md +++ b/docs/tutorials/extracting-metadata.md @@ -4,7 +4,7 @@ In this tutorial we will discuss metadata extraction with an image that we alrea ![Particles](./images/roiAnalysis/particles.jpg) -[Metadata](../glossary#metadata 'glossary link on metadata') represents information about various aspects of an image itself. It can be something basic such as the date when an image was taken, or something more specific like the name of the camera that the image was taken by. You can extract metadata tags that can provide additional information about an image by using this command: +[Metadata](../glossary.md#metadata 'glossary link on metadata') represents information about various aspects of an image itself. It can be something basic such as the date when an image was taken, or something more specific like the name of the camera that the image was taken by. You can extract metadata tags that can provide additional information about an image by using this command: ```ts const meta = image.meta; diff --git a/docs/tutorials/image-segmentation-with-watershed.md b/docs/tutorials/image-segmentation-with-watershed.md index 0058f81..55931e0 100644 --- a/docs/tutorials/image-segmentation-with-watershed.md +++ b/docs/tutorials/image-segmentation-with-watershed.md @@ -104,7 +104,7 @@ First thing that you possibly need to do is to remove [image noise](https://en.w ImageJS has several kinds of blurring: -- [blur filter](../features/filters/blur.mdx'internal link on blur') +- [blur filter](../features/filters/blur.mdx 'internal link on blur') - [gaussian blur filter](../features/filters/gaussian-blur.mdx 'internal link on gaussian blur') diff --git a/docs/tutorials/image-stack-analysis.md b/docs/tutorials/image-stack-analysis.md index 5e50767..bb10483 100644 --- a/docs/tutorials/image-stack-analysis.md +++ b/docs/tutorials/image-stack-analysis.md @@ -61,7 +61,7 @@ const stack = decodeStack(buffer); ``` :::warning -`Stack` class works only with images that share same properties. Particularly, values for [bit depth](../glossary#bit-depth 'internal link on bit depth'), [color model](../glossary#color-model 'internal link on color model'), width and height must be the same. +`Stack` class works only with images that share same properties. Particularly, values for [bit depth](../glossary#bit-depth 'internal link on bit depth'), [color model](../glossary.md#color-model 'internal link on color model'), width and height must be the same. 
::: ## Find the image with maximum values: diff --git a/docs/useful-tips/blurring-techniques-and-their-differences.md b/docs/useful-tips/blurring-techniques-and-their-differences.md index d39eb52..12c8cb1 100644 --- a/docs/useful-tips/blurring-techniques-and-their-differences.md +++ b/docs/useful-tips/blurring-techniques-and-their-differences.md @@ -4,7 +4,7 @@ If you looked at some of our tutorials, you might have noticed that we apply a b ## Blur -To be precise blur is a general term that refers to a reduction in the sharpness or clarity of an image. It also works to reduce some of the noise, such as [gaussian noise](https://en.wikipedia.org/wiki/Gaussian_noise#:~:text=In%20signal%20processing%20theory%2C%20Gaussian,can%20take%20are%20Gaussian%2Ddistributed. 'wikipedia link on gaussian noise') for example. In ImageJS blur is actually a box blur or mean blur. It is a filter that uses convolution matrix to calculate an average among the surrounding pixels which are within the transformation matrix ([kernel](../glossary#kernel 'glossary link on kernel')) and then applies this value. +To be precise blur is a general term that refers to a reduction in the sharpness or clarity of an image. It also works to reduce some of the noise, such as [gaussian noise](https://en.wikipedia.org/wiki/Gaussian_noise#:~:text=In%20signal%20processing%20theory%2C%20Gaussian,can%20take%20are%20Gaussian%2Ddistributed. 'wikipedia link on gaussian noise') for example. In ImageJS blur is actually a box blur or mean blur. It is a filter that uses convolution matrix to calculate an average among the surrounding pixels which are within the transformation matrix ([kernel](../glossary.md#kernel 'glossary link on kernel')) and then applies this value. ![Convolution process](./images/blurring/2D_Convolution_Animation.gif) @@ -23,7 +23,7 @@ The idea is that the closer you are to the pixel in check, the more weight it wi The main parameter of gaussian blur is called "sigma" and it is responsible for the width of the gaussian bell curve, therefore it controls the overall smoothness of the end result. -Gaussian blur is a good preparatory tool for edge detection. Edge detection's algorithms are sensitive to noise and small details so blur smoothens them. For instance here is the example of a [Canny Edge detector](../features/morphology/canny-edge-detector 'internal link on canny edge detector') with and without gaussian blur: +Gaussian blur is a good preparatory tool for edge detection. Edge detection's algorithms are sensitive to noise and small details so blur smoothens them. 
For instance here is the example of a [Canny Edge detector](../features/morphology/canny-edge-detector.mdx 'internal link on canny edge detector') with and without gaussian blur: ![Edge detection with gaussian](./images/blurring/edgesWithBlurs.png) From 1bf9d38b32f9ec955ac0cd78e2c6bdd283544f3a Mon Sep 17 00:00:00 2001 From: EscapedGibbon Date: Tue, 30 Sep 2025 14:16:00 +0200 Subject: [PATCH 4/5] docs: fix prettier errors --- docs/features/filters/derivative.mdx | 2 +- docs/features/filters/grayscale.mdx | 2 +- docs/features/filters/level.mdx | 2 +- docs/features/filters/pixelate.mdx | 2 +- docs/features/geometry/flip.mdx | 2 +- docs/features/geometry/resize.mdx | 2 +- docs/features/geometry/rotate.mdx | 2 +- docs/features/geometry/transform.mdx | 2 +- docs/features/operations/threshold.mdx | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/features/filters/derivative.mdx b/docs/features/filters/derivative.mdx index 2ff0bff..f8cd45d 100644 --- a/docs/features/filters/derivative.mdx +++ b/docs/features/filters/derivative.mdx @@ -2,7 +2,7 @@ sidebar_position: 70 --- -import DerivativeDemo from './demos//derivative.demo.tsx' +import DerivativeDemo from './demos//derivative.demo.tsx'; # Derivative diff --git a/docs/features/filters/grayscale.mdx b/docs/features/filters/grayscale.mdx index df5371d..ad9e500 100644 --- a/docs/features/filters/grayscale.mdx +++ b/docs/features/filters/grayscale.mdx @@ -2,7 +2,7 @@ sidebar_position: 20 --- -import GrayDemo from './demos/grayscale.demo.tsx' +import GrayDemo from './demos/grayscale.demo.tsx'; # Grayscale diff --git a/docs/features/filters/level.mdx b/docs/features/filters/level.mdx index 2d5799a..66d5071 100644 --- a/docs/features/filters/level.mdx +++ b/docs/features/filters/level.mdx @@ -2,7 +2,7 @@ sidebar_position: 80 --- -import LevelDemo from './demos/level.demo.tsx' +import LevelDemo from './demos/level.demo.tsx'; # Level diff --git a/docs/features/filters/pixelate.mdx b/docs/features/filters/pixelate.mdx index 4aba0e8..57011a6 100644 --- a/docs/features/filters/pixelate.mdx +++ b/docs/features/filters/pixelate.mdx @@ -2,7 +2,7 @@ sidebar_position: 90 --- -import PixelateDemo from './demos/pixelate.demo.tsx' +import PixelateDemo from './demos/pixelate.demo.tsx'; # Pixelate diff --git a/docs/features/geometry/flip.mdx b/docs/features/geometry/flip.mdx index 71b243b..618dcdc 100644 --- a/docs/features/geometry/flip.mdx +++ b/docs/features/geometry/flip.mdx @@ -2,7 +2,7 @@ sidebar_position: 30 --- -import FlipDemo from './demos/flip.demo.tsx' +import FlipDemo from './demos/flip.demo.tsx'; # Flip diff --git a/docs/features/geometry/resize.mdx b/docs/features/geometry/resize.mdx index 684ecee..d34e420 100644 --- a/docs/features/geometry/resize.mdx +++ b/docs/features/geometry/resize.mdx @@ -2,7 +2,7 @@ sidebar_position: 10 --- -import ResizeDemo from './demos/resize.demo.tsx' +import ResizeDemo from './demos/resize.demo.tsx'; # Resize diff --git a/docs/features/geometry/rotate.mdx b/docs/features/geometry/rotate.mdx index 04d6254..4f1928e 100644 --- a/docs/features/geometry/rotate.mdx +++ b/docs/features/geometry/rotate.mdx @@ -2,7 +2,7 @@ sidebar_position: 20 --- -import RotateDemo from './demos/rotate.demo.tsx' +import RotateDemo from './demos/rotate.demo.tsx'; # Rotate diff --git a/docs/features/geometry/transform.mdx b/docs/features/geometry/transform.mdx index 684c2d2..7b02259 100644 --- a/docs/features/geometry/transform.mdx +++ b/docs/features/geometry/transform.mdx @@ -2,7 +2,7 @@ sidebar_position: 40 --- -import 
TransformDemo from './demos/transform.demo.tsx' +import TransformDemo from './demos/transform.demo.tsx'; # Transform diff --git a/docs/features/operations/threshold.mdx b/docs/features/operations/threshold.mdx index 8e1da66..01e058e 100644 --- a/docs/features/operations/threshold.mdx +++ b/docs/features/operations/threshold.mdx @@ -2,7 +2,7 @@ sidebar_position: 10 --- -import ThresholdDemo from './demos/threshold.demo.tsx' +import ThresholdDemo from './demos/threshold.demo.tsx'; # Threshold From 62630de786a7b1549dd8cb3b6b2dbfd9274a749b Mon Sep 17 00:00:00 2001 From: EscapedGibbon <101188881+EscapedGibbon@users.noreply.github.com> Date: Tue, 30 Sep 2025 17:58:30 +0200 Subject: [PATCH 5/5] docs: add page in useful tips about transform() parameters and options (#154) --- .../transform-parameters/borderTypes.svg | 143 + .../transform-parameters/borderValueTest.svg | 148 + .../transform-parameters/fullImageTest.svg | 199 ++ .../transform-parameters/interpolations.svg | 2435 +++++++++++++++++ .../transform-parameters/inverseTest.svg | 75 + .../images/transform-parameters/original.png | Bin 0 -> 1475 bytes .../images/transform-parameters/test.png | Bin 0 -> 112 bytes .../transform-function-and-its-parameters.md | 161 ++ 8 files changed, 3161 insertions(+) create mode 100644 docs/useful-tips/images/transform-parameters/borderTypes.svg create mode 100644 docs/useful-tips/images/transform-parameters/borderValueTest.svg create mode 100644 docs/useful-tips/images/transform-parameters/fullImageTest.svg create mode 100644 docs/useful-tips/images/transform-parameters/interpolations.svg create mode 100644 docs/useful-tips/images/transform-parameters/inverseTest.svg create mode 100644 docs/useful-tips/images/transform-parameters/original.png create mode 100644 docs/useful-tips/images/transform-parameters/test.png create mode 100644 docs/useful-tips/transform-function-and-its-parameters.md diff --git a/docs/useful-tips/images/transform-parameters/borderTypes.svg b/docs/useful-tips/images/transform-parameters/borderTypes.svg new file mode 100644 index 0000000..62be07f --- /dev/null +++ b/docs/useful-tips/images/transform-parameters/borderTypes.svg @@ -0,0 +1,143 @@ + + + +originalconstantreflectreflect101replicatewrap diff --git a/docs/useful-tips/images/transform-parameters/borderValueTest.svg b/docs/useful-tips/images/transform-parameters/borderValueTest.svg new file mode 100644 index 0000000..609775c --- /dev/null +++ b/docs/useful-tips/images/transform-parameters/borderValueTest.svg @@ -0,0 +1,148 @@ + + + +defaultborderValue:125 diff --git a/docs/useful-tips/images/transform-parameters/fullImageTest.svg b/docs/useful-tips/images/transform-parameters/fullImageTest.svg new file mode 100644 index 0000000..55a826d --- /dev/null +++ b/docs/useful-tips/images/transform-parameters/fullImageTest.svg @@ -0,0 +1,199 @@ + + + +fullImage:falsefullImage:true diff --git a/docs/useful-tips/images/transform-parameters/interpolations.svg b/docs/useful-tips/images/transform-parameters/interpolations.svg new file mode 100644 index 0000000..ba5d71e --- /dev/null +++ b/docs/useful-tips/images/transform-parameters/interpolations.svg @@ -0,0 +1,2435 @@ + + + +nearestbilinearbicubic diff --git a/docs/useful-tips/images/transform-parameters/inverseTest.svg b/docs/useful-tips/images/transform-parameters/inverseTest.svg new file mode 100644 index 0000000..95e5dba --- /dev/null +++ b/docs/useful-tips/images/transform-parameters/inverseTest.svg @@ -0,0 +1,75 @@ + + + +inverse:trueinverse:false diff --git 
a/docs/useful-tips/images/transform-parameters/original.png b/docs/useful-tips/images/transform-parameters/original.png new file mode 100644 index 0000000000000000000000000000000000000000..6d3ffaa5d7fb98acc23b553bf95091487b3f4352 GIT binary patch literal 1475 zcmeAS@N?(olHy`uVBq!ia0vp^CxG}D2NRIY?wFs?z`)w&>Eakt5%>14W0$gr0E?q- zZQZ)7rTKC?mLh99?bwd?Y@fK@ZT|XxCU#ak_WY`#M*RqZIU)?NRaUznOnR_{AwoUU zmP?FTo%Mj#$yb3JBrG%;u6c&_Hyu6T$+*F9Q7=WR6ATN27}iW)Bgfj^ zIGHJ7Zpv4U2p;@KefSvYz{n~RKr!)PD%1V*&yUWZZ2#r&`u>0anEoD}AFlED_x+EH zR?YvaZm+-US7iNX<}h=fYY7rM8`$(}xx^x3Pj6u1UeoA3zlk;Mx{=O7rtAlomM1h` z-OA&=;egbRFPaevrMicOMu2jj%jT)P{~Pz4*M9mPTEBn4xqZUZ``i(hSMxc-*0F~9 zv0j_@>S(@n{6F^k;@3$}Z|Q~kJx$788MEfz>7%FC#;loh>gcVYoe_JdcTbJp8DTlK zd+XAojaB;M+S`jZ7HNrRdp}M38mAYQ_cZBgh~Bknr;fhb8?)xlsiUWWM(9rMzFNIA zV&~NEslhuVqP4_Ve+O#P7S~>0v~gRAUf6q}rm*2m*a7m~d$PAKy4oWgUO=HZ4u=@@#%!;mqLa>gTe~DWM4fdNXGy literal 0 HcmV?d00001 diff --git a/docs/useful-tips/images/transform-parameters/test.png b/docs/useful-tips/images/transform-parameters/test.png new file mode 100644 index 0000000000000000000000000000000000000000..6f59c8f03bb813e0d9eebb97eb543ee11e083abe GIT binary patch literal 112 zcmeAS@N?(olHy`uVBq!ia0vp^96-#)!2~4PmUdhRQs$m6jv*Cs$v^s;4IVK6my~ee z;As%yaAfe~T)`6}=)rxarbA3eC_$Gup&^I$ir5;V8+Uq6H#4;SNL{GVq{R$0g2B_( K&t;ucLK6UsMjhV( literal 0 HcmV?d00001 diff --git a/docs/useful-tips/transform-function-and-its-parameters.md b/docs/useful-tips/transform-function-and-its-parameters.md new file mode 100644 index 0000000..300c8ac --- /dev/null +++ b/docs/useful-tips/transform-function-and-its-parameters.md @@ -0,0 +1,161 @@ +# `transform()` and its parameters + +`transform()` represents a fundamental operation of image processing. It needs two parameters for it to work: + +- image to transform +- transformation matrix + +However, it also has an `options` parameter which enhances its capabilities. +In this article we will talk about what these options do and how they are useful. + +### Options + +#### Width and Height + +`width` and `height` are pretty straight-forward as options: they represent dimensions of the destination image. If these values are not specified, they will be equal to source image's. + +#### Interpolation Type + +To understand `interpolationType`, imagine you're scaling an image up by 2. The new image has 4 times as many pixels, but where do the "in-between" pixel values come from? Interpolation algorithms calculate these new pixel values based on surrounding original pixels. +Available interpolation types: + +`nearest` - Uses the value of the nearest pixel (fastest, preserves sharp edges). + +`bilinear` - Linear interpolation between 4 nearest pixels (smooth gradients). + +`bicubic` - Cubic interpolation using 16 nearest pixels (smoothest, best quality). + +
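For example, here is a minimal sketch of how `width`, `height`, and `interpolationType` are passed together. The `scaleMatrix` below is a hypothetical example matrix, and `image` is assumed to be an existing ImageJS `Image` instance:

```ts
// Hypothetical 3x3 affine matrix that scales the image down by a factor of 2,
// following the same convention as the enlargement example further below.
const scaleMatrix = [
  [0.5, 0, 0],
  [0, 0.5, 0],
  [0, 0, 1],
];

// Assumption: `image` is an Image instance that was loaded earlier.
// Explicit output dimensions plus an explicitly chosen interpolation.
const halved = image.transform(scaleMatrix, {
  width: Math.round(image.width / 2),
  height: Math.round(image.height / 2),
  interpolationType: 'bilinear',
});
```

If `width` and `height` were omitted here, the output would keep the source image's dimensions, as described above.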
+
+Which interpolation is best?
+
+You might be thinking that `bicubic` will always be the way to go, since it should transfer the most detail from the source. However, that's not always the case.
+Let's take a look at this simple 10x10 image (the image was enlarged here for display purposes):
+
+![Test image](./images/transform-parameters/original.png)
+
+And now let's enlarge the image by a factor of 10 using different interpolations:
+
+```ts
+const resizeMatrix = [
+  [10, 0, 0],
+  [0, 10, 0],
+  [0, 0, 1],
+];
+const newImage = image.transform(resizeMatrix, {
+  fullImage: true,
+  interpolationType: 'nearest',
+});
+const newImage2 = image.transform(resizeMatrix, {
+  fullImage: true,
+  interpolationType: 'bilinear',
+});
+const newImage3 = image.transform(resizeMatrix, {
+  fullImage: true,
+  interpolationType: 'bicubic',
+});
+```
+
+And see the results:
+
+![Interpolations](images/transform-parameters/interpolations.svg)
+
+As you can see, `nearest` can sometimes be a better choice of interpolation algorithm, because it handles abrupt color changes better. This is just one example showing that "smoothest" does not always mean "best".
+
+:::note
+You can check out [this video](https://www.youtube.com/watch?v=AqscP7rc8_M), which wonderfully explains in more detail how interpolation works.
+:::
+
+#### Border Type
+
+To understand what `borderType` is, we need to get back to interpolation for a moment. When you transform an image (rotate, scale, skew, etc.), the new pixel positions often map to coordinates in the original image that don't exist - for example, negative coordinates or coordinates beyond the image edges. The `borderType` tells the algorithm how to handle these "out-of-bounds" pixels. There are several approaches:
+
+`constant` - Fill border areas with a constant color value (specified by [`borderValue`](./transform-function-and-its-parameters.md#border-value)). The default value is 0.
+
+`reflect` - Mirror edge pixels: [1,2,3,4] → [2,1,1,2,3,4,4,3]
+
+`reflect101` - Mirror edge pixels without repeating the edge: [1,2,3,4] → [3,2,1,2,3,4,3,2]
+
+`replicate` - Extend edge pixels: [1,2,3,4] → [1,1,1,2,3,4,4,4]
+
+`wrap` - Wrap around to opposite edge: [1,2,3,4] → [3,4,1,2,3,4,1,2]
+
+![Border types](./images/transform-parameters/borderTypes.svg)
+
+#### Border Value
+
+`borderValue` is used when `borderType` is set to `constant`. It specifies the pixel value to use for out-of-bounds pixels. The value should be a single number, which is applied to all channels of the pixel in question, regardless of the image's number of channels.
+
+```ts
+const result = image.transform(matrix, {
+  borderType: 'constant',
+  borderValue: 125, // Grey border
+});
+```
+
+![Border value test](./images/transform-parameters/borderValueTest.svg)
+
+#### Inverse
+
+We have briefly shown how this option is used in [our tutorial on image transformations](../tutorials/applying-transform-function-on-images.md). Let's take a deeper look at it.
+The `inverse` option controls how the transformation matrix is applied during the image transformation process. Understanding this concept is crucial for getting the expected results from your transformations.
+When transforming an image, there are two fundamental approaches:
+
+- **Forward mapping** (inverse: false): The matrix is applied to source coordinates to find the corresponding destination coordinates.
+- **Backward mapping** (inverse: true): The source image is treated as the destination image. For each destination pixel, the inverse of the matrix is applied to find which source pixel to sample from.
+
+Let's take a simple example. Let's say we want to rotate an image by 45 degrees using a transformation matrix:
+
+```ts
+// Rotation matrix
+const centerX = image.width / 2;
+const centerY = image.height / 2;
+const cosA = Math.cos(Math.PI / 4);
+const sinA = Math.sin(Math.PI / 4);
+
+const rotationMatrix = [
+  [cosA, -sinA, centerX - centerX * cosA + centerY * sinA],
+  [sinA, cosA, centerY - centerX * sinA - centerY * cosA],
+  [0, 0, 1],
+];
+const result = image.transform(rotationMatrix, {
+  inverse: true, // Default
+});
+
+const result2 = image.transform(rotationMatrix, {
+  inverse: false,
+});
+```
+
+As you can see, using the same matrix, the image appears to be rotated in opposite directions depending on the value of `inverse`.
+
+![inverse test](./images/transform-parameters/inverseTest.svg)
+
+#### Full Image
+
+`fullImage` ensures that every pixel of the original image is included inside the transformed image, preventing any cropping. When it is true, the function automatically calculates the required output dimensions. When it is false, the source image's dimensions are used.
+
+```ts
+// Without fullImage - may crop transformed pixels
+const result1 = image.transform(rotationMatrix, {
+  width: 8,
+  height: 10,
+  fullImage: false,
+});
+
+// With fullImage - automatically sizes output to fit all pixels
+const result2 = image.transform(rotationMatrix, {
+  fullImage: true, // Output dimensions calculated automatically
+});
+```
+
+This is particularly useful for rotations, where corners of the image may extend beyond the original boundaries.
+
+![fullImage test](./images/transform-parameters/fullImageTest.svg)
+
+:::warning
+If `fullImage` is set to `true`, the `width` and `height` parameters will be ignored.
+:::
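To wrap up, here is a hedged sketch that combines several of the options discussed in this article. It reuses the `rotationMatrix` from the inverse example above and again assumes an existing `image` instance; the exact combination of values is illustrative rather than prescriptive:

```ts
// Rotate the image, keep every pixel of the result, and fill the
// uncovered corners with a constant grey border.
const rotated = image.transform(rotationMatrix, {
  fullImage: true, // width/height would be ignored here anyway
  interpolationType: 'bicubic',
  borderType: 'constant',
  borderValue: 125,
});
```

All of these options are optional, so any subset of them can be passed to `transform()` in the same way.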