diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
deleted file mode 100644
index 581f2e9..0000000
--- a/.github/workflows/build.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Build firmware
-
-on:
- workflow_dispatch:
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- with:
- submodules: true
- - name: make
- run: |
- sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi
- make -C micropython/mpy-cross
- make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules
- make BOARD=SPARKFUN_XRP_CONTROLLER
diff --git a/.gitmodules b/.gitmodules
index b1327c0..0968144 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,9 +1,6 @@
-[submodule "src/opencv"]
- path = src/opencv
- url = https://github.com/sfe-SparkFro/opencv.git
-[submodule "src/ulab"]
- path = src/ulab
+[submodule "opencv"]
+ path = opencv
+ url = https://github.com/opencv/opencv.git
+[submodule "ulab"]
+ path = ulab
url = https://github.com/v923z/micropython-ulab.git
-[submodule "micropython"]
- path = micropython
- url = https://github.com/sparkfun/micropython.git
diff --git a/Makefile b/Makefile
index 8cc8f78..3d1cfd6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,20 +1,16 @@
-# Set Pico SDK flags to create our own malloc wrapper and enable exceptions
-CMAKE_ARGS += -DSKIP_PICO_MALLOC=1 -DPICO_CXX_ENABLE_EXCEPTIONS=1
+ifndef PLATFORM
+$(error PLATFORM not specified. Use 'make PLATFORM=rp2350' or similar.)
+endif
-# Get current directory
-CURRENT_DIR = $(shell pwd)
+TOOLCHAIN_FILE = platforms/${PLATFORM}.toolchain.cmake
-# Set the MicroPython user C module path to the OpenCV module
-MAKE_ARGS = USER_C_MODULES="$(CURRENT_DIR)/src/opencv_upy.cmake"
+# TODO: For some reason, specifying this in the toolchain file doesn't work
+CMAKE_ARGS += -DBUILD_LIST=core,imgproc,imgcodecs
-# Build MicroPython with the OpenCV module
+# Generic build
all:
- @cd micropython/ports/rp2 && export CMAKE_ARGS="$(CMAKE_ARGS)" && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS)
+ cd opencv && mkdir -p build && cmake -S . -B build -DPICO_BUILD_DOCS=0 -DCMAKE_TOOLCHAIN_FILE=../${TOOLCHAIN_FILE} ${CMAKE_ARGS} && make -C build -f Makefile $(MAKEFLAGS) $(MAKE_ARGS)
-# Clean the MicroPython build
+# Clean the OpenCV build
clean:
- @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) clean
-
-# Load the MicroPython submodules
-submodules:
- @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) submodules
+ cd opencv && rm -rf build
diff --git a/README.md b/README.md
index 6083295..46eab35 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,292 @@
-# micropython-opencv
\ No newline at end of file
+# SparkFun MicroPython-OpenCV
+
+Welcome to SparkFun's MicroPython port of OpenCV! This is the first known MicroPython port of OpenCV, which opens up a whole new world of vision processing abilities on embedded devices in a Python environment!
+
+As the first port, there may be incomplete or missing features, and some rough edges. For example, we have only implemented support for the Raspberry Pi RP2350 so far, and some of the build procedures are hard-coded for that. We'd be happy to work with the community to create an official port in the future, but until then, this repo is available and fully open-source for anyone to use!
+
+# Example Snippets
+
+Below are example code snippets of features available in this port of OpenCV. We've done our best to make it as similar as possible to standard OpenCV, but there are some necessary API changes due to the limitations of MicroPython.
+
+```python
+# Import OpenCV, just like any other Python environment!
+import cv2 as cv
+
+# Import ulab NumPy and initialize an image, almost like any other Python
+# environment!
+from ulab import numpy as np
+img = np.zeros((240, 320, 3), dtype=np.uint8)
+
+# Call OpenCV functions, just like standard OpenCV!
+img = cv.putText(img, "Hello OpenCV!", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+img = cv.Canny(img, 100, 200)
+
+# Call `cv.imshow()`, almost like standard OpenCV! Instead of passing a window
+# name string, you pass a display driver that implements an `imshow()` method
+# that takes a NumPy array as input.
+cv.imshow(display, img)
+
+# Call `cv.waitKey()`, just like standard OpenCV! Unlike standard OpenCV, this
+# waits for a key press on the REPL instead of a window, and it is not necessary
+# to call after `cv.imshow()` because display drivers show images immediately.
+key = cv.waitKey(0)
+
+# Use a camera, similar to standard OpenCV! `cv.VideoCapture()` is not used in
+# MicroPython-OpenCV, because a separate camera driver that implements the same
+# methods as the OpenCV `VideoCapture` class must be initialized separately.
+camera.open()
+success, frame = camera.read()
+camera.release()
+
+# Call `cv.imread()` and `cv.imwrite()` to read and write images to and from
+# the MicroPython filesystem, just like standard OpenCV! It can also point to an
+# SD card if one is mounted for extra storage space.
+img = cv.imread("path/to/image.png")
+success = cv.imwrite("path/to/image.png", img)
+```
+
+For a full example, see our [Red Vision repo](https://github.com/sparkfun/red_vision).
+
+# Performance
+
+Limit your expectations. OpenCV typically runs on full desktop systems containing processors running at GHz speeds with dozens of cores optimized for computing speed and GB of RAM. In contrast, microcontroller processors typically run at a few hundred MHz with 1 or 2 cores optimized for low power consumption with a few MB of RAM. Exact performance depends on many things, including the processor, vision pipeline, image resolution, colorspaces used, RAM available, etc.
+
+If you want best performance, keep in mind that MicroPython uses a garbage collector for memory management. If images are repeatedly created in a vision pipeline, RAM will be consumed until the garbage collector runs. The collection process takes longer with more RAM, so this can result in noticeable delays during collection (typically a few hundred milliseconds). To mitigate this, it's best to pre-allocate arrays and utilize the optional `dst` argument of OpenCV functions so memory consumption is minimized. Pre-allocation also helps improve performance, because allocating memory takes time.
+
+Below are some typical execution times for various OpenCV functions. All were tested on a Raspberry Pi RP2350 with a 320x240 test image.
+
+| Function | Execution Time |
+| --- | --- |
+| `dst = cv.blur(src, (5, 5))` | 115ms |
+| `dst = cv.blur(src, (5, 5), dst)` | 87ms |
+| `retval, dst = cv.threshold(src, 127, 255, cv.THRESH_BINARY)` | 76ms |
+| `retval, dst = cv.threshold(src, 127, 255, cv.THRESH_BINARY, dst)` | 46ms |
+| `dst = cv.cvtColor(src, cv.COLOR_BGR2HSV)` | 114ms |
+| `dst = cv.cvtColor(src, cv.COLOR_BGR2HSV, dst)` | 84ms |
+| `dst = cv.Canny(src, 100, 200)` | 504ms |
+| `dst = cv.Canny(src, 100, 200, dst)` | 482ms |
+
+# Included OpenCV Functions
+
+Below is a list of all OpenCV functions included in the MicroPython port of OpenCV. This section follows OpenCV's module structure.
+
+Only the most useful OpenCV functions are included. The MicroPython environment is *extremely* limited, so many functions are omitted due to prohibitively high RAM and firmware size requirements. Other less useful functions have been omitted to reduce firmware size. If there are additional functions you'd like to be included, see [#Contributing](#Contributing).
+
+If you need help understanding how to use these functions, see the documentation link for each function. You can also check out [OpenCV's Python Tutorials](https://docs.opencv.org/4.11.0/d6/d00/tutorial_py_root.html) and other tutorials online for more educational experience. This repository is simply a port of OpenCV, so we do not document these functions or how to use them, except for deviations from standard OpenCV.
+
+## [`core`](https://docs.opencv.org/4.11.0/d0/de1/group__core.html)
+
+> [!NOTE]
+> The `core` module includes many functions for basic operations on arrays. Most of these can be performed by `numpy` operations, so they have been omitted to reduce firmware size.
+
+### [Operations on arrays](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.convertScaleAbs(src[, dst[, alpha[, beta]]]) -> dst`
Scales, calculates absolute values, and converts the result to 8-bit.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga3460e9c9f37b563ab9dd550c4d8c4e7d) | |
+| `cv.inRange(src, lowerb, upperb[, dst]) -> dst`
Checks if array elements lie between the elements of two other arrays.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981) | |
+| `cv.minMaxLoc(src[, mask]) -> minVal, maxVal, minLoc, maxLoc`
Finds the global minimum and maximum in an array.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#gab473bf2eb6d14ff97e89b355dac20707) | |
+
+## [`imgproc`](https://docs.opencv.org/4.11.0/d7/dbd/group__imgproc.html)
+
+### [Image Filtering](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.bilateralFilter(src, d, sigmaColor, sigmaSpace[, dst[, borderType]]) -> dst`
Applies the bilateral filter to an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed) | |
+| `cv.blur(src, ksize[, dst[, anchor[, borderType]]]) -> dst`
Blurs an image using the normalized box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga8c45db9afe636703801b0b2e440fce37) | |
+| `cv.boxFilter(src, ddepth, ksize[, dst[, anchor[, normalize[, borderType]]]]) -> dst`
Blurs an image using the box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad533230ebf2d42509547d514f7d3fbc3) | |
+| `cv.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst`
Dilates an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga4ff0f3318642c4f469d0e11f242f3b6c) | |
+| `cv.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst`
Erodes an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaeb1e0c1033e3f6b891a25d0511362aeb) | |
+| `cv.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst`
Convolves an image with the kernel.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga27c049795ce870216ddfb366086b5a04) | |
+| `cv.GaussianBlur(src, ksize, sigmaX[, dst[, sigmaY[, borderType[, hint]]]]) -> dst`
Blurs an image using a Gaussian filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gae8bdcd9154ed5ca3cbc1766d960f45c1) | |
+| `cv.getStructuringElement(shape, ksize[, anchor]) -> retval`
Returns a structuring element of the specified size and shape for morphological operations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gac342a1bb6eabf6f55c803b09268e36dc) | |
+| `cv.Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst`
Calculates the Laplacian of an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad78703e4c8fe703d479c1860d76429e6) | |
+| `cv.medianBlur(src, ksize[, dst]) -> dst`
Blurs an image using the median filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga564869aa33e58769b4469101aac458f9) | |
+| `cv.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst`
Performs advanced morphological transformations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f) | |
+| `cv.Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst`
Calculates the first x- or y- image derivative using Scharr operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaa13106761eedf14798f37aa2d60404c9) | |
+| `cv.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst`
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gacea54f142e81b6758cb6f375ce782c8d) | |
+| `cv.spatialGradient(src[, dx[, dy[, ksize[, borderType]]]]) -> dx, dy`
Calculates the first order image derivative in both x and y using a Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga405d03b20c782b65a4daf54d233239a2) | |
+
+### [Miscellaneous Image Transformations](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst`
Applies an adaptive threshold to an array.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#ga72b913f352e4a1b1b397736707afcde3) | |
+| `cv.threshold(src, thresh, maxval, type[, dst]) -> retval, dst`
Applies a fixed-level threshold to each array element.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57) | |
+
+### [Drawing Functions](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.arrowedLine(img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]]) -> img`
Draws an arrow segment pointing from the first point to the second one.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga0a165a3ca093fd488ac709fdf10c05b2) | |
+| `cv.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> img`
Draws a circle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#gaf10604b069374903dbd0f0488cb43670) | |
+| `cv.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> image`
Draws contours outlines or filled contours.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga746c0625f1781f1ffc9056259103edbc) | |
+| `cv.drawMarker(img, position, color[, markerType[, markerSize[, thickness[, line_type]]]]) -> img`
Draws a marker on a predefined position in an image.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga644c4a170d4799a56b29f864ce984b7e) | |
+| `cv.ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> img`
Draws a simple or thick elliptic arc or fills an ellipse sector.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga28b2267d35786f5f890ca167236cbc69) | |
+| `cv.fillConvexPoly(img, points, color[, lineType[, shift]]) -> img`
Fills a convex polygon.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga9bb982be9d641dc51edd5e8ae3624e1f) | |
+| `cv.fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> img`
Fills the area bounded by one or more polygons.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga311160e71d37e3b795324d097cb3a7dc) | |
+| `cv.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img`
Draws a line segment connecting two points.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga7078a9fae8c7e7d13d24dac2520ae4a2) | |
+| `cv.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> img`
Draws a text string.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576) | |
+| `cv.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img`
Draws a simple, thick, or filled up-right rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) | |
+
+### [Color Space Conversions](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.cvtColor(src, code[, dst[, dstCn[, hint]]]) -> dst`
Converts an image from one color space to another.
[Documentation](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html#gaf86c09fe702ed037c03c2bc603ceab14) | |
+
+### [Structural Analysis and Shape Descriptors](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.approxPolyDP(curve, epsilon, closed[, approxCurve]) -> approxCurve`
Approximates a polygonal curve(s) with the specified precision.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga0012a5fdaea70b8a9970165d98722b4c) | |
+| `cv.approxPolyN(curve, nsides[, approxCurve[, epsilon_percentage[, ensure_convex]]]) -> approxCurve`
Approximates a polygon with a convex hull with a specified accuracy and number of sides.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga88981607a2d61b95074688aac55625cc) | |
+| `cv.arcLength(curve, closed) -> retval`
Calculates a contour perimeter or a curve length.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8d26483c636be6b35c3ec6335798a47c) | |
+| `cv.boundingRect(array) -> retval`
Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga103fcbda2f540f3ef1c042d6a9b35ac7) | |
+| `cv.boxPoints(box[, points]) -> points`
Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf78d467e024b4d7936cf9397185d2f5c) | |
+| `cv.connectedComponents(image[, labels[, connectivity[, ltype]]]) -> retval, labels`
computes the connected components labeled image of boolean image
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaedef8c7340499ca391d459122e51bef5) | `ltype` defaults to `CV_16U` instead of `CV_32S` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 |
+| `cv.connectedComponentsWithStats(image[, labels[, stats[, centroids[, connectivity[, ltype]]]]]) -> retval, labels, stats, centroids`
computes the connected components labeled image of boolean image and also produces a statistics output for each label
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga107a78bf7cd25dec05fb4dfc5c9e765f) | `labels`, `stats`, and `centroids` are returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 |
+| `cv.contourArea(contour[, oriented]) -> retval`
Calculates a contour area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga2c759ed9f497d4a618048a2f56dc97f1) | |
+| `cv.convexHull(points[, hull[, clockwise[, returnPoints]]]) -> hull`
Finds the convex hull of a point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga014b28e56cb8854c0de4a211cb2be656) | `hull` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 |
+| `cv.convexityDefects(contour, convexhull[, convexityDefects]) -> convexityDefects`
Finds the convexity defects of a contour.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gada4437098113fd8683c932e0567f47ba) | `convexityDefects` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 |
+| `cv.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy`
Finds contours in a binary image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e0) | `contours` and `hierarchy` are returned with `dtype=np.float` and `dtype=np.int16` respectively instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 |
+| `cv.fitEllipse(points) -> retval`
Fits an ellipse around a set of 2D points.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa) | |
+| `cv.fitLine(points, distType, param, reps, aeps[, line]) -> line`
Fits a line to a 2D or 3D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf849da1fdafa67ee84b1e9a23b93f91f) | |
+| `cv.isContourConvex(contour) -> retval`
Tests a contour convexity.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b) | |
+| `cv.matchShapes(contour1, contour2, method, parameter) -> retval`
Compares two shapes.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317) | |
+| `cv.minAreaRect(points) -> retval`
Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga3d476a3417130ae5154aea421ca7ead9) | |
+| `cv.minEnclosingCircle(points) -> center, radius`
Finds a circle of the minimum area enclosing a 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8ce13c24081bbc7151e9326f412190f1) | |
+| `cv.minEnclosingTriangle(points[, triangle]) -> retval, triangle`
Finds a triangle of minimum area enclosing a 2D point set and returns its area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f) | |
+| `cv.moments(array[, binaryImage]) -> retval`
Calculates all of the moments up to the third order of a polygon or rasterized shape.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga556a180f43cab22649c23ada36a8a139) | |
+| `cv.pointPolygonTest(contour, pt, measureDist) -> retval`
Performs a point-in-contour test.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722) | |
+
+### [Feature Detection](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) -> edges`
Finds edges in an image using the Canny algorithm.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga04723e007ed888ddf11d9ba04e2232de) | |
+| `cv.HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles`
Finds circles in a grayscale image using the Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga47849c3be0d0406ad3ca45db65a25d2d) | |
+| `cv.HoughCirclesWithAccumulator(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles`
Finds circles in a grayscale image using the Hough transform and get accumulator.
[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#aed6d238ceede74293152437228c603ce) | |
+| `cv.HoughLines(image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]]) -> lines`
Finds lines in a binary image using the standard Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga0b7ee275f89bb1a799ab70a42131f39d) | |
+| `cv.HoughLinesP(image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]]) -> lines`
Finds line segments in a binary image using the probabilistic Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb) | `lines` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 |
+| `cv.HoughLinesWithAccumulator(image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]]) -> lines`
Finds lines in a binary image using the standard Hough transform and get accumulator.
[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#ad5e6dca5163cd4bd0135cb808f1ddfe8) | |
+
+### [Object Detection](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.matchTemplate(image, templ, method[, result[, mask]]) -> result`
Compares a template against overlapped image regions.
[Documentation](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html#ga586ebfb0a7fb604b35a23d85391329be) | |
+
+## [`imgcodecs`](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.imread(filename[, flags]) -> retval`
Loads an image from a file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#gacbaa02cffc4ec2422dfa2e24412a99e2) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. |
+| `cv.imwrite(filename, img[, params]) -> retval`
Saves an image to a specified file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#ga8ac397bd09e48851665edbe12aa28f25) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. |
+
+## [`highgui`](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.imshow(winname, mat) -> None`
Displays an image in the specified window.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563) | `winname` must actually be a display driver object that implements an `imshow()` method that takes a NumPy array as input. |
+| `cv.waitKey([, delay]) -> retval`
Waits for a pressed key.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL. |
+| `cv.waitKeyEx([, delay]) -> retval`
Similar to waitKey, but returns full key code.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL.
Full key code is implementation specific, so special key codes in MicroPython will not match other Python environments. |
+
+# Hardware Drivers
+
+Standard OpenCV leverages the host operating system to access hardware, like creating windows and accessing cameras. MicroPython does not have that luxury, so instead, drivers must be implemented for these hardware devices. Take a look at our [Red Vision repo](https://github.com/sparkfun/red_vision) for examples. This leads to necessary API changes for functions like `cv.imshow()`.
+
+# MicroPython Board Requirements
+
+As of writing, the OpenCV firmware adds over 3MiB on top of the standard MicroPython firmware, which itself can be up to 1MiB in size (depending on platform and board). You'll also want some storage space, so a board with at least 8MB of flash is recommended.
+
+PSRAM is basically a requirement to do anything useful with OpenCV. A single 320x240 RGB888 frame buffer requires 225KiB of RAM; most microcontrollers only have a few hundred KiB of SRAM. Several frame buffers can be needed for even simple vision pipelines, so you really need at least a few MiB of RAM available. The more the merrier!
+
+# Building
+
+Below are instructions to build the MicroPython-OpenCV firmware from scratch. Instructions are only provided for Linux systems.
+
+> [!NOTE]
+> This build process does not include any hardware drivers, see our [Red Vision repo](https://github.com/sparkfun/red_vision) for example drivers.
+
+> [!NOTE]
+> Because OpenCV dramatically increases the firmware size, it may be necessary to define board variants that reduce the storage size to avoid it overlapping with the firmware. See [#Adding New Boards](#Adding-New-Boards).
+
+1. Clone this repo and MicroPython
+ * ```
+ cd ~
+ git clone https://github.com/sparkfun/micropython-opencv.git
+ git clone https://github.com/micropython/micropython.git
+ ```
+2. Build the MicroPython cross-compiler
+ * ```
+ make -C micropython/mpy-cross -j4
+ ```
+3. Clone MicroPython submodules for your board
+ * ```
+ make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules
+ ```
+ * Replace `rp2` and `SPARKFUN_XRP_CONTROLLER` with your platform and board name respectively
+4. Set environment variables (if needed)
+ * Some platforms require environment variables to be set. Example:
+ * ```
+ export PICO_SDK_PATH=~/micropython/lib/pico-sdk
+ ```
+5. Build OpenCV for your platform
+ * ```
+ make -C micropython-opencv PLATFORM=rp2350 --no-print-directory -j4
+ ```
+ * Replace `rp2350` with your board's platform
+6. Build MicroPython-OpenCV firmware for your board
+ * ```
+ export CMAKE_ARGS="-DSKIP_PICO_MALLOC=1 -DPICO_CXX_ENABLE_EXCEPTIONS=1" && make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER USER_C_MODULES=~/micropython-opencv/micropython_opencv.cmake -j4
+ ```
+ * Replace `rp2` and `SPARKFUN_XRP_CONTROLLER` with your platform and board name respectively
+ * Replace the `CMAKE_ARGS` contents with whatever is required for your board's platform
+ * Your firmware file(s) will be located in `~/micropython/ports//build-/`
+
+# Adding New Boards
+
+Because OpenCV dramatically increases the firmware size, it may be necessary to define board variants that reduce the storage size to avoid it overlapping with the firmware. It is also beneficial to adjust the board name to include `OpenCV` or similar to help people identify that the MicroPython-OpenCV firmware is flashed to the board instead of standard MicroPython.
+
+Below is the variant for the XRP Controller as an example. The variant is defined by creating a file called [`micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER/mpconfigvariant_RED_VISION.cmake`](https://github.com/sparkfun/micropython/blob/7e728e8c6aad74ca244183f3e0705db6f332abd9/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER/mpconfigvariant_LARGE_BINARY.cmake) with contents:
+
+```
+list(APPEND MICROPY_DEF_BOARD
+ # Board name
+ "MICROPY_HW_BOARD_NAME=\"SparkFun XRP Controller (Red Vision)\""
+ # 8MB (8 * 1024 * 1024)
+ "MICROPY_HW_FLASH_STORAGE_BYTES=8388608"
+)
+```
+
+Some board definitions do not have `#ifndef` wrappers in `mpconfigboard.h` for `MICROPY_HW_BOARD_NAME` and `MICROPY_HW_FLASH_STORAGE_BYTES`. They should be added if needed so the variant can build properly.
+
+Then, the firmware can be built by adding `BOARD_VARIANT=` to the `make` command when building the MicroPython-OpenCV firmware.
+
+# Adding New Platforms
+
+Only support for the Raspberry Pi RP2350 has been figured out, so all the requirements for adding new platforms are not fully known yet. However, it should be along the lines of:
+
+1. Create a valid toolchain file for the platform
+ * See [rp2350.toolchain.cmake](src/opencv/platforms/rp2350.toolchain.cmake) for reference
+   * This loosely follows [OpenCV's platform definitions](https://github.com/opencv/opencv/tree/4.x/platforms)
+2. Ensure OpenCV builds correctly
+ * ```
+ make -C micropython-opencv/src/opencv PLATFORM= --no-print-directory -j4
+ ```
+3. Create new board(s) for that platform
+ * See [#Adding New Boards](#Adding-New-Boards)
+4. Build MicroPython-OpenCV firmware for that board
+ * ```
+ make -C micropython/ports/rp2 BOARD= USER_C_MODULES=micropython-opencv/src/micropython_opencv.cmake -j4
+ ```
+
+# Contributing
+
+Found a bug? Is there a discrepancy between standard OpenCV and MicroPython-OpenCV? Have a feature request?
+
+First, please see if there is an [existing issue](https://github.com/sparkfun/micropython-opencv/issues). If not, then please [open a new issue](https://github.com/sparkfun/micropython-opencv/issues/new) so we can discuss the topic!
+
+Pull requests are welcome! Please keep the scope of your pull request focused (make separate ones if needed), and keep file changes limited to the scope of your pull request.
diff --git a/micropython b/micropython
deleted file mode 160000
index 186caf9..0000000
--- a/micropython
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 186caf9f0326c9d61494a7d5c6d0408c0fef8485
diff --git a/src/opencv_upy.cmake b/micropython_opencv.cmake
similarity index 55%
rename from src/opencv_upy.cmake
rename to micropython_opencv.cmake
index f2ea973..2e1b3b1 100644
--- a/src/opencv_upy.cmake
+++ b/micropython_opencv.cmake
@@ -1,14 +1,26 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# micropython_opencv.cmake
+#
+# CMake file for the MicroPython port of OpenCV.
+#-------------------------------------------------------------------------------
+
# Create an INTERFACE library for our CPP module.
add_library(usermod_cv2 INTERFACE)
# Add our source files to the library.
target_sources(usermod_cv2 INTERFACE
- ${CMAKE_CURRENT_LIST_DIR}/alloc.c
- ${CMAKE_CURRENT_LIST_DIR}/convert.cpp
- ${CMAKE_CURRENT_LIST_DIR}/core.cpp
- ${CMAKE_CURRENT_LIST_DIR}/imgproc.cpp
- ${CMAKE_CURRENT_LIST_DIR}/numpy.cpp
- ${CMAKE_CURRENT_LIST_DIR}/opencv_upy.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/alloc.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/convert.cpp
+ ${CMAKE_CURRENT_LIST_DIR}/src/core.cpp
+ ${CMAKE_CURRENT_LIST_DIR}/src/highgui.cpp
+ ${CMAKE_CURRENT_LIST_DIR}/src/imgcodecs.cpp
+ ${CMAKE_CURRENT_LIST_DIR}/src/imgproc.cpp
+ ${CMAKE_CURRENT_LIST_DIR}/src/numpy.cpp
+ ${CMAKE_CURRENT_LIST_DIR}/src/opencv_upy.c
)
# Add the src directory as an include directory.
@@ -21,16 +33,16 @@ target_link_libraries(usermod INTERFACE usermod_cv2)
# OpenCV creates some global variables on the heap. These get created before
# the GC is initialized, so we need to allocate some space for them on the C
-# heap. 10kB seems sufficient. TODO: See if we can get away with less.
-set(MICROPY_C_HEAP_SIZE 10240)
+# heap. 64kB seems sufficient.
+set(MICROPY_C_HEAP_SIZE 65536)
# Makes m_tracked_calloc() and m_tracked_free() available. These track pointers
# in a linked list to ensure the GC does not free them. Needed for some OpenCV
# functions
set(MICROPY_TRACKED_ALLOC 1)
-# Set ULAB max number of dimensions to 4 (default is 2). TODO: See if 4 is
-# actually needed, or if we can get away with 2.
+# Set ULAB max number of dimensions to 4 (default is 2), which is needed for
+# some OpenCV functions
target_compile_definitions(usermod INTERFACE ULAB_MAX_DIMS=4)
# Include ULAB
@@ -48,3 +60,8 @@ target_link_libraries(usermod INTERFACE "-Wl,--wrap,malloc")
target_link_libraries(usermod INTERFACE "-Wl,--wrap,free")
target_link_libraries(usermod INTERFACE "-Wl,--wrap,calloc")
target_link_libraries(usermod INTERFACE "-Wl,--wrap,realloc")
+
+# __NEWLIB__ is not defined for some reason, which causes a conflicting
+# definition of uint here:
+# https://github.com/opencv/opencv/blob/9cdd525bc59b34a3db8f6db905216c5398ca93d6/modules/core/include/opencv2/core/hal/interface.h#L35-L39
+target_compile_definitions(usermod INTERFACE -D__NEWLIB__)
diff --git a/opencv b/opencv
new file mode 160000
index 0000000..31b0eee
--- /dev/null
+++ b/opencv
@@ -0,0 +1 @@
+Subproject commit 31b0eeea0b44b370fd0712312df4214d4ae1b158
diff --git a/platforms/common.cmake b/platforms/common.cmake
new file mode 100644
index 0000000..e9fef23
--- /dev/null
+++ b/platforms/common.cmake
@@ -0,0 +1,46 @@
+# Derived from:
+# https://github.com/joachimBurket/esp32-opencv/blob/master/esp32/doc/detailed_build_procedure.md
+set(CMAKE_BUILD_TYPE Release)
+set(BUILD_SHARED_LIBS OFF)
+set(CV_DISABLE_OPTIMIZATION OFF)
+set(WITH_IPP OFF)
+set(WITH_TBB OFF)
+set(WITH_OPENMP OFF)
+set(WITH_PTHREADS_PF OFF)
+set(WITH_QUIRC OFF)
+set(WITH_1394 OFF)
+set(WITH_CUDA OFF)
+set(WITH_OPENCL OFF)
+set(WITH_OPENCLAMDFFT OFF)
+set(WITH_OPENCLAMDBLAS OFF)
+set(WITH_VA_INTEL OFF)
+set(WITH_EIGEN OFF)
+set(WITH_GSTREAMER OFF)
+set(WITH_GTK OFF)
+set(WITH_JASPER OFF)
+set(WITH_JPEG OFF)
+set(WITH_OPENJPEG OFF)
+set(WITH_WEBP OFF)
+set(BUILD_ZLIB ON)
+set(BUILD_PNG ON)
+set(WITH_TIFF OFF)
+set(WITH_V4L OFF)
+set(WITH_LAPACK OFF)
+set(WITH_ITT OFF)
+set(WITH_PROTOBUF OFF)
+set(WITH_IMGCODEC_HDR OFF)
+set(WITH_IMGCODEC_SUNRASTER OFF)
+set(WITH_IMGCODEC_PXM OFF)
+set(WITH_IMGCODEC_PFM OFF)
+# TODO: For some reason, specifying this in the toolchain file doesn't work
+# set(BUILD_LIST core,imgproc,imgcodecs)
+set(BUILD_JAVA OFF)
+set(BUILD_opencv_python OFF)
+set(BUILD_opencv_java OFF)
+set(BUILD_opencv_apps OFF)
+set(BUILD_PACKAGE OFF)
+set(BUILD_PERF_TESTS OFF)
+set(BUILD_TESTS OFF)
+set(CV_ENABLE_INTRINSICS OFF)
+set(CV_TRACE OFF)
+set(OPENCV_ENABLE_MEMALIGN OFF)
diff --git a/platforms/include/rp2350_unsafe_cv_xadd.h b/platforms/include/rp2350_unsafe_cv_xadd.h
new file mode 100644
index 0000000..a3853c8
--- /dev/null
+++ b/platforms/include/rp2350_unsafe_cv_xadd.h
@@ -0,0 +1,20 @@
+#ifndef RP2350_UNSAFE_CV_XADD_H
+#define RP2350_UNSAFE_CV_XADD_H
+
+// Fix for https://github.com/raspberrypi/pico-sdk/issues/2505
+// TLDR; OpenCV uses atomic operations for incrementing reference counters by
+// default. However, the Pico SDK does not support atomic operations on data in
+// PSRAM; attempting to do so just causes an infinite loop where the value is
+// incremented forever. The workaround is to use a non-atomic operation by
+// re-defining the `CV_XADD` macro. This is "unsafe" because it's not atomic,
+// but it *should* be fine since we're only using one thread. Also see:
+// https://github.com/opencv/opencv/blob/52bed3cd7890192700b2451e2713c340209ffd79/modules/core/include/opencv2/core/cvdef.h#L697-L723
+static inline int unsafe_cv_xadd(int* addr, int delta)
+{
+ int tmp = *addr;
+ *addr += delta;
+ return tmp;
+}
+#define CV_XADD(addr, delta) unsafe_cv_xadd(addr, delta)
+
+#endif
diff --git a/platforms/include/zephyr_stdint.h b/platforms/include/zephyr_stdint.h
new file mode 100644
index 0000000..750db64
--- /dev/null
+++ b/platforms/include/zephyr_stdint.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019 BayLibre SAS
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_TOOLCHAIN_STDINT_H_
+#define ZEPHYR_INCLUDE_TOOLCHAIN_STDINT_H_
+
+/*
+ * Some gcc versions and/or configurations as found in the Zephyr SDK
+ * (questionably) define __INT32_TYPE__ and derivatives as a long int
+ * which makes the printf format checker to complain about long vs int
+ * mismatch when %u is given a uint32_t argument, and uint32_t pointers not
+ * being compatible with int pointers. Let's redefine them to follow
+ * common expectations and usage.
+ */
+
+#if __SIZEOF_INT__ != 4
+#error "unexpected int width"
+#endif
+
+#undef __INT32_TYPE__
+#undef __UINT32_TYPE__
+#undef __INT_FAST32_TYPE__
+#undef __UINT_FAST32_TYPE__
+#undef __INT_LEAST32_TYPE__
+#undef __UINT_LEAST32_TYPE__
+#undef __INT64_TYPE__
+#undef __UINT64_TYPE__
+#undef __INT_FAST64_TYPE__
+#undef __UINT_FAST64_TYPE__
+#undef __INT_LEAST64_TYPE__
+#undef __UINT_LEAST64_TYPE__
+
+#define __INT32_TYPE__ int
+#define __UINT32_TYPE__ unsigned int
+#define __INT_FAST32_TYPE__ __INT32_TYPE__
+#define __UINT_FAST32_TYPE__ __UINT32_TYPE__
+#define __INT_LEAST32_TYPE__ __INT32_TYPE__
+#define __UINT_LEAST32_TYPE__ __UINT32_TYPE__
+#define __INT64_TYPE__ long long int
+#define __UINT64_TYPE__ unsigned long long int
+#define __INT_FAST64_TYPE__ __INT64_TYPE__
+#define __UINT_FAST64_TYPE__ __UINT64_TYPE__
+#define __INT_LEAST64_TYPE__ __INT64_TYPE__
+#define __UINT_LEAST64_TYPE__ __UINT64_TYPE__
+
+/*
+ * The confusion also exists with __INTPTR_TYPE__ which is either an int
+ * (even when __INT32_TYPE__ is a long int) or a long int. Let's redefine
+ * it to a long int to get some uniformity. Doing so also makes it compatible
+ * with LP64 (64-bit) targets where a long is always 64-bit wide.
+ */
+
+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
+#error "unexpected size difference between pointers and long ints"
+#endif
+
+#undef __INTPTR_TYPE__
+#undef __UINTPTR_TYPE__
+#define __INTPTR_TYPE__ long int
+#define __UINTPTR_TYPE__ long unsigned int
+
+/*
+ * Re-define the INTN_C(value) integer constant expression macros to match the
+ * integer types re-defined above.
+ */
+
+#undef __INT32_C
+#undef __UINT32_C
+#undef __INT64_C
+#undef __UINT64_C
+#define __INT32_C(c) c
+#define __UINT32_C(c) c ## U
+#define __INT64_C(c) c ## LL
+#define __UINT64_C(c) c ## ULL
+
+#endif /* ZEPHYR_INCLUDE_TOOLCHAIN_STDINT_H_ */
\ No newline at end of file
diff --git a/platforms/rp2350.toolchain.cmake b/platforms/rp2350.toolchain.cmake
new file mode 100644
index 0000000..56b0b13
--- /dev/null
+++ b/platforms/rp2350.toolchain.cmake
@@ -0,0 +1,22 @@
+# Define PICO_SDK_PATH in your environment before running this script
+if(NOT DEFINED ENV{PICO_SDK_PATH})
+ message(FATAL_ERROR "PICO_SDK_PATH environment variable is not set. Please define it before running this script.")
+endif()
+
+# Include the RP2350 toolchain file
+include("$ENV{PICO_SDK_PATH}/cmake/preload/toolchains/pico_arm_cortex_m33_gcc.cmake")
+
+# Include the common embedded OpenCV settings
+include("${CMAKE_CURRENT_LIST_DIR}/common.cmake")
+
+# Set RP2350 specific settings
+set(OPENCV_DISABLE_THREAD_SUPPORT ON)
+
+# Fix for https://github.com/raspberrypi/pico-sdk/issues/2505
+set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"")
+set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"")
+
+# Fix for https://github.com/sparkfun/micropython-opencv/issues/31
+# Source: https://docs.zephyrproject.org/4.0.0/doxygen/html/zephyr__stdint_8h_source.html
+set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -imacros ${CMAKE_CURRENT_LIST_DIR}/include/zephyr_stdint.h")
+set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -imacros ${CMAKE_CURRENT_LIST_DIR}/include/zephyr_stdint.h")
diff --git a/src/alloc.c b/src/alloc.c
index 6bf07b5..c8fc5ec 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -1,3 +1,17 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * alloc.c
+ *
+ * Wrapper functions for malloc(), free(), calloc(), and realloc(). These ensure
+ * memory gets allocated on the C heap before the MicroPython garbage collector
+ * has been initialized, and in the GC pool afterwards.
+ *------------------------------------------------------------------------------
+ */
+
// C headers
#include "py/runtime.h"
diff --git a/src/convert.cpp b/src/convert.cpp
index 54af82e..1e425ba 100644
--- a/src/convert.cpp
+++ b/src/convert.cpp
@@ -1,3 +1,16 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * convert.cpp
+ *
+ * Helper functions to convert between various data types from MicroPython, ulab
+ * NumPy, and OpenCV
+ *------------------------------------------------------------------------------
+ */
+
// C++ headers
#include "convert.h"
#include "numpy.h"
@@ -16,8 +29,7 @@ uint8_t mat_depth_to_ndarray_type(int depth)
case CV_16U: return NDARRAY_UINT16;
case CV_16S: return NDARRAY_INT16;
case CV_32F: return NDARRAY_FLOAT;
- // case CV_Bool: return NDARRAY_BOOL;
- default: mp_raise_ValueError(MP_ERROR_TEXT("Unsupported Mat depth"));
+ default: mp_raise_TypeError(MP_ERROR_TEXT("Unsupported Mat depth"));
}
}
@@ -29,8 +41,7 @@ int ndarray_type_to_mat_depth(uint8_t type)
case NDARRAY_UINT16: return CV_16U;
case NDARRAY_INT16: return CV_16S;
case NDARRAY_FLOAT: return CV_32F;
- // case NDARRAY_BOOL: return CV_Bool;
- default: mp_raise_ValueError(MP_ERROR_TEXT("Unsupported ndarray type"));
+ default: mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
}
}
@@ -39,7 +50,7 @@ ndarray_obj_t *mat_to_ndarray(Mat& mat)
// Derived from:
// https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L313-L328
if(mat.data == NULL)
- mp_const_none;
+ return (ndarray_obj_t*) mp_const_none;
Mat temp, *ptr = (Mat*)&mat;
if(!ptr->u || ptr->allocator != &GetNumpyAllocator())
{
@@ -62,6 +73,7 @@ Mat ndarray_to_mat(ndarray_obj_t *ndarray)
// We have an ndarray_obj_t, so these checks have already been done.
// https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L130-L172
+ bool needcopy = false;
int type = ndarray_type_to_mat_depth(ndarray->dtype);
int ndims = ndarray->ndim;
@@ -73,17 +85,41 @@ Mat ndarray_to_mat(ndarray_obj_t *ndarray)
_strides[i] = ndarray->strides[ULAB_MAX_DIMS - ndarray->ndim + i];
}
- // https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L176-L221
+ // https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L176-L241
bool ismultichannel = ndims == 3;
+ for( int i = ndims-1; i >= 0 && !needcopy; i-- )
+ {
+ // these checks handle cases of
+ // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
+ // b) transposed arrays, where _strides[] elements go in non-descending order
+ // c) flipped arrays, where some of _strides[] elements are negative
+ // the _sizes[i] > 1 is needed to avoid spurious copies when NPY_RELAXED_STRIDES is set
+ if( (i == ndims-1 && _sizes[i] > 1 && (size_t)_strides[i] != elemsize) ||
+ (i < ndims-1 && _sizes[i] > 1 && _strides[i] < _strides[i+1]) )
+ needcopy = true;
+ }
+
if (ismultichannel)
{
int channels = ndims >= 1 ? (int)_sizes[ndims - 1] : 1;
ndims--;
type |= CV_MAKETYPE(0, channels);
+
+ if (ndims >= 1 && _strides[ndims - 1] != (size_t)elemsize*_sizes[ndims])
+ needcopy = true;
+
elemsize = CV_ELEM_SIZE(type);
}
+ if (needcopy)
+ {
+ ndarray = ndarray_from_mp_obj(ndarray_copy(ndarray), 0);
+ for (int i = 0; i < ndarray->ndim; i++) {
+ _strides[i] = ndarray->strides[ULAB_MAX_DIMS - ndarray->ndim + i];
+ }
+ }
+
// https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L243-L261
int size[CV_MAX_DIM+1] = {};
size_t step[CV_MAX_DIM+1] = {};
@@ -147,3 +183,324 @@ Mat mp_obj_to_mat(mp_obj_t obj)
return mat;
}
+
+Size mp_obj_to_size(mp_obj_t obj)
+{
+ // Check for None object
+ if(obj == mp_const_none)
+ {
+ // Create an empty Size object
+ return Size();
+ }
+
+ // Assume the object is a ndarray, or can be converted to one. Will raise an
+ // exception if not
+ ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0);
+
+ // Validate the length of the ndarray
+ if(ndarray->len != 2)
+ {
+ mp_raise_TypeError(MP_ERROR_TEXT("Size must be length 2"));
+ }
+
+ // Compute the size, checking the type of the ndarray
+ Size size;
+ switch(ndarray->dtype)
+ {
+ case NDARRAY_UINT8:
+ size.width = ((uint8_t*) ndarray->array)[0];
+ size.height = ((uint8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT8:
+ size.width = ((int8_t*) ndarray->array)[0];
+ size.height = ((int8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_UINT16:
+ size.width = ((uint16_t*) ndarray->array)[0];
+ size.height = ((uint16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT16:
+ size.width = ((int16_t*) ndarray->array)[0];
+ size.height = ((int16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_FLOAT:
+ size.width = ((float*) ndarray->array)[0];
+ size.height = ((float*) ndarray->array)[1];
+ break;
+ case NDARRAY_BOOL:
+ size.width = ((bool*) ndarray->array)[0];
+ size.height = ((bool*) ndarray->array)[1];
+ break;
+ default:
+ mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
+ break;
+ }
+
+ return size;
+}
+
+Size2f mp_obj_to_size2f(mp_obj_t obj)
+{
+ // Check for None object
+ if(obj == mp_const_none)
+ {
+ // Create an empty Size2f object
+ return Size2f();
+ }
+
+ // Assume the object is a ndarray, or can be converted to one. Will raise an
+ // exception if not
+ ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0);
+
+ // Validate the length of the ndarray
+ if(ndarray->len != 2)
+ {
+ mp_raise_TypeError(MP_ERROR_TEXT("Size2f must be length 2"));
+ }
+
+ // Compute the size, checking the type of the ndarray
+ Size2f size;
+ switch(ndarray->dtype)
+ {
+ case NDARRAY_UINT8:
+ size.width = ((uint8_t*) ndarray->array)[0];
+ size.height = ((uint8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT8:
+ size.width = ((int8_t*) ndarray->array)[0];
+ size.height = ((int8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_UINT16:
+ size.width = ((uint16_t*) ndarray->array)[0];
+ size.height = ((uint16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT16:
+ size.width = ((int16_t*) ndarray->array)[0];
+ size.height = ((int16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_FLOAT:
+ size.width = ((float*) ndarray->array)[0];
+ size.height = ((float*) ndarray->array)[1];
+ break;
+ case NDARRAY_BOOL:
+ size.width = ((bool*) ndarray->array)[0];
+ size.height = ((bool*) ndarray->array)[1];
+ break;
+ default:
+ mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
+ break;
+ }
+
+ return size;
+}
+
+Point mp_obj_to_point(mp_obj_t obj)
+{
+ // Check for None object
+ if(obj == mp_const_none)
+ {
+ // Create an empty Point object
+ return Point();
+ }
+
+ // Assume the object is a ndarray, or can be converted to one. Will raise an
+ // exception if not
+ ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0);
+
+ // Validate the length of the ndarray
+ if(ndarray->len != 2)
+ {
+ mp_raise_TypeError(MP_ERROR_TEXT("Point must be length 2"));
+ }
+
+ // Compute the point, checking the type of the ndarray
+ Point point;
+ switch(ndarray->dtype)
+ {
+ case NDARRAY_UINT8:
+ point.x = ((uint8_t*) ndarray->array)[0];
+ point.y = ((uint8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT8:
+ point.x = ((int8_t*) ndarray->array)[0];
+ point.y = ((int8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_UINT16:
+ point.x = ((uint16_t*) ndarray->array)[0];
+ point.y = ((uint16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT16:
+ point.x = ((int16_t*) ndarray->array)[0];
+ point.y = ((int16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_FLOAT:
+ point.x = ((float*) ndarray->array)[0];
+ point.y = ((float*) ndarray->array)[1];
+ break;
+ case NDARRAY_BOOL:
+ point.x = ((bool*) ndarray->array)[0];
+ point.y = ((bool*) ndarray->array)[1];
+ break;
+ default:
+ mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
+ break;
+ }
+
+ return point;
+}
+
+Point2f mp_obj_to_point2f(mp_obj_t obj)
+{
+ // Check for None object
+ if(obj == mp_const_none)
+ {
+ // Create an empty Point2f object
+ return Point2f();
+ }
+
+ // Assume the object is a ndarray, or can be converted to one. Will raise an
+ // exception if not
+ ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0);
+
+ // Validate the length of the ndarray
+ if(ndarray->len != 2)
+ {
+ mp_raise_TypeError(MP_ERROR_TEXT("Point2f must be length 2"));
+ }
+
+ // Compute the point, checking the type of the ndarray
+ Point2f point;
+ switch(ndarray->dtype)
+ {
+ case NDARRAY_UINT8:
+ point.x = ((uint8_t*) ndarray->array)[0];
+ point.y = ((uint8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT8:
+ point.x = ((int8_t*) ndarray->array)[0];
+ point.y = ((int8_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_UINT16:
+ point.x = ((uint16_t*) ndarray->array)[0];
+ point.y = ((uint16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_INT16:
+ point.x = ((int16_t*) ndarray->array)[0];
+ point.y = ((int16_t*) ndarray->array)[1];
+ break;
+ case NDARRAY_FLOAT:
+ point.x = ((float*) ndarray->array)[0];
+ point.y = ((float*) ndarray->array)[1];
+ break;
+ case NDARRAY_BOOL:
+ point.x = ((bool*) ndarray->array)[0];
+ point.y = ((bool*) ndarray->array)[1];
+ break;
+ default:
+ mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
+ break;
+ }
+
+ return point;
+}
+
+Scalar mp_obj_to_scalar(mp_obj_t obj)
+{
+ // Check for None object
+ if(obj == mp_const_none)
+ {
+ // Create an empty Scalar object
+ return Scalar();
+ }
+
+ // Assume the object is a ndarray, or can be converted to one. Will raise an
+ // exception if not
+ ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0);
+
+ // Validate the length of the ndarray
+ if(ndarray->len > 4)
+ {
+ mp_raise_TypeError(MP_ERROR_TEXT("Scalar must be length 4 or less"));
+ }
+
+ // Compute the scalar, checking the type of the ndarray
+ Scalar scalar;
+ switch(ndarray->dtype)
+ {
+ case NDARRAY_UINT8:
+ for(size_t i = 0; i < ndarray->len; i++)
+ scalar[i] = ((uint8_t*) ndarray->array)[i];
+ break;
+ case NDARRAY_INT8:
+ for(size_t i = 0; i < ndarray->len; i++)
+ scalar[i] = ((int8_t*) ndarray->array)[i];
+ break;
+ case NDARRAY_UINT16:
+ for(size_t i = 0; i < ndarray->len; i++)
+ scalar[i] = ((uint16_t*) ndarray->array)[i];
+ break;
+ case NDARRAY_INT16:
+ for(size_t i = 0; i < ndarray->len; i++)
+ scalar[i] = ((int16_t*) ndarray->array)[i];
+ break;
+ case NDARRAY_FLOAT:
+ for(size_t i = 0; i < ndarray->len; i++)
+ scalar[i] = ((float*) ndarray->array)[i];
+ break;
+ case NDARRAY_BOOL:
+ for(size_t i = 0; i < ndarray->len; i++)
+ scalar[i] = ((bool*) ndarray->array)[i];
+ break;
+ default:
+ mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
+ break;
+ }
+
+ return scalar;
+}
+
+std::vector<std::vector<Point>> mp_obj_to_contours(mp_obj_t obj)
+{
+ // Check for None object
+ if(obj == mp_const_none)
+ {
+ // Create an empty contours object
+ return std::vector<std::vector<Point>>();
+ }
+
+ // Create a vector to hold the contours
+ std::vector<std::vector<Point>> contours;
+
+ // Ideally, we could just use ndarray_from_mp_obj(), but it has a bug with
+ // 4D arrays, so we need to do this a bit manually.
+ // https://github.com/v923z/micropython-ulab/issues/727
+
+ // Assume the object is iterable. Will raise an exception if not
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iterable = mp_getiter(obj, &iter_buf);
+ mp_obj_t item;
+
+ // Iterate through all items in the iterable
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION)
+ {
+ // Create a vector to hold the points of this contour
+ std::vector<Point> contour;
+
+ // Convert the item to a Mat object (should be a 3D ndarray of points)
+ Mat contour_mat = mp_obj_to_mat(item);
+
+ // Iterate through the rows of the Mat object and extract the points
+ for (int j = 0; j < contour_mat.rows; j++)
+ {
+ contour.push_back(Point(
+ contour_mat.at<int16_t>(j, 0),
+ contour_mat.at<int16_t>(j, 1)
+ ));
+ }
+
+ // Add the contour to the list of contours
+ contours.push_back(contour);
+ }
+
+ return contours;
+}
diff --git a/src/convert.h b/src/convert.h
index 07512e6..08d3731 100644
--- a/src/convert.h
+++ b/src/convert.h
@@ -1,10 +1,23 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * convert.h
+ *
+ * Helper functions to convert between various data types from MicroPython, ulab
+ * NumPy, and OpenCV.
+ *------------------------------------------------------------------------------
+ */
+
// C++ headers
#include "opencv2/core.hpp"
// C headers
extern "C" {
#include "py/runtime.h"
-#include "ulab/code/ndarray.h"
+#include "ndarray.h"
} // extern "C"
using namespace cv;
@@ -17,7 +30,21 @@ int ndarray_type_to_mat_depth(uint8_t type);
ndarray_obj_t *mat_to_ndarray(Mat &mat);
Mat ndarray_to_mat(ndarray_obj_t *ndarray);
-// Conversion functions between Mat and mp_obj_t. Abstracts away intermediate
+// Conversion functions between Mat and mp_obj_t. Abstracts away intermediate
// conversions to ndarray_obj_t
mp_obj_t mat_to_mp_obj(Mat &mat);
Mat mp_obj_to_mat(mp_obj_t obj);
+
+// Conversion functions between Size and mp_obj_t
+Size mp_obj_to_size(mp_obj_t obj);
+Size2f mp_obj_to_size2f(mp_obj_t obj);
+
+// Conversion functions between Point and mp_obj_t
+Point mp_obj_to_point(mp_obj_t obj);
+Point2f mp_obj_to_point2f(mp_obj_t obj);
+
+// Conversion functions between Scalar and mp_obj_t
+Scalar mp_obj_to_scalar(mp_obj_t obj);
+
+// Conversion functions between contours (vector of vector of Point) and mp_obj_t
+std::vector<std::vector<Point>> mp_obj_to_contours(mp_obj_t obj);
diff --git a/src/core.cpp b/src/core.cpp
index 18d3e7a..2e7c95b 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -1,24 +1,86 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * core.cpp
+ *
+ * MicroPython wrappers for functions from the OpenCV core module.
+ *------------------------------------------------------------------------------
+ */
+
// C++ headers
#include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
#include "convert.h"
#include "numpy.h"
// C headers
extern "C" {
#include "core.h"
-#include "ulab/code/ndarray.h"
+#include "ndarray.h"
} // extern "C"
using namespace cv;
-mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+// The function below is a workaround for memory management issues between
+// OpenCV and the MicroPython GC. OpenCV allocates some objects on the heap,
+// whenever the first function that needs the objects happen to be called. That
+// only happens from the user's code after the GC has been initialized, meaning
+// they get allocated on the GC heap (see `__wrap_malloc()`). If a soft reset
+// occurs, the GC gets reset and the memory locations get overwritten, but the
+// same memory locations are still referenced for the objects, resulting in bad
+// values and problems (crashes and freezes, `CV_Assert()` calls fail, etc.).
+//
+// The solution here is to ensure those objects are allocated in the C heap
+// instead of the GC heap. The function below calls various OpenCV functions
+// that subsequently allocate the problematic objects. To ensure they are
+// allocated on the C heap, this needs to happen before the GC is initialized
+// (before `main()` is called), so __wrap_malloc() will use __real_malloc()
+// instead of the GC.
+//
+// The function below returns a dummy value that we use to initialize a global
+// variable, ensuring it gets run before `main()` gets called. This also means
+// it can be used as a general boot function for anything else that needs to
+// happen before `main()` is called, such as setting the default Mat allocator.
+bool upyOpenCVBoot() {
+ try {
+ // Initializes `CoreTLSData` on the C heap, see:
+ // https://github.com/sparkfun/micropython-opencv/issues/13
+ theRNG();
+
+ // Initializes all image codecs on the C heap, see:
+ // https://github.com/sparkfun/micropython-opencv/issues/17
+ haveImageWriter(".bmp");
+
+ // Initializes `StdMatAllocator` on the C heap, see:
+ // https://github.com/sparkfun/micropython-opencv/issues/17
+ //
+ // Alternatively, we could set the NumpyAllocator as the default Mat
+ // allocator with `Mat::setDefaultAllocator(&GetNumpyAllocator())`,
+ // however that actually causes some issues. For example, Canny()
+ // creates a temporary 64-bit float Mat, which is not supported by ulab
+ // NumPy and therefore fails with the NumpyAllocator. StdMatAllocator is
+ // fine to use, because it calls `malloc()`, which we catch with
+ // `__wrap_malloc()` to ensure the data is allocated on the GC heap
+ Mat::getDefaultAllocator();
+
+ return true;
+ } catch (const Exception& e) {
+ return false;
+ }
+}
+volatile bool bootSuccess = upyOpenCVBoot();
+
+mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
// Define the arguments
- enum { ARG_src, ARG_lower, ARG_upper, ARG_dst };
+ enum { ARG_src, ARG_dst, ARG_alpha, ARG_beta };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
- { MP_QSTR_lower, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
- { MP_QSTR_upper, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
{ MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_alpha, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_beta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
};
// Parse the arguments
@@ -27,13 +89,13 @@ mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_
// Convert arguments to required types
Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
- Mat lower = mp_obj_to_mat(args[ARG_lower].u_obj);
- Mat upper = mp_obj_to_mat(args[ARG_upper].u_obj);
Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ mp_float_t alpha = args[ARG_alpha].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_alpha].u_obj);
+ mp_float_t beta = args[ARG_beta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_beta].u_obj);
// Call the corresponding OpenCV function
try {
- inRange(src, lower, upper, dst);
+ convertScaleAbs(src, dst, alpha, beta);
} catch(Exception& e) {
mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
}
@@ -42,12 +104,13 @@ mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_
return mat_to_mp_obj(dst);
}
-mp_obj_t cv2_core_max(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
// Define the arguments
- enum { ARG_src1, ARG_src2, ARG_dst };
+ enum { ARG_src, ARG_lower, ARG_upper, ARG_dst };
static const mp_arg_t allowed_args[] = {
- { MP_QSTR_src1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
- { MP_QSTR_src2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_lower, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_upper, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
{ MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
};
@@ -56,28 +119,28 @@ mp_obj_t cv2_core_max(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args
mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
// Convert arguments to required types
- Mat src1 = mp_obj_to_mat(args[ARG_src1].u_obj);
- Mat src2 = mp_obj_to_mat(args[ARG_src2].u_obj);
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ Mat lower = mp_obj_to_mat(args[ARG_lower].u_obj);
+ Mat upper = mp_obj_to_mat(args[ARG_upper].u_obj);
Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
// Call the corresponding OpenCV function
try {
- max(src1, src2, dst);
+ inRange(src, lower, upper, dst);
} catch(Exception& e) {
mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
}
-
+
// Return the result
return mat_to_mp_obj(dst);
}
-mp_obj_t cv2_core_min(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+mp_obj_t cv2_core_minMaxLoc(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
// Define the arguments
- enum { ARG_src1, ARG_src2, ARG_dst };
+ enum { ARG_src, ARG_mask };
static const mp_arg_t allowed_args[] = {
- { MP_QSTR_src1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
- { MP_QSTR_src2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
- { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_mask, MP_ARG_OBJ, { .u_obj = mp_const_none } },
};
// Parse the arguments
@@ -85,17 +148,33 @@ mp_obj_t cv2_core_min(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args
mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
// Convert arguments to required types
- Mat src1 = mp_obj_to_mat(args[ARG_src1].u_obj);
- Mat src2 = mp_obj_to_mat(args[ARG_src2].u_obj);
- Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ Mat mask = mp_obj_to_mat(args[ARG_mask].u_obj);
+
+ double minVal, maxVal;
+ Point minLoc, maxLoc;
// Call the corresponding OpenCV function
try {
- min(src1, src2, dst);
+ minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, mask);
} catch(Exception& e) {
mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
}
// Return the result
- return mat_to_mp_obj(dst);
+ mp_obj_t min_loc_tuple[2] = {
+ mp_obj_new_float(minLoc.x),
+ mp_obj_new_float(minLoc.y)
+ };
+ mp_obj_t max_loc_tuple[2] = {
+ mp_obj_new_float(maxLoc.x),
+ mp_obj_new_float(maxLoc.y)
+ };
+ mp_obj_t result_tuple[4] = {
+ mp_obj_new_float(minVal),
+ mp_obj_new_float(maxVal),
+ mp_obj_new_tuple(2, min_loc_tuple),
+ mp_obj_new_tuple(2, max_loc_tuple)
+ };
+ return mp_obj_new_tuple(4, result_tuple);
}
diff --git a/src/core.h b/src/core.h
index 66f9538..81a525a 100644
--- a/src/core.h
+++ b/src/core.h
@@ -1,6 +1,50 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * core.h
+ *
+ * MicroPython wrappers for functions from the OpenCV core module.
+ *------------------------------------------------------------------------------
+ */
+
// C headers
#include "py/runtime.h"
+// Function declarations
+extern mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
extern mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
-extern mp_obj_t cv2_core_max(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
-extern mp_obj_t cv2_core_min(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_core_minMaxLoc(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+
+// Python references to the functions
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_convertScaleAbs_obj, 1, cv2_core_convertScaleAbs);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_minMaxLoc_obj, 1, cv2_core_minMaxLoc);
+
+// Global definitions for functions and constants
+#define OPENCV_CORE_GLOBALS \
+ /* Functions */ \
+ { MP_ROM_QSTR(MP_QSTR_convertScaleAbs), MP_ROM_PTR(&cv2_core_convertScaleAbs_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_minMaxLoc), MP_ROM_PTR(&cv2_core_minMaxLoc_obj) }, \
+ \
+ /* OpenCV data types, from opencv2/core/hal/interface.h */ \
+ /* Other types are currently not supported by ulab */ \
+ { MP_ROM_QSTR(MP_QSTR_CV_8U), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_CV_8S), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_CV_16U), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_CV_16S), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_CV_32F), MP_ROM_INT(4) }, \
+ \
+ /* Border types, from opencv2/core/base.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_CONSTANT), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_REPLICATE), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_WRAP), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT_101), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_TRANSPARENT), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT101), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) }
diff --git a/src/highgui.cpp b/src/highgui.cpp
new file mode 100644
index 0000000..645dac6
--- /dev/null
+++ b/src/highgui.cpp
@@ -0,0 +1,195 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * highgui.cpp
+ *
+ * MicroPython wrappers for functions from the OpenCV highgui module.
+ *------------------------------------------------------------------------------
+ */
+
+// C++ headers
+#include "opencv2/core.hpp"
+#include "convert.h"
+#include "numpy.h"
+
+// C headers
+extern "C" {
+#include "highgui.h"
+#include "ndarray.h"
+#include "py/mphal.h"
+} // extern "C"
+
+extern const mp_obj_type_t cv2_display_type;
+
+using namespace cv;
+
+mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_display, ARG_img };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_display, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Assume the display object has an `imshow` method and load it. The method
+ // array should be loaded with method[0] as the method object and method[1]
+ // as the self object.
+ mp_obj_t method[3];
+ mp_load_method_maybe(args[ARG_display].u_obj, MP_QSTR_imshow, method);
+
+ // Check if the method was found
+ if(method[0] == MP_OBJ_NULL) {
+ // Method not found, raise an AttributeError
+ mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("`cv2.imshow()` requires a display object with its own 'imshow()' method, not a window name string."));
+ }
+
+ // Add the image object to the method arguments
+ method[2] = args[ARG_img].u_obj;
+
+ // Call the method with one positional argument (the image we just added)
+ return mp_call_method_n_kw(1, 0, method);
+}
+
+mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Call waitKeyEx to do the heavy lifting
+ mp_obj_t key = cv2_highgui_waitKeyEx(n_args, pos_args, kw_args);
+
+ // Get the key code as an integer
+ int32_t key_code = mp_obj_get_int(key);
+
+ // If the key code is -1, it means no key was pressed
+ if (key_code == -1) {
+ // Return the original key object
+ return key;
+ } else {
+ // Return the last byte of the key code
+ return MP_OBJ_NEW_SMALL_INT(key_code & 0xFF);
+ }
+}
+
+mp_obj_t cv2_highgui_waitKeyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { Arg_delay };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_delay, MP_ARG_INT, {.u_int = 0} },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ int delay = args[Arg_delay].u_int;
+
+ // Derived from:
+ // https://github.com/orgs/micropython/discussions/11448
+
+ // Import `sys` and `select` modules
+ mp_obj_t sys_module = mp_import_name(MP_QSTR_sys, mp_const_none, MP_OBJ_NEW_SMALL_INT(0));
+ mp_obj_t select_module = mp_import_name(MP_QSTR_select, mp_const_none, MP_OBJ_NEW_SMALL_INT(0));
+
+ // Get the `sys.stdin` object
+ mp_obj_t stdin_obj = mp_load_attr(sys_module, MP_QSTR_stdin);
+
+ // Get the `select.POLLIN` constant
+ mp_obj_t pollin_obj = mp_load_attr(select_module, MP_QSTR_POLLIN);
+
+ // Call `select.poll()` function to create a poll object
+ mp_obj_t select_poll_method[2];
+ mp_load_method(select_module, MP_QSTR_poll, select_poll_method);
+ mp_obj_t poll_obj = mp_call_method_n_kw(0, 0, select_poll_method);
+
+ // Call `poll.register(sys.stdin, select.POLLIN)`
+ mp_obj_t poll_register_method[4];
+ mp_load_method(poll_obj, MP_QSTR_register, poll_register_method);
+ poll_register_method[2] = stdin_obj;
+ poll_register_method[3] = pollin_obj;
+ mp_call_method_n_kw(2, 0, poll_register_method);
+
+ // Create timeout integer object for next method call. OpenCV uses a delay
+ // of 0 to wait indefinitely, whereas `select.poll` uses -1
+ mp_obj_t timeout = MP_OBJ_NEW_SMALL_INT(delay <= 0 ? -1 : delay);
+
+ // Load the `poll.poll()` method to check for key presses
+ mp_obj_t poll_poll_method[3];
+ mp_load_method(poll_obj, MP_QSTR_poll, poll_poll_method);
+
+ // Load the `sys.stdin.read(1)` method to read a single character
+ mp_obj_t read_method[3];
+ mp_load_method(stdin_obj, MP_QSTR_read, read_method);
+ read_method[2] = MP_OBJ_NEW_SMALL_INT(1);
+
+ // Initialize key code to -1, which indicates no key was pressed
+ int32_t key_code = -1;
+
+ // Some key presses return multiple bytes (eg. up arrow key returns 3 bytes:
+ // `\x1b[A`). To handle this, we will loop until no more bytes are available
+ for (int i = 0; true; i++) {
+ // Call `poll.poll(timeout)` if this is the first iteration, otherwise
+ // call `poll.poll(1)` to quickly check for any remaining bytes. Can't
+ // wait 0ms, because it takes a moment for all bytes to arrive
+ poll_poll_method[2] = i == 0 ? timeout : MP_OBJ_NEW_SMALL_INT(1);
+ mp_obj_t result = mp_call_method_n_kw(1, 0, poll_poll_method);
+
+ // Extract the items from the result list
+ mp_obj_t *items;
+ size_t len;
+ mp_obj_list_get(result, &len, &items);
+
+ // Check if any items were returned
+ if(len == 0) {
+ // No more bytes available, so we're done. If multiple bytes were
+ // read, we want the last byte to be 0 so it doesn't get confused
+ // in `waitKey()` with a normal key press. So we can simply shift
+ // the key code left by 8 bits again
+ if (i > 1) {
+ key_code <<= 8;
+ }
+ break;
+ }
+
+ // Since something was returned, a byte is available. We need to
+ // extract it by calling `sys.stdin.read(1)`
+ mp_obj_t byte_str = mp_call_method_n_kw(1, 0, read_method);
+
+ // Convert the byte object to an actual byte
+ uint8_t byte_val = mp_obj_str_get_str(byte_str)[0];
+
+ // Check which iteration this is
+ if(i == 0) {
+ // This is the first iteration, set the key code to this byte
+ key_code = byte_val;
+
+ // Special keys always start with an escape character (0x1b). If
+ // this is not the escape character, we can assume it's a normal key
+ // press and break immediately. This helps mitigate the problem of
+ // interpreting 2 key simultaneous key presses as 1 special key
+ if (byte_val != 0x1b) {
+ break;
+ }
+ } else if (i == 1) {
+ // This is the second iteration, meaning the first byte was the
+ // escape character. We don't want that to be part of the key code
+ // (special keys will be indicated by having multiple bytes, and the
+ // last byte being zero), so we'll just overwrite the key code with
+ // the second byte
+ key_code = byte_val;
+ } else {
+ // This is a subsequent iteration, meaning we have already read the
+ // escape character and the second byte. For all subsequent bytes,
+ // we will shift the key code left by 8 bits and add the new byte to
+ // it to create a multi-byte key
+ key_code = (key_code << 8) | byte_val;
+ }
+ }
+
+ // Return the final key code
+ return MP_OBJ_NEW_SMALL_INT(key_code);
+}
diff --git a/src/highgui.h b/src/highgui.h
new file mode 100644
index 0000000..e99c9a8
--- /dev/null
+++ b/src/highgui.h
@@ -0,0 +1,31 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * highgui.h
+ *
+ * MicroPython wrappers for functions from the OpenCV highgui module.
+ *------------------------------------------------------------------------------
+ */
+
+// C headers
+#include "py/runtime.h"
+
+// Function declarations
+extern mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_highgui_waitKeyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+
+// Python references to the functions
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKeyEx_obj, 0, cv2_highgui_waitKeyEx);
+
+// Global definitions for functions and constants
+#define OPENCV_HIGHGUI_GLOBALS \
+ /* Functions */ \
+ { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_waitKeyEx), MP_ROM_PTR(&cv2_highgui_waitKeyEx_obj) }
diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp
new file mode 100644
index 0000000..c624f89
--- /dev/null
+++ b/src/imgcodecs.cpp
@@ -0,0 +1,167 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * imgcodecs.cpp
+ *
+ * MicroPython wrappers for functions from the OpenCV imgcodecs module.
+ *------------------------------------------------------------------------------
+ */
+
+// C++ headers
+#include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "convert.h"
+#include "numpy.h"
+
+// C headers
+extern "C" {
+#include "imgcodecs.h"
+#include "ndarray.h"
+#include "py/builtin.h"
+} // extern "C"
+
+using namespace cv;
+
+// Helper macro to create an empty mp_map_t, derived from MP_DEFINE_CONST_MAP.
+// Primarily used for function calls with no keyword arguments, since we can't
+// just pass `NULL` or mp_const_none (crash occurs otherwise)
+#define MP_EMPTY_MAP() { \
+ .all_keys_are_qstrs = 0, \
+ .is_fixed = 1, \
+ .is_ordered = 0, \
+ .used = 0, \
+ .alloc = 0, \
+ .table = (mp_map_elem_t *)(mp_rom_map_elem_t *)mp_const_none, \
+ }
+
+mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_filename, ARG_flags };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_filename, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_flags, MP_ARG_INT, { .u_int = IMREAD_COLOR_BGR } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ mp_obj_t filename = args[ARG_filename].u_obj;
+ int flags = args[ARG_flags].u_int;
+
+ // Call MicroPython's `open()` function to read the image file
+ mp_obj_t open_args[2];
+ open_args[0] = filename;
+ open_args[1] = mp_obj_new_str("rb", 2); // Open in binary read mode
+ mp_map_t open_kw_args = MP_EMPTY_MAP(); // No keyword arguments
+ mp_obj_t file_obj = mp_builtin_open(2, open_args, &open_kw_args);
+
+ // Call the `read()` method on the file object to get the image data
+ mp_obj_t read_method[2];
+ mp_load_method(file_obj, MP_QSTR_read, read_method);
+ mp_obj_t bytes_obj = mp_call_method_n_kw(0, 0, read_method);
+
+ // Close the file object
+ mp_obj_t close_method[2];
+ mp_load_method(file_obj, MP_QSTR_close, close_method);
+ mp_call_method_n_kw(0, 0, close_method);
+
+ // Convert bytes_obj to vector of uint8_t for decoding
+ size_t len;
+ const char *buf_data = mp_obj_str_get_data(bytes_obj, &len);
+ std::vector<uchar> buf(buf_data, buf_data + len);
+
+ // Decode the image from the buffer
+ Mat img;
+ img.allocator = &GetNumpyAllocator();
+ try {
+ img = imdecode(buf, flags);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+mp_obj_t cv2_imgcodecs_imwrite(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_filename, ARG_img, ARG_params };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_filename, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_params, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ mp_obj_t filename = args[ARG_filename].u_obj;
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ ndarray_obj_t *params;
+ if (args[ARG_params].u_obj == mp_const_none) {
+ // If no parameters are provided, use an empty ndarray
+ params = ndarray_new_linear_array(0, NDARRAY_INT16);
+ } else {
+ params = ndarray_from_mp_obj(args[ARG_params].u_obj, 0);
+ }
+
+ // Convert the filename to a std::string
+ size_t filename_len;
+ const char *filename_chr = mp_obj_str_get_data(filename, &filename_len);
+ std::string filename_str(filename_chr, filename_len);
+
+ // Create vector of uint8_t for encoding
+ std::vector<uchar> buf;
+
+ // Convert the parameters to a vector of int
+ std::vector<int> params_vec;
+ if (params->len > 0) {
+ params_vec.reserve(params->len);
+ for (size_t i = 0; i < params->len; ++i) {
+ mp_obj_t val = mp_binary_get_val_array(params->dtype, params->array, i);
+ // ndarrays default to float, and mp_obj_get_int() does not support
+ // float values, so we need to do the type conversion ourselves
+ mp_float_t val_float = mp_obj_get_float(val);
+ params_vec.push_back((int) val_float);
+ }
+ }
+
+ // Encode the image from the buffer
+ bool retval;
+ try {
+ retval = imencode(filename_str, img, buf, params_vec);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Convert the vector of uint8_t to a bytes object
+ mp_obj_t buf_obj = mp_obj_new_bytes((const byte *)buf.data(), buf.size());
+
+ // Call MicroPython's `open()` function to write the image file
+ mp_obj_t open_args[2];
+ open_args[0] = filename;
+ open_args[1] = mp_obj_new_str("wb", 2); // Open in binary write mode
+ mp_map_t open_kw_args = MP_EMPTY_MAP(); // No keyword arguments
+ mp_obj_t file_obj = mp_builtin_open(2, open_args, &open_kw_args);
+
+ // Call the `write()` method on the file object to write the image data
+ mp_obj_t write_method[3];
+ mp_load_method(file_obj, MP_QSTR_write, write_method);
+ write_method[2] = buf_obj; // Set the data to write
+ mp_call_method_n_kw(1, 0, write_method);
+
+ // Close the file object
+ mp_obj_t close_method[2];
+ mp_load_method(file_obj, MP_QSTR_close, close_method);
+ mp_call_method_n_kw(0, 0, close_method);
+
+ // Return the result
+ return mp_obj_new_bool(retval);
+}
diff --git a/src/imgcodecs.h b/src/imgcodecs.h
new file mode 100644
index 0000000..d07b777
--- /dev/null
+++ b/src/imgcodecs.h
@@ -0,0 +1,85 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * imgcodecs.h
+ *
+ * MicroPython wrappers for functions from the OpenCV imgcodecs module.
+ *------------------------------------------------------------------------------
+ */
+
+// C headers
+#include "py/runtime.h"
+
+// Function declarations
+extern mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgcodecs_imwrite(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+
+// Python references to the functions
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imread_obj, 1, cv2_imgcodecs_imread);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imwrite_obj, 2, cv2_imgcodecs_imwrite);
+
+// Global definitions for functions and constants
+#define OPENCV_IMGCODECS_GLOBALS \
+ /* Functions */ \
+ { MP_ROM_QSTR(MP_QSTR_imread), MP_ROM_PTR(&cv2_imgcodecs_imread_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_imwrite), MP_ROM_PTR(&cv2_imgcodecs_imwrite_obj) }, \
+ \
+ /* Image read flags, from opencv2/imgcodecs.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_UNCHANGED), MP_ROM_INT(-1) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_GRAYSCALE), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_BGR), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYDEPTH), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYCOLOR), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_LOAD_GDAL), MP_ROM_INT(8) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_2), MP_ROM_INT(16) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_2), MP_ROM_INT(17) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_4), MP_ROM_INT(32) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_4), MP_ROM_INT(33) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_8), MP_ROM_INT(64) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_8), MP_ROM_INT(65) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_IGNORE_ORIENTATION), MP_ROM_INT(128) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_RGB), MP_ROM_INT(256) }, \
+ \
+ /* Image write flags, from opencv2/imgcodecs.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_QUALITY), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_PROGRESSIVE), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_OPTIMIZE), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_RST_INTERVAL), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_LUMA_QUALITY), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_CHROMA_QUALITY), MP_ROM_INT(6) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_SAMPLING_FACTOR), MP_ROM_INT(7) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_COMPRESSION), MP_ROM_INT(16) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_STRATEGY), MP_ROM_INT(17) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_BILEVEL), MP_ROM_INT(18) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_PXM_BINARY), MP_ROM_INT(32) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_TYPE), MP_ROM_INT((3 << 4) + 0) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_COMPRESSION), MP_ROM_INT((3 << 4) + 1) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_DWA_COMPRESSION_LEVEL), MP_ROM_INT((3 << 4) + 2) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_WEBP_QUALITY), MP_ROM_INT(64) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_HDR_COMPRESSION), MP_ROM_INT((5 << 4) + 0) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_PAM_TUPLETYPE), MP_ROM_INT(128) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_RESUNIT), MP_ROM_INT(256) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_XDPI), MP_ROM_INT(257) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_YDPI), MP_ROM_INT(258) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_COMPRESSION), MP_ROM_INT(259) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_ROWSPERSTRIP), MP_ROM_INT(278) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_PREDICTOR), MP_ROM_INT(317) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG2000_COMPRESSION_X1000), MP_ROM_INT(272) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_QUALITY), MP_ROM_INT(512) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_DEPTH), MP_ROM_INT(513) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_SPEED), MP_ROM_INT(514) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_QUALITY), MP_ROM_INT(640) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_EFFORT), MP_ROM_INT(641) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DISTANCE), MP_ROM_INT(642) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DECODING_SPEED), MP_ROM_INT(643) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_LOOP), MP_ROM_INT(1024) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_SPEED), MP_ROM_INT(1025) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_QUALITY), MP_ROM_INT(1026) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_DITHER), MP_ROM_INT(1027) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_TRANSPARENCY), MP_ROM_INT(1028) }, \
+ { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_COLORTABLE), MP_ROM_INT(1029) }
+
\ No newline at end of file
diff --git a/src/imgproc.cpp b/src/imgproc.cpp
index 47d8f27..72fd805 100644
--- a/src/imgproc.cpp
+++ b/src/imgproc.cpp
@@ -1,3 +1,15 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * imgproc.cpp
+ *
+ * MicroPython wrappers for functions from the OpenCV imgproc module.
+ *------------------------------------------------------------------------------
+ */
+
// C++ headers
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
@@ -7,17 +19,21 @@
// C headers
extern "C" {
#include "imgproc.h"
-#include "ulab/code/ndarray.h"
+#include "ndarray.h"
} // extern "C"
using namespace cv;
-mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
// Define the arguments
- enum { ARG_src, ARG_code, ARG_dst };
+ enum { ARG_src, ARG_maxValue, ARG_adaptiveMethod, ARG_thresholdType, ARG_blockSize, ARG_C, ARG_dst };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
- { MP_QSTR_code, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_maxValue, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_adaptiveMethod, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_thresholdType, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_blockSize, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_C, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
{ MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
};
@@ -27,12 +43,290 @@ mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t
// Convert arguments to required types
Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
- int code = args[ARG_code].u_int;
+ mp_float_t maxValue = mp_obj_get_float(args[ARG_maxValue].u_obj);
+ int adaptiveMethod = args[ARG_adaptiveMethod].u_int;
+ int thresholdType = args[ARG_thresholdType].u_int;
+ int blockSize = args[ARG_blockSize].u_int;
+ mp_float_t C = mp_obj_get_float(args[ARG_C].u_obj);
Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
// Call the corresponding OpenCV function
try {
- cvtColor(src, dst, code);
+ adaptiveThreshold(src, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_curve, ARG_epsilon, ARG_closed, ARG_approxCurve };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_epsilon, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_closed, MP_ARG_BOOL, { .u_bool = false } },
+ { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj);
+ double epsilon = mp_obj_get_float(args[ARG_epsilon].u_obj);
+ bool closed = args[ARG_closed].u_bool;
+ Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ approxPolyDP(curve, approxCurve, epsilon, closed);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(approxCurve);
+}
+
+mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_curve, ARG_nsides, ARG_approxCurve, ARG_epsilon_percentage, ARG_ensure_convex };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_nsides, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_epsilon_percentage, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_ensure_convex, MP_ARG_BOOL, { .u_bool = true } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj);
+ int nsides = args[ARG_nsides].u_int;
+ Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj);
+ mp_float_t epsilon_percentage = args[ARG_epsilon_percentage].u_obj == mp_const_none ? -1.0 : mp_obj_get_float(args[ARG_epsilon_percentage].u_obj);
+ bool ensure_convex = args[ARG_ensure_convex].u_bool;
+
+ // Call the corresponding OpenCV function
+ try {
+ approxPolyN(curve, approxCurve, nsides, epsilon_percentage, ensure_convex);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(approxCurve);
+}
+
+mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_curve, ARG_closed };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_closed, MP_ARG_REQUIRED | MP_ARG_BOOL, { .u_bool = false } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj);
+ bool closed = args[ARG_closed].u_bool;
+
+ mp_float_t retval;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = arcLength(curve, closed);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mp_obj_new_float(retval);
+}
+
+mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_line_type, ARG_shift, ARG_tipLength };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_line_type, MP_ARG_INT, { .u_int = 8 } },
+ { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_tipLength, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj);
+ Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj);
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int thickness = args[ARG_thickness].u_int;
+ int line_type = args[ARG_line_type].u_int;
+ int shift = args[ARG_shift].u_int;
+ mp_float_t tipLength;
+ if(args[ARG_tipLength].u_obj == mp_const_none)
+ tipLength = 0.1; // Default value
+ else
+ tipLength = mp_obj_get_float(args[ARG_tipLength].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Python binding for cv2.bilateralFilter(src, d, sigmaColor, sigmaSpace, dst=None,
+// borderType=BORDER_DEFAULT). Parses the MicroPython arguments, calls
+// cv::bilateralFilter, re-raises OpenCV errors as Python exceptions, and
+// returns the filtered image (dst) as a Python object.
+mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_d, ARG_sigmaColor, ARG_sigmaSpace, ARG_dst, ARG_borderType };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_d, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_sigmaColor, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_sigmaSpace, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ int d = args[ARG_d].u_int;
+ mp_float_t sigmaColor = mp_obj_get_float(args[ARG_sigmaColor].u_obj);
+ mp_float_t sigmaSpace = mp_obj_get_float(args[ARG_sigmaSpace].u_obj);
+ // NOTE(review): dst may be None here; presumably mp_obj_to_mat maps None to an
+ // empty Mat — confirm against mp_obj_to_mat's implementation.
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ int borderType = args[ARG_borderType].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Python binding for cv2.blur(src, ksize, dst=None, anchor=None,
+// borderType=BORDER_DEFAULT). anchor=None maps to Point(-1,-1) (kernel center).
+// Calls cv::blur and returns dst as a Python object.
+mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_ksize, ARG_dst, ARG_anchor, ARG_borderType };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj);
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ // Point(-1,-1) is OpenCV's "anchor at kernel center" sentinel
+ Point anchor = args[ARG_anchor].u_obj == mp_const_none ? Point(-1,-1) : mp_obj_to_point(args[ARG_anchor].u_obj);
+ int borderType = args[ARG_borderType].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ blur(src, dst, ksize, anchor, borderType);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Python binding for cv2.boundingRect(array). Calls cv::boundingRect and
+// returns the rectangle as a 4-tuple (x, y, width, height), matching the
+// CPython cv2 API.
+mp_obj_t cv2_imgproc_boundingRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_array };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_array, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat array = mp_obj_to_mat(args[ARG_array].u_obj);
+
+ // Holds the computed bounding rectangle; only valid if the call succeeds
+ // (mp_raise_msg does not return on failure).
+ Rect retval;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = boundingRect(array);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result as a tuple
+ mp_obj_t retval_tuple[4];
+ retval_tuple[0] = mp_obj_new_int(retval.x);
+ retval_tuple[1] = mp_obj_new_int(retval.y);
+ retval_tuple[2] = mp_obj_new_int(retval.width);
+ retval_tuple[3] = mp_obj_new_int(retval.height);
+ mp_obj_t result = mp_obj_new_tuple(4, retval_tuple);
+ return result;
+}
+
+// Python binding for cv2.boxFilter(src, ddepth, ksize, dst=None, anchor=None,
+// normalize=True, borderType=BORDER_DEFAULT). Calls cv::boxFilter.
+// (The tail of this function reuses pre-existing lines of the old hunk below.)
+mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_ddepth, ARG_ksize, ARG_dst, ARG_anchor, ARG_normalize, ARG_borderType };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } },
+ { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_normalize, MP_ARG_BOOL, { .u_bool = true } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ int ddepth = args[ARG_ddepth].u_int;
+ Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj);
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ // Point(-1,-1) is OpenCV's "anchor at kernel center" sentinel
+ Point anchor = args[ARG_anchor].u_obj == mp_const_none ? Point(-1,-1) : mp_obj_to_point(args[ARG_anchor].u_obj);
+ bool normalize = args[ARG_normalize].u_bool;
+ int borderType = args[ARG_borderType].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ boxFilter(src, dst, ddepth, ksize, anchor, normalize, borderType);
} catch(Exception& e) {
mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
}
@@ -40,3 +334,1823 @@ mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t
// Return the result
return mat_to_mp_obj(dst);
}
+
+// Python binding for cv2.boxPoints(box, points=None). box is expected to be a
+// RotatedRect-style tuple: (center, size, angle). Calls cv::boxPoints and
+// returns the 4 corner points as a Python object.
+mp_obj_t cv2_imgproc_boxPoints(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_box, ARG_points };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_box, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_points, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ // NOTE(review): unchecked cast — a non-tuple or short tuple for `box` would
+ // read invalid memory here; consider validating type/length before indexing.
+ mp_obj_tuple_t *box_tuple = (mp_obj_tuple_t*) args[ARG_box].u_obj;
+ RotatedRect box;
+ box.center = mp_obj_to_point2f(box_tuple->items[0]);
+ box.size = mp_obj_to_size2f(box_tuple->items[1]);
+ box.angle = mp_obj_get_float(box_tuple->items[2]);
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ boxPoints(box, points);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(points);
+}
+
+// Python binding for cv2.Canny(image, threshold1, threshold2, edges=None,
+// apertureSize=3, L2gradient=False). Calls cv::Canny and returns the edge map.
+// NOTE(review): thresholds are parsed as MP_ARG_INT; CPython's cv2 accepts
+// floats here — integer-only thresholds may be an intentional simplification.
+mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_threshold1, ARG_threshold2, ARG_edges, ARG_apertureSize, ARG_L2gradient };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_threshold1, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_threshold2, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_edges, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_apertureSize, MP_ARG_INT, { .u_int = 3 } },
+ { MP_QSTR_L2gradient, MP_ARG_BOOL, { .u_bool = false } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ int threshold1 = args[ARG_threshold1].u_int;
+ int threshold2 = args[ARG_threshold2].u_int;
+ Mat edges = mp_obj_to_mat(args[ARG_edges].u_obj);
+ int apertureSize = args[ARG_apertureSize].u_int;
+ bool L2gradient = args[ARG_L2gradient].u_bool;
+
+ // Call the corresponding OpenCV function
+ try {
+ Canny(image, edges, threshold1, threshold2, apertureSize, L2gradient);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(edges);
+}
+
+// Python binding for cv2.circle(img, center, radius, color, thickness=1,
+// lineType=LINE_8, shift=0). Draws in place via cv::circle and returns img.
+mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_center, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_radius, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+ { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Point center = mp_obj_to_point(args[ARG_center].u_obj);
+ int radius = args[ARG_radius].u_int;
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int thickness = args[ARG_thickness].u_int;
+ int lineType = args[ARG_lineType].u_int;
+ int shift = args[ARG_shift].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ circle(img, center, radius, color, thickness, lineType, shift);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Python binding for cv2.connectedComponents(image, labels=None,
+// connectivity=8, ltype=CV_16U). Returns (retval, labels) like CPython cv2,
+// where retval is the number of labels. ltype defaults to CV_16U instead of
+// CV_32S because ulab lacks 32-bit integer support (see comment below).
+mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_labels, ARG_connectivity, ARG_ltype };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_labels, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_connectivity, MP_ARG_INT, { .u_int = 8 } },
+ { MP_QSTR_ltype, MP_ARG_INT, { .u_int = CV_16U } }, // Normally CV_32S, but ulab doesn't support 32-bit integers
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ Mat labels = mp_obj_to_mat(args[ARG_labels].u_obj);
+ int connectivity = args[ARG_connectivity].u_int;
+ int ltype = args[ARG_ltype].u_int;
+
+ // Return value (number of labels); only valid if the call succeeds
+ int retval = 0;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = connectedComponents(image, labels, connectivity, ltype);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ mp_obj_t result[2];
+ result[0] = mp_obj_new_int(retval);
+ result[1] = mat_to_mp_obj(labels);
+ return mp_obj_new_tuple(2, result);
+}
+
+// Python binding for cv2.connectedComponentsWithStats(image, ...). Returns
+// (retval, labels, stats, centroids). OpenCV produces CV_32S/CV_64F outputs,
+// which ulab cannot represent, so all three matrices are converted to CV_32F
+// (allocated through the ndarray allocator) before being handed to Python.
+// The labels/stats/centroids keyword args are currently accepted but ignored.
+mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_labels, ARG_stats, ARG_centroids, ARG_connectivity, ARG_ltype };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_labels, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_stats, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_centroids, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_connectivity, MP_ARG_INT, { .u_int = 8 } },
+ { MP_QSTR_ltype, MP_ARG_INT, { .u_int = CV_16U } }, // Normally CV_32S, but ulab doesn't support 32-bit integers
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ Mat labels32S; // TODO: Allow user input
+ Mat stats32S; // TODO: Allow user input
+ Mat centroids64F; // TODO: Allow user input
+ int connectivity = args[ARG_connectivity].u_int;
+ int ltype = args[ARG_ltype].u_int;
+
+ // Return value (number of labels); only valid if the call succeeds
+ int retval = 0;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = connectedComponentsWithStats(image, labels32S, stats32S, centroids64F, connectivity, ltype);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Convert output matrices to float
+ // The ndarray allocator must be set before convertTo so the destination
+ // buffers are owned by the MicroPython heap.
+ Mat labels, stats, centroids;
+ labels.allocator = &GetNumpyAllocator();
+ stats.allocator = &GetNumpyAllocator();
+ centroids.allocator = &GetNumpyAllocator();
+ labels32S.convertTo(labels, CV_32F);
+ stats32S.convertTo(stats, CV_32F);
+ centroids64F.convertTo(centroids, CV_32F);
+
+ // Return the result
+ mp_obj_t result[4];
+ result[0] = mp_obj_new_int(retval);
+ result[1] = mat_to_mp_obj(labels);
+ result[2] = mat_to_mp_obj(stats);
+ result[3] = mat_to_mp_obj(centroids);
+ return mp_obj_new_tuple(4, result);
+}
+
+// Python binding for cv2.contourArea(contour, oriented=False). Calls
+// cv::contourArea and returns the area as a Python float.
+mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_contour, ARG_oriented };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_oriented, MP_ARG_BOOL, { .u_bool = false } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj);
+ bool oriented = args[ARG_oriented].u_bool;
+
+ // Only valid if the call succeeds (mp_raise_msg does not return)
+ mp_float_t retval;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = contourArea(contour, oriented);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mp_obj_new_float(retval);
+}
+
+// Python binding for cv2.convexHull(points, hull=None, clockwise=False,
+// returnPoints=True). The hull keyword is currently accepted but ignored.
+// When OpenCV returns integer data (CV_32S, e.g. index output with
+// returnPoints=False) it is converted to CV_32F for ulab compatibility.
+mp_obj_t cv2_imgproc_convexHull(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_points, ARG_hull, ARG_clockwise, ARG_returnPoints };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_hull, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_clockwise, MP_ARG_BOOL, { .u_bool = false } },
+ { MP_QSTR_returnPoints, MP_ARG_BOOL, { .u_bool = true } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+ Mat hull; // TODO: Allow user input
+ bool clockwise = args[ARG_clockwise].u_bool;
+ bool returnPoints = args[ARG_returnPoints].u_bool;
+
+ // Call the corresponding OpenCV function
+ try {
+ convexHull(points, hull, clockwise, returnPoints);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // If hull is 32S, convert it to float
+ if (hull.type() == CV_32S) {
+ Mat hullFloat;
+ hull.convertTo(hullFloat, CV_32F);
+ hull = hullFloat;
+ }
+
+ // Return the result
+ return mat_to_mp_obj(hull);
+}
+
+// Python binding for cv2.convexityDefects(contour, convexhull,
+// convexityDefects=None). cv::convexityDefects requires CV_32S inputs, so both
+// inputs are converted first; the CV_32S result is converted to CV_32F for
+// ulab. The convexityDefects keyword is currently accepted but ignored.
+mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_contour, ARG_convexhull, ARG_convexityDefects };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_convexhull, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_convexityDefects, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj);
+ Mat convexhull = mp_obj_to_mat(args[ARG_convexhull].u_obj);
+ Mat convexityDefects32S; // TODO: Allow user input
+
+ // contour must be of type CV_32S
+ Mat contour32S;
+ contour.convertTo(contour32S, CV_32S);
+
+ // convexhull must be of type CV_32S
+ Mat convexhull32S;
+ convexhull.convertTo(convexhull32S, CV_32S);
+
+ // Call the corresponding OpenCV function
+ try {
+ cv::convexityDefects(contour32S, convexhull32S, convexityDefects32S);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Convert the convexityDefects32S to float
+ // Allocator is set on the destination so the output lives on the MicroPython heap.
+ Mat convexityDefects;
+ convexityDefects.allocator = &GetNumpyAllocator();
+ convexityDefects32S.convertTo(convexityDefects, CV_32F);
+
+ // Return the result
+ return mat_to_mp_obj(convexityDefects);
+}
+
+// Python binding for cv2.cvtColor(src, code, dst=None). Calls cv::cvtColor
+// with the given conversion code and returns dst as a Python object.
+mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_code, ARG_dst };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_code, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ int code = args[ARG_code].u_int;
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ cvtColor(src, dst, code);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Python binding for cv2.dilate(src, kernel, dst=None, anchor=None,
+// iterations=1, borderType=BORDER_CONSTANT, borderValue=None). None defaults:
+// anchor -> Point(-1,-1) (kernel center), borderValue ->
+// morphologyDefaultBorderValue(). Calls cv::dilate and returns dst.
+mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } },
+ { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj);
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ Point anchor;
+ if(args[ARG_anchor].u_obj == mp_const_none)
+ anchor = Point(-1, -1); // Default value
+ else
+ anchor = mp_obj_to_point(args[ARG_anchor].u_obj);
+ int iterations = args[ARG_iterations].u_int;
+ int borderType = args[ARG_borderType].u_int;
+ Scalar borderValue;
+ if(args[ARG_borderValue].u_obj == mp_const_none)
+ borderValue = morphologyDefaultBorderValue(); // Default value
+ else
+ borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ dilate(src, dst, kernel, anchor, iterations, borderType, borderValue);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Python binding for cv2.drawContours(image, contours, contourIdx, color,
+// thickness=1, lineType=LINE_8, hierarchy=None, maxLevel=INT_MAX,
+// offset=None). Draws the selected contour(s) into image via cv::drawContours
+// and returns image.
+// Fix: `contours` was declared as "std::vector>" — the template arguments
+// were lost (likely angle-bracket stripping), which is not valid C++. Restored
+// to std::vector<std::vector<Point>>, matching cv::drawContours' expected
+// InputArrayOfArrays and the mp_obj_to_contours helper.
+mp_obj_t cv2_imgproc_drawContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_contours, ARG_contourIdx, ARG_color, ARG_thickness, ARG_lineType, ARG_hierarchy, ARG_maxLevel, ARG_offset };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_contours, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_contourIdx, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+ { MP_QSTR_hierarchy, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_maxLevel, MP_ARG_INT, { .u_int = INT_MAX } },
+ { MP_QSTR_offset, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ std::vector<std::vector<Point>> contours = mp_obj_to_contours(args[ARG_contours].u_obj);
+ int contourIdx = args[ARG_contourIdx].u_int;
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int thickness = args[ARG_thickness].u_int;
+ int lineType = args[ARG_lineType].u_int;
+ Mat hierarchy = args[ARG_hierarchy].u_obj != mp_const_none ? mp_obj_to_mat(args[ARG_hierarchy].u_obj) : Mat();
+ int maxLevel = args[ARG_maxLevel].u_int;
+ Point offset = args[ARG_offset].u_obj != mp_const_none ? mp_obj_to_point(args[ARG_offset].u_obj) : Point();
+
+ // Call the corresponding OpenCV function
+ try {
+ drawContours(image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(image);
+}
+
+// Python binding for cv2.drawMarker(img, position, color, markerType=
+// MARKER_CROSS, markerSize=20, thickness=1, line_type=8). Draws in place via
+// cv::drawMarker and returns img.
+mp_obj_t cv2_imgproc_drawMarker(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_position, ARG_color, ARG_markerType, ARG_markerSize, ARG_thickness, ARG_line_type };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_position, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_markerType, MP_ARG_INT, { .u_int = MARKER_CROSS } },
+ { MP_QSTR_markerSize, MP_ARG_INT, { .u_int = 20 } },
+ { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_line_type, MP_ARG_INT, { .u_int = 8 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Point position = mp_obj_to_point(args[ARG_position].u_obj);
+ int markerType = args[ARG_markerType].u_int;
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int markerSize = args[ARG_markerSize].u_int;
+ int thickness = args[ARG_thickness].u_int;
+ int line_type = args[ARG_line_type].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ drawMarker(img, position, color, markerType, markerSize, thickness, line_type);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Python binding for cv2.ellipse(img, center, axes, angle, startAngle,
+// endAngle, color, thickness=1, lineType=LINE_8, shift=0). Draws in place via
+// cv::ellipse and returns img.
+// NOTE(review): angles are parsed as MP_ARG_INT; CPython's cv2 accepts floats
+// here — integer-only angles may be an intentional simplification.
+mp_obj_t cv2_imgproc_ellipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_center, ARG_axes, ARG_angle, ARG_startAngle, ARG_endAngle, ARG_color, ARG_thickness, ARG_lineType, ARG_shift };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_center, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_axes, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_angle, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_startAngle, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_endAngle, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+ { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Point center = mp_obj_to_point(args[ARG_center].u_obj);
+ Size axes = mp_obj_to_size(args[ARG_axes].u_obj);
+ int angle = args[ARG_angle].u_int;
+ int startAngle = args[ARG_startAngle].u_int;
+ int endAngle = args[ARG_endAngle].u_int;
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int thickness = args[ARG_thickness].u_int;
+ int lineType = args[ARG_lineType].u_int;
+ int shift = args[ARG_shift].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness, lineType, shift);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Python binding for cv2.erode(src, kernel, dst=None, anchor=None,
+// iterations=1, borderType=BORDER_CONSTANT, borderValue=None). None defaults
+// mirror cv2_imgproc_dilate: anchor -> Point(-1,-1), borderValue ->
+// morphologyDefaultBorderValue(). Calls cv::erode and returns dst.
+mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } },
+ { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj);
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ Point anchor;
+ if(args[ARG_anchor].u_obj == mp_const_none)
+ anchor = Point(-1, -1); // Default value
+ else
+ anchor = mp_obj_to_point(args[ARG_anchor].u_obj);
+ int iterations = args[ARG_iterations].u_int;
+ int borderType = args[ARG_borderType].u_int;
+ Scalar borderValue;
+ if(args[ARG_borderValue].u_obj == mp_const_none)
+ borderValue = morphologyDefaultBorderValue(); // Default value
+ else
+ borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ erode(src, dst, kernel, anchor, iterations, borderType, borderValue);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Python binding for cv2.fillConvexPoly(img, points, color, lineType=LINE_8,
+// shift=0). Converts points to CV_32S (required by cv::fillConvexPoly),
+// draws in place, and returns img.
+mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_points, ARG_color, ARG_lineType, ARG_shift };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+ { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int lineType = args[ARG_lineType].u_int;
+ int shift = args[ARG_shift].u_int;
+
+ // points must be of type CV_32S
+ // NOTE(review): the allocator is set on the *source* (points) rather than the
+ // destination (points_32S); elsewhere in this file it is set on the destination
+ // before convertTo. This looks like a no-op — confirm intent.
+ Mat points_32S;
+ points.allocator = &GetNumpyAllocator();
+ points.convertTo(points_32S, CV_32S);
+
+ // Call the corresponding OpenCV function
+ try {
+ fillConvexPoly(img, points_32S, color, lineType, shift);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Python binding for cv2.fillPoly(img, pts, color, lineType=LINE_8, shift=0,
+// offset=None). Converts pts to CV_32S (required by cv::fillPoly), draws in
+// place, and returns img. offset=None maps to Point() (no offset).
+mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_pts, ARG_color, ARG_lineType, ARG_shift, ARG_offset };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_pts, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+ { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_offset, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Mat pts = mp_obj_to_mat(args[ARG_pts].u_obj);
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int lineType = args[ARG_lineType].u_int;
+ int shift = args[ARG_shift].u_int;
+ Point offset;
+ if(args[ARG_offset].u_obj == mp_const_none)
+ offset = Point(); // Default value
+ else
+ offset = mp_obj_to_point(args[ARG_offset].u_obj);
+
+ // points must be of type CV_32S
+ // NOTE(review): allocator is set on the *source* (pts), not the destination
+ // (pts_32S) — same pattern as fillConvexPoly; looks like a no-op, confirm intent.
+ Mat pts_32S;
+ pts.allocator = &GetNumpyAllocator();
+ pts.convertTo(pts_32S, CV_32S);
+
+ // Call the corresponding OpenCV function
+ try {
+ fillPoly(img, pts_32S, color, lineType, shift, offset);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Python binding for cv2.filter2D(src, ddepth, kernel, dst=None, anchor=None,
+// delta=None, borderType=BORDER_DEFAULT). None defaults: anchor ->
+// Point(-1,-1) (kernel center), delta -> 0.0. Calls cv::filter2D and
+// returns dst.
+mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_ddepth, ARG_kernel, ARG_dst, ARG_anchor, ARG_delta, ARG_borderType };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } },
+ { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ int ddepth = args[ARG_ddepth].u_int;
+ Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj);
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ Point anchor;
+ if(args[ARG_anchor].u_obj == mp_const_none)
+ anchor = Point(-1,-1); // Default value
+ else
+ anchor = mp_obj_to_point(args[ARG_anchor].u_obj);
+ mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_delta].u_obj);
+ int borderType = args[ARG_borderType].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ filter2D(src, dst, ddepth, kernel, anchor, delta, borderType);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Python binding for cv2.findContours(image, mode, method, contours=None,
+// hierarchy=None, offset=None). Returns (contours, hierarchy): contours as a
+// tuple of CV_32F ndarrays and hierarchy as a CV_16S ndarray (ulab has no
+// 32-bit integer dtype). The contours/hierarchy keywords are accepted but
+// currently ignored (see TODOs).
+// Fixes: (1) "std::vector>" / bare "std::vector" had lost their template
+// arguments and were not valid C++ — restored std::vector<std::vector<Point>>
+// and std::vector<Vec4i>, matching cv::findContours' output types.
+// (2) result_tuple[0] stored the raw mp_obj_tuple_t* instead of the mp_obj_t;
+// use contours_obj so the code is correct under every MicroPython object
+// representation, not just those where MP_OBJ_FROM_PTR is the identity.
+mp_obj_t cv2_imgproc_findContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_mode, ARG_method, ARG_contours, ARG_hierarchy, ARG_offset };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_mode, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_contours, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_hierarchy, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_offset, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ int mode = args[ARG_mode].u_int;
+ int method = args[ARG_method].u_int;
+ std::vector<std::vector<Point>> contours; // TODO: Allow user input
+ std::vector<Vec4i> hierarchy; // TODO: Allow user input
+ Point offset = args[ARG_offset].u_obj == mp_const_none ? Point() : mp_obj_to_point(args[ARG_offset].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ findContours(image, contours, hierarchy, mode, method, offset);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Convert contours to a tuple of ndarray objects
+ mp_obj_t contours_obj = mp_obj_new_tuple(contours.size(), NULL);
+ mp_obj_tuple_t *contours_tuple = (mp_obj_tuple_t*) MP_OBJ_TO_PTR(contours_obj);
+
+ for(size_t i = 0; i < contours.size(); i++) {
+ // Each contour is converted to CV_32F for ulab compatibility
+ Mat mat_contour(contours[i]);
+ Mat mat_f32;
+ mat_contour.convertTo(mat_f32, CV_32F);
+ contours_tuple->items[i] = mat_to_mp_obj(mat_f32);
+ }
+
+ // Convert hierarchy to a Mat object
+ Mat mat_hierarchy(hierarchy);
+
+ // Return the result
+ mp_obj_t result_tuple[2];
+ result_tuple[0] = contours_obj;
+ // hierarchy is CV_32S from OpenCV; convert to CV_16S for ulab
+ Mat mat_16s;
+ mat_hierarchy.convertTo(mat_16s, CV_16S);
+ result_tuple[1] = mat_to_mp_obj(mat_16s);
+ return mp_obj_new_tuple(2, result_tuple);
+}
+
+// Binding for cv2.fitEllipse(points): fits an ellipse to a set of 2D points
+// and returns the RotatedRect as the Python-style nested tuple
+// ((center_x, center_y), (width, height), angle).
+mp_obj_t cv2_imgproc_fitEllipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_points };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+
+ RotatedRect ellipse;
+
+ // Call the corresponding OpenCV function
+ try {
+ ellipse = fitEllipse(points);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Convert the result to a tuple
+ mp_obj_t center[2];
+ center[0] = mp_obj_new_float(ellipse.center.x);
+ center[1] = mp_obj_new_float(ellipse.center.y);
+ mp_obj_t size[2];
+ size[0] = mp_obj_new_float(ellipse.size.width);
+ size[1] = mp_obj_new_float(ellipse.size.height);
+ mp_obj_t result[3];
+ result[0] = mp_obj_new_tuple(2, center);
+ result[1] = mp_obj_new_tuple(2, size);
+ result[2] = mp_obj_new_float(ellipse.angle);
+ return mp_obj_new_tuple(3, result);
+}
+
+// Binding for cv2.fitLine(points, distType, param, reps, aeps[, line]).
+// Fits a line to a point set and returns the line ndarray produced by OpenCV
+// (e.g. (vx, vy, x0, y0) for 2D input). Unlike most defaults here, param,
+// reps and aeps are required and converted with mp_obj_get_float.
+mp_obj_t cv2_imgproc_fitLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_points, ARG_distType, ARG_param, ARG_reps, ARG_aeps, ARG_line };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_distType, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_param, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_reps, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_aeps, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_line, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+ int distType = args[ARG_distType].u_int;
+ mp_float_t param = mp_obj_get_float(args[ARG_param].u_obj);
+ mp_float_t reps = mp_obj_get_float(args[ARG_reps].u_obj);
+ mp_float_t aeps = mp_obj_get_float(args[ARG_aeps].u_obj);
+ // Output buffer; None yields an empty Mat that OpenCV fills in
+ Mat line = mp_obj_to_mat(args[ARG_line].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ fitLine(points, line, distType, param, reps, aeps);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(line);
+}
+
+// Binding for cv2.GaussianBlur(src, ksize, sigmaX[, dst, sigmaY, borderType, hint]).
+// Blurs src with a Gaussian kernel and returns the blurred ndarray.
+mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_ksize, ARG_sigmaX, ARG_dst, ARG_sigmaY, ARG_borderType, ARG_hint };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_sigmaX, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_sigmaY, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+ { MP_QSTR_hint, MP_ARG_INT, { .u_int = ALGO_HINT_DEFAULT } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj);
+ mp_float_t sigmaX = mp_obj_get_float(args[ARG_sigmaX].u_obj);
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ // sigmaY=None falls back to sigmaX (same effect as OpenCV's sigmaY=0 default)
+ mp_float_t sigmaY = args[ARG_sigmaY].u_obj == mp_const_none ? sigmaX : mp_obj_get_float(args[ARG_sigmaY].u_obj);
+ int borderType = args[ARG_borderType].u_int;
+ AlgorithmHint hint = (AlgorithmHint) args[ARG_hint].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ GaussianBlur(src, dst, ksize, sigmaX, sigmaY, borderType, hint);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Binding for cv2.getStructuringElement(shape, ksize[, anchor]).
+// Builds a morphology kernel of the given shape/size and returns it as an
+// ndarray. anchor=None maps to (-1, -1), i.e. the kernel center.
+mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_shape, ARG_ksize, ARG_anchor };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_shape, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ int shape = args[ARG_shape].u_int;
+ Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj);
+ Point anchor;
+ if(args[ARG_anchor].u_obj == mp_const_none)
+ anchor = Point(-1, -1); // Default value
+ else
+ anchor = mp_obj_to_point(args[ARG_anchor].u_obj);
+
+ // Instantiate result
+ Mat kernel;
+
+ // Call the corresponding OpenCV function
+ try {
+ kernel = getStructuringElement(shape, ksize, anchor);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(kernel);
+}
+
+// Binding for cv2.HoughCircles(image, method, dp, minDist[, circles, param1,
+// param2, minRadius, maxRadius]). Detects circles via the Hough transform and
+// returns them as an ndarray. param1/param2 default to 100, matching OpenCV.
+mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_method, ARG_dp, ARG_minDist, ARG_circles, ARG_param1, ARG_param2, ARG_minRadius, ARG_maxRadius };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_dp, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_minDist, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_circles, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_param1, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_param2, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_minRadius, MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_maxRadius, MP_ARG_INT, { .u_int = 0 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ int method = args[ARG_method].u_int;
+ mp_float_t dp = mp_obj_get_float(args[ARG_dp].u_obj);
+ mp_float_t minDist = mp_obj_get_float(args[ARG_minDist].u_obj);
+ Mat circles = mp_obj_to_mat(args[ARG_circles].u_obj);
+ float param1;
+ if(args[ARG_param1].u_obj == mp_const_none)
+ param1 = 100; // Default value
+ else
+ param1 = mp_obj_get_float(args[ARG_param1].u_obj);
+ float param2;
+ if(args[ARG_param2].u_obj == mp_const_none)
+ param2 = 100; // Default value
+ else
+ param2 = mp_obj_get_float(args[ARG_param2].u_obj);
+ int minRadius = args[ARG_minRadius].u_int;
+ int maxRadius = args[ARG_maxRadius].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ HoughCircles(image, circles, method, dp, minDist, param1, param2, minRadius, maxRadius);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(circles);
+}
+
+// Binding for cv2.HoughCirclesWithAccumulator: same as HoughCircles, but each
+// detected circle carries its accumulator vote count, so the result rows are
+// (x, y, radius, votes) float32 values.
+mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_method, ARG_dp, ARG_minDist, ARG_circles, ARG_param1, ARG_param2, ARG_minRadius, ARG_maxRadius };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_dp, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_minDist, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_circles, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_param1, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_param2, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_minRadius, MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_maxRadius, MP_ARG_INT, { .u_int = 0 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ int method = args[ARG_method].u_int;
+ mp_float_t dp = mp_obj_get_float(args[ARG_dp].u_obj);
+ mp_float_t minDist = mp_obj_get_float(args[ARG_minDist].u_obj);
+ Mat circles = mp_obj_to_mat(args[ARG_circles].u_obj);
+ float param1;
+ if(args[ARG_param1].u_obj == mp_const_none)
+ param1 = 100; // Default value
+ else
+ param1 = mp_obj_get_float(args[ARG_param1].u_obj);
+ float param2;
+ if(args[ARG_param2].u_obj == mp_const_none)
+ param2 = 100; // Default value
+ else
+ param2 = mp_obj_get_float(args[ARG_param2].u_obj);
+ int minRadius = args[ARG_minRadius].u_int;
+ int maxRadius = args[ARG_maxRadius].u_int;
+
+ // Vector to hold the circles and votes
+ // Fix: element type was missing; Vec4f carries (x, y, radius, votes)
+ std::vector<Vec4f> circles_acc;
+
+ // Call the corresponding OpenCV function
+ try {
+ HoughCircles(image, circles_acc, method, dp, minDist, param1, param2, minRadius, maxRadius);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Copy the vector of circles and votes to output circles object
+ Mat(circles_acc).copyTo(circles);
+
+ // Return the result
+ return mat_to_mp_obj(circles);
+}
+
+// Binding for cv2.HoughLines(image, rho, theta, threshold[, lines, srn, stn,
+// min_theta, max_theta, use_edgeval]). Detects lines with the standard Hough
+// transform; rho/theta None-defaults are 1 px and 1 degree (CV_PI/180).
+// NOTE(review): use_edgeval is forwarded to this build's HoughLines overload;
+// it is not part of stock OpenCV's public signature — verify against the
+// vendored OpenCV fork.
+mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_srn, ARG_stn, ARG_min_theta, ARG_max_theta, ARG_use_edgeval };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } },
+ { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_srn, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_stn, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_min_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_max_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_use_edgeval, MP_ARG_BOOL, { .u_bool = false } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ mp_float_t rho;
+ if(args[ARG_rho].u_obj == mp_const_none)
+ rho = 1; // Default value
+ else
+ rho = mp_obj_get_float(args[ARG_rho].u_obj);
+ mp_float_t theta;
+ if(args[ARG_theta].u_obj == mp_const_none)
+ theta = CV_PI / 180; // Default value
+ else
+ theta = mp_obj_get_float(args[ARG_theta].u_obj);
+ int threshold = args[ARG_threshold].u_int;
+ Mat lines = mp_obj_to_mat(args[ARG_lines].u_obj);
+ mp_float_t srn;
+ if(args[ARG_srn].u_obj == mp_const_none)
+ srn = 0; // Default value
+ else
+ srn = mp_obj_get_float(args[ARG_srn].u_obj);
+ mp_float_t stn;
+ if(args[ARG_stn].u_obj == mp_const_none)
+ stn = 0; // Default value
+ else
+ stn = mp_obj_get_float(args[ARG_stn].u_obj);
+ mp_float_t min_theta;
+ if(args[ARG_min_theta].u_obj == mp_const_none)
+ min_theta = 0; // Default value
+ else
+ min_theta = mp_obj_get_float(args[ARG_min_theta].u_obj);
+ mp_float_t max_theta;
+ if(args[ARG_max_theta].u_obj == mp_const_none)
+ max_theta = CV_PI; // Default value
+ else
+ max_theta = mp_obj_get_float(args[ARG_max_theta].u_obj);
+ bool use_edgeval = args[ARG_use_edgeval].u_bool;
+
+ // Call the corresponding OpenCV function
+ try {
+ HoughLines(image, lines, rho, theta, threshold, srn, stn, min_theta, max_theta, use_edgeval);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(lines);
+}
+
+// Binding for cv2.HoughLinesP(image, rho, theta, threshold[, lines,
+// minLineLength, maxLineGap]). Probabilistic Hough transform; OpenCV emits
+// int32 segment endpoints, which are converted to float32 before returning.
+mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_minLineLength, ARG_maxLineGap };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } },
+ { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_minLineLength, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_maxLineGap, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ mp_float_t rho = mp_obj_get_float(args[ARG_rho].u_obj);
+ mp_float_t theta = mp_obj_get_float(args[ARG_theta].u_obj);
+ int threshold = args[ARG_threshold].u_int;
+ Mat lines32S; // TODO: Allow user input
+ mp_float_t minLineLength;
+ if(args[ARG_minLineLength].u_obj == mp_const_none)
+ minLineLength = 0; // Default value
+ else
+ minLineLength = mp_obj_get_float(args[ARG_minLineLength].u_obj);
+ mp_float_t maxLineGap;
+ if(args[ARG_maxLineGap].u_obj == mp_const_none)
+ maxLineGap = 0; // Default value
+ else
+ maxLineGap = mp_obj_get_float(args[ARG_maxLineGap].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ HoughLinesP(image, lines32S, rho, theta, threshold, minLineLength, maxLineGap);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Convert lines to float
+ // The custom allocator backs the Mat with ndarray-compatible storage so the
+ // conversion result can be handed to MicroPython without copying
+ Mat lines;
+ lines.allocator = &GetNumpyAllocator();
+ lines32S.convertTo(lines, CV_32F);
+
+ // Return the result
+ return mat_to_mp_obj(lines);
+}
+
+// Binding for cv2.HoughLinesWithAccumulator: same as HoughLines, but each
+// detected line carries its accumulator vote count, so the result rows are
+// (rho, theta, votes) float32 values.
+mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_srn, ARG_stn, ARG_min_theta, ARG_max_theta };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } },
+ { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_srn, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_stn, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_min_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_max_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+ mp_float_t rho;
+ if(args[ARG_rho].u_obj == mp_const_none)
+ rho = 1; // Default value
+ else
+ rho = mp_obj_get_float(args[ARG_rho].u_obj);
+ mp_float_t theta;
+ if(args[ARG_theta].u_obj == mp_const_none)
+ theta = CV_PI / 180; // Default value
+ else
+ theta = mp_obj_get_float(args[ARG_theta].u_obj);
+ int threshold = args[ARG_threshold].u_int;
+ Mat lines = mp_obj_to_mat(args[ARG_lines].u_obj);
+ mp_float_t srn;
+ if(args[ARG_srn].u_obj == mp_const_none)
+ srn = 0; // Default value
+ else
+ srn = mp_obj_get_float(args[ARG_srn].u_obj);
+ mp_float_t stn;
+ if(args[ARG_stn].u_obj == mp_const_none)
+ stn = 0; // Default value
+ else
+ stn = mp_obj_get_float(args[ARG_stn].u_obj);
+ mp_float_t min_theta;
+ if(args[ARG_min_theta].u_obj == mp_const_none)
+ min_theta = 0; // Default value
+ else
+ min_theta = mp_obj_get_float(args[ARG_min_theta].u_obj);
+ mp_float_t max_theta;
+ if(args[ARG_max_theta].u_obj == mp_const_none)
+ max_theta = CV_PI; // Default value
+ else
+ max_theta = mp_obj_get_float(args[ARG_max_theta].u_obj);
+
+ // Vector to hold the lines and votes
+ // Fix: element type was missing; Vec3f carries (rho, theta, votes)
+ std::vector<Vec3f> lines_acc;
+
+ // Call the corresponding OpenCV function
+ try {
+ HoughLines(image, lines_acc, rho, theta, threshold, srn, stn, min_theta, max_theta);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Copy the vector of lines and votes to output lines object
+ Mat(lines_acc).copyTo(lines);
+
+ // Return the result
+ return mat_to_mp_obj(lines);
+}
+
+// Binding for cv2.isContourConvex(contour): returns a bool indicating whether
+// the given contour is convex.
+mp_obj_t cv2_imgproc_isContourConvex(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_contour };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj);
+
+ // Call the corresponding OpenCV function
+ bool isConvex;
+ try {
+ isConvex = isContourConvex(contour);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mp_obj_new_bool(isConvex);
+}
+
+// Binding for cv2.Laplacian(src, ddepth[, dst, ksize, scale, delta,
+// borderType]). Computes the Laplacian of src and returns it as an ndarray;
+// scale/delta None-defaults are 1.0 and 0.0, matching OpenCV.
+mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_ddepth, ARG_dst, ARG_ksize, ARG_scale, ARG_delta, ARG_borderType };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_ksize, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_scale, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ int ddepth = args[ARG_ddepth].u_int;
+ int ksize = args[ARG_ksize].u_int;
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+ mp_float_t scale = args[ARG_scale].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_scale].u_obj);
+ mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_delta].u_obj);
+ int borderType = args[ARG_borderType].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ Laplacian(src, dst, ddepth, ksize, scale, delta, borderType);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Binding for cv2.line(img, pt1, pt2, color[, thickness, lineType, shift]).
+// Draws a line segment on img in place and returns the (modified) image.
+mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_lineType, ARG_shift };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+ { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+ { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj);
+ Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj);
+ Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+ int thickness = args[ARG_thickness].u_int;
+ int lineType = args[ARG_lineType].u_int;
+ int shift = args[ARG_shift].u_int;
+
+ // Call the corresponding OpenCV function
+ try {
+ line(img, pt1, pt2, color, thickness, lineType, shift);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(img);
+}
+
+// Binding for cv2.matchShapes(contour1, contour2, method, parameter).
+// Compares two shapes and returns the similarity metric as a float.
+// Note: `parameter` is required here (cv2 accepts it positionally as well).
+mp_obj_t cv2_imgproc_matchShapes(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_contour1, ARG_contour2, ARG_method, ARG_parameter };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_contour1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_contour2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_parameter, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat contour1 = mp_obj_to_mat(args[ARG_contour1].u_obj);
+ Mat contour2 = mp_obj_to_mat(args[ARG_contour2].u_obj);
+ int method = args[ARG_method].u_int;
+ mp_float_t parameter = mp_obj_get_float(args[ARG_parameter].u_obj);
+
+ mp_float_t retval;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = matchShapes(contour1, contour2, method, parameter);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mp_obj_new_float(retval);
+}
+
+// Binding for cv2.matchTemplate(img, templ, method[, result, mask]).
+// Slides templ over img and returns the comparison map as an ndarray.
+mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_img, ARG_templ, ARG_method, ARG_result, ARG_mask };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_templ, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = TM_CCOEFF_NORMED } },
+ { MP_QSTR_result, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ { MP_QSTR_mask, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+ Mat templ = mp_obj_to_mat(args[ARG_templ].u_obj);
+ int method = args[ARG_method].u_int;
+ Mat result = mp_obj_to_mat(args[ARG_result].u_obj);
+ Mat mask = mp_obj_to_mat(args[ARG_mask].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ matchTemplate(img, templ, result, method, mask);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(result);
+}
+
+// Binding for cv2.medianBlur(src, ksize[, dst]): median-filters src with an
+// aperture of ksize x ksize and returns the blurred ndarray.
+mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_ksize, ARG_dst };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+ { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ int ksize = args[ARG_ksize].u_int;
+ Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+
+ // Call the corresponding OpenCV function
+ try {
+ medianBlur(src, dst, ksize);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result
+ return mat_to_mp_obj(dst);
+}
+
+// Binding for cv2.minAreaRect(points): computes the minimum-area rotated
+// rectangle around a point set and returns it as the Python-style tuple
+// ((center_x, center_y), (width, height), angle).
+mp_obj_t cv2_imgproc_minAreaRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_points };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+
+ RotatedRect retval;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = minAreaRect(points);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result as a tuple
+ mp_obj_t center_tuple[2];
+ center_tuple[0] = mp_obj_new_float(retval.center.x);
+ center_tuple[1] = mp_obj_new_float(retval.center.y);
+ mp_obj_t size_tuple[2];
+ size_tuple[0] = mp_obj_new_float(retval.size.width);
+ size_tuple[1] = mp_obj_new_float(retval.size.height);
+ mp_obj_t result_tuple[3];
+ result_tuple[0] = mp_obj_new_tuple(2, center_tuple);
+ result_tuple[1] = mp_obj_new_tuple(2, size_tuple);
+ result_tuple[2] = mp_obj_new_float(retval.angle);
+ return mp_obj_new_tuple(3, result_tuple);
+}
+
+// Binding for cv2.minEnclosingCircle(points): finds the minimum enclosing
+// circle of a point set and returns ((center_x, center_y), radius).
+mp_obj_t cv2_imgproc_minEnclosingCircle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_points };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+
+ Point2f center;
+ float radius;
+
+ // Call the corresponding OpenCV function
+ try {
+ minEnclosingCircle(points, center, radius);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result as a tuple
+ mp_obj_t center_tuple[2];
+ center_tuple[0] = mp_obj_new_float(center.x);
+ center_tuple[1] = mp_obj_new_float(center.y);
+ // Fix: the result tuple has exactly two elements (center, radius); the
+ // array was previously over-sized to 3 (copy-paste from minAreaRect).
+ mp_obj_t result_tuple[2];
+ result_tuple[0] = mp_obj_new_tuple(2, center_tuple);
+ result_tuple[1] = mp_obj_new_float(radius);
+ return mp_obj_new_tuple(2, result_tuple);
+}
+
+// Binding for cv2.minEnclosingTriangle(points[, triangle]): finds the minimum
+// enclosing triangle of a point set and returns (area, triangle_ndarray).
+mp_obj_t cv2_imgproc_minEnclosingTriangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_points, ARG_triangle };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_triangle, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat points = mp_obj_to_mat(args[ARG_points].u_obj);
+ Mat triangle = mp_obj_to_mat(args[ARG_triangle].u_obj);
+
+ mp_float_t retval;
+
+ // Call the corresponding OpenCV function
+ try {
+ retval = minEnclosingTriangle(points, triangle);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Return the result as a tuple
+ mp_obj_t result_tuple[2];
+ result_tuple[0] = mp_obj_new_float(retval);
+ result_tuple[1] = mat_to_mp_obj(triangle);
+ return mp_obj_new_tuple(2, result_tuple);
+}
+
+// Binding for cv2.moments(src[, binary]): computes image/contour moments and
+// returns them as a dict keyed like the Python cv2 result — spatial (m00..m03),
+// central (mu20..mu03), and normalized central (nu20..nu03) moments.
+mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ // Define the arguments
+ enum { ARG_src, ARG_binary };
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+ { MP_QSTR_binary, MP_ARG_BOOL, { .u_bool = false } },
+ };
+
+ // Parse the arguments
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+ // Convert arguments to required types
+ Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+ bool binary = args[ARG_binary].u_bool;
+ Moments moments;
+
+ // Call the corresponding OpenCV function
+ // cv:: qualifier avoids shadowing by the local `moments` variable
+ try {
+ moments = cv::moments(src, binary);
+ } catch(Exception& e) {
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+ }
+
+ // Create a dictionary to hold the moments
+ mp_obj_t moments_dict = mp_obj_new_dict(0);
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m00), mp_obj_new_float(moments.m00));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m10), mp_obj_new_float(moments.m10));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m01), mp_obj_new_float(moments.m01));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m20), mp_obj_new_float(moments.m20));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m11), mp_obj_new_float(moments.m11));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m02), mp_obj_new_float(moments.m02));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m30), mp_obj_new_float(moments.m30));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m21), mp_obj_new_float(moments.m21));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m12), mp_obj_new_float(moments.m12));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m03), mp_obj_new_float(moments.m03));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu20), mp_obj_new_float(moments.mu20));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu11), mp_obj_new_float(moments.mu11));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu02), mp_obj_new_float(moments.mu02));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu30), mp_obj_new_float(moments.mu30));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu21), mp_obj_new_float(moments.mu21));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu12), mp_obj_new_float(moments.mu12));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu03), mp_obj_new_float(moments.mu03));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu20), mp_obj_new_float(moments.nu20));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu11), mp_obj_new_float(moments.nu11));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu02), mp_obj_new_float(moments.nu02));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu30), mp_obj_new_float(moments.nu30));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu21), mp_obj_new_float(moments.nu21));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu12), mp_obj_new_float(moments.nu12));
+ mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu03), mp_obj_new_float(moments.nu03));
+
+ // Return the moments dictionary
+ return moments_dict;
+}
+
+// cv2.morphologyEx(src, op, kernel, dst=None, anchor=None, iterations=1,
+//                  borderType=BORDER_CONSTANT, borderValue=None) -> dst
+//
+// Applies an advanced morphological operation (selected by `op`, e.g.
+// MORPH_OPEN / MORPH_CLOSE) to `src` using structuring element `kernel`.
+// anchor=None maps to Point(-1, -1) (kernel center) and borderValue=None
+// maps to morphologyDefaultBorderValue(), mirroring the OpenCV C++ defaults.
+// Returns the destination image; a cv::Exception is re-raised in Python.
+mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_op, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } },
+        { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } },
+        { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+    int op = args[ARG_op].u_int;
+    Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj);
+    Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+    Point anchor;
+    if(args[ARG_anchor].u_obj == mp_const_none)
+        anchor = Point(-1, -1); // Default value: anchor at the kernel center
+    else
+        anchor = mp_obj_to_point(args[ARG_anchor].u_obj);
+    int iterations = args[ARG_iterations].u_int;
+    int borderType = args[ARG_borderType].u_int;
+    Scalar borderValue;
+    if(args[ARG_borderValue].u_obj == mp_const_none)
+        borderValue = morphologyDefaultBorderValue(); // Default value
+    else
+        borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj);
+
+    // Call the corresponding OpenCV function
+    try {
+        morphologyEx(src, dst, op, kernel, anchor, iterations, borderType, borderValue);
+    } catch(Exception& e) {
+        // NOTE(review): MP_ERROR_TEXT conventionally takes a string literal;
+        // confirm this build's macro accepts a runtime string like e.what().
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(dst);
+}
+
+// cv2.pointPolygonTest(contour, pt, measureDist) -> retval
+//
+// Tests whether `pt` lies inside `contour`. Per OpenCV semantics the result
+// is +1 / 0 / -1 (inside / on edge / outside) when measureDist is False, or
+// the signed distance to the nearest contour edge when measureDist is True.
+// A cv::Exception is re-raised as a MicroPython exception.
+mp_obj_t cv2_imgproc_pointPolygonTest(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_contour, ARG_pt, ARG_measureDist };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_pt, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_measureDist, MP_ARG_REQUIRED | MP_ARG_BOOL, { .u_bool = false } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj);
+    Point pt = mp_obj_to_point(args[ARG_pt].u_obj);
+    bool measureDist = args[ARG_measureDist].u_bool;
+
+    mp_float_t retval;
+
+    // Call the corresponding OpenCV function
+    try {
+        retval = pointPolygonTest(contour, pt, measureDist);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mp_obj_new_float(retval);
+}
+
+// cv2.putText(img, text, org, fontFace, fontScale, color, thickness=1,
+//             lineType=LINE_8, bottomLeftOrigin=False) -> img
+//
+// Draws `text` onto `img` at `org` (bottom-left corner of the string) using
+// the given Hershey font, scale and color. The image is modified in place
+// and also returned, matching the OpenCV Python binding. A cv::Exception is
+// re-raised as a MicroPython exception.
+mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_img, ARG_text, ARG_org, ARG_fontFace, ARG_fontScale, ARG_color, ARG_thickness, ARG_lineType, ARG_bottomLeftOrigin };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_text, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_org, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_fontFace, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = FONT_HERSHEY_SIMPLEX } },
+        // Fix: the original entry was missing the MP_ARG_OBJ kind flag
+        // (it was MP_ARG_REQUIRED alone), unlike every other required
+        // object argument in this file. fontScale is read with
+        // mp_obj_get_float() below, so it must be parsed as an object.
+        { MP_QSTR_fontScale, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+        { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+        { MP_QSTR_bottomLeftOrigin, MP_ARG_BOOL, { .u_bool = false } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+    // Use the length-aware accessor so text containing NUL bytes is preserved
+    size_t len;
+    const char *text_str = mp_obj_str_get_data(args[ARG_text].u_obj, &len);
+    String text(text_str, len);
+    Point org = mp_obj_to_point(args[ARG_org].u_obj);
+    int fontFace = args[ARG_fontFace].u_int;
+    mp_float_t fontScale = mp_obj_get_float(args[ARG_fontScale].u_obj);
+    Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+    int thickness = args[ARG_thickness].u_int;
+    int lineType = args[ARG_lineType].u_int;
+    bool bottomLeftOrigin = args[ARG_bottomLeftOrigin].u_bool;
+
+    // Call the corresponding OpenCV function
+    try {
+        putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(img);
+}
+
+// cv2.rectangle(img, pt1, pt2, color, thickness=1, lineType=LINE_8,
+//               shift=0) -> img
+//
+// Draws a rectangle with opposite corners `pt1` and `pt2` onto `img`. The
+// image is modified in place and also returned, matching the OpenCV Python
+// binding. A cv::Exception is re-raised as a MicroPython exception.
+mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_lineType, ARG_shift };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+        { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+        { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+    Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj);
+    Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj);
+    Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+    int thickness = args[ARG_thickness].u_int;
+    int lineType = args[ARG_lineType].u_int;
+    int shift = args[ARG_shift].u_int;
+
+    // Call the corresponding OpenCV function
+    try {
+        rectangle(img, pt1, pt2, color, thickness, lineType, shift);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(img);
+}
+
+// cv2.Scharr(src, ddepth, dx, dy, dst=None, scale=None, delta=None,
+//            borderType=BORDER_DEFAULT) -> dst
+//
+// Computes the first image derivative of `src` along x (dx) or y (dy) using
+// the Scharr operator, writing into `dst` with output depth `ddepth`.
+// scale/delta are declared as objects so callers may pass floats; None maps
+// to the OpenCV defaults of 1.0 and 0.0 respectively. A cv::Exception is
+// re-raised as a MicroPython exception.
+mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_src, ARG_ddepth, ARG_dx, ARG_dy, ARG_dst, ARG_scale, ARG_delta, ARG_borderType };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dx, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dy, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_scale, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+    int ddepth = args[ARG_ddepth].u_int;
+    int dx = args[ARG_dx].u_int;
+    int dy = args[ARG_dy].u_int;
+    Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+    mp_float_t scale = args[ARG_scale].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_scale].u_obj);
+    mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_delta].u_obj);
+    int borderType = args[ARG_borderType].u_int;
+
+    // Call the corresponding OpenCV function
+    try {
+        Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(dst);
+}
+
+// cv2.Sobel(src, ddepth, dx, dy, dst=None, ksize=3, scale=None, delta=None,
+//           borderType=BORDER_DEFAULT) -> dst
+//
+// Computes image derivatives of `src` (order dx in x, dy in y) with an
+// extended Sobel kernel of aperture `ksize`, writing into `dst` with output
+// depth `ddepth`. scale/delta are declared as objects so callers may pass
+// floats; None maps to the OpenCV defaults of 1.0 and 0.0. A cv::Exception
+// is re-raised as a MicroPython exception.
+mp_obj_t cv2_imgproc_Sobel(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_src, ARG_ddepth, ARG_dx, ARG_dy, ARG_dst, ARG_ksize, ARG_scale, ARG_delta, ARG_borderType };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dx, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dy, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_ksize, MP_ARG_INT, { .u_int = 3 } },
+        { MP_QSTR_scale, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+    int ddepth = args[ARG_ddepth].u_int;
+    int dx = args[ARG_dx].u_int;
+    int dy = args[ARG_dy].u_int;
+    Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+    int ksize = args[ARG_ksize].u_int;
+    mp_float_t scale = args[ARG_scale].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_scale].u_obj);
+    mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_delta].u_obj);
+    int borderType = args[ARG_borderType].u_int;
+
+    // Call the corresponding OpenCV function
+    try {
+        Sobel(src, dst, ddepth, dx, dy, ksize, scale, delta, borderType);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(dst);
+}
+
+// cv2.spatialGradient(src, dx=None, dy=None, ksize=3,
+//                     borderType=BORDER_DEFAULT) -> (dx, dy)
+//
+// Computes the first-order x and y image derivatives of `src` in a single
+// pass using a Sobel operator of aperture `ksize`. dx/dy act as optional
+// output buffers (None creates empty Mats for OpenCV to fill). Returns the
+// pair as a tuple; a cv::Exception is re-raised as a MicroPython exception.
+mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_src, ARG_dx, ARG_dy, ARG_ksize, ARG_borderType };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_dx, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_dy, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_ksize, MP_ARG_INT, { .u_int = 3 } },
+        { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+    Mat dx = mp_obj_to_mat(args[ARG_dx].u_obj);
+    Mat dy = mp_obj_to_mat(args[ARG_dy].u_obj);
+    int ksize = args[ARG_ksize].u_int;
+    int borderType = args[ARG_borderType].u_int;
+
+    // Call the corresponding OpenCV function
+    try {
+        spatialGradient(src, dx, dy, ksize, borderType);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result as a (dx, dy) tuple, matching the cv2 binding
+    mp_obj_t result[2];
+    result[0] = mat_to_mp_obj(dx);
+    result[1] = mat_to_mp_obj(dy);
+    return mp_obj_new_tuple(2, result);
+}
+
+// cv2.threshold(src, thresh, maxval, type, dst=None) -> (retval, dst)
+//
+// Applies a fixed-level threshold to `src`. Per OpenCV semantics, `retval`
+// is the threshold that was actually used (meaningful with THRESH_OTSU /
+// THRESH_TRIANGLE, where it is computed automatically). Returns the value
+// and the thresholded image as a tuple; a cv::Exception is re-raised as a
+// MicroPython exception.
+mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_src, ARG_thresh, ARG_maxval, ARG_type, ARG_dst };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_thresh, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_maxval, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_type, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = THRESH_BINARY } },
+        { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+    mp_float_t thresh = mp_obj_get_float(args[ARG_thresh].u_obj);
+    mp_float_t maxval = mp_obj_get_float(args[ARG_maxval].u_obj);
+    int type = args[ARG_type].u_int;
+    Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+
+    mp_float_t retval;
+
+    // Call the corresponding OpenCV function
+    try {
+        retval = threshold(src, dst, thresh, maxval, type);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result as a tuple
+    mp_obj_t result_tuple[2];
+    result_tuple[0] = mp_obj_new_float(retval);
+    result_tuple[1] = mat_to_mp_obj(dst);
+    return mp_obj_new_tuple(2, result_tuple);
+}
diff --git a/src/imgproc.h b/src/imgproc.h
index 33b06b0..0847117 100644
--- a/src/imgproc.h
+++ b/src/imgproc.h
@@ -1,4 +1,337 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * imgproc.h
+ *
+ * MicroPython wrappers for functions from the OpenCV imgproc module.
+ *------------------------------------------------------------------------------
+ */
+
// C headers
#include "py/runtime.h"
+// Function declarations
+extern mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_boundingRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_boxPoints(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_convexHull(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_drawContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_drawMarker(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_ellipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_findContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_fitEllipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_fitLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_isContourConvex(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_matchShapes(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_minAreaRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_minEnclosingCircle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_minEnclosingTriangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_pointPolygonTest(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_Sobel(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+
+// Python references to the functions.
+// The integer in each MP_DEFINE_CONST_FUN_OBJ_KW is the minimum number of
+// arguments, which must match the count of MP_ARG_REQUIRED entries in the
+// corresponding function's allowed_args table.
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_approxPolyDP_obj, 3, cv2_imgproc_approxPolyDP);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_approxPolyN_obj, 2, cv2_imgproc_approxPolyN);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arcLength_obj, 2, cv2_imgproc_arcLength);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boundingRect_obj, 1, cv2_imgproc_boundingRect);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxPoints_obj, 1, cv2_imgproc_boxPoints);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_contourArea_obj, 1, cv2_imgproc_contourArea);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexHull_obj, 1, cv2_imgproc_convexHull);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexityDefects_obj, 1, cv2_imgproc_convexityDefects);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawContours_obj, 3, cv2_imgproc_drawContours);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawMarker_obj, 3, cv2_imgproc_drawMarker);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_ellipse_obj, 7, cv2_imgproc_ellipse);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_filter2D_obj, 3, cv2_imgproc_filter2D);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_findContours_obj, 3, cv2_imgproc_findContours);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fitEllipse_obj, 1, cv2_imgproc_fitEllipse);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fitLine_obj, 5, cv2_imgproc_fitLine);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_GaussianBlur_obj, 3, cv2_imgproc_GaussianBlur);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4, cv2_imgproc_HoughCirclesWithAccumulator);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_isContourConvex_obj, 1, cv2_imgproc_isContourConvex);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchShapes_obj, 3, cv2_imgproc_matchShapes);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchTemplate_obj, 3, cv2_imgproc_matchTemplate);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minAreaRect_obj, 1, cv2_imgproc_minAreaRect);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minEnclosingCircle_obj, 1, cv2_imgproc_minEnclosingCircle);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minEnclosingTriangle_obj, 1, cv2_imgproc_minEnclosingTriangle);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_moments_obj, 1, cv2_imgproc_moments);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_pointPolygonTest_obj, 3, cv2_imgproc_pointPolygonTest);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Sobel_obj, 4, cv2_imgproc_Sobel);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_spatialGradient_obj, 1, cv2_imgproc_spatialGradient);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_threshold);
+
+// Global definitions for functions and constants
+#define OPENCV_IMGPROC_GLOBALS \
+ /* Functions */ \
+ { MP_ROM_QSTR(MP_QSTR_adaptiveThreshold), MP_ROM_PTR(&cv2_imgproc_adaptiveThreshold_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_approxPolyDP), MP_ROM_PTR(&cv2_imgproc_approxPolyDP_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_approxPolyN), MP_ROM_PTR(&cv2_imgproc_approxPolyN_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_arcLength), MP_ROM_PTR(&cv2_imgproc_arcLength_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_boundingRect), MP_ROM_PTR(&cv2_imgproc_boundingRect_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_boxPoints), MP_ROM_PTR(&cv2_imgproc_boxPoints_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_contourArea), MP_ROM_PTR(&cv2_imgproc_contourArea_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_convexHull), MP_ROM_PTR(&cv2_imgproc_convexHull_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_convexityDefects), MP_ROM_PTR(&cv2_imgproc_convexityDefects_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_drawContours), MP_ROM_PTR(&cv2_imgproc_drawContours_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_drawMarker), MP_ROM_PTR(&cv2_imgproc_drawMarker_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_ellipse), MP_ROM_PTR(&cv2_imgproc_ellipse_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_filter2D), MP_ROM_PTR(&cv2_imgproc_filter2D_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_findContours), MP_ROM_PTR(&cv2_imgproc_findContours_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_fitEllipse), MP_ROM_PTR(&cv2_imgproc_fitEllipse_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_fitLine), MP_ROM_PTR(&cv2_imgproc_fitLine_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_GaussianBlur), MP_ROM_PTR(&cv2_imgproc_GaussianBlur_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_HoughCirclesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughCirclesWithAccumulator_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_isContourConvex), MP_ROM_PTR(&cv2_imgproc_isContourConvex_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_matchShapes), MP_ROM_PTR(&cv2_imgproc_matchShapes_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_matchTemplate), MP_ROM_PTR(&cv2_imgproc_matchTemplate_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_minAreaRect), MP_ROM_PTR(&cv2_imgproc_minAreaRect_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_minEnclosingCircle), MP_ROM_PTR(&cv2_imgproc_minEnclosingCircle_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_minEnclosingTriangle), MP_ROM_PTR(&cv2_imgproc_minEnclosingTriangle_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_moments), MP_ROM_PTR(&cv2_imgproc_moments_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_pointPolygonTest), MP_ROM_PTR(&cv2_imgproc_pointPolygonTest_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_Sobel), MP_ROM_PTR(&cv2_imgproc_Sobel_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_spatialGradient), MP_ROM_PTR(&cv2_imgproc_spatialGradient_obj) }, \
+ { MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&cv2_imgproc_threshold_obj) }, \
+ \
+ /* Morphology operation types, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_OPEN), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_CLOSE), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_GRADIENT), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_TOPHAT), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_BLACKHAT), MP_ROM_INT(6) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_HITMISS), MP_ROM_INT(7) }, \
+ \
+ /* Morphology shapes, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_RECT), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) }, \
+ \
+ /* Distance types, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_DIST_USER), MP_ROM_INT(-1) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_L1), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_L2), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_C), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_L12), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_FAIR), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_WELSCH), MP_ROM_INT(6) }, \
+ { MP_ROM_QSTR(MP_QSTR_DIST_HUBER), MP_ROM_INT(7) }, \
+ \
+ /* Threshold types, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY_INV), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_TRUNC), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO_INV), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_MASK), MP_ROM_INT(7) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_OTSU), MP_ROM_INT(8) }, \
+ { MP_ROM_QSTR(MP_QSTR_THRESH_TRIANGLE), MP_ROM_INT(16) }, \
+ \
+ /* Adaptive threshold methods, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_MEAN_C), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_GAUSSIAN_C), MP_ROM_INT(1) }, \
+ \
+ /* Retrieval modes, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_RETR_EXTERNAL), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_RETR_LIST), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_RETR_CCOMP), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_RETR_TREE), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_RETR_FLOODFILL), MP_ROM_INT(4) }, \
+ \
+ /* Contour approximation methods, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_CHAIN_CODE), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_NONE), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_SIMPLE), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_TC89_L1), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_TC89_KCOS), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_LINK_RUNS), MP_ROM_INT(5) }, \
+ \
+ /* Shape match modes, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_CONTOURS_MATCH_I1), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_CONTOURS_MATCH_I2), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_CONTOURS_MATCH_I3), MP_ROM_INT(3) }, \
+ \
+ /* Hough modes, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_HOUGH_MULTI_SCALE), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT_ALT), MP_ROM_INT(4) }, \
+ \
+ /* Color conversion codes, from opencv2/imgproc.hpp */ \
+ /* Not all are included, to reduce bloat */ \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGRA), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2RGBA), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2RGB), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGBA), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGRA), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGB), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGB), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGBA), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGRA), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2GRAY), MP_ROM_INT(6) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2GRAY), MP_ROM_INT(7) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR), MP_ROM_INT(8) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGB), MP_ROM_INT(8) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGRA), MP_ROM_INT(9) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGBA), MP_ROM_INT(9) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2GRAY), MP_ROM_INT(10) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2GRAY), MP_ROM_INT(11) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGR565), MP_ROM_INT(12) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR565), MP_ROM_INT(13) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGR), MP_ROM_INT(14) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGB), MP_ROM_INT(15) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR565), MP_ROM_INT(16) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR565), MP_ROM_INT(17) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGRA), MP_ROM_INT(18) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGBA), MP_ROM_INT(19) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR565), MP_ROM_INT(20) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652GRAY), MP_ROM_INT(21) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2YCrCb), MP_ROM_INT(36) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2YCrCb), MP_ROM_INT(37) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2BGR), MP_ROM_INT(38) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2RGB), MP_ROM_INT(39) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2HSV), MP_ROM_INT(40) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2HSV), MP_ROM_INT(41) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2BGR), MP_ROM_INT(54) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2RGB), MP_ROM_INT(55) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2BGR), MP_ROM_INT(46) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2BGR), MP_ROM_INT(47) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2BGR), MP_ROM_INT(48) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2BGR), MP_ROM_INT(49) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2RGB), MP_ROM_INT(46) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2RGB), MP_ROM_INT(47) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2RGB), MP_ROM_INT(48) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2RGB), MP_ROM_INT(49) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2GRAY), MP_ROM_INT(86) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2GRAY), MP_ROM_INT(87) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2GRAY), MP_ROM_INT(88) }, \
+ { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2GRAY), MP_ROM_INT(89) }, \
+ \
+ /* Line types, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_FILLED), MP_ROM_INT(-1) }, \
+ { MP_ROM_QSTR(MP_QSTR_LINE_4), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_LINE_8), MP_ROM_INT(8) }, \
+ { MP_ROM_QSTR(MP_QSTR_LINE_AA), MP_ROM_INT(16) }, \
+ \
+ /* Hershey fonts, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_TRIPLEX), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX_SMALL), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_SIMPLEX), MP_ROM_INT(6) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_COMPLEX), MP_ROM_INT(7) }, \
+ { MP_ROM_QSTR(MP_QSTR_FONT_ITALIC), MP_ROM_INT(16) }, \
+ \
+ /* Marker types, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_CROSS), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_TILTED_CROSS), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_STAR), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_DIAMOND), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_SQUARE), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_UP), MP_ROM_INT(5) }, \
+ { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_DOWN), MP_ROM_INT(6) }, \
+ \
+ /* Template matching modes, from opencv2/imgproc.hpp */ \
+ { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF), MP_ROM_INT(0) }, \
+ { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF_NORMED), MP_ROM_INT(1) }, \
+ { MP_ROM_QSTR(MP_QSTR_TM_CCORR), MP_ROM_INT(2) }, \
+ { MP_ROM_QSTR(MP_QSTR_TM_CCORR_NORMED), MP_ROM_INT(3) }, \
+ { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF), MP_ROM_INT(4) }, \
+ { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF_NORMED), MP_ROM_INT(5) }
diff --git a/src/numpy.cpp b/src/numpy.cpp
index ca84850..aeee8d7 100644
--- a/src/numpy.cpp
+++ b/src/numpy.cpp
@@ -1,9 +1,32 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * numpy.cpp
+ *
+ * OpenCV Mat allocator implementation for ulab NumPy. Derived from:
+ * https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_numpy.cpp
+ * Licensed under the Apache License, Version 2.0
+ * Copyright (C) 2000-2022, Intel Corporation, all rights reserved.
+ * Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+ * Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
+ * Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
+ * Copyright (C) 2015-2023, OpenCV Foundation, all rights reserved.
+ * Copyright (C) 2008-2016, Itseez Inc., all rights reserved.
+ * Copyright (C) 2019-2023, Xperience AI, all rights reserved.
+ * Copyright (C) 2019-2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+ * Copyright (C) 2022-2023, Southern University of Science And Technology, all rights reserved.
+ *
+ * Third party copyrights are property of their respective owners.
+ *------------------------------------------------------------------------------
+ */
+
// C++ headers
#include "numpy.h"
#include "convert.h"
-// Derived from:
-// https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_numpy.cpp#L11-L22
UMatData* NumpyAllocator::allocate(ndarray_obj_t* ndarray, int dims, const int* sizes, int type, size_t* step) const
{
UMatData* u = new UMatData(this);
diff --git a/src/numpy.h b/src/numpy.h
index 89f0dd2..ccee157 100644
--- a/src/numpy.h
+++ b/src/numpy.h
@@ -1,10 +1,35 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * numpy.h
+ *
+ * OpenCV Mat allocator implementation for ulab NumPy. Derived from:
+ * https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_numpy.hpp
+ * Licensed under the Apache License, Version 2.0
+ * Copyright (C) 2000-2022, Intel Corporation, all rights reserved.
+ * Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+ * Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
+ * Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
+ * Copyright (C) 2015-2023, OpenCV Foundation, all rights reserved.
+ * Copyright (C) 2008-2016, Itseez Inc., all rights reserved.
+ * Copyright (C) 2019-2023, Xperience AI, all rights reserved.
+ * Copyright (C) 2019-2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+ * Copyright (C) 2022-2023, Southern University of Science And Technology, all rights reserved.
+ *
+ * Third party copyrights are property of their respective owners.
+ *------------------------------------------------------------------------------
+ */
+
// C++ headers
#include "opencv2/core.hpp"
// C headers
extern "C" {
#include "py/runtime.h"
-#include "ulab/code/ndarray.h"
+#include "ndarray.h"
} // extern "C"
using namespace cv;
diff --git a/src/opencv b/src/opencv
deleted file mode 160000
index 6e8a224..0000000
--- a/src/opencv
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 6e8a2245ddcf849cef74519a71f00dd9abbfcfeb
diff --git a/src/opencv_upy.c b/src/opencv_upy.c
index 63e24ad..beffda4 100644
--- a/src/opencv_upy.c
+++ b/src/opencv_upy.c
@@ -1,97 +1,30 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * opencv_upy.c
+ *
+ * OpenCV module registration.
+ *------------------------------------------------------------------------------
+ */
+
#include "core.h"
+#include "highgui.h"
+#include "imgcodecs.h"
#include "imgproc.h"
-// Define a Python reference to the function we'll make available.
-// See example.cpp for the definition.
-static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange);
-static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_max_obj, 2, cv2_core_max);
-static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_min_obj, 2, cv2_core_min);
-static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor);
-
-// Define all attributes of the module.
-// Table entries are key/value pairs of the attribute name (a string)
-// and the MicroPython object reference.
-// All identifiers and strings are written as MP_QSTR_xxx and will be
-// optimized to word-sized integers by the build system (interned strings).
+// Python module globals dictionary
static const mp_rom_map_elem_t cv2_module_globals_table[] = {
- ////////////////////////////////////////////////////////////////////////////
- // Module name
- ////////////////////////////////////////////////////////////////////////////
-
+ // Python module name
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_cv2) },
- ////////////////////////////////////////////////////////////////////////////
- // Constants
- ////////////////////////////////////////////////////////////////////////////
-
- // Color conversion codes. These are defined in ,
- // however we can't include that header here because it's C++ and this is C,
- // so we have to redefine them here. Only a subset of the most common
- // conversions are included here.
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGR2BGRA), MP_ROM_INT(0) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGB2RGBA), MP_ROM_INT(0) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGRA2BGR), MP_ROM_INT(1) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGBA2RGB), MP_ROM_INT(1) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGR2RGBA), MP_ROM_INT(2) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGB2BGRA), MP_ROM_INT(2) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGBA2BGR), MP_ROM_INT(3) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGRA2RGB), MP_ROM_INT(3) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGR2RGB), MP_ROM_INT(4) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGB2BGR), MP_ROM_INT(4) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGRA2RGBA), MP_ROM_INT(5) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGBA2BGRA), MP_ROM_INT(5) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2GRAY), MP_ROM_INT(6) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2GRAY), MP_ROM_INT(7) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR), MP_ROM_INT(8) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGB), MP_ROM_INT(8) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGRA), MP_ROM_INT(9) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGBA), MP_ROM_INT(9) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2GRAY), MP_ROM_INT(10) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2GRAY), MP_ROM_INT(11) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGR565), MP_ROM_INT(12) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR565), MP_ROM_INT(13) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGR), MP_ROM_INT(14) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGB), MP_ROM_INT(15) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR565), MP_ROM_INT(16) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR565), MP_ROM_INT(17) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGRA), MP_ROM_INT(18) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGBA), MP_ROM_INT(19) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR565), MP_ROM_INT(20) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652GRAY), MP_ROM_INT(21) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2YCrCb), MP_ROM_INT(36) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2YCrCb), MP_ROM_INT(37) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2BGR), MP_ROM_INT(38) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2RGB), MP_ROM_INT(39) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2HSV), MP_ROM_INT(40) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2HSV), MP_ROM_INT(41) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2BGR), MP_ROM_INT(54) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2RGB), MP_ROM_INT(55) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2BGR), MP_ROM_INT(46) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2BGR), MP_ROM_INT(47) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2BGR), MP_ROM_INT(48) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2BGR), MP_ROM_INT(49) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2RGB), MP_ROM_INT(46) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2RGB), MP_ROM_INT(47) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2RGB), MP_ROM_INT(48) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2RGB), MP_ROM_INT(49) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2GRAY), MP_ROM_INT(86) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2GRAY), MP_ROM_INT(87) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2GRAY), MP_ROM_INT(88) },
- { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2GRAY), MP_ROM_INT(89) },
-
- ////////////////////////////////////////////////////////////////////////////
- // OpenCV core functions
- ////////////////////////////////////////////////////////////////////////////
-
- { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) },
- { MP_ROM_QSTR(MP_QSTR_max), MP_ROM_PTR(&cv2_core_max_obj) },
- { MP_ROM_QSTR(MP_QSTR_min), MP_ROM_PTR(&cv2_core_min_obj) },
-
- ////////////////////////////////////////////////////////////////////////////
- // OpenCV imgproc functions
- ////////////////////////////////////////////////////////////////////////////
-
- { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) },
+    // Include globals from each OpenCV module
+ OPENCV_CORE_GLOBALS,
+ OPENCV_HIGHGUI_GLOBALS,
+ OPENCV_IMGCODECS_GLOBALS,
+ OPENCV_IMGPROC_GLOBALS,
};
static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table);
diff --git a/src/ulab b/src/ulab
deleted file mode 160000
index 825ec2b..0000000
--- a/src/ulab
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 825ec2b143ebd8d3d3707bac2af0fe1ae6cb401a
diff --git a/ulab b/ulab
new file mode 160000
index 0000000..8eb8eaf
--- /dev/null
+++ b/ulab
@@ -0,0 +1 @@
+Subproject commit 8eb8eaf5a19f5ed3a2e2193ba6e727d7518458a9