diff --git a/.github/workflows/build-and-release.yml b/.github/workflows/build-and-release.yml index 62e90f580..c90987b38 100644 --- a/.github/workflows/build-and-release.yml +++ b/.github/workflows/build-and-release.yml @@ -353,3 +353,63 @@ jobs: releaseDraft: false prerelease: false args: ${{ matrix.args }} + + + # Create tar.gz archives for AUR (Arch Linux) + create-aur-tarball: + needs: [publish-tauri] + runs-on: ubuntu-22.04 + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + + - name: Get version from tag + id: get_version + run: | + if [ "${{ github.event_name }}" = "release" ]; then + TAG="${{ github.event.release.tag_name }}" + else + TAG="${{ github.event.inputs.tag }}" + fi + VERSION="${TAG#v}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "tag=${TAG}" >> $GITHUB_OUTPUT + + - name: Download Linux deb from release + run: | + VERSION="${{ steps.get_version.outputs.version }}" + TAG="${{ steps.get_version.outputs.tag }}" + + # Wait for release assets to be available + sleep 30 + + # Download the deb package + curl -fLo pictopy.deb "https://github.com/AOSSIE-Org/PictoPy/releases/download/${TAG}/picto-py_${VERSION}_amd64.deb" || \ + curl -fLo pictopy.deb "https://github.com/AOSSIE-Org/PictoPy/releases/download/${TAG}/PictoPy_${VERSION}_amd64.deb" + + - name: Extract and repackage as tar.gz + run: | + VERSION="${{ steps.get_version.outputs.version }}" + + # Extract deb package + mkdir -p extract + dpkg-deb -x pictopy.deb extract/ + + # Create tar.gz with proper structure + cd extract + tar -czvf "../pictopy_${VERSION}_amd64.tar.gz" . + cd .. 
+ + # Calculate checksum + sha256sum "pictopy_${VERSION}_amd64.tar.gz" > "pictopy_${VERSION}_amd64.tar.gz.sha256" + + - name: Upload tar.gz to release + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ steps.get_version.outputs.tag }} + files: | + pictopy_${{ steps.get_version.outputs.version }}_amd64.tar.gz + pictopy_${{ steps.get_version.outputs.version }}_amd64.tar.gz.sha256 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/publish-aur.yml b/.github/workflows/publish-aur.yml new file mode 100644 index 000000000..28516f34c --- /dev/null +++ b/.github/workflows/publish-aur.yml @@ -0,0 +1,162 @@ +name: Publish to AUR + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: "Tag name for the release (e.g., v1.1.0)" + required: true + type: string + +jobs: + publish-aur: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get version from tag + id: get_version + run: | + if [ "${{ github.event_name }}" = "release" ]; then + TAG="${{ github.event.release.tag_name }}" + else + TAG="${{ github.event.inputs.tag }}" + fi + # Remove 'v' prefix if present + VERSION="${TAG#v}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "tag=${TAG}" >> $GITHUB_OUTPUT + + - name: Wait for release assets + run: | + echo "Waiting for release assets to be available..." + sleep 60 + + - name: Calculate SHA256 checksums + id: checksums + run: | + VERSION="${{ steps.get_version.outputs.version }}" + BASE_URL="https://github.com/AOSSIE-Org/PictoPy/releases/download/v${VERSION}" + + # Download and calculate checksums for x86_64 + echo "Downloading x86_64 tarball..." 
+ if curl -fLo pictopy_amd64.tar.gz "${BASE_URL}/pictopy_${VERSION}_amd64.tar.gz"; then + SHA256_X86_64=$(sha256sum pictopy_amd64.tar.gz | cut -d' ' -f1) + echo "sha256_x86_64=${SHA256_X86_64}" >> $GITHUB_OUTPUT + else + echo "sha256_x86_64=SKIP" >> $GITHUB_OUTPUT + fi + + # Download and calculate checksums for aarch64 + echo "Downloading aarch64 tarball..." + if curl -fLo pictopy_arm64.tar.gz "${BASE_URL}/pictopy_${VERSION}_arm64.tar.gz"; then + SHA256_AARCH64=$(sha256sum pictopy_arm64.tar.gz | cut -d' ' -f1) + echo "sha256_aarch64=${SHA256_AARCH64}" >> $GITHUB_OUTPUT + else + echo "sha256_aarch64=SKIP" >> $GITHUB_OUTPUT + fi + + - name: Update PKGBUILD + run: | + VERSION="${{ steps.get_version.outputs.version }}" + SHA256_X86_64="${{ steps.checksums.outputs.sha256_x86_64 }}" + SHA256_AARCH64="${{ steps.checksums.outputs.sha256_aarch64 }}" + + cd aur + + # Update version in PKGBUILD + sed -i "s/^pkgver=.*/pkgver=${VERSION}/" PKGBUILD + sed -i "s/^pkgrel=.*/pkgrel=1/" PKGBUILD + + # Update source URLs + sed -i "s|pictopy_[0-9.]*_amd64|pictopy_${VERSION}_amd64|g" PKGBUILD + sed -i "s|pictopy_[0-9.]*_arm64|pictopy_${VERSION}_arm64|g" PKGBUILD + sed -i "s|/v[0-9.]*/|/v${VERSION}/|g" PKGBUILD + + # Update checksums + if [ "${SHA256_X86_64}" != "SKIP" ]; then + sed -i "s/^sha256sums_x86_64=.*/sha256sums_x86_64=('${SHA256_X86_64}')/" PKGBUILD + fi + if [ "${SHA256_AARCH64}" != "SKIP" ]; then + sed -i "s/^sha256sums_aarch64=.*/sha256sums_aarch64=('${SHA256_AARCH64}')/" PKGBUILD + fi + + cat PKGBUILD + + - name: Generate .SRCINFO + run: | + cd aur + + VERSION="${{ steps.get_version.outputs.version }}" + SHA256_X86_64="${{ steps.checksums.outputs.sha256_x86_64 }}" + SHA256_AARCH64="${{ steps.checksums.outputs.sha256_aarch64 }}" + + cat > .SRCINFO << EOF + pkgbase = pictopy + pkgdesc = A privacy-focused photo management application with AI-powered tagging and face recognition + pkgver = ${VERSION} + pkgrel = 1 + url = https://github.com/AOSSIE-Org/PictoPy + install = 
pictopy.install + arch = x86_64 + arch = aarch64 + license = MIT + makedepends = rust + makedepends = cargo + makedepends = nodejs + makedepends = npm + makedepends = python + makedepends = python-pip + makedepends = pyinstaller + makedepends = webkit2gtk-4.1 + makedepends = base-devel + makedepends = curl + makedepends = wget + makedepends = file + makedepends = openssl + makedepends = appmenu-gtk-module + makedepends = librsvg + depends = webkit2gtk-4.1 + depends = gtk3 + depends = glib2 + depends = cairo + depends = pango + depends = gdk-pixbuf2 + depends = libsoup3 + depends = openssl + depends = hicolor-icon-theme + optdepends = python-onnxruntime: For AI model inference + optdepends = python-opencv: For image processing + optdepends = python-numpy: For numerical operations + options = !strip + options = !emptydirs + source_x86_64 = pictopy-${VERSION}-x86_64.tar.gz::https://github.com/AOSSIE-Org/PictoPy/releases/download/v${VERSION}/pictopy_${VERSION}_amd64.tar.gz + sha256sums_x86_64 = ${SHA256_X86_64} + source_aarch64 = pictopy-${VERSION}-aarch64.tar.gz::https://github.com/AOSSIE-Org/PictoPy/releases/download/v${VERSION}/pictopy_${VERSION}_arm64.tar.gz + sha256sums_aarch64 = ${SHA256_AARCH64} + + pkgname = pictopy + EOF + + # Remove leading whitespace + sed -i 's/^ //' .SRCINFO + + cat .SRCINFO + + - name: Publish to AUR + uses: KSXGitHub/github-actions-deploy-aur@v3.0.1 + with: + pkgname: pictopy + pkgbuild: ./aur/PKGBUILD + commit_username: ${{ secrets.AUR_USERNAME }} + commit_email: ${{ secrets.AUR_EMAIL }} + ssh_private_key: ${{ secrets.AUR_SSH_PRIVATE_KEY }} + commit_message: "Update to version ${{ steps.get_version.outputs.version }}" + ssh_keyscan_types: ed25519 + force_push: true + assets: | + ./aur/pictopy.install + ./aur/.SRCINFO diff --git a/README.md b/README.md index 595889480..dbf643161 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,36 @@ PictoPy is an advanced desktop gallery application that combines the power of Tauri, React, and Rust for 
the frontend with a Python backend for sophisticated image analysis and management. +## Installation + +### Arch Linux (AUR) + +PictoPy is available on the Arch User Repository (AUR) for Arch-based distributions (Arch, Manjaro, EndeavourOS, etc.): + +```bash +# Using yay +yay -S pictopy + +# Using paru +paru -S pictopy + +# Using pikaur +pikaur -S pictopy +``` + +For the development version built from source: +```bash +yay -S pictopy-git +``` + +### Other Linux Distributions + +Download the AppImage or .deb package from the [Releases](https://github.com/AOSSIE-Org/PictoPy/releases) page. + +### Windows & macOS + +Download the installer from the [Releases](https://github.com/AOSSIE-Org/PictoPy/releases) page. + # Want to Contribute? 😄     diff --git a/aur/.SRCINFO b/aur/.SRCINFO new file mode 100644 index 000000000..5ef354a1c --- /dev/null +++ b/aur/.SRCINFO @@ -0,0 +1,44 @@ +pkgbase = pictopy + pkgdesc = A privacy-focused photo management application with AI-powered tagging and face recognition + pkgver = 1.1.0 + pkgrel = 1 + url = https://github.com/AOSSIE-Org/PictoPy + install = pictopy.install + arch = x86_64 + arch = aarch64 + license = MIT + makedepends = rust + makedepends = cargo + makedepends = nodejs + makedepends = npm + makedepends = python + makedepends = python-pip + makedepends = pyinstaller + makedepends = webkit2gtk-4.1 + makedepends = base-devel + makedepends = curl + makedepends = wget + makedepends = file + makedepends = openssl + makedepends = appmenu-gtk-module + makedepends = librsvg + depends = webkit2gtk-4.1 + depends = gtk3 + depends = glib2 + depends = cairo + depends = pango + depends = gdk-pixbuf2 + depends = libsoup3 + depends = openssl + depends = hicolor-icon-theme + optdepends = python-onnxruntime: For AI model inference + optdepends = python-opencv: For image processing + optdepends = python-numpy: For numerical operations + options = !strip + options = !emptydirs + source_x86_64 = 
pictopy-1.1.0-x86_64.tar.gz::https://github.com/AOSSIE-Org/PictoPy/releases/download/v1.1.0/pictopy_1.1.0_amd64.tar.gz + sha256sums_x86_64 = SKIP + source_aarch64 = pictopy-1.1.0-aarch64.tar.gz::https://github.com/AOSSIE-Org/PictoPy/releases/download/v1.1.0/pictopy_1.1.0_arm64.tar.gz + sha256sums_aarch64 = SKIP + +pkgname = pictopy diff --git a/aur/PKGBUILD b/aur/PKGBUILD new file mode 100644 index 000000000..b14905656 --- /dev/null +++ b/aur/PKGBUILD @@ -0,0 +1,85 @@ +# Maintainer: AOSSIE +# Contributor: PictoPy Team + +pkgname=pictopy +pkgver=1.1.0 +pkgrel=1 +pkgdesc="A privacy-focused photo management application with AI-powered tagging and face recognition" +arch=('x86_64' 'aarch64') +url="https://github.com/AOSSIE-Org/PictoPy" +license=('MIT') +depends=( + 'webkit2gtk-4.1' + 'gtk3' + 'glib2' + 'cairo' + 'pango' + 'gdk-pixbuf2' + 'libsoup3' + 'openssl' + 'hicolor-icon-theme' +) +makedepends=( + 'rust' + 'cargo' + 'nodejs' + 'npm' + 'python' + 'python-pip' + 'pyinstaller' + 'webkit2gtk-4.1' + 'base-devel' + 'curl' + 'wget' + 'file' + 'openssl' + 'appmenu-gtk-module' + 'librsvg' +) +optdepends=( + 'python-onnxruntime: For AI model inference' + 'python-opencv: For image processing' + 'python-numpy: For numerical operations' +) +options=('!strip' '!emptydirs') +install=${pkgname}.install +source_x86_64=("${pkgname}-${pkgver}-x86_64.tar.gz::${url}/releases/download/v${pkgver}/pictopy_${pkgver}_amd64.tar.gz") +source_aarch64=("${pkgname}-${pkgver}-aarch64.tar.gz::${url}/releases/download/v${pkgver}/pictopy_${pkgver}_arm64.tar.gz") +sha256sums_x86_64=('SKIP') +sha256sums_aarch64=('SKIP') + +package() { + cd "${srcdir}" + + # Install the main application binary + install -Dm755 "usr/bin/picto-py" "${pkgdir}/usr/bin/pictopy" + + # Install libraries and resources + if [ -d "usr/lib" ]; then + cp -r usr/lib "${pkgdir}/usr/" + fi + + # Install desktop entry + install -Dm644 "usr/share/applications/picto-py.desktop" \ + "${pkgdir}/usr/share/applications/pictopy.desktop" + + 
# Update desktop entry to use correct binary name + sed -i 's/Exec=picto-py/Exec=pictopy/g' "${pkgdir}/usr/share/applications/pictopy.desktop" + + # Install icons + for size in 32x32 128x128 256x256; do + if [ -f "usr/share/icons/hicolor/${size}/apps/picto-py.png" ]; then + install -Dm644 "usr/share/icons/hicolor/${size}/apps/picto-py.png" \ + "${pkgdir}/usr/share/icons/hicolor/${size}/apps/pictopy.png" + fi + done + + # Install scalable icon if available + if [ -f "usr/share/icons/hicolor/scalable/apps/picto-py.svg" ]; then + install -Dm644 "usr/share/icons/hicolor/scalable/apps/picto-py.svg" \ + "${pkgdir}/usr/share/icons/hicolor/scalable/apps/pictopy.svg" + fi + + # Install license + install -Dm644 "${srcdir}/../LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" 2>/dev/null || true +} diff --git a/aur/PKGBUILD-git b/aur/PKGBUILD-git new file mode 100644 index 000000000..c52bebdb3 --- /dev/null +++ b/aur/PKGBUILD-git @@ -0,0 +1,164 @@ +# Maintainer: AOSSIE +# Contributor: PictoPy Team + +pkgname=pictopy-git +pkgver=1.1.0 +pkgrel=1 +pkgdesc="A privacy-focused photo management application with AI-powered tagging and face recognition (git version)" +arch=('x86_64' 'aarch64') +url="https://github.com/AOSSIE-Org/PictoPy" +license=('MIT') +depends=( + 'webkit2gtk-4.1' + 'gtk3' + 'glib2' + 'cairo' + 'pango' + 'gdk-pixbuf2' + 'libsoup3' + 'openssl' + 'hicolor-icon-theme' +) +makedepends=( + 'git' + 'rust' + 'cargo' + 'nodejs' + 'npm' + 'python' + 'python-pip' + 'python-virtualenv' + 'webkit2gtk-4.1' + 'base-devel' + 'curl' + 'wget' + 'file' + 'openssl' + 'appmenu-gtk-module' + 'librsvg' +) +optdepends=( + 'python-onnxruntime: For AI model inference' + 'python-opencv: For image processing' + 'python-numpy: For numerical operations' +) +provides=('pictopy') +conflicts=('pictopy') +options=('!strip' '!emptydirs') +install=pictopy.install +source=("${pkgname}::git+${url}.git") +sha256sums=('SKIP') + +pkgver() { + cd "${srcdir}/${pkgname}" + git describe --tags --long 
2>/dev/null | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g' || echo "1.1.0" +} + +build() { + cd "${srcdir}/${pkgname}" + + # Build backend server + echo "Building backend server..." + cd backend + python -m venv venv + source venv/bin/activate + pip install --upgrade pip + pip install -r requirements.txt + pip install pyinstaller + pyinstaller main.py --name PictoPy_Server --onedir --distpath dist + mkdir -p dist/PictoPy_Server/images + cp -r app dist/PictoPy_Server/ + deactivate + cd .. + + # Build sync microservice + echo "Building sync microservice..." + cd sync-microservice + python -m venv venv + source venv/bin/activate + pip install --upgrade pip + pip install -r requirements.txt + pip install pyinstaller + pyinstaller main.py --name PictoPy_Sync --onedir --distpath dist + cp -r app dist/PictoPy_Sync/ + deactivate + cd .. + + # Build frontend with Tauri + echo "Building frontend..." + cd frontend + npm install + npm run build + cd src-tauri + cargo build --release + cd ../.. +} + +package() { + cd "${srcdir}/${pkgname}" + + # Install the main application binary + install -Dm755 "frontend/src-tauri/target/release/picto-py" \ + "${pkgdir}/usr/bin/pictopy" + + # Install backend resources + install -dm755 "${pkgdir}/usr/lib/pictopy/resources/backend" + cp -r backend/dist/PictoPy_Server/* "${pkgdir}/usr/lib/pictopy/resources/backend/" + + # Install sync microservice resources + install -dm755 "${pkgdir}/usr/lib/pictopy/resources/sync-microservice" + cp -r sync-microservice/dist/PictoPy_Sync/* "${pkgdir}/usr/lib/pictopy/resources/sync-microservice/" + + # Set permissions for resources + chmod -R 755 "${pkgdir}/usr/lib/pictopy/resources" + + # Install desktop entry + install -Dm644 /dev/stdin "${pkgdir}/usr/share/applications/pictopy.desktop" << EOF +[Desktop Entry] +Name=PictoPy +Comment=Privacy-focused photo management with AI-powered tagging +Exec=pictopy +Icon=pictopy +Terminal=false +Type=Application +Categories=Graphics;Photography;Viewer; 
+Keywords=photo;image;gallery;ai;tagging;face;recognition; +StartupWMClass=PictoPy +EOF + + # Install icons + install -Dm644 "frontend/src-tauri/icons/32x32.png" \ + "${pkgdir}/usr/share/icons/hicolor/32x32/apps/pictopy.png" + install -Dm644 "frontend/src-tauri/icons/128x128.png" \ + "${pkgdir}/usr/share/icons/hicolor/128x128/apps/pictopy.png" + install -Dm644 "frontend/src-tauri/icons/128x128@2x.png" \ + "${pkgdir}/usr/share/icons/hicolor/256x256/apps/pictopy.png" + install -Dm644 "frontend/src-tauri/icons/icon.png" \ + "${pkgdir}/usr/share/icons/hicolor/512x512/apps/pictopy.png" + + # Install license + install -Dm644 "LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" 2>/dev/null || \ + install -Dm644 /dev/stdin "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" << 'EOF' +MIT License + +Copyright (c) AOSSIE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+EOF +} diff --git a/aur/README.md b/aur/README.md new file mode 100644 index 000000000..a9a47e0d8 --- /dev/null +++ b/aur/README.md @@ -0,0 +1,126 @@ +# PictoPy AUR Package + +This directory contains the files needed to publish PictoPy to the [Arch User Repository (AUR)](https://aur.archlinux.org/). + +## Package Variants + +### `pictopy` (Binary Package) +The main package that installs pre-built binaries from GitHub releases. This is the recommended option for most users. + +### `pictopy-git` (Source Package) +Builds PictoPy from the latest git source. Use this if you want the bleeding-edge version or need to make modifications. + +## Installation + +### Using an AUR Helper (Recommended) + +```bash +# Using yay +yay -S pictopy + +# Using paru +paru -S pictopy + +# Using pikaur +pikaur -S pictopy +``` + +### Manual Installation + +```bash +# Clone the AUR repository +git clone https://aur.archlinux.org/pictopy.git +cd pictopy + +# Build and install +makepkg -si +``` + +### Installing from Git Source + +```bash +# Using yay +yay -S pictopy-git + +# Or manually +git clone https://aur.archlinux.org/pictopy-git.git +cd pictopy-git +makepkg -si +``` + +## Dependencies + +### Runtime Dependencies +- `webkit2gtk-4.1` - WebKit rendering engine +- `gtk3` - GTK+ 3 toolkit +- `glib2` - GLib library +- `cairo` - 2D graphics library +- `pango` - Text rendering +- `gdk-pixbuf2` - Image loading +- `libsoup3` - HTTP library +- `openssl` - Cryptography +- `hicolor-icon-theme` - Icon theme + +### Optional Dependencies +- `python-onnxruntime` - For AI model inference +- `python-opencv` - For image processing +- `python-numpy` - For numerical operations + +## Updating the AUR Package + +The package is automatically updated via GitHub Actions when a new release is published. To manually update: + +1. Update the `pkgver` in `PKGBUILD` +2. Update the source URLs if needed +3. Regenerate checksums: `updpkgsums` +4. Regenerate `.SRCINFO`: `makepkg --printsrcinfo > .SRCINFO` +5. 
Commit and push to AUR + +## GitHub Actions Setup + +To enable automatic AUR publishing, add these secrets to your GitHub repository: + +- `AUR_USERNAME` - Your AUR username +- `AUR_EMAIL` - Your AUR email +- `AUR_SSH_PRIVATE_KEY` - SSH private key registered with AUR + +### Generating SSH Key for AUR + +```bash +# Generate a new SSH key +ssh-keygen -t ed25519 -C "your-email@example.com" -f aur_key + +# Add the public key to your AUR account +cat aur_key.pub +# Copy this to: https://aur.archlinux.org/account/YOUR_USERNAME/edit + +# Add the private key as a GitHub secret (AUR_SSH_PRIVATE_KEY) +cat aur_key +``` + +## Troubleshooting + +### Build Fails with Missing Dependencies +```bash +# Install all build dependencies +sudo pacman -S --needed base-devel rust cargo nodejs npm python python-pip webkit2gtk-4.1 +``` + +### Application Won't Start +```bash +# Check for missing libraries +ldd /usr/bin/pictopy | grep "not found" + +# Install missing dependencies +sudo pacman -S webkit2gtk-4.1 gtk3 +``` + +### AI Features Not Working +```bash +# Install optional AI dependencies +sudo pacman -S python-onnxruntime python-opencv python-numpy +``` + +## License + +MIT License - See the main repository for details. diff --git a/aur/pictopy.install b/aur/pictopy.install new file mode 100644 index 000000000..b9f8a9f13 --- /dev/null +++ b/aur/pictopy.install @@ -0,0 +1,47 @@ +# PictoPy post-install hooks for Arch Linux + +post_install() { + echo "==> PictoPy has been installed successfully!" + echo "" + echo "==> To start PictoPy, run: pictopy" + echo "==> Or find it in your application menu." + echo "" + echo "==> Note: On first run, PictoPy will download required AI models." + echo "==> This may take a few minutes depending on your internet connection." 
+ echo "" + + # Update icon cache + if [ -x /usr/bin/gtk-update-icon-cache ]; then + gtk-update-icon-cache -q -t -f /usr/share/icons/hicolor + fi + + # Update desktop database + if [ -x /usr/bin/update-desktop-database ]; then + update-desktop-database -q /usr/share/applications + fi +} + +post_upgrade() { + post_install + echo "==> PictoPy has been upgraded to version $1" +} + +pre_remove() { + echo "==> Removing PictoPy..." +} + +post_remove() { + # Update icon cache + if [ -x /usr/bin/gtk-update-icon-cache ]; then + gtk-update-icon-cache -q -t -f /usr/share/icons/hicolor + fi + + # Update desktop database + if [ -x /usr/bin/update-desktop-database ]; then + update-desktop-database -q /usr/share/applications + fi + + echo "==> PictoPy has been removed." + echo "==> User data in ~/.local/share/pictopy has been preserved." + echo "==> To remove all data, run: rm -rf ~/.local/share/pictopy" +} diff --git a/backend/app/logging/setup_logging.py b/backend/app/logging/setup_logging.py index e64424654..a78891c81 100644 --- a/backend/app/logging/setup_logging.py +++ b/backend/app/logging/setup_logging.py @@ -214,8 +214,8 @@ class InterceptHandler(logging.Handler): Handler to intercept logs from other loggers (like Uvicorn) and redirect them through our custom logger. - This implementation is based on Loguru's approach and routes logs directly to - the root logger. + This implementation avoids recursive logging by directly emitting to the root + logger's handlers instead of calling logger.log(). """ def __init__(self, component_name: str): @@ -227,27 +227,40 @@ def __init__(self, component_name: str): """ super().__init__() self.component_name = component_name + self._inside_emit = False # Guard against recursion def emit(self, record: logging.LogRecord) -> None: """ Process a log record by forwarding it through our custom logger. + This method avoids recursion by: + 1. Using a recursion guard flag + 2. 
Directly emitting to root logger handlers instead of calling logger.log() + Args: record: The log record to process """ - # Get the appropriate module name - module_name = record.name - if "." in module_name: - module_name = module_name.split(".")[-1] - - # Create a message that includes the original module in the format - msg = record.getMessage() - - # Find the appropriate logger - logger = get_logger(module_name) - - # Log the message with our custom formatting - logger.log(record.levelno, f"[uvicorn] {msg}") + # Prevent recursive logging + if self._inside_emit: + return + + self._inside_emit = True + try: + # Modify the record message to include uvicorn prefix + original_msg = record.getMessage() + record.msg = f"[uvicorn] {original_msg}" + record.args = () # Clear args since we've already formatted the message + + # Get the root logger and emit directly to its handlers + # This avoids calling logger.log() which would cause recursion + root_logger = logging.getLogger() + for handler in root_logger.handlers: + # Skip this handler to avoid recursion + if handler is not self: + if record.levelno >= handler.level: + handler.emit(record) + finally: + self._inside_emit = False def configure_uvicorn_logging(component_name: str) -> None: diff --git a/backend/app/routes/images.py b/backend/app/routes/images.py index 2e40cd825..82640a19f 100644 --- a/backend/app/routes/images.py +++ b/backend/app/routes/images.py @@ -3,6 +3,7 @@ from app.database.images import db_get_all_images from app.schemas.images import ErrorResponse from app.utils.images import image_util_parse_metadata +from app.utils.duplicate_detection import get_duplicate_groups_with_scores from pydantic import BaseModel from app.database.images import db_toggle_image_favourite_status from app.logging.setup_logging import get_logger @@ -128,3 +129,122 @@ class ImageInfoResponse(BaseModel): isTagged: bool isFavourite: bool tags: Optional[List[str]] = None + + +# Duplicate Detection Models +class 
DuplicateImageInfo(BaseModel): + id: str + path: str + thumbnailPath: str + sharpness_score: float + exposure_score: float + overall_score: float + is_best_shot: bool + + +class DuplicateGroup(BaseModel): + group_id: int + image_count: int + best_shot_id: str + images: List[DuplicateImageInfo] + + +class GetDuplicatesResponse(BaseModel): + success: bool + message: str + data: List[DuplicateGroup] + + +@router.get( + "/duplicates", + response_model=GetDuplicatesResponse, + responses={500: {"model": ErrorResponse}}, +) +def get_duplicate_images( + similarity_threshold: int = Query( + default=10, + ge=1, + le=50, + description="Maximum hash distance to consider images as duplicates (lower = stricter, default 10)" + ) +): + """ + Find duplicate/similar images and suggest the best shot from each group. + + This endpoint analyzes all images in the library to find groups of similar images + (e.g., multiple shots of the same scene). For each group, it calculates quality + scores based on sharpness and exposure, and suggests the "best shot". 
+ + Quality Metrics: + - Sharpness: Measured using Laplacian variance (higher = less blur) + - Exposure: Analyzes histogram for proper brightness and contrast + - Overall: Weighted combination (60% sharpness, 40% exposure) + + Args: + similarity_threshold: Hash distance threshold (1-50, default 10) + - Lower values = stricter matching (fewer false positives) + - Higher values = looser matching (may group different images) + - Recommended: 10 for ~96% similarity + + Returns: + List of duplicate groups with quality scores and best shot recommendation + """ + try: + # Get all images from database + images = db_get_all_images() + + if not images: + return GetDuplicatesResponse( + success=True, + message="No images found in library", + data=[] + ) + + # Prepare image data for duplicate detection + image_data = [ + { + 'id': img['id'], + 'path': img['path'], + 'thumbnailPath': img.get('thumbnailPath', '') + } + for img in images + ] + + # Find duplicate groups with quality scores + duplicate_groups = get_duplicate_groups_with_scores( + image_data, + similarity_threshold=similarity_threshold + ) + + # Convert to response format + response_data = [ + DuplicateGroup( + group_id=group['group_id'], + image_count=group['image_count'], + best_shot_id=group['best_shot_id'], + images=[ + DuplicateImageInfo(**img_info) + for img_info in group['images'] + ] + ) + for group in duplicate_groups + ] + + total_duplicates = sum(g.image_count for g in response_data) + + return GetDuplicatesResponse( + success=True, + message=f"Found {len(response_data)} duplicate groups with {total_duplicates} total images", + data=response_data + ) + + except Exception as e: + logger.error(f"Error finding duplicates: {e}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=ErrorResponse( + success=False, + error="Internal server error", + message=f"Unable to find duplicates: {str(e)}", + ).model_dump(), + ) diff --git a/backend/app/utils/duplicate_detection.py 
b/backend/app/utils/duplicate_detection.py new file mode 100644 index 000000000..37793f2e0 --- /dev/null +++ b/backend/app/utils/duplicate_detection.py @@ -0,0 +1,332 @@ +""" +Duplicate and "Best Shot" Detection Module. + +This module provides functionality to: +1. Detect duplicate/similar images using perceptual hashing +2. Score image quality based on sharpness and exposure +3. Suggest the "best shot" from a group of similar images +""" + +import cv2 +import numpy as np +from typing import List, Dict, Tuple, Optional +from dataclasses import dataclass +from collections import defaultdict +from PIL import Image + +from app.logging.setup_logging import get_logger + +logger = get_logger(__name__) + + +@dataclass +class ImageQualityScore: + """Represents the quality score of an image.""" + image_id: str + image_path: str + sharpness_score: float + exposure_score: float + overall_score: float + + +@dataclass +class DuplicateGroup: + """Represents a group of duplicate/similar images.""" + group_id: int + images: List[Dict] + best_shot_id: str + best_shot_path: str + + +def compute_phash(image_path: str, hash_size: int = 16) -> Optional[np.ndarray]: + """ + Compute perceptual hash (pHash) for an image. + + pHash is robust to minor changes like resizing, compression, and slight color adjustments. 
+ + Args: + image_path: Path to the image file + hash_size: Size of the hash (default 16 for 256-bit hash) + + Returns: + Binary hash array or None if image cannot be processed + """ + try: + # Read image in grayscale + img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) + if img is None: + logger.warning(f"Could not read image: {image_path}") + return None + + # Resize to hash_size + 1 for DCT + resized = cv2.resize(img, (hash_size + 1, hash_size), interpolation=cv2.INTER_AREA) + + # Compute DCT (Discrete Cosine Transform) + dct = cv2.dct(np.float32(resized)) + + # Use top-left hash_size x hash_size of DCT (low frequencies) + dct_low = dct[:hash_size, :hash_size] + + # Compute median and create binary hash + median = np.median(dct_low) + phash = (dct_low > median).flatten().astype(np.uint8) + + return phash + except Exception as e: + logger.error(f"Error computing pHash for {image_path}: {e}") + return None + + +def compute_hash_distance(hash1: np.ndarray, hash2: np.ndarray) -> int: + """ + Compute Hamming distance between two hashes. + + Args: + hash1: First hash array + hash2: Second hash array + + Returns: + Hamming distance (number of differing bits) + """ + return np.sum(hash1 != hash2) + + +def calculate_sharpness(image_path: str) -> float: + """ + Calculate image sharpness using Laplacian variance. + + Higher values indicate sharper images (less blur). + + Args: + image_path: Path to the image file + + Returns: + Sharpness score (Laplacian variance) + """ + try: + img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) + if img is None: + return 0.0 + + # Compute Laplacian variance + laplacian = cv2.Laplacian(img, cv2.CV_64F) + variance = laplacian.var() + + return float(variance) + except Exception as e: + logger.error(f"Error calculating sharpness for {image_path}: {e}") + return 0.0 + + +def calculate_exposure_score(image_path: str) -> float: + """ + Calculate exposure quality score. 
+ + Measures how well-exposed an image is by analyzing histogram distribution. + Penalizes over-exposed (too bright) and under-exposed (too dark) images. + + Args: + image_path: Path to the image file + + Returns: + Exposure score (0-1, higher is better) + """ + try: + img = cv2.imread(image_path) + if img is None: + return 0.0 + + # Convert to grayscale for analysis + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + + # Calculate histogram + hist = cv2.calcHist([gray], [0], None, [256], [0, 256]) + hist = hist.flatten() / hist.sum() # Normalize + + # Calculate mean brightness + mean_brightness = np.sum(np.arange(256) * hist) + + # Ideal brightness is around 128 (middle of range) + # Score decreases as we move away from ideal + brightness_score = 1.0 - abs(mean_brightness - 128) / 128 + + # Calculate contrast (standard deviation of histogram) + std_brightness = np.sqrt(np.sum(((np.arange(256) - mean_brightness) ** 2) * hist)) + + # Good contrast is around 50-80 + contrast_score = min(std_brightness / 60, 1.0) + + # Check for clipping (over/under exposure) + dark_pixels = np.sum(hist[:10]) # Very dark pixels + bright_pixels = np.sum(hist[245:]) # Very bright pixels + clipping_penalty = 1.0 - (dark_pixels + bright_pixels) + + # Combined exposure score + exposure_score = (brightness_score * 0.4 + contrast_score * 0.3 + clipping_penalty * 0.3) + + return max(0.0, min(1.0, exposure_score)) + except Exception as e: + logger.error(f"Error calculating exposure for {image_path}: {e}") + return 0.0 + + +def calculate_image_quality(image_id: str, image_path: str) -> ImageQualityScore: + """ + Calculate overall quality score for an image. 
+
+    Args:
+        image_id: Unique identifier for the image
+        image_path: Path to the image file
+
+    Returns:
+        ImageQualityScore with individual and overall scores
+    """
+    sharpness = calculate_sharpness(image_path)
+    exposure = calculate_exposure_score(image_path)
+
+    # Normalize sharpness to 0-1 range (typical values are 0-1000+)
+    # Using log scale to handle wide range of values
+    normalized_sharpness = min(1.0, np.log1p(sharpness) / 10)
+
+    # Overall score: weighted combination
+    # Sharpness is more important for "best shot" selection
+    overall = normalized_sharpness * 0.6 + exposure * 0.4
+
+    return ImageQualityScore(
+        image_id=image_id,
+        image_path=image_path,
+        sharpness_score=sharpness,
+        exposure_score=exposure,
+        overall_score=overall
+    )
+
+
+def find_duplicate_groups(
+    images: List[Dict],
+    similarity_threshold: int = 10
+) -> List[DuplicateGroup]:
+    """
+    Find groups of duplicate/similar images.
+
+    Args:
+        images: List of image dictionaries with 'id' and 'path' keys
+        similarity_threshold: Maximum Hamming distance to consider images as duplicates
+                              (lower = stricter matching, default 10 for ~96% similarity)
+
+    Returns:
+        List of DuplicateGroup objects containing similar images
+    """
+    if not images:
+        return []
+
+    logger.info(f"Finding duplicates among {len(images)} images...")
+
+    # Compute hashes for all images
+    image_hashes: List[Tuple[Dict, Optional[np.ndarray]]] = []
+    for img in images:
+        phash = compute_phash(img['path'])
+        image_hashes.append((img, phash))
+
+    # Filter out images where hash computation failed
+    valid_images = [(img, h) for img, h in image_hashes if h is not None]
+
+    if len(valid_images) < 2:
+        return []
+
+    # Find similar images using Union-Find approach
+    n = len(valid_images)
+    parent = list(range(n))
+
+    def find(x):
+        # Iterative find with two-pass path compression. A recursive find can
+        # exceed Python's recursion limit (default 1000) if a large set of
+        # near-identical images builds a long parent chain before compression.
+        root = x
+        while parent[root] != root:
+            root = parent[root]
+        while parent[x] != root:
+            parent[x], x = root, parent[x]
+        return root
+
+    def union(x, y):
+        px, py = find(x), find(y)
+        if px != py:
+            parent[px] = py
+
+    # Compare all pairs (O(n^2) comparisons; each distance check is cheap)
+    for i in range(n):
+        for j in range(i + 1, n):
+            distance = compute_hash_distance(valid_images[i][1], valid_images[j][1])
+            if distance <= similarity_threshold:
+                union(i, j)
+
+    # Group images by their root parent
+    groups_dict = defaultdict(list)
+    for i in range(n):
+        root = find(i)
+        groups_dict[root].append(valid_images[i][0])
+
+    # Filter groups with more than one image and find best shot
+    duplicate_groups = []
+    group_id = 0
+
+    for images_in_group in groups_dict.values():
+        if len(images_in_group) > 1:
+            # Calculate quality scores for all images in group
+            quality_scores = [
+                calculate_image_quality(img['id'], img['path'])
+                for img in images_in_group
+            ]
+
+            # Find best shot (highest overall score)
+            best_shot = max(quality_scores, key=lambda x: x.overall_score)
+
+            duplicate_groups.append(DuplicateGroup(
+                group_id=group_id,
+                images=images_in_group,
+                best_shot_id=best_shot.image_id,
+                best_shot_path=best_shot.image_path
+            ))
+            group_id += 1
+
+    logger.info(f"Found {len(duplicate_groups)} duplicate groups")
+    return duplicate_groups
+
+
+def get_duplicate_groups_with_scores(
+    images: List[Dict],
+    similarity_threshold: int = 10
+) -> List[Dict]:
+    """
+    Get duplicate groups with detailed quality scores for each image.
+ + Args: + images: List of image dictionaries with 'id' and 'path' keys + similarity_threshold: Maximum Hamming distance for duplicates + + Returns: + List of dictionaries containing group info and quality scores + """ + groups = find_duplicate_groups(images, similarity_threshold) + + result = [] + for group in groups: + # Calculate detailed scores for each image + images_with_scores = [] + for img in group.images: + score = calculate_image_quality(img['id'], img['path']) + images_with_scores.append({ + 'id': img['id'], + 'path': img['path'], + 'thumbnailPath': img.get('thumbnailPath', ''), + 'sharpness_score': round(score.sharpness_score, 2), + 'exposure_score': round(score.exposure_score, 4), + 'overall_score': round(score.overall_score, 4), + 'is_best_shot': img['id'] == group.best_shot_id + }) + + # Sort by overall score descending + images_with_scores.sort(key=lambda x: x['overall_score'], reverse=True) + + result.append({ + 'group_id': group.group_id, + 'image_count': len(group.images), + 'best_shot_id': group.best_shot_id, + 'images': images_with_scores + }) + + return result diff --git a/frontend/src-tauri/tauri.conf.json b/frontend/src-tauri/tauri.conf.json index 8ad815dfd..b56801605 100644 --- a/frontend/src-tauri/tauri.conf.json +++ b/frontend/src-tauri/tauri.conf.json @@ -7,11 +7,17 @@ }, "bundle": { "active": true, - "targets": ["nsis", "deb", "app"], + "targets": "all", "createUpdaterArtifacts": true, "linux": { "deb": { "postInstallScript": "./postinstall.sh" + }, + "appimage": { + "bundleMediaFramework": true + }, + "rpm": { + "release": "1" } }, "icon": [ diff --git a/frontend/src/components/ErrorBoundary/ErrorBoundary.tsx b/frontend/src/components/ErrorBoundary/ErrorBoundary.tsx new file mode 100644 index 000000000..ec03ab91a --- /dev/null +++ b/frontend/src/components/ErrorBoundary/ErrorBoundary.tsx @@ -0,0 +1,124 @@ +import React, { Component, ErrorInfo, ReactNode } from 'react'; +import { AlertTriangle, RefreshCw } from 'lucide-react'; + 
+
+interface Props {
+  children: ReactNode;
+}
+
+interface State {
+  hasError: boolean;
+  error: Error | null;
+  errorInfo: ErrorInfo | null;
+}
+
+/**
+ * Error Boundary component that catches JavaScript errors anywhere in the
+ * child component tree and displays a fallback UI instead of crashing the app.
+ *
+ * Reference: https://react.dev/reference/react/Component#catching-rendering-errors-with-an-error-boundary
+ */
+class ErrorBoundary extends Component<Props, State> {
+  constructor(props: Props) {
+    super(props);
+    this.state = {
+      hasError: false,
+      error: null,
+      errorInfo: null,
+    };
+  }
+
+  static getDerivedStateFromError(error: Error): Partial<State> {
+    // Update state so the next render shows the fallback UI
+    return { hasError: true, error };
+  }
+
+  componentDidCatch(error: Error, errorInfo: ErrorInfo): void {
+    // Log error details for debugging
+    console.error('ErrorBoundary caught an error:', error);
+    console.error('Error info:', errorInfo);
+
+    this.setState({
+      error,
+      errorInfo,
+    });
+  }
+
+  handleReload = (): void => {
+    // Reset error state and reload the page
+    this.setState({ hasError: false, error: null, errorInfo: null });
+    window.location.reload();
+  };
+
+  handleReset = (): void => {
+    // Reset error state without reloading (try to recover)
+    this.setState({ hasError: false, error: null, errorInfo: null });
+  };
+
+  render(): ReactNode {
+    if (this.state.hasError) {
+      return (
+
+
+ {/* Error Icon */} +
+
+ +
+
+ + {/* Error Title */} +

+ Something went wrong +

+ + {/* Error Description */} +

+ An unexpected error occurred. Please try reloading the + application. +

+ + {/* Error Details (collapsible) */} + {this.state.error && ( +
+ + Error Details + +
+

+ {this.state.error.toString()} +

+ {this.state.errorInfo && ( +
+                      {this.state.errorInfo.componentStack}
+                    
+ )} +
+
+ )} + + {/* Action Buttons */} +
+ + +
+
+
+ ); + } + + return this.props.children; + } +} + +export default ErrorBoundary; diff --git a/frontend/src/components/ErrorBoundary/index.ts b/frontend/src/components/ErrorBoundary/index.ts new file mode 100644 index 000000000..e5d6dda21 --- /dev/null +++ b/frontend/src/components/ErrorBoundary/index.ts @@ -0,0 +1 @@ +export { default as ErrorBoundary } from './ErrorBoundary'; diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx index e01868d7c..b371e604e 100644 --- a/frontend/src/main.tsx +++ b/frontend/src/main.tsx @@ -2,6 +2,7 @@ import React from 'react'; import ReactDOM from 'react-dom/client'; import App from './App'; import BrowserWarning from './components/BrowserWarning'; +import { ErrorBoundary } from './components/ErrorBoundary'; import { isProd } from './utils/isProd'; import { startServer } from './utils/serverUtils'; import { isTauriEnvironment } from './utils/tauriUtils'; @@ -32,9 +33,11 @@ const Main = () => { } return ( - - - + + + + + ); }; diff --git a/frontend/src/pages/SettingsPage/components/FolderManagementCard.tsx b/frontend/src/pages/SettingsPage/components/FolderManagementCard.tsx index db4b029fa..3d609a384 100644 --- a/frontend/src/pages/SettingsPage/components/FolderManagementCard.tsx +++ b/frontend/src/pages/SettingsPage/components/FolderManagementCard.tsx @@ -1,9 +1,17 @@ -import React from 'react'; -import { Folder, Trash2, Check } from 'lucide-react'; +import React, { useState } from 'react'; +import { Folder, Trash2, Check, AlertTriangle } from 'lucide-react'; import { Switch } from '@/components/ui/switch'; import { Button } from '@/components/ui/button'; import { Progress } from '@/components/ui/progress'; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from '@/components/ui/dialog'; import { useSelector } from 'react-redux'; import { RootState } from '@/app/store'; import FolderPicker from '@/components/FolderPicker/FolderPicker'; @@ -29,6 +37,33 @@ const 
FolderManagementCard: React.FC = () => { (state: RootState) => state.folders.taggingStatus, ); + // State for delete confirmation dialog + const [folderToDelete, setFolderToDelete] = useState( + null, + ); + const [isDeleteDialogOpen, setIsDeleteDialogOpen] = useState(false); + + // Handle delete button click - show confirmation dialog + const handleDeleteClick = (folder: FolderDetails) => { + setFolderToDelete(folder); + setIsDeleteDialogOpen(true); + }; + + // Handle confirmed deletion + const handleConfirmDelete = () => { + if (folderToDelete) { + deleteFolder(folderToDelete.folder_id); + setIsDeleteDialogOpen(false); + setFolderToDelete(null); + } + }; + + // Handle cancel deletion + const handleCancelDelete = () => { + setIsDeleteDialogOpen(false); + setFolderToDelete(null); + }; + return ( { + + + + ); };