diff --git a/.github/workflows/adventurelog-bot.yml b/.github/workflows/adventurelog-bot.yml index d03a9c313..567720eb3 100644 --- a/.github/workflows/adventurelog-bot.yml +++ b/.github/workflows/adventurelog-bot.yml @@ -61,9 +61,9 @@ jobs: await safeClosePr(); } - // Ignore specific user - if (context.actor === "seanmorley15") { - console.log("Skipping maintainer PR"); + // Ignore PRs created by the maintainer to avoid blocking their work, as well as dependabot + if (context.actor === "seanmorley15" || context.actor === "dependabot[bot]") { + console.log("Skipping maintainer or dependabot PR"); return; } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 806ec3401..ec00bf11f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -163,7 +163,7 @@ If your changes affect: please update the documentation in the: ``` -/documentation +/docs ``` folder accordingly. diff --git a/backend/server/adventures/geocoding.py b/backend/server/adventures/geocoding.py index fb80c32f9..baa03730f 100644 --- a/backend/server/adventures/geocoding.py +++ b/backend/server/adventures/geocoding.py @@ -3,6 +3,7 @@ import socket import re import unicodedata +from urllib.parse import quote from worldtravel.models import Region, City, VisitedRegion, VisitedCity from django.conf import settings @@ -20,7 +21,12 @@ def search_google(query): headers = { 'Content-Type': 'application/json', 'X-Goog-Api-Key': api_key, - 'X-Goog-FieldMask': 'places.displayName.text,places.formattedAddress,places.location,places.types,places.rating,places.userRatingCount' + 'X-Goog-FieldMask': ( + 'places.id,places.displayName.text,places.formattedAddress,places.location,' + 'places.types,places.rating,places.userRatingCount,places.websiteUri,' + 'places.nationalPhoneNumber,places.internationalPhoneNumber,' + 'places.editorialSummary.text,places.googleMapsUri,places.photos.name' + ) } payload = { @@ -52,6 +58,14 @@ def search_google(query): if rating is not None and ratings_total: importance = round(float(rating) * 
ratings_total / 100, 2) + photos = [] + for photo in place.get('photos', [])[:5]: + photo_name = photo.get('name') + if photo_name: + photos.append( + f"https://places.googleapis.com/v1/{photo_name}/media?key={api_key}&maxHeightPx=800&maxWidthPx=800" + ) + # Extract display name from the new API structure display_name_obj = place.get("displayName", {}) name = display_name_obj.get("text") if display_name_obj else None @@ -61,9 +75,18 @@ def search_google(query): "lon": location.get("longitude"), "name": name, "display_name": place.get("formattedAddress"), + "place_id": place.get("id"), "type": primary_type, + "types": types, "category": category, + "description": (place.get('editorialSummary') or {}).get('text'), + "website": place.get('websiteUri'), + "phone_number": place.get('internationalPhoneNumber') or place.get('nationalPhoneNumber'), + "google_maps_url": place.get('googleMapsUri'), "importance": importance, + "rating": rating, + "review_count": ratings_total, + "photos": photos, "addresstype": addresstype, "powered_by": "google", }) @@ -172,6 +195,359 @@ def search(query): # If Google fails, fallback to OSM return search_osm(query) + +def _fetch_wikipedia_summary(query, language='en'): + normalized_query = (query or '').strip() + if not normalized_query: + return None + + candidates = [normalized_query] + if ',' in normalized_query: + head = normalized_query.split(',')[0].strip() + if head and head not in candidates: + candidates.append(head) + + for candidate in candidates: + try: + encoded_query = quote(candidate, safe='') + url = f"https://{language}.wikipedia.org/api/rest_v1/page/summary/{encoded_query}" + response = requests.get( + url, + headers={'User-Agent': 'AdventureLog Server'}, + timeout=(2, 5), + ) + if response.status_code != 200: + continue + + data = response.json() + if data.get('type') == 'disambiguation': + continue + + extract = (data.get('extract') or '').strip() + if len(extract) >= 120: + return extract + except 
requests.exceptions.RequestException: + continue + + return None + + +def _compose_place_description( + editorial_summary, + review_snippets, +): + parts = [] + + summary = (editorial_summary or '').strip() + if summary: + parts.append(f"### About\n\n{summary}") + + cleaned_reviews = [] + for snippet in review_snippets: + text = (snippet or '').strip() + if len(text) >= 40: + cleaned_reviews.append(text) + if len(cleaned_reviews) >= 2: + break + + if cleaned_reviews: + review_block = '### Visitor Highlights\n\n' + '\n'.join( + f"- {text}" for text in cleaned_reviews + ) + parts.append(review_block) + + return '\n\n'.join(parts).strip() or None + + +def get_place_details(place_id, fallback_query=None, language='en'): + if not place_id: + return {'error': 'place_id is required'} + + details = { + 'description': None, + 'name': None, + 'formatted_address': None, + 'types': [], + 'rating': None, + 'review_count': None, + 'website': None, + 'phone_number': None, + 'google_maps_url': None, + 'source': None, + } + + api_key = settings.GOOGLE_MAPS_API_KEY + if api_key: + try: + url = f"https://places.googleapis.com/v1/places/{place_id}" + headers = { + 'X-Goog-Api-Key': api_key, + 'X-Goog-FieldMask': ( + 'id,displayName.text,formattedAddress,editorialSummary.text,types,' + 'rating,userRatingCount,websiteUri,nationalPhoneNumber,' + 'internationalPhoneNumber,googleMapsUri,reviews.text.text' + ), + } + response = requests.get(url, headers=headers, timeout=(2, 6)) + response.raise_for_status() + + place = response.json() + details['name'] = (place.get('displayName') or {}).get('text') + details['formatted_address'] = place.get('formattedAddress') + details['types'] = place.get('types') or [] + details['rating'] = place.get('rating') + details['review_count'] = place.get('userRatingCount') + details['website'] = place.get('websiteUri') + details['phone_number'] = ( + place.get('internationalPhoneNumber') or place.get('nationalPhoneNumber') + ) + details['google_maps_url'] = 
place.get('googleMapsUri') + + editorial_summary = (place.get('editorialSummary') or {}).get('text') + reviews = place.get('reviews') or [] + review_snippets = [((review.get('text') or {}).get('text')) for review in reviews] + details['description'] = _compose_place_description( + editorial_summary, + review_snippets, + ) + if details['description']: + details['source'] = 'google' + except requests.exceptions.RequestException: + pass + + # Google summaries are often short; fallback to Wikipedia for richer context. + description_text = (details.get('description') or '').strip() + if len(description_text) < 220: + wikipedia_summary = _fetch_wikipedia_summary( + fallback_query or details.get('name') or '', + language=language, + ) + if wikipedia_summary: + if description_text: + details['description'] = f"{description_text}\n\n### Background\n\n{wikipedia_summary}" + details['source'] = 'google+wikipedia' + else: + details['description'] = f"### Background\n\n{wikipedia_summary}" + details['source'] = 'wikipedia' + + if not details.get('description'): + return {'error': 'Unable to enrich place description'} + + return details + + +def _clean_location_candidate(value): + if value is None: + return None + cleaned = str(value).strip() + return cleaned or None + + +def _looks_like_street_address(value): + candidate = _clean_location_candidate(value) + if not candidate: + return False + + lowered = candidate.lower() + if not re.search(r"\d", lowered): + return False + + if lowered.count(",") >= 2: + return True + + if not re.match(r"^\d{1,6}\s+\S+", lowered): + return False + + street_tokens = ( + "st", + "street", + "rd", + "road", + "ave", + "avenue", + "blvd", + "boulevard", + "dr", + "drive", + "ln", + "lane", + "ct", + "court", + "pl", + "place", + "pkwy", + "parkway", + "hwy", + "highway", + "trl", + "trail", + ) + return any(re.search(rf"\b{token}\b", lowered) for token in street_tokens) + + +def _first_preferred_location_name(candidates, 
allow_address_fallback=False): + address_fallback = None + for candidate in candidates: + cleaned = _clean_location_candidate(candidate) + if not cleaned: + continue + if not _looks_like_street_address(cleaned): + return cleaned + if address_fallback is None: + address_fallback = cleaned + return address_fallback if allow_address_fallback else None + + +def _extract_google_component_name(address_components): + preferred_types = ( + "premise", + "point_of_interest", + "establishment", + "subpremise", + "natural_feature", + "airport", + "park", + "tourist_attraction", + "shopping_mall", + "university", + "school", + "hospital", + ) + + for preferred_type in preferred_types: + for component in address_components or []: + types = component.get("types", []) + if preferred_type in types: + return component.get("long_name") or component.get("short_name") + return None + + +def _score_google_result_types(types): + priority = ( + "point_of_interest", + "establishment", + "premise", + "subpremise", + "tourist_attraction", + "park", + "airport", + "shopping_mall", + "university", + "school", + "hospital", + "street_address", + "route", + ) + for idx, type_name in enumerate(priority): + if type_name in types: + return len(priority) - idx + return 0 + + +def _fetch_google_nearby_place_name(lat, lon, api_key): + url = "https://places.googleapis.com/v1/places:searchNearby" + headers = { + 'Content-Type': 'application/json', + 'X-Goog-Api-Key': api_key, + 'X-Goog-FieldMask': 'places.displayName.text,places.formattedAddress,places.types', + } + payload = { + "maxResultCount": 6, + "rankPreference": "DISTANCE", + "locationRestriction": { + "circle": { + "center": { + "latitude": float(lat), + "longitude": float(lon), + }, + "radius": 45.0, + } + }, + } + + try: + response = requests.post(url, headers=headers, json=payload, timeout=(2, 5)) + response.raise_for_status() + places = (response.json() or {}).get("places", []) + except requests.exceptions.RequestException: + return None + 
+ candidates = [((place.get("displayName") or {}).get("text")) for place in places] + return _first_preferred_location_name(candidates, allow_address_fallback=False) + + +def _extract_google_location_name(results, nearby_place_name=None): + preferred_nearby = _first_preferred_location_name([nearby_place_name], allow_address_fallback=False) + if preferred_nearby: + return preferred_nearby + + scored_candidates = [] + for result in results or []: + score = _score_google_result_types(result.get("types", [])) + if score <= 0: + continue + component_name = _extract_google_component_name(result.get("address_components", [])) + name_candidate = _first_preferred_location_name([component_name], allow_address_fallback=False) + if name_candidate: + scored_candidates.append((score, name_candidate)) + + if scored_candidates: + scored_candidates.sort(key=lambda item: item[0], reverse=True) + return scored_candidates[0][1] + + component_candidates = [ + _extract_google_component_name(result.get("address_components", [])) + for result in (results or []) + ] + component_pick = _first_preferred_location_name(component_candidates, allow_address_fallback=False) + if component_pick: + return component_pick + + formatted_candidates = [result.get("formatted_address") for result in (results or [])] + return _first_preferred_location_name(formatted_candidates, allow_address_fallback=True) + + +def _extract_osm_location_name(data): + address = data.get("address", {}) or {} + namedetails = data.get("namedetails", {}) or {} + extratags = data.get("extratags", {}) or {} + + candidates = [ + data.get("name"), + namedetails.get("name"), + namedetails.get("official_name"), + namedetails.get("short_name"), + namedetails.get("brand"), + namedetails.get("loc_name"), + address.get("amenity"), + address.get("tourism"), + address.get("attraction"), + address.get("building"), + address.get("shop"), + address.get("leisure"), + address.get("historic"), + address.get("man_made"), + address.get("office"), + 
address.get("aeroway"), + address.get("railway"), + address.get("public_transport"), + address.get("craft"), + address.get("house_name"), + extratags.get("name"), + extratags.get("official_name"), + extratags.get("brand"), + extratags.get("operator"), + ] + + preferred = _first_preferred_location_name(candidates, allow_address_fallback=False) + if preferred: + return preferred + + return _first_preferred_location_name( + [data.get("name"), data.get("display_name")], + allow_address_fallback=True, + ) + # ----------------- # REVERSE GEOCODING # ----------------- @@ -186,10 +562,7 @@ def extractIsoCode(user, data): country_code = None city = None visited_city = None - location_name = None - - if 'name' in data.keys(): - location_name = data['name'] + location_name = _clean_location_candidate(data.get('location_name') or data.get('name')) address = data.get('address', {}) or {} @@ -369,7 +742,10 @@ def reverse_geocode(lat, lon, user): return reverse_geocode_osm(lat, lon, user) def reverse_geocode_osm(lat, lon, user): - url = f"https://nominatim.openstreetmap.org/reverse?format=jsonv2&lat={lat}&lon={lon}" + url = ( + "https://nominatim.openstreetmap.org/reverse" + f"?format=jsonv2&addressdetails=1&namedetails=1&extratags=1&zoom=18&lat={lat}&lon={lon}" + ) headers = {'User-Agent': 'AdventureLog Server'} connect_timeout = 1 read_timeout = 5 @@ -381,6 +757,7 @@ def reverse_geocode_osm(lat, lon, user): response = requests.get(url, headers=headers, timeout=(connect_timeout, read_timeout)) response.raise_for_status() data = response.json() + data["location_name"] = _extract_osm_location_name(data) return extractIsoCode(user, data) except requests.exceptions.Timeout: return {"error": "Request timed out while contacting OpenStreetMap. Please try again."} @@ -424,11 +801,23 @@ def reverse_geocode_google(lat, lon, user): else: return {"error": "Geocoding failed. 
Please try again."} + results = data.get("results", []) + if not results: + return {"error": "No location found for the given coordinates."} + + nearby_place_name = _fetch_google_nearby_place_name(lat, lon, api_key) + location_name = _extract_google_location_name(results, nearby_place_name=nearby_place_name) + # Convert Google schema to Nominatim-style for extractIsoCode - first_result = data.get("results", [])[0] + first_result = results[0] + address_result = next( + (result for result in results if "plus_code" not in result.get("types", [])), + first_result, + ) result_data = { "name": first_result.get("formatted_address"), - "address": _parse_google_address_components(first_result.get("address_components", [])) + "location_name": location_name, + "address": _parse_google_address_components(address_result.get("address_components", [])), } return extractIsoCode(user, result_data) except requests.exceptions.Timeout: diff --git a/backend/server/adventures/views/location_image_view.py b/backend/server/adventures/views/location_image_view.py index d1a9c4b01..27a5d177a 100644 --- a/backend/server/adventures/views/location_image_view.py +++ b/backend/server/adventures/views/location_image_view.py @@ -4,8 +4,11 @@ from rest_framework.permissions import IsAuthenticated from rest_framework.throttling import UserRateThrottle from django.http import HttpResponse +from concurrent.futures import ThreadPoolExecutor, as_completed import ipaddress +import mimetypes import socket +from urllib.parse import urljoin from urllib.parse import urlparse from django.db.models import Q from django.core.files.base import ContentFile @@ -17,6 +20,7 @@ import requests from adventures.permissions import ContentImagePermission import logging +import uuid logger = logging.getLogger(__name__) @@ -25,6 +29,17 @@ class ImageProxyThrottle(UserRateThrottle): scope = 'image_proxy' +def _public_import_error_message(exc): + """Return a safe, user-facing import error without exposing internal details.""" 
+ if isinstance(exc, ValueError): + return "Invalid image URL" + if isinstance(exc, requests.exceptions.Timeout): + return "Download timeout" + if isinstance(exc, requests.exceptions.RequestException): + return "Failed to fetch image from the remote server" + return "Image import failed" + + def _is_safe_url(image_url): """ Validate a URL for safe proxy use. @@ -67,6 +82,149 @@ def _is_safe_url(image_url): return True, parsed +def download_remote_image(image_url): + safe, result = _is_safe_url(image_url) + if not safe: + raise ValueError(result) + + headers = {'User-Agent': 'AdventureLog/1.0 (Image Import)'} + max_redirects = 3 + current_url = image_url + + response = None + for _ in range(max_redirects + 1): + response = requests.get( + current_url, + timeout=10, + headers=headers, + stream=True, + allow_redirects=False, + ) + + if not response.is_redirect: + break + + redirect_url = response.headers.get('Location', '') + if not redirect_url: + raise ValueError('Redirect with missing Location header') + + # Handle relative redirects safely. 
+ redirect_url = urljoin(current_url, redirect_url) + + safe, result = _is_safe_url(redirect_url) + if not safe: + raise ValueError(f'Redirect blocked: {result}') + + current_url = redirect_url + else: + raise ValueError('Too many redirects') + + if response is None: + raise ValueError('Failed to fetch image') + + response.raise_for_status() + + content_type = response.headers.get('Content-Type', '').split(';')[0].strip().lower() + if not content_type.startswith('image/'): + raise ValueError('URL does not point to an image') + + content_length = response.headers.get('Content-Length') + if content_length and int(content_length) > 20 * 1024 * 1024: + raise ValueError('Image too large (max 20MB)') + + ext = mimetypes.guess_extension(content_type) or '.jpg' + if ext == '.jpe': + ext = '.jpg' + + return { + 'filename': f"remote_{uuid.uuid4().hex}{ext}", + 'content': response.content, + 'content_type': content_type, + 'source_url': image_url, + } + + +def import_remote_images_for_object(content_object, urls, owner=None, max_workers=5): + """Download remote URLs and attach them as ContentImage records for a content object.""" + content_type = ContentType.objects.get_for_model(content_object.__class__) + object_id = str(content_object.id) + image_owner = owner or getattr(content_object, 'user', None) + + downloaded_results = [] + worker_count = max(1, min(max_workers, len(urls))) + + with ThreadPoolExecutor(max_workers=worker_count) as executor: + futures = { + executor.submit(download_remote_image, image_url): (index, image_url) + for index, image_url in enumerate(urls) + } + + for future in as_completed(futures): + index, image_url = futures[future] + try: + file_data = future.result() + downloaded_results.append((index, image_url, file_data, None)) + except Exception as exc: + logger.warning( + "Image import failed for URL %s", + image_url, + exc_info=True, + ) + downloaded_results.append((index, image_url, None, _public_import_error_message(exc))) + + 
downloaded_results.sort(key=lambda item: item[0]) + + existing_image_count = ContentImage.objects.filter( + content_type=content_type, + object_id=object_id, + ).count() + set_primary_next = existing_image_count == 0 + + created_images = [] + results = [] + failed = [] + + for _, image_url, file_data, error_message in downloaded_results: + if error_message: + failure = { + 'url': image_url, + 'error': error_message, + } + results.append({ + **failure, + 'status': 'failed', + }) + failed.append(failure) + continue + + image_file = ContentFile(file_data['content'], name=file_data['filename']) + image = ContentImage.objects.create( + user=image_owner, + image=image_file, + content_type=content_type, + object_id=object_id, + is_primary=set_primary_next, + ) + if set_primary_next: + set_primary_next = False + + created_images.append(image) + results.append({ + 'url': image_url, + 'status': 'created', + 'id': str(image.id), + }) + + return { + 'created_images': created_images, + 'results': results, + 'created_count': len(created_images), + 'requested_count': len(urls), + 'failed_count': len(failed), + 'failed': failed, + } + + class ContentImageViewSet(viewsets.ModelViewSet): serializer_class = ContentImageSerializer permission_classes = [ContentImagePermission] @@ -192,69 +350,12 @@ def fetch_from_url(self, request): status=status.HTTP_400_BAD_REQUEST ) - # Validate the initial URL (scheme, port, SSRF check on all resolved IPs) - safe, result = _is_safe_url(image_url) - if not safe: - return Response({"error": result}, status=status.HTTP_400_BAD_REQUEST) - try: - headers = {'User-Agent': 'AdventureLog/1.0 (Image Proxy)'} - max_redirects = 3 - current_url = image_url - - for _ in range(max_redirects + 1): - response = requests.get( - current_url, - timeout=10, - headers=headers, - stream=True, - allow_redirects=False, - ) + image_data = download_remote_image(str(image_url).strip()) + return HttpResponse(image_data['content'], content_type=image_data['content_type'], 
status=200) - if not response.is_redirect: - break - - # Re-validate every redirect destination before following - redirect_url = response.headers.get('Location', '') - if not redirect_url: - return Response( - {"error": "Redirect with missing Location header"}, - status=status.HTTP_502_BAD_GATEWAY, - ) - - safe, result = _is_safe_url(redirect_url) - if not safe: - return Response( - {"error": f"Redirect blocked: {result}"}, - status=status.HTTP_400_BAD_REQUEST, - ) - - current_url = redirect_url - else: - return Response( - {"error": "Too many redirects"}, - status=status.HTTP_400_BAD_REQUEST, - ) - - response.raise_for_status() - - content_type = response.headers.get('Content-Type', '') - if not content_type.startswith('image/'): - return Response( - {"error": "URL does not point to an image"}, - status=status.HTTP_400_BAD_REQUEST - ) - - content_length = response.headers.get('Content-Length') - if content_length and int(content_length) > 20 * 1024 * 1024: - return Response( - {"error": "Image too large (max 20MB)"}, - status=status.HTTP_400_BAD_REQUEST - ) - - image_data = response.content - - return HttpResponse(image_data, content_type=content_type, status=200) + except ValueError: + return Response({"error": "Invalid image URL"}, status=status.HTTP_400_BAD_REQUEST) except requests.exceptions.Timeout: logger.error("Timeout fetching image from URL %s", image_url) @@ -269,6 +370,64 @@ def fetch_from_url(self, request): status=status.HTTP_502_BAD_GATEWAY ) + @action(detail=False, methods=['post'], permission_classes=[IsAuthenticated]) + def import_from_urls(self, request): + content_type_name = request.data.get('content_type') + object_id = request.data.get('object_id') + urls = request.data.get('urls') + + if not isinstance(urls, list) or not urls: + return Response({"error": "urls must be a non-empty array"}, status=status.HTTP_400_BAD_REQUEST) + + urls = [str(url).strip() for url in urls if str(url).strip()] + if not urls: + return Response({"error": "No valid 
URLs provided"}, status=status.HTTP_400_BAD_REQUEST) + + if len(urls) > 10: + return Response({"error": "Maximum 10 URLs per request"}, status=status.HTTP_400_BAD_REQUEST) + + content_object = self._get_and_validate_content_object(content_type_name, object_id) + if isinstance(content_object, Response): + return content_object + + owner = getattr(content_object, 'user', request.user) + + import_summary = import_remote_images_for_object( + content_object, + urls, + owner=owner, + max_workers=min(5, len(urls)), + ) + + created_images = import_summary['created_images'] + results = import_summary['results'] + + if not created_images: + return Response( + { + 'error': 'No images could be imported', + 'results': results, + }, + status=status.HTTP_400_BAD_REQUEST, + ) + + serialized = ContentImageSerializer(created_images, many=True, context={'request': request}) + response_status = ( + status.HTTP_201_CREATED + if import_summary['created_count'] == import_summary['requested_count'] + else status.HTTP_200_OK + ) + + return Response( + { + 'created': serialized.data, + 'results': results, + 'created_count': import_summary['created_count'], + 'requested_count': import_summary['requested_count'], + }, + status=response_status, + ) + def create(self, request, *args, **kwargs): # Get content type and object ID from request content_type_name = request.data.get('content_type') diff --git a/backend/server/adventures/views/location_view.py b/backend/server/adventures/views/location_view.py index c9630e183..e8c6fa67d 100644 --- a/backend/server/adventures/views/location_view.py +++ b/backend/server/adventures/views/location_view.py @@ -1,4 +1,5 @@ import logging +from urllib.parse import urlparse from django.utils import timezone from django.db import transaction from django.core.exceptions import PermissionDenied @@ -14,6 +15,9 @@ from adventures.permissions import IsOwnerOrSharedWithFullAccess from adventures.serializers import LocationSerializer, MapPinSerializer, 
CalendarLocationSerializer from adventures.utils import pagination +from adventures.geocoding import get_place_details, reverse_geocode +from worldtravel.models import City, Country, Region +from .location_image_view import import_remote_images_for_object logger = logging.getLogger(__name__) @@ -158,6 +162,122 @@ def destroy(self, request, *args, **kwargs): # ==================== CUSTOM ACTIONS ==================== + @action(detail=False, methods=['post'], url_path='quick-add') + @transaction.atomic + def quick_add(self, request): + """Create a location from lightweight map/place input in one server-side call.""" + payload = request.data if isinstance(request.data, dict) else {} + + name = str(payload.get('name') or '').strip() + if not name: + return Response({"error": "name is required"}, status=status.HTTP_400_BAD_REQUEST) + + latitude = self._coerce_coordinate(payload.get('latitude'), -90, 90) + longitude = self._coerce_coordinate(payload.get('longitude'), -180, 180) + if latitude is None or longitude is None: + return Response( + {"error": "Valid latitude and longitude are required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + collection = self._resolve_quick_add_collection(payload.get('collection_id')) + if isinstance(collection, Response): + return collection + + place_id = str(payload.get('place_id') or '').strip() or None + reverse_data = {} + details = {} + + try: + reverse_result = reverse_geocode(latitude, longitude, request.user) + if isinstance(reverse_result, dict) and 'error' not in reverse_result: + reverse_data = reverse_result + except Exception: + reverse_data = {} + + if place_id: + details_result = get_place_details(place_id, fallback_query=name) + if isinstance(details_result, dict): + if 'error' not in details_result or details_result.get('description'): + details = details_result + + rating = self._coerce_float(payload.get('rating')) + if rating is None: + rating = self._coerce_float(details.get('rating')) + + review_count = 
self._coerce_int(payload.get('review_count')) + if review_count is None: + review_count = self._coerce_int(details.get('review_count')) + + website = self._clean_url(details.get('website')) or self._clean_url(payload.get('website')) + maps_url = self._clean_url(details.get('google_maps_url')) or self._clean_url( + payload.get('google_maps_url') + ) + link = self._clean_url(payload.get('link')) or website or maps_url + + phone_number = str(details.get('phone_number') or payload.get('phone_number') or '').strip() or None + + location_label = ( + str(payload.get('location') or '').strip() + or str(reverse_data.get('display_name') or '').strip() + or str(details.get('formatted_address') or '').strip() + or None + ) + + description = self._build_quick_add_description( + base_description=payload.get('description'), + detailed_description=details.get('description'), + ) + + category_payload = self._normalize_quick_add_category(payload.get('category')) + if isinstance(category_payload, Response): + return category_payload + + serializer_payload = { + 'name': name, + 'location': location_label, + 'latitude': latitude, + 'longitude': longitude, + 'rating': rating, + 'description': description, + 'link': link, + 'tags': self._sanitize_tags(payload.get('types') or payload.get('tags')), + 'is_public': self._coerce_bool(payload.get('is_public'), default=False), + } + + if category_payload: + serializer_payload['category'] = category_payload + + if collection: + serializer_payload['collections'] = [str(collection.id)] + + serializer = self.get_serializer(data=serializer_payload) + serializer.is_valid(raise_exception=True) + self.perform_create(serializer) + + location = serializer.instance + self._apply_reverse_geocode_metadata(location, reverse_data, location_label) + + photo_urls = self._sanitize_photo_urls(payload.get('photos')) + image_import_summary = None + if photo_urls: + image_import_summary = import_remote_images_for_object( + location, + photo_urls, + 
owner=location.user, + max_workers=min(5, len(photo_urls)), + ) + + response_data = self.get_serializer(location).data + if image_import_summary and image_import_summary.get('failed'): + response_data['quick_add_image_import'] = { + 'created_count': image_import_summary['created_count'], + 'failed_count': image_import_summary['failed_count'], + 'failed': image_import_summary['failed'], + } + + return Response(response_data, status=status.HTTP_201_CREATED) + @action(detail=False, methods=['get']) def filtered(self, request): """Filter locations by category types and visit status.""" @@ -460,6 +580,198 @@ def _validate_collection_permissions(self, collections): f"You don't have permission to add location to collection '{collection.name}'" ) + def _resolve_quick_add_collection(self, collection_id): + if not collection_id: + return None + + try: + collection = Collection.objects.get(id=collection_id) + except Collection.DoesNotExist: + return Response( + {"error": "Collection not found."}, + status=status.HTTP_404_NOT_FOUND, + ) + + try: + self._validate_collection_permissions([collection]) + except PermissionDenied: + return Response( + {"error": "You do not have permission to add this location to the selected collection."}, + status=status.HTTP_403_FORBIDDEN, + ) + + return collection + + def _coerce_coordinate(self, value, min_value, max_value): + try: + number = round(float(value), 6) + except (TypeError, ValueError): + return None + + if number < min_value or number > max_value: + return None + + return number + + def _coerce_float(self, value): + try: + return float(value) + except (TypeError, ValueError): + return None + + def _coerce_int(self, value): + try: + return int(value) + except (TypeError, ValueError): + return None + + def _coerce_bool(self, value, default=False): + if isinstance(value, bool): + return value + if isinstance(value, str): + normalized = value.strip().lower() + if normalized in {'true', '1', 'yes', 'on'}: + return True + if normalized in 
{'false', '0', 'no', 'off'}: + return False + return default + + def _clean_url(self, value): + if not isinstance(value, str): + return None + + normalized = value.strip() + if not normalized: + return None + + parsed = urlparse(normalized) + if parsed.scheme in {'http', 'https'} and parsed.netloc: + return normalized + + return None + + def _sanitize_tags(self, raw_tags): + if not isinstance(raw_tags, list): + return [] + + tags = [] + for item in raw_tags: + if not isinstance(item, str): + continue + + value = item.strip() + if not value or value in tags: + continue + + tags.append(value) + if len(tags) >= 8: + break + + return tags + + def _sanitize_photo_urls(self, raw_urls): + if not isinstance(raw_urls, list): + return [] + + cleaned = [] + for value in raw_urls: + url = self._clean_url(value) + if not url or url in cleaned: + continue + cleaned.append(url) + if len(cleaned) >= 5: + break + + return cleaned + + def _normalize_quick_add_category(self, raw_category): + if not raw_category: + return None + + if isinstance(raw_category, dict): + category_id = raw_category.get('id') + name = str(raw_category.get('name') or '').strip().lower() + display_name = str(raw_category.get('display_name') or '').strip() + icon = str(raw_category.get('icon') or '').strip() or '🌍' + elif isinstance(raw_category, str): + category_id = raw_category.strip() + name = '' + display_name = '' + icon = '🌍' + else: + return Response( + {"error": "category must be an object or string"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + category = None + if category_id: + category = Category.objects.filter(id=category_id, user=self.request.user).first() + if not category: + return Response( + {"error": "Category not found or inaccessible"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if category: + return { + 'name': category.name, + 'display_name': category.display_name, + 'icon': category.icon, + } + + if not name: + return None + + return { + 'name': name, + 'display_name': 
display_name or name, + 'icon': icon, + } + + def _build_quick_add_description( + self, + base_description, + detailed_description, + ): + description = str(detailed_description or '').strip() or str(base_description or '').strip() + + return description or None + + def _apply_reverse_geocode_metadata(self, location, reverse_data, fallback_location): + if not isinstance(reverse_data, dict): + reverse_data = {} + + updated_fields = [] + + region_id = reverse_data.get('region_id') + if region_id: + region = Region.objects.filter(id=region_id).first() + if region and location.region_id != region.id: + location.region = region + updated_fields.append('region') + + city_id = reverse_data.get('city_id') + if city_id: + city = City.objects.filter(id=city_id).first() + if city and location.city_id != city.id: + location.city = city + updated_fields.append('city') + + country_id = reverse_data.get('country_id') + if country_id: + country = Country.objects.filter(country_code=country_id).first() + if country and location.country_id != country.id: + location.country = country + updated_fields.append('country') + + if fallback_location and not location.location: + location.location = fallback_location + updated_fields.append('location') + + if updated_fields: + location.save(update_fields=updated_fields, _skip_geocode=True) + def _apply_visit_filtering(self, queryset, request): """Apply visit status filtering to queryset.""" is_visited_param = request.query_params.get('is_visited') diff --git a/backend/server/adventures/views/reverse_geocode_view.py b/backend/server/adventures/views/reverse_geocode_view.py index b06353005..d9fa55d1a 100644 --- a/backend/server/adventures/views/reverse_geocode_view.py +++ b/backend/server/adventures/views/reverse_geocode_view.py @@ -7,7 +7,7 @@ from adventures.serializers import LocationSerializer from adventures.geocoding import reverse_geocode from django.conf import settings -from adventures.geocoding import search_google, search_osm +from 
adventures.geocoding import search_google, search_osm, get_place_details class ReverseGeocodeViewSet(viewsets.ViewSet): permission_classes = [IsAuthenticated] @@ -131,4 +131,18 @@ def mark_visited_region(self, request): "regions": new_regions, "new_cities": new_city_count, "cities": new_cities - }) \ No newline at end of file + }) + + @action(detail=False, methods=['get']) + def place_details(self, request): + place_id = request.query_params.get('place_id', '').strip() + if not place_id: + return Response({"error": "place_id parameter is required"}, status=400) + + name = request.query_params.get('name', '') + language = request.query_params.get('language', 'en') + + details = get_place_details(place_id, fallback_query=name, language=language) + if 'error' in details and not details.get('description'): + return Response(details, status=502) + return Response(details) \ No newline at end of file diff --git a/backend/server/templates/base.html b/backend/server/templates/base.html index 205445ee4..24234e179 100644 --- a/backend/server/templates/base.html +++ b/backend/server/templates/base.html @@ -175,7 +175,7 @@
{quickAddedLocation.name}
{selectedLocation.name}
+{selectedLocation.name}
+{selectedLocation.location}
+ {#if selectedLocation.rating} +{selectedMarker.lat.toFixed(6)}, {selectedMarker.lng.toFixed(6)}
- {#if selectedLocation.category} -- {selectedLocation.category} • {selectedLocation.type || 'location'} -
- {/if} - - - {#if locationData?.city || locationData?.region || locationData?.country} -- {locationData.display_name} -
- {/if}+ Optional. If not selected, backend defaults to General. +
+