diff --git a/README.rst b/README.rst
index 52d93001..a9422c0d 100644
--- a/README.rst
+++ b/README.rst
@@ -7,6 +7,12 @@ django-pyodbc-azure
.. image:: http://img.shields.io/pypi/l/django-pyodbc-azure.svg?style=flat
:target: http://opensource.org/licenses/BSD-3-Clause
+.. image:: https://ci.appveyor.com/api/projects/status/i9hfnl2gfeiq82qb?svg=true
+ :target: https://ci.appveyor.com/project/denisenkom/django-pyodbc-azure
+
+.. image:: https://codecov.io/gh/denisenkom/django-pyodbc-azure/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/denisenkom/django-pyodbc-azure
+
*django-pyodbc-azure* is a modern fork of
`django-pyodbc `__, a
`Django `__ Microsoft SQL Server external
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 00000000..12fc7767
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,52 @@
+version: 1.0.{build}
+
+os: Windows Server 2012 R2
+
+environment:
+ HOST: localhost
+ SQLUSER: sa
+ SQLPASSWORD: Password12!
+ DATABASE: test
+ matrix:
+ - PYTHON: "C:\\Python36"
+ DJANGOVER: 1.11.3
+ SQLINSTANCE: SQL2016
+ - PYTHON: "C:\\Python36"
+ DJANGOVER: 1.10.7
+ SQLINSTANCE: SQL2016
+ - PYTHON: "C:\\Python36"
+ DJANGOVER: 1.9.13
+ SQLINSTANCE: SQL2016
+ #- PYTHON: "C:\\Python36"
+ # DJANGOVER: 1.11.3
+ # SQLINSTANCE: SQL2014
+ - PYTHON: "C:\\Python36"
+ DJANGOVER: 1.11.3
+ SQLINSTANCE: SQL2012SP1
+
+install:
+ - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+ - python --version
+ - "python -c \"import struct; print(struct.calcsize('P') * 8)\""
+ - pip install django==%DJANGOVER%
+ - pip install enum34
+ - pip install python-memcached <= 1.53
+ - pip install mock codecov
+ - pip install -e .
+
+build_script:
+ - python setup.py sdist
+
+before_test:
+ # setup SQL Server
+ - ps: |
+ $instanceName = $env:SQLINSTANCE
+ Start-Service "MSSQL`$$instanceName"
+ Start-Service "SQLBrowser"
+ - sqlcmd -S "(local)\%SQLINSTANCE%" -Q "Use [master]; CREATE DATABASE test;"
+ - sqlcmd -S "(local)\%SQLINSTANCE%" -h -1 -Q "set nocount on; Select @@version"
+
+
+test_script:
+ - coverage run tests/runtests.py --noinput --settings=test_mssql --debug-sql
+ - codecov
diff --git a/tests/aggregation/__init__.py b/tests/aggregation/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/aggregation/models.py b/tests/aggregation/models.py
new file mode 100644
index 00000000..fd441fe5
--- /dev/null
+++ b/tests/aggregation/models.py
@@ -0,0 +1,44 @@
+from django.db import models
+
+
+class Author(models.Model):
+ name = models.CharField(max_length=100)
+ age = models.IntegerField()
+ friends = models.ManyToManyField('self', blank=True)
+
+ def __str__(self):
+ return self.name
+
+
+class Publisher(models.Model):
+ name = models.CharField(max_length=255)
+ num_awards = models.IntegerField()
+ duration = models.DurationField(blank=True, null=True)
+
+ def __str__(self):
+ return self.name
+
+
+class Book(models.Model):
+ isbn = models.CharField(max_length=9)
+ name = models.CharField(max_length=255)
+ pages = models.IntegerField()
+ rating = models.FloatField()
+ price = models.DecimalField(decimal_places=2, max_digits=6)
+ authors = models.ManyToManyField(Author)
+ contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
+ publisher = models.ForeignKey(Publisher, models.CASCADE)
+ pubdate = models.DateField()
+
+ def __str__(self):
+ return self.name
+
+
+class Store(models.Model):
+ name = models.CharField(max_length=255)
+ books = models.ManyToManyField(Book)
+ original_opening = models.DateTimeField()
+ friday_night_closing = models.TimeField()
+
+ def __str__(self):
+ return self.name
diff --git a/tests/aggregation/test_filter_argument.py b/tests/aggregation/test_filter_argument.py
new file mode 100644
index 00000000..54836178
--- /dev/null
+++ b/tests/aggregation/test_filter_argument.py
@@ -0,0 +1,81 @@
+import datetime
+from decimal import Decimal
+
+from django.db.models import Case, Count, F, Q, Sum, When
+from django.test import TestCase
+
+from .models import Author, Book, Publisher
+
+
+class FilteredAggregateTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.a1 = Author.objects.create(name='test', age=40)
+ cls.a2 = Author.objects.create(name='test2', age=60)
+ cls.a3 = Author.objects.create(name='test3', age=100)
+ cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
+ cls.b1 = Book.objects.create(
+ isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
+ pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
+ pubdate=datetime.date(2007, 12, 6),
+ )
+ cls.b2 = Book.objects.create(
+ isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
+ pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a2, publisher=cls.p1,
+ pubdate=datetime.date(2008, 3, 3),
+ )
+ cls.b3 = Book.objects.create(
+ isbn='159059996', name='Practical Django Projects',
+ pages=600, rating=4.5, price=Decimal('29.69'), contact=cls.a3, publisher=cls.p1,
+ pubdate=datetime.date(2008, 6, 23),
+ )
+ cls.a1.friends.add(cls.a2)
+ cls.a1.friends.add(cls.a3)
+ cls.b1.authors.add(cls.a1)
+ cls.b1.authors.add(cls.a3)
+ cls.b2.authors.add(cls.a2)
+ cls.b3.authors.add(cls.a3)
+
+ def test_filtered_aggregates(self):
+ agg = Sum('age', filter=Q(name__startswith='test'))
+ self.assertEqual(Author.objects.aggregate(age=agg)['age'], 200)
+
+ def test_double_filtered_aggregates(self):
+ agg = Sum('age', filter=Q(Q(name='test2') & ~Q(name='test')))
+ self.assertEqual(Author.objects.aggregate(age=agg)['age'], 60)
+
+ def test_excluded_aggregates(self):
+ agg = Sum('age', filter=~Q(name='test2'))
+ self.assertEqual(Author.objects.aggregate(age=agg)['age'], 140)
+
+ def test_related_aggregates_m2m(self):
+ agg = Sum('friends__age', filter=~Q(friends__name='test'))
+ self.assertEqual(Author.objects.filter(name='test').aggregate(age=agg)['age'], 160)
+
+ def test_related_aggregates_m2m_and_fk(self):
+ q = Q(friends__book__publisher__name='Apress') & ~Q(friends__name='test3')
+ agg = Sum('friends__book__pages', filter=q)
+ self.assertEqual(Author.objects.filter(name='test').aggregate(pages=agg)['pages'], 528)
+
+ def test_plain_annotate(self):
+ agg = Sum('book__pages', filter=Q(book__rating__gt=3))
+ qs = Author.objects.annotate(pages=agg).order_by('pk')
+ self.assertSequenceEqual([a.pages for a in qs], [447, None, 1047])
+
+ def test_filtered_aggregate_on_annotate(self):
+ pages_annotate = Sum('book__pages', filter=Q(book__rating__gt=3))
+ age_agg = Sum('age', filter=Q(total_pages__gte=400))
+ aggregated = Author.objects.annotate(total_pages=pages_annotate).aggregate(summed_age=age_agg)
+ self.assertEqual(aggregated, {'summed_age': 140})
+
+ def test_case_aggregate(self):
+ agg = Sum(
+ Case(When(friends__age=40, then=F('friends__age'))),
+ filter=Q(friends__name__startswith='test'),
+ )
+ self.assertEqual(Author.objects.aggregate(age=agg)['age'], 80)
+
+ def test_sum_star_exception(self):
+ msg = 'Star cannot be used with filter. Please specify a field.'
+ with self.assertRaisesMessage(ValueError, msg):
+ Count('*', filter=Q(age=40))
diff --git a/tests/aggregation/tests.py b/tests/aggregation/tests.py
new file mode 100644
index 00000000..4572e2a8
--- /dev/null
+++ b/tests/aggregation/tests.py
@@ -0,0 +1,1109 @@
+import datetime
+import re
+from decimal import Decimal
+
+from django.core.exceptions import FieldError
+from django.db import connection
+from django.db.models import (
+ Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField,
+ Max, Min, Sum, Value,
+)
+from django.test import TestCase
+from django.test.utils import Approximate, CaptureQueriesContext
+from django.utils import timezone
+
+from .models import Author, Book, Publisher, Store
+
+
+class AggregateTestCase(TestCase):
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
+ cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
+ cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
+ cls.a4 = Author.objects.create(name='James Bennett', age=29)
+ cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
+ cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
+ cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
+ cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
+ cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
+ cls.a1.friends.add(cls.a2, cls.a4)
+ cls.a2.friends.add(cls.a1, cls.a7)
+ cls.a4.friends.add(cls.a1)
+ cls.a5.friends.add(cls.a6, cls.a7)
+ cls.a6.friends.add(cls.a5, cls.a7)
+ cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
+ cls.a8.friends.add(cls.a9)
+ cls.a9.friends.add(cls.a8)
+
+ cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
+ cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
+ cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
+ cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
+ cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
+
+ cls.b1 = Book.objects.create(
+ isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
+ pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
+ pubdate=datetime.date(2007, 12, 6)
+ )
+ cls.b2 = Book.objects.create(
+ isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
+ pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
+ pubdate=datetime.date(2008, 3, 3)
+ )
+ cls.b3 = Book.objects.create(
+ isbn='159059996', name='Practical Django Projects',
+ pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
+ pubdate=datetime.date(2008, 6, 23)
+ )
+ cls.b4 = Book.objects.create(
+ isbn='013235613', name='Python Web Development with Django',
+ pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
+ pubdate=datetime.date(2008, 11, 3)
+ )
+ cls.b5 = Book.objects.create(
+ isbn='013790395', name='Artificial Intelligence: A Modern Approach',
+ pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
+ pubdate=datetime.date(1995, 1, 15)
+ )
+ cls.b6 = Book.objects.create(
+ isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
+ pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
+ pubdate=datetime.date(1991, 10, 15)
+ )
+ cls.b1.authors.add(cls.a1, cls.a2)
+ cls.b2.authors.add(cls.a3)
+ cls.b3.authors.add(cls.a4)
+ cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
+ cls.b5.authors.add(cls.a8, cls.a9)
+ cls.b6.authors.add(cls.a8)
+
+ s1 = Store.objects.create(
+ name='Amazon.com',
+ original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
+ friday_night_closing=datetime.time(23, 59, 59)
+ )
+ s2 = Store.objects.create(
+ name='Books.com',
+ original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
+ friday_night_closing=datetime.time(23, 59, 59)
+ )
+ s3 = Store.objects.create(
+ name="Mamma and Pappa's Books",
+ original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
+ friday_night_closing=datetime.time(21, 30)
+ )
+ s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
+ s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
+ s3.books.add(cls.b3, cls.b4, cls.b6)
+
+ def test_empty_aggregate(self):
+ self.assertEqual(Author.objects.all().aggregate(), {})
+
+ def test_aggregate_in_order_by(self):
+ msg = (
+ 'Using an aggregate in order_by() without also including it in '
+ 'annotate() is not allowed: Avg(F(book__rating)'
+ )
+ with self.assertRaisesMessage(FieldError, msg):
+ Author.objects.values('age').order_by(Avg('book__rating'))
+
+ def test_single_aggregate(self):
+ vals = Author.objects.aggregate(Avg("age"))
+ self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
+
+ def test_multiple_aggregates(self):
+ vals = Author.objects.aggregate(Sum("age"), Avg("age"))
+ self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
+
+ def test_filter_aggregate(self):
+ vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
+ self.assertEqual(vals, {'age__sum': 254})
+
+ def test_related_aggregate(self):
+ vals = Author.objects.aggregate(Avg("friends__age"))
+ self.assertEqual(vals, {'friends__age__avg': Approximate(34.07, places=2)})
+
+ vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
+ self.assertEqual(vals, {'authors__age__avg': Approximate(38.2857, places=2)})
+
+ vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
+ self.assertEqual(vals, {'book__rating__avg': 4.0})
+
+ vals = Book.objects.aggregate(Sum("publisher__num_awards"))
+ self.assertEqual(vals, {'publisher__num_awards__sum': 30})
+
+ vals = Publisher.objects.aggregate(Sum("book__price"))
+ self.assertEqual(vals, {'book__price__sum': Decimal('270.27')})
+
+ def test_aggregate_multi_join(self):
+ vals = Store.objects.aggregate(Max("books__authors__age"))
+ self.assertEqual(vals, {'books__authors__age__max': 57})
+
+ vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
+ self.assertEqual(vals, {'book__publisher__num_awards__min': 1})
+
+ def test_aggregate_alias(self):
+ vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
+ self.assertEqual(vals, {'amazon_mean': Approximate(4.08, places=2)})
+
+ def test_annotate_basic(self):
+ self.assertQuerysetEqual(
+ Book.objects.annotate().order_by('pk'), [
+ "The Definitive Guide to Django: Web Development Done Right",
+ "Sams Teach Yourself Django in 24 Hours",
+ "Practical Django Projects",
+ "Python Web Development with Django",
+ "Artificial Intelligence: A Modern Approach",
+ "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
+ ],
+ lambda b: b.name
+ )
+
+ books = Book.objects.annotate(mean_age=Avg("authors__age"))
+ b = books.get(pk=self.b1.pk)
+ self.assertEqual(
+ b.name,
+ 'The Definitive Guide to Django: Web Development Done Right'
+ )
+ self.assertEqual(b.mean_age, 34.5)
+
+ def test_annotate_defer(self):
+ qs = Book.objects.annotate(
+ page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
+
+ rows = [
+ (self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
+ ]
+ self.assertQuerysetEqual(
+ qs.order_by('pk'), rows,
+ lambda r: (r.id, r.isbn, r.page_sum, r.name)
+ )
+
+ def test_annotate_defer_select_related(self):
+ qs = Book.objects.select_related('contact').annotate(
+ page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
+
+ rows = [
+ (self.b1.id, "159059725", 447, "Adrian Holovaty",
+ "The Definitive Guide to Django: Web Development Done Right")
+ ]
+ self.assertQuerysetEqual(
+ qs.order_by('pk'), rows,
+ lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
+ )
+
+ def test_annotate_m2m(self):
+ books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
+ self.assertQuerysetEqual(
+ books, [
+ ('Artificial Intelligence: A Modern Approach', 51.5),
+ ('Practical Django Projects', 29.0),
+ ('Python Web Development with Django', Approximate(30.3, places=1)),
+ ('Sams Teach Yourself Django in 24 Hours', 45.0)
+ ],
+ lambda b: (b.name, b.authors__age__avg),
+ )
+
+ books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
+ self.assertQuerysetEqual(
+ books, [
+ ('Artificial Intelligence: A Modern Approach', 2),
+ ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
+ ('Practical Django Projects', 1),
+ ('Python Web Development with Django', 3),
+ ('Sams Teach Yourself Django in 24 Hours', 1),
+ ('The Definitive Guide to Django: Web Development Done Right', 2)
+ ],
+ lambda b: (b.name, b.num_authors)
+ )
+
+ def test_backwards_m2m_annotate(self):
+ authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
+ self.assertQuerysetEqual(
+ authors, [
+ ('Adrian Holovaty', 4.5),
+ ('Brad Dayley', 3.0),
+ ('Jacob Kaplan-Moss', 4.5),
+ ('James Bennett', 4.0),
+ ('Paul Bissex', 4.0),
+ ('Stuart Russell', 4.0)
+ ],
+ lambda a: (a.name, a.book__rating__avg)
+ )
+
+ authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
+ self.assertQuerysetEqual(
+ authors, [
+ ('Adrian Holovaty', 1),
+ ('Brad Dayley', 1),
+ ('Jacob Kaplan-Moss', 1),
+ ('James Bennett', 1),
+ ('Jeffrey Forcier', 1),
+ ('Paul Bissex', 1),
+ ('Peter Norvig', 2),
+ ('Stuart Russell', 1),
+ ('Wesley J. Chun', 1)
+ ],
+ lambda a: (a.name, a.num_books)
+ )
+
+ def test_reverse_fkey_annotate(self):
+ books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
+ self.assertQuerysetEqual(
+ books, [
+ ('Artificial Intelligence: A Modern Approach', 7),
+ ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
+ ('Practical Django Projects', 3),
+ ('Python Web Development with Django', 7),
+ ('Sams Teach Yourself Django in 24 Hours', 1),
+ ('The Definitive Guide to Django: Web Development Done Right', 3)
+ ],
+ lambda b: (b.name, b.publisher__num_awards__sum)
+ )
+
+ publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
+ self.assertQuerysetEqual(
+ publishers, [
+ ('Apress', Decimal("59.69")),
+ ("Jonno's House of Books", None),
+ ('Morgan Kaufmann', Decimal("75.00")),
+ ('Prentice Hall', Decimal("112.49")),
+ ('Sams', Decimal("23.09"))
+ ],
+ lambda p: (p.name, p.book__price__sum)
+ )
+
+ def test_annotate_values(self):
+ books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
+ self.assertEqual(
+ books, [
+ {
+ "contact_id": self.a1.id,
+ "id": self.b1.id,
+ "isbn": "159059725",
+ "mean_age": 34.5,
+ "name": "The Definitive Guide to Django: Web Development Done Right",
+ "pages": 447,
+ "price": Approximate(Decimal("30")),
+ "pubdate": datetime.date(2007, 12, 6),
+ "publisher_id": self.p1.id,
+ "rating": 4.5,
+ }
+ ]
+ )
+
+ books = (
+ Book.objects
+ .filter(pk=self.b1.pk)
+ .annotate(mean_age=Avg('authors__age'))
+ .values('pk', 'isbn', 'mean_age')
+ )
+ self.assertEqual(
+ list(books), [
+ {
+ "pk": self.b1.pk,
+ "isbn": "159059725",
+ "mean_age": 34.5,
+ }
+ ]
+ )
+
+ books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
+ self.assertEqual(
+ list(books),
+ [{'name': 'The Definitive Guide to Django: Web Development Done Right'}],
+ )
+
+ books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
+ self.assertEqual(
+ list(books), [
+ {
+ "contact_id": self.a1.id,
+ "id": self.b1.id,
+ "isbn": "159059725",
+ "mean_age": 34.5,
+ "name": "The Definitive Guide to Django: Web Development Done Right",
+ "pages": 447,
+ "price": Approximate(Decimal("30")),
+ "pubdate": datetime.date(2007, 12, 6),
+ "publisher_id": self.p1.id,
+ "rating": 4.5,
+ }
+ ]
+ )
+
+ books = (
+ Book.objects
+ .values("rating")
+ .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
+ .order_by("rating")
+ )
+ self.assertEqual(
+ list(books), [
+ {
+ "rating": 3.0,
+ "n_authors": 1,
+ "mean_age": 45.0,
+ },
+ {
+ "rating": 4.0,
+ "n_authors": 6,
+ "mean_age": Approximate(37.16, places=1)
+ },
+ {
+ "rating": 4.5,
+ "n_authors": 2,
+ "mean_age": 34.5,
+ },
+ {
+ "rating": 5.0,
+ "n_authors": 1,
+ "mean_age": 57.0,
+ }
+ ]
+ )
+
+ authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
+ self.assertQuerysetEqual(
+ authors, [
+ ('Adrian Holovaty', 32.0),
+ ('Brad Dayley', None),
+ ('Jacob Kaplan-Moss', 29.5),
+ ('James Bennett', 34.0),
+ ('Jeffrey Forcier', 27.0),
+ ('Paul Bissex', 31.0),
+ ('Peter Norvig', 46.0),
+ ('Stuart Russell', 57.0),
+ ('Wesley J. Chun', Approximate(33.66, places=1))
+ ],
+ lambda a: (a.name, a.friends__age__avg)
+ )
+
+ def test_count(self):
+ vals = Book.objects.aggregate(Count("rating"))
+ self.assertEqual(vals, {"rating__count": 6})
+
+ vals = Book.objects.aggregate(Count("rating", distinct=True))
+ self.assertEqual(vals, {"rating__count": 4})
+
+ #def test_count_star(self):
+ # with self.assertNumQueries(1) as ctx:
+ # Book.objects.aggregate(n=Count("*"))
+ # sql = ctx.captured_queries[0]['sql']
+ # self.assertIn('SELECT COUNT(*) ', sql)
+
+ def test_non_grouped_annotation_not_in_group_by(self):
+ """
+ An annotation not included in values() before an aggregate should be
+ excluded from the group by clause.
+ """
+ qs = (
+ Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
+ .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
+ )
+ self.assertEqual(list(qs), [{'rating': 4.0, 'count': 2}])
+
+ def test_grouped_annotation_in_group_by(self):
+ """
+ An annotation included in values() before an aggregate should be
+ included in the group by clause.
+ """
+ qs = (
+ Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
+ .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
+ )
+ self.assertEqual(
+ list(qs), [
+ {'rating': 4.0, 'count': 1},
+ {'rating': 4.0, 'count': 2},
+ ]
+ )
+
+ def test_fkey_aggregate(self):
+ explicit = list(Author.objects.annotate(Count('book__id')))
+ implicit = list(Author.objects.annotate(Count('book')))
+ self.assertEqual(explicit, implicit)
+
+ def test_annotate_ordering(self):
+ books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
+ self.assertEqual(
+ list(books), [
+ {'rating': 4.5, 'oldest': 35},
+ {'rating': 3.0, 'oldest': 45},
+ {'rating': 4.0, 'oldest': 57},
+ {'rating': 5.0, 'oldest': 57},
+ ]
+ )
+
+ books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
+ self.assertEqual(
+ list(books), [
+ {'rating': 5.0, 'oldest': 57},
+ {'rating': 4.0, 'oldest': 57},
+ {'rating': 3.0, 'oldest': 45},
+ {'rating': 4.5, 'oldest': 35},
+ ]
+ )
+
+ def test_aggregate_annotation(self):
+ vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
+ self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
+
+ def test_avg_duration_field(self):
+ # Explicit `output_field`.
+ self.assertEqual(
+ Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
+ {'duration__avg': datetime.timedelta(days=1, hours=12)}
+ )
+ # Implicit `output_field`.
+ self.assertEqual(
+ Publisher.objects.aggregate(Avg('duration')),
+ {'duration__avg': datetime.timedelta(days=1, hours=12)}
+ )
+
+ def test_sum_duration_field(self):
+ self.assertEqual(
+ Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
+ {'duration__sum': datetime.timedelta(days=3)}
+ )
+
+ def test_sum_distinct_aggregate(self):
+ """
+ Sum on a distinct() QuerySet should aggregate only the distinct items.
+ """
+ authors = Author.objects.filter(book__in=[self.b5, self.b6])
+ self.assertEqual(authors.count(), 3)
+
+ distinct_authors = authors.distinct()
+ self.assertEqual(distinct_authors.count(), 2)
+
+ # Selected author ages are 57 and 46
+ age_sum = distinct_authors.aggregate(Sum('age'))
+ self.assertEqual(age_sum['age__sum'], 103)
+
+ def test_filtering(self):
+ p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
+ Book.objects.create(
+ name='ExpensiveBook1',
+ pages=1,
+ isbn='111',
+ rating=3.5,
+ price=Decimal("1000"),
+ publisher=p,
+ contact_id=self.a1.id,
+ pubdate=datetime.date(2008, 12, 1)
+ )
+ Book.objects.create(
+ name='ExpensiveBook2',
+ pages=1,
+ isbn='222',
+ rating=4.0,
+ price=Decimal("1000"),
+ publisher=p,
+ contact_id=self.a1.id,
+ pubdate=datetime.date(2008, 12, 2)
+ )
+ Book.objects.create(
+ name='ExpensiveBook3',
+ pages=1,
+ isbn='333',
+ rating=4.5,
+ price=Decimal("35"),
+ publisher=p,
+ contact_id=self.a1.id,
+ pubdate=datetime.date(2008, 12, 3)
+ )
+
+ publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
+ self.assertQuerysetEqual(
+ publishers,
+ ['Apress', 'Prentice Hall', 'Expensive Publisher'],
+ lambda p: p.name,
+ )
+
+ publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
+ self.assertQuerysetEqual(
+ publishers, [
+ "Apress",
+ "Apress",
+ "Sams",
+ "Prentice Hall",
+ "Expensive Publisher",
+ ],
+ lambda p: p.name
+ )
+
+ publishers = (
+ Publisher.objects
+ .annotate(num_books=Count("book__id"))
+ .filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
+ .order_by("pk")
+ )
+ self.assertQuerysetEqual(
+ publishers,
+ ['Apress', 'Prentice Hall', 'Expensive Publisher'],
+ lambda p: p.name,
+ )
+
+ publishers = (
+ Publisher.objects
+ .filter(book__price__lt=Decimal("40.0"))
+ .annotate(num_books=Count("book__id"))
+ .filter(num_books__gt=1)
+ .order_by("pk")
+ )
+ self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name)
+
+ publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
+ self.assertQuerysetEqual(
+ publishers, [
+ "Apress",
+ "Sams",
+ "Prentice Hall",
+ "Morgan Kaufmann",
+ "Expensive Publisher",
+ ],
+ lambda p: p.name
+ )
+
+ publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
+ self.assertQuerysetEqual(
+ publishers,
+ ['Apress', 'Sams', 'Prentice Hall', 'Morgan Kaufmann'],
+ lambda p: p.name
+ )
+
+ publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
+ self.assertQuerysetEqual(
+ publishers,
+ ['Sams', 'Morgan Kaufmann', 'Expensive Publisher'],
+ lambda p: p.name,
+ )
+
+ publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
+ self.assertEqual(len(publishers), 0)
+
+ def test_annotation(self):
+ vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
+ self.assertEqual(vals, {"friends__id__count": 2})
+
+ books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
+ self.assertQuerysetEqual(
+ books, [
+ "The Definitive Guide to Django: Web Development Done Right",
+ "Artificial Intelligence: A Modern Approach",
+ ],
+ lambda b: b.name
+ )
+
+ authors = (
+ Author.objects
+ .annotate(num_friends=Count("friends__id", distinct=True))
+ .filter(num_friends=0)
+ .order_by("pk")
+ )
+ self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name)
+
+ publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
+ self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name)
+
+ publishers = (
+ Publisher.objects
+ .filter(book__price__lt=Decimal("40.0"))
+ .annotate(num_books=Count("book__id"))
+ .filter(num_books__gt=1)
+ )
+ self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name)
+
+ books = (
+ Book.objects
+ .annotate(num_authors=Count("authors__id"))
+ .filter(authors__name__contains="Norvig", num_authors__gt=1)
+ )
+ self.assertQuerysetEqual(
+ books,
+ ['Artificial Intelligence: A Modern Approach'],
+ lambda b: b.name
+ )
+
+ def test_more_aggregation(self):
+ a = Author.objects.get(name__contains='Norvig')
+ b = Book.objects.get(name__contains='Done Right')
+ b.authors.add(a)
+ b.save()
+
+ vals = (
+ Book.objects
+ .annotate(num_authors=Count("authors__id"))
+ .filter(authors__name__contains="Norvig", num_authors__gt=1)
+ .aggregate(Avg("rating"))
+ )
+ self.assertEqual(vals, {"rating__avg": 4.25})
+
+ def test_even_more_aggregate(self):
+ publishers = Publisher.objects.annotate(
+ earliest_book=Min("book__pubdate"),
+ ).exclude(earliest_book=None).order_by("earliest_book").values(
+ 'earliest_book',
+ 'num_awards',
+ 'id',
+ 'name',
+ )
+ self.assertEqual(
+ list(publishers), [
+ {
+ 'earliest_book': datetime.date(1991, 10, 15),
+ 'num_awards': 9,
+ 'id': self.p4.id,
+ 'name': 'Morgan Kaufmann'
+ },
+ {
+ 'earliest_book': datetime.date(1995, 1, 15),
+ 'num_awards': 7,
+ 'id': self.p3.id,
+ 'name': 'Prentice Hall'
+ },
+ {
+ 'earliest_book': datetime.date(2007, 12, 6),
+ 'num_awards': 3,
+ 'id': self.p1.id,
+ 'name': 'Apress'
+ },
+ {
+ 'earliest_book': datetime.date(2008, 3, 3),
+ 'num_awards': 1,
+ 'id': self.p2.id,
+ 'name': 'Sams'
+ }
+ ]
+ )
+
+ vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
+ self.assertEqual(
+ vals,
+ {
+ "friday_night_closing__max": datetime.time(23, 59, 59),
+ "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
+ }
+ )
+
+ def test_annotate_values_list(self):
+ books = (
+ Book.objects
+ .filter(pk=self.b1.pk)
+ .annotate(mean_age=Avg("authors__age"))
+ .values_list("pk", "isbn", "mean_age")
+ )
+ self.assertEqual(list(books), [(self.b1.id, '159059725', 34.5)])
+
+ books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
+ self.assertEqual(list(books), [('159059725',)])
+
+ books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
+ self.assertEqual(list(books), [(34.5,)])
+
+ books = (
+ Book.objects
+ .filter(pk=self.b1.pk)
+ .annotate(mean_age=Avg("authors__age"))
+ .values_list("mean_age", flat=True)
+ )
+ self.assertEqual(list(books), [34.5])
+
+ books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
+ self.assertEqual(
+ list(books), [
+ (Decimal("29.69"), 2),
+ (Decimal('23.09'), 1),
+ (Decimal('30'), 1),
+ (Decimal('75'), 1),
+ (Decimal('82.8'), 1),
+ ]
+ )
+
+ def test_dates_with_aggregation(self):
+ """
+ .dates() returns a distinct set of dates when applied to a
+ QuerySet with aggregation.
+
+ Refs #18056. Previously, .dates() would return distinct (date_kind,
+ aggregation) sets, in this case (year, num_authors), so 2008 would be
+ returned twice because there are books from 2008 with a different
+ number of authors.
+ """
+ dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
+ self.assertQuerysetEqual(
+ dates, [
+ "datetime.date(1991, 1, 1)",
+ "datetime.date(1995, 1, 1)",
+ "datetime.date(2007, 1, 1)",
+ "datetime.date(2008, 1, 1)"
+ ]
+ )
+
+ def test_values_aggregation(self):
+ # Refs #20782
+ max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
+ self.assertEqual(max_rating['max_rating'], 5)
+ max_books_per_rating = Book.objects.values('rating').annotate(
+ books_per_rating=Count('id')
+ ).aggregate(Max('books_per_rating'))
+ self.assertEqual(
+ max_books_per_rating,
+ {'books_per_rating__max': 3})
+
+ def test_ticket17424(self):
+ """
+ Doing exclude() on a foreign model after annotate() doesn't crash.
+ """
+ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
+ annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
+
+ # The value doesn't matter, we just need any negative
+ # constraint on a related model that's a noop.
+ excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
+
+ # Try to generate query tree
+ str(excluded_books.query)
+
+ self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
+
+ # Check internal state
+ self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
+ self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
+
+ def test_ticket12886(self):
+ """
+ Aggregation over sliced queryset works correctly.
+ """
+ qs = Book.objects.all().order_by('-rating')[0:3]
+ vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
+ self.assertAlmostEqual(vals, 4.5, places=2)
+
+ def test_ticket11881(self):
+ """
+ Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
+ select_related() stuff.
+ """
+ qs = Book.objects.all().select_for_update().order_by(
+ 'pk').select_related('publisher').annotate(max_pk=Max('pk'))
+ with CaptureQueriesContext(connection) as captured_queries:
+ qs.aggregate(avg_pk=Avg('max_pk'))
+ self.assertEqual(len(captured_queries), 1)
+ qstr = captured_queries[0]['sql'].lower()
+ self.assertNotIn('for update', qstr)
+ forced_ordering = connection.ops.force_no_ordering()
+ if forced_ordering:
+ # If the backend needs to force an ordering we make sure it's
+ # the only "ORDER BY" clause present in the query.
+ self.assertEqual(
+ re.findall(r'order by (\w+)', qstr),
+ [', '.join(f[1][0] for f in forced_ordering).lower()]
+ )
+ else:
+ self.assertNotIn('order by', qstr)
+ self.assertEqual(qstr.count(' join '), 0)
+
+ def test_decimal_max_digits_has_no_effect(self):
+ Book.objects.all().delete()
+ a1 = Author.objects.first()
+ p1 = Publisher.objects.first()
+ thedate = timezone.now()
+ for i in range(10):
+ Book.objects.create(
+ isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
+ price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
+
+ book = Book.objects.aggregate(price_sum=Sum('price'))
+ self.assertEqual(book['price_sum'], Decimal("99999.80"))
+
+ def test_nonaggregate_aggregation_throws(self):
+ with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'):
+ Book.objects.aggregate(fail=F('price'))
+
+ def test_nonfield_annotation(self):
+ book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
+ self.assertEqual(book.val, 2)
+ book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
+ self.assertEqual(book.val, 2)
+ book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
+ self.assertEqual(book.val, 2)
+
+ def test_missing_output_field_raises_error(self):
+ with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'):
+ Book.objects.annotate(val=Max(2)).first()
+
+ def test_annotation_expressions(self):
+ authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
+ authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
+ for qs in (authors, authors2):
+ self.assertQuerysetEqual(
+ qs, [
+ ('Adrian Holovaty', 132),
+ ('Brad Dayley', None),
+ ('Jacob Kaplan-Moss', 129),
+ ('James Bennett', 63),
+ ('Jeffrey Forcier', 128),
+ ('Paul Bissex', 120),
+ ('Peter Norvig', 103),
+ ('Stuart Russell', 103),
+ ('Wesley J. Chun', 176)
+ ],
+ lambda a: (a.name, a.combined_ages)
+ )
+
+ def test_aggregation_expressions(self):
+ a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
+ a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
+ a3 = Author.objects.aggregate(av_age=Avg('age'))
+ self.assertEqual(a1, {'av_age': 37})
+ self.assertEqual(a2, {'av_age': 37})
+ self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
+
+ def test_avg_decimal_field(self):
+ v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
+ self.assertIsInstance(v, float)
+ self.assertEqual(v, Approximate(47.39, places=2))
+
+ def test_order_of_precedence(self):
+ p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
+ self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
+
+ p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
+ self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
+
+ def test_combine_different_types(self):
+ msg = 'Expression contains mixed types. You must set output_field.'
+ qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price'))
+ with self.assertRaisesMessage(FieldError, msg):
+ qs.first()
+ with self.assertRaisesMessage(FieldError, msg):
+ qs.first()
+
+ b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
+ output_field=IntegerField())).get(pk=self.b4.pk)
+ self.assertEqual(b1.sums, 383)
+
+ b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
+ output_field=FloatField())).get(pk=self.b4.pk)
+ self.assertEqual(b2.sums, 383.69)
+
+ b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
+ output_field=DecimalField())).get(pk=self.b4.pk)
+ self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
+
+ def test_complex_aggregations_require_kwarg(self):
+ with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
+ Author.objects.annotate(Sum(F('age') + F('friends__age')))
+ with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
+ Author.objects.aggregate(Sum('age') / Count('age'))
+ with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
+ Author.objects.aggregate(Sum(1))
+
+ def test_aggregate_over_complex_annotation(self):
+ qs = Author.objects.annotate(
+ combined_ages=Sum(F('age') + F('friends__age')))
+
+ age = qs.aggregate(max_combined_age=Max('combined_ages'))
+ self.assertEqual(age['max_combined_age'], 176)
+
+ age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
+ self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
+
+ age = qs.aggregate(
+ max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
+ self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
+
+ age = qs.aggregate(
+ max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
+ sum_combined_age=Sum('combined_ages'))
+ self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
+ self.assertEqual(age['sum_combined_age'], 954)
+
+ age = qs.aggregate(
+ max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
+ sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
+ self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
+ self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
+
+ def test_values_annotation_with_expression(self):
+ # ensure the F() is promoted to the group by clause
+ qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
+ a = qs.get(name="Adrian Holovaty")
+ self.assertEqual(a['another_age'], 68)
+
+ qs = qs.annotate(friend_count=Count('friends'))
+ a = qs.get(name="Adrian Holovaty")
+ self.assertEqual(a['friend_count'], 2)
+
+ qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
+ name="Adrian Holovaty").order_by('-combined_age')
+ self.assertEqual(
+ list(qs), [
+ {
+ "name": 'Adrian Holovaty',
+ "another_age": 68,
+ "friend_count": 1,
+ "combined_age": 69
+ },
+ {
+ "name": 'Adrian Holovaty',
+ "another_age": 68,
+ "friend_count": 1,
+ "combined_age": 63
+ }
+ ]
+ )
+
+ vals = qs.values('name', 'combined_age')
+ self.assertEqual(
+ list(vals), [
+ {'name': 'Adrian Holovaty', 'combined_age': 69},
+ {'name': 'Adrian Holovaty', 'combined_age': 63},
+ ]
+ )
+
+ def test_annotate_values_aggregate(self):
+ alias_age = Author.objects.annotate(
+ age_alias=F('age')
+ ).values(
+ 'age_alias',
+ ).aggregate(sum_age=Sum('age_alias'))
+
+ age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
+
+ self.assertEqual(alias_age['sum_age'], age['sum_age'])
+
+ def test_annotate_over_annotate(self):
+ author = Author.objects.annotate(
+ age_alias=F('age')
+ ).annotate(
+ sum_age=Sum('age_alias')
+ ).get(name="Adrian Holovaty")
+
+ other_author = Author.objects.annotate(
+ sum_age=Sum('age')
+ ).get(name="Adrian Holovaty")
+
+ self.assertEqual(author.sum_age, other_author.sum_age)
+
+ def test_annotated_aggregate_over_annotated_aggregate(self):
+ with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
+ Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
+
+ class MyMax(Max):
+ def as_sql(self, compiler, connection):
+ self.set_source_expressions(self.get_source_expressions()[0:1])
+ return super().as_sql(compiler, connection)
+
+ with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
+ Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))
+
+ def test_multi_arg_aggregate(self):
+ class MyMax(Max):
+ output_field = DecimalField()
+
+ def as_sql(self, compiler, connection):
+ copy = self.copy()
+ copy.set_source_expressions(copy.get_source_expressions()[0:1])
+ return super(MyMax, copy).as_sql(compiler, connection)
+
+ with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
+ Book.objects.aggregate(MyMax('pages', 'price'))
+
+ with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
+ Book.objects.annotate(MyMax('pages', 'price'))
+
+ Book.objects.aggregate(max_field=MyMax('pages', 'price'))
+
+ def test_add_implementation(self):
+ class MySum(Sum):
+ pass
+
+ # test completely changing how the output is rendered
+ def lower_case_function_override(self, compiler, connection):
+ sql, params = compiler.compile(self.source_expressions[0])
+ substitutions = {'function': self.function.lower(), 'expressions': sql}
+ substitutions.update(self.extra)
+ return self.template % substitutions, params
+ setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
+
+ qs = Book.objects.annotate(
+ sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
+ )
+ self.assertEqual(str(qs.query).count('sum('), 1)
+ b1 = qs.get(pk=self.b4.pk)
+ self.assertEqual(b1.sums, 383)
+
+ # test changing the dict and delegating
+ def lower_case_function_super(self, compiler, connection):
+ self.extra['function'] = self.function.lower()
+ return super(MySum, self).as_sql(compiler, connection)
+ setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
+
+ qs = Book.objects.annotate(
+ sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
+ )
+ self.assertEqual(str(qs.query).count('sum('), 1)
+ b1 = qs.get(pk=self.b4.pk)
+ self.assertEqual(b1.sums, 383)
+
+ # test overriding all parts of the template
+ def be_evil(self, compiler, connection):
+ substitutions = {'function': 'MAX', 'expressions': '2'}
+ substitutions.update(self.extra)
+ return self.template % substitutions, ()
+ setattr(MySum, 'as_' + connection.vendor, be_evil)
+
+ qs = Book.objects.annotate(
+ sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
+ )
+ self.assertEqual(str(qs.query).count('MAX('), 1)
+ b1 = qs.get(pk=self.b4.pk)
+ self.assertEqual(b1.sums, 2)
+
+ def test_complex_values_aggregation(self):
+ max_rating = Book.objects.values('rating').aggregate(
+ double_max_rating=Max('rating') + Max('rating'))
+ self.assertEqual(max_rating['double_max_rating'], 5 * 2)
+
+ max_books_per_rating = Book.objects.values('rating').annotate(
+ books_per_rating=Count('id') + 5
+ ).aggregate(Max('books_per_rating'))
+ self.assertEqual(
+ max_books_per_rating,
+ {'books_per_rating__max': 3 + 5})
+
+# def test_expression_on_aggregation(self):
+#
+# # Create a plain expression
+# class Greatest(Func):
+# function = 'GREATEST'
+#
+# def as_sqlite(self, compiler, connection):
+# return super().as_sql(compiler, connection, function='MAX')
+#
+# qs = Publisher.objects.annotate(
+# price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
+# ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
+# self.assertQuerysetEqual(
+# qs, [1, 3, 7, 9], lambda v: v.num_awards)
+#
+# qs2 = Publisher.objects.annotate(
+# rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
+# output_field=FloatField())
+# ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
+# self.assertQuerysetEqual(
+# qs2, [1, 3], lambda v: v.num_awards)
+
+ def test_arguments_must_be_expressions(self):
+ msg = 'QuerySet.aggregate() received non-expression(s): %s.'
+ with self.assertRaisesMessage(TypeError, msg % FloatField()):
+ Book.objects.aggregate(FloatField())
+ with self.assertRaisesMessage(TypeError, msg % True):
+ Book.objects.aggregate(is_book=True)
+ with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])):
+ Book.objects.aggregate(FloatField(), Avg('price'), is_book=True)
diff --git a/tests/aggregation_regress/__init__.py b/tests/aggregation_regress/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/aggregation_regress/models.py b/tests/aggregation_regress/models.py
new file mode 100644
index 00000000..3498cbf1
--- /dev/null
+++ b/tests/aggregation_regress/models.py
@@ -0,0 +1,104 @@
+from django.contrib.contenttypes.fields import (
+ GenericForeignKey, GenericRelation,
+)
+from django.contrib.contenttypes.models import ContentType
+from django.db import models
+
+
+class Author(models.Model):
+ name = models.CharField(max_length=100)
+ age = models.IntegerField()
+ friends = models.ManyToManyField('self', blank=True)
+
+ def __str__(self):
+ return self.name
+
+
+class Publisher(models.Model):
+ name = models.CharField(max_length=255)
+ num_awards = models.IntegerField()
+
+ def __str__(self):
+ return self.name
+
+
+class ItemTag(models.Model):
+ tag = models.CharField(max_length=100)
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content_object = GenericForeignKey('content_type', 'object_id')
+
+
+class Book(models.Model):
+ isbn = models.CharField(max_length=9)
+ name = models.CharField(max_length=255)
+ pages = models.IntegerField()
+ rating = models.FloatField()
+ price = models.DecimalField(decimal_places=2, max_digits=6)
+ authors = models.ManyToManyField(Author)
+ contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
+ publisher = models.ForeignKey(Publisher, models.CASCADE)
+ pubdate = models.DateField()
+ tags = GenericRelation(ItemTag)
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
+
+
+class Store(models.Model):
+ name = models.CharField(max_length=255)
+ books = models.ManyToManyField(Book)
+ original_opening = models.DateTimeField()
+ friday_night_closing = models.TimeField()
+
+ def __str__(self):
+ return self.name
+
+
+class Entries(models.Model):
+ EntryID = models.AutoField(primary_key=True, db_column='Entry ID')
+ Entry = models.CharField(unique=True, max_length=50)
+ Exclude = models.BooleanField(default=False)
+
+
+class Clues(models.Model):
+ ID = models.AutoField(primary_key=True)
+ EntryID = models.ForeignKey(Entries, models.CASCADE, verbose_name='Entry', db_column='Entry ID')
+ Clue = models.CharField(max_length=150)
+
+
+class WithManualPK(models.Model):
+ # The generic relations regression test needs two different model
+ # classes with the same PK value, and there are some (external)
+ # DB backends that don't work nicely when assigning integer to AutoField
+ # column (MSSQL at least).
+ id = models.IntegerField(primary_key=True)
+
+
+class HardbackBook(Book):
+ weight = models.FloatField()
+
+ def __str__(self):
+ return "%s (hardback): %s" % (self.name, self.weight)
+
+
+# Models for ticket #21150
+class Alfa(models.Model):
+ name = models.CharField(max_length=10, null=True)
+
+
+class Bravo(models.Model):
+ pass
+
+
+class Charlie(models.Model):
+ alfa = models.ForeignKey(Alfa, models.SET_NULL, null=True)
+ bravo = models.ForeignKey(Bravo, models.SET_NULL, null=True)
+
+
+class SelfRefFK(models.Model):
+ name = models.CharField(max_length=50)
+ parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
diff --git a/tests/aggregation_regress/tests.py b/tests/aggregation_regress/tests.py
new file mode 100644
index 00000000..becac848
--- /dev/null
+++ b/tests/aggregation_regress/tests.py
@@ -0,0 +1,1530 @@
+import datetime
+import pickle
+from decimal import Decimal
+from operator import attrgetter
+from unittest import mock
+
+from django.contrib.contenttypes.models import ContentType
+from django.core.exceptions import FieldError
+from django.db import connection
+from django.db.models import (
+ Avg, Case, Count, DecimalField, F, IntegerField, Max, Q, StdDev, Sum,
+ Value, Variance, When,
+)
+from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
+from django.test.utils import Approximate
+
+from .models import (
+ Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
+ Publisher, SelfRefFK, Store, WithManualPK,
+)
+
+
+class AggregationTests(TestCase):
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
+ cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
+ cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
+ cls.a4 = Author.objects.create(name='James Bennett', age=29)
+ cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
+ cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
+ cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
+ cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
+ cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
+ cls.a1.friends.add(cls.a2, cls.a4)
+ cls.a2.friends.add(cls.a1, cls.a7)
+ cls.a4.friends.add(cls.a1)
+ cls.a5.friends.add(cls.a6, cls.a7)
+ cls.a6.friends.add(cls.a5, cls.a7)
+ cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
+ cls.a8.friends.add(cls.a9)
+ cls.a9.friends.add(cls.a8)
+
+ cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
+ cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
+ cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
+ cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
+ cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
+
+ cls.b1 = Book.objects.create(
+ isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
+ pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
+ pubdate=datetime.date(2007, 12, 6)
+ )
+ cls.b2 = Book.objects.create(
+ isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
+ pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
+ pubdate=datetime.date(2008, 3, 3)
+ )
+ cls.b3 = Book.objects.create(
+ isbn='159059996', name='Practical Django Projects',
+ pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
+ pubdate=datetime.date(2008, 6, 23)
+ )
+ cls.b4 = Book.objects.create(
+ isbn='013235613', name='Python Web Development with Django',
+ pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
+ pubdate=datetime.date(2008, 11, 3)
+ )
+ cls.b5 = HardbackBook.objects.create(
+ isbn='013790395', name='Artificial Intelligence: A Modern Approach',
+ pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
+ pubdate=datetime.date(1995, 1, 15), weight=4.5)
+ cls.b6 = HardbackBook.objects.create(
+ isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
+ pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
+ pubdate=datetime.date(1991, 10, 15), weight=3.7)
+ cls.b1.authors.add(cls.a1, cls.a2)
+ cls.b2.authors.add(cls.a3)
+ cls.b3.authors.add(cls.a4)
+ cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
+ cls.b5.authors.add(cls.a8, cls.a9)
+ cls.b6.authors.add(cls.a8)
+
+ s1 = Store.objects.create(
+ name='Amazon.com',
+ original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
+ friday_night_closing=datetime.time(23, 59, 59)
+ )
+ s2 = Store.objects.create(
+ name='Books.com',
+ original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
+ friday_night_closing=datetime.time(23, 59, 59)
+ )
+ s3 = Store.objects.create(
+ name="Mamma and Pappa's Books",
+ original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
+ friday_night_closing=datetime.time(21, 30)
+ )
+ s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
+ s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
+ s3.books.add(cls.b3, cls.b4, cls.b6)
+
+ def assertObjectAttrs(self, obj, **kwargs):
+ for attr, value in kwargs.items():
+ self.assertEqual(getattr(obj, attr), value)
+
+ #def test_annotation_with_value(self):
+ # values = Book.objects.filter(
+ # name='Practical Django Projects',
+ # ).annotate(
+ # discount_price=F('price') * 2,
+ # ).values(
+ # 'discount_price',
+ # ).annotate(sum_discount=Sum('discount_price'))
+ # self.assertSequenceEqual(
+ # values,
+ # [{'discount_price': Decimal('59.38'), 'sum_discount': Decimal('59.38')}]
+ # )
+
+ def test_aggregates_in_where_clause(self):
+ """
+ Regression test for #12822: DatabaseError: aggregates not allowed in
+ WHERE clause
+
+ The subselect works and returns results equivalent to a
+ query with the IDs listed.
+
+ Before the corresponding fix for this bug, this test passed in 1.1 and
+ failed in 1.2-beta (trunk).
+ """
+ qs = Book.objects.values('contact').annotate(Max('id'))
+ qs = qs.order_by('contact').values_list('id__max', flat=True)
+ # don't do anything with the queryset (qs) before including it as a
+ # subquery
+ books = Book.objects.order_by('id')
+ qs1 = books.filter(id__in=qs)
+ qs2 = books.filter(id__in=list(qs))
+ self.assertEqual(list(qs1), list(qs2))
+
+ def test_aggregates_in_where_clause_pre_eval(self):
+ """
+ Regression test for #12822: DatabaseError: aggregates not allowed in
+ WHERE clause
+
+ Same as the above test, but evaluates the queryset for the subquery
+ before it's used as a subquery.
+
+ Before the corresponding fix for this bug, this test failed in both
+ 1.1 and 1.2-beta (trunk).
+ """
+ qs = Book.objects.values('contact').annotate(Max('id'))
+ qs = qs.order_by('contact').values_list('id__max', flat=True)
+ # force the queryset (qs) for the subquery to be evaluated in its
+ # current state
+ list(qs)
+ books = Book.objects.order_by('id')
+ qs1 = books.filter(id__in=qs)
+ qs2 = books.filter(id__in=list(qs))
+ self.assertEqual(list(qs1), list(qs2))
+
+ @skipUnlessDBFeature('supports_subqueries_in_group_by')
+ def test_annotate_with_extra(self):
+ """
+ Regression test for #11916: Extra params + aggregation creates
+ incorrect SQL.
+ """
+ # Oracle doesn't support subqueries in group by clause
+ shortest_book_sql = """
+ SELECT name
+ FROM aggregation_regress_book b
+ WHERE b.publisher_id = aggregation_regress_publisher.id
+ ORDER BY b.pages
+ LIMIT 1
+ """
+ # tests that this query does not raise a DatabaseError due to the full
+ # subselect being (erroneously) added to the GROUP BY parameters
+ qs = Publisher.objects.extra(select={
+ 'name_of_shortest_book': shortest_book_sql,
+ }).annotate(total_books=Count('book'))
+ # force execution of the query
+ list(qs)
+
+ def test_aggregate(self):
+ # Ordering requests are ignored
+ self.assertEqual(
+ Author.objects.order_by("name").aggregate(Avg("age")),
+ {"age__avg": Approximate(37.444, places=1)}
+ )
+
+ # Implicit ordering is also ignored
+ self.assertEqual(
+ Book.objects.aggregate(Sum("pages")),
+ {"pages__sum": 3703},
+ )
+
+ # Baseline results
+ self.assertEqual(
+ Book.objects.aggregate(Sum('pages'), Avg('pages')),
+ {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
+ )
+
+ # Empty values query doesn't affect grouping or results
+ self.assertEqual(
+ Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
+ {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
+ )
+
+ # Aggregate overrides extra selected column
+ self.assertEqual(
+ Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
+ {'pages__sum': 3703}
+ )
+
+ def test_annotation(self):
+ # Annotations get combined with extra select clauses
+ obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
+ select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
+ self.assertObjectAttrs(
+ obj,
+ contact_id=self.a3.id,
+ isbn='067232959',
+ mean_auth_age=45.0,
+ name='Sams Teach Yourself Django in 24 Hours',
+ pages=528,
+ price=Decimal("23.09"),
+ pubdate=datetime.date(2008, 3, 3),
+ publisher_id=self.p2.id,
+ rating=3.0
+ )
+ # Different DB backends return different types for the extra select computation
+ self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
+
+ # Order of the annotate/extra in the query doesn't matter
+ obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
+ mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
+ self.assertObjectAttrs(
+ obj,
+ contact_id=self.a3.id,
+ isbn='067232959',
+ mean_auth_age=45.0,
+ name='Sams Teach Yourself Django in 24 Hours',
+ pages=528,
+ price=Decimal("23.09"),
+ pubdate=datetime.date(2008, 3, 3),
+ publisher_id=self.p2.id,
+ rating=3.0
+ )
+ # Different DB backends return different types for the extra select computation
+ self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
+
+ # Values queries can be combined with annotate and extra
+ obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
+ select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
+ manufacture_cost = obj['manufacture_cost']
+ self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
+ del obj['manufacture_cost']
+ self.assertEqual(obj, {
+ 'id': self.b2.id,
+ 'contact_id': self.a3.id,
+ 'isbn': '067232959',
+ 'mean_auth_age': 45.0,
+ 'name': 'Sams Teach Yourself Django in 24 Hours',
+ 'pages': 528,
+ 'price': Decimal('23.09'),
+ 'pubdate': datetime.date(2008, 3, 3),
+ 'publisher_id': self.p2.id,
+ 'rating': 3.0,
+ })
+
+ # The order of the (empty) values, annotate and extra clauses doesn't
+ # matter
+ obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
+ select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
+ manufacture_cost = obj['manufacture_cost']
+ self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
+ del obj['manufacture_cost']
+ self.assertEqual(obj, {
+ 'id': self.b2.id,
+ 'contact_id': self.a3.id,
+ 'isbn': '067232959',
+ 'mean_auth_age': 45.0,
+ 'name': 'Sams Teach Yourself Django in 24 Hours',
+ 'pages': 528,
+ 'price': Decimal('23.09'),
+ 'pubdate': datetime.date(2008, 3, 3),
+ 'publisher_id': self.p2.id,
+ 'rating': 3.0
+ })
+
+ # If the annotation precedes the values clause, it won't be included
+ # unless it is explicitly named
+ obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
+ select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
+ self.assertEqual(obj, {
+ "name": 'The Definitive Guide to Django: Web Development Done Right',
+ })
+
+ obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
+ select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
+ self.assertEqual(obj, {
+ 'mean_auth_age': 34.5,
+ 'name': 'The Definitive Guide to Django: Web Development Done Right',
+ })
+
+ # If an annotation isn't included in the values, it can still be used
+ # in a filter
+ qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
+ self.assertSequenceEqual(
+ qs, [
+ {"name": 'Python Web Development with Django'}
+ ],
+ )
+
+ # The annotations are added to values output if values() precedes
+ # annotate()
+ obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
+ select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
+ self.assertEqual(obj, {
+ 'mean_auth_age': 34.5,
+ 'name': 'The Definitive Guide to Django: Web Development Done Right',
+ })
+
+ # All of the objects are getting counted (allow_nulls) and that values
+ # respects the amount of objects
+ self.assertEqual(
+ len(Author.objects.annotate(Avg('friends__age')).values()),
+ 9
+ )
+
+ # Consecutive calls to annotate accumulate in the query
+ qs = (
+ Book.objects
+ .values('price')
+ .annotate(oldest=Max('authors__age'))
+ .order_by('oldest', 'price')
+ .annotate(Max('publisher__num_awards'))
+ )
+ self.assertSequenceEqual(
+ qs, [
+ {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
+ {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
+ {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
+ {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
+ {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
+ ],
+ )
+
+ def test_aggregate_annotation(self):
+ # Aggregates can be composed over annotations.
+ # The return type is derived from the composed aggregate
+ vals = (
+ Book.objects
+ .all()
+ .annotate(num_authors=Count('authors__id'))
+ .aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
+ )
+ self.assertEqual(vals, {
+ 'num_authors__sum': 10,
+ 'num_authors__avg': Approximate(1.666, places=2),
+ 'pages__max': 1132,
+ 'price__max': Decimal("82.80")
+ })
+
+ # Regression for #15624 - Missing SELECT columns when using values, annotate
+ # and aggregate in a single query
+ self.assertEqual(
+ Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
+ {'c__max': 3}
+ )
+
+ def test_conditional_aggreate(self):
+ # Conditional aggregation of a grouped queryset.
+ self.assertEqual(
+ Book.objects.annotate(c=Count('authors')).values('pk').aggregate(test=Sum(
+ Case(When(c__gt=1, then=1), output_field=IntegerField())
+ ))['test'],
+ 3
+ )
+
+ def test_sliced_conditional_aggregate(self):
+ self.assertEqual(
+ Author.objects.all()[:5].aggregate(test=Sum(Case(
+ When(age__lte=35, then=1), output_field=IntegerField()
+ )))['test'],
+ 3
+ )
+
+ #def test_annotated_conditional_aggregate(self):
+ # annotated_qs = Book.objects.annotate(discount_price=F('price') * 0.75)
+ # self.assertAlmostEqual(
+ # annotated_qs.aggregate(test=Avg(Case(
+ # When(pages__lt=400, then='discount_price'),
+ # output_field=DecimalField()
+ # )))['test'],
+ # 22.27, places=2
+ # )
+
+ def test_distinct_conditional_aggregate(self):
+ self.assertEqual(
+ Book.objects.distinct().aggregate(test=Avg(Case(
+ When(price=Decimal('29.69'), then='pages'),
+ output_field=IntegerField()
+ )))['test'],
+ 325
+ )
+
+ def test_conditional_aggregate_on_complex_condition(self):
+ self.assertEqual(
+ Book.objects.distinct().aggregate(test=Avg(Case(
+ When(Q(price__gte=Decimal('29')) & Q(price__lt=Decimal('30')), then='pages'),
+ output_field=IntegerField()
+ )))['test'],
+ 325
+ )
+
+ def test_decimal_aggregate_annotation_filter(self):
+ """
+ Filtering on an aggregate annotation with Decimal values should work.
+ Requires special handling on SQLite (#18247).
+ """
+ self.assertEqual(
+ len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
+ 1
+ )
+ self.assertEqual(
+ len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
+ 4
+ )
+
+ def test_field_error(self):
+ # Bad field requests in aggregates are caught and reported
+ msg = (
+ "Cannot resolve keyword 'foo' into field. Choices are: authors, "
+ "contact, contact_id, hardbackbook, id, isbn, name, pages, price, "
+ "pubdate, publisher, publisher_id, rating, store, tags"
+ )
+ with self.assertRaisesMessage(FieldError, msg):
+ Book.objects.all().aggregate(num_authors=Count('foo'))
+
+ with self.assertRaisesMessage(FieldError, msg):
+ Book.objects.all().annotate(num_authors=Count('foo'))
+
+ msg = (
+ "Cannot resolve keyword 'foo' into field. Choices are: authors, "
+ "contact, contact_id, hardbackbook, id, isbn, name, num_authors, "
+ "pages, price, pubdate, publisher, publisher_id, rating, store, tags"
+ )
+ with self.assertRaisesMessage(FieldError, msg):
+ Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
+
+ def test_more(self):
+ # Old-style count aggregations can be mixed with new-style
+ self.assertEqual(
+ Book.objects.annotate(num_authors=Count('authors')).count(),
+ 6
+ )
+
+ # Non-ordinal, non-computed Aggregates over annotations correctly
+ # inherit the annotation's internal type if the annotation is ordinal
+ # or computed
+ vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
+ self.assertEqual(
+ vals,
+ {'num_authors__max': 3}
+ )
+
+ vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
+ self.assertEqual(
+ vals,
+ {'avg_price__max': 75.0}
+ )
+
+ # Aliases are quoted to protected aliases that might be reserved names
+ vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
+ self.assertEqual(
+ vals,
+ {'number': 1132, 'select': 1132}
+ )
+
+ # Regression for #10064: select_related() plays nice with aggregates
+ obj = Book.objects.select_related('publisher').annotate(
+ num_authors=Count('authors')).values().get(isbn='013790395')
+ self.assertEqual(obj, {
+ 'contact_id': self.a8.id,
+ 'id': self.b5.id,
+ 'isbn': '013790395',
+ 'name': 'Artificial Intelligence: A Modern Approach',
+ 'num_authors': 2,
+ 'pages': 1132,
+ 'price': Decimal("82.8"),
+ 'pubdate': datetime.date(1995, 1, 15),
+ 'publisher_id': self.p3.id,
+ 'rating': 4.0,
+ })
+
+ # Regression for #10010: exclude on an aggregate field is correctly
+ # negated
+ self.assertEqual(
+ len(Book.objects.annotate(num_authors=Count('authors'))),
+ 6
+ )
+ self.assertEqual(
+ len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
+ 1
+ )
+ self.assertEqual(
+ len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
+ 5
+ )
+
+ self.assertEqual(
+ len(
+ Book.objects
+ .annotate(num_authors=Count('authors'))
+ .filter(num_authors__lt=3)
+ .exclude(num_authors__lt=2)
+ ),
+ 2
+ )
+ self.assertEqual(
+ len(
+ Book.objects
+ .annotate(num_authors=Count('authors'))
+ .exclude(num_authors__lt=2)
+ .filter(num_authors__lt=3)
+ ),
+ 2
+ )
+
+ def test_aggregate_fexpr(self):
+ # Aggregates can be used with F() expressions
+ # ... where the F() is pushed into the HAVING clause
+ qs = (
+ Publisher.objects
+ .annotate(num_books=Count('book'))
+ .filter(num_books__lt=F('num_awards') / 2)
+ .order_by('name')
+ .values('name', 'num_books', 'num_awards')
+ )
+ self.assertSequenceEqual(
+ qs, [
+ {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
+ {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
+ ],
+ )
+
+ qs = (
+ Publisher.objects
+ .annotate(num_books=Count('book'))
+ .exclude(num_books__lt=F('num_awards') / 2)
+ .order_by('name')
+ .values('name', 'num_books', 'num_awards')
+ )
+ self.assertSequenceEqual(
+ qs, [
+ {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
+ {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
+ {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
+ ],
+ )
+
+ # ... and where the F() references an aggregate
+ qs = (
+ Publisher.objects
+ .annotate(num_books=Count('book'))
+ .filter(num_awards__gt=2 * F('num_books'))
+ .order_by('name')
+ .values('name', 'num_books', 'num_awards')
+ )
+ self.assertSequenceEqual(
+ qs, [
+ {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
+ {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
+ ],
+ )
+
+ qs = (
+ Publisher.objects
+ .annotate(num_books=Count('book'))
+ .exclude(num_books__lt=F('num_awards') / 2)
+ .order_by('name')
+ .values('name', 'num_books', 'num_awards')
+ )
+ self.assertSequenceEqual(
+ qs, [
+ {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
+ {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
+ {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
+ ],
+ )
+
+ def test_db_col_table(self):
+ # Tests on fields with non-default table and column names.
+ qs = (
+ Clues.objects
+ .values('EntryID__Entry')
+ .annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
+ )
+ self.assertQuerysetEqual(qs, [])
+
+ qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
+ self.assertQuerysetEqual(qs, [])
+
+ def test_boolean_conversion(self):
+ # Aggregates mixed up ordering of columns for backend's convert_values
+ # method. Refs #21126.
+ e = Entries.objects.create(Entry='foo')
+ c = Clues.objects.create(EntryID=e, Clue='bar')
+ qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
+ self.assertSequenceEqual(qs, [c])
+ self.assertEqual(qs[0].EntryID, e)
+ self.assertIs(qs[0].EntryID.Exclude, False)
+
+ def test_empty(self):
+ # Regression for #10089: Check handling of empty result sets with
+ # aggregates
+ self.assertEqual(
+ Book.objects.filter(id__in=[]).count(),
+ 0
+ )
+
+ vals = (
+ Book.objects
+ .filter(id__in=[])
+ .aggregate(
+ num_authors=Count('authors'),
+ avg_authors=Avg('authors'),
+ max_authors=Max('authors'),
+ max_price=Max('price'),
+ max_rating=Max('rating'),
+ )
+ )
+ self.assertEqual(
+ vals,
+ {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
+ )
+
+ qs = (
+ Publisher.objects
+ .filter(name="Jonno's House of Books")
+ .annotate(
+ num_authors=Count('book__authors'),
+ avg_authors=Avg('book__authors'),
+ max_authors=Max('book__authors'),
+ max_price=Max('book__price'),
+ max_rating=Max('book__rating'),
+ ).values()
+ )
+ self.assertSequenceEqual(
+ qs,
+ [{
+ 'max_authors': None,
+ 'name': "Jonno's House of Books",
+ 'num_awards': 0,
+ 'max_price': None,
+ 'num_authors': 0,
+ 'max_rating': None,
+ 'id': self.p5.id,
+ 'avg_authors': None,
+ }],
+ )
+
+ def test_more_more(self):
+ # Regression for #10113 - Fields mentioned in order_by() must be
+ # included in the GROUP BY. This only becomes a problem when the
+ # order_by introduces a new join.
+ self.assertQuerysetEqual(
+ Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
+ "Practical Django Projects",
+ "The Definitive Guide to Django: Web Development Done Right",
+ "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
+ "Artificial Intelligence: A Modern Approach",
+ "Python Web Development with Django",
+ "Sams Teach Yourself Django in 24 Hours",
+ ],
+ lambda b: b.name
+ )
+
+ # Regression for #10127 - Empty select_related() works with annotate
+ qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
+ self.assertQuerysetEqual(
+ qs,
+ [
+ ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
+ ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
+ (
+ 'Python Web Development with Django',
+ Approximate(30.333, places=2),
+ 'Prentice Hall',
+ 'Jeffrey Forcier',
+ ),
+ ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
+ ],
+ lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
+ )
+
+ # Regression for #10132 - If the values() clause only mentioned extra
+ # (select=) columns, those columns are used for grouping
+ qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
+ self.assertSequenceEqual(
+ qs, [
+ {'pub': self.b1.id, 'id__count': 2},
+ {'pub': self.b2.id, 'id__count': 1},
+ {'pub': self.b3.id, 'id__count': 2},
+ {'pub': self.b4.id, 'id__count': 1}
+ ],
+ )
+
+ qs = (
+ Book.objects
+ .extra(select={'pub': 'publisher_id', 'foo': 'pages'})
+ .values('pub')
+ .annotate(Count('id'))
+ .order_by('pub')
+ )
+ self.assertSequenceEqual(
+ qs, [
+ {'pub': self.p1.id, 'id__count': 2},
+ {'pub': self.p2.id, 'id__count': 1},
+ {'pub': self.p3.id, 'id__count': 2},
+ {'pub': self.p4.id, 'id__count': 1}
+ ],
+ )
+
+ # Regression for #10182 - Queries with aggregate calls are correctly
+ # realiased when used in a subquery
+ ids = (
+ Book.objects
+ .filter(pages__gt=100)
+ .annotate(n_authors=Count('authors'))
+ .filter(n_authors__gt=2)
+ .order_by('n_authors')
+ )
+ self.assertQuerysetEqual(
+ Book.objects.filter(id__in=ids), [
+ "Python Web Development with Django",
+ ],
+ lambda b: b.name
+ )
+
+ # Regression for #15709 - Ensure each group_by field only exists once
+ # per query
+ qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
+ # There is just one GROUP BY clause (zero commas means at most one clause).
+ self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
+
+ def test_duplicate_alias(self):
+ # Regression for #11256 - duplicating a default alias raises ValueError.
+ msg = (
+ "The named annotation 'authors__age__avg' conflicts with "
+ "the default name for another annotation."
+ )
+ with self.assertRaisesMessage(ValueError, msg):
+ Book.objects.all().annotate(Avg('authors__age'), authors__age__avg=Avg('authors__age'))
+
+ def test_field_name_conflict(self):
+ # Regression for #11256 - providing an aggregate name
+ # that conflicts with a field name on the model raises ValueError
+ msg = "The annotation 'age' conflicts with a field on the model."
+ with self.assertRaisesMessage(ValueError, msg):
+ Author.objects.annotate(age=Avg('friends__age'))
+
+ def test_m2m_name_conflict(self):
+ # Regression for #11256 - providing an aggregate name
+ # that conflicts with an m2m name on the model raises ValueError
+ msg = "The annotation 'friends' conflicts with a field on the model."
+ with self.assertRaisesMessage(ValueError, msg):
+ Author.objects.annotate(friends=Count('friends'))
+
+ def test_fk_attname_conflict(self):
+ msg = "The annotation 'contact_id' conflicts with a field on the model."
+ with self.assertRaisesMessage(ValueError, msg):
+ Book.objects.annotate(contact_id=F('publisher_id'))
+
+ def test_values_queryset_non_conflict(self):
+ # Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
+
+ # age is a field on Author, so it shouldn't be allowed as an aggregate.
+ # But age isn't included in values(), so it is.
+ results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
+ self.assertEqual(len(results), 9)
+ self.assertEqual(results[0]['name'], 'Adrian Holovaty')
+ self.assertEqual(results[0]['age'], 1)
+
+ # Same problem, but aggregating over m2m fields
+ results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
+ self.assertEqual(len(results), 9)
+ self.assertEqual(results[0]['name'], 'Adrian Holovaty')
+ self.assertEqual(results[0]['age'], 32.0)
+
+ # Same problem, but colliding with an m2m field
+ results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
+ self.assertEqual(len(results), 9)
+ self.assertEqual(results[0]['name'], 'Adrian Holovaty')
+ self.assertEqual(results[0]['friends'], 2)
+
+ def test_reverse_relation_name_conflict(self):
+ # Regression for #11256 - providing an aggregate name
+ # that conflicts with a reverse-related name on the model raises ValueError
+ msg = "The annotation 'book_contact_set' conflicts with a field on the model."
+ with self.assertRaisesMessage(ValueError, msg):
+ Author.objects.annotate(book_contact_set=Avg('friends__age'))
+
+ def test_pickle(self):
+ # Regression for #10197 -- Queries with aggregates can be pickled.
+ # First check that pickling is possible at all. No crash = success
+ qs = Book.objects.annotate(num_authors=Count('authors'))
+ pickle.dumps(qs)
+
+ # Then check that the round trip works.
+ query = qs.query.get_compiler(qs.db).as_sql()[0]
+ qs2 = pickle.loads(pickle.dumps(qs))
+ self.assertEqual(
+ qs2.query.get_compiler(qs2.db).as_sql()[0],
+ query,
+ )
+
+ def test_more_more_more(self):
+ # Regression for #10199 - Aggregate calls clone the original query so
+ # the original query can still be used
+ books = Book.objects.all()
+ books.aggregate(Avg("authors__age"))
+ self.assertQuerysetEqual(
+ books.all(), [
+ 'Artificial Intelligence: A Modern Approach',
+ 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
+ 'Practical Django Projects',
+ 'Python Web Development with Django',
+ 'Sams Teach Yourself Django in 24 Hours',
+ 'The Definitive Guide to Django: Web Development Done Right'
+ ],
+ lambda b: b.name
+ )
+
+ # Regression for #10248 - Annotations work with dates()
+ qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
+ self.assertSequenceEqual(
+ qs, [
+ datetime.date(1995, 1, 15),
+ datetime.date(2007, 12, 6),
+ ],
+ )
+
+ # Regression for #10290 - extra selects with parameters can be used for
+ # grouping.
+ qs = (
+ Book.objects
+ .annotate(mean_auth_age=Avg('authors__age'))
+ .extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2])
+ .order_by('sheets')
+ .values('sheets')
+ )
+ self.assertQuerysetEqual(
+ qs, [
+ 150,
+ 175,
+ 224,
+ 264,
+ 473,
+ 566
+ ],
+ lambda b: int(b["sheets"])
+ )
+
+ # Regression for 10425 - annotations don't get in the way of a count()
+ # clause
+ self.assertEqual(
+ Book.objects.values('publisher').annotate(Count('publisher')).count(),
+ 4
+ )
+ self.assertEqual(
+ Book.objects.annotate(Count('publisher')).values('publisher').count(),
+ 6
+ )
+
+ # Note: intentionally no order_by(), that case needs tests, too.
+ publishers = Publisher.objects.filter(id__in=[1, 2])
+ self.assertEqual(
+ sorted(p.name for p in publishers),
+ [
+ "Apress",
+ "Sams"
+ ]
+ )
+
+ publishers = publishers.annotate(n_books=Count("book"))
+ sorted_publishers = sorted(publishers, key=lambda x: x.name)
+ self.assertEqual(
+ sorted_publishers[0].n_books,
+ 2
+ )
+ self.assertEqual(
+ sorted_publishers[1].n_books,
+ 1
+ )
+
+ self.assertEqual(
+ sorted(p.name for p in publishers),
+ [
+ "Apress",
+ "Sams"
+ ]
+ )
+
+ books = Book.objects.filter(publisher__in=publishers)
+ self.assertQuerysetEqual(
+ books, [
+ "Practical Django Projects",
+ "Sams Teach Yourself Django in 24 Hours",
+ "The Definitive Guide to Django: Web Development Done Right",
+ ],
+ lambda b: b.name
+ )
+ self.assertEqual(
+ sorted(p.name for p in publishers),
+ [
+ "Apress",
+ "Sams"
+ ]
+ )
+
+ # Regression for 10666 - inherited fields work with annotations and
+ # aggregations
+ self.assertEqual(
+ HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
+ {'n_pages': 2078}
+ )
+
+ self.assertEqual(
+ HardbackBook.objects.aggregate(n_pages=Sum('pages')),
+ {'n_pages': 2078},
+ )
+
+ qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
+ self.assertSequenceEqual(
+ qs,
+ [
+ {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
+ {
+ 'n_authors': 1,
+ 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
+ }
+ ],
+ )
+
+ qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
+ self.assertSequenceEqual(
+ qs,
+ [
+ {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
+ {
+ 'n_authors': 1,
+ 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
+ }
+ ],
+ )
+
+ # Regression for #10766 - Shouldn't be able to reference an aggregate
+ # fields in an aggregate() call.
+ msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate"
+ with self.assertRaisesMessage(FieldError, msg):
+ Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
+
+ def test_empty_filter_count(self):
+ self.assertEqual(
+ Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
+ 0
+ )
+
+ def test_empty_filter_aggregate(self):
+ self.assertEqual(
+ Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
+ {"pk__count": None}
+ )
+
+ def test_none_call_before_aggregate(self):
+ # Regression for #11789
+ self.assertEqual(
+ Author.objects.none().aggregate(Avg('age')),
+ {'age__avg': None}
+ )
+
+ def test_annotate_and_join(self):
+ self.assertEqual(
+ Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
+ Author.objects.count()
+ )
+
+ def test_f_expression_annotation(self):
+ # Books with less than 200 pages per author.
+ qs = Book.objects.values("name").annotate(
+ n_authors=Count("authors")
+ ).filter(
+ pages__lt=F("n_authors") * 200
+ ).values_list("pk")
+ self.assertQuerysetEqual(
+ Book.objects.filter(pk__in=qs), [
+ "Python Web Development with Django"
+ ],
+ attrgetter("name")
+ )
+
+ def test_values_annotate_values(self):
+ qs = Book.objects.values("name").annotate(
+ n_authors=Count("authors")
+ ).values_list("pk", flat=True)
+ self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
+
+ def test_having_group_by(self):
+ # When a field occurs on the LHS of a HAVING clause that it
+ # appears correctly in the GROUP BY clause
+ qs = Book.objects.values_list("name").annotate(
+ n_authors=Count("authors")
+ ).filter(
+ pages__gt=F("n_authors")
+ ).values_list("name", flat=True)
+ # Results should be the same, all Books have more pages than authors
+ self.assertEqual(
+ list(qs), list(Book.objects.values_list("name", flat=True))
+ )
+
+ def test_values_list_annotation_args_ordering(self):
+ """
+ Annotate *args ordering should be preserved in values_list results.
+ **kwargs comes after *args.
+ Regression test for #23659.
+ """
+ books = Book.objects.values_list("publisher__name").annotate(
+ Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
+ ).order_by("-publisher__name")
+ self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
+
+ def test_annotation_disjunction(self):
+ qs = Book.objects.annotate(n_authors=Count("authors")).filter(
+ Q(n_authors=2) | Q(name="Python Web Development with Django")
+ )
+ self.assertQuerysetEqual(
+ qs, [
+ "Artificial Intelligence: A Modern Approach",
+ "Python Web Development with Django",
+ "The Definitive Guide to Django: Web Development Done Right",
+ ],
+ attrgetter("name")
+ )
+
+ qs = (
+ Book.objects
+ .annotate(n_authors=Count("authors"))
+ .filter(
+ Q(name="The Definitive Guide to Django: Web Development Done Right") |
+ (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
+ )
+ )
+ self.assertQuerysetEqual(
+ qs,
+ [
+ "The Definitive Guide to Django: Web Development Done Right",
+ ],
+ attrgetter("name")
+ )
+
+ qs = Publisher.objects.annotate(
+ rating_sum=Sum("book__rating"),
+ book_count=Count("book")
+ ).filter(
+ Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
+ ).order_by('pk')
+ self.assertQuerysetEqual(
+ qs, [
+ "Apress",
+ "Prentice Hall",
+ "Jonno's House of Books",
+ ],
+ attrgetter("name")
+ )
+
+ qs = Publisher.objects.annotate(
+ rating_sum=Sum("book__rating"),
+ book_count=Count("book")
+ ).filter(
+ Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
+ ).order_by("num_awards")
+ self.assertQuerysetEqual(
+ qs, [
+ "Jonno's House of Books",
+ "Sams",
+ "Apress",
+ "Prentice Hall",
+ "Morgan Kaufmann"
+ ],
+ attrgetter("name")
+ )
+
+ def test_quoting_aggregate_order_by(self):
+ qs = Book.objects.filter(
+ name="Python Web Development with Django"
+ ).annotate(
+ authorCount=Count("authors")
+ ).order_by("authorCount")
+ self.assertQuerysetEqual(
+ qs, [
+ ("Python Web Development with Django", 3),
+ ],
+ lambda b: (b.name, b.authorCount)
+ )
+
+ @skipUnlessDBFeature('supports_stddev')
+ def test_stddev(self):
+ self.assertEqual(
+ Book.objects.aggregate(StdDev('pages')),
+ {'pages__stddev': Approximate(311.46, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(StdDev('rating')),
+ {'rating__stddev': Approximate(0.60, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(StdDev('price')),
+ {'price__stddev': Approximate(24.16, 2)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(StdDev('pages', sample=True)),
+ {'pages__stddev': Approximate(341.19, 2)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(StdDev('rating', sample=True)),
+ {'rating__stddev': Approximate(0.66, 2)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(StdDev('price', sample=True)),
+ {'price__stddev': Approximate(26.46, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(Variance('pages')),
+ {'pages__variance': Approximate(97010.80, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(Variance('rating')),
+ {'rating__variance': Approximate(0.36, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(Variance('price')),
+ {'price__variance': Approximate(583.77, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(Variance('pages', sample=True)),
+ {'pages__variance': Approximate(116412.96, 1)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(Variance('rating', sample=True)),
+ {'rating__variance': Approximate(0.44, 2)}
+ )
+
+ self.assertEqual(
+ Book.objects.aggregate(Variance('price', sample=True)),
+ {'price__variance': Approximate(700.53, 2)}
+ )
+
+ def test_filtering_by_annotation_name(self):
+ # Regression test for #14476
+
+ # The name of the explicitly provided annotation name in this case
+ # poses no problem
+ qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ ['Peter Norvig'],
+ lambda b: b.name
+ )
+ # Neither in this case
+ qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ ['Peter Norvig'],
+ lambda b: b.name
+ )
+ # This case used to fail because the ORM couldn't resolve the
+ # automatically generated annotation name `book__count`
+ qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ ['Peter Norvig'],
+ lambda b: b.name
+ )
+ # Referencing the auto-generated name in an aggregate() also works.
+ self.assertEqual(
+ Author.objects.annotate(Count('book')).aggregate(Max('book__count')),
+ {'book__count__max': 2}
+ )
+
+ def test_annotate_joins(self):
+ """
+ The base table's join isn't promoted to LOUTER. This could
+ cause the query generation to fail if there is an exclude() for fk-field
+ in the query, too. Refs #19087.
+ """
+ qs = Book.objects.annotate(n=Count('pk'))
+ self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
+ # The query executes without problems.
+ self.assertEqual(len(qs.exclude(publisher=-1)), 6)
+
+ @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
+ def test_aggregate_duplicate_columns(self):
+ # Regression test for #17144
+
+ results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
+
+ # There should only be one GROUP BY clause, for the `id` column.
+ # `name` and `age` should not be grouped on.
+ _, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
+ self.assertEqual(len(group_by), 1)
+ self.assertIn('id', group_by[0][0])
+ self.assertNotIn('name', group_by[0][0])
+ self.assertNotIn('age', group_by[0][0])
+ self.assertEqual(
+ [(a.name, a.num_contacts) for a in results.order_by('name')],
+ [
+ ('Adrian Holovaty', 1),
+ ('Brad Dayley', 1),
+ ('Jacob Kaplan-Moss', 0),
+ ('James Bennett', 1),
+ ('Jeffrey Forcier', 1),
+ ('Paul Bissex', 0),
+ ('Peter Norvig', 2),
+ ('Stuart Russell', 0),
+ ('Wesley J. Chun', 0),
+ ]
+ )
+
+ @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
+ def test_aggregate_duplicate_columns_only(self):
+ # Works with only() too.
+ results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
+ _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
+ self.assertEqual(len(grouping), 1)
+ self.assertIn('id', grouping[0][0])
+ self.assertNotIn('name', grouping[0][0])
+ self.assertNotIn('age', grouping[0][0])
+ self.assertEqual(
+ [(a.name, a.num_contacts) for a in results.order_by('name')],
+ [
+ ('Adrian Holovaty', 1),
+ ('Brad Dayley', 1),
+ ('Jacob Kaplan-Moss', 0),
+ ('James Bennett', 1),
+ ('Jeffrey Forcier', 1),
+ ('Paul Bissex', 0),
+ ('Peter Norvig', 2),
+ ('Stuart Russell', 0),
+ ('Wesley J. Chun', 0),
+ ]
+ )
+
+ @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
+ def test_aggregate_duplicate_columns_select_related(self):
+ # And select_related()
+ results = Book.objects.select_related('contact').annotate(
+ num_authors=Count('authors'))
+ _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
+ # In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
+ self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
+ self.assertIn('id', grouping[0][0])
+ self.assertNotIn('name', grouping[0][0])
+ self.assertNotIn('contact', grouping[0][0])
+ self.assertEqual(
+ [(b.name, b.num_authors) for b in results.order_by('name')],
+ [
+ ('Artificial Intelligence: A Modern Approach', 2),
+ ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
+ ('Practical Django Projects', 1),
+ ('Python Web Development with Django', 3),
+ ('Sams Teach Yourself Django in 24 Hours', 1),
+ ('The Definitive Guide to Django: Web Development Done Right', 2)
+ ]
+ )
+
+ @skipUnlessDBFeature('allows_group_by_selected_pks')
+ def test_aggregate_ummanaged_model_columns(self):
+ """
+ Unmanaged models are sometimes used to represent database views which
+ may not allow grouping by selected primary key.
+ """
+ def assertQuerysetResults(queryset):
+ self.assertEqual(
+ [(b.name, b.num_authors) for b in queryset.order_by('name')],
+ [
+ ('Artificial Intelligence: A Modern Approach', 2),
+ ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
+ ('Practical Django Projects', 1),
+ ('Python Web Development with Django', 3),
+ ('Sams Teach Yourself Django in 24 Hours', 1),
+ ('The Definitive Guide to Django: Web Development Done Right', 2),
+ ]
+ )
+ queryset = Book.objects.select_related('contact').annotate(num_authors=Count('authors'))
+ # Unmanaged origin model.
+ with mock.patch.object(Book._meta, 'managed', False):
+ _, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup()
+ self.assertEqual(len(grouping), len(Book._meta.fields) + 1)
+ for index, field in enumerate(Book._meta.fields):
+ self.assertIn(field.name, grouping[index][0])
+ self.assertIn(Author._meta.pk.name, grouping[-1][0])
+ assertQuerysetResults(queryset)
+ # Unmanaged related model.
+ with mock.patch.object(Author._meta, 'managed', False):
+ _, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup()
+ self.assertEqual(len(grouping), len(Author._meta.fields) + 1)
+ self.assertIn(Book._meta.pk.name, grouping[0][0])
+ for index, field in enumerate(Author._meta.fields):
+ self.assertIn(field.name, grouping[index + 1][0])
+ assertQuerysetResults(queryset)
+
+ def test_reverse_join_trimming(self):
+ qs = Author.objects.annotate(Count('book_contact_set__contact'))
+ self.assertIn(' JOIN ', str(qs.query))
+
+ def test_aggregation_with_generic_reverse_relation(self):
+ """
+ Regression test for #10870: Aggregates with joins ignore extra
+ filters provided by setup_joins
+
+ tests aggregations with generic reverse relations
+ """
+ django_book = Book.objects.get(name='Practical Django Projects')
+ ItemTag.objects.create(
+ object_id=django_book.id, tag='intermediate',
+ content_type=ContentType.objects.get_for_model(django_book),
+ )
+ ItemTag.objects.create(
+ object_id=django_book.id, tag='django',
+ content_type=ContentType.objects.get_for_model(django_book),
+ )
+ # Assign a tag to model with same PK as the book above. If the JOIN
+ # used in aggregation doesn't have content type as part of the
+ # condition the annotation will also count the 'hi mom' tag for b.
+ wmpk = WithManualPK.objects.create(id=django_book.pk)
+ ItemTag.objects.create(
+ object_id=wmpk.id, tag='hi mom',
+ content_type=ContentType.objects.get_for_model(wmpk),
+ )
+ ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
+ ItemTag.objects.create(
+ object_id=ai_book.id, tag='intermediate',
+ content_type=ContentType.objects.get_for_model(ai_book),
+ )
+
+ self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
+ results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
+ self.assertEqual(
+ [(b.name, b.tags__count) for b in results],
+ [
+ ('Practical Django Projects', 2),
+ ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
+ ('Artificial Intelligence: A Modern Approach', 0),
+ ('Python Web Development with Django', 0),
+ ('Sams Teach Yourself Django in 24 Hours', 0),
+ ('The Definitive Guide to Django: Web Development Done Right', 0)
+ ]
+ )
+
+ def test_negated_aggregation(self):
+ expected_results = Author.objects.exclude(
+ pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
+ ).order_by('name')
+ expected_results = [a.name for a in expected_results]
+ qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
+ Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ expected_results,
+ lambda b: b.name
+ )
+ expected_results = Author.objects.exclude(
+ pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
+ ).order_by('name')
+ expected_results = [a.name for a in expected_results]
+ qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ expected_results,
+ lambda b: b.name
+ )
+
+ def test_name_filters(self):
+ qs = Author.objects.annotate(Count('book')).filter(
+ Q(book__count__exact=2) | Q(name='Adrian Holovaty')
+ ).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ ['Adrian Holovaty', 'Peter Norvig'],
+ lambda b: b.name
+ )
+
+ def test_name_expressions(self):
+ # Aggregates are spotted correctly from F objects.
+ # Note that Adrian's age is 34 in the fixtures, and he has one book
+ # so both conditions match one author.
+ qs = Author.objects.annotate(Count('book')).filter(
+ Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
+ ).order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ ['Adrian Holovaty', 'Peter Norvig'],
+ lambda b: b.name
+ )
+
+ def test_ticket_11293(self):
+ q1 = Q(price__gt=50)
+ q2 = Q(authors__count__gt=1)
+ query = Book.objects.annotate(Count('authors')).filter(
+ q1 | q2).order_by('pk')
+ self.assertQuerysetEqual(
+ query, [1, 4, 5, 6],
+ lambda b: b.pk)
+
+ def test_ticket_11293_q_immutable(self):
+ """
+ Splitting a q object to parts for where/having doesn't alter
+ the original q-object.
+ """
+ q1 = Q(isbn='')
+ q2 = Q(authors__count__gt=1)
+ query = Book.objects.annotate(Count('authors'))
+ query.filter(q1 | q2)
+ self.assertEqual(len(q2.children), 1)
+
+ def test_fobj_group_by(self):
+ """
+ An F() object referring to related column works correctly in group by.
+ """
+ qs = Book.objects.annotate(
+ account=Count('authors')
+ ).filter(
+ account=F('publisher__num_awards')
+ )
+ self.assertQuerysetEqual(
+ qs, ['Sams Teach Yourself Django in 24 Hours'],
+ lambda b: b.name)
+
+ def test_annotate_reserved_word(self):
+ """
+ Regression #18333 - Ensure annotated column name is properly quoted.
+ """
+ vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
+ self.assertEqual(vals, {
+ 'select__sum': 10,
+ 'select__avg': Approximate(1.666, places=2),
+ })
+
+ def test_annotate_on_relation(self):
+ book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
+ self.assertEqual(book.avg_price, 30.00)
+ self.assertEqual(book.publisher_name, "Apress")
+
+ def test_aggregate_on_relation(self):
+ # A query with an existing annotation aggregation on a relation should
+ # succeed.
+ qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
+ publisher_awards=Sum('publisher__num_awards')
+ )
+ self.assertEqual(qs['publisher_awards'], 30)
+
+ def test_annotate_distinct_aggregate(self):
+ # There are three books with rating of 4.0 and two of the books have
+ # the same price. Hence, the distinct removes one rating of 4.0
+ # from the results.
+ vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
+ vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
+ self.assertEqual(vals1, vals2)
+
+ def test_annotate_values_list_flat(self):
+ """Find ages that are shared by at least two authors."""
+ qs = Author.objects.values_list('age', flat=True).annotate(age_count=Count('age')).filter(age_count__gt=1)
+ self.assertSequenceEqual(qs, [29])
+
+
+class JoinPromotionTests(TestCase):
+ def test_ticket_21150(self):
+ b = Bravo.objects.create()
+ c = Charlie.objects.create(bravo=b)
+ qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
+ self.assertSequenceEqual(qs, [c])
+ self.assertIs(qs[0].alfa, None)
+ a = Alfa.objects.create()
+ c.alfa = a
+ c.save()
+ # Force re-evaluation
+ qs = qs.all()
+ self.assertSequenceEqual(qs, [c])
+ self.assertEqual(qs[0].alfa, a)
+
+ def test_existing_join_not_promoted(self):
+ # No promotion for existing joins
+ qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
+ self.assertIn(' INNER JOIN ', str(qs.query))
+ # Also, the existing join is unpromoted when doing filtering for already
+ # promoted join.
+ qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
+ self.assertIn(' INNER JOIN ', str(qs.query))
+ # But, as the join is nullable first use by annotate will be LOUTER
+ qs = Charlie.objects.annotate(Count('alfa__name'))
+ self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
+
+ def test_non_nullable_fk_not_promoted(self):
+ qs = Book.objects.annotate(Count('contact__name'))
+ self.assertIn(' INNER JOIN ', str(qs.query))
+
+
+class SelfReferentialFKTests(TestCase):
+ def test_ticket_24748(self):
+ t1 = SelfRefFK.objects.create(name='t1')
+ SelfRefFK.objects.create(name='t2', parent=t1)
+ SelfRefFK.objects.create(name='t3', parent=t1)
+ self.assertQuerysetEqual(
+ SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
+ [('t1', 2), ('t2', 0), ('t3', 0)],
+ lambda x: (x.name, x.num_children)
+ )
diff --git a/tests/bulk_create/__init__.py b/tests/bulk_create/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/bulk_create/models.py b/tests/bulk_create/models.py
new file mode 100644
index 00000000..c302a70b
--- /dev/null
+++ b/tests/bulk_create/models.py
@@ -0,0 +1,53 @@
+from django.db import models
+
+
+class Country(models.Model):
+ name = models.CharField(max_length=255)
+ iso_two_letter = models.CharField(max_length=2)
+
+
+class ProxyCountry(Country):
+ class Meta:
+ proxy = True
+
+
+class ProxyProxyCountry(ProxyCountry):
+ class Meta:
+ proxy = True
+
+
+class ProxyMultiCountry(ProxyCountry):
+ pass
+
+
+class ProxyMultiProxyCountry(ProxyMultiCountry):
+ class Meta:
+ proxy = True
+
+
+class Place(models.Model):
+ name = models.CharField(max_length=100)
+
+ class Meta:
+ abstract = True
+
+
+class Restaurant(Place):
+ pass
+
+
+class Pizzeria(Restaurant):
+ pass
+
+
+class State(models.Model):
+ two_letter_code = models.CharField(max_length=2, primary_key=True)
+
+
+class TwoFields(models.Model):
+ f1 = models.IntegerField(unique=True)
+ f2 = models.IntegerField(unique=True)
+
+
+class NoFields(models.Model):
+ pass
diff --git a/tests/bulk_create/tests.py b/tests/bulk_create/tests.py
new file mode 100644
index 00000000..210be56c
--- /dev/null
+++ b/tests/bulk_create/tests.py
@@ -0,0 +1,235 @@
+from __future__ import unicode_literals
+
+from operator import attrgetter
+
+from django.db import connection
+from django.db.models import Value
+from django.db.models.functions import Lower
+from django.test import (
+ TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
+)
+
+from .models import (
+ Country, NoFields, Pizzeria, ProxyCountry, ProxyMultiCountry,
+ ProxyMultiProxyCountry, ProxyProxyCountry, Restaurant, State, TwoFields,
+)
+
+
+class BulkCreateTests(TestCase):
+ def setUp(self):
+ self.data = [
+ Country(name="United States of America", iso_two_letter="US"),
+ Country(name="The Netherlands", iso_two_letter="NL"),
+ Country(name="Germany", iso_two_letter="DE"),
+ Country(name="Czech Republic", iso_two_letter="CZ")
+ ]
+
+ def test_simple(self):
+ created = Country.objects.bulk_create(self.data)
+ self.assertEqual(len(created), 4)
+ self.assertQuerysetEqual(Country.objects.order_by("-name"), [
+ "United States of America", "The Netherlands", "Germany", "Czech Republic"
+ ], attrgetter("name"))
+
+ created = Country.objects.bulk_create([])
+ self.assertEqual(created, [])
+ self.assertEqual(Country.objects.count(), 4)
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_efficiency(self):
+ with self.assertNumQueries(1):
+ Country.objects.bulk_create(self.data)
+
+ def test_multi_table_inheritance_unsupported(self):
+ expected_message = "Can't bulk create a multi-table inherited model"
+ with self.assertRaisesMessage(ValueError, expected_message):
+ Pizzeria.objects.bulk_create([
+ Pizzeria(name="The Art of Pizza"),
+ ])
+ with self.assertRaisesMessage(ValueError, expected_message):
+ ProxyMultiCountry.objects.bulk_create([
+ ProxyMultiCountry(name="Fillory", iso_two_letter="FL"),
+ ])
+ with self.assertRaisesMessage(ValueError, expected_message):
+ ProxyMultiProxyCountry.objects.bulk_create([
+ ProxyMultiProxyCountry(name="Fillory", iso_two_letter="FL"),
+ ])
+
+ def test_proxy_inheritance_supported(self):
+ ProxyCountry.objects.bulk_create([
+ ProxyCountry(name="Qwghlm", iso_two_letter="QW"),
+ Country(name="Tortall", iso_two_letter="TA"),
+ ])
+ self.assertQuerysetEqual(ProxyCountry.objects.all(), {
+ "Qwghlm", "Tortall"
+ }, attrgetter("name"), ordered=False)
+
+ ProxyProxyCountry.objects.bulk_create([
+ ProxyProxyCountry(name="Netherlands", iso_two_letter="NT"),
+ ])
+ self.assertQuerysetEqual(ProxyProxyCountry.objects.all(), {
+ "Qwghlm", "Tortall", "Netherlands",
+ }, attrgetter("name"), ordered=False)
+
+ def test_non_auto_increment_pk(self):
+ State.objects.bulk_create([
+ State(two_letter_code=s)
+ for s in ["IL", "NY", "CA", "ME"]
+ ])
+ self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
+ "CA", "IL", "ME", "NY",
+ ], attrgetter("two_letter_code"))
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_non_auto_increment_pk_efficiency(self):
+ with self.assertNumQueries(1):
+ State.objects.bulk_create([
+ State(two_letter_code=s)
+ for s in ["IL", "NY", "CA", "ME"]
+ ])
+ self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
+ "CA", "IL", "ME", "NY",
+ ], attrgetter("two_letter_code"))
+
+ @skipIfDBFeature('allows_auto_pk_0')
+ def test_zero_as_autoval(self):
+ """
+ Zero as id for AutoField should raise exception in MySQL, because MySQL
+ does not allow zero for automatic primary key.
+ """
+ valid_country = Country(name='Germany', iso_two_letter='DE')
+ invalid_country = Country(id=0, name='Poland', iso_two_letter='PL')
+ with self.assertRaises(ValueError):
+ Country.objects.bulk_create([valid_country, invalid_country])
+
+ def test_batch_same_vals(self):
+ # Sqlite had a problem where all the same-valued models were
+ # collapsed to one insert.
+ Restaurant.objects.bulk_create([
+ Restaurant(name='foo') for i in range(0, 2)
+ ])
+ self.assertEqual(Restaurant.objects.count(), 2)
+
+ def test_large_batch(self):
+ with override_settings(DEBUG=True):
+ connection.queries_log.clear()
+ TwoFields.objects.bulk_create([
+ TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)
+ ])
+ self.assertEqual(TwoFields.objects.count(), 1001)
+ self.assertEqual(
+ TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(),
+ 101)
+ self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101)
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_large_single_field_batch(self):
+ # SQLite had a problem with more than 500 UNIONed selects in single
+ # query.
+ Restaurant.objects.bulk_create([
+ Restaurant() for i in range(0, 501)
+ ])
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_large_batch_efficiency(self):
+ with override_settings(DEBUG=True):
+ connection.queries_log.clear()
+ TwoFields.objects.bulk_create([
+ TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)
+ ])
+ self.assertLess(len(connection.queries), 10)
+
+ def test_large_batch_mixed(self):
+ """
+ Test inserting a large batch with objects having primary key set
+ mixed together with objects without PK set.
+ """
+ with override_settings(DEBUG=True):
+ connection.queries_log.clear()
+ TwoFields.objects.bulk_create([
+ TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
+ for i in range(100000, 101000)])
+ self.assertEqual(TwoFields.objects.count(), 1000)
+ # We can't assume much about the ID's created, except that the above
+ # created IDs must exist.
+ id_range = range(100000, 101000, 2)
+ self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500)
+ self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500)
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_large_batch_mixed_efficiency(self):
+ """
+ Test inserting a large batch with objects having primary key set
+ mixed together with objects without PK set.
+ """
+ with override_settings(DEBUG=True):
+ connection.queries_log.clear()
+ TwoFields.objects.bulk_create([
+ TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
+ for i in range(100000, 101000)])
+ self.assertLess(len(connection.queries), 10)
+
+ def test_explicit_batch_size(self):
+ objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)]
+ num_objs = len(objs)
+ TwoFields.objects.bulk_create(objs, batch_size=1)
+ self.assertEqual(TwoFields.objects.count(), num_objs)
+ TwoFields.objects.all().delete()
+ TwoFields.objects.bulk_create(objs, batch_size=2)
+ self.assertEqual(TwoFields.objects.count(), num_objs)
+ TwoFields.objects.all().delete()
+ TwoFields.objects.bulk_create(objs, batch_size=3)
+ self.assertEqual(TwoFields.objects.count(), num_objs)
+ TwoFields.objects.all().delete()
+ TwoFields.objects.bulk_create(objs, batch_size=num_objs)
+ self.assertEqual(TwoFields.objects.count(), num_objs)
+
+ def test_empty_model(self):
+ self.skipTest("TODO fix ZeroDivisionError: integer division or modulo by zero")
+ NoFields.objects.bulk_create([NoFields() for i in range(2)])
+ self.assertEqual(NoFields.objects.count(), 2)
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_explicit_batch_size_efficiency(self):
+ objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)]
+ with self.assertNumQueries(2):
+ TwoFields.objects.bulk_create(objs, 50)
+ TwoFields.objects.all().delete()
+ with self.assertNumQueries(1):
+ TwoFields.objects.bulk_create(objs, len(objs))
+
+ @skipUnlessDBFeature('has_bulk_insert')
+ def test_bulk_insert_expressions(self):
+ Restaurant.objects.bulk_create([
+ Restaurant(name="Sam's Shake Shack"),
+ Restaurant(name=Lower(Value("Betty's Beetroot Bar")))
+ ])
+ bbb = Restaurant.objects.filter(name="betty's beetroot bar")
+ self.assertEqual(bbb.count(), 1)
+
+ @skipUnlessDBFeature('can_return_ids_from_bulk_insert')
+ def test_set_pk_and_insert_single_item(self):
+ with self.assertNumQueries(1):
+ countries = Country.objects.bulk_create([self.data[0]])
+ self.assertEqual(len(countries), 1)
+ self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
+
+ @skipUnlessDBFeature('can_return_ids_from_bulk_insert')
+ def test_set_pk_and_query_efficiency(self):
+ with self.assertNumQueries(1):
+ countries = Country.objects.bulk_create(self.data)
+ self.assertEqual(len(countries), 4)
+ self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
+ self.assertEqual(Country.objects.get(pk=countries[1].pk), countries[1])
+ self.assertEqual(Country.objects.get(pk=countries[2].pk), countries[2])
+ self.assertEqual(Country.objects.get(pk=countries[3].pk), countries[3])
+
+ @skipUnlessDBFeature('can_return_ids_from_bulk_insert')
+ def test_set_state(self):
+ country_nl = Country(name='Netherlands', iso_two_letter='NL')
+ country_be = Country(name='Belgium', iso_two_letter='BE')
+ Country.objects.bulk_create([country_nl])
+ country_be.save()
+ # Objects save via bulk_create() and save() should have equal state.
+ self.assertEqual(country_nl._state.adding, country_be._state.adding)
+ self.assertEqual(country_nl._state.db, country_be._state.db)
diff --git a/tests/custom_columns/__init__.py b/tests/custom_columns/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/custom_columns/models.py b/tests/custom_columns/models.py
new file mode 100644
index 00000000..3f619a7f
--- /dev/null
+++ b/tests/custom_columns/models.py
@@ -0,0 +1,55 @@
+"""
+Custom column/table names
+
+If your database column name is different than your model attribute, use the
+``db_column`` parameter. Note that you'll use the field's name, not its column
+name, in API usage.
+
+If your database table name is different than your model name, use the
+``db_table`` Meta attribute. This has no effect on the API used to
+query the database.
+
+If you need to use a table name for a many-to-many relationship that differs
+from the default generated name, use the ``db_table`` parameter on the
+``ManyToManyField``. This has no effect on the API for querying the database.
+
+"""
+
+from __future__ import unicode_literals
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Author(models.Model):
+ Author_ID = models.AutoField(primary_key=True, db_column='Author ID')
+ first_name = models.CharField(max_length=30, db_column='firstname')
+ last_name = models.CharField(max_length=30, db_column='last')
+
+ def __str__(self):
+ return '%s %s' % (self.first_name, self.last_name)
+
+ class Meta:
+ db_table = 'my_author_table'
+ ordering = ('last_name', 'first_name')
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ Article_ID = models.AutoField(primary_key=True, db_column='Article ID')
+ headline = models.CharField(max_length=100)
+ authors = models.ManyToManyField(Author, db_table='my_m2m_table')
+ primary_author = models.ForeignKey(
+ Author,
+ models.SET_NULL,
+ db_column='Author ID',
+ related_name='primary_set',
+ null=True,
+ )
+
+ def __str__(self):
+ return self.headline
+
+ class Meta:
+ ordering = ('headline',)
diff --git a/tests/custom_columns/tests.py b/tests/custom_columns/tests.py
new file mode 100644
index 00000000..7102e4fd
--- /dev/null
+++ b/tests/custom_columns/tests.py
@@ -0,0 +1,123 @@
+from __future__ import unicode_literals
+
+from django.core.exceptions import FieldError
+from django.test import TestCase
+from django.utils import six
+
+from .models import Article, Author
+
+
+class CustomColumnsTests(TestCase):
+
+ def setUp(self):
+ self.a1 = Author.objects.create(first_name="John", last_name="Smith")
+ self.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
+ self.authors = [self.a1, self.a2]
+
+ self.article = Article.objects.create(headline="Django lets you build Web apps easily", primary_author=self.a1)
+ self.article.authors.set(self.authors)
+
+ def test_query_all_available_authors(self):
+ self.assertQuerysetEqual(
+ Author.objects.all(), [
+ "Peter Jones", "John Smith",
+ ],
+ six.text_type
+ )
+
+ def test_get_first_name(self):
+ self.assertEqual(
+ Author.objects.get(first_name__exact="John"),
+ self.a1,
+ )
+
+ def test_filter_first_name(self):
+ self.assertQuerysetEqual(
+ Author.objects.filter(first_name__exact="John"), [
+ "John Smith",
+ ],
+ six.text_type
+ )
+
+ def test_field_error(self):
+ with self.assertRaises(FieldError):
+ Author.objects.filter(firstname__exact="John")
+
+ def test_attribute_error(self):
+ with self.assertRaises(AttributeError):
+ self.a1.firstname
+
+ with self.assertRaises(AttributeError):
+ self.a1.last
+
+ def test_get_all_authors_for_an_article(self):
+ self.assertQuerysetEqual(
+ self.article.authors.all(), [
+ "Peter Jones",
+ "John Smith",
+ ],
+ six.text_type
+ )
+
+ def test_get_all_articles_for_an_author(self):
+ self.assertQuerysetEqual(
+ self.a1.article_set.all(), [
+ "Django lets you build Web apps easily",
+ ],
+ lambda a: a.headline
+ )
+
+ def test_get_author_m2m_relation(self):
+ self.assertQuerysetEqual(
+ self.article.authors.filter(last_name='Jones'), [
+ "Peter Jones"
+ ],
+ six.text_type
+ )
+
+ def test_author_querying(self):
+ self.assertQuerysetEqual(
+ Author.objects.all().order_by('last_name'),
+ ['', '']
+ )
+
+ def test_author_filtering(self):
+ self.assertQuerysetEqual(
+ Author.objects.filter(first_name__exact='John'),
+ ['']
+ )
+
+ def test_author_get(self):
+ self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))
+
+ def test_filter_on_nonexistent_field(self):
+ msg = (
+ "Cannot resolve keyword 'firstname' into field. Choices are: "
+ "Author_ID, article, first_name, last_name, primary_set"
+ )
+ with self.assertRaisesMessage(FieldError, msg):
+ Author.objects.filter(firstname__exact='John')
+
+ def test_author_get_attributes(self):
+ a = Author.objects.get(last_name__exact='Smith')
+ self.assertEqual('John', a.first_name)
+ self.assertEqual('Smith', a.last_name)
+ with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'firstname'"):
+ getattr(a, 'firstname')
+
+ with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'last'"):
+ getattr(a, 'last')
+
+ def test_m2m_table(self):
+ self.assertQuerysetEqual(
+ self.article.authors.all().order_by('last_name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ self.a1.article_set.all(),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ self.article.authors.filter(last_name='Jones'),
+ ['']
+ )
diff --git a/tests/custom_pk/__init__.py b/tests/custom_pk/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/custom_pk/fields.py b/tests/custom_pk/fields.py
new file mode 100644
index 00000000..5bd249df
--- /dev/null
+++ b/tests/custom_pk/fields.py
@@ -0,0 +1,60 @@
+import random
+import string
+
+from django.db import models
+
+
+class MyWrapper:
+ def __init__(self, value):
+ self.value = value
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.value)
+
+ def __str__(self):
+ return self.value
+
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ return self.value == other.value
+ return self.value == other
+
+
+class MyAutoField(models.CharField):
+
+ def __init__(self, *args, **kwargs):
+ kwargs['max_length'] = 10
+ super().__init__(*args, **kwargs)
+
+ def pre_save(self, instance, add):
+ value = getattr(instance, self.attname, None)
+ if not value:
+ value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10)))
+ setattr(instance, self.attname, value)
+ return value
+
+ def to_python(self, value):
+ if not value:
+ return
+ if not isinstance(value, MyWrapper):
+ value = MyWrapper(value)
+ return value
+
+ def from_db_value(self, value, expression, connection):
+ if not value:
+ return
+ return MyWrapper(value)
+
+ def get_db_prep_save(self, value, connection):
+ if not value:
+ return
+ if isinstance(value, MyWrapper):
+ return str(value)
+ return value
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ if not value:
+ return
+ if isinstance(value, MyWrapper):
+ return str(value)
+ return value
diff --git a/tests/custom_pk/models.py b/tests/custom_pk/models.py
new file mode 100644
index 00000000..0b272c11
--- /dev/null
+++ b/tests/custom_pk/models.py
@@ -0,0 +1,44 @@
+"""
+Using a custom primary key
+
+By default, Django adds an ``"id"`` field to each model. But you can override
+this behavior by explicitly adding ``primary_key=True`` to a field.
+"""
+
+from django.db import models
+
+from .fields import MyAutoField
+
+
+class Employee(models.Model):
+ employee_code = models.IntegerField(primary_key=True, db_column='code')
+ first_name = models.CharField(max_length=20)
+ last_name = models.CharField(max_length=20)
+
+ class Meta:
+ ordering = ('last_name', 'first_name')
+
+ def __str__(self):
+ return "%s %s" % (self.first_name, self.last_name)
+
+
+class Business(models.Model):
+ name = models.CharField(max_length=20, primary_key=True)
+ employees = models.ManyToManyField(Employee)
+
+ class Meta:
+ verbose_name_plural = 'businesses'
+
+ def __str__(self):
+ return self.name
+
+
+class Bar(models.Model):
+ id = MyAutoField(primary_key=True, db_index=True)
+
+ def __str__(self):
+ return repr(self.pk)
+
+
+class Foo(models.Model):
+ bar = models.ForeignKey(Bar, models.CASCADE)
diff --git a/tests/custom_pk/tests.py b/tests/custom_pk/tests.py
new file mode 100644
index 00000000..da0cff14
--- /dev/null
+++ b/tests/custom_pk/tests.py
@@ -0,0 +1,232 @@
+from django.db import IntegrityError, transaction
+from django.test import TestCase, skipIfDBFeature
+
+from .models import Bar, Business, Employee, Foo
+
+
+class BasicCustomPKTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.dan = Employee.objects.create(
+ employee_code=123, first_name="Dan", last_name="Jones",
+ )
+ cls.fran = Employee.objects.create(
+ employee_code=456, first_name="Fran", last_name="Bones",
+ )
+ cls.business = Business.objects.create(name="Sears")
+ cls.business.employees.add(cls.dan, cls.fran)
+
+ def test_querysets(self):
+ """
+ Both pk and custom attribute_name can be used in filter and friends
+ """
+ self.assertQuerysetEqual(
+ Employee.objects.filter(pk=123), [
+ "Dan Jones",
+ ],
+ str
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(employee_code=123), [
+ "Dan Jones",
+ ],
+ str
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(pk__in=[123, 456]), [
+ "Fran Bones",
+ "Dan Jones",
+ ],
+ str
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.all(), [
+ "Fran Bones",
+ "Dan Jones",
+ ],
+ str
+ )
+
+ self.assertQuerysetEqual(
+ Business.objects.filter(name="Sears"), [
+ "Sears"
+ ],
+ lambda b: b.name
+ )
+ self.assertQuerysetEqual(
+ Business.objects.filter(pk="Sears"), [
+ "Sears",
+ ],
+ lambda b: b.name
+ )
+
+ def test_querysets_related_name(self):
+ """
+ Custom pk doesn't affect related_name based lookups
+ """
+ self.assertQuerysetEqual(
+ self.business.employees.all(), [
+ "Fran Bones",
+ "Dan Jones",
+ ],
+ str
+ )
+ self.assertQuerysetEqual(
+ self.fran.business_set.all(), [
+ "Sears",
+ ],
+ lambda b: b.name
+ )
+
+ def test_querysets_relational(self):
+ """
+ Queries across tables, involving primary key
+ """
+ self.assertQuerysetEqual(
+ Employee.objects.filter(business__name="Sears"), [
+ "Fran Bones",
+ "Dan Jones",
+ ],
+ str,
+ )
+ self.assertQuerysetEqual(
+ Employee.objects.filter(business__pk="Sears"), [
+ "Fran Bones",
+ "Dan Jones",
+ ],
+ str,
+ )
+
+ self.assertQuerysetEqual(
+ Business.objects.filter(employees__employee_code=123), [
+ "Sears",
+ ],
+ lambda b: b.name
+ )
+ self.assertQuerysetEqual(
+ Business.objects.filter(employees__pk=123), [
+ "Sears",
+ ],
+ lambda b: b.name,
+ )
+
+ self.assertQuerysetEqual(
+ Business.objects.filter(employees__first_name__startswith="Fran"), [
+ "Sears",
+ ],
+ lambda b: b.name
+ )
+
+ def test_get(self):
+ """
+ Get can accept pk or the real attribute name
+ """
+ self.assertEqual(Employee.objects.get(pk=123), self.dan)
+ self.assertEqual(Employee.objects.get(pk=456), self.fran)
+
+ with self.assertRaises(Employee.DoesNotExist):
+ Employee.objects.get(pk=42)
+
+ # Use the name of the primary key, rather than pk.
+ self.assertEqual(Employee.objects.get(employee_code=123), self.dan)
+
+ def test_pk_attributes(self):
+ """
+ pk and attribute name are available on the model
+ No default id attribute is added
+ """
+ # pk can be used as a substitute for the primary key.
+ # The primary key can be accessed via the pk property on the model.
+ e = Employee.objects.get(pk=123)
+ self.assertEqual(e.pk, 123)
+ # Or we can use the real attribute name for the primary key:
+ self.assertEqual(e.employee_code, 123)
+
+ with self.assertRaisesMessage(AttributeError, "'Employee' object has no attribute 'id'"):
+ e.id
+
+ def test_in_bulk(self):
+ """
+ Custom pks work with in_bulk, both for integer and non-integer types
+ """
+ emps = Employee.objects.in_bulk([123, 456])
+ self.assertEqual(emps[123], self.dan)
+
+ self.assertEqual(Business.objects.in_bulk(["Sears"]), {
+ "Sears": self.business,
+ })
+
+ def test_save(self):
+ """
+ custom pks do not affect save
+ """
+ fran = Employee.objects.get(pk=456)
+ fran.last_name = "Jones"
+ fran.save()
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(last_name="Jones"), [
+ "Dan Jones",
+ "Fran Jones",
+ ],
+ str
+ )
+
+
+class CustomPKTests(TestCase):
+ def test_custom_pk_create(self):
+ """
+ New objects can be created both with pk and the custom name
+ """
+ Employee.objects.create(employee_code=1234, first_name="Foo", last_name="Bar")
+ Employee.objects.create(pk=1235, first_name="Foo", last_name="Baz")
+ Business.objects.create(name="Bears")
+ Business.objects.create(pk="Tears")
+
+ def test_unicode_pk(self):
+ # Primary key may be unicode string
+ Business.objects.create(name='jaźń')
+
+ def test_unique_pk(self):
+ # The primary key must also obviously be unique, so trying to create a
+ # new object with the same primary key will fail.
+ Employee.objects.create(
+ employee_code=123, first_name="Frank", last_name="Jones"
+ )
+ with self.assertRaises(IntegrityError):
+ with transaction.atomic():
+ Employee.objects.create(employee_code=123, first_name="Fred", last_name="Jones")
+
+ def test_zero_non_autoincrement_pk(self):
+ Employee.objects.create(
+ employee_code=0, first_name="Frank", last_name="Jones"
+ )
+ employee = Employee.objects.get(pk=0)
+ self.assertEqual(employee.employee_code, 0)
+
+ def test_custom_field_pk(self):
+ # Regression for #10785 -- Custom fields can be used for primary keys.
+ new_bar = Bar.objects.create()
+ new_foo = Foo.objects.create(bar=new_bar)
+
+ f = Foo.objects.get(bar=new_bar.pk)
+ self.assertEqual(f, new_foo)
+ self.assertEqual(f.bar, new_bar)
+
+ f = Foo.objects.get(bar=new_bar)
+ self.assertEqual(f, new_foo),
+ self.assertEqual(f.bar, new_bar)
+
+ # SQLite lets objects be saved with an empty primary key, even though an
+ # integer is expected. So we can't check for an error being raised in that
+ # case for SQLite. Remove it from the suite for this next bit.
+ @skipIfDBFeature('supports_unspecified_pk')
+ def test_required_pk(self):
+ # The primary key must be specified, so an error is raised if you
+ # try to create an object without it.
+ with self.assertRaises(IntegrityError):
+ with transaction.atomic():
+ Employee.objects.create(first_name="Tom", last_name="Smith")
diff --git a/tests/datatypes/__init__.py b/tests/datatypes/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/datatypes/models.py b/tests/datatypes/models.py
new file mode 100644
index 00000000..cabe5297
--- /dev/null
+++ b/tests/datatypes/models.py
@@ -0,0 +1,29 @@
+"""
+This is a basic model to test saving and loading boolean and date-related
+types, which in the past were problematic for some database backends.
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Donut(models.Model):
+ name = models.CharField(max_length=100)
+ is_frosted = models.BooleanField(default=False)
+ has_sprinkles = models.NullBooleanField()
+ baked_date = models.DateField(null=True)
+ baked_time = models.TimeField(null=True)
+ consumed_at = models.DateTimeField(null=True)
+ review = models.TextField()
+
+ class Meta:
+ ordering = ('consumed_at',)
+
+ def __str__(self):
+ return self.name
+
+
+class RumBaba(models.Model):
+ baked_date = models.DateField(auto_now_add=True)
+ baked_timestamp = models.DateTimeField(auto_now_add=True)
diff --git a/tests/datatypes/tests.py b/tests/datatypes/tests.py
new file mode 100644
index 00000000..cf765677
--- /dev/null
+++ b/tests/datatypes/tests.py
@@ -0,0 +1,102 @@
+from __future__ import unicode_literals
+
+import datetime
+
+from django.test import TestCase, skipIfDBFeature
+from django.utils import six
+from django.utils.timezone import utc
+
+from .models import Donut, RumBaba
+
+
+class DataTypesTestCase(TestCase):
+
+ def test_boolean_type(self):
+ d = Donut(name='Apple Fritter')
+ self.assertFalse(d.is_frosted)
+ self.assertIsNone(d.has_sprinkles)
+ d.has_sprinkles = True
+ self.assertTrue(d.has_sprinkles)
+
+ d.save()
+
+ d2 = Donut.objects.get(name='Apple Fritter')
+ self.assertFalse(d2.is_frosted)
+ self.assertTrue(d2.has_sprinkles)
+
+ def test_date_type(self):
+ d = Donut(name='Apple Fritter')
+ d.baked_date = datetime.date(year=1938, month=6, day=4)
+ d.baked_time = datetime.time(hour=5, minute=30)
+ d.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
+ d.save()
+
+ d2 = Donut.objects.get(name='Apple Fritter')
+ self.assertEqual(d2.baked_date, datetime.date(1938, 6, 4))
+ self.assertEqual(d2.baked_time, datetime.time(5, 30))
+ self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59))
+
+ def test_time_field(self):
+ # Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
+ d = Donut(name='Apple Fritter')
+ d.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
+ d.save()
+
+ d2 = Donut.objects.get(name='Apple Fritter')
+ self.assertEqual(d2.baked_time, datetime.time(16, 19, 59))
+
+ def test_year_boundaries(self):
+ """Year boundary tests (ticket #3689)"""
+ Donut.objects.create(
+ name='Date Test 2007',
+ baked_date=datetime.datetime(year=2007, month=12, day=31),
+ consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59),
+ )
+ Donut.objects.create(
+ name='Date Test 2006',
+ baked_date=datetime.datetime(year=2006, month=1, day=1),
+ consumed_at=datetime.datetime(year=2006, month=1, day=1),
+ )
+ self.assertEqual("Date Test 2007", Donut.objects.filter(baked_date__year=2007)[0].name)
+ self.assertEqual("Date Test 2006", Donut.objects.filter(baked_date__year=2006)[0].name)
+
+ Donut.objects.create(
+ name='Apple Fritter',
+ consumed_at=datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59),
+ )
+
+ self.assertEqual(
+ ['Apple Fritter', 'Date Test 2007'],
+ list(Donut.objects.filter(consumed_at__year=2007).order_by('name').values_list('name', flat=True))
+ )
+ self.assertEqual(0, Donut.objects.filter(consumed_at__year=2005).count())
+ self.assertEqual(0, Donut.objects.filter(consumed_at__year=2008).count())
+
+ def test_textfields_unicode(self):
+ """Regression test for #10238: TextField values returned from the
+ database should be unicode."""
+ d = Donut.objects.create(name='Jelly Donut', review='Outstanding')
+ newd = Donut.objects.get(id=d.id)
+ self.assertIsInstance(newd.review, six.text_type)
+
+ @skipIfDBFeature('supports_timezones')
+ def test_error_on_timezone(self):
+ """Regression test for #8354: the MySQL and Oracle backends should raise
+ an error if given a timezone-aware datetime object."""
+ self.skipTest("TODO fix AssertionError: ValueError not raised")
+ dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=utc)
+ d = Donut(name='Bear claw', consumed_at=dt)
+ # MySQL backend does not support timezone-aware datetimes.
+ with self.assertRaises(ValueError):
+ d.save()
+
+ def test_datefield_auto_now_add(self):
+ """Regression test for #10970, auto_now_add for DateField should store
+ a Python datetime.date, not a datetime.datetime"""
+ b = RumBaba.objects.create()
+ # Verify we didn't break DateTimeField behavior
+ self.assertIsInstance(b.baked_timestamp, datetime.datetime)
+ # We need to test this way because datetime.datetime inherits
+ # from datetime.date:
+ self.assertIsInstance(b.baked_date, datetime.date)
+ self.assertNotIsInstance(b.baked_date, datetime.datetime)
diff --git a/tests/dates/__init__.py b/tests/dates/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/dates/models.py b/tests/dates/models.py
new file mode 100644
index 00000000..74f9db28
--- /dev/null
+++ b/tests/dates/models.py
@@ -0,0 +1,27 @@
+from django.db import models
+from django.utils import timezone
+
+
+class Article(models.Model):
+ title = models.CharField(max_length=100)
+ pub_date = models.DateField()
+ pub_datetime = models.DateTimeField(default=timezone.now())
+
+ categories = models.ManyToManyField("Category", related_name="articles")
+
+ def __str__(self):
+ return self.title
+
+
+class Comment(models.Model):
+ article = models.ForeignKey(Article, models.CASCADE, related_name="comments")
+ text = models.TextField()
+ pub_date = models.DateField()
+ approval_date = models.DateField(null=True)
+
+ def __str__(self):
+ return 'Comment to %s (%s)' % (self.article.title, self.pub_date)
+
+
+class Category(models.Model):
+ name = models.CharField(max_length=255)
diff --git a/tests/dates/tests.py b/tests/dates/tests.py
new file mode 100644
index 00000000..ebdf0581
--- /dev/null
+++ b/tests/dates/tests.py
@@ -0,0 +1,136 @@
+import datetime
+from unittest import skipUnless
+
+from django.core.exceptions import FieldError
+from django.db import connection
+from django.test import TestCase, override_settings
+
+from .models import Article, Category, Comment
+
+
+class DatesTests(TestCase):
+ def test_related_model_traverse(self):
+ a1 = Article.objects.create(
+ title="First one",
+ pub_date=datetime.date(2005, 7, 28),
+ )
+ a2 = Article.objects.create(
+ title="Another one",
+ pub_date=datetime.date(2010, 7, 28),
+ )
+ a3 = Article.objects.create(
+ title="Third one, in the first day",
+ pub_date=datetime.date(2005, 7, 28),
+ )
+
+ a1.comments.create(
+ text="Im the HULK!",
+ pub_date=datetime.date(2005, 7, 28),
+ )
+ a1.comments.create(
+ text="HULK SMASH!",
+ pub_date=datetime.date(2005, 7, 29),
+ )
+ a2.comments.create(
+ text="LMAO",
+ pub_date=datetime.date(2010, 7, 28),
+ )
+ a3.comments.create(
+ text="+1",
+ pub_date=datetime.date(2005, 8, 29),
+ )
+
+ c = Category.objects.create(name="serious-news")
+ c.articles.add(a1, a3)
+
+ self.assertSequenceEqual(
+ Comment.objects.dates("article__pub_date", "year"), [
+ datetime.date(2005, 1, 1),
+ datetime.date(2010, 1, 1),
+ ],
+ )
+ self.assertSequenceEqual(
+ Comment.objects.dates("article__pub_date", "month"), [
+ datetime.date(2005, 7, 1),
+ datetime.date(2010, 7, 1),
+ ],
+ )
+ self.assertSequenceEqual(
+ Comment.objects.dates("article__pub_date", "week"), [
+ datetime.date(2005, 7, 25),
+ datetime.date(2010, 7, 26),
+ ],
+ )
+ self.assertSequenceEqual(
+ Comment.objects.dates("article__pub_date", "day"), [
+ datetime.date(2005, 7, 28),
+ datetime.date(2010, 7, 28),
+ ],
+ )
+ self.assertSequenceEqual(
+ Article.objects.dates("comments__pub_date", "day"), [
+ datetime.date(2005, 7, 28),
+ datetime.date(2005, 7, 29),
+ datetime.date(2005, 8, 29),
+ datetime.date(2010, 7, 28),
+ ],
+ )
+ self.assertQuerysetEqual(
+ Article.objects.dates("comments__approval_date", "day"), []
+ )
+ self.assertSequenceEqual(
+ Category.objects.dates("articles__pub_date", "day"), [
+ datetime.date(2005, 7, 28),
+ ],
+ )
+
+ def test_dates_fails_when_no_arguments_are_provided(self):
+ with self.assertRaises(TypeError):
+ Article.objects.dates()
+
+ def test_dates_fails_when_given_invalid_field_argument(self):
+ self.assertRaisesMessage(
+ FieldError,
+ "Cannot resolve keyword 'invalid_field' into field. Choices are: "
+ "categories, comments, id, pub_date, pub_datetime, title",
+ Article.objects.dates,
+ "invalid_field",
+ "year",
+ )
+
+ def test_dates_fails_when_given_invalid_kind_argument(self):
+ msg = "'kind' must be one of 'year', 'month', 'week', or 'day'."
+ with self.assertRaisesMessage(AssertionError, msg):
+ Article.objects.dates("pub_date", "bad_kind")
+
+ def test_dates_fails_when_given_invalid_order_argument(self):
+ with self.assertRaisesMessage(AssertionError, "'order' must be either 'ASC' or 'DESC'."):
+ Article.objects.dates("pub_date", "year", order="bad order")
+
+ @override_settings(USE_TZ=False)
+ def test_dates_trunc_datetime_fields(self):
+ Article.objects.bulk_create(
+ Article(pub_date=pub_datetime.date(), pub_datetime=pub_datetime)
+ for pub_datetime in [
+ datetime.datetime(2015, 10, 21, 18, 1),
+ datetime.datetime(2015, 10, 21, 18, 2),
+ datetime.datetime(2015, 10, 22, 18, 1),
+ datetime.datetime(2015, 10, 22, 18, 2),
+ ]
+ )
+ self.assertSequenceEqual(
+ Article.objects.dates('pub_datetime', 'day', order='ASC'), [
+ datetime.date(2015, 10, 21),
+ datetime.date(2015, 10, 22),
+ ]
+ )
+
+ @skipUnless(connection.vendor == 'mysql', "Test checks MySQL query syntax")
+ def test_dates_avoid_datetime_cast(self):
+ Article.objects.create(pub_date=datetime.date(2015, 10, 21))
+ for kind in ['day', 'month', 'year']:
+ qs = Article.objects.dates('pub_date', kind)
+ if kind == 'day':
+ self.assertIn('DATE(', str(qs.query))
+ else:
+ self.assertIn(' AS DATE)', str(qs.query))
diff --git a/tests/datetimes/__init__.py b/tests/datetimes/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/datetimes/models.py b/tests/datetimes/models.py
new file mode 100644
index 00000000..2fcb72be
--- /dev/null
+++ b/tests/datetimes/models.py
@@ -0,0 +1,31 @@
+from __future__ import unicode_literals
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ title = models.CharField(max_length=100)
+ pub_date = models.DateTimeField()
+ published_on = models.DateField(null=True)
+
+ categories = models.ManyToManyField("Category", related_name="articles")
+
+ def __str__(self):
+ return self.title
+
+
+@python_2_unicode_compatible
+class Comment(models.Model):
+ article = models.ForeignKey(Article, models.CASCADE, related_name="comments")
+ text = models.TextField()
+ pub_date = models.DateTimeField()
+ approval_date = models.DateTimeField(null=True)
+
+ def __str__(self):
+ return 'Comment to %s (%s)' % (self.article.title, self.pub_date)
+
+
+class Category(models.Model):
+ name = models.CharField(max_length=255)
diff --git a/tests/datetimes/tests.py b/tests/datetimes/tests.py
new file mode 100644
index 00000000..9117454d
--- /dev/null
+++ b/tests/datetimes/tests.py
@@ -0,0 +1,153 @@
+from __future__ import unicode_literals
+
+import datetime
+
+import django
+from django.test import TestCase, override_settings
+from django.utils import timezone
+
+from .models import Article, Category, Comment
+
+
+class DateTimesTests(TestCase):
+ def test_related_model_traverse(self):
+ a1 = Article.objects.create(
+ title="First one",
+ pub_date=datetime.datetime(2005, 7, 28, 9, 0, 0),
+ )
+ a2 = Article.objects.create(
+ title="Another one",
+ pub_date=datetime.datetime(2010, 7, 28, 10, 0, 0),
+ )
+ a3 = Article.objects.create(
+ title="Third one, in the first day",
+ pub_date=datetime.datetime(2005, 7, 28, 17, 0, 0),
+ )
+
+ a1.comments.create(
+ text="Im the HULK!",
+ pub_date=datetime.datetime(2005, 7, 28, 9, 30, 0),
+ )
+ a1.comments.create(
+ text="HULK SMASH!",
+ pub_date=datetime.datetime(2005, 7, 29, 1, 30, 0),
+ )
+ a2.comments.create(
+ text="LMAO",
+ pub_date=datetime.datetime(2010, 7, 28, 10, 10, 10),
+ )
+ a3.comments.create(
+ text="+1",
+ pub_date=datetime.datetime(2005, 8, 29, 10, 10, 10),
+ )
+
+ c = Category.objects.create(name="serious-news")
+ c.articles.add(a1, a3)
+
+ self.assertSequenceEqual(
+ Comment.objects.datetimes("article__pub_date", "year"), [
+ datetime.datetime(2005, 1, 1),
+ datetime.datetime(2010, 1, 1),
+ ],
+ )
+ self.assertSequenceEqual(
+ Comment.objects.datetimes("article__pub_date", "month"), [
+ datetime.datetime(2005, 7, 1),
+ datetime.datetime(2010, 7, 1),
+ ],
+ )
+ self.assertSequenceEqual(
+ Comment.objects.datetimes("article__pub_date", "day"), [
+ datetime.datetime(2005, 7, 28),
+ datetime.datetime(2010, 7, 28),
+ ],
+ )
+ self.assertSequenceEqual(
+ Article.objects.datetimes("comments__pub_date", "day"), [
+ datetime.datetime(2005, 7, 28),
+ datetime.datetime(2005, 7, 29),
+ datetime.datetime(2005, 8, 29),
+ datetime.datetime(2010, 7, 28),
+ ],
+ )
+ self.assertQuerysetEqual(
+ Article.objects.datetimes("comments__approval_date", "day"), []
+ )
+ self.assertSequenceEqual(
+ Category.objects.datetimes("articles__pub_date", "day"), [
+ datetime.datetime(2005, 7, 28),
+ ],
+ )
+
+ @override_settings(USE_TZ=True)
+ def test_21432(self):
+ self.skipTest("TODO fix AssertionError: datet[20 chars], 9, 16, 59, 32, tzinfo=) != datet[20 chars], 9, 22, 59, 32, tzinfo=)")
+ now = timezone.localtime(timezone.now().replace(microsecond=0))
+ Article.objects.create(title="First one", pub_date=now)
+ qs = Article.objects.datetimes('pub_date', 'second')
+ self.assertEqual(qs[0], now)
+
+ def test_datetimes_returns_available_dates_for_given_scope_and_given_field(self):
+ pub_dates = [
+ datetime.datetime(2005, 7, 28, 12, 15),
+ datetime.datetime(2005, 7, 29, 2, 15),
+ datetime.datetime(2005, 7, 30, 5, 15),
+ datetime.datetime(2005, 7, 31, 19, 15)]
+ for i, pub_date in enumerate(pub_dates):
+ Article(pub_date=pub_date, title='title #{}'.format(i)).save()
+
+ self.assertQuerysetEqual(
+ Article.objects.datetimes('pub_date', 'year'),
+ ["datetime.datetime(2005, 1, 1, 0, 0)"])
+ self.assertQuerysetEqual(
+ Article.objects.datetimes('pub_date', 'month'),
+ ["datetime.datetime(2005, 7, 1, 0, 0)"])
+ self.assertQuerysetEqual(
+ Article.objects.datetimes('pub_date', 'day'),
+ ["datetime.datetime(2005, 7, 28, 0, 0)",
+ "datetime.datetime(2005, 7, 29, 0, 0)",
+ "datetime.datetime(2005, 7, 30, 0, 0)",
+ "datetime.datetime(2005, 7, 31, 0, 0)"])
+ self.assertQuerysetEqual(
+ Article.objects.datetimes('pub_date', 'day', order='ASC'),
+ ["datetime.datetime(2005, 7, 28, 0, 0)",
+ "datetime.datetime(2005, 7, 29, 0, 0)",
+ "datetime.datetime(2005, 7, 30, 0, 0)",
+ "datetime.datetime(2005, 7, 31, 0, 0)"])
+ self.assertQuerysetEqual(
+ Article.objects.datetimes('pub_date', 'day', order='DESC'),
+ ["datetime.datetime(2005, 7, 31, 0, 0)",
+ "datetime.datetime(2005, 7, 30, 0, 0)",
+ "datetime.datetime(2005, 7, 29, 0, 0)",
+ "datetime.datetime(2005, 7, 28, 0, 0)"])
+
+ def test_datetimes_has_lazy_iterator(self):
+ pub_dates = [
+ datetime.datetime(2005, 7, 28, 12, 15),
+ datetime.datetime(2005, 7, 29, 2, 15),
+ datetime.datetime(2005, 7, 30, 5, 15),
+ datetime.datetime(2005, 7, 31, 19, 15)]
+ for i, pub_date in enumerate(pub_dates):
+ Article(pub_date=pub_date, title='title #{}'.format(i)).save()
+ # Use iterator() with datetimes() to return a generator that lazily
+ # requests each result one at a time, to save memory.
+ dates = []
+ with self.assertNumQueries(0):
+ article_datetimes_iterator = Article.objects.datetimes('pub_date', 'day', order='DESC').iterator()
+
+ with self.assertNumQueries(1):
+ for article in article_datetimes_iterator:
+ dates.append(article)
+ self.assertEqual(dates, [
+ datetime.datetime(2005, 7, 31, 0, 0),
+ datetime.datetime(2005, 7, 30, 0, 0),
+ datetime.datetime(2005, 7, 29, 0, 0),
+ datetime.datetime(2005, 7, 28, 0, 0)])
+
+ def test_datetimes_disallows_date_fields(self):
+ if django.VERSION < (1, 10, 0):
+ self.skipTest("TODO fix AssertionError: 'published_on' isn't a DateTimeField.")
+ dt = datetime.datetime(2005, 7, 28, 12, 15)
+ Article.objects.create(pub_date=dt, published_on=dt.date(), title="Don't put dates into datetime functions!")
+ with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'published_on' to DateTimeField"):
+ list(Article.objects.datetimes('published_on', 'second'))
diff --git a/tests/db_typecasts/__init__.py b/tests/db_typecasts/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/db_typecasts/tests.py b/tests/db_typecasts/tests.py
new file mode 100644
index 00000000..fa9eab16
--- /dev/null
+++ b/tests/db_typecasts/tests.py
@@ -0,0 +1,63 @@
+# Unit tests for typecast functions in django.db.backends.util
+
+import datetime
+import unittest
+
+from django.db.backends import utils as typecasts
+from django.utils import six
+
+TEST_CASES = {
+ 'typecast_date': (
+ ('', None),
+ (None, None),
+ ('2005-08-11', datetime.date(2005, 8, 11)),
+ ('1990-01-01', datetime.date(1990, 1, 1)),
+ ),
+ 'typecast_time': (
+ ('', None),
+ (None, None),
+ ('0:00:00', datetime.time(0, 0)),
+ ('0:30:00', datetime.time(0, 30)),
+ ('8:50:00', datetime.time(8, 50)),
+ ('08:50:00', datetime.time(8, 50)),
+ ('12:00:00', datetime.time(12, 00)),
+ ('12:30:00', datetime.time(12, 30)),
+ ('13:00:00', datetime.time(13, 00)),
+ ('23:59:00', datetime.time(23, 59)),
+ ('00:00:12', datetime.time(0, 0, 12)),
+ ('00:00:12.5', datetime.time(0, 0, 12, 500000)),
+ ('7:22:13.312', datetime.time(7, 22, 13, 312000)),
+ ('12:45:30.126631', datetime.time(12, 45, 30, 126631)),
+ ('12:45:30.126630', datetime.time(12, 45, 30, 126630)),
+ ('12:45:30.123456789', datetime.time(12, 45, 30, 123456)),
+ ),
+ 'typecast_timestamp': (
+ ('', None),
+ (None, None),
+ ('2005-08-11 0:00:00', datetime.datetime(2005, 8, 11)),
+ ('2005-08-11 0:30:00', datetime.datetime(2005, 8, 11, 0, 30)),
+ ('2005-08-11 8:50:30', datetime.datetime(2005, 8, 11, 8, 50, 30)),
+ ('2005-08-11 8:50:30.123', datetime.datetime(2005, 8, 11, 8, 50, 30, 123000)),
+ ('2005-08-11 8:50:30.9', datetime.datetime(2005, 8, 11, 8, 50, 30, 900000)),
+ ('2005-08-11 8:50:30.312-05', datetime.datetime(2005, 8, 11, 8, 50, 30, 312000)),
+ ('2005-08-11 8:50:30.312+02', datetime.datetime(2005, 8, 11, 8, 50, 30, 312000)),
+ # ticket 14453
+ ('2010-10-12 15:29:22.063202', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
+ ('2010-10-12 15:29:22.063202-03', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
+ ('2010-10-12 15:29:22.063202+04', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
+ ('2010-10-12 15:29:22.0632021', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
+ ('2010-10-12 15:29:22.0632029', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
+ ),
+}
+
+
+class DBTypeCasts(unittest.TestCase):
+ def test_typeCasts(self):
+ for k, v in six.iteritems(TEST_CASES):
+ for inpt, expected in v:
+ got = getattr(typecasts, k)(inpt)
+ self.assertEqual(
+ got,
+ expected,
+ "In %s: %r doesn't match %r. Got %r instead." % (k, inpt, expected, got)
+ )
diff --git a/tests/defer/__init__.py b/tests/defer/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/defer/models.py b/tests/defer/models.py
new file mode 100644
index 00000000..b36b1735
--- /dev/null
+++ b/tests/defer/models.py
@@ -0,0 +1,48 @@
+"""
+Tests for defer() and only().
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+class Secondary(models.Model):
+ first = models.CharField(max_length=50)
+ second = models.CharField(max_length=50)
+
+
+@python_2_unicode_compatible
+class Primary(models.Model):
+ name = models.CharField(max_length=50)
+ value = models.CharField(max_length=50)
+ related = models.ForeignKey(Secondary, models.CASCADE)
+
+ def __str__(self):
+ return self.name
+
+
+class Child(Primary):
+ pass
+
+
+class BigChild(Primary):
+ other = models.CharField(max_length=50)
+
+
+class ChildProxy(Child):
+ class Meta:
+ proxy = True
+
+
+class RefreshPrimaryProxy(Primary):
+ class Meta:
+ proxy = True
+
+ def refresh_from_db(self, using=None, fields=None, **kwargs):
+ # Reloads all deferred fields if any of the fields is deferred.
+ if fields is not None:
+ fields = set(fields)
+ deferred_fields = self.get_deferred_fields()
+ if fields.intersection(deferred_fields):
+ fields = fields.union(deferred_fields)
+ super(RefreshPrimaryProxy, self).refresh_from_db(using, fields, **kwargs)
diff --git a/tests/defer/tests.py b/tests/defer/tests.py
new file mode 100644
index 00000000..35fa77ea
--- /dev/null
+++ b/tests/defer/tests.py
@@ -0,0 +1,274 @@
+from __future__ import unicode_literals
+
+from django.db.models.query_utils import InvalidQuery
+from django.test import TestCase
+
+from .models import (
+ BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
+)
+
+
+class AssertionMixin(object):
+ def assert_delayed(self, obj, num):
+ """
+ Instances with deferred fields look the same as normal instances when
+ we examine attribute values. Therefore, this method returns the number
+ of deferred fields on returned instances.
+ """
+ count = len(obj.get_deferred_fields())
+ self.assertEqual(count, num)
+
+
+class DeferTests(AssertionMixin, TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.s1 = Secondary.objects.create(first="x1", second="y1")
+ cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
+
+ def test_defer(self):
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.defer("name")[0], 1)
+ self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
+ self.assert_delayed(qs.defer("related__first")[0], 0)
+ self.assert_delayed(qs.defer("name").defer("value")[0], 2)
+
+ def test_only(self):
+ # TODO: fix
+ return
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.only("name")[0], 2)
+ self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
+ self.assert_delayed(qs.only("name").only("value")[0], 2)
+ self.assert_delayed(qs.only("related__first")[0], 2)
+ # Using 'pk' with only() should result in 3 deferred fields, namely all
+ # of them except the model's primary key see #15494
+ self.assert_delayed(qs.only("pk")[0], 3)
+ # You can use 'pk' with reverse foreign key lookups.
+ # The related_id is alawys set even if it's not fetched from the DB,
+ # so pk and related_id are not deferred.
+ self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 2)
+
+ def test_defer_only_chaining(self):
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
+ self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
+ self.assert_delayed(qs.defer("name").only("value")[0], 2)
+ self.assert_delayed(qs.only("name").defer("value")[0], 2)
+
+ def test_defer_on_an_already_deferred_field(self):
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.defer("name")[0], 1)
+ self.assert_delayed(qs.defer("name").defer("name")[0], 1)
+
+ def test_defer_none_to_clear_deferred_set(self):
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.defer("name", "value")[0], 2)
+ self.assert_delayed(qs.defer(None)[0], 0)
+ self.assert_delayed(qs.only("name").defer(None)[0], 0)
+
+ def test_only_none_raises_error(self):
+ msg = 'Cannot pass None as an argument to only().'
+ with self.assertRaisesMessage(TypeError, msg):
+ Primary.objects.only(None)
+
+ def test_defer_extra(self):
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
+ self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
+
+ def test_defer_values_does_not_defer(self):
+ # User values() won't defer anything (you get the full list of
+ # dictionaries back), but it still works.
+ self.assertEqual(Primary.objects.defer("name").values()[0], {
+ "id": self.p1.id,
+ "name": "p1",
+ "value": "xx",
+ "related_id": self.s1.id,
+ })
+
+ def test_only_values_does_not_defer(self):
+ self.assertEqual(Primary.objects.only("name").values()[0], {
+ "id": self.p1.id,
+ "name": "p1",
+ "value": "xx",
+ "related_id": self.s1.id,
+ })
+
+ def test_get(self):
+ # Using defer() and only() with get() is also valid.
+ qs = Primary.objects.all()
+ self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
+ self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
+
+ def test_defer_with_select_related(self):
+ obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
+ self.assert_delayed(obj.related, 2)
+ self.assert_delayed(obj, 0)
+
+ def test_only_with_select_related(self):
+ obj = Primary.objects.select_related().only("related__first")[0]
+ self.assert_delayed(obj, 2)
+ self.assert_delayed(obj.related, 1)
+ self.assertEqual(obj.related_id, self.s1.pk)
+ self.assertEqual(obj.name, "p1")
+
+ def test_defer_select_related_raises_invalid_query(self):
+ msg = (
+ 'Field Primary.related cannot be both deferred and traversed '
+ 'using select_related at the same time.'
+ )
+ with self.assertRaisesMessage(InvalidQuery, msg):
+ Primary.objects.defer("related").select_related("related")[0]
+
+ def test_only_select_related_raises_invalid_query(self):
+ msg = (
+ 'Field Primary.related cannot be both deferred and traversed using '
+ 'select_related at the same time.'
+ )
+ with self.assertRaisesMessage(InvalidQuery, msg):
+ Primary.objects.only("name").select_related("related")[0]
+
+ def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
+ # TODO: fix
+ return
+ # select_related() overrides defer().
+ with self.assertNumQueries(1):
+ obj = Primary.objects.defer("related").select_related()[0]
+ self.assert_delayed(obj, 1)
+ self.assertEqual(obj.related.id, self.s1.pk)
+
+ def test_saving_object_with_deferred_field(self):
+ # Saving models with deferred fields is possible (but inefficient,
+ # since every field has to be retrieved first).
+ Primary.objects.create(name="p2", value="xy", related=self.s1)
+ obj = Primary.objects.defer("value").get(name="p2")
+ obj.name = "a new name"
+ obj.save()
+ self.assertQuerysetEqual(
+ Primary.objects.all(), [
+ "p1", "a new name",
+ ],
+ lambda p: p.name,
+ ordered=False,
+ )
+
+ def test_defer_baseclass_when_subclass_has_no_added_fields(self):
+ # Regression for #10572 - A subclass with no extra fields can defer
+ # fields from the base class
+ Child.objects.create(name="c1", value="foo", related=self.s1)
+ # You can defer a field on a baseclass when the subclass has no fields
+ obj = Child.objects.defer("value").get(name="c1")
+ self.assert_delayed(obj, 1)
+ self.assertEqual(obj.name, "c1")
+ self.assertEqual(obj.value, "foo")
+
+ def test_only_baseclass_when_subclass_has_no_added_fields(self):
+ # You can retrieve a single column on a base class with no fields
+ Child.objects.create(name="c1", value="foo", related=self.s1)
+ obj = Child.objects.only("name").get(name="c1")
+ # on an inherited model, its PK is also fetched, hence '3' deferred fields.
+ self.assert_delayed(obj, 3)
+ self.assertEqual(obj.name, "c1")
+ self.assertEqual(obj.value, "foo")
+
+
+class BigChildDeferTests(AssertionMixin, TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.s1 = Secondary.objects.create(first="x1", second="y1")
+ BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
+
+ def test_defer_baseclass_when_subclass_has_added_field(self):
+ # You can defer a field on a baseclass
+ obj = BigChild.objects.defer("value").get(name="b1")
+ self.assert_delayed(obj, 1)
+ self.assertEqual(obj.name, "b1")
+ self.assertEqual(obj.value, "foo")
+ self.assertEqual(obj.other, "bar")
+
+ def test_defer_subclass(self):
+ # You can defer a field on a subclass
+ obj = BigChild.objects.defer("other").get(name="b1")
+ self.assert_delayed(obj, 1)
+ self.assertEqual(obj.name, "b1")
+ self.assertEqual(obj.value, "foo")
+ self.assertEqual(obj.other, "bar")
+
+ def test_only_baseclass_when_subclass_has_added_field(self):
+ # You can retrieve a single field on a baseclass
+ obj = BigChild.objects.only("name").get(name="b1")
+ # when inherited model, its PK is also fetched, hence '4' deferred fields.
+ self.assert_delayed(obj, 4)
+ self.assertEqual(obj.name, "b1")
+ self.assertEqual(obj.value, "foo")
+ self.assertEqual(obj.other, "bar")
+
+ def test_only_sublcass(self):
+ # You can retrieve a single field on a subclass
+ obj = BigChild.objects.only("other").get(name="b1")
+ self.assert_delayed(obj, 4)
+ self.assertEqual(obj.name, "b1")
+ self.assertEqual(obj.value, "foo")
+ self.assertEqual(obj.other, "bar")
+
+
+class TestDefer2(AssertionMixin, TestCase):
+ def test_defer_proxy(self):
+ """
+ Ensure select_related together with only on a proxy model behaves
+ as expected. See #17876.
+ """
+ related = Secondary.objects.create(first='x1', second='x2')
+ ChildProxy.objects.create(name='p1', value='xx', related=related)
+ children = ChildProxy.objects.all().select_related().only('id', 'name')
+ self.assertEqual(len(children), 1)
+ child = children[0]
+ self.assert_delayed(child, 2)
+ self.assertEqual(child.name, 'p1')
+ self.assertEqual(child.value, 'xx')
+
+ def test_defer_inheritance_pk_chaining(self):
+ """
+ When an inherited model is fetched from the DB, its PK is also fetched.
+ When getting the PK of the parent model it is useful to use the already
+ fetched parent model PK if it happens to be available.
+ """
+ s1 = Secondary.objects.create(first="x1", second="y1")
+ bc = BigChild.objects.create(name="b1", value="foo", related=s1,
+ other="bar")
+ bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
+ with self.assertNumQueries(0):
+ bc_deferred.id
+ self.assertEqual(bc_deferred.pk, bc_deferred.id)
+
+ def test_eq(self):
+ s1 = Secondary.objects.create(first="x1", second="y1")
+ s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
+ self.assertEqual(s1, s1_defer)
+ self.assertEqual(s1_defer, s1)
+
+ def test_refresh_not_loading_deferred_fields(self):
+ s = Secondary.objects.create()
+ rf = Primary.objects.create(name='foo', value='bar', related=s)
+ rf2 = Primary.objects.only('related', 'value').get()
+ rf.name = 'new foo'
+ rf.value = 'new bar'
+ rf.save()
+ with self.assertNumQueries(1):
+ rf2.refresh_from_db()
+ self.assertEqual(rf2.value, 'new bar')
+ with self.assertNumQueries(1):
+ self.assertEqual(rf2.name, 'new foo')
+
+ def test_custom_refresh_on_deferred_loading(self):
+ s = Secondary.objects.create()
+ rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
+ rf2 = RefreshPrimaryProxy.objects.only('related').get()
+ rf.name = 'new foo'
+ rf.value = 'new bar'
+ rf.save()
+ with self.assertNumQueries(1):
+ # Customized refresh_from_db() reloads all deferred fields on
+ # access of any of them.
+ self.assertEqual(rf2.name, 'new foo')
+ self.assertEqual(rf2.value, 'new bar')
diff --git a/tests/defer_regress/__init__.py b/tests/defer_regress/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/defer_regress/models.py b/tests/defer_regress/models.py
new file mode 100644
index 00000000..a73f539b
--- /dev/null
+++ b/tests/defer_regress/models.py
@@ -0,0 +1,106 @@
+"""
+Regression tests for defer() / only() behavior.
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Item(models.Model):
+ name = models.CharField(max_length=15)
+ text = models.TextField(default="xyzzy")
+ value = models.IntegerField()
+ other_value = models.IntegerField(default=0)
+
+ def __str__(self):
+ return self.name
+
+
+class RelatedItem(models.Model):
+ item = models.ForeignKey(Item, models.CASCADE)
+
+
+class ProxyRelated(RelatedItem):
+ class Meta:
+ proxy = True
+
+
+class Child(models.Model):
+ name = models.CharField(max_length=10)
+ value = models.IntegerField()
+
+
+@python_2_unicode_compatible
+class Leaf(models.Model):
+ name = models.CharField(max_length=10)
+ child = models.ForeignKey(Child, models.CASCADE)
+ second_child = models.ForeignKey(Child, models.SET_NULL, related_name="other", null=True)
+ value = models.IntegerField(default=42)
+
+ def __str__(self):
+ return self.name
+
+
+class ResolveThis(models.Model):
+ num = models.FloatField()
+ name = models.CharField(max_length=16)
+
+
+class Proxy(Item):
+ class Meta:
+ proxy = True
+
+
+@python_2_unicode_compatible
+class SimpleItem(models.Model):
+ name = models.CharField(max_length=15)
+ value = models.IntegerField()
+
+ def __str__(self):
+ return self.name
+
+
+class Feature(models.Model):
+ item = models.ForeignKey(SimpleItem, models.CASCADE)
+
+
+class SpecialFeature(models.Model):
+ feature = models.ForeignKey(Feature, models.CASCADE)
+
+
+class OneToOneItem(models.Model):
+ item = models.OneToOneField(Item, models.CASCADE, related_name="one_to_one_item")
+ name = models.CharField(max_length=15)
+
+
+class ItemAndSimpleItem(models.Model):
+ item = models.ForeignKey(Item, models.CASCADE)
+ simple = models.ForeignKey(SimpleItem, models.CASCADE)
+
+
+class Profile(models.Model):
+ profile1 = models.CharField(max_length=1000, default='profile1')
+
+
+class Location(models.Model):
+ location1 = models.CharField(max_length=1000, default='location1')
+
+
+class Request(models.Model):
+ profile = models.ForeignKey(Profile, models.SET_NULL, null=True, blank=True)
+ location = models.ForeignKey(Location, models.CASCADE)
+ items = models.ManyToManyField(Item)
+
+ request1 = models.CharField(default='request1', max_length=1000)
+ request2 = models.CharField(default='request2', max_length=1000)
+ request3 = models.CharField(default='request3', max_length=1000)
+ request4 = models.CharField(default='request4', max_length=1000)
+
+
+class Base(models.Model):
+ text = models.TextField()
+
+
+class Derived(Base):
+ other_text = models.TextField()
diff --git a/tests/defer_regress/tests.py b/tests/defer_regress/tests.py
new file mode 100644
index 00000000..76bdf277
--- /dev/null
+++ b/tests/defer_regress/tests.py
@@ -0,0 +1,282 @@
+from __future__ import unicode_literals
+
+from operator import attrgetter
+
+import django
+from django.contrib.contenttypes.models import ContentType
+from django.contrib.sessions.backends.db import SessionStore
+from django.db import models
+from django.db.models import Count
+from django.test import TestCase, override_settings
+
+from .models import (
+ Base, Child, Derived, Feature, Item, ItemAndSimpleItem, Leaf, Location,
+ OneToOneItem, Proxy, ProxyRelated, RelatedItem, Request, ResolveThis,
+ SimpleItem, SpecialFeature,
+)
+
+
+class DeferRegressionTest(TestCase):
+ def test_basic(self):
+ # Deferred fields should really be deferred and not accidentally use
+ # the field's default value just because they aren't passed to __init__
+
+ Item.objects.create(name="first", value=42)
+ obj = Item.objects.only("name", "other_value").get(name="first")
+ # Accessing "name" doesn't trigger a new database query. Accessing
+ # "value" or "text" should.
+ with self.assertNumQueries(0):
+ self.assertEqual(obj.name, "first")
+ self.assertEqual(obj.other_value, 0)
+
+ with self.assertNumQueries(1):
+ self.assertEqual(obj.value, 42)
+
+ with self.assertNumQueries(1):
+ self.assertEqual(obj.text, "xyzzy")
+
+ with self.assertNumQueries(0):
+ self.assertEqual(obj.text, "xyzzy")
+
+ # Regression test for #10695. Make sure different instances don't
+ # inadvertently share data in the deferred descriptor objects.
+ i = Item.objects.create(name="no I'm first", value=37)
+ items = Item.objects.only("value").order_by("-value")
+ self.assertEqual(items[0].name, "first")
+ self.assertEqual(items[1].name, "no I'm first")
+
+ RelatedItem.objects.create(item=i)
+ r = RelatedItem.objects.defer("item").get()
+ self.assertEqual(r.item_id, i.id)
+ self.assertEqual(r.item, i)
+
+ # Some further checks for select_related() and inherited model
+ # behavior (regression for #10710).
+ c1 = Child.objects.create(name="c1", value=42)
+ c2 = Child.objects.create(name="c2", value=37)
+ Leaf.objects.create(name="l1", child=c1, second_child=c2)
+
+ obj = Leaf.objects.only("name", "child").select_related()[0]
+ self.assertEqual(obj.child.name, "c1")
+
+ self.assertQuerysetEqual(
+ Leaf.objects.select_related().only("child__name", "second_child__name"), [
+ "l1",
+ ],
+ attrgetter("name")
+ )
+
+ # Models instances with deferred fields should still return the same
+ # content types as their non-deferred versions (bug #10738).
+ ctype = ContentType.objects.get_for_model
+ c1 = ctype(Item.objects.all()[0])
+ c2 = ctype(Item.objects.defer("name")[0])
+ c3 = ctype(Item.objects.only("name")[0])
+ self.assertTrue(c1 is c2 is c3)
+
+ # Regression for #10733 - only() can be used on a model with two
+ # foreign keys.
+ results = Leaf.objects.only("name", "child", "second_child").select_related()
+ self.assertEqual(results[0].child.name, "c1")
+ self.assertEqual(results[0].second_child.name, "c2")
+
+ results = Leaf.objects.only(
+ "name", "child", "second_child", "child__name", "second_child__name"
+ ).select_related()
+ self.assertEqual(results[0].child.name, "c1")
+ self.assertEqual(results[0].second_child.name, "c2")
+
+ # Regression for #16409 - make sure defer() and only() work with annotate()
+ self.assertIsInstance(
+ list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
+ list)
+ self.assertIsInstance(
+ list(SimpleItem.objects.annotate(Count('feature')).only('name')),
+ list)
+
+ @override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer')
+ def test_ticket_12163(self):
+ # Test for #12163 - Pickling error saving session with unsaved model
+ # instances.
+ SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'
+
+ item = Item()
+ item._deferred = False
+ s = SessionStore(SESSION_KEY)
+ s.clear()
+ s["item"] = item
+ s.save(must_create=True)
+
+ s = SessionStore(SESSION_KEY)
+ s.modified = True
+ s.save()
+
+ i2 = s["item"]
+ self.assertFalse(i2._deferred)
+
+ def test_ticket_16409(self):
+ # Regression for #16409 - make sure defer() and only() work with annotate()
+ self.assertIsInstance(
+ list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
+ list)
+ self.assertIsInstance(
+ list(SimpleItem.objects.annotate(Count('feature')).only('name')),
+ list)
+
+ def test_ticket_23270(self):
+ Derived.objects.create(text="foo", other_text="bar")
+ with self.assertNumQueries(1):
+ obj = Base.objects.select_related("derived").defer("text")[0]
+ self.assertIsInstance(obj.derived, Derived)
+ self.assertEqual("bar", obj.derived.other_text)
+ self.assertNotIn("text", obj.__dict__)
+ self.assertEqual(1, obj.derived.base_ptr_id)
+
+ def test_only_and_defer_usage_on_proxy_models(self):
+ # Regression for #15790 - only() broken for proxy models
+ proxy = Proxy.objects.create(name="proxy", value=42)
+
+ msg = 'QuerySet.only() return bogus results with proxy models'
+ dp = Proxy.objects.only('other_value').get(pk=proxy.pk)
+ self.assertEqual(dp.name, proxy.name, msg=msg)
+ self.assertEqual(dp.value, proxy.value, msg=msg)
+
+ # also test things with .defer()
+ msg = 'QuerySet.defer() return bogus results with proxy models'
+ dp = Proxy.objects.defer('name', 'text', 'value').get(pk=proxy.pk)
+ self.assertEqual(dp.name, proxy.name, msg=msg)
+ self.assertEqual(dp.value, proxy.value, msg=msg)
+
+ def test_resolve_columns(self):
+ ResolveThis.objects.create(num=5.0, name='Foobar')
+ qs = ResolveThis.objects.defer('num')
+ self.assertEqual(1, qs.count())
+ self.assertEqual('Foobar', qs[0].name)
+
+ def test_reverse_one_to_one_relations(self):
+ # Refs #14694. Test reverse relations which are known unique (reverse
+ # side has o2ofield or unique FK) - the o2o case
+ item = Item.objects.create(name="first", value=42)
+ o2o = OneToOneItem.objects.create(item=item, name="second")
+ self.assertEqual(len(Item.objects.defer('one_to_one_item__name')), 1)
+ self.assertEqual(len(Item.objects.select_related('one_to_one_item')), 1)
+ self.assertEqual(len(Item.objects.select_related(
+ 'one_to_one_item').defer('one_to_one_item__name')), 1)
+ self.assertEqual(len(Item.objects.select_related('one_to_one_item').defer('value')), 1)
+ # Make sure that `only()` doesn't break when we pass in a unique relation,
+ # rather than a field on the relation.
+ self.assertEqual(len(Item.objects.only('one_to_one_item')), 1)
+ with self.assertNumQueries(1):
+ i = Item.objects.select_related('one_to_one_item')[0]
+ self.assertEqual(i.one_to_one_item.pk, o2o.pk)
+ self.assertEqual(i.one_to_one_item.name, "second")
+ with self.assertNumQueries(1):
+ i = Item.objects.select_related('one_to_one_item').defer(
+ 'value', 'one_to_one_item__name')[0]
+ self.assertEqual(i.one_to_one_item.pk, o2o.pk)
+ self.assertEqual(i.name, "first")
+ with self.assertNumQueries(1):
+ self.assertEqual(i.one_to_one_item.name, "second")
+ with self.assertNumQueries(1):
+ self.assertEqual(i.value, 42)
+
+ def test_defer_with_select_related(self):
+ item1 = Item.objects.create(name="first", value=47)
+ item2 = Item.objects.create(name="second", value=42)
+ simple = SimpleItem.objects.create(name="simple", value="23")
+ ItemAndSimpleItem.objects.create(item=item1, simple=simple)
+
+ obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
+ self.assertEqual(obj.item, item1)
+ self.assertEqual(obj.item_id, item1.id)
+
+ obj.item = item2
+ obj.save()
+
+ obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
+ self.assertEqual(obj.item, item2)
+ self.assertEqual(obj.item_id, item2.id)
+
+ def test_proxy_model_defer_with_select_related(self):
+ # Regression for #22050
+ item = Item.objects.create(name="first", value=47)
+ RelatedItem.objects.create(item=item)
+ # Defer fields with only()
+ obj = ProxyRelated.objects.all().select_related().only('item__name')[0]
+ with self.assertNumQueries(0):
+ self.assertEqual(obj.item.name, "first")
+ with self.assertNumQueries(1):
+ self.assertEqual(obj.item.value, 47)
+
+ def test_only_with_select_related(self):
+ # Test for #17485.
+ item = SimpleItem.objects.create(name='first', value=47)
+ feature = Feature.objects.create(item=item)
+ SpecialFeature.objects.create(feature=feature)
+
+ qs = Feature.objects.only('item__name').select_related('item')
+ self.assertEqual(len(qs), 1)
+
+ qs = SpecialFeature.objects.only('feature__item__name').select_related('feature__item')
+ self.assertEqual(len(qs), 1)
+
+
+class DeferAnnotateSelectRelatedTest(TestCase):
+ def test_defer_annotate_select_related(self):
+ location = Location.objects.create()
+ Request.objects.create(location=location)
+ self.assertIsInstance(
+ list(Request.objects.annotate(Count('items')).select_related('profile', 'location')
+ .only('profile', 'location')),
+ list
+ )
+ self.assertIsInstance(
+ list(Request.objects.annotate(Count('items')).select_related('profile', 'location')
+ .only('profile__profile1', 'location__location1')),
+ list
+ )
+ self.assertIsInstance(
+ list(Request.objects.annotate(Count('items')).select_related('profile', 'location')
+ .defer('request1', 'request2', 'request3', 'request4')),
+ list
+ )
+
+
+class DeferDeletionSignalsTests(TestCase):
+ senders = [Item, Proxy]
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.item_pk = Item.objects.create(value=1).pk
+
+ def setUp(self):
+ self.pre_delete_senders = []
+ self.post_delete_senders = []
+ for sender in self.senders:
+ models.signals.pre_delete.connect(self.pre_delete_receiver, sender)
+ models.signals.post_delete.connect(self.post_delete_receiver, sender)
+
+ def tearDown(self):
+ for sender in self.senders:
+ models.signals.pre_delete.disconnect(self.pre_delete_receiver, sender)
+ models.signals.post_delete.disconnect(self.post_delete_receiver, sender)
+
+ def pre_delete_receiver(self, sender, **kwargs):
+ self.pre_delete_senders.append(sender)
+
+ def post_delete_receiver(self, sender, **kwargs):
+ self.post_delete_senders.append(sender)
+
+ def test_delete_defered_model(self):
+ if django.VERSION < (1, 10, 0):
+ self.skipTest('This does not work on older Django')
+ Item.objects.only('value').get(pk=self.item_pk).delete()
+ self.assertEqual(self.pre_delete_senders, [Item])
+ self.assertEqual(self.post_delete_senders, [Item])
+
+ def test_delete_defered_proxy_model(self):
+ if django.VERSION < (1, 10, 0):
+ self.skipTest('This does not work on older Django')
+ Proxy.objects.only('value').get(pk=self.item_pk).delete()
+ self.assertEqual(self.pre_delete_senders, [Proxy])
+ self.assertEqual(self.post_delete_senders, [Proxy])
diff --git a/tests/delete_regress/__init__.py b/tests/delete_regress/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/delete_regress/models.py b/tests/delete_regress/models.py
new file mode 100644
index 00000000..f0145de6
--- /dev/null
+++ b/tests/delete_regress/models.py
@@ -0,0 +1,141 @@
+from django.contrib.contenttypes.fields import (
+ GenericForeignKey, GenericRelation,
+)
+from django.contrib.contenttypes.models import ContentType
+from django.db import models
+
+
+class Award(models.Model):
+ name = models.CharField(max_length=25)
+ object_id = models.PositiveIntegerField()
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ content_object = GenericForeignKey()
+
+
+class AwardNote(models.Model):
+ award = models.ForeignKey(Award, models.CASCADE)
+ note = models.CharField(max_length=100)
+
+
+class Person(models.Model):
+ name = models.CharField(max_length=25)
+ awards = GenericRelation(Award)
+
+
+class Book(models.Model):
+ pagecount = models.IntegerField()
+
+
+class Toy(models.Model):
+ name = models.CharField(max_length=50)
+
+
+class Child(models.Model):
+ name = models.CharField(max_length=50)
+ toys = models.ManyToManyField(Toy, through='PlayedWith')
+
+
+class PlayedWith(models.Model):
+ child = models.ForeignKey(Child, models.CASCADE)
+ toy = models.ForeignKey(Toy, models.CASCADE)
+ date = models.DateField(db_column='date_col')
+
+
+class PlayedWithNote(models.Model):
+ played = models.ForeignKey(PlayedWith, models.CASCADE)
+ note = models.TextField()
+
+
+class Contact(models.Model):
+ label = models.CharField(max_length=100)
+
+
+class Email(Contact):
+ email_address = models.EmailField(max_length=100)
+
+
+class Researcher(models.Model):
+ contacts = models.ManyToManyField(Contact, related_name="research_contacts")
+
+
+class Food(models.Model):
+ name = models.CharField(max_length=20, unique=True)
+
+
+class Eaten(models.Model):
+ food = models.ForeignKey(Food, models.CASCADE, to_field="name")
+ meal = models.CharField(max_length=20)
+
+
+# Models for #15776
+
+
+class Policy(models.Model):
+ policy_number = models.CharField(max_length=10)
+
+
+class Version(models.Model):
+ policy = models.ForeignKey(Policy, models.CASCADE)
+
+
+class Location(models.Model):
+ version = models.ForeignKey(Version, models.SET_NULL, blank=True, null=True)
+
+
+class Item(models.Model):
+ version = models.ForeignKey(Version, models.CASCADE)
+ location = models.ForeignKey(Location, models.SET_NULL, blank=True, null=True)
+
+# Models for #16128
+
+
+class File(models.Model):
+ pass
+
+
+class Image(File):
+ class Meta:
+ proxy = True
+
+
+class Photo(Image):
+ class Meta:
+ proxy = True
+
+
+class FooImage(models.Model):
+ my_image = models.ForeignKey(Image, models.CASCADE)
+
+
+class FooFile(models.Model):
+ my_file = models.ForeignKey(File, models.CASCADE)
+
+
+class FooPhoto(models.Model):
+ my_photo = models.ForeignKey(Photo, models.CASCADE)
+
+
+class FooFileProxy(FooFile):
+ class Meta:
+ proxy = True
+
+
+class OrgUnit(models.Model):
+ name = models.CharField(max_length=64, unique=True)
+
+
+class Login(models.Model):
+ description = models.CharField(max_length=32)
+ orgunit = models.ForeignKey(OrgUnit, models.CASCADE)
+
+
+class House(models.Model):
+ address = models.CharField(max_length=32)
+
+
+class OrderedPerson(models.Model):
+ name = models.CharField(max_length=32)
+ lives_in = models.ForeignKey(House, models.CASCADE)
+
+ class Meta:
+ ordering = ['name']
diff --git a/tests/delete_regress/tests.py b/tests/delete_regress/tests.py
new file mode 100644
index 00000000..21287337
--- /dev/null
+++ b/tests/delete_regress/tests.py
@@ -0,0 +1,347 @@
+from __future__ import unicode_literals
+
+import datetime
+
+from django.db import connection, models, transaction
+from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
+
+from .models import (
+ Award, AwardNote, Book, Child, Eaten, Email, File, Food, FooFile,
+ FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
+ OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
+ Researcher, Toy, Version,
+)
+
+
+# Can't run this test under SQLite, because you can't
+# get two connections to an in-memory database.
+@skipUnlessDBFeature('test_db_allows_multiple_connections')
+class DeleteLockingTest(TransactionTestCase):
+
+ available_apps = ['delete_regress']
+
+ def setUp(self):
+ # Create a second connection to the default database
+ self.conn2 = connection.copy()
+ self.conn2.set_autocommit(False)
+
+ def tearDown(self):
+ # Close down the second connection.
+ self.conn2.rollback()
+ self.conn2.close()
+
+ def test_concurrent_delete(self):
+ """Concurrent deletes don't collide and lock the database (#9479)."""
+ with transaction.atomic():
+ Book.objects.create(id=1, pagecount=100)
+ Book.objects.create(id=2, pagecount=200)
+ Book.objects.create(id=3, pagecount=300)
+
+ with transaction.atomic():
+ # Start a transaction on the main connection.
+ self.assertEqual(3, Book.objects.count())
+
+ # Delete something using another database connection.
+ with self.conn2.cursor() as cursor2:
+ cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
+ self.conn2.commit()
+
+ # In the same transaction on the main connection, perform a
+ # queryset delete that covers the object deleted with the other
+ # connection. This causes an infinite loop under MySQL InnoDB
+ # unless we keep track of already deleted objects.
+ Book.objects.filter(pagecount__lt=250).delete()
+
+ self.assertEqual(1, Book.objects.count())
+
+
+class DeleteCascadeTests(TestCase):
+ def test_generic_relation_cascade(self):
+ """
+ Django cascades deletes through generic-related objects to their
+ reverse relations.
+ """
+ person = Person.objects.create(name='Nelson Mandela')
+ award = Award.objects.create(name='Nobel', content_object=person)
+ AwardNote.objects.create(note='a peace prize',
+ award=award)
+ self.assertEqual(AwardNote.objects.count(), 1)
+ person.delete()
+ self.assertEqual(Award.objects.count(), 0)
+ # first two asserts are just sanity checks, this is the kicker:
+ self.assertEqual(AwardNote.objects.count(), 0)
+
+ def test_fk_to_m2m_through(self):
+ """
+ If an M2M relationship has an explicitly-specified through model, and
+ some other model has an FK to that through model, deletion is cascaded
+ from one of the participants in the M2M, to the through model, to its
+ related model.
+ """
+ juan = Child.objects.create(name='Juan')
+ paints = Toy.objects.create(name='Paints')
+ played = PlayedWith.objects.create(child=juan, toy=paints,
+ date=datetime.date.today())
+ PlayedWithNote.objects.create(played=played,
+ note='the next Jackson Pollock')
+ self.assertEqual(PlayedWithNote.objects.count(), 1)
+ paints.delete()
+ self.assertEqual(PlayedWith.objects.count(), 0)
+ # first two asserts just sanity checks, this is the kicker:
+ self.assertEqual(PlayedWithNote.objects.count(), 0)
+
+ def test_15776(self):
+ policy = Policy.objects.create(pk=1, policy_number="1234")
+ version = Version.objects.create(policy=policy)
+ location = Location.objects.create(version=version)
+ Item.objects.create(version=version, location=location)
+ policy.delete()
+
+
+class DeleteCascadeTransactionTests(TransactionTestCase):
+
+ available_apps = ['delete_regress']
+
+ def test_inheritance(self):
+ """
+ Auto-created many-to-many through tables referencing a parent model are
+ correctly found by the delete cascade when a child of that parent is
+ deleted.
+
+ Refs #14896.
+ """
+ r = Researcher.objects.create()
+ email = Email.objects.create(
+ label="office-email", email_address="carl@science.edu"
+ )
+ r.contacts.add(email)
+
+ email.delete()
+
+ def test_to_field(self):
+ """
+ Cascade deletion works with ForeignKey.to_field set to non-PK.
+ """
+ apple = Food.objects.create(name="apple")
+ Eaten.objects.create(food=apple, meal="lunch")
+
+ apple.delete()
+ self.assertFalse(Food.objects.exists())
+ self.assertFalse(Eaten.objects.exists())
+
+
+class LargeDeleteTests(TestCase):
+ def test_large_deletes(self):
+ "Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
+ for x in range(300):
+ Book.objects.create(pagecount=x + 100)
+ # attach a signal to make sure we will not fast-delete
+
+ def noop(*args, **kwargs):
+ pass
+ models.signals.post_delete.connect(noop, sender=Book)
+ Book.objects.all().delete()
+ models.signals.post_delete.disconnect(noop, sender=Book)
+ self.assertEqual(Book.objects.count(), 0)
+
+
+class ProxyDeleteTest(TestCase):
+ """
+ Tests on_delete behavior for proxy models.
+
+ See #16128.
+ """
+ def create_image(self):
+ """Return an Image referenced by both a FooImage and a FooFile."""
+ # Create an Image
+ test_image = Image()
+ test_image.save()
+ foo_image = FooImage(my_image=test_image)
+ foo_image.save()
+
+ # Get the Image instance as a File
+ test_file = File.objects.get(pk=test_image.pk)
+ foo_file = FooFile(my_file=test_file)
+ foo_file.save()
+
+ return test_image
+
+ def test_delete_proxy(self):
+ """
+ Deleting the *proxy* instance bubbles through to its non-proxy and
+ *all* referring objects are deleted.
+ """
+ self.create_image()
+
+ Image.objects.all().delete()
+
+ # An Image deletion == File deletion
+ self.assertEqual(len(Image.objects.all()), 0)
+ self.assertEqual(len(File.objects.all()), 0)
+
+ # The Image deletion cascaded and *all* references to it are deleted.
+ self.assertEqual(len(FooImage.objects.all()), 0)
+ self.assertEqual(len(FooFile.objects.all()), 0)
+
+ def test_delete_proxy_of_proxy(self):
+ """
+ Deleting a proxy-of-proxy instance should bubble through to its proxy
+ and non-proxy parents, deleting *all* referring objects.
+ """
+ test_image = self.create_image()
+
+ # Get the Image as a Photo
+ test_photo = Photo.objects.get(pk=test_image.pk)
+ foo_photo = FooPhoto(my_photo=test_photo)
+ foo_photo.save()
+
+ Photo.objects.all().delete()
+
+ # A Photo deletion == Image deletion == File deletion
+ self.assertEqual(len(Photo.objects.all()), 0)
+ self.assertEqual(len(Image.objects.all()), 0)
+ self.assertEqual(len(File.objects.all()), 0)
+
+ # The Photo deletion should have cascaded and deleted *all*
+ # references to it.
+ self.assertEqual(len(FooPhoto.objects.all()), 0)
+ self.assertEqual(len(FooFile.objects.all()), 0)
+ self.assertEqual(len(FooImage.objects.all()), 0)
+
+ def test_delete_concrete_parent(self):
+ """
+ Deleting an instance of a concrete model should also delete objects
+ referencing its proxy subclass.
+ """
+ self.create_image()
+
+ File.objects.all().delete()
+
+ # A File deletion == Image deletion
+ self.assertEqual(len(File.objects.all()), 0)
+ self.assertEqual(len(Image.objects.all()), 0)
+
+ # The File deletion should have cascaded and deleted *all* references
+ # to it.
+ self.assertEqual(len(FooFile.objects.all()), 0)
+ self.assertEqual(len(FooImage.objects.all()), 0)
+
+ def test_delete_proxy_pair(self):
+ """
+ If a pair of proxy models are linked by an FK from one concrete parent
+ to the other, deleting one proxy model cascade-deletes the other, and
+ the deletion happens in the right order (not triggering an
+ IntegrityError on databases unable to defer integrity checks).
+
+ Refs #17918.
+ """
+ # Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
+ # which has an FK to File)
+ image = Image.objects.create()
+ as_file = File.objects.get(pk=image.pk)
+ FooFileProxy.objects.create(my_file=as_file)
+
+ Image.objects.all().delete()
+
+ self.assertEqual(len(FooFileProxy.objects.all()), 0)
+
+ def test_19187_values(self):
+ with self.assertRaises(TypeError):
+ Image.objects.values().delete()
+ with self.assertRaises(TypeError):
+ Image.objects.values_list().delete()
+
+
+class Ticket19102Tests(TestCase):
+ """
+ Test different queries which alter the SELECT clause of the query. We
+ also must be using a subquery for the deletion (that is, the original
+ query has a join in it). The deletion should be done as "fast-path"
+ deletion (that is, just one query for the .delete() call).
+
+ Note that .values() is not tested here on purpose. .values().delete()
+ doesn't work for non fast-path deletes at all.
+ """
+ def setUp(self):
+ self.o1 = OrgUnit.objects.create(name='o1')
+ self.o2 = OrgUnit.objects.create(name='o2')
+ self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
+ self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
+
+ @skipUnlessDBFeature("update_can_self_select")
+ def test_ticket_19102_annotate(self):
+ with self.assertNumQueries(1):
+ Login.objects.order_by('description').filter(
+ orgunit__name__isnull=False
+ ).annotate(
+ n=models.Count('description')
+ ).filter(
+ n=1, pk=self.l1.pk
+ ).delete()
+ self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
+ self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
+
+ @skipUnlessDBFeature("update_can_self_select")
+ def test_ticket_19102_extra(self):
+ with self.assertNumQueries(1):
+ Login.objects.order_by('description').filter(
+ orgunit__name__isnull=False
+ ).extra(
+ select={'extraf': '1'}
+ ).filter(
+ pk=self.l1.pk
+ ).delete()
+ self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
+ self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
+
+ @skipUnlessDBFeature("update_can_self_select")
+ @skipUnlessDBFeature('can_distinct_on_fields')
+ def test_ticket_19102_distinct_on(self):
+ # Both Login objs should have same description so that only the one
+ # having smaller PK will be deleted.
+ Login.objects.update(description='description')
+ with self.assertNumQueries(1):
+ Login.objects.distinct('description').order_by('pk').filter(
+ orgunit__name__isnull=False
+ ).delete()
+ # Assumed that l1 which is created first has smaller PK.
+ self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
+ self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
+
+ @skipUnlessDBFeature("update_can_self_select")
+ def test_ticket_19102_select_related(self):
+ with self.assertNumQueries(1):
+ Login.objects.filter(
+ pk=self.l1.pk
+ ).filter(
+ orgunit__name__isnull=False
+ ).order_by(
+ 'description'
+ ).select_related('orgunit').delete()
+ self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
+ self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
+
+ @skipUnlessDBFeature("update_can_self_select")
+ def test_ticket_19102_defer(self):
+ with self.assertNumQueries(1):
+ Login.objects.filter(
+ pk=self.l1.pk
+ ).filter(
+ orgunit__name__isnull=False
+ ).order_by(
+ 'description'
+ ).only('id').delete()
+ self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
+ self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
+
+
+class OrderedDeleteTests(TestCase):
+ def test_meta_ordered_delete(self):
+ # When a subquery is performed by deletion code, the subquery must be
+ # cleared of all ordering. There was a but that caused _meta ordering
+ # to be used. Refs #19720.
+ h = House.objects.create(address='Foo')
+ OrderedPerson.objects.create(name='Jack', lives_in=h)
+ OrderedPerson.objects.create(name='Bob', lives_in=h)
+ OrderedPerson.objects.filter(lives_in__address='Foo').delete()
+ self.assertEqual(OrderedPerson.objects.count(), 0)
diff --git a/tests/distinct_on_fields/__init__.py b/tests/distinct_on_fields/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/distinct_on_fields/models.py b/tests/distinct_on_fields/models.py
new file mode 100644
index 00000000..2c33f3ad
--- /dev/null
+++ b/tests/distinct_on_fields/models.py
@@ -0,0 +1,61 @@
+from __future__ import unicode_literals
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Tag(models.Model):
+ name = models.CharField(max_length=10)
+ parent = models.ForeignKey(
+ 'self',
+ models.SET_NULL,
+ blank=True,
+ null=True,
+ related_name='children',
+ )
+
+ class Meta:
+ ordering = ['name']
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Celebrity(models.Model):
+ name = models.CharField("Name", max_length=20)
+ greatest_fan = models.ForeignKey(
+ "Fan",
+ models.SET_NULL,
+ null=True,
+ unique=True,
+ )
+
+ def __str__(self):
+ return self.name
+
+
+class Fan(models.Model):
+ fan_of = models.ForeignKey(Celebrity, models.CASCADE)
+
+
+@python_2_unicode_compatible
+class Staff(models.Model):
+ id = models.IntegerField(primary_key=True)
+ name = models.CharField(max_length=50)
+ organisation = models.CharField(max_length=100)
+ tags = models.ManyToManyField(Tag, through='StaffTag')
+ coworkers = models.ManyToManyField('self')
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class StaffTag(models.Model):
+ staff = models.ForeignKey(Staff, models.CASCADE)
+ tag = models.ForeignKey(Tag, models.CASCADE)
+
+ def __str__(self):
+ return "%s -> %s" % (self.tag, self.staff)
diff --git a/tests/distinct_on_fields/tests.py b/tests/distinct_on_fields/tests.py
new file mode 100644
index 00000000..e7445003
--- /dev/null
+++ b/tests/distinct_on_fields/tests.py
@@ -0,0 +1,130 @@
+from __future__ import unicode_literals
+
+from django.db.models import Max
+from django.test import TestCase, skipUnlessDBFeature
+from django.test.utils import str_prefix
+
+from .models import Celebrity, Fan, Staff, StaffTag, Tag
+
+
+@skipUnlessDBFeature('can_distinct_on_fields')
+@skipUnlessDBFeature('supports_nullable_unique_constraints')
+class DistinctOnTests(TestCase):
+ def setUp(self):
+ t1 = Tag.objects.create(name='t1')
+ Tag.objects.create(name='t2', parent=t1)
+ t3 = Tag.objects.create(name='t3', parent=t1)
+ Tag.objects.create(name='t4', parent=t3)
+ Tag.objects.create(name='t5', parent=t3)
+
+ self.p1_o1 = Staff.objects.create(id=1, name="p1", organisation="o1")
+ self.p2_o1 = Staff.objects.create(id=2, name="p2", organisation="o1")
+ self.p3_o1 = Staff.objects.create(id=3, name="p3", organisation="o1")
+ self.p1_o2 = Staff.objects.create(id=4, name="p1", organisation="o2")
+ self.p1_o1.coworkers.add(self.p2_o1, self.p3_o1)
+ StaffTag.objects.create(staff=self.p1_o1, tag=t1)
+ StaffTag.objects.create(staff=self.p1_o1, tag=t1)
+
+ celeb1 = Celebrity.objects.create(name="c1")
+ celeb2 = Celebrity.objects.create(name="c2")
+
+ self.fan1 = Fan.objects.create(fan_of=celeb1)
+ self.fan2 = Fan.objects.create(fan_of=celeb1)
+ self.fan3 = Fan.objects.create(fan_of=celeb2)
+
+ def test_basic_distinct_on(self):
+ """QuerySet.distinct('field', ...) works"""
+ # (qset, expected) tuples
+ qsets = (
+ (
+ Staff.objects.distinct().order_by('name'),
+ ['', '', '', ''],
+ ),
+ (
+ Staff.objects.distinct('name').order_by('name'),
+ ['', '', ''],
+ ),
+ (
+ Staff.objects.distinct('organisation').order_by('organisation', 'name'),
+ ['', ''],
+ ),
+ (
+ Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
+ ['', '', '', ''],
+ ),
+ (
+ Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).distinct('name').order_by('name'),
+ ['', ''],
+ ),
+ # Does combining querysets work?
+ (
+ (Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).
+ distinct('name').order_by('name') |
+ Celebrity.objects.filter(fan__in=[self.fan3]).
+ distinct('name').order_by('name')),
+ ['', ''],
+ ),
+ (
+ StaffTag.objects.distinct('staff', 'tag'),
+ [' p1>'],
+ ),
+ (
+ Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
+ ['', '', ''],
+ ),
+ (
+ StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
+ [' p1>'],
+ ),
+ # Fetch the alphabetically first coworker for each worker
+ (
+ (Staff.objects.distinct('id').order_by('id', 'coworkers__name').
+ values_list('id', 'coworkers__name')),
+ [str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
+ str_prefix("(3, %(_)s'p1')"), "(4, None)"]
+ ),
+ )
+ for qset, expected in qsets:
+ self.assertQuerysetEqual(qset, expected)
+ self.assertEqual(qset.count(), len(expected))
+
+ # Combining queries with different distinct_fields is not allowed.
+ base_qs = Celebrity.objects.all()
+ with self.assertRaisesMessage(AssertionError, "Cannot combine queries with different distinct fields."):
+ base_qs.distinct('id') & base_qs.distinct('name')
+
+ # Test join unreffing
+ c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
+ self.assertIn('OUTER JOIN', str(c1.query))
+ c2 = c1.distinct('pk')
+ self.assertNotIn('OUTER JOIN', str(c2.query))
+
+ def test_distinct_not_implemented_checks(self):
+ # distinct + annotate not allowed
+ with self.assertRaises(NotImplementedError):
+ Celebrity.objects.annotate(Max('id')).distinct('id')[0]
+ with self.assertRaises(NotImplementedError):
+ Celebrity.objects.distinct('id').annotate(Max('id'))[0]
+
+ # However this check is done only when the query executes, so you
+ # can use distinct() to remove the fields before execution.
+ Celebrity.objects.distinct('id').annotate(Max('id')).distinct()[0]
+ # distinct + aggregate not allowed
+ with self.assertRaises(NotImplementedError):
+ Celebrity.objects.distinct('id').aggregate(Max('id'))
+
+ def test_distinct_on_in_ordered_subquery(self):
+ qs = Staff.objects.distinct('name').order_by('name', 'id')
+ qs = Staff.objects.filter(pk__in=qs).order_by('name')
+ self.assertSequenceEqual(qs, [self.p1_o1, self.p2_o1, self.p3_o1])
+ qs = Staff.objects.distinct('name').order_by('name', '-id')
+ qs = Staff.objects.filter(pk__in=qs).order_by('name')
+ self.assertSequenceEqual(qs, [self.p1_o2, self.p2_o1, self.p3_o1])
+
+ def test_distinct_on_get_ordering_preserved(self):
+ """
+ Ordering shouldn't be cleared when distinct on fields are specified.
+ refs #25081
+ """
+ staff = Staff.objects.distinct('name').order_by('name', '-organisation').get(name='p1')
+ self.assertEqual(staff.organisation, 'o2')
diff --git a/tests/expressions/__init__.py b/tests/expressions/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/expressions/models.py b/tests/expressions/models.py
new file mode 100644
index 00000000..42e4a37b
--- /dev/null
+++ b/tests/expressions/models.py
@@ -0,0 +1,95 @@
+"""
+Tests for F() query expression syntax.
+"""
+import uuid
+
+from django.db import models
+
+
+class Employee(models.Model):
+ firstname = models.CharField(max_length=50)
+ lastname = models.CharField(max_length=50)
+ salary = models.IntegerField(blank=True, null=True)
+
+ def __str__(self):
+ return '%s %s' % (self.firstname, self.lastname)
+
+
+class Company(models.Model):
+ name = models.CharField(max_length=100)
+ num_employees = models.PositiveIntegerField()
+ num_chairs = models.PositiveIntegerField()
+ ceo = models.ForeignKey(
+ Employee,
+ models.CASCADE,
+ related_name='company_ceo_set',
+ )
+ point_of_contact = models.ForeignKey(
+ Employee,
+ models.SET_NULL,
+ related_name='company_point_of_contact_set',
+ null=True,
+ )
+
+ def __str__(self):
+ return self.name
+
+
+class Number(models.Model):
+ integer = models.BigIntegerField(db_column='the_integer')
+ float = models.FloatField(null=True, db_column='the_float')
+
+ def __str__(self):
+ return '%i, %.3f' % (self.integer, self.float)
+
+
+class Experiment(models.Model):
+ name = models.CharField(max_length=24)
+ assigned = models.DateField()
+ completed = models.DateField()
+ estimated_time = models.DurationField()
+ start = models.DateTimeField()
+ end = models.DateTimeField()
+
+ class Meta:
+ db_table = 'expressions_ExPeRiMeNt'
+ ordering = ('name',)
+
+ def duration(self):
+ return self.end - self.start
+
+
+class Result(models.Model):
+ experiment = models.ForeignKey(Experiment, models.CASCADE)
+ result_time = models.DateTimeField()
+
+ def __str__(self):
+ return "Result at %s" % self.result_time
+
+
+class Time(models.Model):
+ time = models.TimeField(null=True)
+
+ def __str__(self):
+ return "%s" % self.time
+
+
+class SimulationRun(models.Model):
+ start = models.ForeignKey(Time, models.CASCADE, null=True, related_name='+')
+ end = models.ForeignKey(Time, models.CASCADE, null=True, related_name='+')
+ midpoint = models.TimeField()
+
+ def __str__(self):
+ return "%s (%s to %s)" % (self.midpoint, self.start, self.end)
+
+
+class UUIDPK(models.Model):
+ id = models.UUIDField(primary_key=True, default=uuid.uuid4)
+
+
+class UUID(models.Model):
+ uuid = models.UUIDField(null=True)
+ uuid_fk = models.ForeignKey(UUIDPK, models.CASCADE, null=True)
+
+ def __str__(self):
+ return "%s" % self.uuid
diff --git a/tests/expressions/test_queryset_values.py b/tests/expressions/test_queryset_values.py
new file mode 100644
index 00000000..e2645979
--- /dev/null
+++ b/tests/expressions/test_queryset_values.py
@@ -0,0 +1,62 @@
+from django.db.models.aggregates import Sum
+from django.db.models.expressions import F
+from django.test import TestCase
+
+from .models import Company, Employee
+
+
+class ValuesExpressionsTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ Company.objects.create(
+ name='Example Inc.', num_employees=2300, num_chairs=5,
+ ceo=Employee.objects.create(firstname='Joe', lastname='Smith', salary=10)
+ )
+ Company.objects.create(
+ name='Foobar Ltd.', num_employees=3, num_chairs=4,
+ ceo=Employee.objects.create(firstname='Frank', lastname='Meyer', salary=20)
+ )
+ Company.objects.create(
+ name='Test GmbH', num_employees=32, num_chairs=1,
+ ceo=Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30)
+ )
+
+ def test_values_expression(self):
+ self.assertSequenceEqual(
+ Company.objects.values(salary=F('ceo__salary')),
+ [{'salary': 10}, {'salary': 20}, {'salary': 30}],
+ )
+
+ def test_values_expression_group_by(self):
+ # values() applies annotate() first, so values selected are grouped by
+ # id, not firstname.
+ Employee.objects.create(firstname='Joe', lastname='Jones', salary=2)
+ joes = Employee.objects.filter(firstname='Joe')
+ self.assertSequenceEqual(
+ joes.values('firstname', sum_salary=Sum('salary')).order_by('sum_salary'),
+ [{'firstname': 'Joe', 'sum_salary': 2}, {'firstname': 'Joe', 'sum_salary': 10}],
+ )
+ self.assertSequenceEqual(
+ joes.values('firstname').annotate(sum_salary=Sum('salary')),
+ [{'firstname': 'Joe', 'sum_salary': 12}]
+ )
+
+ def test_chained_values_with_expression(self):
+ Employee.objects.create(firstname='Joe', lastname='Jones', salary=2)
+ joes = Employee.objects.filter(firstname='Joe').values('firstname')
+ self.assertSequenceEqual(
+ joes.values('firstname', sum_salary=Sum('salary')),
+ [{'firstname': 'Joe', 'sum_salary': 12}]
+ )
+ self.assertSequenceEqual(
+ joes.values(sum_salary=Sum('salary')),
+ [{'sum_salary': 12}]
+ )
+
+ def test_values_list_expression(self):
+ companies = Company.objects.values_list('name', F('ceo__salary'))
+ self.assertSequenceEqual(companies, [('Example Inc.', 10), ('Foobar Ltd.', 20), ('Test GmbH', 30)])
+
+ def test_values_list_expression_flat(self):
+ companies = Company.objects.values_list(F('ceo__salary'), flat=True)
+ self.assertSequenceEqual(companies, (10, 20, 30))
diff --git a/tests/expressions/tests.py b/tests/expressions/tests.py
new file mode 100644
index 00000000..4789a244
--- /dev/null
+++ b/tests/expressions/tests.py
@@ -0,0 +1,1519 @@
+import datetime
+import pickle
+import unittest
+import uuid
+from copy import deepcopy
+
+from django.core.exceptions import FieldError
+from django.db import DatabaseError, connection, models, transaction
+from django.db.models import CharField, Q, TimeField, UUIDField
+from django.db.models.aggregates import (
+ Avg, Count, Max, Min, StdDev, Sum, Variance,
+)
+from django.db.models.expressions import (
+ Case, Col, Combinable, Exists, ExpressionList, ExpressionWrapper, F, Func,
+ OrderBy, OuterRef, Random, RawSQL, Ref, Subquery, Value, When,
+)
+from django.db.models.functions import (
+ Coalesce, Concat, Length, Lower, Substr, Upper,
+)
+from django.db.models.sql import constants
+from django.db.models.sql.datastructures import Join
+from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
+from django.test.utils import Approximate
+
+from .models import (
+ UUID, UUIDPK, Company, Employee, Experiment, Number, Result, SimulationRun,
+ Time,
+)
+
+
+class BasicExpressionsTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.example_inc = Company.objects.create(
+ name="Example Inc.", num_employees=2300, num_chairs=5,
+ ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
+ )
+ cls.foobar_ltd = Company.objects.create(
+ name="Foobar Ltd.", num_employees=3, num_chairs=4,
+ ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
+ )
+ cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30)
+ cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max)
+
+ def setUp(self):
+ self.company_query = Company.objects.values(
+ "name", "num_employees", "num_chairs"
+ ).order_by(
+ "name", "num_employees", "num_chairs"
+ )
+
+ def test_annotate_values_aggregate(self):
+ companies = Company.objects.annotate(
+ salaries=F('ceo__salary'),
+ ).values('num_employees', 'salaries').aggregate(
+ result=Sum(
+ F('salaries') + F('num_employees'),
+ output_field=models.IntegerField()
+ ),
+ )
+ self.assertEqual(companies['result'], 2395)
+
+ def test_annotate_values_filter(self):
+ companies = Company.objects.annotate(
+ foo=RawSQL('%s', ['value']),
+ ).filter(foo='value').order_by('name')
+ self.assertQuerysetEqual(
+ companies, [
+ '',
+ '',
+ '',
+ ],
+ )
+
+ #@unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support using boolean type in SELECT")
+ #def test_filtering_on_annotate_that_uses_q(self):
+ # self.assertEqual(
+ # Company.objects.annotate(
+ # num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=models.BooleanField())
+ # ).filter(num_employees_check=True).count(),
+ # 2,
+ # )
+
+ def test_filter_inter_attribute(self):
+ # We can filter on attribute relationships on same model obj, e.g.
+ # find companies where the number of employees is greater
+ # than the number of chairs.
+ self.assertSequenceEqual(
+ self.company_query.filter(num_employees__gt=F("num_chairs")), [
+ {
+ "num_chairs": 5,
+ "name": "Example Inc.",
+ "num_employees": 2300,
+ },
+ {
+ "num_chairs": 1,
+ "name": "Test GmbH",
+ "num_employees": 32
+ },
+ ],
+ )
+
+ def test_update(self):
+ # We can set one field to have the value of another field
+ # Make sure we have enough chairs
+ self.company_query.update(num_chairs=F("num_employees"))
+ self.assertSequenceEqual(
+ self.company_query, [
+ {
+ "num_chairs": 2300,
+ "name": "Example Inc.",
+ "num_employees": 2300
+ },
+ {
+ "num_chairs": 3,
+ "name": "Foobar Ltd.",
+ "num_employees": 3
+ },
+ {
+ "num_chairs": 32,
+ "name": "Test GmbH",
+ "num_employees": 32
+ }
+ ],
+ )
+
+ def test_arithmetic(self):
+ # We can perform arithmetic operations in expressions
+ # Make sure we have 2 spare chairs
+ self.company_query.update(num_chairs=F("num_employees") + 2)
+ self.assertSequenceEqual(
+ self.company_query, [
+ {
+ 'num_chairs': 2302,
+ 'name': 'Example Inc.',
+ 'num_employees': 2300
+ },
+ {
+ 'num_chairs': 5,
+ 'name': 'Foobar Ltd.',
+ 'num_employees': 3
+ },
+ {
+ 'num_chairs': 34,
+ 'name': 'Test GmbH',
+ 'num_employees': 32
+ }
+ ],
+ )
+
+ def test_order_of_operations(self):
+ # Law of order of operations is followed
+ self. company_query.update(
+ num_chairs=F('num_employees') + 2 * F('num_employees')
+ )
+ self.assertSequenceEqual(
+ self.company_query, [
+ {
+ 'num_chairs': 6900,
+ 'name': 'Example Inc.',
+ 'num_employees': 2300
+ },
+ {
+ 'num_chairs': 9,
+ 'name': 'Foobar Ltd.',
+ 'num_employees': 3
+ },
+ {
+ 'num_chairs': 96,
+ 'name': 'Test GmbH',
+ 'num_employees': 32
+ }
+ ],
+ )
+
+ def test_parenthesis_priority(self):
+ # Law of order of operations can be overridden by parentheses
+ self.company_query.update(
+ num_chairs=((F('num_employees') + 2) * F('num_employees'))
+ )
+ self.assertSequenceEqual(
+ self.company_query, [
+ {
+ 'num_chairs': 5294600,
+ 'name': 'Example Inc.',
+ 'num_employees': 2300
+ },
+ {
+ 'num_chairs': 15,
+ 'name': 'Foobar Ltd.',
+ 'num_employees': 3
+ },
+ {
+ 'num_chairs': 1088,
+ 'name': 'Test GmbH',
+ 'num_employees': 32
+ }
+ ],
+ )
+
+ def test_update_with_fk(self):
+ # ForeignKey can become updated with the value of another ForeignKey.
+ self.assertEqual(
+ Company.objects.update(point_of_contact=F('ceo')),
+ 3
+ )
+ self.assertQuerysetEqual(
+ Company.objects.all(), [
+ "Joe Smith",
+ "Frank Meyer",
+ "Max Mustermann",
+ ],
+ lambda c: str(c.point_of_contact),
+ ordered=False
+ )
+
+ def test_update_with_none(self):
+ Number.objects.create(integer=1, float=1.0)
+ Number.objects.create(integer=2)
+ Number.objects.filter(float__isnull=False).update(float=Value(None))
+ self.assertQuerysetEqual(
+ Number.objects.all(), [
+ None,
+ None,
+ ],
+ lambda n: n.float,
+ ordered=False
+ )
+
+ def test_filter_with_join(self):
+ # F Expressions can also span joins
+ Company.objects.update(point_of_contact=F('ceo'))
+ c = Company.objects.all()[0]
+ c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
+ c.save()
+
+ self.assertQuerysetEqual(
+ Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
+ "Foobar Ltd.",
+ "Test GmbH",
+ ],
+ lambda c: c.name,
+ ordered=False
+ )
+
+ Company.objects.exclude(
+ ceo__firstname=F("point_of_contact__firstname")
+ ).update(name="foo")
+ self.assertEqual(
+ Company.objects.exclude(
+ ceo__firstname=F('point_of_contact__firstname')
+ ).get().name,
+ "foo",
+ )
+
+ with transaction.atomic():
+ msg = "Joined field references are not permitted in this query"
+ with self.assertRaisesMessage(FieldError, msg):
+ Company.objects.exclude(
+ ceo__firstname=F('point_of_contact__firstname')
+ ).update(name=F('point_of_contact__lastname'))
+
+ def test_object_update(self):
+ # F expressions can be used to update attributes on single objects
+ test_gmbh = Company.objects.get(name="Test GmbH")
+ self.assertEqual(test_gmbh.num_employees, 32)
+ test_gmbh.num_employees = F("num_employees") + 4
+ test_gmbh.save()
+ test_gmbh = Company.objects.get(pk=test_gmbh.pk)
+ self.assertEqual(test_gmbh.num_employees, 36)
+
+ def test_new_object_save(self):
+ # We should be able to use Funcs when inserting new data
+ test_co = Company(
+ name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
+ ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
+ )
+ test_co.save()
+ test_co.refresh_from_db()
+ self.assertEqual(test_co.name, "upper")
+
+ def test_new_object_create(self):
+ test_co = Company.objects.create(
+ name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
+ ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
+ )
+ test_co.refresh_from_db()
+ self.assertEqual(test_co.name, "upper")
+
+ def test_object_create_with_aggregate(self):
+ # Aggregates are not allowed when inserting new data
+ with self.assertRaisesMessage(FieldError, 'Aggregate functions are not allowed in this query'):
+ Company.objects.create(
+ name='Company', num_employees=Max(Value(1)), num_chairs=1,
+ ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
+ )
+
+ def test_object_update_fk(self):
+ # F expressions cannot be used to update attributes which are foreign
+ # keys, or attributes which involve joins.
+ test_gmbh = Company.objects.get(name="Test GmbH")
+
+ def test():
+ test_gmbh.point_of_contact = F("ceo")
+ msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
+ with self.assertRaisesMessage(ValueError, msg):
+ test()
+
+ test_gmbh.point_of_contact = test_gmbh.ceo
+ test_gmbh.save()
+ test_gmbh.name = F("ceo__last_name")
+ msg = 'Joined field references are not permitted in this query'
+ with self.assertRaisesMessage(FieldError, msg):
+ test_gmbh.save()
+
+ def test_object_update_unsaved_objects(self):
+ # F expressions cannot be used to update attributes on objects which do
+ # not yet exist in the database
+ test_gmbh = Company.objects.get(name="Test GmbH")
+ acme = Company(
+ name="The Acme Widget Co.", num_employees=12, num_chairs=5,
+ ceo=test_gmbh.ceo
+ )
+ acme.num_employees = F("num_employees") + 16
+ msg = (
+ 'Failed to insert expression "Col(expressions_company, '
+ 'expressions.Company.num_employees) + Value(16)" on '
+ 'expressions.Company.num_employees. F() expressions can only be '
+ 'used to update, not to insert.'
+ )
+ with self.assertRaisesMessage(ValueError, msg):
+ acme.save()
+
+ acme.num_employees = 12
+ acme.name = Lower(F('name'))
+ msg = (
+ 'Failed to insert expression "Lower(Col(expressions_company, '
+ 'expressions.Company.name))" on expressions.Company.name. F() '
+ 'expressions can only be used to update, not to insert.'
+ )
+ with self.assertRaisesMessage(ValueError, msg):
+ acme.save()
+
+ def test_ticket_11722_iexact_lookup(self):
+ Employee.objects.create(firstname="John", lastname="Doe")
+ Employee.objects.create(firstname="Test", lastname="test")
+
+ queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
+ self.assertQuerysetEqual(queryset, [""])
+
+ def test_ticket_16731_startswith_lookup(self):
+ Employee.objects.create(firstname="John", lastname="Doe")
+ e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
+ e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
+ self.assertSequenceEqual(
+ Employee.objects.filter(lastname__startswith=F('firstname')),
+ [e2, e3] if connection.features.has_case_insensitive_like else [e2]
+ )
+ qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk')
+ self.assertSequenceEqual(qs, [e2, e3])
+
+ def test_ticket_18375_join_reuse(self):
+ # Reverse multijoin F() references and the lookup target the same join.
+ # Pre #18375 the F() join was generated first and the lookup couldn't
+ # reuse that join.
+ qs = Employee.objects.filter(
+ company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
+ self.assertEqual(str(qs.query).count('JOIN'), 1)
+
+ def test_ticket_18375_kwarg_ordering(self):
+ # The next query was dict-randomization dependent - if the "gte=1"
+ # was seen first, then the F() will reuse the join generated by the
+ # gte lookup, if F() was seen first, then it generated a join the
+ # other lookups could not reuse.
+ qs = Employee.objects.filter(
+ company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
+ company_ceo_set__num_chairs__gte=1,
+ )
+ self.assertEqual(str(qs.query).count('JOIN'), 1)
+
+ def test_ticket_18375_kwarg_ordering_2(self):
+ # Another similar case for F() than above. Now we have the same join
+ # in two filter kwargs, one in the lhs lookup, one in F. Here pre
+ # #18375 the amount of joins generated was random if dict
+ # randomization was enabled, that is the generated query dependent
+ # on which clause was seen first.
+ qs = Employee.objects.filter(
+ company_ceo_set__num_employees=F('pk'),
+ pk=F('company_ceo_set__num_employees')
+ )
+ self.assertEqual(str(qs.query).count('JOIN'), 1)
+
+ def test_ticket_18375_chained_filters(self):
+ # F() expressions do not reuse joins from previous filter.
+ qs = Employee.objects.filter(
+ company_ceo_set__num_employees=F('pk')
+ ).filter(
+ company_ceo_set__num_employees=F('company_ceo_set__num_employees')
+ )
+ self.assertEqual(str(qs.query).count('JOIN'), 2)
+
+ def test_order_by_exists(self):
+ mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20)
+ mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by(
+ # Order by whether the employee is the CEO of a company
+ Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc()
+ )
+ self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])
+
+ def test_outerref(self):
+ inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
+ msg = (
+ 'This queryset contains a reference to an outer query and may only '
+ 'be used in a subquery.'
+ )
+ with self.assertRaisesMessage(ValueError, msg):
+ inner.exists()
+
+ outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
+ self.assertIs(outer.exists(), True)
+
+ def test_exist_single_field_output_field(self):
+ queryset = Company.objects.values('pk')
+ self.assertIsInstance(Exists(queryset).output_field, models.BooleanField)
+
+ def test_subquery(self):
+ Company.objects.filter(name='Example Inc.').update(
+ point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'),
+ ceo=Employee.objects.get(firstname='Max', lastname='Mustermann'),
+ )
+ Employee.objects.create(firstname='Bob', lastname='Brown', salary=40)
+ qs = Employee.objects.annotate(
+ is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
+ is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
+ is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
+ is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
+ largest_company=Subquery(Company.objects.order_by('-num_employees').filter(
+ models.Q(ceo=OuterRef('pk')) | models.Q(point_of_contact=OuterRef('pk'))
+ ).values('name')[:1], output_field=models.CharField())
+ ).values(
+ 'firstname',
+ 'is_point_of_contact',
+ 'is_not_point_of_contact',
+ 'is_ceo_of_small_company',
+ 'is_ceo_small_2',
+ 'largest_company',
+ ).order_by('firstname')
+
+ results = list(qs)
+ # Could use Coalesce(subq, Value('')) instead except for the bug in
+ # cx_Oracle mentioned in #23843.
+ bob = results[0]
+ if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls:
+ bob['largest_company'] = None
+
+ self.assertEqual(results, [
+ {
+ 'firstname': 'Bob',
+ 'is_point_of_contact': False,
+ 'is_not_point_of_contact': True,
+ 'is_ceo_of_small_company': False,
+ 'is_ceo_small_2': False,
+ 'largest_company': None,
+ },
+ {
+ 'firstname': 'Frank',
+ 'is_point_of_contact': False,
+ 'is_not_point_of_contact': True,
+ 'is_ceo_of_small_company': True,
+ 'is_ceo_small_2': True,
+ 'largest_company': 'Foobar Ltd.',
+ },
+ {
+ 'firstname': 'Joe',
+ 'is_point_of_contact': True,
+ 'is_not_point_of_contact': False,
+ 'is_ceo_of_small_company': False,
+ 'is_ceo_small_2': False,
+ 'largest_company': 'Example Inc.',
+ },
+ {
+ 'firstname': 'Max',
+ 'is_point_of_contact': False,
+ 'is_not_point_of_contact': True,
+ 'is_ceo_of_small_company': True,
+ 'is_ceo_small_2': True,
+ 'largest_company': 'Example Inc.'
+ }
+ ])
+ # A less elegant way to write the same query: this uses a LEFT OUTER
+ # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less
+ # efficient than EXISTS.
+ self.assertCountEqual(
+ qs.filter(is_point_of_contact=True).values('pk'),
+ Employee.objects.exclude(company_point_of_contact_set=None).values('pk')
+ )
+
+ def test_in_subquery(self):
+ # This is a contrived test (and you really wouldn't write this query),
+ # but it is a succinct way to test the __in=Subquery() construct.
+ small_companies = Company.objects.filter(num_employees__lt=200).values('pk')
+ subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
+ self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
+ subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3)))
+ self.assertCountEqual(subquery_test2, [self.foobar_ltd])
+
+ def test_uuid_pk_subquery(self):
+ u = UUIDPK.objects.create()
+ UUID.objects.create(uuid_fk=u)
+ qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id')))
+ self.assertCountEqual(qs, [u])
+
+ def test_nested_subquery(self):
+ inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
+ outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
+ contrived = Employee.objects.annotate(
+ is_point_of_contact=Subquery(
+ outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'),
+ output_field=models.BooleanField(),
+ ),
+ )
+ self.assertCountEqual(contrived.values_list(), outer.values_list())
+
+ def test_nested_subquery_outer_ref_2(self):
+ first = Time.objects.create(time='09:00')
+ second = Time.objects.create(time='17:00')
+ third = Time.objects.create(time='21:00')
+ SimulationRun.objects.bulk_create([
+ SimulationRun(start=first, end=second, midpoint='12:00'),
+ SimulationRun(start=first, end=third, midpoint='15:00'),
+ SimulationRun(start=second, end=first, midpoint='00:00'),
+ ])
+ inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time')
+ middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1]
+ outer = Time.objects.annotate(other=Subquery(middle, output_field=models.TimeField()))
+ # This is a contrived example. It exercises the double OuterRef form.
+ self.assertCountEqual(outer, [first, second, third])
+
+ def test_nested_subquery_outer_ref_with_autofield(self):
+ first = Time.objects.create(time='09:00')
+ second = Time.objects.create(time='17:00')
+ SimulationRun.objects.create(start=first, end=second, midpoint='12:00')
+ inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start')
+ middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1]
+ outer = Time.objects.annotate(other=Subquery(middle, output_field=models.IntegerField()))
+ # This exercises the double OuterRef form with AutoField as pk.
+ self.assertCountEqual(outer, [first, second])
+
+ def test_annotations_within_subquery(self):
+ Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank'))
+ inner = Company.objects.filter(
+ ceo=OuterRef('pk')
+ ).values('ceo').annotate(total_employees=models.Sum('num_employees')).values('total_employees')
+ outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner))
+ self.assertSequenceEqual(
+ outer.order_by('-total_employees').values('salary', 'total_employees'),
+ [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}],
+ )
+
+ def test_subquery_references_joined_table_twice(self):
+ inner = Company.objects.filter(
+ num_chairs__gte=OuterRef('ceo__salary'),
+ num_employees__gte=OuterRef('point_of_contact__salary'),
+ )
+ # Another contrived example (there is no need to have a subquery here)
+ outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
+ self.assertFalse(outer.exists())
+
+ def test_explicit_output_field(self):
+ class FuncA(Func):
+ output_field = models.CharField()
+
+ class FuncB(Func):
+ pass
+
+ expr = FuncB(FuncA())
+ self.assertEqual(expr.output_field, FuncA.output_field)
+
+ def test_outerref_mixed_case_table_name(self):
+ inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned'))
+ outer = Result.objects.filter(pk__in=Subquery(inner.values('pk')))
+ self.assertFalse(outer.exists())
+
+ def test_outerref_with_operator(self):
+ inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2)
+ outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
+ self.assertEqual(outer.get().name, 'Test GmbH')
+
+ def test_pickle_expression(self):
+ expr = Value(1, output_field=models.IntegerField())
+ expr.convert_value # populate cached property
+ self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)
+
+
+class IterableLookupInnerExpressionsTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30)
+ # MySQL requires that the values calculated for expressions don't pass
+ # outside of the field's range, so it's inconvenient to use the values
+ # in the more general tests.
+ Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo)
+ Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo)
+ Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo)
+ Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo)
+ Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo)
+
+ def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
+ # __in lookups can use F() expressions for integers.
+ queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10]))
+ self.assertQuerysetEqual(queryset, [''], ordered=False)
+ self.assertQuerysetEqual(
+ Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])),
+ ['', ''],
+ ordered=False
+ )
+ self.assertQuerysetEqual(
+ Company.objects.filter(
+ num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10])
+ ),
+ ['', '', ''],
+ ordered=False
+ )
+
+ def test_expressions_in_lookups_join_choice(self):
+ self.skipTest('failing on MSSQL')
+ midpoint = datetime.time(13, 0)
+ t1 = Time.objects.create(time=datetime.time(12, 0))
+ t2 = Time.objects.create(time=datetime.time(14, 0))
+ SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
+ SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
+ SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
+ SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
+
+ queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')])
+ self.assertQuerysetEqual(
+ queryset,
+ [''],
+ ordered=False
+ )
+ for alias in queryset.query.alias_map.values():
+ if isinstance(alias, Join):
+ self.assertEqual(alias.join_type, constants.INNER)
+
+ queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')])
+ self.assertQuerysetEqual(queryset, [], ordered=False)
+ for alias in queryset.query.alias_map.values():
+ if isinstance(alias, Join):
+ self.assertEqual(alias.join_type, constants.LOUTER)
+
+ def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
+ # Range lookups can use F() expressions for integers.
+ Company.objects.filter(num_employees__exact=F("num_chairs"))
+ self.assertQuerysetEqual(
+ Company.objects.filter(num_employees__range=(F('num_chairs'), 100)),
+ ['', '', ''],
+ ordered=False
+ )
+ self.assertQuerysetEqual(
+ Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)),
+ ['', '', ''],
+ ordered=False
+ )
+ self.assertQuerysetEqual(
+ Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)),
+ ['', '', '', ''],
+ ordered=False
+ )
+ self.assertQuerysetEqual(
+ Company.objects.filter(num_employees__range=(1, 100)),
+ [
+ '', '', '',
+ '', '',
+ ],
+ ordered=False
+ )
+
+ @unittest.skipUnless(connection.vendor == 'sqlite',
+ "This defensive test only works on databases that don't validate parameter types")
+ def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self):
+ """
+ This tests that SQL injection isn't possible using compilation of
+ expressions in iterable filters, as their compilation happens before
+ the main query compilation. It's limited to SQLite, as PostgreSQL,
+ Oracle and other vendors have defense in depth against this by type
+ checking. Testing against SQLite (the most permissive of the built-in
+ databases) demonstrates that the problem doesn't exist while keeping
+ the test simple.
+ """
+ queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1'])
+ self.assertQuerysetEqual(queryset, [], ordered=False)
+
+ def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
+ start = datetime.datetime(2016, 2, 3, 15, 0, 0)
+ end = datetime.datetime(2016, 2, 5, 15, 0, 0)
+ experiment_1 = Experiment.objects.create(
+ name='Integrity testing',
+ assigned=start.date(),
+ start=start,
+ end=end,
+ completed=end.date(),
+ estimated_time=end - start,
+ )
+ experiment_2 = Experiment.objects.create(
+ name='Taste testing',
+ assigned=start.date(),
+ start=start,
+ end=end,
+ completed=end.date(),
+ estimated_time=end - start,
+ )
+ Result.objects.create(
+ experiment=experiment_1,
+ result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
+ )
+ Result.objects.create(
+ experiment=experiment_1,
+ result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
+ )
+ Result.objects.create(
+ experiment=experiment_2,
+ result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
+ )
+
+ within_experiment_time = [F('experiment__start'), F('experiment__end')]
+ queryset = Result.objects.filter(result_time__range=within_experiment_time)
+ self.assertQuerysetEqual(queryset, [""])
+
+ within_experiment_time = [F('experiment__start'), F('experiment__end')]
+ queryset = Result.objects.filter(result_time__range=within_experiment_time)
+ self.assertQuerysetEqual(queryset, [""])
+
+
+class FTests(SimpleTestCase):
+
+ def test_deepcopy(self):
+ f = F("foo")
+ g = deepcopy(f)
+ self.assertEqual(f.name, g.name)
+
+ def test_deconstruct(self):
+ f = F('name')
+ path, args, kwargs = f.deconstruct()
+ self.assertEqual(path, 'django.db.models.expressions.F')
+ self.assertEqual(args, (f.name,))
+ self.assertEqual(kwargs, {})
+
+ def test_equal(self):
+ f = F('name')
+ same_f = F('name')
+ other_f = F('username')
+ self.assertEqual(f, same_f)
+ self.assertNotEqual(f, other_f)
+
+ def test_hash(self):
+ d = {F('name'): 'Bob'}
+ self.assertIn(F('name'), d)
+ self.assertEqual(d[F('name')], 'Bob')
+
+ def test_not_equal_Value(self):
+ f = F('name')
+ value = Value('name')
+ self.assertNotEqual(f, value)
+ self.assertNotEqual(value, f)
+
+
+class ExpressionsTests(TestCase):
+
+ def test_F_reuse(self):
+ f = F('id')
+ n = Number.objects.create(integer=-1)
+ c = Company.objects.create(
+ name="Example Inc.", num_employees=2300, num_chairs=5,
+ ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
+ )
+ c_qs = Company.objects.filter(id=f)
+ self.assertEqual(c_qs.get(), c)
+ # Reuse the same F-object for another queryset
+ n_qs = Number.objects.filter(id=f)
+ self.assertEqual(n_qs.get(), n)
+ # The original query still works correctly
+ self.assertEqual(c_qs.get(), c)
+
+ def test_patterns_escape(self):
+ r"""
+ Special characters (e.g. %, _ and \) stored in database are
+ properly escaped when using a pattern lookup with an expression
+ refs #16731
+ """
+ Employee.objects.bulk_create([
+ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
+ Employee(firstname="Johnny", lastname="%John"),
+ Employee(firstname="Jean-Claude", lastname="Claud_"),
+ Employee(firstname="Jean-Claude", lastname="Claude"),
+ Employee(firstname="Jean-Claude", lastname="Claude%"),
+ Employee(firstname="Johnny", lastname="Joh\\n"),
+ Employee(firstname="Johnny", lastname="John"),
+ Employee(firstname="Johnny", lastname="_ohn"),
+ ])
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(firstname__contains=F('lastname')),
+ ["", "", ""],
+ ordered=False,
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(firstname__startswith=F('lastname')),
+ ["", ""],
+ ordered=False,
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(firstname__endswith=F('lastname')),
+ [""],
+ ordered=False,
+ )
+
+ def test_insensitive_patterns_escape(self):
+ r"""
+ Special characters (e.g. %, _ and \) stored in database are
+ properly escaped when using a case insensitive pattern lookup with an
+ expression -- refs #16731
+ """
+ Employee.objects.bulk_create([
+ Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
+ Employee(firstname="Johnny", lastname="%john"),
+ Employee(firstname="Jean-Claude", lastname="claud_"),
+ Employee(firstname="Jean-Claude", lastname="claude"),
+ Employee(firstname="Jean-Claude", lastname="claude%"),
+ Employee(firstname="Johnny", lastname="joh\\n"),
+ Employee(firstname="Johnny", lastname="john"),
+ Employee(firstname="Johnny", lastname="_ohn"),
+ ])
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(firstname__icontains=F('lastname')),
+ ["", "", ""],
+ ordered=False,
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(firstname__istartswith=F('lastname')),
+ ["", ""],
+ ordered=False,
+ )
+
+ self.assertQuerysetEqual(
+ Employee.objects.filter(firstname__iendswith=F('lastname')),
+ [""],
+ ordered=False,
+ )
+
+
+class ExpressionsNumericTests(TestCase):
+
+ def setUp(self):
+ Number(integer=-1).save()
+ Number(integer=42).save()
+ Number(integer=1337).save()
+ self.assertEqual(Number.objects.update(float=F('integer')), 3)
+
+ def test_fill_with_value_from_same_object(self):
+ """
+ We can fill a value in all objects with an other value of the
+ same object.
+ """
+ self.assertQuerysetEqual(
+ Number.objects.all(),
+ [
+ '',
+ '',
+ ''
+ ],
+ ordered=False
+ )
+
+ def test_increment_value(self):
+ """
+ We can increment a value of all objects in a query set.
+ """
+ self.assertEqual(
+ Number.objects.filter(integer__gt=0)
+ .update(integer=F('integer') + 1),
+ 2)
+
+ self.assertQuerysetEqual(
+ Number.objects.all(),
+ [
+ '',
+ '',
+ ''
+ ],
+ ordered=False
+ )
+
+ def test_filter_not_equals_other_field(self):
+ """
+ We can filter for objects, where a value is not equals the value
+ of an other field.
+ """
+ self.assertEqual(
+ Number.objects.filter(integer__gt=0)
+ .update(integer=F('integer') + 1),
+ 2)
+ self.assertQuerysetEqual(
+ Number.objects.exclude(float=F('integer')),
+ [
+ '',
+ ''
+ ],
+ ordered=False
+ )
+
+ def test_complex_expressions(self):
+ """
+ Complex expressions of different connection types are possible.
+ """
+ n = Number.objects.create(integer=10, float=123.45)
+ self.assertEqual(Number.objects.filter(pk=n.pk).update(
+ float=F('integer') + F('float') * 2), 1)
+
+ self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
+ self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
+
+ def test_incorrect_field_expression(self):
+ with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
+ list(Employee.objects.filter(firstname=F('nope')))
+
+
+class ExpressionOperatorTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.n = Number.objects.create(integer=42, float=15.5)
+ cls.n1 = Number.objects.create(integer=-42, float=-15.5)
+
+ def test_lefthand_addition(self):
+ # LH Addition of floats and integers
+ Number.objects.filter(pk=self.n.pk).update(
+ integer=F('integer') + 15,
+ float=F('float') + 42.7
+ )
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
+
+ def test_lefthand_subtraction(self):
+ # LH Subtraction of floats and integers
+ Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7)
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
+
+ def test_lefthand_multiplication(self):
+ # Multiplication of floats and integers
+ Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7)
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
+
+ def test_lefthand_division(self):
+ # LH Division of floats and integers
+ Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7)
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
+
+ def test_lefthand_modulo(self):
+ # LH Modulo arithmetic on integers
+ Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
+
+ def test_lefthand_bitwise_and(self):
+ # LH Bitwise ands on integers
+ Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
+ Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56))
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
+ self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
+
+ def test_lefthand_bitwise_left_shift_operator(self):
+ Number.objects.update(integer=F('integer').bitleftshift(2))
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
+ self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)
+
+ #def test_lefthand_bitwise_right_shift_operator(self):
+ # Number.objects.update(integer=F('integer').bitrightshift(2))
+ # self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
+ # self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)
+
+ def test_lefthand_bitwise_or(self):
+ # LH Bitwise or on integers
+ Number.objects.update(integer=F('integer').bitor(48))
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
+ self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
+
+ def test_lefthand_power(self):
+ # LH Powert arithmetic operation on floats and integers
+ Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
+
+ def test_right_hand_addition(self):
+ # Right hand operators
+ Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float'))
+
+ # RH Addition of floats and integers
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
+
+ def test_right_hand_subtraction(self):
+ Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float'))
+
+ # RH Subtraction of floats and integers
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
+
+ def test_right_hand_multiplication(self):
+ # RH Multiplication of floats and integers
+ Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float'))
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
+
+ def test_right_hand_division(self):
+ # RH Division of floats and integers
+ Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float'))
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
+
+ def test_right_hand_modulo(self):
+ # RH Modulo arithmetic on integers
+ Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
+
+ self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
+ self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
+
+ #def test_righthand_power(self):
+ # # RH Powert arithmetic operation on floats and integers
+ # Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float'))
+ # self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
+ # self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
+
+
+class FTimeDeltaTests(TestCase):
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.sday = sday = datetime.date(2010, 6, 25)
+ cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
+ midnight = datetime.time(0)
+
+ delta0 = datetime.timedelta(0)
+ delta1 = datetime.timedelta(microseconds=253000)
+ delta2 = datetime.timedelta(seconds=44)
+ delta3 = datetime.timedelta(hours=21, minutes=8)
+ delta4 = datetime.timedelta(days=10)
+ delta5 = datetime.timedelta(days=90)
+
+ # Test data is set so that deltas and delays will be
+ # strictly increasing.
+ cls.deltas = []
+ cls.delays = []
+ cls.days_long = []
+
+ # e0: started same day as assigned, zero duration
+ end = stime + delta0
+ e0 = Experiment.objects.create(
+ name='e0', assigned=sday, start=stime, end=end,
+ completed=end.date(), estimated_time=delta0,
+ )
+ cls.deltas.append(delta0)
+ cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight))
+ cls.days_long.append(e0.completed - e0.assigned)
+
+ # e1: started one day after assigned, tiny duration, data
+ # set so that end time has no fractional seconds, which
+ # tests an edge case on sqlite.
+ delay = datetime.timedelta(1)
+ end = stime + delay + delta1
+ e1 = Experiment.objects.create(
+ name='e1', assigned=sday, start=stime + delay, end=end,
+ completed=end.date(), estimated_time=delta1,
+ )
+ cls.deltas.append(delta1)
+ cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
+ cls.days_long.append(e1.completed - e1.assigned)
+
+ # e2: started three days after assigned, small duration
+ end = stime + delta2
+ e2 = Experiment.objects.create(
+ name='e2', assigned=sday - datetime.timedelta(3), start=stime,
+ end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1),
+ )
+ cls.deltas.append(delta2)
+ cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
+ cls.days_long.append(e2.completed - e2.assigned)
+
+ # e3: started four days after assigned, medium duration
+ delay = datetime.timedelta(4)
+ end = stime + delay + delta3
+ e3 = Experiment.objects.create(
+ name='e3', assigned=sday, start=stime + delay, end=end,
+ completed=end.date(), estimated_time=delta3,
+ )
+ cls.deltas.append(delta3)
+ cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
+ cls.days_long.append(e3.completed - e3.assigned)
+
+ # e4: started 10 days after assignment, long duration
+ end = stime + delta4
+ e4 = Experiment.objects.create(
+ name='e4', assigned=sday - datetime.timedelta(10), start=stime,
+ end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1),
+ )
+ cls.deltas.append(delta4)
+ cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
+ cls.days_long.append(e4.completed - e4.assigned)
+
+ # e5: started a month after assignment, very long duration
+ delay = datetime.timedelta(30)
+ end = stime + delay + delta5
+ e5 = Experiment.objects.create(
+ name='e5', assigned=sday, start=stime + delay, end=end,
+ completed=end.date(), estimated_time=delta5,
+ )
+ cls.deltas.append(delta5)
+ cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
+ cls.days_long.append(e5.completed - e5.assigned)
+
+ cls.expnames = [e.name for e in Experiment.objects.all()]
+
+ def test_multiple_query_compilation(self):
+ # Ticket #21643
+ queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
+ q1 = str(queryset.query)
+ q2 = str(queryset.query)
+ self.assertEqual(q1, q2)
+
+ def test_query_clone(self):
+ # Ticket #21643 - Crash when compiling query more than once
+ qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
+ qs2 = qs.all()
+ list(qs)
+ list(qs2)
+ # Intentionally no assert
+
+ def test_delta_add(self):
+ for i in range(len(self.deltas)):
+ delta = self.deltas[i]
+ test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)]
+ self.assertEqual(test_set, self.expnames[:i])
+
+ test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))]
+ self.assertEqual(test_set, self.expnames[:i])
+
+ test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)]
+ self.assertEqual(test_set, self.expnames[:i + 1])
+
+ def test_delta_subtract(self):
+ for i in range(len(self.deltas)):
+ delta = self.deltas[i]
+ test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)]
+ self.assertEqual(test_set, self.expnames[:i])
+
+ test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)]
+ self.assertEqual(test_set, self.expnames[:i + 1])
+
+ def test_exclude(self):
+ for i in range(len(self.deltas)):
+ delta = self.deltas[i]
+ test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)]
+ self.assertEqual(test_set, self.expnames[i:])
+
+ test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)]
+ self.assertEqual(test_set, self.expnames[i + 1:])
+
+ def test_date_comparison(self):
+ for i in range(len(self.days_long)):
+ days = self.days_long[i]
+ test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)]
+ self.assertEqual(test_set, self.expnames[:i])
+
+ test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)]
+ self.assertEqual(test_set, self.expnames[:i + 1])
+
+ @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
+ def test_mixed_comparisons1(self):
+ for i in range(len(self.delays)):
+ delay = self.delays[i]
+ test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)]
+ self.assertEqual(test_set, self.expnames[:i])
+
+ test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)]
+ self.assertEqual(test_set, self.expnames[:i + 1])
+
+ def test_mixed_comparisons2(self):
+ delays = [datetime.timedelta(delay.days) for delay in self.delays]
+ for i in range(len(delays)):
+ delay = delays[i]
+ test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)]
+ self.assertEqual(test_set, self.expnames[:i])
+
+ test_set = [
+ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1))
+ ]
+ self.assertEqual(test_set, self.expnames[:i + 1])
+
+ def test_delta_update(self):
+ for i in range(len(self.deltas)):
+ delta = self.deltas[i]
+ exps = Experiment.objects.all()
+ expected_durations = [e.duration() for e in exps]
+ expected_starts = [e.start + delta for e in exps]
+ expected_ends = [e.end + delta for e in exps]
+
+ Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
+ exps = Experiment.objects.all()
+ new_starts = [e.start for e in exps]
+ new_ends = [e.end for e in exps]
+ new_durations = [e.duration() for e in exps]
+ self.assertEqual(expected_starts, new_starts)
+ self.assertEqual(expected_ends, new_ends)
+ self.assertEqual(expected_durations, new_durations)
+
+ #def test_invalid_operator(self):
+ # with self.assertRaises(DatabaseError):
+ # list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
+
+ def test_durationfield_add(self):
+ zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))]
+ self.assertEqual(zeros, ['e0'])
+
+ end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
+ self.assertEqual(end_less, ['e2'])
+
+ delta_math = [
+ e.name for e in
+ Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))
+ ]
+ self.assertEqual(delta_math, ['e4'])
+
+ @skipUnlessDBFeature('supports_temporal_subtraction')
+ def test_date_subtraction(self):
+ queryset = Experiment.objects.annotate(
+ completion_duration=ExpressionWrapper(
+ F('completed') - F('assigned'), output_field=models.DurationField()
+ )
+ )
+
+ at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))}
+ self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'})
+
+ at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))}
+ self.assertEqual(at_least_120_days, {'e5'})
+
+ less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))}
+ self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'})
+
+ @skipUnlessDBFeature('supports_temporal_subtraction')
+ def test_time_subtraction(self):
+ Time.objects.create(time=datetime.time(12, 30, 15, 2345))
+ queryset = Time.objects.annotate(
+ difference=ExpressionWrapper(
+ F('time') - Value(datetime.time(11, 15, 0), output_field=models.TimeField()),
+ output_field=models.DurationField(),
+ )
+ )
+ self.assertEqual(
+ queryset.get().difference,
+ datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345)
+ )
+
+ @skipUnlessDBFeature('supports_temporal_subtraction')
+ def test_datetime_subtraction(self):
+ under_estimate = [
+ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))
+ ]
+ self.assertEqual(under_estimate, ['e2'])
+
+ over_estimate = [
+ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))
+ ]
+ self.assertEqual(over_estimate, ['e4'])
+
+ #@skipUnlessDBFeature('supports_temporal_subtraction')
+ #def test_datetime_subtraction_microseconds(self):
+ # delta = datetime.timedelta(microseconds=8999999999999999)
+ # Experiment.objects.update(end=F('start') + delta)
+ # qs = Experiment.objects.annotate(
+ # delta=ExpressionWrapper(F('end') - F('start'), output_field=models.DurationField())
+ # )
+ # for e in qs:
+ # self.assertEqual(e.delta, delta)
+
+ def test_duration_with_datetime(self):
+ # Exclude e1 which has very high precision so we can test this on all
+ # backends regardless of whether or not it supports
+ # microsecond_precision.
+ over_estimate = Experiment.objects.exclude(name='e1').filter(
+ completed__gt=self.stime + F('estimated_time'),
+ ).order_by('name')
+ self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name)
+
+ #def test_duration_with_datetime_microseconds(self):
+ # delta = datetime.timedelta(microseconds=8999999999999999)
+ # qs = Experiment.objects.annotate(dt=ExpressionWrapper(
+ # F('start') + delta,
+ # output_field=models.DateTimeField(),
+ # ))
+ # for e in qs:
+ # self.assertEqual(e.dt, e.start + delta)
+
+ def test_date_minus_duration(self):
+ more_than_4_days = Experiment.objects.filter(
+ assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=models.DurationField())
+ )
+ self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name)
+
+ def test_negative_timedelta_update(self):
+ # subtract 30 seconds, 30 minutes, 2 hours and 2 days
+ experiments = Experiment.objects.filter(name='e0').annotate(
+ start_sub_seconds=F('start') + datetime.timedelta(seconds=-30),
+ ).annotate(
+ start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30),
+ ).annotate(
+ start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2),
+ ).annotate(
+ new_start=F('start_sub_hours') + datetime.timedelta(days=-2),
+ )
+ expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
+ # subtract 30 microseconds
+ experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30))
+ expected_start += datetime.timedelta(microseconds=+746970)
+ experiments.update(start=F('new_start'))
+ e0 = Experiment.objects.get(name='e0')
+ self.assertEqual(e0.start, expected_start)
+
+
+class ValueTests(TestCase):
+ def test_update_TimeField_using_Value(self):
+ Time.objects.create()
+ Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
+ self.assertEqual(Time.objects.get().time, datetime.time(1))
+
+ def test_update_UUIDField_using_Value(self):
+ UUID.objects.create()
+ UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
+ self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
+
+ def test_deconstruct(self):
+ value = Value('name')
+ path, args, kwargs = value.deconstruct()
+ self.assertEqual(path, 'django.db.models.expressions.Value')
+ self.assertEqual(args, (value.value,))
+ self.assertEqual(kwargs, {})
+
+ def test_deconstruct_output_field(self):
+ value = Value('name', output_field=CharField())
+ path, args, kwargs = value.deconstruct()
+ self.assertEqual(path, 'django.db.models.expressions.Value')
+ self.assertEqual(args, (value.value,))
+ self.assertEqual(len(kwargs), 1)
+ self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct())
+
+ def test_equal(self):
+ value = Value('name')
+ same_value = Value('name')
+ other_value = Value('username')
+ self.assertEqual(value, same_value)
+ self.assertNotEqual(value, other_value)
+
+ def test_hash(self):
+ d = {Value('name'): 'Bob'}
+ self.assertIn(Value('name'), d)
+ self.assertEqual(d[Value('name')], 'Bob')
+
+ def test_equal_output_field(self):
+ value = Value('name', output_field=CharField())
+ same_value = Value('name', output_field=CharField())
+ other_value = Value('name', output_field=TimeField())
+ no_output_field = Value('name')
+ self.assertEqual(value, same_value)
+ self.assertNotEqual(value, other_value)
+ self.assertNotEqual(value, no_output_field)
+
+ def test_raise_empty_expressionlist(self):
+ msg = 'ExpressionList requires at least one expression'
+ with self.assertRaisesMessage(ValueError, msg):
+ ExpressionList()
+
+
+class FieldTransformTests(TestCase):
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.sday = sday = datetime.date(2010, 6, 25)
+ cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
+ cls.ex1 = Experiment.objects.create(
+ name='Experiment 1',
+ assigned=sday,
+ completed=sday + datetime.timedelta(2),
+ estimated_time=datetime.timedelta(2),
+ start=stime,
+ end=stime + datetime.timedelta(2),
+ )
+
+ def test_month_aggregation(self):
+ self.assertEqual(
+ Experiment.objects.aggregate(month_count=Count('assigned__month')),
+ {'month_count': 1}
+ )
+
+ def test_transform_in_values(self):
+ self.assertQuerysetEqual(
+ Experiment.objects.values('assigned__month'),
+ ["{'assigned__month': 6}"]
+ )
+
+ def test_multiple_transforms_in_values(self):
+ self.assertQuerysetEqual(
+ Experiment.objects.values('end__date__month'),
+ ["{'end__date__month': 6}"]
+ )
+
+
+class ReprTests(TestCase):
+
+ def test_expressions(self):
+ self.assertEqual(
+ repr(Case(When(a=1))),
+ " THEN Value(None), ELSE Value(None)>"
+ )
+ self.assertEqual(
+ repr(When(Q(age__gte=18), then=Value('legal'))),
+ " THEN Value(legal)>"
+ )
+ self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
+ self.assertEqual(repr(F('published')), "F(published)")
+ self.assertEqual(repr(F('cost') + F('tax')), "")
+ self.assertEqual(
+ repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
+ "ExpressionWrapper(F(cost) + F(tax))"
+ )
+ self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
+ self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
+ self.assertEqual(repr(Random()), "Random()")
+ self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
+ self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
+ self.assertEqual(repr(Value(1)), "Value(1)")
+ self.assertEqual(
+ repr(ExpressionList(F('col'), F('anothercol'))),
+ 'ExpressionList(F(col), F(anothercol))'
+ )
+ self.assertEqual(
+ repr(ExpressionList(OrderBy(F('col'), descending=False))),
+ 'ExpressionList(OrderBy(F(col), descending=False))'
+ )
+
+ def test_functions(self):
+ self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
+ self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
+ self.assertEqual(repr(Length('a')), "Length(F(a))")
+ self.assertEqual(repr(Lower('a')), "Lower(F(a))")
+ self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
+ self.assertEqual(repr(Upper('a')), "Upper(F(a))")
+
+ def test_aggregates(self):
+ self.assertEqual(repr(Avg('a')), "Avg(F(a))")
+ self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
+ self.assertEqual(repr(Count('*')), "Count('*', distinct=False)")
+ self.assertEqual(repr(Max('a')), "Max(F(a))")
+ self.assertEqual(repr(Min('a')), "Min(F(a))")
+ self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
+ self.assertEqual(repr(Sum('a')), "Sum(F(a))")
+ self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
+
+ def test_filtered_aggregates(self):
+ filter = Q(a=1)
+ self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))")
+ self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), distinct=False, filter=(AND: ('a', 1)))")
+ self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))")
+ self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))")
+ self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)")
+ self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))")
+ self.assertEqual(
+ repr(Variance('a', sample=True, filter=filter)),
+ "Variance(F(a), filter=(AND: ('a', 1)), sample=True)"
+ )
+
+
+class CombinableTests(SimpleTestCase):
+ bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.'
+
+ def test_negation(self):
+ c = Combinable()
+ self.assertEqual(-c, c * -1)
+
+ def test_and(self):
+ with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
+ Combinable() & Combinable()
+
+ def test_or(self):
+ with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
+ Combinable() | Combinable()
+
+ def test_reversed_and(self):
+ with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
+ object() & Combinable()
+
+ def test_reversed_or(self):
+ with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
+ object() | Combinable()
diff --git a/tests/field_deconstruction/__init__.py b/tests/field_deconstruction/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/field_deconstruction/tests.py b/tests/field_deconstruction/tests.py
new file mode 100644
index 00000000..1a6385db
--- /dev/null
+++ b/tests/field_deconstruction/tests.py
@@ -0,0 +1,396 @@
+from __future__ import unicode_literals
+
+from django.apps import apps
+from django.db import models
+from django.test import SimpleTestCase, override_settings
+from django.test.utils import isolate_lru_cache
+from django.utils import six
+
+
+class FieldDeconstructionTests(SimpleTestCase):
+ """
+ Tests the deconstruct() method on all core fields.
+ """
+
+ def test_name(self):
+ """
+ Tests the outputting of the correct name if assigned one.
+ """
+ # First try using a "normal" field
+ field = models.CharField(max_length=65)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertIsNone(name)
+ field.set_attributes_from_name("is_awesome_test")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(name, "is_awesome_test")
+ self.assertIsInstance(name, six.text_type)
+ # Now try with a ForeignKey
+ field = models.ForeignKey("some_fake.ModelName", models.CASCADE)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertIsNone(name)
+ field.set_attributes_from_name("author")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(name, "author")
+
+ def test_auto_field(self):
+ field = models.AutoField(primary_key=True)
+ field.set_attributes_from_name("id")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.AutoField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"primary_key": True})
+
+ def test_big_integer_field(self):
+ field = models.BigIntegerField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.BigIntegerField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_boolean_field(self):
+ field = models.BooleanField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.BooleanField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+ field = models.BooleanField(default=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.BooleanField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"default": True})
+
+ def test_char_field(self):
+ field = models.CharField(max_length=65)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.CharField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_length": 65})
+ field = models.CharField(max_length=65, null=True, blank=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.CharField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})
+
+ def test_char_field_choices(self):
+ field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.CharField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})
+
+ def test_csi_field(self):
+ field = models.CommaSeparatedIntegerField(max_length=100)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_length": 100})
+
+ def test_date_field(self):
+ field = models.DateField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DateField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+ field = models.DateField(auto_now=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DateField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"auto_now": True})
+
+ def test_datetime_field(self):
+ field = models.DateTimeField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DateTimeField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+ field = models.DateTimeField(auto_now_add=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DateTimeField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"auto_now_add": True})
+ # Bug #21785
+ field = models.DateTimeField(auto_now=True, auto_now_add=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DateTimeField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})
+
+ def test_decimal_field(self):
+ field = models.DecimalField(max_digits=5, decimal_places=2)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DecimalField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
+
+ def test_decimal_field_0_decimal_places(self):
+ """
+ A DecimalField with decimal_places=0 should work (#22272).
+ """
+ field = models.DecimalField(max_digits=5, decimal_places=0)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.DecimalField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})
+
+ def test_email_field(self):
+ field = models.EmailField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.EmailField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_length": 254})
+ field = models.EmailField(max_length=255)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.EmailField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_length": 255})
+
+ def test_file_field(self):
+ field = models.FileField(upload_to="foo/bar")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.FileField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"upload_to": "foo/bar"})
+ # Test max_length
+ field = models.FileField(upload_to="foo/bar", max_length=200)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.FileField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
+
+ def test_file_path_field(self):
+ field = models.FilePathField(match=r".*\.txt$")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.FilePathField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"match": r".*\.txt$"})
+ field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.FilePathField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
+
+ def test_float_field(self):
+ field = models.FloatField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.FloatField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_foreign_key(self):
+ # Test basic pointing
+ from django.contrib.auth.models import Permission
+ field = models.ForeignKey("auth.Permission", models.CASCADE)
+ field.remote_field.model = Permission
+ field.remote_field.field_name = "id"
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
+ self.assertFalse(hasattr(kwargs['to'], "setting_name"))
+ # Test swap detection for swappable model
+ field = models.ForeignKey("auth.User", models.CASCADE)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE})
+ self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
+ # Test nonexistent (for now) model
+ field = models.ForeignKey("something.Else", models.CASCADE)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE})
+ # Test on_delete
+ field = models.ForeignKey("auth.User", models.SET_NULL)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
+ # Test to_field preservation
+ field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE})
+ # Test related_name preservation
+ field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE})
+
+ @override_settings(AUTH_USER_MODEL="auth.Permission")
+ def test_foreign_key_swapped(self):
+ with isolate_lru_cache(apps.get_swappable_settings_name):
+ # It doesn't matter that we swapped out user for permission;
+ # there's no validation. We just want to check the setting stuff works.
+ field = models.ForeignKey("auth.Permission", models.CASCADE)
+ name, path, args, kwargs = field.deconstruct()
+
+ self.assertEqual(path, "django.db.models.ForeignKey")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
+ self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
+
+ def test_image_field(self):
+ field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ImageField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
+
+ def test_integer_field(self):
+ field = models.IntegerField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.IntegerField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_ip_address_field(self):
+ field = models.IPAddressField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.IPAddressField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_generic_ip_address_field(self):
+ field = models.GenericIPAddressField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.GenericIPAddressField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+ field = models.GenericIPAddressField(protocol="IPv6")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.GenericIPAddressField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"protocol": "IPv6"})
+
+ def test_many_to_many_field(self):
+ # Test normal
+ field = models.ManyToManyField("auth.Permission")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ManyToManyField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission"})
+ self.assertFalse(hasattr(kwargs['to'], "setting_name"))
+ # Test swappable
+ field = models.ManyToManyField("auth.User")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ManyToManyField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.User"})
+ self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
+ # Test through
+ field = models.ManyToManyField("auth.Permission", through="auth.Group")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ManyToManyField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
+ # Test custom db_table
+ field = models.ManyToManyField("auth.Permission", db_table="custom_table")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ManyToManyField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
+ # Test related_name
+ field = models.ManyToManyField("auth.Permission", related_name="custom_table")
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.ManyToManyField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})
+
+ @override_settings(AUTH_USER_MODEL="auth.Permission")
+ def test_many_to_many_field_swapped(self):
+ with isolate_lru_cache(apps.get_swappable_settings_name):
+ # It doesn't matter that we swapped out user for permission;
+ # there's no validation. We just want to check the setting stuff works.
+ field = models.ManyToManyField("auth.Permission")
+ name, path, args, kwargs = field.deconstruct()
+
+ self.assertEqual(path, "django.db.models.ManyToManyField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"to": "auth.Permission"})
+ self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
+
+ def test_null_boolean_field(self):
+ field = models.NullBooleanField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.NullBooleanField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_positive_integer_field(self):
+ field = models.PositiveIntegerField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.PositiveIntegerField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_positive_small_integer_field(self):
+ field = models.PositiveSmallIntegerField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_slug_field(self):
+ field = models.SlugField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.SlugField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+ field = models.SlugField(db_index=False, max_length=231)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.SlugField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"db_index": False, "max_length": 231})
+
+ def test_small_integer_field(self):
+ field = models.SmallIntegerField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.SmallIntegerField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_text_field(self):
+ field = models.TextField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.TextField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ def test_time_field(self):
+ field = models.TimeField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.TimeField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+
+ field = models.TimeField(auto_now=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {'auto_now': True})
+
+ field = models.TimeField(auto_now_add=True)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {'auto_now_add': True})
+
+ def test_url_field(self):
+ field = models.URLField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.URLField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
+ field = models.URLField(max_length=231)
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.URLField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {"max_length": 231})
+
+ def test_binary_field(self):
+ field = models.BinaryField()
+ name, path, args, kwargs = field.deconstruct()
+ self.assertEqual(path, "django.db.models.BinaryField")
+ self.assertEqual(args, [])
+ self.assertEqual(kwargs, {})
diff --git a/tests/field_defaults/__init__.py b/tests/field_defaults/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/field_defaults/models.py b/tests/field_defaults/models.py
new file mode 100644
index 00000000..4f062320
--- /dev/null
+++ b/tests/field_defaults/models.py
@@ -0,0 +1,25 @@
+# coding: utf-8
+"""
+Callable defaults
+
+You can pass callable objects as the ``default`` parameter to a field. When
+the object is created without an explicit value passed in, Django will call
+the method to determine the default value.
+
+This example uses ``datetime.datetime.now`` as the default for the ``pub_date``
+field.
+"""
+
+from datetime import datetime
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ headline = models.CharField(max_length=100, default='Default headline')
+ pub_date = models.DateTimeField(default=datetime.now)
+
+ def __str__(self):
+ return self.headline
diff --git a/tests/field_defaults/tests.py b/tests/field_defaults/tests.py
new file mode 100644
index 00000000..031fd75f
--- /dev/null
+++ b/tests/field_defaults/tests.py
@@ -0,0 +1,17 @@
+from datetime import datetime
+
+from django.test import TestCase
+from django.utils import six
+
+from .models import Article
+
+
+class DefaultTests(TestCase):
+ def test_field_defaults(self):
+ a = Article()
+ now = datetime.now()
+ a.save()
+
+ self.assertIsInstance(a.id, six.integer_types)
+ self.assertEqual(a.headline, "Default headline")
+ self.assertLess((now - a.pub_date).seconds, 5)
diff --git a/tests/field_subclassing/__init__.py b/tests/field_subclassing/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/field_subclassing/fields.py b/tests/field_subclassing/fields.py
new file mode 100644
index 00000000..c2e4b50c
--- /dev/null
+++ b/tests/field_subclassing/fields.py
@@ -0,0 +1,8 @@
+from __future__ import unicode_literals
+
+from django.db import models
+
+
+class CustomTypedField(models.TextField):
+ def db_type(self, connection):
+ return 'custom_field'
diff --git a/tests/field_subclassing/tests.py b/tests/field_subclassing/tests.py
new file mode 100644
index 00000000..d291276c
--- /dev/null
+++ b/tests/field_subclassing/tests.py
@@ -0,0 +1,13 @@
+from __future__ import unicode_literals
+
+from django.db import connection
+from django.test import SimpleTestCase
+
+from .fields import CustomTypedField
+
+
+class TestDbType(SimpleTestCase):
+
+ def test_db_parameters_respects_db_type(self):
+ f = CustomTypedField()
+ self.assertEqual(f.db_parameters(connection)['type'], 'custom_field')
diff --git a/tests/force_insert_update/__init__.py b/tests/force_insert_update/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/force_insert_update/models.py b/tests/force_insert_update/models.py
new file mode 100644
index 00000000..a98eadb4
--- /dev/null
+++ b/tests/force_insert_update/models.py
@@ -0,0 +1,28 @@
+"""
+Tests for forcing insert and update queries (instead of Django's normal
+automatic behavior).
+"""
+from django.db import models
+
+
+class Counter(models.Model):
+ name = models.CharField(max_length=10)
+ value = models.IntegerField()
+
+
+class InheritedCounter(Counter):
+ tag = models.CharField(max_length=10)
+
+
+class ProxyCounter(Counter):
+ class Meta:
+ proxy = True
+
+
+class SubCounter(Counter):
+ pass
+
+
+class WithCustomPK(models.Model):
+ name = models.IntegerField(primary_key=True)
+ value = models.IntegerField()
diff --git a/tests/force_insert_update/tests.py b/tests/force_insert_update/tests.py
new file mode 100644
index 00000000..ae8b771e
--- /dev/null
+++ b/tests/force_insert_update/tests.py
@@ -0,0 +1,69 @@
+from __future__ import unicode_literals
+
+from django.db import DatabaseError, IntegrityError, transaction
+from django.test import TestCase
+
+from .models import (
+ Counter, InheritedCounter, ProxyCounter, SubCounter, WithCustomPK,
+)
+
+
+class ForceTests(TestCase):
+ def test_force_update(self):
+ c = Counter.objects.create(name="one", value=1)
+
+ # The normal case
+ c.value = 2
+ c.save()
+ # Same thing, via an update
+ c.value = 3
+ c.save(force_update=True)
+
+ # Won't work because force_update and force_insert are mutually
+ # exclusive
+ c.value = 4
+ with self.assertRaises(ValueError):
+ c.save(force_insert=True, force_update=True)
+
+ # Try to update something that doesn't have a primary key in the first
+ # place.
+ c1 = Counter(name="two", value=2)
+ with self.assertRaises(ValueError):
+ with transaction.atomic():
+ c1.save(force_update=True)
+ c1.save(force_insert=True)
+
+ # Won't work because we can't insert a pk of the same value.
+ c.value = 5
+ with self.assertRaises(IntegrityError):
+ with transaction.atomic():
+ c.save(force_insert=True)
+
+ # Trying to update should still fail, even with manual primary keys, if
+ # the data isn't in the database already.
+ obj = WithCustomPK(name=1, value=1)
+ with self.assertRaises(DatabaseError):
+ with transaction.atomic():
+ obj.save(force_update=True)
+
+
+class InheritanceTests(TestCase):
+ def test_force_update_on_inherited_model(self):
+ a = InheritedCounter(name="count", value=1, tag="spam")
+ a.save()
+ a.save(force_update=True)
+
+ def test_force_update_on_proxy_model(self):
+ a = ProxyCounter(name="count", value=1)
+ a.save()
+ a.save(force_update=True)
+
+ def test_force_update_on_inherited_model_without_fields(self):
+ '''
+ Issue 13864: force_update fails on subclassed models, if they don't
+ specify custom fields.
+ '''
+ a = SubCounter(name="count", value=1)
+ a.save()
+ a.value = 2
+ a.save(force_update=True)
diff --git a/tests/generic_relations_regress/__init__.py b/tests/generic_relations_regress/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/generic_relations_regress/models.py b/tests/generic_relations_regress/models.py
new file mode 100644
index 00000000..eb4f645d
--- /dev/null
+++ b/tests/generic_relations_regress/models.py
@@ -0,0 +1,218 @@
+from django.contrib.contenttypes.fields import (
+ GenericForeignKey, GenericRelation,
+)
+from django.contrib.contenttypes.models import ContentType
+from django.db import models
+from django.db.models.deletion import ProtectedError
+from django.utils.encoding import python_2_unicode_compatible
+
+__all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address',
+ 'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2',
+ 'Contact', 'Organization', 'Note', 'Company')
+
+
+@python_2_unicode_compatible
+class Link(models.Model):
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content_object = GenericForeignKey()
+
+ def __str__(self):
+ return "Link to %s id=%s" % (self.content_type, self.object_id)
+
+
+@python_2_unicode_compatible
+class Place(models.Model):
+ name = models.CharField(max_length=100)
+ links = GenericRelation(Link)
+
+ def __str__(self):
+ return "Place: %s" % self.name
+
+
+@python_2_unicode_compatible
+class Restaurant(Place):
+ def __str__(self):
+ return "Restaurant: %s" % self.name
+
+
+@python_2_unicode_compatible
+class Address(models.Model):
+ street = models.CharField(max_length=80)
+ city = models.CharField(max_length=50)
+ state = models.CharField(max_length=2)
+ zipcode = models.CharField(max_length=5)
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content_object = GenericForeignKey()
+
+ def __str__(self):
+ return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode)
+
+
+@python_2_unicode_compatible
+class Person(models.Model):
+ account = models.IntegerField(primary_key=True)
+ name = models.CharField(max_length=128)
+ addresses = GenericRelation(Address)
+
+ def __str__(self):
+ return self.name
+
+
+class CharLink(models.Model):
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.CharField(max_length=100)
+ content_object = GenericForeignKey()
+
+
+class TextLink(models.Model):
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.TextField()
+ content_object = GenericForeignKey()
+
+
+class OddRelation1(models.Model):
+ name = models.CharField(max_length=100)
+ clinks = GenericRelation(CharLink)
+
+
+class OddRelation2(models.Model):
+ name = models.CharField(max_length=100)
+ tlinks = GenericRelation(TextLink)
+
+
+# models for test_q_object_or:
+class Note(models.Model):
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content_object = GenericForeignKey()
+ note = models.TextField()
+
+
+class Contact(models.Model):
+ notes = GenericRelation(Note)
+
+
+class Organization(models.Model):
+ name = models.CharField(max_length=255)
+ contacts = models.ManyToManyField(Contact, related_name='organizations')
+
+
+@python_2_unicode_compatible
+class Company(models.Model):
+ name = models.CharField(max_length=100)
+ links = GenericRelation(Link)
+
+ def __str__(self):
+ return "Company: %s" % self.name
+
+
+# For testing #13085 fix, we also use Note model defined above
+class Developer(models.Model):
+ name = models.CharField(max_length=15)
+
+
+@python_2_unicode_compatible
+class Team(models.Model):
+ name = models.CharField(max_length=15)
+ members = models.ManyToManyField(Developer)
+
+ def __str__(self):
+ return "%s team" % self.name
+
+ def __len__(self):
+ return self.members.count()
+
+
+class Guild(models.Model):
+ name = models.CharField(max_length=15)
+ members = models.ManyToManyField(Developer)
+
+ def __nonzero__(self):
+
+ return self.members.count()
+
+
+class Tag(models.Model):
+ content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='g_r_r_tags')
+ object_id = models.CharField(max_length=15)
+ content_object = GenericForeignKey()
+ label = models.CharField(max_length=15)
+
+
+class Board(models.Model):
+ name = models.CharField(primary_key=True, max_length=15)
+
+
+class SpecialGenericRelation(GenericRelation):
+ def __init__(self, *args, **kwargs):
+ super(SpecialGenericRelation, self).__init__(*args, **kwargs)
+ self.editable = True
+ self.save_form_data_calls = 0
+
+ def save_form_data(self, *args, **kwargs):
+ self.save_form_data_calls += 1
+
+
+class HasLinks(models.Model):
+ links = SpecialGenericRelation(Link)
+
+ class Meta:
+ abstract = True
+
+
+class HasLinkThing(HasLinks):
+ pass
+
+
+class A(models.Model):
+ flag = models.NullBooleanField()
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content_object = GenericForeignKey('content_type', 'object_id')
+
+
+class B(models.Model):
+ a = GenericRelation(A)
+
+ class Meta:
+ ordering = ('id',)
+
+
+class C(models.Model):
+ b = models.ForeignKey(B, models.CASCADE)
+
+ class Meta:
+ ordering = ('id',)
+
+
+class D(models.Model):
+ b = models.ForeignKey(B, models.SET_NULL, null=True)
+
+ class Meta:
+ ordering = ('id',)
+
+
+# Ticket #22998
+
+class Node(models.Model):
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content = GenericForeignKey('content_type', 'object_id')
+
+
+class Content(models.Model):
+ nodes = GenericRelation(Node)
+ related_obj = models.ForeignKey('Related', models.CASCADE)
+
+
+class Related(models.Model):
+ pass
+
+
+def prevent_deletes(sender, instance, **kwargs):
+ raise ProtectedError("Not allowed to delete.", [instance])
+
+
+models.signals.pre_delete.connect(prevent_deletes, sender=Node)
diff --git a/tests/generic_relations_regress/tests.py b/tests/generic_relations_regress/tests.py
new file mode 100644
index 00000000..d3986b69
--- /dev/null
+++ b/tests/generic_relations_regress/tests.py
@@ -0,0 +1,246 @@
+from django.db.models import Q, Sum
+from django.db.models.deletion import ProtectedError
+from django.db.utils import IntegrityError
+from django.forms.models import modelform_factory
+from django.test import TestCase, skipIfDBFeature
+
+from .models import (
+ A, Address, B, Board, C, CharLink, Company, Contact, Content, D, Developer,
+ Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2,
+ Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink,
+)
+
+
+class GenericRelationTests(TestCase):
+
+ def test_inherited_models_content_type(self):
+ """
+ GenericRelations on inherited classes use the correct content type.
+ """
+ p = Place.objects.create(name="South Park")
+ r = Restaurant.objects.create(name="Chubby's")
+ l1 = Link.objects.create(content_object=p)
+ l2 = Link.objects.create(content_object=r)
+ self.assertEqual(list(p.links.all()), [l1])
+ self.assertEqual(list(r.links.all()), [l2])
+
+ def test_reverse_relation_pk(self):
+ """
+ The correct column name is used for the primary key on the
+ originating model of a query. See #12664.
+ """
+ p = Person.objects.create(account=23, name='Chef')
+ Address.objects.create(street='123 Anywhere Place',
+ city='Conifer', state='CO',
+ zipcode='80433', content_object=p)
+
+ qs = Person.objects.filter(addresses__zipcode='80433')
+ self.assertEqual(1, qs.count())
+ self.assertEqual('Chef', qs[0].name)
+
+ def test_charlink_delete(self):
+ oddrel = OddRelation1.objects.create(name='clink')
+ CharLink.objects.create(content_object=oddrel)
+ oddrel.delete()
+
+ def test_textlink_delete(self):
+ oddrel = OddRelation2.objects.create(name='tlink')
+ TextLink.objects.create(content_object=oddrel)
+ oddrel.delete()
+
+ def test_q_object_or(self):
+ """
+ SQL query parameters for generic relations are properly
+ grouped when OR is used (#11535).
+
+ In this bug the first query (below) works while the second, with the
+ query parameters the same but in reverse order, does not.
+
+ The issue is that the generic relation conditions do not get properly
+ grouped in parentheses.
+ """
+ note_contact = Contact.objects.create()
+ org_contact = Contact.objects.create()
+ Note.objects.create(note='note', content_object=note_contact)
+ org = Organization.objects.create(name='org name')
+ org.contacts.add(org_contact)
+ # search with a non-matching note and a matching org name
+ qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
+ Q(organizations__name__icontains=r'org name'))
+ self.assertIn(org_contact, qs)
+ # search again, with the same query parameters, in reverse order
+ qs = Contact.objects.filter(
+ Q(organizations__name__icontains=r'org name') |
+ Q(notes__note__icontains=r'other note'))
+ self.assertIn(org_contact, qs)
+
+ def test_join_reuse(self):
+ qs = Person.objects.filter(
+ addresses__street='foo'
+ ).filter(
+ addresses__street='bar'
+ )
+ self.assertEqual(str(qs.query).count('JOIN'), 2)
+
+ def test_generic_relation_ordering(self):
+ """
+ Ordering over a generic relation does not include extraneous
+ duplicate results, nor excludes rows not participating in the relation.
+ """
+ p1 = Place.objects.create(name="South Park")
+ p2 = Place.objects.create(name="The City")
+ c = Company.objects.create(name="Chubby's Intl.")
+ Link.objects.create(content_object=p1)
+ Link.objects.create(content_object=c)
+
+ places = list(Place.objects.order_by('links__id'))
+
+ def count_places(place):
+ return len([p for p in places if p.id == place.id])
+
+ self.assertEqual(len(places), 2)
+ self.assertEqual(count_places(p1), 1)
+ self.assertEqual(count_places(p2), 1)
+
+ def test_target_model_is_unsaved(self):
+ """Test related to #13085"""
+ # Fails with another, ORM-level error
+ dev1 = Developer(name='Joe')
+ note = Note(note='Deserves promotion', content_object=dev1)
+ with self.assertRaises(IntegrityError):
+ note.save()
+
+ def test_target_model_len_zero(self):
+ """
+ Saving a model with a GenericForeignKey to a model instance whose
+ __len__ method returns 0 (Team.__len__() here) shouldn't fail (#13085).
+ """
+ team1 = Team.objects.create(name='Backend devs')
+ note = Note(note='Deserve a bonus', content_object=team1)
+ note.save()
+
+ def test_target_model_nonzero_false(self):
+ """Test related to #13085"""
+ # __nonzero__() returns False -- This actually doesn't currently fail.
+ # This test validates that
+ g1 = Guild.objects.create(name='First guild')
+ note = Note(note='Note for guild', content_object=g1)
+ note.save()
+
+ @skipIfDBFeature('interprets_empty_strings_as_nulls')
+ def test_gfk_to_model_with_empty_pk(self):
+ """Test related to #13085"""
+ # Saving model with GenericForeignKey to model instance with an
+ # empty CharField PK
+ b1 = Board.objects.create(name='')
+ tag = Tag(label='VP', content_object=b1)
+ tag.save()
+
+ def test_ticket_20378(self):
+ # Create a couple of extra HasLinkThing so that the autopk value
+ # isn't the same for Link and HasLinkThing.
+ hs1 = HasLinkThing.objects.create()
+ hs2 = HasLinkThing.objects.create()
+ hs3 = HasLinkThing.objects.create()
+ hs4 = HasLinkThing.objects.create()
+ l1 = Link.objects.create(content_object=hs3)
+ l2 = Link.objects.create(content_object=hs4)
+ self.assertSequenceEqual(HasLinkThing.objects.filter(links=l1), [hs3])
+ self.assertSequenceEqual(HasLinkThing.objects.filter(links=l2), [hs4])
+ self.assertSequenceEqual(HasLinkThing.objects.exclude(links=l2), [hs1, hs2, hs3])
+ self.assertSequenceEqual(HasLinkThing.objects.exclude(links=l1), [hs1, hs2, hs4])
+
+ def test_ticket_20564(self):
+ b1 = B.objects.create()
+ b2 = B.objects.create()
+ b3 = B.objects.create()
+ c1 = C.objects.create(b=b1)
+ c2 = C.objects.create(b=b2)
+ c3 = C.objects.create(b=b3)
+ A.objects.create(flag=None, content_object=b1)
+ A.objects.create(flag=True, content_object=b2)
+ self.assertSequenceEqual(C.objects.filter(b__a__flag=None), [c1, c3])
+ self.assertSequenceEqual(C.objects.exclude(b__a__flag=None), [c2])
+
+ def test_ticket_20564_nullable_fk(self):
+ b1 = B.objects.create()
+ b2 = B.objects.create()
+ b3 = B.objects.create()
+ d1 = D.objects.create(b=b1)
+ d2 = D.objects.create(b=b2)
+ d3 = D.objects.create(b=b3)
+ d4 = D.objects.create()
+ A.objects.create(flag=None, content_object=b1)
+ A.objects.create(flag=True, content_object=b1)
+ A.objects.create(flag=True, content_object=b2)
+ self.assertSequenceEqual(D.objects.exclude(b__a__flag=None), [d2])
+ self.assertSequenceEqual(D.objects.filter(b__a__flag=None), [d1, d3, d4])
+ self.assertSequenceEqual(B.objects.filter(a__flag=None), [b1, b3])
+ self.assertSequenceEqual(B.objects.exclude(a__flag=None), [b2])
+
+ def test_extra_join_condition(self):
+ # A crude check that content_type_id is taken in account in the
+ # join/subquery condition.
+ self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
+ # No need for any joins - the join from inner query can be trimmed in
+ # this case (but not in the above case as no a objects at all for given
+ # B would then fail).
+ self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
+ self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
+
+ def test_annotate(self):
+ hs1 = HasLinkThing.objects.create()
+ hs2 = HasLinkThing.objects.create()
+ HasLinkThing.objects.create()
+ b = Board.objects.create(name=str(hs1.pk))
+ Link.objects.create(content_object=hs2)
+ link = Link.objects.create(content_object=hs1)
+ Link.objects.create(content_object=b)
+ qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
+ # If content_type restriction isn't in the query's join condition,
+ # then wrong results are produced here as the link to b will also match
+ # (b and hs1 have equal pks).
+ self.assertEqual(qs.count(), 1)
+ self.assertEqual(qs[0].links__sum, link.id)
+ link.delete()
+ # Now if we don't have proper left join, we will not produce any
+ # results at all here.
+ # clear cached results
+ qs = qs.all()
+ self.assertEqual(qs.count(), 1)
+ # Note - 0 here would be a nicer result...
+ self.assertIs(qs[0].links__sum, None)
+ # Finally test that filtering works.
+ self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
+ self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
+
+ def test_filter_targets_related_pk(self):
+ HasLinkThing.objects.create()
+ hs2 = HasLinkThing.objects.create()
+ link = Link.objects.create(content_object=hs2)
+ self.assertNotEqual(link.object_id, link.pk)
+ self.assertSequenceEqual(HasLinkThing.objects.filter(links=link.pk), [hs2])
+
+ def test_editable_generic_rel(self):
+ GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
+ form = GenericRelationForm()
+ self.assertIn('links', form.fields)
+ form = GenericRelationForm({'links': None})
+ self.assertTrue(form.is_valid())
+ form.save()
+ links = HasLinkThing._meta.get_field('links')
+ self.assertEqual(links.save_form_data_calls, 1)
+
+ def test_ticket_22998(self):
+ related = Related.objects.create()
+ content = Content.objects.create(related_obj=related)
+ Node.objects.create(content=content)
+
+ # deleting the Related cascades to the Content cascades to the Node,
+ # where the pre_delete signal should fire and prevent deletion.
+ with self.assertRaises(ProtectedError):
+ related.delete()
+
+ def test_ticket_22982(self):
+ place = Place.objects.create(name='My Place')
+ self.assertIn('GenericRelatedObjectManager', str(place.links))
diff --git a/tests/indexes/__init__.py b/tests/indexes/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/indexes/models.py b/tests/indexes/models.py
new file mode 100644
index 00000000..208da32c
--- /dev/null
+++ b/tests/indexes/models.py
@@ -0,0 +1,54 @@
+from django.db import connection, models
+
+
+class CurrentTranslation(models.ForeignObject):
+ """
+ Creates virtual relation to the translation with model cache enabled.
+ """
+ # Avoid validation
+ requires_unique_target = False
+
+ def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
+ # Disable reverse relation
+ kwargs['related_name'] = '+'
+ # Set unique to enable model cache.
+ kwargs['unique'] = True
+ super().__init__(to, on_delete, from_fields, to_fields, **kwargs)
+
+
+class ArticleTranslation(models.Model):
+
+ article = models.ForeignKey('indexes.Article', models.CASCADE)
+ article_no_constraint = models.ForeignKey('indexes.Article', models.CASCADE, db_constraint=False, related_name='+')
+ language = models.CharField(max_length=10, unique=True)
+ content = models.TextField()
+
+
+class Article(models.Model):
+ headline = models.CharField(max_length=100)
+ pub_date = models.DateTimeField()
+
+ # Add virtual relation to the ArticleTranslation model.
+ translation = CurrentTranslation(ArticleTranslation, models.CASCADE, ['id'], ['article'])
+
+ class Meta:
+ index_together = [
+ ["headline", "pub_date"],
+ ]
+
+
+# Model for index_together being used only with single list
+class IndexTogetherSingleList(models.Model):
+ headline = models.CharField(max_length=100)
+ pub_date = models.DateTimeField()
+
+ class Meta:
+ index_together = ["headline", "pub_date"]
+
+
+# Indexing a TextField on Oracle or MySQL results in index creation error.
+if connection.vendor == 'postgresql':
+ class IndexedArticle(models.Model):
+ headline = models.CharField(max_length=100, db_index=True)
+ body = models.TextField(db_index=True)
+ slug = models.CharField(max_length=40, unique=True)
diff --git a/tests/indexes/tests.py b/tests/indexes/tests.py
new file mode 100644
index 00000000..ee2cbd15
--- /dev/null
+++ b/tests/indexes/tests.py
@@ -0,0 +1,125 @@
+from unittest import skipUnless
+
+from django.db import connection
+from django.db.models.deletion import CASCADE
+from django.db.models.fields.related import ForeignKey
+from django.test import TestCase, TransactionTestCase
+
+from .models import Article, ArticleTranslation, IndexTogetherSingleList
+
+
+class SchemaIndexesTests(TestCase):
+ """
+ Test index handling by the db.backends.schema infrastructure.
+ """
+
+ def test_index_name_hash(self):
+ """
+ Index names should be deterministic.
+ """
+ with connection.schema_editor() as editor:
+ index_name = editor._create_index_name(
+ table_name=Article._meta.db_table,
+ column_names=("c1",),
+ suffix="123",
+ )
+ self.assertEqual(index_name, "indexes_article_c1_a52bd80b123")
+
+ def test_index_name(self):
+ """
+ Index names on the built-in database backends::
+ * Are truncated as needed.
+ * Include all the column names.
+ * Include a deterministic hash.
+ """
+ long_name = 'l%sng' % ('o' * 100)
+ with connection.schema_editor() as editor:
+ index_name = editor._create_index_name(
+ table_name=Article._meta.db_table,
+ column_names=('c1', 'c2', long_name),
+ suffix='ix',
+ )
+ expected = {
+ 'mysql': 'indexes_article_c1_c2_looooooooooooooooooo_255179b2ix',
+ 'oracle': 'indexes_a_c1_c2_loo_255179b2ix',
+ 'postgresql': 'indexes_article_c1_c2_loooooooooooooooooo_255179b2ix',
+ 'sqlite': 'indexes_article_c1_c2_l%sng_255179b2ix' % ('o' * 100),
+ }
+ if connection.vendor not in expected:
+ self.skipTest('This test is only supported on the built-in database backends.')
+ self.assertEqual(index_name, expected[connection.vendor])
+
+ def test_index_together(self):
+ editor = connection.schema_editor()
+ index_sql = [str(statement) for statement in editor._model_indexes_sql(Article)]
+ self.assertEqual(len(index_sql), 1)
+ # Ensure the index name is properly quoted
+ self.assertIn(
+ connection.ops.quote_name(
+ editor._create_index_name(Article._meta.db_table, ['headline', 'pub_date'], suffix='_idx')
+ ),
+ index_sql[0]
+ )
+
+ def test_index_together_single_list(self):
+ # Test for using index_together with a single list (#22172)
+ index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
+ self.assertEqual(len(index_sql), 1)
+
+ @skipUnless(connection.vendor == 'postgresql', "This is a postgresql-specific issue")
+ def test_postgresql_text_indexes(self):
+ """Test creation of PostgreSQL-specific text indexes (#12234)"""
+ from .models import IndexedArticle
+ index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(IndexedArticle)]
+ self.assertEqual(len(index_sql), 5)
+ self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
+ self.assertIn('("body" text_pattern_ops)', index_sql[3])
+ # unique=True and db_index=True should only create the varchar-specific
+ # index (#19441).
+ self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
+
+ @skipUnless(connection.vendor == 'postgresql', "This is a postgresql-specific issue")
+ def test_postgresql_virtual_relation_indexes(self):
+ """Test indexes are not created for related objects"""
+ index_sql = connection.schema_editor()._model_indexes_sql(Article)
+ self.assertEqual(len(index_sql), 1)
+
+
+@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
+class SchemaIndexesMySQLTests(TransactionTestCase):
+ available_apps = ['indexes']
+
+ def test_no_index_for_foreignkey(self):
+ """
+ MySQL on InnoDB already creates indexes automatically for foreign keys.
+ (#14180). An index should be created if db_constraint=False (#26171).
+ """
+ storage = connection.introspection.get_storage_engine(
+ connection.cursor(), ArticleTranslation._meta.db_table
+ )
+ if storage != "InnoDB":
+ self.skip("This test only applies to the InnoDB storage engine")
+ index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(ArticleTranslation)]
+ self.assertEqual(index_sql, [
+ 'CREATE INDEX `indexes_articletranslation_article_no_constraint_id_d6c0806b` '
+ 'ON `indexes_articletranslation` (`article_no_constraint_id`)'
+ ])
+
+ # The index also shouldn't be created if the ForeignKey is added after
+ # the model was created.
+ field_created = False
+ try:
+ with connection.schema_editor() as editor:
+ new_field = ForeignKey(Article, CASCADE)
+ new_field.set_attributes_from_name('new_foreign_key')
+ editor.add_field(ArticleTranslation, new_field)
+ field_created = True
+ self.assertEqual([str(statement) for statement in editor.deferred_sql], [
+ 'ALTER TABLE `indexes_articletranslation` '
+ 'ADD CONSTRAINT `indexes_articletrans_new_foreign_key_id_d27a9146_fk_indexes_a` '
+ 'FOREIGN KEY (`new_foreign_key_id`) REFERENCES `indexes_article` (`id`)'
+ ])
+ finally:
+ if field_created:
+ with connection.schema_editor() as editor:
+ editor.remove_field(ArticleTranslation, new_field)
diff --git a/tests/m2m_and_m2o/__init__.py b/tests/m2m_and_m2o/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_and_m2o/models.py b/tests/m2m_and_m2o/models.py
new file mode 100644
index 00000000..60f5c437
--- /dev/null
+++ b/tests/m2m_and_m2o/models.py
@@ -0,0 +1,31 @@
+"""
+Many-to-many and many-to-one relationships to the same table
+
+Make sure to set ``related_name`` if you use relationships to the same table.
+"""
+from __future__ import unicode_literals
+
+from django.db import models
+from django.utils import six
+from django.utils.encoding import python_2_unicode_compatible
+
+
+class User(models.Model):
+ username = models.CharField(max_length=20)
+
+
+@python_2_unicode_compatible
+class Issue(models.Model):
+ num = models.IntegerField()
+ cc = models.ManyToManyField(User, blank=True, related_name='test_issue_cc')
+ client = models.ForeignKey(User, models.CASCADE, related_name='test_issue_client')
+
+ def __str__(self):
+ return six.text_type(self.num)
+
+ class Meta:
+ ordering = ('num',)
+
+
+class UnicodeReferenceModel(models.Model):
+ others = models.ManyToManyField("UnicodeReferenceModel")
diff --git a/tests/m2m_and_m2o/tests.py b/tests/m2m_and_m2o/tests.py
new file mode 100644
index 00000000..2c84a7f2
--- /dev/null
+++ b/tests/m2m_and_m2o/tests.py
@@ -0,0 +1,94 @@
+from django.db.models import Q
+from django.test import TestCase
+
+from .models import Issue, UnicodeReferenceModel, User
+
+
+class RelatedObjectTests(TestCase):
+
+ def test_related_objects_have_name_attribute(self):
+ for field_name in ('test_issue_client', 'test_issue_cc'):
+ obj = User._meta.get_field(field_name)
+ self.assertEqual(field_name, obj.field.related_query_name())
+
+ def test_m2m_and_m2o(self):
+ r = User.objects.create(username="russell")
+ g = User.objects.create(username="gustav")
+
+ i1 = Issue(num=1)
+ i1.client = r
+ i1.save()
+
+ i2 = Issue(num=2)
+ i2.client = r
+ i2.save()
+ i2.cc.add(r)
+
+ i3 = Issue(num=3)
+ i3.client = g
+ i3.save()
+ i3.cc.add(r)
+
+ self.assertQuerysetEqual(
+ Issue.objects.filter(client=r.id), [
+ 1,
+ 2,
+ ],
+ lambda i: i.num
+ )
+ self.assertQuerysetEqual(
+ Issue.objects.filter(client=g.id), [
+ 3,
+ ],
+ lambda i: i.num
+ )
+ self.assertQuerysetEqual(
+ Issue.objects.filter(cc__id__exact=g.id), []
+ )
+ self.assertQuerysetEqual(
+ Issue.objects.filter(cc__id__exact=r.id), [
+ 2,
+ 3,
+ ],
+ lambda i: i.num
+ )
+
+ # These queries combine results from the m2m and the m2o relationships.
+ # They're three ways of saying the same thing.
+ self.assertQuerysetEqual(
+ Issue.objects.filter(Q(cc__id__exact=r.id) | Q(client=r.id)), [
+ 1,
+ 2,
+ 3,
+ ],
+ lambda i: i.num
+ )
+ self.assertQuerysetEqual(
+ Issue.objects.filter(cc__id__exact=r.id) | Issue.objects.filter(client=r.id), [
+ 1,
+ 2,
+ 3,
+ ],
+ lambda i: i.num
+ )
+ self.assertQuerysetEqual(
+ Issue.objects.filter(Q(client=r.id) | Q(cc__id__exact=r.id)), [
+ 1,
+ 2,
+ 3,
+ ],
+ lambda i: i.num
+ )
+
+
+class RelatedObjectUnicodeTests(TestCase):
+ def test_m2m_with_unicode_reference(self):
+ """
+ Regression test for #6045: references to other models can be unicode
+ strings, providing they are directly convertible to ASCII.
+ """
+ m1 = UnicodeReferenceModel.objects.create()
+ m2 = UnicodeReferenceModel.objects.create()
+ m2.others.add(m1) # used to cause an error (see ticket #6045)
+ m2.save()
+ list(m2.others.all()) # Force retrieval.
diff --git a/tests/m2m_intermediary/__init__.py b/tests/m2m_intermediary/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_intermediary/models.py b/tests/m2m_intermediary/models.py
new file mode 100644
index 00000000..3e73164e
--- /dev/null
+++ b/tests/m2m_intermediary/models.py
@@ -0,0 +1,43 @@
+"""
+Many-to-many relationships via an intermediary table
+
+For many-to-many relationships that need extra fields on the intermediary
+table, use an intermediary model.
+
+In this example, an ``Article`` can have multiple ``Reporter`` objects, and
+each ``Article``-``Reporter`` combination (a ``Writer``) has a ``position``
+field, which specifies the ``Reporter``'s position for the given article
+(e.g. "Staff writer").
+"""
+from __future__ import unicode_literals
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Reporter(models.Model):
+ first_name = models.CharField(max_length=30)
+ last_name = models.CharField(max_length=30)
+
+ def __str__(self):
+ return "%s %s" % (self.first_name, self.last_name)
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ headline = models.CharField(max_length=100)
+ pub_date = models.DateField()
+
+ def __str__(self):
+ return self.headline
+
+
+@python_2_unicode_compatible
+class Writer(models.Model):
+ reporter = models.ForeignKey(Reporter, models.CASCADE)
+ article = models.ForeignKey(Article, models.CASCADE)
+ position = models.CharField(max_length=100)
+
+ def __str__(self):
+ return '%s (%s)' % (self.reporter, self.position)
diff --git a/tests/m2m_intermediary/tests.py b/tests/m2m_intermediary/tests.py
new file mode 100644
index 00000000..ce4e1860
--- /dev/null
+++ b/tests/m2m_intermediary/tests.py
@@ -0,0 +1,41 @@
+from __future__ import unicode_literals
+
+from datetime import datetime
+
+from django.test import TestCase
+from django.utils import six
+
+from .models import Article, Reporter, Writer
+
+
+class M2MIntermediaryTests(TestCase):
+ def test_intermeiary(self):
+ r1 = Reporter.objects.create(first_name="John", last_name="Smith")
+ r2 = Reporter.objects.create(first_name="Jane", last_name="Doe")
+
+ a = Article.objects.create(
+ headline="This is a test", pub_date=datetime(2005, 7, 27)
+ )
+
+ w1 = Writer.objects.create(reporter=r1, article=a, position="Main writer")
+ w2 = Writer.objects.create(reporter=r2, article=a, position="Contributor")
+
+ self.assertQuerysetEqual(
+ a.writer_set.select_related().order_by("-position"), [
+ ("John Smith", "Main writer"),
+ ("Jane Doe", "Contributor"),
+ ],
+ lambda w: (six.text_type(w.reporter), w.position)
+ )
+ self.assertEqual(w1.reporter, r1)
+ self.assertEqual(w2.reporter, r2)
+
+ self.assertEqual(w1.article, a)
+ self.assertEqual(w2.article, a)
+
+ self.assertQuerysetEqual(
+ r1.writer_set.all(), [
+ ("John Smith", "Main writer")
+ ],
+ lambda w: (six.text_type(w.reporter), w.position)
+ )
diff --git a/tests/m2m_multiple/__init__.py b/tests/m2m_multiple/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_multiple/models.py b/tests/m2m_multiple/models.py
new file mode 100644
index 00000000..a6db9425
--- /dev/null
+++ b/tests/m2m_multiple/models.py
@@ -0,0 +1,36 @@
+"""
+Multiple many-to-many relationships between the same two tables
+
+In this example, an ``Article`` can have many "primary" ``Category`` objects
+and many "secondary" ``Category`` objects.
+
+Set ``related_name`` to designate what the reverse relationship is called.
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Category(models.Model):
+ name = models.CharField(max_length=20)
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ headline = models.CharField(max_length=50)
+ pub_date = models.DateTimeField()
+ primary_categories = models.ManyToManyField(Category, related_name='primary_article_set')
+ secondary_categories = models.ManyToManyField(Category, related_name='secondary_article_set')
+
+ class Meta:
+ ordering = ('pub_date',)
+
+ def __str__(self):
+ return self.headline
diff --git a/tests/m2m_multiple/tests.py b/tests/m2m_multiple/tests.py
new file mode 100644
index 00000000..9d605423
--- /dev/null
+++ b/tests/m2m_multiple/tests.py
@@ -0,0 +1,86 @@
+from __future__ import unicode_literals
+
+from datetime import datetime
+
+from django.test import TestCase
+
+from .models import Article, Category
+
+
+class M2MMultipleTests(TestCase):
+ def test_multiple(self):
+ c1, c2, c3, c4 = [
+ Category.objects.create(name=name)
+ for name in ["Sports", "News", "Crime", "Life"]
+ ]
+
+ a1 = Article.objects.create(
+ headline="Parrot steals", pub_date=datetime(2005, 11, 27)
+ )
+ a1.primary_categories.add(c2, c3)
+ a1.secondary_categories.add(c4)
+
+ a2 = Article.objects.create(
+ headline="Parrot runs", pub_date=datetime(2005, 11, 28)
+ )
+ a2.primary_categories.add(c1, c2)
+ a2.secondary_categories.add(c4)
+
+ self.assertQuerysetEqual(
+ a1.primary_categories.all(), [
+ "Crime",
+ "News",
+ ],
+ lambda c: c.name
+ )
+ self.assertQuerysetEqual(
+ a2.primary_categories.all(), [
+ "News",
+ "Sports",
+ ],
+ lambda c: c.name
+ )
+ self.assertQuerysetEqual(
+ a1.secondary_categories.all(), [
+ "Life",
+ ],
+ lambda c: c.name
+ )
+ self.assertQuerysetEqual(
+ c1.primary_article_set.all(), [
+ "Parrot runs",
+ ],
+ lambda a: a.headline
+ )
+ self.assertQuerysetEqual(
+ c1.secondary_article_set.all(), []
+ )
+ self.assertQuerysetEqual(
+ c2.primary_article_set.all(), [
+ "Parrot steals",
+ "Parrot runs",
+ ],
+ lambda a: a.headline
+ )
+ self.assertQuerysetEqual(
+ c2.secondary_article_set.all(), []
+ )
+ self.assertQuerysetEqual(
+ c3.primary_article_set.all(), [
+ "Parrot steals",
+ ],
+ lambda a: a.headline
+ )
+ self.assertQuerysetEqual(
+ c3.secondary_article_set.all(), []
+ )
+ self.assertQuerysetEqual(
+ c4.primary_article_set.all(), []
+ )
+ self.assertQuerysetEqual(
+ c4.secondary_article_set.all(), [
+ "Parrot steals",
+ "Parrot runs",
+ ],
+ lambda a: a.headline
+ )
diff --git a/tests/m2m_recursive/__init__.py b/tests/m2m_recursive/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_recursive/models.py b/tests/m2m_recursive/models.py
new file mode 100644
index 00000000..d224b3d5
--- /dev/null
+++ b/tests/m2m_recursive/models.py
@@ -0,0 +1,30 @@
+"""
+Many-to-many relationships between the same two tables
+
+In this example, a ``Person`` can have many friends, who are also ``Person``
+objects. Friendship is a symmetrical relationship - if I am your friend, you
+are my friend. Here, ``friends`` is an example of a symmetrical
+``ManyToManyField``.
+
+A ``Person`` can also have many idols - but while I may idolize you, you may
+not think the same of me. Here, ``idols`` is an example of a non-symmetrical
+``ManyToManyField``. Only recursive ``ManyToManyField`` fields may be
+non-symmetrical, and they are symmetrical by default.
+
+This test validates that the many-to-many table is created using a mangled name
+if there is a name clash, and tests that symmetry is preserved where
+appropriate.
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Person(models.Model):
+ name = models.CharField(max_length=20)
+ friends = models.ManyToManyField('self')
+ idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')
+
+ def __str__(self):
+ return self.name
diff --git a/tests/m2m_recursive/tests.py b/tests/m2m_recursive/tests.py
new file mode 100644
index 00000000..c6573800
--- /dev/null
+++ b/tests/m2m_recursive/tests.py
@@ -0,0 +1,183 @@
+from __future__ import unicode_literals
+
+from operator import attrgetter
+
+from django.test import TestCase
+
+from .models import Person
+
+
+class RecursiveM2MTests(TestCase):
+ def setUp(self):
+ self.a, self.b, self.c, self.d = [
+ Person.objects.create(name=name)
+ for name in ["Anne", "Bill", "Chuck", "David"]
+ ]
+
+ # Anne is friends with Bill and Chuck
+ self.a.friends.add(self.b, self.c)
+
+ # David is friends with Anne and Chuck - add in reverse direction
+ self.d.friends.add(self.a, self.c)
+
+ def test_recursive_m2m_all(self):
+ # Who is friends with Anne?
+ self.assertQuerysetEqual(
+ self.a.friends.all(), [
+ "Bill",
+ "Chuck",
+ "David"
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # Who is friends with Bill?
+ self.assertQuerysetEqual(
+ self.b.friends.all(), [
+ "Anne",
+ ],
+ attrgetter("name")
+ )
+ # Who is friends with Chuck?
+ self.assertQuerysetEqual(
+ self.c.friends.all(), [
+ "Anne",
+ "David"
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # Who is friends with David?
+ self.assertQuerysetEqual(
+ self.d.friends.all(), [
+ "Anne",
+ "Chuck",
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+
+ def test_recursive_m2m_reverse_add(self):
+ # Bill is already friends with Anne - add Anne again, but in the
+ # reverse direction
+ self.b.friends.add(self.a)
+
+ # Who is friends with Anne?
+ self.assertQuerysetEqual(
+ self.a.friends.all(), [
+ "Bill",
+ "Chuck",
+ "David",
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # Who is friends with Bill?
+ self.assertQuerysetEqual(
+ self.b.friends.all(), [
+ "Anne",
+ ],
+ attrgetter("name")
+ )
+
+ def test_recursive_m2m_remove(self):
+ # Remove Anne from Bill's friends
+ self.b.friends.remove(self.a)
+
+ # Who is friends with Anne?
+ self.assertQuerysetEqual(
+ self.a.friends.all(), [
+ "Chuck",
+ "David",
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # Who is friends with Bill?
+ self.assertQuerysetEqual(
+ self.b.friends.all(), []
+ )
+
+ def test_recursive_m2m_clear(self):
+ # Clear Anne's group of friends
+ self.a.friends.clear()
+
+ # Who is friends with Anne?
+ self.assertQuerysetEqual(
+ self.a.friends.all(), []
+ )
+
+ # Reverse relationships should also be gone
+ # Who is friends with Chuck?
+ self.assertQuerysetEqual(
+ self.c.friends.all(), [
+ "David",
+ ],
+ attrgetter("name")
+ )
+
+ # Who is friends with David?
+ self.assertQuerysetEqual(
+ self.d.friends.all(), [
+ "Chuck",
+ ],
+ attrgetter("name")
+ )
+
+ def test_recursive_m2m_add_via_related_name(self):
+ # David is idolized by Anne and Chuck - add in reverse direction
+ self.d.stalkers.add(self.a)
+
+ # Who are Anne's idols?
+ self.assertQuerysetEqual(
+ self.a.idols.all(), [
+ "David",
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # Who is stalking Anne?
+ self.assertQuerysetEqual(
+ self.a.stalkers.all(), [],
+ attrgetter("name")
+ )
+
+ def test_recursive_m2m_add_in_both_directions(self):
+ """Adding the same relation twice results in a single relation."""
+ # Ann idolizes David
+ self.a.idols.add(self.d)
+
+ # David is idolized by Anne
+ self.d.stalkers.add(self.a)
+
+ # Who are Anne's idols?
+ self.assertQuerysetEqual(
+ self.a.idols.all(), [
+ "David",
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # As the assertQuerysetEqual uses a set for comparison,
+ # check we've only got David listed once
+ self.assertEqual(self.a.idols.all().count(), 1)
+
+ def test_recursive_m2m_related_to_self(self):
+ # Ann idolizes herself
+ self.a.idols.add(self.a)
+
+ # Who are Anne's idols?
+ self.assertQuerysetEqual(
+ self.a.idols.all(), [
+ "Anne",
+ ],
+ attrgetter("name"),
+ ordered=False
+ )
+ # Who is stalking Anne?
+ self.assertQuerysetEqual(
+ self.a.stalkers.all(), [
+ "Anne",
+ ],
+ attrgetter("name")
+ )
diff --git a/tests/m2m_regress/__init__.py b/tests/m2m_regress/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_regress/models.py b/tests/m2m_regress/models.py
new file mode 100644
index 00000000..57f02b8f
--- /dev/null
+++ b/tests/m2m_regress/models.py
@@ -0,0 +1,100 @@
+from django.contrib.auth import models as auth
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+# No related name is needed here, since symmetrical relations are not
+# explicitly reversible.
+@python_2_unicode_compatible
+class SelfRefer(models.Model):
+ name = models.CharField(max_length=10)
+ references = models.ManyToManyField('self')
+ related = models.ManyToManyField('self')
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Tag(models.Model):
+ name = models.CharField(max_length=10)
+
+ def __str__(self):
+ return self.name
+
+
+# Regression for #11956 -- a many to many to the base class
+@python_2_unicode_compatible
+class TagCollection(Tag):
+ tags = models.ManyToManyField(Tag, related_name='tag_collections')
+
+ def __str__(self):
+ return self.name
+
+
+# A related_name is required on one of the ManyToManyField entries here because
+# they are both addressable as reverse relations from Tag.
+@python_2_unicode_compatible
+class Entry(models.Model):
+ name = models.CharField(max_length=10)
+ topics = models.ManyToManyField(Tag)
+ related = models.ManyToManyField(Tag, related_name="similar")
+
+ def __str__(self):
+ return self.name
+
+
+# Two models both inheriting from a base model with a self-referential m2m field
+class SelfReferChild(SelfRefer):
+ pass
+
+
+class SelfReferChildSibling(SelfRefer):
+ pass
+
+
+# Many-to-Many relation between models, where one of the PK's isn't an Autofield
+@python_2_unicode_compatible
+class Line(models.Model):
+ name = models.CharField(max_length=100)
+
+ def __str__(self):
+ return self.name
+
+
+class Worksheet(models.Model):
+ id = models.CharField(primary_key=True, max_length=100)
+ lines = models.ManyToManyField(Line, blank=True)
+
+
+# Regression for #11226 -- A model with the same name that another one to
+# which it has a m2m relation. This shouldn't cause a name clash between
+# the automatically created m2m intermediary table FK field names when
+# running migrate
+class User(models.Model):
+ name = models.CharField(max_length=30)
+ friends = models.ManyToManyField(auth.User)
+
+
+class BadModelWithSplit(models.Model):
+ name = models.CharField(max_length=1)
+
+ def split(self):
+ raise RuntimeError('split should not be called')
+
+ class Meta:
+ abstract = True
+
+
+class RegressionModelSplit(BadModelWithSplit):
+ """
+ Model with a split method should not cause an error in add_lazy_relation
+ """
+ others = models.ManyToManyField('self')
+
+
+# Regression for #24505 -- Two ManyToManyFields with the same "to" model
+# and related_name set to '+'.
+class Post(models.Model):
+ primary_lines = models.ManyToManyField(Line, related_name='+')
+ secondary_lines = models.ManyToManyField(Line, related_name='+')
diff --git a/tests/m2m_regress/tests.py b/tests/m2m_regress/tests.py
new file mode 100644
index 00000000..3c882c59
--- /dev/null
+++ b/tests/m2m_regress/tests.py
@@ -0,0 +1,122 @@
+from __future__ import unicode_literals
+
+from django.core.exceptions import FieldError
+from django.test import TestCase
+
+from .models import (
+ Entry, Line, Post, RegressionModelSplit, SelfRefer, SelfReferChild,
+ SelfReferChildSibling, Tag, TagCollection, Worksheet,
+)
+
+
+class M2MRegressionTests(TestCase):
+ def test_multiple_m2m(self):
+ # Multiple m2m references to model must be distinguished when
+ # accessing the relations through an instance attribute.
+
+ s1 = SelfRefer.objects.create(name='s1')
+ s2 = SelfRefer.objects.create(name='s2')
+ s3 = SelfRefer.objects.create(name='s3')
+ s1.references.add(s2)
+ s1.related.add(s3)
+
+ e1 = Entry.objects.create(name='e1')
+ t1 = Tag.objects.create(name='t1')
+ t2 = Tag.objects.create(name='t2')
+
+ e1.topics.add(t1)
+ e1.related.add(t2)
+
+ self.assertQuerysetEqual(s1.references.all(), [""])
+ self.assertQuerysetEqual(s1.related.all(), [""])
+
+ self.assertQuerysetEqual(e1.topics.all(), [""])
+ self.assertQuerysetEqual(e1.related.all(), [""])
+
+ def test_internal_related_name_not_in_error_msg(self):
+ # The secret internal related names for self-referential many-to-many
+ # fields shouldn't appear in the list when an error is made.
+ self.assertRaisesMessage(
+ FieldError,
+ "Choices are: id, name, references, related, selfreferchild, selfreferchildsibling",
+ lambda: SelfRefer.objects.filter(porcupine='fred')
+ )
+
+ def test_m2m_inheritance_symmetry(self):
+ # Test to ensure that the relationship between two inherited models
+ # with a self-referential m2m field maintains symmetry
+
+ sr_child = SelfReferChild(name="Hanna")
+ sr_child.save()
+
+ sr_sibling = SelfReferChildSibling(name="Beth")
+ sr_sibling.save()
+ sr_child.related.add(sr_sibling)
+
+ self.assertQuerysetEqual(sr_child.related.all(), [""])
+ self.assertQuerysetEqual(sr_sibling.related.all(), [""])
+
+ def test_m2m_pk_field_type(self):
+ # Regression for #11311 - The primary key for models in a m2m relation
+ # doesn't have to be an AutoField
+
+ w = Worksheet(id='abc')
+ w.save()
+ w.delete()
+
+ def test_add_m2m_with_base_class(self):
+ # Regression for #11956 -- You can add an object to a m2m with the
+ # base class without causing integrity errors
+
+ t1 = Tag.objects.create(name='t1')
+ t2 = Tag.objects.create(name='t2')
+
+ c1 = TagCollection.objects.create(name='c1')
+ c1.tags.set([t1, t2])
+ c1 = TagCollection.objects.get(name='c1')
+
+ self.assertQuerysetEqual(c1.tags.all(), ["", ""], ordered=False)
+ self.assertQuerysetEqual(t1.tag_collections.all(), [""])
+
+ def test_manager_class_caching(self):
+ e1 = Entry.objects.create()
+ e2 = Entry.objects.create()
+ t1 = Tag.objects.create()
+ t2 = Tag.objects.create()
+
+ # Get same manager twice in a row:
+ self.assertIs(t1.entry_set.__class__, t1.entry_set.__class__)
+ self.assertIs(e1.topics.__class__, e1.topics.__class__)
+
+ # Get same manager for different instances
+ self.assertIs(e1.topics.__class__, e2.topics.__class__)
+ self.assertIs(t1.entry_set.__class__, t2.entry_set.__class__)
+
+ def test_m2m_abstract_split(self):
+ # Regression for #19236 - an abstract class with a 'split' method
+ # causes a TypeError in add_lazy_relation
+ m1 = RegressionModelSplit(name='1')
+ m1.save()
+
+ def test_assigning_invalid_data_to_m2m_doesnt_clear_existing_relations(self):
+ t1 = Tag.objects.create(name='t1')
+ t2 = Tag.objects.create(name='t2')
+ c1 = TagCollection.objects.create(name='c1')
+ c1.tags.set([t1, t2])
+
+ with self.assertRaises(TypeError):
+ c1.tags.set(7)
+
+ c1.refresh_from_db()
+ self.assertQuerysetEqual(c1.tags.order_by('name'), ["", ""])
+
+ def test_multiple_forwards_only_m2m(self):
+ # Regression for #24505 - Multiple ManyToManyFields to same "to"
+ # model with related_name set to '+'.
+ foo = Line.objects.create(name='foo')
+ bar = Line.objects.create(name='bar')
+ post = Post.objects.create()
+ post.primary_lines.add(foo)
+ post.secondary_lines.add(bar)
+ self.assertQuerysetEqual(post.primary_lines.all(), [''])
+ self.assertQuerysetEqual(post.secondary_lines.all(), [''])
diff --git a/tests/m2m_signals/__init__.py b/tests/m2m_signals/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_signals/models.py b/tests/m2m_signals/models.py
new file mode 100644
index 00000000..e4110ccf
--- /dev/null
+++ b/tests/m2m_signals/models.py
@@ -0,0 +1,43 @@
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Part(models.Model):
+ name = models.CharField(max_length=20)
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Car(models.Model):
+ name = models.CharField(max_length=20)
+ default_parts = models.ManyToManyField(Part)
+ optional_parts = models.ManyToManyField(Part, related_name='cars_optional')
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
+
+
+class SportsCar(Car):
+ price = models.IntegerField()
+
+
+@python_2_unicode_compatible
+class Person(models.Model):
+ name = models.CharField(max_length=20)
+ fans = models.ManyToManyField('self', related_name='idols', symmetrical=False)
+ friends = models.ManyToManyField('self')
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
diff --git a/tests/m2m_signals/tests.py b/tests/m2m_signals/tests.py
new file mode 100644
index 00000000..834897eb
--- /dev/null
+++ b/tests/m2m_signals/tests.py
@@ -0,0 +1,463 @@
+"""
+Testing signals emitted on changing m2m relations.
+"""
+
+from django.db import models
+from django.test import TestCase
+
+from .models import Car, Part, Person, SportsCar
+
+
+class ManyToManySignalsTest(TestCase):
+ def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
+ message = {
+ 'instance': kwargs['instance'],
+ 'action': kwargs['action'],
+ 'reverse': kwargs['reverse'],
+ 'model': kwargs['model'],
+ }
+ if kwargs['pk_set']:
+ message['objects'] = list(
+ kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
+ )
+ self.m2m_changed_messages.append(message)
+
+ def setUp(self):
+ self.m2m_changed_messages = []
+
+ self.vw = Car.objects.create(name='VW')
+ self.bmw = Car.objects.create(name='BMW')
+ self.toyota = Car.objects.create(name='Toyota')
+
+ self.wheelset = Part.objects.create(name='Wheelset')
+ self.doors = Part.objects.create(name='Doors')
+ self.engine = Part.objects.create(name='Engine')
+ self.airbag = Part.objects.create(name='Airbag')
+ self.sunroof = Part.objects.create(name='Sunroof')
+
+ self.alice = Person.objects.create(name='Alice')
+ self.bob = Person.objects.create(name='Bob')
+ self.chuck = Person.objects.create(name='Chuck')
+ self.daisy = Person.objects.create(name='Daisy')
+
+ def tearDown(self):
+ # disconnect all signal handlers
+ models.signals.m2m_changed.disconnect(
+ self.m2m_changed_signal_receiver, Car.default_parts.through
+ )
+ models.signals.m2m_changed.disconnect(
+ self.m2m_changed_signal_receiver, Car.optional_parts.through
+ )
+ models.signals.m2m_changed.disconnect(
+ self.m2m_changed_signal_receiver, Person.fans.through
+ )
+ models.signals.m2m_changed.disconnect(
+ self.m2m_changed_signal_receiver, Person.friends.through
+ )
+
+ def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
+ """ Install a listener on the two m2m relations. """
+ models.signals.m2m_changed.connect(
+ self.m2m_changed_signal_receiver, Car.optional_parts.through
+ )
+ if add_default_parts_before_set_signal:
+ # adding a default part to our car - no signal listener installed
+ self.vw.default_parts.add(self.sunroof)
+ models.signals.m2m_changed.connect(
+ self.m2m_changed_signal_receiver, Car.default_parts.through
+ )
+
+ def test_m2m_relations_add_remove_clear(self):
+ expected_messages = []
+
+ self._initialize_signal_car(add_default_parts_before_set_signal=True)
+
+ self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors, self.engine, self.wheelset],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors, self.engine, self.wheelset],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ # give the BMW and Toyota some doors as well
+ self.doors.car_set.add(self.bmw, self.toyota)
+ expected_messages.append({
+ 'instance': self.doors,
+ 'action': 'pre_add',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [self.bmw, self.toyota],
+ })
+ expected_messages.append({
+ 'instance': self.doors,
+ 'action': 'post_add',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [self.bmw, self.toyota],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ def test_m2m_relations_signals_remove_relation(self):
+ self._initialize_signal_car()
+ # remove the engine from the self.vw and the airbag (which is not set
+ # but is returned)
+ self.vw.default_parts.remove(self.engine, self.airbag)
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.vw,
+ 'action': 'pre_remove',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.airbag, self.engine],
+ }, {
+ 'instance': self.vw,
+ 'action': 'post_remove',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.airbag, self.engine],
+ }
+ ])
+
+ def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
+ expected_messages = []
+
+ self._initialize_signal_car()
+
+ # give the self.vw some optional parts (second relation to same model)
+ self.vw.optional_parts.add(self.airbag, self.sunroof)
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.airbag, self.sunroof],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.airbag, self.sunroof],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ # add airbag to all the cars (even though the self.vw already has one)
+ self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
+ expected_messages.append({
+ 'instance': self.airbag,
+ 'action': 'pre_add',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [self.bmw, self.toyota],
+ })
+ expected_messages.append({
+ 'instance': self.airbag,
+ 'action': 'post_add',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [self.bmw, self.toyota],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
+ self._initialize_signal_car()
+ # remove airbag from the self.vw (reverse relation with custom
+ # related_name)
+ self.airbag.cars_optional.remove(self.vw)
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.airbag,
+ 'action': 'pre_remove',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [self.vw],
+ }, {
+ 'instance': self.airbag,
+ 'action': 'post_remove',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [self.vw],
+ }
+ ])
+
+ def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
+ self._initialize_signal_car()
+ # clear all parts of the self.vw
+ self.vw.default_parts.clear()
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.vw,
+ 'action': 'pre_clear',
+ 'reverse': False,
+ 'model': Part,
+ }, {
+ 'instance': self.vw,
+ 'action': 'post_clear',
+ 'reverse': False,
+ 'model': Part,
+ }
+ ])
+
+ def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
+ self._initialize_signal_car()
+ # take all the doors off of cars
+ self.doors.car_set.clear()
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.doors,
+ 'action': 'pre_clear',
+ 'reverse': True,
+ 'model': Car,
+ }, {
+ 'instance': self.doors,
+ 'action': 'post_clear',
+ 'reverse': True,
+ 'model': Car,
+ }
+ ])
+
+ def test_m2m_relations_signals_reverse_relation(self):
+ self._initialize_signal_car()
+ # take all the airbags off of cars (clear reverse relation with custom
+ # related_name)
+ self.airbag.cars_optional.clear()
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.airbag,
+ 'action': 'pre_clear',
+ 'reverse': True,
+ 'model': Car,
+ }, {
+ 'instance': self.airbag,
+ 'action': 'post_clear',
+ 'reverse': True,
+ 'model': Car,
+ }
+ ])
+
+ def test_m2m_relations_signals_alternative_ways(self):
+ expected_messages = []
+
+ self._initialize_signal_car()
+
+ # alternative ways of setting relation:
+ self.vw.default_parts.create(name='Windows')
+ p6 = Part.objects.get(name='Windows')
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [p6],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [p6],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ # direct assignment clears the set first, then adds
+ self.vw.default_parts.set([self.wheelset, self.doors, self.engine])
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_remove',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [p6],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_remove',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [p6],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors, self.engine, self.wheelset],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors, self.engine, self.wheelset],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ def test_m2m_relations_signals_clearing_removing(self):
+ expected_messages = []
+
+ self._initialize_signal_car(add_default_parts_before_set_signal=True)
+
+ # set by clearing.
+ self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_clear',
+ 'reverse': False,
+ 'model': Part,
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_clear',
+ 'reverse': False,
+ 'model': Part,
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors, self.engine, self.wheelset],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors, self.engine, self.wheelset],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ # set by only removing what's necessary.
+ self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'pre_remove',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.engine],
+ })
+ expected_messages.append({
+ 'instance': self.vw,
+ 'action': 'post_remove',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.engine],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ def test_m2m_relations_signals_when_inheritance(self):
+ expected_messages = []
+
+ self._initialize_signal_car(add_default_parts_before_set_signal=True)
+
+ # Signals still work when model inheritance is involved
+ c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
+ c4b = Car.objects.get(name='Bugatti')
+ c4.default_parts.set([self.doors])
+ expected_messages.append({
+ 'instance': c4,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors],
+ })
+ expected_messages.append({
+ 'instance': c4,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Part,
+ 'objects': [self.doors],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ self.engine.car_set.add(c4)
+ expected_messages.append({
+ 'instance': self.engine,
+ 'action': 'pre_add',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [c4b],
+ })
+ expected_messages.append({
+ 'instance': self.engine,
+ 'action': 'post_add',
+ 'reverse': True,
+ 'model': Car,
+ 'objects': [c4b],
+ })
+ self.assertEqual(self.m2m_changed_messages, expected_messages)
+
+ def _initialize_signal_person(self):
+ # Install a listener on the two m2m relations.
+ models.signals.m2m_changed.connect(
+ self.m2m_changed_signal_receiver, Person.fans.through
+ )
+ models.signals.m2m_changed.connect(
+ self.m2m_changed_signal_receiver, Person.friends.through
+ )
+
+ def test_m2m_relations_with_self_add_friends(self):
+ self._initialize_signal_person()
+ self.alice.friends.set([self.bob, self.chuck])
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.alice,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Person,
+ 'objects': [self.bob, self.chuck],
+ }, {
+ 'instance': self.alice,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Person,
+ 'objects': [self.bob, self.chuck],
+ }
+ ])
+
+ def test_m2m_relations_with_self_add_fan(self):
+ self._initialize_signal_person()
+ self.alice.fans.set([self.daisy])
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.alice,
+ 'action': 'pre_add',
+ 'reverse': False,
+ 'model': Person,
+ 'objects': [self.daisy],
+ }, {
+ 'instance': self.alice,
+ 'action': 'post_add',
+ 'reverse': False,
+ 'model': Person,
+ 'objects': [self.daisy],
+ }
+ ])
+
+ def test_m2m_relations_with_self_add_idols(self):
+ self._initialize_signal_person()
+ self.chuck.idols.set([self.alice, self.bob])
+ self.assertEqual(self.m2m_changed_messages, [
+ {
+ 'instance': self.chuck,
+ 'action': 'pre_add',
+ 'reverse': True,
+ 'model': Person,
+ 'objects': [self.alice, self.bob],
+ }, {
+ 'instance': self.chuck,
+ 'action': 'post_add',
+ 'reverse': True,
+ 'model': Person,
+ 'objects': [self.alice, self.bob],
+ }
+ ])
diff --git a/tests/m2m_through/__init__.py b/tests/m2m_through/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2m_through/models.py b/tests/m2m_through/models.py
new file mode 100644
index 00000000..dab3be51
--- /dev/null
+++ b/tests/m2m_through/models.py
@@ -0,0 +1,156 @@
+from datetime import datetime
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+# M2M described on one of the models
+@python_2_unicode_compatible
+class Person(models.Model):
+ name = models.CharField(max_length=128)
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Group(models.Model):
+ name = models.CharField(max_length=128)
+ members = models.ManyToManyField(Person, through='Membership')
+ custom_members = models.ManyToManyField(Person, through='CustomMembership', related_name="custom")
+ nodefaultsnonulls = models.ManyToManyField(
+ Person,
+ through='TestNoDefaultsOrNulls',
+ related_name="testnodefaultsnonulls",
+ )
+
+ class Meta:
+ ordering = ('name',)
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Membership(models.Model):
+ person = models.ForeignKey(Person, models.CASCADE)
+ group = models.ForeignKey(Group, models.CASCADE)
+ date_joined = models.DateTimeField(default=datetime.now)
+ invite_reason = models.CharField(max_length=64, null=True)
+
+ class Meta:
+ ordering = ('date_joined', 'invite_reason', 'group')
+
+ def __str__(self):
+ return "%s is a member of %s" % (self.person.name, self.group.name)
+
+
+@python_2_unicode_compatible
+class CustomMembership(models.Model):
+ person = models.ForeignKey(
+ Person,
+ models.CASCADE,
+ db_column="custom_person_column",
+ related_name="custom_person_related_name",
+ )
+ group = models.ForeignKey(Group, models.CASCADE)
+ weird_fk = models.ForeignKey(Membership, models.SET_NULL, null=True)
+ date_joined = models.DateTimeField(default=datetime.now)
+
+ def __str__(self):
+ return "%s is a member of %s" % (self.person.name, self.group.name)
+
+ class Meta:
+ db_table = "test_table"
+ ordering = ["date_joined"]
+
+
+class TestNoDefaultsOrNulls(models.Model):
+ person = models.ForeignKey(Person, models.CASCADE)
+ group = models.ForeignKey(Group, models.CASCADE)
+ nodefaultnonull = models.CharField(max_length=5)
+
+
+@python_2_unicode_compatible
+class PersonSelfRefM2M(models.Model):
+ name = models.CharField(max_length=5)
+ friends = models.ManyToManyField('self', through="Friendship", symmetrical=False)
+
+ def __str__(self):
+ return self.name
+
+
+class Friendship(models.Model):
+ first = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="rel_from_set")
+ second = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="rel_to_set")
+ date_friended = models.DateTimeField()
+
+
+# Custom through link fields
+@python_2_unicode_compatible
+class Event(models.Model):
+ title = models.CharField(max_length=50)
+ invitees = models.ManyToManyField(
+ Person, through='Invitation',
+ through_fields=('event', 'invitee'),
+ related_name='events_invited',
+ )
+
+ def __str__(self):
+ return self.title
+
+
+class Invitation(models.Model):
+ event = models.ForeignKey(Event, models.CASCADE, related_name='invitations')
+ # field order is deliberately inverted. the target field is "invitee".
+ inviter = models.ForeignKey(Person, models.CASCADE, related_name='invitations_sent')
+ invitee = models.ForeignKey(Person, models.CASCADE, related_name='invitations')
+
+
+@python_2_unicode_compatible
+class Employee(models.Model):
+ name = models.CharField(max_length=5)
+ subordinates = models.ManyToManyField(
+ 'self',
+ through="Relationship",
+ through_fields=('source', 'target'),
+ symmetrical=False,
+ )
+
+ class Meta:
+ ordering = ('pk',)
+
+ def __str__(self):
+ return self.name
+
+
+class Relationship(models.Model):
+ # field order is deliberately inverted.
+ another = models.ForeignKey(Employee, models.SET_NULL, related_name="rel_another_set", null=True)
+ target = models.ForeignKey(Employee, models.CASCADE, related_name="rel_target_set")
+ source = models.ForeignKey(Employee, models.CASCADE, related_name="rel_source_set")
+
+
+class Ingredient(models.Model):
+ iname = models.CharField(max_length=20, unique=True)
+
+ class Meta:
+ ordering = ('iname',)
+
+
+class Recipe(models.Model):
+ rname = models.CharField(max_length=20, unique=True)
+ ingredients = models.ManyToManyField(
+ Ingredient, through='RecipeIngredient', related_name='recipes',
+ )
+
+ class Meta:
+ ordering = ('rname',)
+
+
+class RecipeIngredient(models.Model):
+ ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
+ recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
diff --git a/tests/m2m_through/tests.py b/tests/m2m_through/tests.py
new file mode 100644
index 00000000..47cbbeec
--- /dev/null
+++ b/tests/m2m_through/tests.py
@@ -0,0 +1,472 @@
+from __future__ import unicode_literals
+
+from datetime import datetime
+from operator import attrgetter
+
+from django.test import TestCase, skipUnlessDBFeature
+
+from .models import (
+ CustomMembership, Employee, Event, Friendship, Group, Ingredient,
+ Invitation, Membership, Person, PersonSelfRefM2M, Recipe, RecipeIngredient,
+ Relationship,
+)
+
+
+class M2mThroughTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.bob = Person.objects.create(name='Bob')
+ cls.jim = Person.objects.create(name='Jim')
+ cls.jane = Person.objects.create(name='Jane')
+ cls.rock = Group.objects.create(name='Rock')
+ cls.roll = Group.objects.create(name='Roll')
+
+ def test_retrieve_intermediate_items(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jane, group=self.rock)
+
+ expected = ['Jane', 'Jim']
+ self.assertQuerysetEqual(
+ self.rock.members.all(),
+ expected,
+ attrgetter("name")
+ )
+
+ def test_get_on_intermediate_model(self):
+ Membership.objects.create(person=self.jane, group=self.rock)
+
+ queryset = Membership.objects.get(person=self.jane, group=self.rock)
+
+ self.assertEqual(
+ repr(queryset),
+ ''
+ )
+
+ def test_filter_on_intermediate_model(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jane, group=self.rock)
+
+ queryset = Membership.objects.filter(group=self.rock)
+
+ expected = [
+ '',
+ '',
+ ]
+
+ self.assertQuerysetEqual(
+ queryset,
+ expected
+ )
+
+ def test_cannot_use_add_on_m2m_with_intermediary_model(self):
+ msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.rock.members.add(self.bob)
+
+ self.assertQuerysetEqual(
+ self.rock.members.all(),
+ []
+ )
+
+ def test_cannot_use_create_on_m2m_with_intermediary_model(self):
+ msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.rock.members.create(name='Annie')
+
+ self.assertQuerysetEqual(
+ self.rock.members.all(),
+ []
+ )
+
+ def test_cannot_use_remove_on_m2m_with_intermediary_model(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.rock.members.remove(self.jim)
+
+ self.assertQuerysetEqual(
+ self.rock.members.all(),
+ ['Jim', ],
+ attrgetter("name")
+ )
+
+ def test_cannot_use_setattr_on_m2m_with_intermediary_model(self):
+ msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
+ members = list(Person.objects.filter(name__in=['Bob', 'Jim']))
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.rock.members.set(members)
+
+ self.assertQuerysetEqual(
+ self.rock.members.all(),
+ []
+ )
+
+ def test_clear_removes_all_the_m2m_relationships(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jane, group=self.rock)
+
+ self.rock.members.clear()
+
+ self.assertQuerysetEqual(
+ self.rock.members.all(),
+ []
+ )
+
+ def test_retrieve_reverse_intermediate_items(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jim, group=self.roll)
+
+ expected = ['Rock', 'Roll']
+ self.assertQuerysetEqual(
+ self.jim.group_set.all(),
+ expected,
+ attrgetter("name")
+ )
+
+ def test_cannot_use_add_on_reverse_m2m_with_intermediary_model(self):
+ msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.bob.group_set.add(self.bob)
+
+ self.assertQuerysetEqual(
+ self.bob.group_set.all(),
+ []
+ )
+
+ def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
+ msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.bob.group_set.create(name='Funk')
+
+ self.assertQuerysetEqual(
+ self.bob.group_set.all(),
+ []
+ )
+
+ def test_cannot_use_remove_on_reverse_m2m_with_intermediary_model(self):
+ Membership.objects.create(person=self.bob, group=self.rock)
+ msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.bob.group_set.remove(self.rock)
+
+ self.assertQuerysetEqual(
+ self.bob.group_set.all(),
+ ['Rock', ],
+ attrgetter('name')
+ )
+
+ def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
+ msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
+ members = list(Group.objects.filter(name__in=['Rock', 'Roll']))
+
+ with self.assertRaisesMessage(AttributeError, msg):
+ self.bob.group_set.set(members)
+
+ self.assertQuerysetEqual(
+ self.bob.group_set.all(),
+ []
+ )
+
+ def test_clear_on_reverse_removes_all_the_m2m_relationships(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jim, group=self.roll)
+
+ self.jim.group_set.clear()
+
+ self.assertQuerysetEqual(
+ self.jim.group_set.all(),
+ []
+ )
+
+ def test_query_model_by_attribute_name_of_related_model(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jane, group=self.rock)
+ Membership.objects.create(person=self.bob, group=self.roll)
+ Membership.objects.create(person=self.jim, group=self.roll)
+ Membership.objects.create(person=self.jane, group=self.roll)
+
+ self.assertQuerysetEqual(
+ Group.objects.filter(members__name='Bob'),
+ ['Roll', ],
+ attrgetter("name")
+ )
+
+ @skipUnlessDBFeature('supports_microsecond_precision')
+ def test_order_by_relational_field_through_model(self):
+ CustomMembership.objects.create(person=self.jim, group=self.rock)
+ CustomMembership.objects.create(person=self.bob, group=self.rock)
+ CustomMembership.objects.create(person=self.jane, group=self.roll)
+ CustomMembership.objects.create(person=self.jim, group=self.roll)
+ self.assertSequenceEqual(
+ self.rock.custom_members.order_by('custom_person_related_name'),
+ [self.jim, self.bob]
+ )
+ self.assertSequenceEqual(
+ self.roll.custom_members.order_by('custom_person_related_name'),
+ [self.jane, self.jim]
+ )
+
+ def test_query_first_model_by_intermediate_model_attribute(self):
+ Membership.objects.create(
+ person=self.jane, group=self.roll,
+ invite_reason="She was just awesome."
+ )
+ Membership.objects.create(
+ person=self.jim, group=self.roll,
+ invite_reason="He is good."
+ )
+ Membership.objects.create(person=self.bob, group=self.roll)
+
+ qs = Group.objects.filter(
+ membership__invite_reason="She was just awesome."
+ )
+ self.assertQuerysetEqual(
+ qs,
+ ['Roll'],
+ attrgetter("name")
+ )
+
+ def test_query_second_model_by_intermediate_model_attribute(self):
+ Membership.objects.create(
+ person=self.jane, group=self.roll,
+ invite_reason="She was just awesome."
+ )
+ Membership.objects.create(
+ person=self.jim, group=self.roll,
+ invite_reason="He is good."
+ )
+ Membership.objects.create(person=self.bob, group=self.roll)
+
+ qs = Person.objects.filter(
+ membership__invite_reason="She was just awesome."
+ )
+ self.assertQuerysetEqual(
+ qs,
+ ['Jane'],
+ attrgetter("name")
+ )
+
+ def test_query_model_by_related_model_name(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(person=self.jane, group=self.rock)
+ Membership.objects.create(person=self.bob, group=self.roll)
+ Membership.objects.create(person=self.jim, group=self.roll)
+ Membership.objects.create(person=self.jane, group=self.roll)
+
+ self.assertQuerysetEqual(
+ Person.objects.filter(group__name="Rock"),
+ ['Jane', 'Jim'],
+ attrgetter("name")
+ )
+
+ def test_query_model_by_custom_related_name(self):
+ CustomMembership.objects.create(person=self.bob, group=self.rock)
+ CustomMembership.objects.create(person=self.jim, group=self.rock)
+
+ self.assertQuerysetEqual(
+ Person.objects.filter(custom__name="Rock"),
+ ['Bob', 'Jim'],
+ attrgetter("name")
+ )
+
+ def test_query_model_by_intermediate_can_return_non_unique_queryset(self):
+ Membership.objects.create(person=self.jim, group=self.rock)
+ Membership.objects.create(
+ person=self.jane, group=self.rock,
+ date_joined=datetime(2006, 1, 1)
+ )
+ Membership.objects.create(
+ person=self.bob, group=self.roll,
+ date_joined=datetime(2004, 1, 1))
+ Membership.objects.create(person=self.jim, group=self.roll)
+ Membership.objects.create(
+ person=self.jane, group=self.roll,
+ date_joined=datetime(2004, 1, 1))
+
+ qs = Person.objects.filter(
+ membership__date_joined__gt=datetime(2004, 1, 1)
+ )
+ self.assertQuerysetEqual(
+ qs,
+ ['Jane', 'Jim', 'Jim'],
+ attrgetter("name")
+ )
+
+ def test_custom_related_name_forward_empty_qs(self):
+ self.assertQuerysetEqual(
+ self.rock.custom_members.all(),
+ []
+ )
+
+ def test_custom_related_name_reverse_empty_qs(self):
+ self.assertQuerysetEqual(
+ self.bob.custom.all(),
+ []
+ )
+
+ def test_custom_related_name_forward_non_empty_qs(self):
+ CustomMembership.objects.create(person=self.bob, group=self.rock)
+ CustomMembership.objects.create(person=self.jim, group=self.rock)
+
+ self.assertQuerysetEqual(
+ self.rock.custom_members.all(),
+ ['Bob', 'Jim'],
+ attrgetter("name")
+ )
+
+ def test_custom_related_name_reverse_non_empty_qs(self):
+ CustomMembership.objects.create(person=self.bob, group=self.rock)
+ CustomMembership.objects.create(person=self.jim, group=self.rock)
+
+ self.assertQuerysetEqual(
+ self.bob.custom.all(),
+ ['Rock'],
+ attrgetter("name")
+ )
+
+ def test_custom_related_name_doesnt_conflict_with_fky_related_name(self):
+ CustomMembership.objects.create(person=self.bob, group=self.rock)
+
+ self.assertQuerysetEqual(
+ self.bob.custom_person_related_name.all(),
+ ['']
+ )
+
+ def test_through_fields(self):
+ """
+ Relations with intermediary tables with multiple FKs
+ to the M2M's ``to`` model are possible.
+ """
+ event = Event.objects.create(title='Rockwhale 2014')
+ Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim)
+ Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane)
+ self.assertQuerysetEqual(
+ event.invitees.all(),
+ ['Jane', 'Jim'],
+ attrgetter('name')
+ )
+
+
+class M2mThroughReferentialTests(TestCase):
+ def test_self_referential_empty_qs(self):
+ tony = PersonSelfRefM2M.objects.create(name="Tony")
+ self.assertQuerysetEqual(
+ tony.friends.all(),
+ []
+ )
+
+ def test_self_referential_non_symmetrical_first_side(self):
+ tony = PersonSelfRefM2M.objects.create(name="Tony")
+ chris = PersonSelfRefM2M.objects.create(name="Chris")
+ Friendship.objects.create(
+ first=tony, second=chris, date_friended=datetime.now()
+ )
+
+ self.assertQuerysetEqual(
+ tony.friends.all(),
+ ['Chris'],
+ attrgetter("name")
+ )
+
+ def test_self_referential_non_symmetrical_second_side(self):
+ tony = PersonSelfRefM2M.objects.create(name="Tony")
+ chris = PersonSelfRefM2M.objects.create(name="Chris")
+ Friendship.objects.create(
+ first=tony, second=chris, date_friended=datetime.now()
+ )
+
+ self.assertQuerysetEqual(
+ chris.friends.all(),
+ []
+ )
+
+ def test_self_referential_non_symmetrical_clear_first_side(self):
+ tony = PersonSelfRefM2M.objects.create(name="Tony")
+ chris = PersonSelfRefM2M.objects.create(name="Chris")
+ Friendship.objects.create(
+ first=tony, second=chris, date_friended=datetime.now()
+ )
+
+ chris.friends.clear()
+
+ self.assertQuerysetEqual(
+ chris.friends.all(),
+ []
+ )
+
+ # Since this isn't a symmetrical relation, Tony's friend link still exists.
+ self.assertQuerysetEqual(
+ tony.friends.all(),
+ ['Chris'],
+ attrgetter("name")
+ )
+
+ def test_self_referential_symmetrical(self):
+ tony = PersonSelfRefM2M.objects.create(name="Tony")
+ chris = PersonSelfRefM2M.objects.create(name="Chris")
+ Friendship.objects.create(
+ first=tony, second=chris, date_friended=datetime.now()
+ )
+ Friendship.objects.create(
+ first=chris, second=tony, date_friended=datetime.now()
+ )
+
+ self.assertQuerysetEqual(
+ tony.friends.all(),
+ ['Chris'],
+ attrgetter("name")
+ )
+
+ self.assertQuerysetEqual(
+ chris.friends.all(),
+ ['Tony'],
+ attrgetter("name")
+ )
+
+ def test_through_fields_self_referential(self):
+ john = Employee.objects.create(name='john')
+ peter = Employee.objects.create(name='peter')
+ mary = Employee.objects.create(name='mary')
+ harry = Employee.objects.create(name='harry')
+
+ Relationship.objects.create(source=john, target=peter, another=None)
+ Relationship.objects.create(source=john, target=mary, another=None)
+ Relationship.objects.create(source=john, target=harry, another=peter)
+
+ self.assertQuerysetEqual(
+ john.subordinates.all(),
+ ['peter', 'mary', 'harry'],
+ attrgetter('name')
+ )
+
+
+class M2mThroughToFieldsTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ cls.pea = Ingredient.objects.create(iname='pea')
+ cls.potato = Ingredient.objects.create(iname='potato')
+ cls.tomato = Ingredient.objects.create(iname='tomato')
+ cls.curry = Recipe.objects.create(rname='curry')
+ RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.potato)
+ RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.pea)
+ RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.tomato)
+
+ def test_retrieval(self):
+ # Forward retrieval
+ self.assertSequenceEqual(self.curry.ingredients.all(), [self.pea, self.potato, self.tomato])
+ # Backward retrieval
+ self.assertEqual(self.tomato.recipes.get(), self.curry)
+
+ def test_choices(self):
+ field = Recipe._meta.get_field('ingredients')
+ self.assertEqual(
+ [choice[0] for choice in field.get_choices(include_blank=False)],
+ ['pea', 'potato', 'tomato']
+ )
diff --git a/tests/m2o_recursive/__init__.py b/tests/m2o_recursive/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/m2o_recursive/models.py b/tests/m2o_recursive/models.py
new file mode 100644
index 00000000..d62c514a
--- /dev/null
+++ b/tests/m2o_recursive/models.py
@@ -0,0 +1,33 @@
+"""
+Relating an object to itself, many-to-one
+
+To define a many-to-one relationship between a model and itself, use
+``ForeignKey('self', ...)``.
+
+In this example, a ``Category`` is related to itself. That is, each
+``Category`` has a parent ``Category``.
+
+Set ``related_name`` to designate what the reverse relationship is called.
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Category(models.Model):
+ name = models.CharField(max_length=20)
+ parent = models.ForeignKey('self', models.SET_NULL, blank=True, null=True, related_name='child_set')
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Person(models.Model):
+ full_name = models.CharField(max_length=20)
+ mother = models.ForeignKey('self', models.SET_NULL, null=True, related_name='mothers_child_set')
+ father = models.ForeignKey('self', models.SET_NULL, null=True, related_name='fathers_child_set')
+
+ def __str__(self):
+ return self.full_name
diff --git a/tests/m2o_recursive/tests.py b/tests/m2o_recursive/tests.py
new file mode 100644
index 00000000..8e730d48
--- /dev/null
+++ b/tests/m2o_recursive/tests.py
@@ -0,0 +1,43 @@
+from __future__ import unicode_literals
+
+from django.test import TestCase
+
+from .models import Category, Person
+
+
+class ManyToOneRecursiveTests(TestCase):
+
+ def setUp(self):
+ self.r = Category(id=None, name='Root category', parent=None)
+ self.r.save()
+ self.c = Category(id=None, name='Child category', parent=self.r)
+ self.c.save()
+
+ def test_m2o_recursive(self):
+ self.assertQuerysetEqual(self.r.child_set.all(),
+ [''])
+ self.assertEqual(self.r.child_set.get(name__startswith='Child').id, self.c.id)
+ self.assertIsNone(self.r.parent)
+ self.assertQuerysetEqual(self.c.child_set.all(), [])
+ self.assertEqual(self.c.parent.id, self.r.id)
+
+
+class MultipleManyToOneRecursiveTests(TestCase):
+
+ def setUp(self):
+ self.dad = Person(full_name='John Smith Senior', mother=None, father=None)
+ self.dad.save()
+ self.mom = Person(full_name='Jane Smith', mother=None, father=None)
+ self.mom.save()
+ self.kid = Person(full_name='John Smith Junior', mother=self.mom, father=self.dad)
+ self.kid.save()
+
+ def test_m2o_recursive2(self):
+ self.assertEqual(self.kid.mother.id, self.mom.id)
+ self.assertEqual(self.kid.father.id, self.dad.id)
+ self.assertQuerysetEqual(self.dad.fathers_child_set.all(),
+ [''])
+ self.assertQuerysetEqual(self.mom.mothers_child_set.all(),
+ [''])
+ self.assertQuerysetEqual(self.kid.mothers_child_set.all(), [])
+ self.assertQuerysetEqual(self.kid.fathers_child_set.all(), [])
diff --git a/tests/many_to_one_null/__init__.py b/tests/many_to_one_null/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/many_to_one_null/models.py b/tests/many_to_one_null/models.py
new file mode 100644
index 00000000..2a67623d
--- /dev/null
+++ b/tests/many_to_one_null/models.py
@@ -0,0 +1,37 @@
+"""
+Many-to-one relationships that can be null
+
+To define a many-to-one relationship that can have a null foreign key, use
+``ForeignKey()`` with ``null=True`` .
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Reporter(models.Model):
+ name = models.CharField(max_length=30)
+
+ def __str__(self):
+ return self.name
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ headline = models.CharField(max_length=100)
+ reporter = models.ForeignKey(Reporter, models.SET_NULL, null=True)
+
+ class Meta:
+ ordering = ('headline',)
+
+ def __str__(self):
+ return self.headline
+
+
+class Car(models.Model):
+ make = models.CharField(max_length=100, null=True, unique=True)
+
+
+class Driver(models.Model):
+ car = models.ForeignKey(Car, models.SET_NULL, to_field='make', null=True, related_name='drivers')
diff --git a/tests/many_to_one_null/tests.py b/tests/many_to_one_null/tests.py
new file mode 100644
index 00000000..dc49c61f
--- /dev/null
+++ b/tests/many_to_one_null/tests.py
@@ -0,0 +1,138 @@
+from __future__ import unicode_literals
+
+from django.test import TestCase
+
+from .models import Article, Car, Driver, Reporter
+
+
+class ManyToOneNullTests(TestCase):
+ def setUp(self):
+ # Create a Reporter.
+ self.r = Reporter(name='John Smith')
+ self.r.save()
+ # Create an Article.
+ self.a = Article(headline="First", reporter=self.r)
+ self.a.save()
+ # Create an Article via the Reporter object.
+ self.a2 = self.r.article_set.create(headline="Second")
+ # Create an Article with no Reporter by passing "reporter=None".
+ self.a3 = Article(headline="Third", reporter=None)
+ self.a3.save()
+ # Create another article and reporter
+ self.r2 = Reporter(name='Paul Jones')
+ self.r2.save()
+ self.a4 = self.r2.article_set.create(headline='Fourth')
+
+ def test_get_related(self):
+ self.assertEqual(self.a.reporter.id, self.r.id)
+ # Article objects have access to their related Reporter objects.
+ r = self.a.reporter
+ self.assertEqual(r.id, self.r.id)
+
+ def test_created_via_related_set(self):
+ self.assertEqual(self.a2.reporter.id, self.r.id)
+
+ def test_related_set(self):
+ # Reporter objects have access to their related Article objects.
+ self.assertQuerysetEqual(self.r.article_set.all(), ['', ''])
+ self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'), [''])
+ self.assertEqual(self.r.article_set.count(), 2)
+
+ def test_created_without_related(self):
+ self.assertIsNone(self.a3.reporter)
+ # Need to reget a3 to refresh the cache
+ a3 = Article.objects.get(pk=self.a3.pk)
+ with self.assertRaises(AttributeError):
+ getattr(a3.reporter, 'id')
+ # Accessing an article's 'reporter' attribute returns None
+ # if the reporter is set to None.
+ self.assertIsNone(a3.reporter)
+ # To retrieve the articles with no reporters set, use "reporter__isnull=True".
+ self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), [''])
+ # We can achieve the same thing by filtering for the case where the
+ # reporter is None.
+ self.assertQuerysetEqual(Article.objects.filter(reporter=None), [''])
+ # Set the reporter for the Third article
+ self.assertQuerysetEqual(self.r.article_set.all(), ['', ''])
+ self.r.article_set.add(a3)
+ self.assertQuerysetEqual(
+ self.r.article_set.all(),
+ ['', '', '']
+ )
+ # Remove an article from the set, and check that it was removed.
+ self.r.article_set.remove(a3)
+ self.assertQuerysetEqual(self.r.article_set.all(), ['', ''])
+ self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), [''])
+
+ def test_remove_from_wrong_set(self):
+ self.assertQuerysetEqual(self.r2.article_set.all(), [''])
+ # Try to remove a4 from a set it does not belong to
+ with self.assertRaises(Reporter.DoesNotExist):
+ self.r.article_set.remove(self.a4)
+ self.assertQuerysetEqual(self.r2.article_set.all(), [''])
+
+ def test_set(self):
+ # Use manager.set() to allocate ForeignKey. Null is legal, so existing
+ # members of the set that are not in the assignment set are set to null.
+ self.r2.article_set.set([self.a2, self.a3])
+ self.assertQuerysetEqual(self.r2.article_set.all(), ['', ''])
+ # Use manager.set(clear=True)
+ self.r2.article_set.set([self.a3, self.a4], clear=True)
+ self.assertQuerysetEqual(self.r2.article_set.all(), ['', ''])
+ # Clear the rest of the set
+ self.r2.article_set.set([])
+ self.assertQuerysetEqual(self.r2.article_set.all(), [])
+ self.assertQuerysetEqual(
+ Article.objects.filter(reporter__isnull=True),
+ ['', '', '']
+ )
+
+ def test_assign_clear_related_set(self):
+ # Use descriptor assignment to allocate ForeignKey. Null is legal, so
+ # existing members of the set that are not in the assignment set are
+ # set to null.
+ self.r2.article_set.set([self.a2, self.a3])
+ self.assertQuerysetEqual(self.r2.article_set.all(), ['', ''])
+ # Clear the rest of the set
+ self.r.article_set.clear()
+ self.assertQuerysetEqual(self.r.article_set.all(), [])
+ self.assertQuerysetEqual(
+ Article.objects.filter(reporter__isnull=True),
+ ['', '']
+ )
+
+ def test_assign_with_queryset(self):
+ # Querysets used in reverse FK assignments are pre-evaluated
+ # so their value isn't affected by the clearing operation in
+ # RelatedManager.set() (#19816).
+ self.r2.article_set.set([self.a2, self.a3])
+
+ qs = self.r2.article_set.filter(headline="Second")
+ self.r2.article_set.set(qs)
+
+ self.assertEqual(1, self.r2.article_set.count())
+ self.assertEqual(1, qs.count())
+
+ def test_add_efficiency(self):
+ r = Reporter.objects.create()
+ articles = []
+ for _ in range(3):
+ articles.append(Article.objects.create())
+ with self.assertNumQueries(1):
+ r.article_set.add(*articles)
+ self.assertEqual(r.article_set.count(), 3)
+
+ def test_clear_efficiency(self):
+ r = Reporter.objects.create()
+ for _ in range(3):
+ r.article_set.create()
+ with self.assertNumQueries(1):
+ r.article_set.clear()
+ self.assertEqual(r.article_set.count(), 0)
+
+ def test_related_null_to_field(self):
+ c1 = Car.objects.create()
+ d1 = Driver.objects.create()
+ self.assertIs(d1.car, None)
+ with self.assertNumQueries(0):
+ self.assertEqual(list(c1.drivers.all()), [])
diff --git a/tests/migration_test_data_persistence/__init__.py b/tests/migration_test_data_persistence/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/migration_test_data_persistence/migrations/0001_initial.py b/tests/migration_test_data_persistence/migrations/0001_initial.py
new file mode 100644
index 00000000..6c19c4c8
--- /dev/null
+++ b/tests/migration_test_data_persistence/migrations/0001_initial.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Book',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
+ ('title', models.CharField(max_length=100)),
+ ],
+ options={
+ },
+ bases=(models.Model,),
+ ),
+ ]
diff --git a/tests/migration_test_data_persistence/migrations/0002_add_book.py b/tests/migration_test_data_persistence/migrations/0002_add_book.py
new file mode 100644
index 00000000..6ce7fff2
--- /dev/null
+++ b/tests/migration_test_data_persistence/migrations/0002_add_book.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations
+
+
+def add_book(apps, schema_editor):
+ apps.get_model("migration_test_data_persistence", "Book").objects.using(
+ schema_editor.connection.alias,
+ ).create(
+ title="I Love Django",
+ )
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [("migration_test_data_persistence", "0001_initial")]
+
+ operations = [
+ migrations.RunPython(
+ add_book,
+ ),
+ ]
diff --git a/tests/migration_test_data_persistence/migrations/__init__.py b/tests/migration_test_data_persistence/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/migration_test_data_persistence/models.py b/tests/migration_test_data_persistence/models.py
new file mode 100644
index 00000000..c1572d5d
--- /dev/null
+++ b/tests/migration_test_data_persistence/models.py
@@ -0,0 +1,12 @@
+from django.db import models
+
+
+class Book(models.Model):
+ title = models.CharField(max_length=100)
+
+
+class Unmanaged(models.Model):
+ title = models.CharField(max_length=100)
+
+ class Meta:
+ managed = False
diff --git a/tests/migration_test_data_persistence/tests.py b/tests/migration_test_data_persistence/tests.py
new file mode 100644
index 00000000..862a06c4
--- /dev/null
+++ b/tests/migration_test_data_persistence/tests.py
@@ -0,0 +1,31 @@
+from django.test import TestCase, TransactionTestCase
+
+from .models import Book
+
+
+class MigrationDataPersistenceTestCase(TransactionTestCase):
+ """
+ Data loaded in migrations is available if
+ TransactionTestCase.serialized_rollback = True.
+ """
+
+ available_apps = ["migration_test_data_persistence"]
+ serialized_rollback = True
+
+ def test_persistence(self):
+ self.assertEqual(
+ Book.objects.count(),
+ 1,
+ )
+
+
+class MigrationDataNormalPersistenceTestCase(TestCase):
+ """
+ Data loaded in migrations is available on TestCase
+ """
+
+ def test_persistence(self):
+ self.assertEqual(
+ Book.objects.count(),
+ 1,
+ )
diff --git a/tests/migrations2/__init__.py b/tests/migrations2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/migrations2/models.py b/tests/migrations2/models.py
new file mode 100644
index 00000000..3ea7a1df
--- /dev/null
+++ b/tests/migrations2/models.py
@@ -0,0 +1 @@
+# Required for migration detection (#22645)
diff --git a/tests/migrations2/test_migrations_2/0001_initial.py b/tests/migrations2/test_migrations_2/0001_initial.py
new file mode 100644
index 00000000..02cbd97f
--- /dev/null
+++ b/tests/migrations2/test_migrations_2/0001_initial.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [("migrations", "0002_second")]
+
+ operations = [
+
+ migrations.CreateModel(
+ "OtherAuthor",
+ [
+ ("id", models.AutoField(primary_key=True)),
+ ("name", models.CharField(max_length=255)),
+ ("slug", models.SlugField(null=True)),
+ ("age", models.IntegerField(default=0)),
+ ("silly_field", models.BooleanField(default=False)),
+ ],
+ ),
+
+ ]
diff --git a/tests/migrations2/test_migrations_2/__init__.py b/tests/migrations2/test_migrations_2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/migrations2/test_migrations_2_first/0001_initial.py b/tests/migrations2/test_migrations_2_first/0001_initial.py
new file mode 100644
index 00000000..e31d1d50
--- /dev/null
+++ b/tests/migrations2/test_migrations_2_first/0001_initial.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("migrations", "__first__"),
+ ]
+
+ operations = [
+
+ migrations.CreateModel(
+ "OtherAuthor",
+ [
+ ("id", models.AutoField(primary_key=True)),
+ ("name", models.CharField(max_length=255)),
+ ("slug", models.SlugField(null=True)),
+ ("age", models.IntegerField(default=0)),
+ ("silly_field", models.BooleanField(default=False)),
+ ],
+ ),
+
+ ]
diff --git a/tests/migrations2/test_migrations_2_first/0002_second.py b/tests/migrations2/test_migrations_2_first/0002_second.py
new file mode 100644
index 00000000..a3ca7dac
--- /dev/null
+++ b/tests/migrations2/test_migrations_2_first/0002_second.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [("migrations2", "0001_initial")]
+
+ operations = [
+
+ migrations.CreateModel(
+ "Bookstore",
+ [
+ ("id", models.AutoField(primary_key=True)),
+ ("name", models.CharField(max_length=255)),
+ ("slug", models.SlugField(null=True)),
+ ],
+ ),
+
+ ]
diff --git a/tests/migrations2/test_migrations_2_first/__init__.py b/tests/migrations2/test_migrations_2_first/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/migrations2/test_migrations_2_no_deps/0001_initial.py b/tests/migrations2/test_migrations_2_no_deps/0001_initial.py
new file mode 100644
index 00000000..22137065
--- /dev/null
+++ b/tests/migrations2/test_migrations_2_no_deps/0001_initial.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = []
+
+ operations = [
+
+ migrations.CreateModel(
+ "OtherAuthor",
+ [
+ ("id", models.AutoField(primary_key=True)),
+ ("name", models.CharField(max_length=255)),
+ ("slug", models.SlugField(null=True)),
+ ("age", models.IntegerField(default=0)),
+ ("silly_field", models.BooleanField(default=False)),
+ ],
+ ),
+
+ ]
diff --git a/tests/migrations2/test_migrations_2_no_deps/__init__.py b/tests/migrations2/test_migrations_2_no_deps/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/multiple_database/__init__.py b/tests/multiple_database/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/multiple_database/fixtures/multidb-common.json b/tests/multiple_database/fixtures/multidb-common.json
new file mode 100644
index 00000000..33134173
--- /dev/null
+++ b/tests/multiple_database/fixtures/multidb-common.json
@@ -0,0 +1,10 @@
+[
+ {
+ "pk": 1,
+ "model": "multiple_database.book",
+ "fields": {
+ "title": "The Definitive Guide to Django",
+ "published": "2009-7-8"
+ }
+ }
+]
\ No newline at end of file
diff --git a/tests/multiple_database/fixtures/multidb.default.json b/tests/multiple_database/fixtures/multidb.default.json
new file mode 100644
index 00000000..379b18a8
--- /dev/null
+++ b/tests/multiple_database/fixtures/multidb.default.json
@@ -0,0 +1,26 @@
+[
+ {
+ "pk": 1,
+ "model": "multiple_database.person",
+ "fields": {
+ "name": "Marty Alchin"
+ }
+ },
+ {
+ "pk": 2,
+ "model": "multiple_database.person",
+ "fields": {
+ "name": "George Vilches"
+ }
+ },
+ {
+ "pk": 2,
+ "model": "multiple_database.book",
+ "fields": {
+ "title": "Pro Django",
+ "published": "2008-12-16",
+ "authors": [["Marty Alchin"]],
+ "editor": ["George Vilches"]
+ }
+ }
+]
diff --git a/tests/multiple_database/fixtures/multidb.other.json b/tests/multiple_database/fixtures/multidb.other.json
new file mode 100644
index 00000000..c64f4902
--- /dev/null
+++ b/tests/multiple_database/fixtures/multidb.other.json
@@ -0,0 +1,26 @@
+[
+ {
+ "pk": 1,
+ "model": "multiple_database.person",
+ "fields": {
+ "name": "Mark Pilgrim"
+ }
+ },
+ {
+ "pk": 2,
+ "model": "multiple_database.person",
+ "fields": {
+ "name": "Chris Mills"
+ }
+ },
+ {
+ "pk": 2,
+ "model": "multiple_database.book",
+ "fields": {
+ "title": "Dive into Python",
+ "published": "2009-5-4",
+ "authors": [["Mark Pilgrim"]],
+ "editor": ["Chris Mills"]
+ }
+ }
+]
\ No newline at end of file
diff --git a/tests/multiple_database/fixtures/pets.json b/tests/multiple_database/fixtures/pets.json
new file mode 100644
index 00000000..89756a3e
--- /dev/null
+++ b/tests/multiple_database/fixtures/pets.json
@@ -0,0 +1,18 @@
+[
+ {
+ "pk": 1,
+ "model": "multiple_database.pet",
+ "fields": {
+ "name": "Mr Bigglesworth",
+ "owner": 1
+ }
+ },
+ {
+ "pk": 2,
+ "model": "multiple_database.pet",
+ "fields": {
+ "name": "Spot",
+ "owner": 2
+ }
+ }
+]
\ No newline at end of file
diff --git a/tests/multiple_database/models.py b/tests/multiple_database/models.py
new file mode 100644
index 00000000..367cd31d
--- /dev/null
+++ b/tests/multiple_database/models.py
@@ -0,0 +1,89 @@
+from django.contrib.auth.models import User
+from django.contrib.contenttypes.fields import (
+ GenericForeignKey, GenericRelation,
+)
+from django.contrib.contenttypes.models import ContentType
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Review(models.Model):
+ source = models.CharField(max_length=100)
+ content_type = models.ForeignKey(ContentType, models.CASCADE)
+ object_id = models.PositiveIntegerField()
+ content_object = GenericForeignKey()
+
+ def __str__(self):
+ return self.source
+
+ class Meta:
+ ordering = ('source',)
+
+
+class PersonManager(models.Manager):
+ def get_by_natural_key(self, name):
+ return self.get(name=name)
+
+
+@python_2_unicode_compatible
+class Person(models.Model):
+ objects = PersonManager()
+ name = models.CharField(max_length=100)
+
+ def __str__(self):
+ return self.name
+
+ class Meta:
+ ordering = ('name',)
+
+
+# This book manager doesn't do anything interesting; it just
+# exists to strip out the 'extra_arg' argument to certain
+# calls. This argument is used to establish that the BookManager
+# is actually getting used when it should be.
+class BookManager(models.Manager):
+ def create(self, *args, **kwargs):
+ kwargs.pop('extra_arg', None)
+ return super(BookManager, self).create(*args, **kwargs)
+
+ def get_or_create(self, *args, **kwargs):
+ kwargs.pop('extra_arg', None)
+ return super(BookManager, self).get_or_create(*args, **kwargs)
+
+
+@python_2_unicode_compatible
+class Book(models.Model):
+ objects = BookManager()
+ title = models.CharField(max_length=100)
+ published = models.DateField()
+ authors = models.ManyToManyField(Person)
+ editor = models.ForeignKey(Person, models.SET_NULL, null=True, related_name='edited')
+ reviews = GenericRelation(Review)
+ pages = models.IntegerField(default=100)
+
+ def __str__(self):
+ return self.title
+
+ class Meta:
+ ordering = ('title',)
+
+
+@python_2_unicode_compatible
+class Pet(models.Model):
+ name = models.CharField(max_length=100)
+ owner = models.ForeignKey(Person, models.CASCADE)
+
+ def __str__(self):
+ return self.name
+
+ class Meta:
+ ordering = ('name',)
+
+
+class UserProfile(models.Model):
+ user = models.OneToOneField(User, models.SET_NULL, null=True)
+ flavor = models.CharField(max_length=100)
+
+ class Meta:
+ ordering = ('flavor',)
diff --git a/tests/multiple_database/routers.py b/tests/multiple_database/routers.py
new file mode 100644
index 00000000..e467cf56
--- /dev/null
+++ b/tests/multiple_database/routers.py
@@ -0,0 +1,62 @@
+from __future__ import unicode_literals
+
+from django.db import DEFAULT_DB_ALIAS
+
+
+class TestRouter(object):
+ """
+ Vaguely behave like primary/replica, but the databases aren't assumed to
+ propagate changes.
+ """
+
+ def db_for_read(self, model, instance=None, **hints):
+ if instance:
+ return instance._state.db or 'other'
+ return 'other'
+
+ def db_for_write(self, model, **hints):
+ return DEFAULT_DB_ALIAS
+
+ def allow_relation(self, obj1, obj2, **hints):
+ return obj1._state.db in ('default', 'other') and obj2._state.db in ('default', 'other')
+
+ def allow_migrate(self, db, app_label, **hints):
+ return True
+
+
+class AuthRouter(object):
+ """
+ Control all database operations on models in the contrib.auth application.
+ """
+
+ def db_for_read(self, model, **hints):
+ "Point all read operations on auth models to 'default'"
+ if model._meta.app_label == 'auth':
+ # We use default here to ensure we can tell the difference
+ # between a read request and a write request for Auth objects
+ return 'default'
+ return None
+
+ def db_for_write(self, model, **hints):
+ "Point all operations on auth models to 'other'"
+ if model._meta.app_label == 'auth':
+ return 'other'
+ return None
+
+ def allow_relation(self, obj1, obj2, **hints):
+ "Allow any relation if a model in Auth is involved"
+ if obj1._meta.app_label == 'auth' or obj2._meta.app_label == 'auth':
+ return True
+ return None
+
+ def allow_migrate(self, db, app_label, **hints):
+ "Make sure the auth app only appears on the 'other' db"
+ if app_label == 'auth':
+ return db == 'other'
+ return None
+
+
+class WriteRouter(object):
+ # A router that only expresses an opinion on writes
+ def db_for_write(self, model, **hints):
+ return 'writer'
diff --git a/tests/multiple_database/tests.py b/tests/multiple_database/tests.py
new file mode 100644
index 00000000..cc762f04
--- /dev/null
+++ b/tests/multiple_database/tests.py
@@ -0,0 +1,2042 @@
+from __future__ import unicode_literals
+
+import datetime
+import pickle
+from operator import attrgetter
+
+import django
+from django.contrib.auth.models import User
+from django.contrib.contenttypes.models import ContentType
+from django.core import management
+from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
+from django.db.models import signals
+from django.db.utils import ConnectionRouter
+from django.test import SimpleTestCase, TestCase, override_settings
+from django.utils.six import StringIO
+
+from .models import Book, Person, Pet, Review, UserProfile
+from .routers import AuthRouter, TestRouter, WriteRouter
+
+
+class QueryTestCase(TestCase):
+ multi_db = True
+
+ def test_db_selection(self):
+ "Querysets will use the default database by default"
+ self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
+ self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
+
+ self.assertEqual(Book.objects.using('other').db, 'other')
+
+ self.assertEqual(Book.objects.db_manager('other').db, 'other')
+ self.assertEqual(Book.objects.db_manager('other').all().db, 'other')
+
+ def test_default_creation(self):
+ "Objects created on the default database don't leak onto other databases"
+ # Create a book on the default database using create()
+ Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+
+ # Create a book on the default database using a save
+ dive = Book()
+ dive.title = "Dive into Python"
+ dive.published = datetime.date(2009, 5, 4)
+ dive.save()
+
+ # Book exists on the default database, but not on other database
+ try:
+ Book.objects.get(title="Pro Django")
+ Book.objects.using('default').get(title="Pro Django")
+ except Book.DoesNotExist:
+ self.fail('"Pro Django" should exist on default database')
+
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('other').get(title="Pro Django")
+
+ try:
+ Book.objects.get(title="Dive into Python")
+ Book.objects.using('default').get(title="Dive into Python")
+ except Book.DoesNotExist:
+ self.fail('"Dive into Python" should exist on default database')
+
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('other').get(title="Dive into Python")
+
+ def test_other_creation(self):
+ "Objects created on another database don't leak onto the default database"
+ # Create a book on the second database
+ Book.objects.using('other').create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ # Create a book on the default database using a save
+ dive = Book()
+ dive.title = "Dive into Python"
+ dive.published = datetime.date(2009, 5, 4)
+ dive.save(using='other')
+
+ # Book exists on the default database, but not on other database
+ try:
+ Book.objects.using('other').get(title="Pro Django")
+ except Book.DoesNotExist:
+ self.fail('"Pro Django" should exist on other database')
+
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.get(title="Pro Django")
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(title="Pro Django")
+
+ try:
+ Book.objects.using('other').get(title="Dive into Python")
+ except Book.DoesNotExist:
+ self.fail('"Dive into Python" should exist on other database')
+
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.get(title="Dive into Python")
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(title="Dive into Python")
+
+ def test_refresh(self):
+ dive = Book(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ dive.save(using='other')
+ dive2 = Book.objects.using('other').get()
+ dive2.title = "Dive into Python (on default)"
+ dive2.save(using='default')
+ dive.refresh_from_db()
+ self.assertEqual(dive.title, "Dive into Python")
+ dive.refresh_from_db(using='default')
+ self.assertEqual(dive.title, "Dive into Python (on default)")
+ self.assertEqual(dive._state.db, "default")
+
+ def test_basic_queries(self):
+ "Queries are constrained to a single database"
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
+ self.assertEqual(dive.title, "Dive into Python")
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(published=datetime.date(2009, 5, 4))
+
+ dive = Book.objects.using('other').get(title__icontains="dive")
+ self.assertEqual(dive.title, "Dive into Python")
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(title__icontains="dive")
+
+ dive = Book.objects.using('other').get(title__iexact="dive INTO python")
+ self.assertEqual(dive.title, "Dive into Python")
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(title__iexact="dive INTO python")
+
+ dive = Book.objects.using('other').get(published__year=2009)
+ self.assertEqual(dive.title, "Dive into Python")
+ self.assertEqual(dive.published, datetime.date(2009, 5, 4))
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(published__year=2009)
+
+ years = Book.objects.using('other').dates('published', 'year')
+ self.assertEqual([o.year for o in years], [2009])
+ years = Book.objects.using('default').dates('published', 'year')
+ self.assertEqual([o.year for o in years], [])
+
+ months = Book.objects.using('other').dates('published', 'month')
+ self.assertEqual([o.month for o in months], [5])
+ months = Book.objects.using('default').dates('published', 'month')
+ self.assertEqual([o.month for o in months], [])
+
+ def test_m2m_separation(self):
+ "M2M fields are constrained to a single database"
+ # Create a book and author on the default database
+ pro = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ marty = Person.objects.create(name="Marty Alchin")
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+
+ # Save the author relations
+ pro.authors.set([marty])
+ dive.authors.set([mark])
+
+ # Inspect the m2m tables directly.
+ # There should be 1 entry in each database
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
+
+ # Queries work across m2m joins
+ self.assertEqual(
+ list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
+ ['Pro Django']
+ )
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
+ []
+ )
+
+ self.assertEqual(
+ list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
+ ['Dive into Python']
+ )
+
+ # Reget the objects to clear caches
+ dive = Book.objects.using('other').get(title="Dive into Python")
+ mark = Person.objects.using('other').get(name="Mark Pilgrim")
+
+ # Retrieve related object by descriptor. Related objects should be database-bound
+ self.assertEqual(list(dive.authors.all().values_list('name', flat=True)), ['Mark Pilgrim'])
+
+ self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)), ['Dive into Python'])
+
+ def test_m2m_forward_operations(self):
+ "M2M forward manipulations are all constrained to a single DB"
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+
+ # Save the author relations
+ dive.authors.set([mark])
+
+ # Add a second author
+ john = Person.objects.using('other').create(name="John Smith")
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
+ []
+ )
+
+ dive.authors.add(john)
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
+ ['Dive into Python']
+ )
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
+ ['Dive into Python']
+ )
+
+ # Remove the second author
+ dive.authors.remove(john)
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
+ ['Dive into Python']
+ )
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
+ []
+ )
+
+ # Clear all authors
+ dive.authors.clear()
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
+ []
+ )
+
+ # Create an author through the m2m interface
+ dive.authors.create(name='Jane Brown')
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
+ ['Dive into Python']
+ )
+
+ def test_m2m_reverse_operations(self):
+ "M2M reverse manipulations are all constrained to a single DB"
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+
+ # Save the author relations
+ dive.authors.set([mark])
+
+ # Create a second book on the other database
+ grease = Book.objects.using('other').create(title="Greasemonkey Hacks", published=datetime.date(2005, 11, 1))
+
+ # Add a books to the m2m
+ mark.book_set.add(grease)
+ self.assertEqual(
+ list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
+ ['Mark Pilgrim']
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
+ ),
+ ['Mark Pilgrim']
+ )
+
+ # Remove a book from the m2m
+ mark.book_set.remove(grease)
+ self.assertEqual(
+ list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
+ ['Mark Pilgrim']
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
+ ),
+ []
+ )
+
+ # Clear the books associated with mark
+ mark.book_set.clear()
+ self.assertEqual(
+ list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
+ ),
+ []
+ )
+
+ # Create a book through the m2m interface
+ mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
+ self.assertEqual(
+ list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
+ ['Mark Pilgrim']
+ )
+
+ def test_m2m_cross_database_protection(self):
+ "Operations that involve sharing M2M objects across databases raise an error"
+ # Create a book and author on the default database
+ pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+
+ marty = Person.objects.create(name="Marty Alchin")
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+ # Set a foreign key set with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='default'):
+ marty.edited.set([pro, dive])
+
+ # Add to an m2m with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='default'):
+ marty.book_set.add(dive)
+
+ # Set a m2m with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='default'):
+ marty.book_set.set([pro, dive])
+
+ # Add to a reverse m2m with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='other'):
+ dive.authors.add(marty)
+
+ # Set a reverse m2m with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='other'):
+ dive.authors.set([mark, marty])
+
+ def test_m2m_deletion(self):
+ "Cascaded deletions of m2m relations issue queries on the right database"
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+ dive.authors.set([mark])
+
+ # Check the initial state
+ self.assertEqual(Person.objects.using('default').count(), 0)
+ self.assertEqual(Book.objects.using('default').count(), 0)
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
+
+ self.assertEqual(Person.objects.using('other').count(), 1)
+ self.assertEqual(Book.objects.using('other').count(), 1)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
+
+ # Delete the object on the other database
+ dive.delete(using='other')
+
+ self.assertEqual(Person.objects.using('default').count(), 0)
+ self.assertEqual(Book.objects.using('default').count(), 0)
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
+
+ # The person still exists ...
+ self.assertEqual(Person.objects.using('other').count(), 1)
+ # ... but the book has been deleted
+ self.assertEqual(Book.objects.using('other').count(), 0)
+ # ... and the relationship object has also been deleted.
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ # Now try deletion in the reverse direction. Set up the relation again
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ dive.authors.set([mark])
+
+ # Check the initial state
+ self.assertEqual(Person.objects.using('default').count(), 0)
+ self.assertEqual(Book.objects.using('default').count(), 0)
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
+
+ self.assertEqual(Person.objects.using('other').count(), 1)
+ self.assertEqual(Book.objects.using('other').count(), 1)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
+
+ # Delete the object on the other database
+ mark.delete(using='other')
+
+ self.assertEqual(Person.objects.using('default').count(), 0)
+ self.assertEqual(Book.objects.using('default').count(), 0)
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
+
+ # The person has been deleted ...
+ self.assertEqual(Person.objects.using('other').count(), 0)
+ # ... but the book still exists
+ self.assertEqual(Book.objects.using('other').count(), 1)
+ # ... and the relationship object has been deleted.
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ def test_foreign_key_separation(self):
+ "FK fields are constrained to a single database"
+ # Create a book and author on the default database
+ pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+
+ george = Person.objects.create(name="George Vilches")
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ chris = Person.objects.using('other').create(name="Chris Mills")
+
+ # Save the author's favorite books
+ pro.editor = george
+ pro.save()
+
+ dive.editor = chris
+ dive.save()
+
+ pro = Book.objects.using('default').get(title="Pro Django")
+ self.assertEqual(pro.editor.name, "George Vilches")
+
+ dive = Book.objects.using('other').get(title="Dive into Python")
+ self.assertEqual(dive.editor.name, "Chris Mills")
+
+ # Queries work across foreign key joins
+ self.assertEqual(
+ list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
+ ['George Vilches']
+ )
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
+ []
+ )
+
+ self.assertEqual(
+ list(
+ Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)
+ ),
+ []
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
+ ),
+ ['Chris Mills']
+ )
+
+ # Reget the objects to clear caches
+ chris = Person.objects.using('other').get(name="Chris Mills")
+ dive = Book.objects.using('other').get(title="Dive into Python")
+
+ # Retrieve related object by descriptor. Related objects should be database-bound
+ self.assertEqual(list(chris.edited.values_list('title', flat=True)), ['Dive into Python'])
+
+ def test_foreign_key_reverse_operations(self):
+ "FK reverse manipulations are all constrained to a single DB"
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ chris = Person.objects.using('other').create(name="Chris Mills")
+
+ # Save the author relations
+ dive.editor = chris
+ dive.save()
+
+ # Add a second book edited by chris
+ html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
+ []
+ )
+
+ chris.edited.add(html5)
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
+ ['Chris Mills']
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
+ ),
+ ['Chris Mills']
+ )
+
+ # Remove the second editor
+ chris.edited.remove(html5)
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
+ ),
+ ['Chris Mills']
+ )
+
+ # Clear all edited books
+ chris.edited.clear()
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
+ ),
+ []
+ )
+
+ # Create an author through the m2m interface
+ chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
+ ['Chris Mills']
+ )
+ self.assertEqual(
+ list(
+ Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
+ ),
+ []
+ )
+
+ def test_foreign_key_cross_database_protection(self):
+ "Operations that involve sharing FK objects across databases raise an error"
+ # Create a book and author on the default database
+ pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+ marty = Person.objects.create(name="Marty Alchin")
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ # Set a foreign key with an object from a different database
+ with self.assertRaises(ValueError):
+ dive.editor = marty
+
+ # Set a foreign key set with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='default'):
+ marty.edited.set([pro, dive])
+
+ # Add to a foreign key set with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='default'):
+ marty.edited.add(dive)
+
+ def test_foreign_key_deletion(self):
+ "Cascaded deletions of Foreign Key relations issue queries on the right database"
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+ Pet.objects.using('other').create(name="Fido", owner=mark)
+
+ # Check the initial state
+ self.assertEqual(Person.objects.using('default').count(), 0)
+ self.assertEqual(Pet.objects.using('default').count(), 0)
+
+ self.assertEqual(Person.objects.using('other').count(), 1)
+ self.assertEqual(Pet.objects.using('other').count(), 1)
+
+ # Delete the person object, which will cascade onto the pet
+ mark.delete(using='other')
+
+ self.assertEqual(Person.objects.using('default').count(), 0)
+ self.assertEqual(Pet.objects.using('default').count(), 0)
+
+ # Both the pet and the person have been deleted from the right database
+ self.assertEqual(Person.objects.using('other').count(), 0)
+ self.assertEqual(Pet.objects.using('other').count(), 0)
+
+ def test_foreign_key_validation(self):
+ "ForeignKey.validate() uses the correct database"
+ mickey = Person.objects.using('other').create(name="Mickey")
+ pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
+ self.assertIsNone(pluto.full_clean())
+
+ # Any router that accesses `model` in db_for_read() works here.
+ @override_settings(DATABASE_ROUTERS=[AuthRouter()])
+ def test_foreign_key_validation_with_router(self):
+ """
+ ForeignKey.validate() passes `model` to db_for_read() even if
+ model_instance=None.
+ """
+ if django.VERSION < (1, 11, 0):
+ self.skipTest("TODO fix AttributeError: type object 'NoneType' has no attribute '_meta'")
+ mickey = Person.objects.create(name="Mickey")
+ owner_field = Pet._meta.get_field('owner')
+ self.assertEqual(owner_field.clean(mickey.pk, None), mickey.pk)
+
+ def test_o2o_separation(self):
+ "OneToOne fields are constrained to a single database"
+ # Create a user and profile on the default database
+ alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
+ alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
+
+ # Create a user and profile on the other database
+ bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
+ bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
+
+ # Retrieve related objects; queries should be database constrained
+ alice = User.objects.using('default').get(username="alice")
+ self.assertEqual(alice.userprofile.flavor, "chocolate")
+
+ bob = User.objects.using('other').get(username="bob")
+ self.assertEqual(bob.userprofile.flavor, "crunchy frog")
+
+ # Queries work across joins
+ self.assertEqual(
+ list(
+ User.objects.using('default')
+ .filter(userprofile__flavor='chocolate').values_list('username', flat=True)
+ ),
+ ['alice']
+ )
+ self.assertEqual(
+ list(
+ User.objects.using('other')
+ .filter(userprofile__flavor='chocolate').values_list('username', flat=True)
+ ),
+ []
+ )
+
+ self.assertEqual(
+ list(
+ User.objects.using('default')
+ .filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
+ ),
+ []
+ )
+ self.assertEqual(
+ list(
+ User.objects.using('other')
+ .filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
+ ),
+ ['bob']
+ )
+
+ # Reget the objects to clear caches
+ alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
+ bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
+
+ # Retrieve related object by descriptor. Related objects should be database-bound
+ self.assertEqual(alice_profile.user.username, 'alice')
+ self.assertEqual(bob_profile.user.username, 'bob')
+
+ def test_o2o_cross_database_protection(self):
+ "Operations that involve sharing FK objects across databases raise an error"
+ # Create a user and profile on the default database
+ alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
+
+ # Create a user and profile on the other database
+ bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
+
+ # Set a one-to-one relation with an object from a different database
+ alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
+ with self.assertRaises(ValueError):
+ bob.userprofile = alice_profile
+
+ # BUT! if you assign a FK object when the base object hasn't
+ # been saved yet, you implicitly assign the database for the
+ # base object.
+ bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
+
+ new_bob_profile = UserProfile(flavor="spring surprise")
+
+ # assigning a profile requires an explicit pk as the object isn't saved
+ charlie = User(pk=51, username='charlie', email='charlie@example.com')
+ charlie.set_unusable_password()
+
+ # initially, no db assigned
+ self.assertIsNone(new_bob_profile._state.db)
+ self.assertIsNone(charlie._state.db)
+
+ # old object comes from 'other', so the new object is set to use 'other'...
+ new_bob_profile.user = bob
+ charlie.userprofile = bob_profile
+ self.assertEqual(new_bob_profile._state.db, 'other')
+ self.assertEqual(charlie._state.db, 'other')
+
+ # ... but it isn't saved yet
+ self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)), ['bob'])
+ self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)), ['crunchy frog'])
+
+ # When saved (no using required), new objects goes to 'other'
+ charlie.save()
+ bob_profile.save()
+ new_bob_profile.save()
+ self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)), ['alice'])
+ self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)), ['bob', 'charlie'])
+ self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate'])
+ self.assertEqual(
+ list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
+ ['crunchy frog', 'spring surprise']
+ )
+
+ # This also works if you assign the O2O relation in the constructor
+ denise = User.objects.db_manager('other').create_user('denise', 'denise@example.com')
+ denise_profile = UserProfile(flavor="tofu", user=denise)
+
+ self.assertEqual(denise_profile._state.db, 'other')
+ # ... but it isn't saved yet
+ self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate'])
+ self.assertEqual(
+ list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
+ ['crunchy frog', 'spring surprise']
+ )
+
+ # When saved, the new profile goes to 'other'
+ denise_profile.save()
+ self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate'])
+ self.assertEqual(
+ list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
+ ['crunchy frog', 'spring surprise', 'tofu']
+ )
+
+ def test_generic_key_separation(self):
+ "Generic fields are constrained to a single database"
+ # Create a book and author on the default database
+ pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+ review1 = Review.objects.create(source="Python Monthly", content_object=pro)
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
+
+ review1 = Review.objects.using('default').get(source="Python Monthly")
+ self.assertEqual(review1.content_object.title, "Pro Django")
+
+ review2 = Review.objects.using('other').get(source="Python Weekly")
+ self.assertEqual(review2.content_object.title, "Dive into Python")
+
+ # Reget the objects to clear caches
+ dive = Book.objects.using('other').get(title="Dive into Python")
+
+ # Retrieve related object by descriptor. Related objects should be database-bound
+ self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)), ['Python Weekly'])
+
+ def test_generic_key_reverse_operations(self):
+ "Generic reverse manipulations are all constrained to a single DB"
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ temp = Book.objects.using('other').create(title="Temp", published=datetime.date(2009, 5, 4))
+ review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
+ review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
+
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ ['Python Weekly']
+ )
+
+ # Add a second review
+ dive.reviews.add(review2)
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ ['Python Monthly', 'Python Weekly']
+ )
+
+ # Remove the second author
+ dive.reviews.remove(review1)
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ ['Python Monthly']
+ )
+
+ # Clear all reviews
+ dive.reviews.clear()
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ []
+ )
+
+ # Create an author through the generic interface
+ dive.reviews.create(source='Python Daily')
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
+ []
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ ['Python Daily']
+ )
+
+ def test_generic_key_cross_database_protection(self):
+ "Operations that involve sharing generic key objects across databases raise an error"
+ # Create a book and author on the default database
+ pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+ review1 = Review.objects.create(source="Python Monthly", content_object=pro)
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ Review.objects.using('other').create(source="Python Weekly", content_object=dive)
+
+ # Set a foreign key with an object from a different database
+ with self.assertRaises(ValueError):
+ review1.content_object = dive
+
+ # Add to a foreign key set with an object from a different database
+ with self.assertRaises(ValueError):
+ with transaction.atomic(using='other'):
+ dive.reviews.add(review1)
+
+ # BUT! if you assign a FK object when the base object hasn't
+ # been saved yet, you implicitly assign the database for the
+ # base object.
+ review3 = Review(source="Python Daily")
+ # initially, no db assigned
+ self.assertIsNone(review3._state.db)
+
+ # Dive comes from 'other', so review3 is set to use 'other'...
+ review3.content_object = dive
+ self.assertEqual(review3._state.db, 'other')
+ # ... but it isn't saved yet
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
+ ['Python Monthly']
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ ['Python Weekly']
+ )
+
+ # When saved, John goes to 'other'
+ review3.save()
+ self.assertEqual(
+ list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
+ ['Python Monthly']
+ )
+ self.assertEqual(
+ list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
+ ['Python Daily', 'Python Weekly']
+ )
+
+ def test_generic_key_deletion(self):
+ "Cascaded deletions of Generic Key relations issue queries on the right database"
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ Review.objects.using('other').create(source="Python Weekly", content_object=dive)
+
+ # Check the initial state
+ self.assertEqual(Book.objects.using('default').count(), 0)
+ self.assertEqual(Review.objects.using('default').count(), 0)
+
+ self.assertEqual(Book.objects.using('other').count(), 1)
+ self.assertEqual(Review.objects.using('other').count(), 1)
+
+ # Delete the Book object, which will cascade onto the pet
+ dive.delete(using='other')
+
+ self.assertEqual(Book.objects.using('default').count(), 0)
+ self.assertEqual(Review.objects.using('default').count(), 0)
+
+ # Both the pet and the person have been deleted from the right database
+ self.assertEqual(Book.objects.using('other').count(), 0)
+ self.assertEqual(Review.objects.using('other').count(), 0)
+
+ def test_ordering(self):
+ "get_next_by_XXX commands stick to a single database"
+ Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ learn = Book.objects.using('other').create(title="Learning Python", published=datetime.date(2008, 7, 16))
+
+ self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
+ self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
+
+ def test_raw(self):
+ "test the raw() method across databases"
+ dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+ val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
+ self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
+
+ val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
+ self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
+
+ def test_select_related(self):
+ "Database assignment is retained if an object is retrieved with select_related()"
+ # Create a book and author on the other database
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+ Book.objects.using('other').create(
+ title="Dive into Python",
+ published=datetime.date(2009, 5, 4),
+ editor=mark,
+ )
+
+ # Retrieve the Person using select_related()
+ book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
+
+ # The editor instance should have a db state
+ self.assertEqual(book.editor._state.db, 'other')
+
+ def test_subquery(self):
+ """Make sure as_sql works with subqueries and primary/replica."""
+ sub = Person.objects.using('other').filter(name='fff')
+ qs = Book.objects.filter(editor__in=sub)
+
+ # When you call __str__ on the query object, it doesn't know about using
+ # so it falls back to the default. If the subquery explicitly uses a
+ # different database, an error should be raised.
+ with self.assertRaises(ValueError):
+ str(qs.query)
+
+ # Evaluating the query shouldn't work, either
+ with self.assertRaises(ValueError):
+ for obj in qs:
+ pass
+
+ def test_related_manager(self):
+ "Related managers return managers, not querysets"
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+
+ # extra_arg is removed by the BookManager's implementation of
+ # create(); but the BookManager's implementation won't get called
+ # unless edited returns a Manager, not a queryset
+ mark.book_set.create(title="Dive into Python", published=datetime.date(2009, 5, 4), extra_arg=True)
+ mark.book_set.get_or_create(title="Dive into Python", published=datetime.date(2009, 5, 4), extra_arg=True)
+ mark.edited.create(title="Dive into Water", published=datetime.date(2009, 5, 4), extra_arg=True)
+ mark.edited.get_or_create(title="Dive into Water", published=datetime.date(2009, 5, 4), extra_arg=True)
+
+
+class ConnectionRouterTestCase(SimpleTestCase):
+ @override_settings(DATABASE_ROUTERS=[
+ 'multiple_database.tests.TestRouter',
+ 'multiple_database.tests.WriteRouter'])
+ def test_router_init_default(self):
+ connection_router = ConnectionRouter()
+ self.assertListEqual([r.__class__.__name__ for r in connection_router.routers], ['TestRouter', 'WriteRouter'])
+
+ def test_router_init_arg(self):
+ connection_router = ConnectionRouter([
+ 'multiple_database.tests.TestRouter',
+ 'multiple_database.tests.WriteRouter'
+ ])
+ self.assertListEqual([r.__class__.__name__ for r in connection_router.routers], ['TestRouter', 'WriteRouter'])
+
+ # Init with instances instead of strings
+ connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
+ self.assertListEqual([r.__class__.__name__ for r in connection_router.routers], ['TestRouter', 'WriteRouter'])
+
+
+# Make the 'other' database appear to be a replica of the 'default'
+@override_settings(DATABASE_ROUTERS=[TestRouter()])
+class RouterTestCase(TestCase):
+ multi_db = True
+
+ def test_db_selection(self):
+ "Querysets obey the router for db suggestions"
+ self.assertEqual(Book.objects.db, 'other')
+ self.assertEqual(Book.objects.all().db, 'other')
+
+ self.assertEqual(Book.objects.using('default').db, 'default')
+
+ self.assertEqual(Book.objects.db_manager('default').db, 'default')
+ self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
+
+ def test_migrate_selection(self):
+ "Synchronization behavior is predictable"
+
+ self.assertTrue(router.allow_migrate_model('default', User))
+ self.assertTrue(router.allow_migrate_model('default', Book))
+
+ self.assertTrue(router.allow_migrate_model('other', User))
+ self.assertTrue(router.allow_migrate_model('other', Book))
+
+ with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
+ # Add the auth router to the chain. TestRouter is a universal
+ # synchronizer, so it should have no effect.
+ self.assertTrue(router.allow_migrate_model('default', User))
+ self.assertTrue(router.allow_migrate_model('default', Book))
+
+ self.assertTrue(router.allow_migrate_model('other', User))
+ self.assertTrue(router.allow_migrate_model('other', Book))
+
+ with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
+ # Now check what happens if the router order is reversed.
+ self.assertFalse(router.allow_migrate_model('default', User))
+ self.assertTrue(router.allow_migrate_model('default', Book))
+
+ self.assertTrue(router.allow_migrate_model('other', User))
+ self.assertTrue(router.allow_migrate_model('other', Book))
+
+ def test_partial_router(self):
+ "A router can choose to implement a subset of methods"
+ dive = Book.objects.using('other').create(title="Dive into Python",
+ published=datetime.date(2009, 5, 4))
+
+ # First check the baseline behavior.
+
+ self.assertEqual(router.db_for_read(User), 'other')
+ self.assertEqual(router.db_for_read(Book), 'other')
+
+ self.assertEqual(router.db_for_write(User), 'default')
+ self.assertEqual(router.db_for_write(Book), 'default')
+
+ self.assertTrue(router.allow_relation(dive, dive))
+
+ self.assertTrue(router.allow_migrate_model('default', User))
+ self.assertTrue(router.allow_migrate_model('default', Book))
+
+ with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
+ self.assertEqual(router.db_for_read(User), 'default')
+ self.assertEqual(router.db_for_read(Book), 'other')
+
+ self.assertEqual(router.db_for_write(User), 'writer')
+ self.assertEqual(router.db_for_write(Book), 'writer')
+
+ self.assertTrue(router.allow_relation(dive, dive))
+
+ self.assertFalse(router.allow_migrate_model('default', User))
+ self.assertTrue(router.allow_migrate_model('default', Book))
+
+ def test_database_routing(self):
+ marty = Person.objects.using('default').create(name="Marty Alchin")
+ pro = Book.objects.using('default').create(title="Pro Django",
+ published=datetime.date(2008, 12, 16),
+ editor=marty)
+ pro.authors.set([marty])
+
+ # Create a book and author on the other database
+ Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ # An update query will be routed to the default database
+ Book.objects.filter(title='Pro Django').update(pages=200)
+
+ with self.assertRaises(Book.DoesNotExist):
+ # By default, the get query will be directed to 'other'
+ Book.objects.get(title='Pro Django')
+
+ # But the same query issued explicitly at a database will work.
+ pro = Book.objects.using('default').get(title='Pro Django')
+
+ # The update worked.
+ self.assertEqual(pro.pages, 200)
+
+ # An update query with an explicit using clause will be routed
+ # to the requested database.
+ Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
+ self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
+
+ # Related object queries stick to the same database
+ # as the original object, regardless of the router
+ self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
+ self.assertEqual(pro.editor.name, 'Marty Alchin')
+
+ # get_or_create is a special case. The get needs to be targeted at
+ # the write database in order to avoid potential transaction
+ # consistency problems
+ book, created = Book.objects.get_or_create(title="Pro Django")
+ self.assertFalse(created)
+
+ book, created = Book.objects.get_or_create(title="Dive Into Python",
+ defaults={'published': datetime.date(2009, 5, 4)})
+ self.assertTrue(created)
+
+ # Check the head count of objects
+ self.assertEqual(Book.objects.using('default').count(), 2)
+ self.assertEqual(Book.objects.using('other').count(), 1)
+ # If a database isn't specified, the read database is used
+ self.assertEqual(Book.objects.count(), 1)
+
+ # A delete query will also be routed to the default database
+ Book.objects.filter(pages__gt=150).delete()
+
+ # The default database has lost the book.
+ self.assertEqual(Book.objects.using('default').count(), 1)
+ self.assertEqual(Book.objects.using('other').count(), 1)
+
+ def test_invalid_set_foreign_key_assignment(self):
+ marty = Person.objects.using('default').create(name="Marty Alchin")
+ dive = Book.objects.using('other').create(
+ title="Dive into Python",
+ published=datetime.date(2009, 5, 4),
+ )
+ # Set a foreign key set with an object from a different database
+ msg = " instance isn't saved. Use bulk=False or save the object first."
+ with self.assertRaisesMessage(ValueError, msg):
+ marty.edited.set([dive])
+
+ def test_foreign_key_cross_database_protection(self):
+ "Foreign keys can cross databases if they two databases have a common source"
+ # Create a book and author on the default database
+ pro = Book.objects.using('default').create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ marty = Person.objects.using('default').create(name="Marty Alchin")
+
+ # Create a book and author on the other database
+ dive = Book.objects.using('other').create(title="Dive into Python",
+ published=datetime.date(2009, 5, 4))
+
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+
+ # Set a foreign key with an object from a different database
+ dive.editor = marty
+
+ # Database assignments of original objects haven't changed...
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(mark._state.db, 'other')
+
+ # ... but they will when the affected object is saved.
+ dive.save()
+ self.assertEqual(dive._state.db, 'default')
+
+ # ...and the source database now has a copy of any object saved
+ Book.objects.using('default').get(title='Dive into Python').delete()
+
+ # This isn't a real primary/replica database, so restore the original from other
+ dive = Book.objects.using('other').get(title='Dive into Python')
+ self.assertEqual(dive._state.db, 'other')
+
+ # Set a foreign key set with an object from a different database
+ marty.edited.set([pro, dive], bulk=False)
+
+ # Assignment implies a save, so database assignments of original objects have changed...
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'default')
+ self.assertEqual(mark._state.db, 'other')
+
+ # ...and the source database now has a copy of any object saved
+ Book.objects.using('default').get(title='Dive into Python').delete()
+
+ # This isn't a real primary/replica database, so restore the original from other
+ dive = Book.objects.using('other').get(title='Dive into Python')
+ self.assertEqual(dive._state.db, 'other')
+
+ # Add to a foreign key set with an object from a different database
+ marty.edited.add(dive, bulk=False)
+
+ # Add implies a save, so database assignments of original objects have changed...
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'default')
+ self.assertEqual(mark._state.db, 'other')
+
+ # ...and the source database now has a copy of any object saved
+ Book.objects.using('default').get(title='Dive into Python').delete()
+
+ # This isn't a real primary/replica database, so restore the original from other
+ dive = Book.objects.using('other').get(title='Dive into Python')
+
+ # If you assign a FK object when the base object hasn't
+ # been saved yet, you implicitly assign the database for the
+ # base object.
+ chris = Person(name="Chris Mills")
+ html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
+ # initially, no db assigned
+ self.assertIsNone(chris._state.db)
+ self.assertIsNone(html5._state.db)
+
+ # old object comes from 'other', so the new object is set to use the
+ # source of 'other'...
+ self.assertEqual(dive._state.db, 'other')
+ chris.save()
+ dive.editor = chris
+ html5.editor = mark
+
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(mark._state.db, 'other')
+ self.assertEqual(chris._state.db, 'default')
+ self.assertEqual(html5._state.db, 'default')
+
+ # This also works if you assign the FK in the constructor
+ water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
+ self.assertEqual(water._state.db, 'default')
+
+ # For the remainder of this test, create a copy of 'mark' in the
+ # 'default' database to prevent integrity errors on backends that
+ # don't defer constraints checks until the end of the transaction
+ mark.save(using='default')
+
+ # This moved 'mark' in the 'default' database, move it back in 'other'
+ mark.save(using='other')
+ self.assertEqual(mark._state.db, 'other')
+
+ # If you create an object through a FK relation, it will be
+ # written to the write database, even if the original object
+ # was on the read database
+ cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
+ self.assertEqual(cheesecake._state.db, 'default')
+
+ # Same goes for get_or_create, regardless of whether getting or creating
+ cheesecake, created = mark.edited.get_or_create(
+ title='Dive into Cheesecake',
+ published=datetime.date(2010, 3, 15),
+ )
+ self.assertEqual(cheesecake._state.db, 'default')
+
+ puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
+ self.assertEqual(puddles._state.db, 'default')
+
+ def test_m2m_cross_database_protection(self):
+ "M2M relations can cross databases if the database share a source"
+ # Create books and authors on the inverse to the usual database
+ pro = Book.objects.using('other').create(pk=1, title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
+
+ dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
+ published=datetime.date(2009, 5, 4))
+
+ mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
+
+ # Now save back onto the usual database.
+ # This simulates primary/replica - the objects exist on both database,
+ # but the _state.db is as it is for all other tests.
+ pro.save(using='default')
+ marty.save(using='default')
+ dive.save(using='other')
+ mark.save(using='other')
+
+ # We have 2 of both types of object on both databases
+ self.assertEqual(Book.objects.using('default').count(), 2)
+ self.assertEqual(Book.objects.using('other').count(), 2)
+ self.assertEqual(Person.objects.using('default').count(), 2)
+ self.assertEqual(Person.objects.using('other').count(), 2)
+
+ # Set a m2m set with an object from a different database
+ marty.book_set.set([pro, dive])
+
+ # Database assignments don't change
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(mark._state.db, 'other')
+
+ # All m2m relations should be saved on the default database
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ # Reset relations
+ Book.authors.through.objects.using('default').delete()
+
+ # Add to an m2m with an object from a different database
+ marty.book_set.add(dive)
+
+ # Database assignments don't change
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(mark._state.db, 'other')
+
+ # All m2m relations should be saved on the default database
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ # Reset relations
+ Book.authors.through.objects.using('default').delete()
+
+ # Set a reverse m2m with an object from a different database
+ dive.authors.set([mark, marty])
+
+ # Database assignments don't change
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(mark._state.db, 'other')
+
+ # All m2m relations should be saved on the default database
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ # Reset relations
+ Book.authors.through.objects.using('default').delete()
+
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ # Add to a reverse m2m with an object from a different database
+ dive.authors.add(marty)
+
+ # Database assignments don't change
+ self.assertEqual(marty._state.db, 'default')
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(mark._state.db, 'other')
+
+ # All m2m relations should be saved on the default database
+ self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
+ self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
+
+ # If you create an object through a M2M relation, it will be
+ # written to the write database, even if the original object
+ # was on the read database
+ alice = dive.authors.create(name='Alice')
+ self.assertEqual(alice._state.db, 'default')
+
+ # Same goes for get_or_create, regardless of whether getting or creating
+ alice, created = dive.authors.get_or_create(name='Alice')
+ self.assertEqual(alice._state.db, 'default')
+
+ bob, created = dive.authors.get_or_create(name='Bob')
+ self.assertEqual(bob._state.db, 'default')
+
+ def test_o2o_cross_database_protection(self):
+ "Operations that involve sharing FK objects across databases raise an error"
+ # Create a user and profile on the default database
+ alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
+
+ # Create a user and profile on the other database
+ bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
+
+ # Set a one-to-one relation with an object from a different database
+ alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
+ bob.userprofile = alice_profile
+
+ # Database assignments of original objects haven't changed...
+ self.assertEqual(alice._state.db, 'default')
+ self.assertEqual(alice_profile._state.db, 'default')
+ self.assertEqual(bob._state.db, 'other')
+
+ # ... but they will when the affected object is saved.
+ bob.save()
+ self.assertEqual(bob._state.db, 'default')
+
+ def test_generic_key_cross_database_protection(self):
+ "Generic Key operations can span databases if they share a source"
+ # Create a book and author on the default database
+ pro = Book.objects.using(
+ 'default').create(title="Pro Django", published=datetime.date(2008, 12, 16))
+
+ review1 = Review.objects.using(
+ 'default').create(source="Python Monthly", content_object=pro)
+
+ # Create a book and author on the other database
+ dive = Book.objects.using(
+ 'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
+
+ review2 = Review.objects.using(
+ 'other').create(source="Python Weekly", content_object=dive)
+
+ # Set a generic foreign key with an object from a different database
+ review1.content_object = dive
+
+ # Database assignments of original objects haven't changed...
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(review1._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(review2._state.db, 'other')
+
+ # ... but they will when the affected object is saved.
+ dive.save()
+ self.assertEqual(review1._state.db, 'default')
+ self.assertEqual(dive._state.db, 'default')
+
+ # ...and the source database now has a copy of any object saved
+ Book.objects.using('default').get(title='Dive into Python').delete()
+
+ # This isn't a real primary/replica database, so restore the original from other
+ dive = Book.objects.using('other').get(title='Dive into Python')
+ self.assertEqual(dive._state.db, 'other')
+
+ # Add to a generic foreign key set with an object from a different database
+ dive.reviews.add(review1)
+
+ # Database assignments of original objects haven't changed...
+ self.assertEqual(pro._state.db, 'default')
+ self.assertEqual(review1._state.db, 'default')
+ self.assertEqual(dive._state.db, 'other')
+ self.assertEqual(review2._state.db, 'other')
+
+ # ... but they will when the affected object is saved.
+ dive.save()
+ self.assertEqual(dive._state.db, 'default')
+
+ # ...and the source database now has a copy of any object saved
+ Book.objects.using('default').get(title='Dive into Python').delete()
+
+ # BUT! if you assign a FK object when the base object hasn't
+ # been saved yet, you implicitly assign the database for the
+ # base object.
+ review3 = Review(source="Python Daily")
+ # initially, no db assigned
+ self.assertIsNone(review3._state.db)
+
+ # Dive comes from 'other', so review3 is set to use the source of 'other'...
+ review3.content_object = dive
+ self.assertEqual(review3._state.db, 'default')
+
+ # If you create an object through a M2M relation, it will be
+ # written to the write database, even if the original object
+ # was on the read database
+ dive = Book.objects.using('other').get(title='Dive into Python')
+ nyt = dive.reviews.create(source="New York Times", content_object=dive)
+ self.assertEqual(nyt._state.db, 'default')
+
+ def test_m2m_managers(self):
+ "M2M relations are represented by managers, and can be controlled like managers"
+ pro = Book.objects.using('other').create(pk=1, title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
+
+ self.assertEqual(pro.authors.db, 'other')
+ self.assertEqual(pro.authors.db_manager('default').db, 'default')
+ self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
+
+ self.assertEqual(marty.book_set.db, 'other')
+ self.assertEqual(marty.book_set.db_manager('default').db, 'default')
+ self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
+
+ def test_foreign_key_managers(self):
+ "FK reverse relations are represented by managers, and can be controlled like managers"
+ marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
+ Book.objects.using('other').create(pk=1, title="Pro Django",
+ published=datetime.date(2008, 12, 16),
+ editor=marty)
+
+ self.assertEqual(marty.edited.db, 'other')
+ self.assertEqual(marty.edited.db_manager('default').db, 'default')
+ self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
+
+ def test_generic_key_managers(self):
+ "Generic key relations are represented by managers, and can be controlled like managers"
+ pro = Book.objects.using('other').create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ Review.objects.using('other').create(source="Python Monthly",
+ content_object=pro)
+
+ self.assertEqual(pro.reviews.db, 'other')
+ self.assertEqual(pro.reviews.db_manager('default').db, 'default')
+ self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
+
+ def test_subquery(self):
+ """Make sure as_sql works with subqueries and primary/replica."""
+ # Create a book and author on the other database
+
+ mark = Person.objects.using('other').create(name="Mark Pilgrim")
+ Book.objects.using('other').create(title="Dive into Python",
+ published=datetime.date(2009, 5, 4),
+ editor=mark)
+
+ sub = Person.objects.filter(name='Mark Pilgrim')
+ qs = Book.objects.filter(editor__in=sub)
+
+ # When you call __str__ on the query object, it doesn't know about using
+ # so it falls back to the default. Don't let routing instructions
+ # force the subquery to an incompatible database.
+ str(qs.query)
+
+ # If you evaluate the query, it should work, running on 'other'
+ self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])
+
+ def test_deferred_models(self):
+ mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
+ mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
+ orig_b = Book.objects.using('other').create(title="Dive into Python",
+ published=datetime.date(2009, 5, 4),
+ editor=mark_other)
+ b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
+ self.assertEqual(b.published, datetime.date(2009, 5, 4))
+ b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
+ b.editor = mark_def
+ b.save(using='default')
+ self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
+ datetime.date(2009, 5, 4))
+
+
+@override_settings(DATABASE_ROUTERS=[AuthRouter()])
+class AuthTestCase(TestCase):
+ multi_db = True
+
+ def test_auth_manager(self):
+ "The methods on the auth manager obey database hints"
+ # Create one user using default allocation policy
+ User.objects.create_user('alice', 'alice@example.com')
+
+ # Create another user, explicitly specifying the database
+ User.objects.db_manager('default').create_user('bob', 'bob@example.com')
+
+ # The second user only exists on the other database
+ alice = User.objects.using('other').get(username='alice')
+
+ self.assertEqual(alice.username, 'alice')
+ self.assertEqual(alice._state.db, 'other')
+
+ with self.assertRaises(User.DoesNotExist):
+ User.objects.using('default').get(username='alice')
+
+ # The second user only exists on the default database
+ bob = User.objects.using('default').get(username='bob')
+
+ self.assertEqual(bob.username, 'bob')
+ self.assertEqual(bob._state.db, 'default')
+
+ with self.assertRaises(User.DoesNotExist):
+ User.objects.using('other').get(username='bob')
+
+ # That is... there is one user on each database
+ self.assertEqual(User.objects.using('default').count(), 1)
+ self.assertEqual(User.objects.using('other').count(), 1)
+
+ def test_dumpdata(self):
+ "dumpdata honors allow_migrate restrictions on the router"
+ User.objects.create_user('alice', 'alice@example.com')
+ User.objects.db_manager('default').create_user('bob', 'bob@example.com')
+
+ # dumping the default database doesn't try to include auth because
+ # allow_migrate prohibits auth on default
+ new_io = StringIO()
+ management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
+ command_output = new_io.getvalue().strip()
+ self.assertEqual(command_output, '[]')
+
+ # dumping the other database does include auth
+ new_io = StringIO()
+ management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
+ command_output = new_io.getvalue().strip()
+ self.assertIn('"email": "alice@example.com"', command_output)
+
+
+class AntiPetRouter(object):
+ # A router that only expresses an opinion on migrate,
+ # passing pets to the 'other' database
+
+ def allow_migrate(self, db, app_label, model_name=None, **hints):
+ if db == 'other':
+ return model_name == 'pet'
+ else:
+ return model_name != 'pet'
+
+
+class FixtureTestCase(TestCase):
+ multi_db = True
+ fixtures = ['multidb-common', 'multidb']
+
+ @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
+ def test_fixture_loading(self):
+ "Multi-db fixtures are loaded correctly"
+ # "Pro Django" exists on the default database, but not on other database
+ Book.objects.get(title="Pro Django")
+ Book.objects.using('default').get(title="Pro Django")
+
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('other').get(title="Pro Django")
+
+ # "Dive into Python" exists on the default database, but not on other database
+ Book.objects.using('other').get(title="Dive into Python")
+
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.get(title="Dive into Python")
+ with self.assertRaises(Book.DoesNotExist):
+ Book.objects.using('default').get(title="Dive into Python")
+
+ # "Definitive Guide" exists on the both databases
+ Book.objects.get(title="The Definitive Guide to Django")
+ Book.objects.using('default').get(title="The Definitive Guide to Django")
+ Book.objects.using('other').get(title="The Definitive Guide to Django")
+
+ @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
+ def test_pseudo_empty_fixtures(self):
+ """
+ A fixture can contain entries, but lead to nothing in the database;
+ this shouldn't raise an error (#14068).
+ """
+ new_io = StringIO()
+ management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
+ command_output = new_io.getvalue().strip()
+ # No objects will actually be loaded
+ self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
+
+
+class PickleQuerySetTestCase(TestCase):
+ multi_db = True
+
+ def test_pickling(self):
+ for db in connections:
+ Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))
+ qs = Book.objects.all()
+ self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)
+
+
+class DatabaseReceiver(object):
+ """
+ Used in the tests for the database argument in signals (#13552)
+ """
+ def __call__(self, signal, sender, **kwargs):
+ self._database = kwargs['using']
+
+
+class WriteToOtherRouter(object):
+ """
+ A router that sends all writes to the other database.
+ """
+ def db_for_write(self, model, **hints):
+ return "other"
+
+
+class SignalTests(TestCase):
+ multi_db = True
+
+ def override_router(self):
+ return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])
+
+ def test_database_arg_save_and_delete(self):
+ """
+ The pre/post_save signal contains the correct database.
+ """
+ # Make some signal receivers
+ pre_save_receiver = DatabaseReceiver()
+ post_save_receiver = DatabaseReceiver()
+ pre_delete_receiver = DatabaseReceiver()
+ post_delete_receiver = DatabaseReceiver()
+ # Make model and connect receivers
+ signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
+ signals.post_save.connect(sender=Person, receiver=post_save_receiver)
+ signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
+ signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
+ p = Person.objects.create(name='Darth Vader')
+ # Save and test receivers got calls
+ p.save()
+ self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
+ self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
+ # Delete, and test
+ p.delete()
+ self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
+ self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
+ # Save again to a different database
+ p.save(using="other")
+ self.assertEqual(pre_save_receiver._database, "other")
+ self.assertEqual(post_save_receiver._database, "other")
+ # Delete, and test
+ p.delete(using="other")
+ self.assertEqual(pre_delete_receiver._database, "other")
+ self.assertEqual(post_delete_receiver._database, "other")
+
+ signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
+ signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
+ signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
+ signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)
+
+ def test_database_arg_m2m(self):
+ """
+ The m2m_changed signal has a correct database arg.
+ """
+ # Make a receiver
+ receiver = DatabaseReceiver()
+ # Connect it
+ signals.m2m_changed.connect(receiver=receiver)
+
+ # Create the models that will be used for the tests
+ b = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ p = Person.objects.create(name="Marty Alchin")
+
+ # Create a copy of the models on the 'other' database to prevent
+ # integrity errors on backends that don't defer constraints checks
+ Book.objects.using('other').create(pk=b.pk, title=b.title,
+ published=b.published)
+ Person.objects.using('other').create(pk=p.pk, name=p.name)
+
+ # Test addition
+ b.authors.add(p)
+ self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
+ with self.override_router():
+ b.authors.add(p)
+ self.assertEqual(receiver._database, "other")
+
+ # Test removal
+ b.authors.remove(p)
+ self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
+ with self.override_router():
+ b.authors.remove(p)
+ self.assertEqual(receiver._database, "other")
+
+ # Test addition in reverse
+ p.book_set.add(b)
+ self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
+ with self.override_router():
+ p.book_set.add(b)
+ self.assertEqual(receiver._database, "other")
+
+ # Test clearing
+ b.authors.clear()
+ self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
+ with self.override_router():
+ b.authors.clear()
+ self.assertEqual(receiver._database, "other")
+
+
+class AttributeErrorRouter(object):
+ "A router to test the exception handling of ConnectionRouter"
+ def db_for_read(self, model, **hints):
+ raise AttributeError
+
+ def db_for_write(self, model, **hints):
+ raise AttributeError
+
+
+class RouterAttributeErrorTestCase(TestCase):
+ multi_db = True
+
+ def override_router(self):
+ return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])
+
+ def test_attribute_error_read(self):
+ "The AttributeError from AttributeErrorRouter bubbles up"
+ b = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ with self.override_router():
+ with self.assertRaises(AttributeError):
+ Book.objects.get(pk=b.pk)
+
+ def test_attribute_error_save(self):
+ "The AttributeError from AttributeErrorRouter bubbles up"
+ dive = Book()
+ dive.title = "Dive into Python"
+ dive.published = datetime.date(2009, 5, 4)
+ with self.override_router():
+ with self.assertRaises(AttributeError):
+ dive.save()
+
+ def test_attribute_error_delete(self):
+ "The AttributeError from AttributeErrorRouter bubbles up"
+ b = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ p = Person.objects.create(name="Marty Alchin")
+ b.authors.set([p])
+ b.editor = p
+ with self.override_router():
+ with self.assertRaises(AttributeError):
+ b.delete()
+
+ def test_attribute_error_m2m(self):
+ "The AttributeError from AttributeErrorRouter bubbles up"
+ b = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ p = Person.objects.create(name="Marty Alchin")
+ with self.override_router():
+ with self.assertRaises(AttributeError):
+ b.authors.set([p])
+
+
+class ModelMetaRouter(object):
+ "A router to ensure model arguments are real model classes"
+ def db_for_write(self, model, **hints):
+ if not hasattr(model, '_meta'):
+ raise ValueError
+
+
+@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
+class RouterModelArgumentTestCase(TestCase):
+ multi_db = True
+
+ def test_m2m_collection(self):
+ b = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+
+ p = Person.objects.create(name="Marty Alchin")
+ # test add
+ b.authors.add(p)
+ # test remove
+ b.authors.remove(p)
+ # test clear
+ b.authors.clear()
+ # test setattr
+ b.authors.set([p])
+ # test M2M collection
+ b.delete()
+
+ def test_foreignkey_collection(self):
+ person = Person.objects.create(name='Bob')
+ Pet.objects.create(owner=person, name='Wart')
+ # test related FK collection
+ person.delete()
+
+
+class SyncOnlyDefaultDatabaseRouter(object):
+ def allow_migrate(self, db, app_label, **hints):
+ return db == DEFAULT_DB_ALIAS
+
+
+class MigrateTestCase(TestCase):
+
+ # Limit memory usage when calling 'migrate'.
+ available_apps = [
+ 'multiple_database',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes'
+ ]
+ multi_db = True
+
+ def test_migrate_to_other_database(self):
+ """Regression test for #16039: migrate with --database option."""
+ cts = ContentType.objects.using('other').filter(app_label='multiple_database')
+
+ count = cts.count()
+ self.assertGreater(count, 0)
+
+ cts.delete()
+ management.call_command('migrate', verbosity=0, interactive=False, database='other')
+ self.assertEqual(cts.count(), count)
+
+ def test_migrate_to_other_database_with_router(self):
+ """Regression test for #16039: migrate with --database option."""
+ cts = ContentType.objects.using('other').filter(app_label='multiple_database')
+
+ cts.delete()
+ with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
+ management.call_command('migrate', verbosity=0, interactive=False, database='other')
+
+ self.assertEqual(cts.count(), 0)
+
+
+class RouterUsed(Exception):
+ WRITE = 'write'
+
+ def __init__(self, mode, model, hints):
+ self.mode = mode
+ self.model = model
+ self.hints = hints
+
+
+class RouteForWriteTestCase(TestCase):
+ multi_db = True
+
+ class WriteCheckRouter(object):
+ def db_for_write(self, model, **hints):
+ raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)
+
+ def override_router(self):
+ return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])
+
+ def test_fk_delete(self):
+ owner = Person.objects.create(name='Someone')
+ pet = Pet.objects.create(name='fido', owner=owner)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ pet.owner.delete()
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Person)
+ self.assertEqual(e.hints, {'instance': owner})
+
+ def test_reverse_fk_delete(self):
+ owner = Person.objects.create(name='Someone')
+ to_del_qs = owner.pet_set.all()
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ to_del_qs.delete()
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Pet)
+ self.assertEqual(e.hints, {'instance': owner})
+
+ def test_reverse_fk_get_or_create(self):
+ owner = Person.objects.create(name='Someone')
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ owner.pet_set.get_or_create(name='fido')
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Pet)
+ self.assertEqual(e.hints, {'instance': owner})
+
+ def test_reverse_fk_update(self):
+ owner = Person.objects.create(name='Someone')
+ Pet.objects.create(name='fido', owner=owner)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ owner.pet_set.update(name='max')
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Pet)
+ self.assertEqual(e.hints, {'instance': owner})
+
+ def test_m2m_add(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ book.authors.add(auth)
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book.authors.through)
+ self.assertEqual(e.hints, {'instance': book})
+
+ def test_m2m_clear(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ book.authors.clear()
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book.authors.through)
+ self.assertEqual(e.hints, {'instance': book})
+
+ def test_m2m_delete(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ book.authors.all().delete()
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Person)
+ self.assertEqual(e.hints, {'instance': book})
+
+ def test_m2m_get_or_create(self):
+ Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ book.authors.get_or_create(name='Someone else')
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book)
+ self.assertEqual(e.hints, {'instance': book})
+
+ def test_m2m_remove(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ book.authors.remove(auth)
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book.authors.through)
+ self.assertEqual(e.hints, {'instance': book})
+
+ def test_m2m_update(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ book.authors.all().update(name='Different')
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Person)
+ self.assertEqual(e.hints, {'instance': book})
+
+ def test_reverse_m2m_add(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ auth.book_set.add(book)
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book.authors.through)
+ self.assertEqual(e.hints, {'instance': auth})
+
+ def test_reverse_m2m_clear(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ auth.book_set.clear()
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book.authors.through)
+ self.assertEqual(e.hints, {'instance': auth})
+
+ def test_reverse_m2m_delete(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ auth.book_set.all().delete()
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book)
+ self.assertEqual(e.hints, {'instance': auth})
+
+ def test_reverse_m2m_get_or_create(self):
+ auth = Person.objects.create(name='Someone')
+ Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Person)
+ self.assertEqual(e.hints, {'instance': auth})
+
+ def test_reverse_m2m_remove(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ auth.book_set.remove(book)
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book.authors.through)
+ self.assertEqual(e.hints, {'instance': auth})
+
+ def test_reverse_m2m_update(self):
+ auth = Person.objects.create(name='Someone')
+ book = Book.objects.create(title="Pro Django",
+ published=datetime.date(2008, 12, 16))
+ book.authors.add(auth)
+ with self.assertRaises(RouterUsed) as cm:
+ with self.override_router():
+ auth.book_set.all().update(title='Different')
+ e = cm.exception
+ self.assertEqual(e.mode, RouterUsed.WRITE)
+ self.assertEqual(e.model, Book)
+ self.assertEqual(e.hints, {'instance': auth})
diff --git a/tests/nested_foreign_keys/__init__.py b/tests/nested_foreign_keys/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/nested_foreign_keys/models.py b/tests/nested_foreign_keys/models.py
new file mode 100644
index 00000000..5805de5d
--- /dev/null
+++ b/tests/nested_foreign_keys/models.py
@@ -0,0 +1,30 @@
+from django.db import models
+
+
+class Person(models.Model):
+ name = models.CharField(max_length=200)
+
+
+class Movie(models.Model):
+ title = models.CharField(max_length=200)
+ director = models.ForeignKey(Person, models.CASCADE)
+
+
+class Event(models.Model):
+ pass
+
+
+class Screening(Event):
+ movie = models.ForeignKey(Movie, models.CASCADE)
+
+
+class ScreeningNullFK(Event):
+ movie = models.ForeignKey(Movie, models.SET_NULL, null=True)
+
+
+class Package(models.Model):
+ screening = models.ForeignKey(Screening, models.SET_NULL, null=True)
+
+
+class PackageNullFK(models.Model):
+ screening = models.ForeignKey(ScreeningNullFK, models.SET_NULL, null=True)
diff --git a/tests/nested_foreign_keys/tests.py b/tests/nested_foreign_keys/tests.py
new file mode 100644
index 00000000..34a3703e
--- /dev/null
+++ b/tests/nested_foreign_keys/tests.py
@@ -0,0 +1,176 @@
+from __future__ import unicode_literals
+
+from django.test import TestCase
+
+from .models import (
+ Event, Movie, Package, PackageNullFK, Person, Screening, ScreeningNullFK,
+)
+
+
+# These are tests for #16715. The basic scheme is always the same: 3 models with
+# 2 relations. The first relation may be null, while the second is non-nullable.
+# In some cases, Django would pick the wrong join type for the second relation,
+# resulting in missing objects in the queryset.
+#
+# Model A
+# | (Relation A/B : nullable)
+# Model B
+# | (Relation B/C : non-nullable)
+# Model C
+#
+# Because of the possibility of NULL rows resulting from the LEFT OUTER JOIN
+# between Model A and Model B (i.e. instances of A without reference to B),
+# the second join must also be LEFT OUTER JOIN, so that we do not ignore
+# instances of A that do not reference B.
+#
+# Relation A/B can either be an explicit foreign key or an implicit reverse
+# relation such as introduced by one-to-one relations (through multi-table
+# inheritance).
+class NestedForeignKeysTests(TestCase):
+ def setUp(self):
+ self.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
+ self.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=self.director)
+
+ # This test failed in #16715 because in some cases INNER JOIN was selected
+ # for the second foreign key relation instead of LEFT OUTER JOIN.
+ def test_inheritance(self):
+ Event.objects.create()
+ Screening.objects.create(movie=self.movie)
+
+ self.assertEqual(len(Event.objects.all()), 2)
+ self.assertEqual(len(Event.objects.select_related('screening')), 2)
+ # This failed.
+ self.assertEqual(len(Event.objects.select_related('screening__movie')), 2)
+
+ self.assertEqual(len(Event.objects.values()), 2)
+ self.assertEqual(len(Event.objects.values('screening__pk')), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__pk')), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__title')), 2)
+ # This failed.
+ self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
+
+ # Simple filter/exclude queries for good measure.
+ self.assertEqual(Event.objects.filter(screening__movie=self.movie).count(), 1)
+ self.assertEqual(Event.objects.exclude(screening__movie=self.movie).count(), 1)
+
+ # These all work because the second foreign key in the chain has null=True.
+ def test_inheritance_null_FK(self):
+ Event.objects.create()
+ ScreeningNullFK.objects.create(movie=None)
+ ScreeningNullFK.objects.create(movie=self.movie)
+
+ self.assertEqual(len(Event.objects.all()), 3)
+ self.assertEqual(len(Event.objects.select_related('screeningnullfk')), 3)
+ self.assertEqual(len(Event.objects.select_related('screeningnullfk__movie')), 3)
+
+ self.assertEqual(len(Event.objects.values()), 3)
+ self.assertEqual(len(Event.objects.values('screeningnullfk__pk')), 3)
+ self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk')), 3)
+ self.assertEqual(len(Event.objects.values('screeningnullfk__movie__title')), 3)
+ self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk', 'screeningnullfk__movie__title')), 3)
+
+ self.assertEqual(Event.objects.filter(screeningnullfk__movie=self.movie).count(), 1)
+ self.assertEqual(Event.objects.exclude(screeningnullfk__movie=self.movie).count(), 2)
+
+ def test_null_exclude(self):
+ screening = ScreeningNullFK.objects.create(movie=None)
+ ScreeningNullFK.objects.create(movie=self.movie)
+ self.assertEqual(
+ list(ScreeningNullFK.objects.exclude(movie__id=self.movie.pk)),
+ [screening])
+
+ # This test failed in #16715 because in some cases INNER JOIN was selected
+ # for the second foreign key relation instead of LEFT OUTER JOIN.
+ def test_explicit_ForeignKey(self):
+ Package.objects.create()
+ screening = Screening.objects.create(movie=self.movie)
+ Package.objects.create(screening=screening)
+
+ self.assertEqual(len(Package.objects.all()), 2)
+ self.assertEqual(len(Package.objects.select_related('screening')), 2)
+ self.assertEqual(len(Package.objects.select_related('screening__movie')), 2)
+
+ self.assertEqual(len(Package.objects.values()), 2)
+ self.assertEqual(len(Package.objects.values('screening__pk')), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__pk')), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__title')), 2)
+ # This failed.
+ self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
+
+ self.assertEqual(Package.objects.filter(screening__movie=self.movie).count(), 1)
+ self.assertEqual(Package.objects.exclude(screening__movie=self.movie).count(), 1)
+
+ # These all work because the second foreign key in the chain has null=True.
+ def test_explicit_ForeignKey_NullFK(self):
+ PackageNullFK.objects.create()
+ screening = ScreeningNullFK.objects.create(movie=None)
+ screening_with_movie = ScreeningNullFK.objects.create(movie=self.movie)
+ PackageNullFK.objects.create(screening=screening)
+ PackageNullFK.objects.create(screening=screening_with_movie)
+
+ self.assertEqual(len(PackageNullFK.objects.all()), 3)
+ self.assertEqual(len(PackageNullFK.objects.select_related('screening')), 3)
+ self.assertEqual(len(PackageNullFK.objects.select_related('screening__movie')), 3)
+
+ self.assertEqual(len(PackageNullFK.objects.values()), 3)
+ self.assertEqual(len(PackageNullFK.objects.values('screening__pk')), 3)
+ self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk')), 3)
+ self.assertEqual(len(PackageNullFK.objects.values('screening__movie__title')), 3)
+ self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk', 'screening__movie__title')), 3)
+
+ self.assertEqual(PackageNullFK.objects.filter(screening__movie=self.movie).count(), 1)
+ self.assertEqual(PackageNullFK.objects.exclude(screening__movie=self.movie).count(), 2)
+
+
+# Some additional tests for #16715. The only difference is the depth of the
+# nesting as we now use 4 models instead of 3 (and thus 3 relations). This
+# checks if promotion of join types works for deeper nesting too.
+class DeeplyNestedForeignKeysTests(TestCase):
+ def setUp(self):
+ self.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
+ self.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=self.director)
+
+ def test_inheritance(self):
+ Event.objects.create()
+ Screening.objects.create(movie=self.movie)
+
+ self.assertEqual(len(Event.objects.all()), 2)
+ self.assertEqual(len(Event.objects.select_related('screening__movie__director')), 2)
+
+ self.assertEqual(len(Event.objects.values()), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__director__pk')), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__director__name')), 2)
+ self.assertEqual(
+ len(Event.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
+ 2
+ )
+ self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
+ self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
+
+ self.assertEqual(Event.objects.filter(screening__movie__director=self.director).count(), 1)
+ self.assertEqual(Event.objects.exclude(screening__movie__director=self.director).count(), 1)
+
+ def test_explicit_ForeignKey(self):
+ Package.objects.create()
+ screening = Screening.objects.create(movie=self.movie)
+ Package.objects.create(screening=screening)
+
+ self.assertEqual(len(Package.objects.all()), 2)
+ self.assertEqual(len(Package.objects.select_related('screening__movie__director')), 2)
+
+ self.assertEqual(len(Package.objects.values()), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__director__pk')), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__director__name')), 2)
+ self.assertEqual(
+ len(Package.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
+ 2
+ )
+ self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
+ self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
+
+ self.assertEqual(Package.objects.filter(screening__movie__director=self.director).count(), 1)
+ self.assertEqual(Package.objects.exclude(screening__movie__director=self.director).count(), 1)
diff --git a/tests/null_fk/__init__.py b/tests/null_fk/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/null_fk/models.py b/tests/null_fk/models.py
new file mode 100644
index 00000000..6a7da8f6
--- /dev/null
+++ b/tests/null_fk/models.py
@@ -0,0 +1,57 @@
+"""
+Regression tests for proper working of ForeignKey(null=True).
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+class SystemDetails(models.Model):
+ details = models.TextField()
+
+
+class SystemInfo(models.Model):
+ system_details = models.ForeignKey(SystemDetails, models.CASCADE)
+ system_name = models.CharField(max_length=32)
+
+
+class Forum(models.Model):
+ system_info = models.ForeignKey(SystemInfo, models.CASCADE)
+ forum_name = models.CharField(max_length=32)
+
+
+@python_2_unicode_compatible
+class Post(models.Model):
+ forum = models.ForeignKey(Forum, models.SET_NULL, null=True)
+ title = models.CharField(max_length=32)
+
+ def __str__(self):
+ return self.title
+
+
+@python_2_unicode_compatible
+class Comment(models.Model):
+ post = models.ForeignKey(Post, models.SET_NULL, null=True)
+ comment_text = models.CharField(max_length=250)
+
+ class Meta:
+ ordering = ('comment_text',)
+
+ def __str__(self):
+ return self.comment_text
+
+# Ticket 15823
+
+
+class Item(models.Model):
+ title = models.CharField(max_length=100)
+
+
+class PropertyValue(models.Model):
+ label = models.CharField(max_length=100)
+
+
+class Property(models.Model):
+ item = models.ForeignKey(Item, models.CASCADE, related_name='props')
+ key = models.CharField(max_length=100)
+ value = models.ForeignKey(PropertyValue, models.SET_NULL, null=True)
diff --git a/tests/null_fk/tests.py b/tests/null_fk/tests.py
new file mode 100644
index 00000000..19b285e3
--- /dev/null
+++ b/tests/null_fk/tests.py
@@ -0,0 +1,70 @@
+from __future__ import unicode_literals
+
+from django.db.models import Q
+from django.test import TestCase
+
+from .models import (
+ Comment, Forum, Item, Post, PropertyValue, SystemDetails, SystemInfo,
+)
+
+
+class NullFkTests(TestCase):
+
+ def test_null_fk(self):
+ d = SystemDetails.objects.create(details='First details')
+ s = SystemInfo.objects.create(system_name='First forum', system_details=d)
+ f = Forum.objects.create(system_info=s, forum_name='First forum')
+ p = Post.objects.create(forum=f, title='First Post')
+ c1 = Comment.objects.create(post=p, comment_text='My first comment')
+ c2 = Comment.objects.create(comment_text='My second comment')
+
+ # Starting from comment, make sure that a .select_related(...) with a specified
+ # set of fields will properly LEFT JOIN multiple levels of NULLs (and the things
+ # that come after the NULLs, or else data that should exist won't). Regression
+ # test for #7369.
+ c = Comment.objects.select_related().get(id=c1.id)
+ self.assertEqual(c.post, p)
+ self.assertIsNone(Comment.objects.select_related().get(id=c2.id).post)
+
+ self.assertQuerysetEqual(
+ Comment.objects.select_related('post__forum__system_info').all(),
+ [
+ (c1.id, 'My first comment', ''),
+ (c2.id, 'My second comment', 'None')
+ ],
+ transform=lambda c: (c.id, c.comment_text, repr(c.post))
+ )
+
+ # Regression test for #7530, #7716.
+ self.assertIsNone(Comment.objects.select_related('post').filter(post__isnull=True)[0].post)
+
+ self.assertQuerysetEqual(
+ Comment.objects.select_related('post__forum__system_info__system_details'),
+ [
+ (c1.id, 'My first comment', ''),
+ (c2.id, 'My second comment', 'None')
+ ],
+ transform=lambda c: (c.id, c.comment_text, repr(c.post))
+ )
+
+ def test_combine_isnull(self):
+ item = Item.objects.create(title='Some Item')
+ pv = PropertyValue.objects.create(label='Some Value')
+ item.props.create(key='a', value=pv)
+ item.props.create(key='b') # value=NULL
+ q1 = Q(props__key='a', props__value=pv)
+ q2 = Q(props__key='b', props__value__isnull=True)
+
+ # Each of these individually should return the item.
+ self.assertEqual(Item.objects.get(q1), item)
+ self.assertEqual(Item.objects.get(q2), item)
+
+ # Logically, qs1 and qs2, and qs3 and qs4 should be the same.
+ qs1 = Item.objects.filter(q1) & Item.objects.filter(q2)
+ qs2 = Item.objects.filter(q2) & Item.objects.filter(q1)
+ qs3 = Item.objects.filter(q1) | Item.objects.filter(q2)
+ qs4 = Item.objects.filter(q2) | Item.objects.filter(q1)
+
+ # Regression test for #15823.
+ self.assertEqual(list(qs1), list(qs2))
+ self.assertEqual(list(qs3), list(qs4))
diff --git a/tests/null_fk_ordering/__init__.py b/tests/null_fk_ordering/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/null_fk_ordering/models.py b/tests/null_fk_ordering/models.py
new file mode 100644
index 00000000..0bac5e1a
--- /dev/null
+++ b/tests/null_fk_ordering/models.py
@@ -0,0 +1,59 @@
+"""
+Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
+
+ * #7512: including a nullable foreign key reference in Meta ordering has
+unexpected results
+
+"""
+from __future__ import unicode_literals
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+# The first two models represent a very simple null FK ordering case.
+class Author(models.Model):
+ name = models.CharField(max_length=150)
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ title = models.CharField(max_length=150)
+ author = models.ForeignKey(Author, models.SET_NULL, null=True)
+
+ def __str__(self):
+ return 'Article titled: %s' % (self.title, )
+
+ class Meta:
+ ordering = ['author__name', ]
+
+
+# These following 4 models represent a far more complex ordering case.
+class SystemInfo(models.Model):
+ system_name = models.CharField(max_length=32)
+
+
+class Forum(models.Model):
+ system_info = models.ForeignKey(SystemInfo, models.CASCADE)
+ forum_name = models.CharField(max_length=32)
+
+
+@python_2_unicode_compatible
+class Post(models.Model):
+ forum = models.ForeignKey(Forum, models.SET_NULL, null=True)
+ title = models.CharField(max_length=32)
+
+ def __str__(self):
+ return self.title
+
+
+@python_2_unicode_compatible
+class Comment(models.Model):
+ post = models.ForeignKey(Post, models.SET_NULL, null=True)
+ comment_text = models.CharField(max_length=250)
+
+ class Meta:
+ ordering = ['post__forum__system_info__system_name', 'comment_text']
+
+ def __str__(self):
+ return self.comment_text
diff --git a/tests/null_fk_ordering/tests.py b/tests/null_fk_ordering/tests.py
new file mode 100644
index 00000000..7215118b
--- /dev/null
+++ b/tests/null_fk_ordering/tests.py
@@ -0,0 +1,42 @@
+from __future__ import unicode_literals
+
+from django.test import TestCase
+
+from .models import Article, Author, Comment, Forum, Post, SystemInfo
+
+
+class NullFkOrderingTests(TestCase):
+
+ def test_ordering_across_null_fk(self):
+ """
+ Regression test for #7512
+
+ ordering across nullable Foreign Keys shouldn't exclude results
+ """
+ author_1 = Author.objects.create(name='Tom Jones')
+ author_2 = Author.objects.create(name='Bob Smith')
+ Article.objects.create(title='No author on this article')
+ Article.objects.create(author=author_1, title='This article written by Tom Jones')
+ Article.objects.create(author=author_2, title='This article written by Bob Smith')
+
+ # We can't compare results directly (since different databases sort NULLs to
+ # different ends of the ordering), but we can check that all results are
+ # returned.
+ self.assertEqual(len(list(Article.objects.all())), 3)
+
+ s = SystemInfo.objects.create(system_name='System Info')
+ f = Forum.objects.create(system_info=s, forum_name='First forum')
+ p = Post.objects.create(forum=f, title='First Post')
+ Comment.objects.create(post=p, comment_text='My first comment')
+ Comment.objects.create(comment_text='My second comment')
+ s2 = SystemInfo.objects.create(system_name='More System Info')
+ f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
+ p2 = Post.objects.create(forum=f2, title='Second Post')
+ Comment.objects.create(comment_text='Another first comment')
+ Comment.objects.create(post=p2, comment_text='Another second comment')
+
+ # We have to test this carefully. Some databases sort NULL values before
+ # everything else, some sort them afterwards. So we extract the ordered list
+ # and check the length. Before the fix, this list was too short (some values
+ # were omitted).
+ self.assertEqual(len(list(Comment.objects.all())), 4)
diff --git a/tests/or_lookups/__init__.py b/tests/or_lookups/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/or_lookups/models.py b/tests/or_lookups/models.py
new file mode 100644
index 00000000..7dea8cd4
--- /dev/null
+++ b/tests/or_lookups/models.py
@@ -0,0 +1,25 @@
+"""
+OR lookups
+
+To perform an OR lookup, or a lookup that combines ANDs and ORs, combine
+``QuerySet`` objects using ``&`` and ``|`` operators.
+
+Alternatively, use positional arguments, and pass one or more expressions of
+clauses using the variable ``django.db.models.Q`` (or any object with an
+``add_to_query`` method).
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ headline = models.CharField(max_length=50)
+ pub_date = models.DateTimeField()
+
+ class Meta:
+ ordering = ('pub_date',)
+
+ def __str__(self):
+ return self.headline
diff --git a/tests/or_lookups/tests.py b/tests/or_lookups/tests.py
new file mode 100644
index 00000000..5e8b3dd6
--- /dev/null
+++ b/tests/or_lookups/tests.py
@@ -0,0 +1,243 @@
+# -*- encoding: utf-8 -*-
+from __future__ import unicode_literals
+
+from datetime import datetime
+from operator import attrgetter
+
+from django.db.models import Q
+from django.test import TestCase
+from django.utils.encoding import force_str
+
+from .models import Article
+
+
+class OrLookupsTests(TestCase):
+
+ def setUp(self):
+ self.a1 = Article.objects.create(
+ headline='Hello', pub_date=datetime(2005, 11, 27)
+ ).pk
+ self.a2 = Article.objects.create(
+ headline='Goodbye', pub_date=datetime(2005, 11, 28)
+ ).pk
+ self.a3 = Article.objects.create(
+ headline='Hello and goodbye', pub_date=datetime(2005, 11, 29)
+ ).pk
+
+ def test_filter_or(self):
+ self.assertQuerysetEqual(
+ (
+ Article.objects.filter(headline__startswith='Hello') |
+ Article.objects.filter(headline__startswith='Goodbye')
+ ), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ def test_stages(self):
+ # You can shorten this syntax with code like the following, which is
+ # especially useful if building the query in stages:
+ articles = Article.objects.all()
+ self.assertQuerysetEqual(
+ articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'),
+ []
+ )
+ self.assertQuerysetEqual(
+ articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ def test_pk_q(self):
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [
+ 'Hello',
+ 'Goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ def test_pk_in(self):
+ self.assertQuerysetEqual(
+ Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [
+ 'Hello',
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ def test_q_repr(self):
+ or_expr = Q(baz=Article(headline="Foö"))
+ self.assertEqual(repr(or_expr), force_str("))>"))
+ negated_or = ~Q(baz=Article(headline="Foö"))
+ self.assertEqual(repr(negated_or), force_str(")))>"))
+
+ def test_q_negated(self):
+ # Q objects can be negated
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [
+ 'Hello',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+ # This allows for more complex queries than filter() and exclude()
+ # alone would allow
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [
+ 'Hello'
+ ],
+ attrgetter("headline"),
+ )
+
+ def test_complex_filter(self):
+ # The 'complex_filter' method supports framework features such as
+ # 'limit_choices_to' which normally take a single dictionary of lookup
+ # arguments but need to support arbitrary queries via Q objects too.
+ self.assertQuerysetEqual(
+ Article.objects.complex_filter({'pk': self.a1}), [
+ 'Hello'
+ ],
+ attrgetter("headline"),
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [
+ 'Hello',
+ 'Goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ def test_empty_in(self):
+ # Passing "in" an empty list returns no results ...
+ self.assertQuerysetEqual(
+ Article.objects.filter(pk__in=[]),
+ []
+ )
+ # ... but can return results if we OR it with another query.
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [
+ 'Goodbye',
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ def test_q_and(self):
+ # Q arg objects are ANDed
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline")
+ )
+ # Q arg AND order is irrelevant
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [
+ 'Hello and goodbye'
+ ],
+ attrgetter("headline"),
+ )
+
+ self.assertQuerysetEqual(
+ Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')),
+ []
+ )
+
+ def test_q_exclude(self):
+ self.assertQuerysetEqual(
+ Article.objects.exclude(Q(headline__startswith='Hello')), [
+ 'Goodbye'
+ ],
+ attrgetter("headline")
+ )
+
+ def test_other_arg_queries(self):
+ # Try some arg queries with operations other than filter.
+ self.assertEqual(
+ Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline,
+ 'Hello and goodbye'
+ )
+
+ self.assertEqual(
+ Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(),
+ 3
+ )
+
+ self.assertSequenceEqual(
+ Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [
+ {"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)},
+ ],
+ )
+
+ self.assertEqual(
+ Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]),
+ {self.a1: Article.objects.get(pk=self.a1)}
+ )
diff --git a/tests/ordering/__init__.py b/tests/ordering/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/ordering/models.py b/tests/ordering/models.py
new file mode 100644
index 00000000..1f794ca3
--- /dev/null
+++ b/tests/ordering/models.py
@@ -0,0 +1,51 @@
+"""
+Specifying ordering
+
+Specify default ordering for a model using the ``ordering`` attribute, which
+should be a list or tuple of field names. This tells Django how to order
+``QuerySet`` results.
+
+If a field name in ``ordering`` starts with a hyphen, that field will be
+ordered in descending order. Otherwise, it'll be ordered in ascending order.
+The special-case field name ``"?"`` specifies random order.
+
+The ordering attribute is not required. If you leave it off, ordering will be
+undefined -- not random, just undefined.
+"""
+
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+class Author(models.Model):
+ name = models.CharField(max_length=63, null=True, blank=True)
+
+ class Meta:
+ ordering = ('-pk',)
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ author = models.ForeignKey(Author, models.SET_NULL, null=True)
+ second_author = models.ForeignKey(Author, models.SET_NULL, null=True, related_name='+')
+ headline = models.CharField(max_length=100)
+ pub_date = models.DateTimeField()
+
+ class Meta:
+ ordering = ('-pub_date', 'headline')
+
+ def __str__(self):
+ return self.headline
+
+
+class OrderedByAuthorArticle(Article):
+ class Meta:
+ proxy = True
+ ordering = ('author', 'second_author')
+
+
+class Reference(models.Model):
+ article = models.ForeignKey(OrderedByAuthorArticle, models.CASCADE)
+
+ class Meta:
+ ordering = ('article',)
diff --git a/tests/ordering/tests.py b/tests/ordering/tests.py
new file mode 100644
index 00000000..cfdb3bbf
--- /dev/null
+++ b/tests/ordering/tests.py
@@ -0,0 +1,369 @@
+from __future__ import unicode_literals
+
+from datetime import datetime
+from operator import attrgetter
+
+import django
+from django.db.models import F
+from django.db.models.functions import Upper
+from django.test import TestCase
+
+from .models import Article, Author, Reference
+
+
+class OrderingTests(TestCase):
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.a1 = Article.objects.create(headline="Article 1", pub_date=datetime(2005, 7, 26))
+ cls.a2 = Article.objects.create(headline="Article 2", pub_date=datetime(2005, 7, 27))
+ cls.a3 = Article.objects.create(headline="Article 3", pub_date=datetime(2005, 7, 27))
+ cls.a4 = Article.objects.create(headline="Article 4", pub_date=datetime(2005, 7, 28))
+ cls.author_1 = Author.objects.create(name="Name 1")
+ cls.author_2 = Author.objects.create(name="Name 2")
+ for i in range(2):
+ Author.objects.create()
+
+ def test_default_ordering(self):
+ """
+ By default, Article.objects.all() orders by pub_date descending, then
+ headline ascending.
+ """
+ self.assertQuerysetEqual(
+ Article.objects.all(), [
+ "Article 4",
+ "Article 2",
+ "Article 3",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ # Getting a single item should work too:
+ self.assertEqual(Article.objects.all()[0], self.a4)
+
+ def test_default_ordering_override(self):
+ """
+ Override ordering with order_by, which is in the same format as the
+ ordering attribute in models.
+ """
+ self.assertQuerysetEqual(
+ Article.objects.order_by("headline"), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ self.assertQuerysetEqual(
+ Article.objects.order_by("pub_date", "-headline"), [
+ "Article 1",
+ "Article 3",
+ "Article 2",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_order_by_override(self):
+ """
+ Only the last order_by has any effect (since they each override any
+ previous ordering).
+ """
+ self.assertQuerysetEqual(
+ Article.objects.order_by("id"), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ self.assertQuerysetEqual(
+ Article.objects.order_by("id").order_by("-headline"), [
+ "Article 4",
+ "Article 3",
+ "Article 2",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_order_by_nulls_first_and_last(self):
+ if django.VERSION < (1, 11, 0):
+ self.skipTest("Only run this on Django 1.11 or newer")
+ msg = "nulls_first and nulls_last are mutually exclusive"
+ with self.assertRaisesMessage(ValueError, msg):
+ Article.objects.order_by(F("author").desc(nulls_last=True, nulls_first=True))
+
+ def test_order_by_nulls_last(self):
+ self.skipTest("TODO fix django.db.utils.ProgrammingError: Incorrect syntax near 'NULLS'.")
+ Article.objects.filter(headline="Article 3").update(author=self.author_1)
+ Article.objects.filter(headline="Article 4").update(author=self.author_2)
+ # asc and desc are chainable with nulls_last.
+ self.assertSequenceEqual(
+ Article.objects.order_by(F("author").desc(nulls_last=True)),
+ [self.a4, self.a3, self.a1, self.a2],
+ )
+ self.assertSequenceEqual(
+ Article.objects.order_by(F("author").asc(nulls_last=True)),
+ [self.a3, self.a4, self.a1, self.a2],
+ )
+ self.assertSequenceEqual(
+ Article.objects.order_by(Upper("author__name").desc(nulls_last=True)),
+ [self.a4, self.a3, self.a1, self.a2],
+ )
+ self.assertSequenceEqual(
+ Article.objects.order_by(Upper("author__name").asc(nulls_last=True)),
+ [self.a3, self.a4, self.a1, self.a2],
+ )
+
+ def test_order_by_nulls_first(self):
+ self.skipTest("TODO fix django.db.utils.ProgrammingError: Incorrect syntax near 'NULLS'.")
+ Article.objects.filter(headline="Article 3").update(author=self.author_1)
+ Article.objects.filter(headline="Article 4").update(author=self.author_2)
+ # asc and desc are chainable with nulls_first.
+ self.assertSequenceEqual(
+ Article.objects.order_by(F("author").asc(nulls_first=True)),
+ [self.a1, self.a2, self.a3, self.a4],
+ )
+ self.assertSequenceEqual(
+ Article.objects.order_by(F("author").desc(nulls_first=True)),
+ [self.a1, self.a2, self.a4, self.a3],
+ )
+ self.assertSequenceEqual(
+ Article.objects.order_by(Upper("author__name").asc(nulls_first=True)),
+ [self.a1, self.a2, self.a3, self.a4],
+ )
+ self.assertSequenceEqual(
+ Article.objects.order_by(Upper("author__name").desc(nulls_first=True)),
+ [self.a1, self.a2, self.a4, self.a3],
+ )
+
+ def test_stop_slicing(self):
+ """
+ Use the 'stop' part of slicing notation to limit the results.
+ """
+ self.assertQuerysetEqual(
+ Article.objects.order_by("headline")[:2], [
+ "Article 1",
+ "Article 2",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_stop_start_slicing(self):
+ """
+ Use the 'stop' and 'start' parts of slicing notation to offset the
+ result list.
+ """
+ self.assertQuerysetEqual(
+ Article.objects.order_by("headline")[1:3], [
+ "Article 2",
+ "Article 3",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_random_ordering(self):
+ """
+ Use '?' to order randomly.
+ """
+ self.assertEqual(
+ len(list(Article.objects.order_by("?"))), 4
+ )
+
+ def test_reversed_ordering(self):
+ """
+ Ordering can be reversed using the reverse() method on a queryset.
+ This allows you to extract things like "the last two items" (reverse
+ and then take the first two).
+ """
+ self.assertQuerysetEqual(
+ Article.objects.all().reverse()[:2], [
+ "Article 1",
+ "Article 3",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_reverse_ordering_pure(self):
+ qs1 = Article.objects.order_by(F('headline').asc())
+ qs2 = qs1.reverse()
+ self.assertQuerysetEqual(
+ qs1, [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ self.assertQuerysetEqual(
+ qs2, [
+ "Article 4",
+ "Article 3",
+ "Article 2",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_extra_ordering(self):
+ """
+ Ordering can be based on fields included from an 'extra' clause
+ """
+ self.assertQuerysetEqual(
+ Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_extra_ordering_quoting(self):
+ """
+ If the extra clause uses an SQL keyword for a name, it will be
+ protected by quoting.
+ """
+ self.assertQuerysetEqual(
+ Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_extra_ordering_with_table_name(self):
+ self.assertQuerysetEqual(
+ Article.objects.extra(order_by=['ordering_article.headline']), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ self.assertQuerysetEqual(
+ Article.objects.extra(order_by=['-ordering_article.headline']), [
+ "Article 4",
+ "Article 3",
+ "Article 2",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_order_by_pk(self):
+ """
+ 'pk' works as an ordering option in Meta.
+ """
+ self.assertQuerysetEqual(
+ Author.objects.all(),
+ list(reversed(range(1, Author.objects.count() + 1))),
+ attrgetter("pk"),
+ )
+
+ def test_order_by_fk_attname(self):
+ """
+ ordering by a foreign key by its attribute name prevents the query
+ from inheriting its related model ordering option (#19195).
+ """
+ for i in range(1, 5):
+ author = Author.objects.get(pk=i)
+ article = getattr(self, "a%d" % (5 - i))
+ article.author = author
+ article.save(update_fields={'author'})
+
+ self.assertQuerysetEqual(
+ Article.objects.order_by('author_id'), [
+ "Article 4",
+ "Article 3",
+ "Article 2",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_order_by_f_expression(self):
+ self.assertQuerysetEqual(
+ Article.objects.order_by(F('headline')), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ self.assertQuerysetEqual(
+ Article.objects.order_by(F('headline').asc()), [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ self.assertQuerysetEqual(
+ Article.objects.order_by(F('headline').desc()), [
+ "Article 4",
+ "Article 3",
+ "Article 2",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_order_by_f_expression_duplicates(self):
+ """
+ A column may only be included once (the first occurrence) so we check
+ to ensure there are no duplicates by inspecting the SQL.
+ """
+ qs = Article.objects.order_by(F('headline').asc(), F('headline').desc())
+ sql = str(qs.query).upper()
+ fragment = sql[sql.find('ORDER BY'):]
+ self.assertEqual(fragment.count('HEADLINE'), 1)
+ self.assertQuerysetEqual(
+ qs, [
+ "Article 1",
+ "Article 2",
+ "Article 3",
+ "Article 4",
+ ],
+ attrgetter("headline")
+ )
+ qs = Article.objects.order_by(F('headline').desc(), F('headline').asc())
+ sql = str(qs.query).upper()
+ fragment = sql[sql.find('ORDER BY'):]
+ self.assertEqual(fragment.count('HEADLINE'), 1)
+ self.assertQuerysetEqual(
+ qs, [
+ "Article 4",
+ "Article 3",
+ "Article 2",
+ "Article 1",
+ ],
+ attrgetter("headline")
+ )
+
+ def test_related_ordering_duplicate_table_reference(self):
+ """
+ An ordering referencing a model with an ordering referencing a model
+ multiple time no circular reference should be detected (#24654).
+ """
+ first_author = Author.objects.create()
+ second_author = Author.objects.create()
+ self.a1.author = first_author
+ self.a1.second_author = second_author
+ self.a1.save()
+ self.a2.author = second_author
+ self.a2.second_author = first_author
+ self.a2.save()
+ r1 = Reference.objects.create(article_id=self.a1.pk)
+ r2 = Reference.objects.create(article_id=self.a2.pk)
+ self.assertSequenceEqual(Reference.objects.all(), [r2, r1])
diff --git a/tests/pagination/__init__.py b/tests/pagination/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/pagination/custom.py b/tests/pagination/custom.py
new file mode 100644
index 00000000..77277dca
--- /dev/null
+++ b/tests/pagination/custom.py
@@ -0,0 +1,20 @@
+from django.core.paginator import Page, Paginator
+
+
+class ValidAdjacentNumsPage(Page):
+
+ def next_page_number(self):
+ if not self.has_next():
+ return None
+ return super(ValidAdjacentNumsPage, self).next_page_number()
+
+ def previous_page_number(self):
+ if not self.has_previous():
+ return None
+ return super(ValidAdjacentNumsPage, self).previous_page_number()
+
+
+class ValidAdjacentNumsPaginator(Paginator):
+
+ def _get_page(self, *args, **kwargs):
+ return ValidAdjacentNumsPage(*args, **kwargs)
diff --git a/tests/pagination/models.py b/tests/pagination/models.py
new file mode 100644
index 00000000..9dc8d4b7
--- /dev/null
+++ b/tests/pagination/models.py
@@ -0,0 +1,11 @@
+from django.db import models
+from django.utils.encoding import python_2_unicode_compatible
+
+
+@python_2_unicode_compatible
+class Article(models.Model):
+ headline = models.CharField(max_length=100, default='Default headline')
+ pub_date = models.DateTimeField()
+
+ def __str__(self):
+ return self.headline
diff --git a/tests/pagination/tests.py b/tests/pagination/tests.py
new file mode 100644
index 00000000..2572dbe6
--- /dev/null
+++ b/tests/pagination/tests.py
@@ -0,0 +1,362 @@
+from __future__ import unicode_literals
+
+import unittest
+import warnings
+from datetime import datetime
+
+import django
+from django.core.paginator import (
+ EmptyPage, InvalidPage, PageNotAnInteger, Paginator,
+)
+if django.VERSION >= (1, 11, 0):
+ from django.core.paginator import UnorderedObjectListWarning
+
+from django.test import TestCase
+from django.utils import six
+
+from .custom import ValidAdjacentNumsPaginator
+from .models import Article
+
+
+class PaginationTests(unittest.TestCase):
+ """
+ Tests for the Paginator and Page classes.
+ """
+
+ def check_paginator(self, params, output):
+ """
+ Helper method that instantiates a Paginator object from the passed
+ params and then checks that its attributes match the passed output.
+ """
+ count, num_pages, page_range = output
+ paginator = Paginator(*params)
+ self.check_attribute('count', paginator, count, params)
+ self.check_attribute('num_pages', paginator, num_pages, params)
+ self.check_attribute('page_range', paginator, page_range, params, coerce=list)
+
+ def check_attribute(self, name, paginator, expected, params, coerce=None):
+ """
+ Helper method that checks a single attribute and gives a nice error
+ message upon test failure.
+ """
+ got = getattr(paginator, name)
+ if coerce is not None:
+ got = coerce(got)
+ self.assertEqual(
+ expected, got,
+ "For '%s', expected %s but got %s. Paginator parameters were: %s"
+ % (name, expected, got, params)
+ )
+
+ def test_paginator(self):
+ """
+ Tests the paginator attributes using varying inputs.
+ """
+ nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ ten = nine + [10]
+ eleven = ten + [11]
+ tests = (
+ # Each item is two tuples:
+ # First tuple is Paginator parameters - object_list, per_page,
+ # orphans, and allow_empty_first_page.
+ # Second tuple is resulting Paginator attributes - count,
+ # num_pages, and page_range.
+ # Ten items, varying orphans, no empty first page.
+ ((ten, 4, 0, False), (10, 3, [1, 2, 3])),
+ ((ten, 4, 1, False), (10, 3, [1, 2, 3])),
+ ((ten, 4, 2, False), (10, 2, [1, 2])),
+ ((ten, 4, 5, False), (10, 2, [1, 2])),
+ ((ten, 4, 6, False), (10, 1, [1])),
+ # Ten items, varying orphans, allow empty first page.
+ ((ten, 4, 0, True), (10, 3, [1, 2, 3])),
+ ((ten, 4, 1, True), (10, 3, [1, 2, 3])),
+ ((ten, 4, 2, True), (10, 2, [1, 2])),
+ ((ten, 4, 5, True), (10, 2, [1, 2])),
+ ((ten, 4, 6, True), (10, 1, [1])),
+ # One item, varying orphans, no empty first page.
+ (([1], 4, 0, False), (1, 1, [1])),
+ (([1], 4, 1, False), (1, 1, [1])),
+ (([1], 4, 2, False), (1, 1, [1])),
+ # One item, varying orphans, allow empty first page.
+ (([1], 4, 0, True), (1, 1, [1])),
+ (([1], 4, 1, True), (1, 1, [1])),
+ (([1], 4, 2, True), (1, 1, [1])),
+ # Zero items, varying orphans, no empty first page.
+ (([], 4, 0, False), (0, 0, [])),
+ (([], 4, 1, False), (0, 0, [])),
+ (([], 4, 2, False), (0, 0, [])),
+ # Zero items, varying orphans, allow empty first page.
+ (([], 4, 0, True), (0, 1, [1])),
+ (([], 4, 1, True), (0, 1, [1])),
+ (([], 4, 2, True), (0, 1, [1])),
+ # Number if items one less than per_page.
+ (([], 1, 0, True), (0, 1, [1])),
+ (([], 1, 0, False), (0, 0, [])),
+ (([1], 2, 0, True), (1, 1, [1])),
+ ((nine, 10, 0, True), (9, 1, [1])),
+ # Number if items equal to per_page.
+ (([1], 1, 0, True), (1, 1, [1])),
+ (([1, 2], 2, 0, True), (2, 1, [1])),
+ ((ten, 10, 0, True), (10, 1, [1])),
+ # Number if items one more than per_page.
+ (([1, 2], 1, 0, True), (2, 2, [1, 2])),
+ (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
+ ((eleven, 10, 0, True), (11, 2, [1, 2])),
+ # Number if items one more than per_page with one orphan.
+ (([1, 2], 1, 1, True), (2, 1, [1])),
+ (([1, 2, 3], 2, 1, True), (3, 1, [1])),
+ ((eleven, 10, 1, True), (11, 1, [1])),
+ # Non-integer inputs
+ ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
+ ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
+ ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
+ ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
+ )
+ for params, output in tests:
+ self.check_paginator(params, output)
+
+ def test_invalid_page_number(self):
+ """
+ Invalid page numbers result in the correct exception being raised.
+ """
+ paginator = Paginator([1, 2, 3], 2)
+ with self.assertRaises(InvalidPage):
+ paginator.page(3)
+ with self.assertRaises(PageNotAnInteger):
+ paginator.validate_number(None)
+ with self.assertRaises(PageNotAnInteger):
+ paginator.validate_number('x')
+ # With no content and allow_empty_first_page=True, 1 is a valid page number
+ paginator = Paginator([], 2)
+ self.assertEqual(paginator.validate_number(1), 1)
+
+ def test_paginate_misc_classes(self):
+ class CountContainer(object):
+ def count(self):
+ return 42
+ # Paginator can be passed other objects with a count() method.
+ paginator = Paginator(CountContainer(), 10)
+ self.assertEqual(42, paginator.count)
+ self.assertEqual(5, paginator.num_pages)
+ self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
+
+ # Paginator can be passed other objects that implement __len__.
+ class LenContainer(object):
+ def __len__(self):
+ return 42
+ paginator = Paginator(LenContainer(), 10)
+ self.assertEqual(42, paginator.count)
+ self.assertEqual(5, paginator.num_pages)
+ self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
+
+ def check_indexes(self, params, page_num, indexes):
+ """
+ Helper method that instantiates a Paginator object from the passed
+ params and then checks that the start and end indexes of the passed
+ page_num match those given as a 2-tuple in indexes.
+ """
+ paginator = Paginator(*params)
+ if page_num == 'first':
+ page_num = 1
+ elif page_num == 'last':
+ page_num = paginator.num_pages
+ page = paginator.page(page_num)
+ start, end = indexes
+ msg = ("For %s of page %s, expected %s but got %s. Paginator parameters were: %s")
+ self.assertEqual(start, page.start_index(), msg % ('start index', page_num, start, page.start_index(), params))
+ self.assertEqual(end, page.end_index(), msg % ('end index', page_num, end, page.end_index(), params))
+
+ def test_page_indexes(self):
+ """
+ Paginator pages have the correct start and end indexes.
+ """
+ ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ tests = (
+ # Each item is three tuples:
+ # First tuple is Paginator parameters - object_list, per_page,
+ # orphans, and allow_empty_first_page.
+ # Second tuple is the start and end indexes of the first page.
+ # Third tuple is the start and end indexes of the last page.
+ # Ten items, varying per_page, no orphans.
+ ((ten, 1, 0, True), (1, 1), (10, 10)),
+ ((ten, 2, 0, True), (1, 2), (9, 10)),
+ ((ten, 3, 0, True), (1, 3), (10, 10)),
+ ((ten, 5, 0, True), (1, 5), (6, 10)),
+ # Ten items, varying per_page, with orphans.
+ ((ten, 1, 1, True), (1, 1), (9, 10)),
+ ((ten, 1, 2, True), (1, 1), (8, 10)),
+ ((ten, 3, 1, True), (1, 3), (7, 10)),
+ ((ten, 3, 2, True), (1, 3), (7, 10)),
+ ((ten, 3, 4, True), (1, 3), (4, 10)),
+ ((ten, 5, 1, True), (1, 5), (6, 10)),
+ ((ten, 5, 2, True), (1, 5), (6, 10)),
+ ((ten, 5, 5, True), (1, 10), (1, 10)),
+ # One item, varying orphans, no empty first page.
+ (([1], 4, 0, False), (1, 1), (1, 1)),
+ (([1], 4, 1, False), (1, 1), (1, 1)),
+ (([1], 4, 2, False), (1, 1), (1, 1)),
+ # One item, varying orphans, allow empty first page.
+ (([1], 4, 0, True), (1, 1), (1, 1)),
+ (([1], 4, 1, True), (1, 1), (1, 1)),
+ (([1], 4, 2, True), (1, 1), (1, 1)),
+ # Zero items, varying orphans, allow empty first page.
+ (([], 4, 0, True), (0, 0), (0, 0)),
+ (([], 4, 1, True), (0, 0), (0, 0)),
+ (([], 4, 2, True), (0, 0), (0, 0)),
+ )
+ for params, first, last in tests:
+ self.check_indexes(params, 'first', first)
+ self.check_indexes(params, 'last', last)
+
+ # When no items and no empty first page, we should get EmptyPage error.
+ with self.assertRaises(EmptyPage):
+ self.check_indexes(([], 4, 0, False), 1, None)
+ with self.assertRaises(EmptyPage):
+ self.check_indexes(([], 4, 1, False), 1, None)
+ with self.assertRaises(EmptyPage):
+ self.check_indexes(([], 4, 2, False), 1, None)
+
+ def test_page_sequence(self):
+ """
+ A paginator page acts like a standard sequence.
+ """
+ eleven = 'abcdefghijk'
+ page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
+ self.assertEqual(len(page2), 6)
+ self.assertIn('k', page2)
+ self.assertNotIn('a', page2)
+ self.assertEqual(''.join(page2), 'fghijk')
+ self.assertEqual(''.join(reversed(page2)), 'kjihgf')
+
+ def test_get_page_hook(self):
+ """
+ A Paginator subclass can use the ``_get_page`` hook to
+ return an alternative to the standard Page class.
+ """
+ eleven = 'abcdefghijk'
+ paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
+ page1 = paginator.page(1)
+ page2 = paginator.page(2)
+ self.assertIsNone(page1.previous_page_number())
+ self.assertEqual(page1.next_page_number(), 2)
+ self.assertEqual(page2.previous_page_number(), 1)
+ self.assertIsNone(page2.next_page_number())
+
+ def test_page_range_iterator(self):
+ """
+ Paginator.page_range should be an iterator.
+ """
+ self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(six.moves.range(0)))
+
+
+class ModelPaginationTests(TestCase):
+ """
+ Test pagination with Django model instances
+ """
+ def setUp(self):
+ # Prepare a list of objects for pagination.
+ for x in range(1, 10):
+ a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
+ a.save()
+
+ def test_first_page(self):
+ paginator = Paginator(Article.objects.order_by('id'), 5)
+ p = paginator.page(1)
+ self.assertEqual("", six.text_type(p))
+ self.assertQuerysetEqual(p.object_list, [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ])
+ self.assertTrue(p.has_next())
+ self.assertFalse(p.has_previous())
+ self.assertTrue(p.has_other_pages())
+ self.assertEqual(2, p.next_page_number())
+ with self.assertRaises(InvalidPage):
+ p.previous_page_number()
+ self.assertEqual(1, p.start_index())
+ self.assertEqual(5, p.end_index())
+
+ def test_last_page(self):
+ paginator = Paginator(Article.objects.order_by('id'), 5)
+ p = paginator.page(2)
+ self.assertEqual("", six.text_type(p))
+ self.assertQuerysetEqual(p.object_list, [
+ "",
+ "",
+ "",
+ ""
+ ])
+ self.assertFalse(p.has_next())
+ self.assertTrue(p.has_previous())
+ self.assertTrue(p.has_other_pages())
+ with self.assertRaises(InvalidPage):
+ p.next_page_number()
+ self.assertEqual(1, p.previous_page_number())
+ self.assertEqual(6, p.start_index())
+ self.assertEqual(9, p.end_index())
+
+ def test_page_getitem(self):
+ """
+ Tests proper behavior of a paginator page __getitem__ (queryset
+ evaluation, slicing, exception raised).
+ """
+ paginator = Paginator(Article.objects.order_by('id'), 5)
+ p = paginator.page(1)
+
+ # Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
+ # (this happens from the template engine when using eg: {% page_obj.has_previous %})
+ self.assertIsNone(p.object_list._result_cache)
+ with self.assertRaises(TypeError):
+ p['has_previous']
+ self.assertIsNone(p.object_list._result_cache)
+ self.assertNotIsInstance(p.object_list, list)
+
+ # Make sure slicing the Page object with numbers and slice objects work.
+ self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
+ self.assertQuerysetEqual(p[slice(2)], [
+ "",
+ "",
+ ]
+ )
+ # After __getitem__ is called, object_list is a list
+ self.assertIsInstance(p.object_list, list)
+
+ def test_paginating_unordered_queryset_raises_warning(self):
+ if django.VERSION < (1, 11, 0):
+ self.skipTest("does not work on older version of Django")
+ with warnings.catch_warnings(record=True) as warns:
+ # Prevent the RuntimeWarning subclass from appearing as an
+ # exception due to the warnings.simplefilter() in runtests.py.
+ warnings.filterwarnings('always', category=UnorderedObjectListWarning)
+ Paginator(Article.objects.all(), 5)
+ self.assertEqual(len(warns), 1)
+ warning = warns[0]
+ self.assertEqual(str(warning.message), (
+ "Pagination may yield inconsistent results with an unordered "
+ "object_list: QuerySet."
+ ))
+ # The warning points at the Paginator caller (i.e. the stacklevel
+ # is appropriate).
+ self.assertEqual(warning.filename, __file__)
+
+ def test_paginating_unordered_object_list_raises_warning(self):
+ """
+ Unordered object list warning with an object that has an orderd
+ attribute but not a model attribute.
+ """
+ if django.VERSION < (1, 11, 0):
+ self.skipTest("does not work on older version of Django")
+ class ObjectList():
+ ordered = False
+ object_list = ObjectList()
+ with warnings.catch_warnings(record=True) as warns:
+ warnings.filterwarnings('always', category=UnorderedObjectListWarning)
+ Paginator(object_list, 5)
+ self.assertEqual(len(warns), 1)
+ self.assertEqual(str(warns[0].message), (
+ "Pagination may yield inconsistent results with an unordered "
+ "object_list: {!r}.".format(object_list)
+ ))
diff --git a/tests/queries/__init__.py b/tests/queries/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/queries/models.py b/tests/queries/models.py
new file mode 100644
index 00000000..587d2e68
--- /dev/null
+++ b/tests/queries/models.py
@@ -0,0 +1,720 @@
+"""
+Various complex queries that have been problematic in the past.
+"""
+import threading
+
+from django.db import models
+
+
+class DumbCategory(models.Model):
+ pass
+
+
+class ProxyCategory(DumbCategory):
+ class Meta:
+ proxy = True
+
+
+class NamedCategory(DumbCategory):
+ name = models.CharField(max_length=10)
+
+ def __str__(self):
+ return self.name
+
+
+class Tag(models.Model):
+ name = models.CharField(max_length=10)
+ parent = models.ForeignKey(
+ 'self',
+ models.SET_NULL,
+ blank=True, null=True,
+ related_name='children',
+ )
+ category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None)
+
+ class Meta:
+ ordering = ['name']
+
+ def __str__(self):
+ return self.name
+
+
+class Note(models.Model):
+ note = models.CharField(max_length=100)
+ misc = models.CharField(max_length=10)
+ tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True)
+
+ class Meta:
+ ordering = ['note']
+
+ def __str__(self):
+ return self.note
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # Regression for #13227 -- having an attribute that
+ # is unpicklable doesn't stop you from cloning queries
+ # that use objects of that type as an argument.
+ self.lock = threading.Lock()
+
+
+class Annotation(models.Model):
+ name = models.CharField(max_length=10)
+ tag = models.ForeignKey(Tag, models.CASCADE)
+ notes = models.ManyToManyField(Note)
+
+ def __str__(self):
+ return self.name
+
+
+class ExtraInfo(models.Model):
+ info = models.CharField(max_length=100)
+ note = models.ForeignKey(Note, models.CASCADE, null=True)
+ value = models.IntegerField(null=True)
+
+ class Meta:
+ ordering = ['info']
+
+ def __str__(self):
+ return self.info
+
+
+class Author(models.Model):
+ name = models.CharField(max_length=10)
+ num = models.IntegerField(unique=True)
+ extra = models.ForeignKey(ExtraInfo, models.CASCADE)
+
+ class Meta:
+ ordering = ['name']
+
+ def __str__(self):
+ return self.name
+
+
+class Item(models.Model):
+ name = models.CharField(max_length=10)
+ created = models.DateTimeField()
+ modified = models.DateTimeField(blank=True, null=True)
+ tags = models.ManyToManyField(Tag, blank=True)
+ creator = models.ForeignKey(Author, models.CASCADE)
+ note = models.ForeignKey(Note, models.CASCADE)
+
+ class Meta:
+ ordering = ['-note', 'name']
+
+ def __str__(self):
+ return self.name
+
+
+class Report(models.Model):
+ name = models.CharField(max_length=10)
+ creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True)
+
+ def __str__(self):
+ return self.name
+
+
+class ReportComment(models.Model):
+ report = models.ForeignKey(Report, models.CASCADE)
+
+
+class Ranking(models.Model):
+ rank = models.IntegerField()
+ author = models.ForeignKey(Author, models.CASCADE)
+
+ class Meta:
+ # A complex ordering specification. Should stress the system a bit.
+ ordering = ('author__extra__note', 'author__name', 'rank')
+
+ def __str__(self):
+ return '%d: %s' % (self.rank, self.author.name)
+
+
+class Cover(models.Model):
+ title = models.CharField(max_length=50)
+ item = models.ForeignKey(Item, models.CASCADE)
+
+ class Meta:
+ ordering = ['item']
+
+ def __str__(self):
+ return self.title
+
+
+class Number(models.Model):
+ num = models.IntegerField()
+
+ def __str__(self):
+ return str(self.num)
+
+# Symmetrical m2m field with a normal field using the reverse accessor name
+# ("valid").
+
+
+class Valid(models.Model):
+ valid = models.CharField(max_length=10)
+ parent = models.ManyToManyField('self')
+
+ class Meta:
+ ordering = ['valid']
+
+# Some funky cross-linked models for testing a couple of infinite recursion
+# cases.
+
+
+class X(models.Model):
+ y = models.ForeignKey('Y', models.CASCADE)
+
+
+class Y(models.Model):
+ x1 = models.ForeignKey(X, models.CASCADE, related_name='y1')
+
+# Some models with a cycle in the default ordering. This would be bad if we
+# didn't catch the infinite loop.
+
+
+class LoopX(models.Model):
+ y = models.ForeignKey('LoopY', models.CASCADE)
+
+ class Meta:
+ ordering = ['y']
+
+
+class LoopY(models.Model):
+ x = models.ForeignKey(LoopX, models.CASCADE)
+
+ class Meta:
+ ordering = ['x']
+
+
+class LoopZ(models.Model):
+ z = models.ForeignKey('self', models.CASCADE)
+
+ class Meta:
+ ordering = ['z']
+
+
+# A model and custom default manager combination.
+
+
+class CustomManager(models.Manager):
+ def get_queryset(self):
+ qs = super().get_queryset()
+ return qs.filter(public=True, tag__name='t1')
+
+
+class ManagedModel(models.Model):
+ data = models.CharField(max_length=10)
+ tag = models.ForeignKey(Tag, models.CASCADE)
+ public = models.BooleanField(default=True)
+
+ objects = CustomManager()
+ normal_manager = models.Manager()
+
+ def __str__(self):
+ return self.data
+
+# An inter-related setup with multiple paths from Child to Detail.
+
+
+class Detail(models.Model):
+ data = models.CharField(max_length=10)
+
+
+class MemberManager(models.Manager):
+ def get_queryset(self):
+ return super().get_queryset().select_related("details")
+
+
+class Member(models.Model):
+ name = models.CharField(max_length=10)
+ details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)
+
+ objects = MemberManager()
+
+
+class Child(models.Model):
+ person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
+ parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
+
+# Custom primary keys interfered with ordering in the past.
+
+
+class CustomPk(models.Model):
+ name = models.CharField(max_length=10, primary_key=True)
+ extra = models.CharField(max_length=10)
+
+ class Meta:
+ ordering = ['name', 'extra']
+
+
+class Related(models.Model):
+ custom = models.ForeignKey(CustomPk, models.CASCADE, null=True)
+
+
+class CustomPkTag(models.Model):
+ id = models.CharField(max_length=20, primary_key=True)
+ custom_pk = models.ManyToManyField(CustomPk)
+ tag = models.CharField(max_length=20)
+
+# An inter-related setup with a model subclass that has a nullable
+# path to another model, and a return path from that model.
+
+
+class Celebrity(models.Model):
+ name = models.CharField("Name", max_length=20)
+ greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)
+
+ def __str__(self):
+ return self.name
+
+
+class TvChef(Celebrity):
+ pass
+
+
+class Fan(models.Model):
+ fan_of = models.ForeignKey(Celebrity, models.CASCADE)
+
+# Multiple foreign keys
+
+
+class LeafA(models.Model):
+ data = models.CharField(max_length=10)
+
+ def __str__(self):
+ return self.data
+
+
+class LeafB(models.Model):
+ data = models.CharField(max_length=10)
+
+
+class Join(models.Model):
+ a = models.ForeignKey(LeafA, models.CASCADE)
+ b = models.ForeignKey(LeafB, models.CASCADE)
+
+
+class ReservedName(models.Model):
+ name = models.CharField(max_length=20)
+ order = models.IntegerField()
+
+ def __str__(self):
+ return self.name
+
+# A simpler shared-foreign-key setup that can expose some problems.
+
+
+class SharedConnection(models.Model):
+ data = models.CharField(max_length=10)
+
+ def __str__(self):
+ return self.data
+
+
+class PointerA(models.Model):
+ connection = models.ForeignKey(SharedConnection, models.CASCADE)
+
+
+class PointerB(models.Model):
+ connection = models.ForeignKey(SharedConnection, models.CASCADE)
+
+# Multi-layer ordering
+
+
+class SingleObject(models.Model):
+ name = models.CharField(max_length=10)
+
+ class Meta:
+ ordering = ['name']
+
+ def __str__(self):
+ return self.name
+
+
+class RelatedObject(models.Model):
+ single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
+ f = models.IntegerField(null=True)
+
+ class Meta:
+ ordering = ['single']
+
+
+class Plaything(models.Model):
+ name = models.CharField(max_length=10)
+ others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)
+
+ class Meta:
+ ordering = ['others']
+
+ def __str__(self):
+ return self.name
+
+
+class Article(models.Model):
+ name = models.CharField(max_length=20)
+ created = models.DateTimeField()
+
+ def __str__(self):
+ return self.name
+
+
+class Food(models.Model):
+ name = models.CharField(max_length=20, unique=True)
+
+ def __str__(self):
+ return self.name
+
+
+class Eaten(models.Model):
+ food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
+ meal = models.CharField(max_length=20)
+
+ def __str__(self):
+ return "%s at %s" % (self.food, self.meal)
+
+
+class Node(models.Model):
+ num = models.IntegerField(unique=True)
+ parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
+
+ def __str__(self):
+ return "%s" % self.num
+
+# Bug #12252
+
+
+class ObjectA(models.Model):
+ name = models.CharField(max_length=50)
+
+ def __str__(self):
+ return self.name
+
+ def __iter__(self):
+ # Ticket #23721
+ assert False, 'type checking should happen without calling model __iter__'
+
+
+class ProxyObjectA(ObjectA):
+ class Meta:
+ proxy = True
+
+
+class ChildObjectA(ObjectA):
+ pass
+
+
+class ObjectB(models.Model):
+ name = models.CharField(max_length=50)
+ objecta = models.ForeignKey(ObjectA, models.CASCADE)
+ num = models.PositiveSmallIntegerField()
+
+ def __str__(self):
+ return self.name
+
+
+class ProxyObjectB(ObjectB):
+ class Meta:
+ proxy = True
+
+
+class ObjectC(models.Model):
+ name = models.CharField(max_length=50)
+ objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
+ objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
+ childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk')
+
+ def __str__(self):
+ return self.name
+
+
+class SimpleCategory(models.Model):
+ name = models.CharField(max_length=15)
+
+ def __str__(self):
+ return self.name
+
+
+class SpecialCategory(SimpleCategory):
+ special_name = models.CharField(max_length=15)
+
+ def __str__(self):
+ return self.name + " " + self.special_name
+
+
+class CategoryItem(models.Model):
+ category = models.ForeignKey(SimpleCategory, models.CASCADE)
+
+ def __str__(self):
+ return "category item: " + str(self.category)
+
+
+class MixedCaseFieldCategoryItem(models.Model):
+ CaTeGoRy = models.ForeignKey(SimpleCategory, models.CASCADE)
+
+
+class MixedCaseDbColumnCategoryItem(models.Model):
+ category = models.ForeignKey(SimpleCategory, models.CASCADE, db_column='CaTeGoRy_Id')
+
+
+class OneToOneCategory(models.Model):
+ new_name = models.CharField(max_length=15)
+ category = models.OneToOneField(SimpleCategory, models.CASCADE)
+
+ def __str__(self):
+ return "one2one " + self.new_name
+
+
+class CategoryRelationship(models.Model):
+ first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel')
+ second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel')
+
+
+class CommonMixedCaseForeignKeys(models.Model):
+ category = models.ForeignKey(CategoryItem, models.CASCADE)
+ mixed_case_field_category = models.ForeignKey(MixedCaseFieldCategoryItem, models.CASCADE)
+ mixed_case_db_column_category = models.ForeignKey(MixedCaseDbColumnCategoryItem, models.CASCADE)
+
+
+class NullableName(models.Model):
+ name = models.CharField(max_length=20, null=True)
+
+ class Meta:
+ ordering = ['id']
+
+
+class ModelD(models.Model):
+ name = models.TextField()
+
+
+class ModelC(models.Model):
+ name = models.TextField()
+
+
+class ModelB(models.Model):
+ name = models.TextField()
+ c = models.ForeignKey(ModelC, models.CASCADE)
+
+
+class ModelA(models.Model):
+ name = models.TextField()
+ b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
+ d = models.ForeignKey(ModelD, models.CASCADE)
+
+
+class Job(models.Model):
+ name = models.CharField(max_length=20, unique=True)
+
+ def __str__(self):
+ return self.name
+
+
+class JobResponsibilities(models.Model):
+ job = models.ForeignKey(Job, models.CASCADE, to_field='name')
+ responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description')
+
+
+class Responsibility(models.Model):
+ description = models.CharField(max_length=20, unique=True)
+ jobs = models.ManyToManyField(Job, through=JobResponsibilities,
+ related_name='responsibilities')
+
+ def __str__(self):
+ return self.description
+
+# Models for disjunction join promotion low level testing.
+
+
+class FK1(models.Model):
+ f1 = models.TextField()
+ f2 = models.TextField()
+
+
+class FK2(models.Model):
+ f1 = models.TextField()
+ f2 = models.TextField()
+
+
+class FK3(models.Model):
+ f1 = models.TextField()
+ f2 = models.TextField()
+
+
+class BaseA(models.Model):
+ a = models.ForeignKey(FK1, models.SET_NULL, null=True)
+ b = models.ForeignKey(FK2, models.SET_NULL, null=True)
+ c = models.ForeignKey(FK3, models.SET_NULL, null=True)
+
+
+class Identifier(models.Model):
+ name = models.CharField(max_length=100)
+
+ def __str__(self):
+ return self.name
+
+
+class Program(models.Model):
+ identifier = models.OneToOneField(Identifier, models.CASCADE)
+
+
+class Channel(models.Model):
+ programs = models.ManyToManyField(Program)
+ identifier = models.OneToOneField(Identifier, models.CASCADE)
+
+
+class Book(models.Model):
+ title = models.TextField()
+ chapter = models.ForeignKey('Chapter', models.CASCADE)
+
+
+class Chapter(models.Model):
+ title = models.TextField()
+ paragraph = models.ForeignKey('Paragraph', models.CASCADE)
+
+
+class Paragraph(models.Model):
+ text = models.TextField()
+ page = models.ManyToManyField('Page')
+
+
+class Page(models.Model):
+ text = models.TextField()
+
+
+class MyObject(models.Model):
+ parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
+ data = models.CharField(max_length=100)
+ created_at = models.DateTimeField(auto_now_add=True)
+
+# Models for #17600 regressions
+
+
+class Order(models.Model):
+ id = models.IntegerField(primary_key=True)
+
+ class Meta:
+ ordering = ('pk',)
+
+ def __str__(self):
+ return '%s' % self.pk
+
+
+class OrderItem(models.Model):
+ order = models.ForeignKey(Order, models.CASCADE, related_name='items')
+ status = models.IntegerField()
+
+ class Meta:
+ ordering = ('pk',)
+
+ def __str__(self):
+ return '%s' % self.pk
+
+
+class BaseUser(models.Model):
+ pass
+
+
+class Task(models.Model):
+ title = models.CharField(max_length=10)
+ owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner')
+ creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator')
+
+ def __str__(self):
+ return self.title
+
+
+class Staff(models.Model):
+ name = models.CharField(max_length=10)
+
+ def __str__(self):
+ return self.name
+
+
+class StaffUser(BaseUser):
+ staff = models.OneToOneField(Staff, models.CASCADE, related_name='user')
+
+ def __str__(self):
+ return self.staff
+
+
+class Ticket21203Parent(models.Model):
+ parentid = models.AutoField(primary_key=True)
+ parent_bool = models.BooleanField(default=True)
+ created = models.DateTimeField(auto_now=True)
+
+
+class Ticket21203Child(models.Model):
+ childid = models.AutoField(primary_key=True)
+ parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)
+
+
+class Person(models.Model):
+ name = models.CharField(max_length=128)
+
+
+class Company(models.Model):
+ name = models.CharField(max_length=128)
+ employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
+
+ def __str__(self):
+ return self.name
+
+
+class Employment(models.Model):
+ employer = models.ForeignKey(Company, models.CASCADE)
+ employee = models.ForeignKey(Person, models.CASCADE)
+ title = models.CharField(max_length=128)
+
+
+class School(models.Model):
+ pass
+
+
+class Student(models.Model):
+ school = models.ForeignKey(School, models.CASCADE)
+
+
+class Classroom(models.Model):
+ name = models.CharField(max_length=20)
+ has_blackboard = models.BooleanField(null=True)
+ school = models.ForeignKey(School, models.CASCADE)
+ students = models.ManyToManyField(Student, related_name='classroom')
+
+
+class Teacher(models.Model):
+ schools = models.ManyToManyField(School)
+ friends = models.ManyToManyField('self')
+
+
+class Ticket23605AParent(models.Model):
+ pass
+
+
+class Ticket23605A(Ticket23605AParent):
+ pass
+
+
+class Ticket23605B(models.Model):
+ modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
+ modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
+ field_b0 = models.IntegerField(null=True)
+ field_b1 = models.BooleanField(default=False)
+
+
+class Ticket23605C(models.Model):
+ field_c0 = models.FloatField()
+
+
+# db_table names have capital letters to ensure they are quoted in queries.
+class Individual(models.Model):
+ alive = models.BooleanField()
+
+ class Meta:
+ db_table = 'Individual'
+
+
+class RelatedIndividual(models.Model):
+ related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')
+
+ class Meta:
+ db_table = 'RelatedIndividual'
diff --git a/tests/queries/test_explain.py b/tests/queries/test_explain.py
new file mode 100644
index 00000000..ad4ca988
--- /dev/null
+++ b/tests/queries/test_explain.py
@@ -0,0 +1,102 @@
+import unittest
+
+from django.db import NotSupportedError, connection, transaction
+from django.db.models import Count
+from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
+from django.test.utils import CaptureQueriesContext
+
+from .models import Tag
+
+
+@skipUnlessDBFeature('supports_explaining_query_execution')
+class ExplainTests(TestCase):
+
+ def test_basic(self):
+ querysets = [
+ Tag.objects.filter(name='test'),
+ Tag.objects.filter(name='test').select_related('parent'),
+ Tag.objects.filter(name='test').prefetch_related('children'),
+ Tag.objects.filter(name='test').annotate(Count('children')),
+ Tag.objects.filter(name='test').values_list('name'),
+ Tag.objects.order_by().union(Tag.objects.order_by().filter(name='test')),
+ Tag.objects.all().select_for_update().filter(name='test'),
+ ]
+ supported_formats = connection.features.supported_explain_formats
+ all_formats = (None,) + tuple(supported_formats) + tuple(f.lower() for f in supported_formats)
+ for idx, queryset in enumerate(querysets):
+ for format in all_formats:
+ with self.subTest(format=format, queryset=idx):
+ if connection.vendor == 'mysql':
+ # This does a query and caches the result.
+ connection.features.needs_explain_extended
+ with self.assertNumQueries(1), CaptureQueriesContext(connection) as captured_queries:
+ result = queryset.explain(format=format)
+ self.assertTrue(captured_queries[0]['sql'].startswith(connection.ops.explain_prefix))
+ self.assertIsInstance(result, str)
+ self.assertTrue(result)
+
+ @skipUnlessDBFeature('validates_explain_options')
+ def test_unknown_options(self):
+ with self.assertRaisesMessage(ValueError, 'Unknown options: test, test2'):
+ Tag.objects.all().explain(test=1, test2=1)
+
+ def test_unknown_format(self):
+ msg = 'DOES NOT EXIST is not a recognized format.'
+ if connection.features.supported_explain_formats:
+ msg += ' Allowed formats: %s' % ', '.join(sorted(connection.features.supported_explain_formats))
+ with self.assertRaisesMessage(ValueError, msg):
+ Tag.objects.all().explain(format='does not exist')
+
+ @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific')
+ def test_postgres_options(self):
+ qs = Tag.objects.filter(name='test')
+ test_options = [
+ {'COSTS': False, 'BUFFERS': True, 'ANALYZE': True},
+ {'costs': False, 'buffers': True, 'analyze': True},
+ {'verbose': True, 'timing': True, 'analyze': True},
+ {'verbose': False, 'timing': False, 'analyze': True},
+ ]
+ if connection.pg_version >= 100000:
+ test_options.append({'summary': True})
+ for options in test_options:
+ with self.subTest(**options), transaction.atomic():
+ with CaptureQueriesContext(connection) as captured_queries:
+ qs.explain(format='text', **options)
+ self.assertEqual(len(captured_queries), 1)
+ for name, value in options.items():
+ option = '{} {}'.format(name.upper(), 'true' if value else 'false')
+ self.assertIn(option, captured_queries[0]['sql'])
+
+ @unittest.skipUnless(connection.vendor == 'mysql', 'MySQL specific')
+ def test_mysql_text_to_traditional(self):
+ # Initialize the cached property, if needed, to prevent a query for
+ # the MySQL version during the QuerySet evaluation.
+ connection.features.needs_explain_extended
+ with CaptureQueriesContext(connection) as captured_queries:
+ Tag.objects.filter(name='test').explain(format='text')
+ self.assertEqual(len(captured_queries), 1)
+ self.assertIn('FORMAT=TRADITIONAL', captured_queries[0]['sql'])
+
+ @unittest.skipUnless(connection.vendor == 'mysql', 'MySQL < 5.7 specific')
+ def test_mysql_extended(self):
+ # Inner skip to avoid module level query for MySQL version.
+ if not connection.features.needs_explain_extended:
+ raise unittest.SkipTest('MySQL < 5.7 specific')
+ qs = Tag.objects.filter(name='test')
+ with CaptureQueriesContext(connection) as captured_queries:
+ qs.explain(format='json')
+ self.assertEqual(len(captured_queries), 1)
+ self.assertNotIn('EXTENDED', captured_queries[0]['sql'])
+ with CaptureQueriesContext(connection) as captured_queries:
+ qs.explain(format='text')
+ self.assertEqual(len(captured_queries), 1)
+ self.assertNotIn('EXTENDED', captured_queries[0]['sql'])
+
+
+@skipIfDBFeature('supports_explaining_query_execution')
+class ExplainUnsupportedTests(TestCase):
+
+ def test_message(self):
+ msg = 'This backend does not support explaining query execution.'
+ with self.assertRaisesMessage(NotSupportedError, msg):
+ Tag.objects.filter(name='test').explain()
diff --git a/tests/queries/test_iterator.py b/tests/queries/test_iterator.py
new file mode 100644
index 00000000..56f42c21
--- /dev/null
+++ b/tests/queries/test_iterator.py
@@ -0,0 +1,39 @@
+import datetime
+from unittest import mock
+
+from django.db.models.sql.compiler import cursor_iter
+from django.test import TestCase
+
+from .models import Article
+
+
+class QuerySetIteratorTests(TestCase):
+ itersize_index_in_mock_args = 3
+
+ @classmethod
+ def setUpTestData(cls):
+ Article.objects.create(name='Article 1', created=datetime.datetime.now())
+ Article.objects.create(name='Article 2', created=datetime.datetime.now())
+
+ def test_iterator_invalid_chunk_size(self):
+ for size in (0, -1):
+ with self.subTest(size=size):
+ with self.assertRaisesMessage(ValueError, 'Chunk size must be strictly positive.'):
+ Article.objects.iterator(chunk_size=size)
+
+ def test_default_iterator_chunk_size(self):
+ qs = Article.objects.iterator()
+ with mock.patch('django.db.models.sql.compiler.cursor_iter', side_effect=cursor_iter) as cursor_iter_mock:
+ next(qs)
+ self.assertEqual(cursor_iter_mock.call_count, 1)
+ mock_args, _mock_kwargs = cursor_iter_mock.call_args
+ self.assertEqual(mock_args[self.itersize_index_in_mock_args], 2000)
+
+ def test_iterator_chunk_size(self):
+ batch_size = 3
+ qs = Article.objects.iterator(chunk_size=batch_size)
+ with mock.patch('django.db.models.sql.compiler.cursor_iter', side_effect=cursor_iter) as cursor_iter_mock:
+ next(qs)
+ self.assertEqual(cursor_iter_mock.call_count, 1)
+ mock_args, _mock_kwargs = cursor_iter_mock.call_args
+ self.assertEqual(mock_args[self.itersize_index_in_mock_args], batch_size)
diff --git a/tests/queries/test_q.py b/tests/queries/test_q.py
new file mode 100644
index 00000000..9adff07e
--- /dev/null
+++ b/tests/queries/test_q.py
@@ -0,0 +1,105 @@
+from django.db.models import F, Q
+from django.test import SimpleTestCase
+
+
+class QTests(SimpleTestCase):
+ def test_combine_and_empty(self):
+ q = Q(x=1)
+ self.assertEqual(q & Q(), q)
+ self.assertEqual(Q() & q, q)
+
+ def test_combine_and_both_empty(self):
+ self.assertEqual(Q() & Q(), Q())
+
+ def test_combine_or_empty(self):
+ q = Q(x=1)
+ self.assertEqual(q | Q(), q)
+ self.assertEqual(Q() | q, q)
+
+ def test_combine_or_both_empty(self):
+ self.assertEqual(Q() | Q(), Q())
+
+ def test_combine_not_q_object(self):
+ obj = object()
+ q = Q(x=1)
+ with self.assertRaisesMessage(TypeError, str(obj)):
+ q | obj
+ with self.assertRaisesMessage(TypeError, str(obj)):
+ q & obj
+
+ def test_deconstruct(self):
+ q = Q(price__gt=F('discounted_price'))
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(path, 'django.db.models.Q')
+ self.assertEqual(args, ())
+ self.assertEqual(kwargs, {'price__gt': F('discounted_price')})
+
+ def test_deconstruct_negated(self):
+ q = ~Q(price__gt=F('discounted_price'))
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(args, ())
+ self.assertEqual(kwargs, {
+ 'price__gt': F('discounted_price'),
+ '_negated': True,
+ })
+
+ def test_deconstruct_or(self):
+ q1 = Q(price__gt=F('discounted_price'))
+ q2 = Q(price=F('discounted_price'))
+ q = q1 | q2
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(args, (
+ ('price__gt', F('discounted_price')),
+ ('price', F('discounted_price')),
+ ))
+ self.assertEqual(kwargs, {'_connector': 'OR'})
+
+ def test_deconstruct_and(self):
+ q1 = Q(price__gt=F('discounted_price'))
+ q2 = Q(price=F('discounted_price'))
+ q = q1 & q2
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(args, (
+ ('price__gt', F('discounted_price')),
+ ('price', F('discounted_price')),
+ ))
+ self.assertEqual(kwargs, {})
+
+ def test_deconstruct_multiple_kwargs(self):
+ q = Q(price__gt=F('discounted_price'), price=F('discounted_price'))
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(args, (
+ ('price', F('discounted_price')),
+ ('price__gt', F('discounted_price')),
+ ))
+ self.assertEqual(kwargs, {})
+
+ def test_deconstruct_nested(self):
+ q = Q(Q(price__gt=F('discounted_price')))
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(args, (Q(price__gt=F('discounted_price')),))
+ self.assertEqual(kwargs, {})
+
+ def test_reconstruct(self):
+ q = Q(price__gt=F('discounted_price'))
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(Q(*args, **kwargs), q)
+
+ def test_reconstruct_negated(self):
+ q = ~Q(price__gt=F('discounted_price'))
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(Q(*args, **kwargs), q)
+
+ def test_reconstruct_or(self):
+ q1 = Q(price__gt=F('discounted_price'))
+ q2 = Q(price=F('discounted_price'))
+ q = q1 | q2
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(Q(*args, **kwargs), q)
+
+ def test_reconstruct_and(self):
+ q1 = Q(price__gt=F('discounted_price'))
+ q2 = Q(price=F('discounted_price'))
+ q = q1 & q2
+ path, args, kwargs = q.deconstruct()
+ self.assertEqual(Q(*args, **kwargs), q)
diff --git a/tests/queries/test_qs_combinators.py b/tests/queries/test_qs_combinators.py
new file mode 100644
index 00000000..b3abfc53
--- /dev/null
+++ b/tests/queries/test_qs_combinators.py
@@ -0,0 +1,209 @@
+from django.db.models import Exists, F, IntegerField, OuterRef, Value
+from django.db.utils import DatabaseError, NotSupportedError
+from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
+
+from .models import Number, ReservedName
+
+
+@skipUnlessDBFeature('supports_select_union')
+class QuerySetSetOperationTests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ Number.objects.bulk_create(Number(num=i) for i in range(10))
+
+ def number_transform(self, value):
+ return value.num
+
+ def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
+ self.assertQuerysetEqual(queryset, expected_numbers, self.number_transform, ordered)
+
+ def test_simple_union(self):
+ qs1 = Number.objects.filter(num__lte=1)
+ qs2 = Number.objects.filter(num__gte=8)
+ qs3 = Number.objects.filter(num=5)
+ self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
+
+ @skipUnlessDBFeature('supports_select_intersection')
+ def test_simple_intersection(self):
+ qs1 = Number.objects.filter(num__lte=5)
+ qs2 = Number.objects.filter(num__gte=5)
+ qs3 = Number.objects.filter(num__gte=4, num__lte=6)
+ self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
+
+ @skipUnlessDBFeature('supports_select_intersection')
+ def test_intersection_with_values(self):
+ ReservedName.objects.create(name='a', order=2)
+ qs1 = ReservedName.objects.all()
+ reserved_name = qs1.intersection(qs1).values('name', 'order', 'id').get()
+ self.assertEqual(reserved_name['name'], 'a')
+ self.assertEqual(reserved_name['order'], 2)
+ reserved_name = qs1.intersection(qs1).values_list('name', 'order', 'id').get()
+ self.assertEqual(reserved_name[:2], ('a', 2))
+
+ @skipUnlessDBFeature('supports_select_difference')
+ def test_simple_difference(self):
+ qs1 = Number.objects.filter(num__lte=5)
+ qs2 = Number.objects.filter(num__lte=4)
+ self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
+
+ def test_union_distinct(self):
+ qs1 = Number.objects.all()
+ qs2 = Number.objects.all()
+ self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
+ self.assertEqual(len(list(qs1.union(qs2))), 10)
+
+ @skipUnlessDBFeature('supports_select_intersection')
+ def test_intersection_with_empty_qs(self):
+ qs1 = Number.objects.all()
+ qs2 = Number.objects.none()
+ qs3 = Number.objects.filter(pk__in=[])
+ self.assertEqual(len(qs1.intersection(qs2)), 0)
+ self.assertEqual(len(qs1.intersection(qs3)), 0)
+ self.assertEqual(len(qs2.intersection(qs1)), 0)
+ self.assertEqual(len(qs3.intersection(qs1)), 0)
+ self.assertEqual(len(qs2.intersection(qs2)), 0)
+ self.assertEqual(len(qs3.intersection(qs3)), 0)
+
+ @skipUnlessDBFeature('supports_select_difference')
+ def test_difference_with_empty_qs(self):
+ qs1 = Number.objects.all()
+ qs2 = Number.objects.none()
+ qs3 = Number.objects.filter(pk__in=[])
+ self.assertEqual(len(qs1.difference(qs2)), 10)
+ self.assertEqual(len(qs1.difference(qs3)), 10)
+ self.assertEqual(len(qs2.difference(qs1)), 0)
+ self.assertEqual(len(qs3.difference(qs1)), 0)
+ self.assertEqual(len(qs2.difference(qs2)), 0)
+ self.assertEqual(len(qs3.difference(qs3)), 0)
+
+ @skipUnlessDBFeature('supports_select_difference')
+ def test_difference_with_values(self):
+ ReservedName.objects.create(name='a', order=2)
+ qs1 = ReservedName.objects.all()
+ qs2 = ReservedName.objects.none()
+ reserved_name = qs1.difference(qs2).values('name', 'order', 'id').get()
+ self.assertEqual(reserved_name['name'], 'a')
+ self.assertEqual(reserved_name['order'], 2)
+ reserved_name = qs1.difference(qs2).values_list('name', 'order', 'id').get()
+ self.assertEqual(reserved_name[:2], ('a', 2))
+
+ def test_union_with_empty_qs(self):
+ qs1 = Number.objects.all()
+ qs2 = Number.objects.none()
+ qs3 = Number.objects.filter(pk__in=[])
+ self.assertEqual(len(qs1.union(qs2)), 10)
+ self.assertEqual(len(qs2.union(qs1)), 10)
+ self.assertEqual(len(qs1.union(qs3)), 10)
+ self.assertEqual(len(qs3.union(qs1)), 10)
+ self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
+ self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
+ self.assertEqual(len(qs2.union(qs2)), 0)
+ self.assertEqual(len(qs3.union(qs3)), 0)
+
+ #def test_limits(self):
+ # qs1 = Number.objects.all()
+ # qs2 = Number.objects.all()
+ # self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
+
+ def test_ordering(self):
+ qs1 = Number.objects.filter(num__lte=1)
+ qs2 = Number.objects.filter(num__gte=2, num__lte=3)
+ self.assertNumbersEqual(qs1.union(qs2).order_by('-num'), [3, 2, 1, 0])
+
+ def test_union_with_values(self):
+ ReservedName.objects.create(name='a', order=2)
+ qs1 = ReservedName.objects.all()
+ reserved_name = qs1.union(qs1).values('name', 'order', 'id').get()
+ self.assertEqual(reserved_name['name'], 'a')
+ self.assertEqual(reserved_name['order'], 2)
+ reserved_name = qs1.union(qs1).values_list('name', 'order', 'id').get()
+ self.assertEqual(reserved_name[:2], ('a', 2))
+
+ def test_union_with_two_annotated_values_list(self):
+ qs1 = Number.objects.filter(num=1).annotate(
+ count=Value(0, IntegerField()),
+ ).values_list('num', 'count')
+ qs2 = Number.objects.filter(num=2).values('pk').annotate(
+ count=F('num'),
+ ).annotate(
+ num=Value(1, IntegerField()),
+ ).values_list('num', 'count')
+ self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
+
+ def test_union_with_values_list_on_annotated_and_unannotated(self):
+ ReservedName.objects.create(name='rn1', order=1)
+ qs1 = Number.objects.annotate(
+ has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef('num')))
+ ).filter(has_reserved_name=True)
+ qs2 = Number.objects.filter(num=9)
+ self.assertCountEqual(qs1.union(qs2).values_list('num', flat=True), [1, 9])
+
+ def test_count_union(self):
+ qs1 = Number.objects.filter(num__lte=1).values('num')
+ qs2 = Number.objects.filter(num__gte=2, num__lte=3).values('num')
+ self.assertEqual(qs1.union(qs2).count(), 4)
+
+ def test_count_union_empty_result(self):
+ qs = Number.objects.filter(pk__in=[])
+ self.assertEqual(qs.union(qs).count(), 0)
+
+ @skipUnlessDBFeature('supports_select_difference')
+ def test_count_difference(self):
+ qs1 = Number.objects.filter(num__lt=10)
+ qs2 = Number.objects.filter(num__lt=9)
+ self.assertEqual(qs1.difference(qs2).count(), 1)
+
+ @skipUnlessDBFeature('supports_select_intersection')
+ def test_count_intersection(self):
+ qs1 = Number.objects.filter(num__gte=5)
+ qs2 = Number.objects.filter(num__lte=5)
+ self.assertEqual(qs1.intersection(qs2).count(), 1)
+
+ @skipUnlessDBFeature('supports_slicing_ordering_in_compound')
+ def test_ordering_subqueries(self):
+ qs1 = Number.objects.order_by('num')[:2]
+ qs2 = Number.objects.order_by('-num')[:2]
+ self.assertNumbersEqual(qs1.union(qs2).order_by('-num')[:4], [9, 8, 1, 0])
+
+ @skipIfDBFeature('supports_slicing_ordering_in_compound')
+ def test_unsupported_ordering_slicing_raises_db_error(self):
+ qs1 = Number.objects.all()
+ qs2 = Number.objects.all()
+ msg = 'LIMIT/OFFSET not allowed in subqueries of compound statements'
+ with self.assertRaisesMessage(DatabaseError, msg):
+ list(qs1.union(qs2[:10]))
+ msg = 'ORDER BY not allowed in subqueries of compound statements'
+ with self.assertRaisesMessage(DatabaseError, msg):
+ list(qs1.order_by('id').union(qs2))
+
+ @skipIfDBFeature('supports_select_intersection')
+ def test_unsupported_intersection_raises_db_error(self):
+ qs1 = Number.objects.all()
+ qs2 = Number.objects.all()
+ msg = 'intersection is not supported on this database backend'
+ with self.assertRaisesMessage(NotSupportedError, msg):
+ list(qs1.intersection(qs2))
+
+ def test_combining_multiple_models(self):
+ ReservedName.objects.create(name='99 little bugs', order=99)
+ qs1 = Number.objects.filter(num=1).values_list('num', flat=True)
+ qs2 = ReservedName.objects.values_list('order')
+ self.assertEqual(list(qs1.union(qs2).order_by('num')), [1, 99])
+
+ def test_order_raises_on_non_selected_column(self):
+ qs1 = Number.objects.filter().annotate(
+ annotation=Value(1, IntegerField()),
+ ).values('annotation', num2=F('num'))
+ qs2 = Number.objects.filter().values('id', 'num')
+ # Should not raise
+ list(qs1.union(qs2).order_by('annotation'))
+ list(qs1.union(qs2).order_by('num2'))
+ msg = 'ORDER BY term does not match any column in the result set'
+ # 'id' is not part of the select
+ with self.assertRaisesMessage(DatabaseError, msg):
+ list(qs1.union(qs2).order_by('id'))
+ # 'num' got realiased to num2
+ with self.assertRaisesMessage(DatabaseError, msg):
+ list(qs1.union(qs2).order_by('num'))
+ # switched order, now 'exists' again:
+ list(qs2.union(qs1).order_by('num'))
diff --git a/tests/queries/tests.py b/tests/queries/tests.py
new file mode 100644
index 00000000..c592086f
--- /dev/null
+++ b/tests/queries/tests.py
@@ -0,0 +1,3899 @@
+import datetime
+import pickle
+import unittest
+from collections import OrderedDict
+from operator import attrgetter
+
+from django.core.exceptions import EmptyResultSet, FieldError
+from django.db import DEFAULT_DB_ALIAS, connection
+from django.db.models import Count, F, Q
+from django.db.models.sql.constants import LOUTER
+from django.db.models.sql.where import NothingNode, WhereNode
+from django.test import TestCase, skipUnlessDBFeature
+from django.test.utils import CaptureQueriesContext
+
+from .models import (
+ FK1, Annotation, Article, Author, BaseA, Book, CategoryItem,
+ CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
+ Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk,
+ CustomPkTag, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food,
+ Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB,
+ LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem,
+ MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject,
+ NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC,
+ OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything,
+ PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking,
+ Related, RelatedIndividual, RelatedObject, Report, ReportComment,
+ ReservedName, Responsibility, School, SharedConnection, SimpleCategory,
+ SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task,
+ Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B,
+ Ticket23605C, TvChef, Valid, X,
+)
+
+
+class Queries1Tests(TestCase):
+ @classmethod
+ def setUpTestData(cls):
+ generic = NamedCategory.objects.create(name="Generic")
+ cls.t1 = Tag.objects.create(name='t1', category=generic)
+ cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
+ cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
+ t4 = Tag.objects.create(name='t4', parent=cls.t3)
+ cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
+
+ cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
+ n2 = Note.objects.create(note='n2', misc='bar', id=2)
+ cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
+
+ ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
+ ann1.notes.add(cls.n1)
+ ann2 = Annotation.objects.create(name='a2', tag=t4)
+ ann2.notes.add(n2, cls.n3)
+
+ # Create these out of order so that sorting by 'id' will be different to sorting
+ # by 'info'. Helps detect some problems later.
+ cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
+ e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
+
+ cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
+ cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
+ a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
+ cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
+
+ cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
+ cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
+ time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
+ time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
+ cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
+ cls.i1.tags.set([cls.t1, cls.t2])
+ cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
+ cls.i2.tags.set([cls.t1, cls.t3])
+ cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
+ i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
+ i4.tags.set([t4])
+
+ cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
+ Report.objects.create(name='r2', creator=a3)
+ Report.objects.create(name='r3')
+
+ # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
+ # will be rank3, rank2, rank1.
+ cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
+
+ Cover.objects.create(title="first", item=i4)
+ Cover.objects.create(title="second", item=cls.i2)
+
+ def test_subquery_condition(self):
+ qs1 = Tag.objects.filter(pk__lte=0)
+ qs2 = Tag.objects.filter(parent__in=qs1)
+ qs3 = Tag.objects.filter(parent__in=qs2)
+ self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
+ self.assertIn('v0', str(qs3.query).lower())
+ qs4 = qs3.filter(parent__in=qs1)
+ self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
+ # It is possible to reuse U for the second subquery, no need to use W.
+ self.assertNotIn('w0', str(qs4.query).lower())
+ # So, 'U0."id"' is referenced twice.
+ self.assertTrue(str(qs4.query).lower().count('u0'), 2)
+
+ def test_ticket1050(self):
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__isnull=True),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__id__isnull=True),
+ ['']
+ )
+
+ def test_ticket1801(self):
+ self.assertQuerysetEqual(
+ Author.objects.filter(item=self.i2),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.filter(item=self.i3),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
+ ['']
+ )
+
+ def test_ticket2306(self):
+ # Checking that no join types are "left outer" joins.
+ query = Item.objects.filter(tags=self.t2).query
+ self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
+
+ self.assertQuerysetEqual(
+ Item.objects.filter(Q(tags=self.t1)).order_by('name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
+ ['']
+ )
+
+ # Each filter call is processed "at once" against a single table, so this is
+ # different from the previous example as it tries to find tags that are two
+ # things at once (rather than two tags).
+ self.assertQuerysetEqual(
+ Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
+ []
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
+ []
+ )
+
+ qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
+ self.assertQuerysetEqual(list(qs), [''])
+ self.assertEqual(2, qs.query.count_active_tables(), 2)
+ qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
+ self.assertEqual(qs.query.count_active_tables(), 3)
+
+ def test_ticket4464(self):
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags=self.t1).filter(tags=self.t2),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
+ ['']
+ )
+
+ # Make sure .distinct() works with slicing (this was broken in Oracle).
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
+ ['', '', '']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
+ ['', '']
+ )
+
+ def test_tickets_2080_3592(self):
+ self.assertQuerysetEqual(
+ Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.filter(Q(item__name='one') | Q(name='a3')),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.filter(Q(name='a3') | Q(item__name='one')),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
+ ['']
+ )
+
+ def test_ticket6074(self):
+ # Merging two empty result sets shouldn't leave a queryset with no constraints
+ # (which would match everything).
+ self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
+ self.assertQuerysetEqual(
+ Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
+ []
+ )
+
+ def test_tickets_1878_2939(self):
+ self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
+
+ # Create something with a duplicate 'name' so that we can test multi-column
+ # cases (which require some tricky SQL transformations under the covers).
+ xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
+ xx.save()
+ self.assertEqual(
+ Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
+ 4
+ )
+ self.assertEqual(
+ (
+ Item.objects
+ .exclude(name='two')
+ .extra(select={'foo': '%s'}, select_params=(1,))
+ .values('creator', 'name', 'foo')
+ .distinct()
+ .count()
+ ),
+ 4
+ )
+ self.assertEqual(
+ (
+ Item.objects
+ .exclude(name='two')
+ .extra(select={'foo': '%s'}, select_params=(1,))
+ .values('creator', 'name')
+ .distinct()
+ .count()
+ ),
+ 4
+ )
+ xx.delete()
+
+ def test_ticket7323(self):
+ self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
+
+ def test_ticket2253(self):
+ q1 = Item.objects.order_by('name')
+ q2 = Item.objects.filter(id=self.i1.id)
+ self.assertQuerysetEqual(
+ q1,
+ ['', '', '', '']
+ )
+ self.assertQuerysetEqual(q2, [''])
+ self.assertQuerysetEqual(
+ (q1 | q2).order_by('name'),
+ ['', '', '', '']
+ )
+ self.assertQuerysetEqual((q1 & q2).order_by('name'), [''])
+
+ q1 = Item.objects.filter(tags=self.t1)
+ q2 = Item.objects.filter(note=self.n3, tags=self.t2)
+ q3 = Item.objects.filter(creator=self.a4)
+ self.assertQuerysetEqual(
+ ((q1 & q2) | q3).order_by('name'),
+ ['', '']
+ )
+
+ def test_order_by_tables(self):
+ q1 = Item.objects.order_by('name')
+ q2 = Item.objects.filter(id=self.i1.id)
+ list(q2)
+ combined_query = (q1 & q2).order_by('name').query
+ self.assertEqual(len([
+ t for t in combined_query.alias_map if combined_query.alias_refcount[t]
+ ]), 1)
+
+ def test_order_by_join_unref(self):
+ """
+ This test is related to the above one, testing that there aren't
+ old JOINs in the query.
+ """
+ qs = Celebrity.objects.order_by('greatest_fan__fan_of')
+ self.assertIn('OUTER JOIN', str(qs.query))
+ qs = qs.order_by('id')
+ self.assertNotIn('OUTER JOIN', str(qs.query))
+
+ def test_get_clears_ordering(self):
+ """
+ get() should clear ordering for optimization purposes.
+ """
+ with CaptureQueriesContext(connection) as captured_queries:
+ Author.objects.order_by('name').get(pk=self.a1.pk)
+ self.assertNotIn('order by', captured_queries[0]['sql'].lower())
+
+ def test_tickets_4088_4306(self):
+ self.assertQuerysetEqual(
+ Report.objects.filter(creator=1001),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Report.objects.filter(creator__num=1001),
+ ['']
+ )
+ self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
+ self.assertQuerysetEqual(
+ Report.objects.filter(creator__id=self.a1.id),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Report.objects.filter(creator__name='a1'),
+ ['']
+ )
+
+ def test_ticket4510(self):
+ self.assertQuerysetEqual(
+ Author.objects.filter(report__name='r1'),
+ ['']
+ )
+
+ def test_ticket7378(self):
+ self.assertQuerysetEqual(self.a1.report_set.all(), [''])
+
+ def test_tickets_5324_6704(self):
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__name='t4'),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
+ ['', '', '']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
+ ['', '', '']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.exclude(item__name='one').distinct().order_by('name'),
+ ['', '', '']
+ )
+
+ # Excluding across a m2m relation when there is more than one related
+ # object associated was problematic.
+ self.assertQuerysetEqual(
+ Item.objects.exclude(tags__name='t1').order_by('name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
+ ['']
+ )
+
+ # Excluding from a relation that cannot be NULL should not use outer joins.
+ query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
+ self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
+
+ # Similarly, when one of the joins cannot possibly, ever, involve NULL
+ # values (Author -> ExtraInfo, in the following), it should never be
+ # promoted to a left outer join. So the following query should only
+ # involve one "left outer" join (Author -> Item is 0-to-many).
+ qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
+ self.assertEqual(
+ len([
+ x for x in qs.query.alias_map.values()
+ if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
+ ]),
+ 1
+ )
+
+ # The previous changes shouldn't affect nullable foreign key joins.
+ self.assertQuerysetEqual(
+ Tag.objects.filter(parent__isnull=True).order_by('name'),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Tag.objects.exclude(parent__isnull=True).order_by('name'),
+ ['', '', '', '']
+ )
+ self.assertQuerysetEqual(
+ Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
+ ['', '']
+ )
+ self.assertQuerysetEqual(
+ Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
+ ['', '']
+ )
+
+ def test_ticket2091(self):
+ t = Tag.objects.get(name='t4')
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__in=[t]),
+ ['']
+ )
+
+ def test_avoid_infinite_loop_on_too_many_subqueries(self):
+ x = Tag.objects.filter(pk=1)
+ local_recursion_limit = 127
+ msg = 'Maximum recursion depth exceeded: too many subqueries.'
+ with self.assertRaisesMessage(RuntimeError, msg):
+ for i in range(local_recursion_limit * 2):
+ x = Tag.objects.filter(pk__in=x)
+
+ def test_reasonable_number_of_subq_aliases(self):
+ x = Tag.objects.filter(pk=1)
+ for _ in range(20):
+ x = Tag.objects.filter(pk__in=x)
+ self.assertEqual(
+ x.query.subq_aliases, {
+ 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
+ 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
+ }
+ )
+
+ def test_heterogeneous_qs_combination(self):
+ # Combining querysets built on different models should behave in a well-defined
+ # fashion. We raise an error.
+ with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
+ Author.objects.all() & Tag.objects.all()
+ with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
+ Author.objects.all() | Tag.objects.all()
+
+ def test_ticket3141(self):
+ self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
+ self.assertEqual(
+ Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
+ 4
+ )
+
+ def test_ticket2400(self):
+ self.assertQuerysetEqual(
+ Author.objects.filter(item__isnull=True),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Tag.objects.filter(item__isnull=True),
+ ['']
+ )
+
+ def test_ticket2496(self):
+ self.assertQuerysetEqual(
+ Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
+ ['']
+ )
+
+ def test_error_raised_on_filter_with_dictionary(self):
+ with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
+ Note.objects.filter({'note': 'n1', 'misc': 'foo'})
+
+ def test_tickets_2076_7256(self):
+ # Ordering on related tables should be possible, even if the table is
+ # not otherwise involved.
+ self.assertQuerysetEqual(
+ Item.objects.order_by('note__note', 'name'),
+ ['', '', '', '']
+ )
+
+ # Ordering on a related field should use the remote model's default
+ # ordering as a final step.
+ self.assertQuerysetEqual(
+ Author.objects.order_by('extra', '-name'),
+ ['', '', '', '']
+ )
+
+ # Using remote model default ordering can span multiple models (in this
+ # case, Cover is ordered by Item's default, which uses Note's default).
+ self.assertQuerysetEqual(
+ Cover.objects.all(),
+ ['', '']
+ )
+
+ # If the remote model does not have a default ordering, we order by its 'id'
+ # field.
+ self.assertQuerysetEqual(
+ Item.objects.order_by('creator', 'name'),
+ ['', '', '', '']
+ )
+
+ # Ordering by a many-valued attribute (e.g. a many-to-many or reverse
+ # ForeignKey) is legal, but the results might not make sense. That
+ # isn't Django's problem. Garbage in, garbage out.
+ self.assertQuerysetEqual(
+ Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
+ ['', '', '', '', '']
+ )
+
+ # If we replace the default ordering, Django adjusts the required
+ # tables automatically. Item normally requires a join with Note to do
+ # the default ordering, but that isn't needed here.
+ qs = Item.objects.order_by('name')
+ self.assertQuerysetEqual(
+ qs,
+ ['', '', '', '']
+ )
+ self.assertEqual(len(qs.query.alias_map), 1)
+
+ def test_tickets_2874_3002(self):
+ qs = Item.objects.select_related().order_by('note__note', 'name')
+ self.assertQuerysetEqual(
+ qs,
+ ['', '', '', '']
+ )
+
+ # This is also a good select_related() test because there are multiple
+ # Note entries in the SQL. The two Note items should be different.
+ self.assertTrue(repr(qs[0].note), '')
+ self.assertEqual(repr(qs[0].creator.extra.note), '')
+
+ def test_ticket3037(self):
+ self.assertQuerysetEqual(
+ Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')),
+ ['']
+ )
+
+ def test_tickets_5321_7070(self):
+ # Ordering columns must be included in the output columns. Note that
+ # this means results that might otherwise be distinct are not (if there
+ # are multiple values in the ordering cols), as in this example. This
+ # isn't a bug; it's a warning to be careful with the selection of
+ # ordering columns.
+ self.assertSequenceEqual(
+ Note.objects.values('misc').distinct().order_by('note', '-misc'),
+ [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
+ )
+
+ def test_ticket4358(self):
+ # If you don't pass any fields to values(), relation fields are
+ # returned as "foo_id" keys, not "foo". For consistency, you should be
+ # able to pass "foo_id" in the fields list and have it work, too. We
+ # actually allow both "foo" and "foo_id".
+ # The *_id version is returned by default.
+ self.assertIn('note_id', ExtraInfo.objects.values()[0])
+ # You can also pass it in explicitly.
+ self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}])
+ # ...or use the field name.
+ self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}])
+
+ def test_ticket2902(self):
+ # Parameters can be given to extra_select, *if* you use an OrderedDict.
+
+ # (First we need to know which order the keys fall in "naturally" on
+ # your system, so we can put things in the wrong way around from
+ # normal. A normal dict would thus fail.)
+ s = [('a', '%s'), ('b', '%s')]
+ params = ['one', 'two']
+ if list({'a': 1, 'b': 2}) == ['a', 'b']:
+ s.reverse()
+ params.reverse()
+
+ d = Item.objects.extra(select=OrderedDict(s), select_params=params).values('a', 'b')[0]
+ self.assertEqual(d, {'a': 'one', 'b': 'two'})
+
+ # Order by the number of tags attached to an item.
+ qs = (
+ Item.objects
+ .extra(select={
+ 'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'
+ })
+ .order_by('-count')
+ )
+ self.assertEqual([o.count for o in qs], [2, 2, 1, 0])
+
+ def test_ticket6154(self):
+ # Multiple filter statements are joined using "AND" all the time.
+
+ self.assertQuerysetEqual(
+ Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)),
+ ['']
+ )
+ self.assertQuerysetEqual(
+ Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id),
+ ['']
+ )
+
+ def test_ticket6981(self):
+ self.assertQuerysetEqual(
+ Tag.objects.select_related('parent').order_by('name'),
+ ['', '