# docker-compose -f local.yml run --rm django pytest environmental_justice/tests/test_views.py
import pytest
from rest_framework import status

from environmental_justice.models import EnvironmentalJusticeRow
from environmental_justice.tests.factories import EnvironmentalJusticeRowFactory


@pytest.mark.django_db
class TestEnvironmentalJusticeRowViewSet:
    """Test suite for the EnvironmentalJusticeRow API endpoints"""

    def setup_method(self):
        """Setup URL for API endpoint"""
        self.url = "/api/environmental-justice/"

    def test_empty_database_returns_empty_list(self, client):
        """Should return empty list when no records exist"""
        response = client.get(self.url)
        assert response.status_code == status.HTTP_200_OK
        assert response.json()["results"] == []
        assert response.json()["count"] == 0

    def test_single_source_filtering(self, client):
        """Should return records only from requested data source"""
        # Create records for each data source
        spreadsheet_record = EnvironmentalJusticeRowFactory(
            dataset="test_dataset", data_source=EnvironmentalJusticeRow.DataSourceChoices.SPREADSHEET
        )
        ml_prod_record = EnvironmentalJusticeRowFactory(
            dataset="another_dataset", data_source=EnvironmentalJusticeRow.DataSourceChoices.ML_PRODUCTION
        )
        ml_test_record = EnvironmentalJusticeRowFactory(
            dataset="test_dataset_3", data_source=EnvironmentalJusticeRow.DataSourceChoices.ML_TESTING
        )

        # Test spreadsheet filter
        response = client.get(f"{self.url}?data_source=spreadsheet")
        assert response.status_code == status.HTTP_200_OK
        data = response.json()["results"]
        assert len(data) == 1
        assert data[0]["dataset"] == spreadsheet_record.dataset

        # Test ml_production filter
        response = client.get(f"{self.url}?data_source=ml_production")
        assert response.status_code == status.HTTP_200_OK
        data = response.json()["results"]
        assert len(data) == 1
        assert data[0]["dataset"] == ml_prod_record.dataset

        # Test ml_testing filter
        response = client.get(f"{self.url}?data_source=ml_testing")
        assert response.status_code == status.HTTP_200_OK
        data = response.json()["results"]
        assert len(data) == 1
        assert data[0]["dataset"] == ml_test_record.dataset

    def test_combined_data_precedence(self, client):
        """
        Should return combined data with spreadsheet taking precedence over ml_production
        for matching datasets
        """
        # Create spreadsheet record
        EnvironmentalJusticeRowFactory(
            dataset="common_dataset",
            description="spreadsheet version",
            data_source=EnvironmentalJusticeRow.DataSourceChoices.SPREADSHEET,
        )

        # Create ML production record with same dataset
        EnvironmentalJusticeRowFactory(
            dataset="common_dataset",
            description="ml version",
            data_source=EnvironmentalJusticeRow.DataSourceChoices.ML_PRODUCTION,
        )

        # Create unique ML production record
        EnvironmentalJusticeRowFactory(
            dataset="unique_ml_dataset", data_source=EnvironmentalJusticeRow.DataSourceChoices.ML_PRODUCTION
        )

        # Test combined view (default)
        response = client.get(self.url)
        assert response.status_code == status.HTTP_200_OK
        data = response.json()["results"]
        assert len(data) == 2  # Should only return 2 records (not 3)

        # Verify correct records are returned
        datasets = [record["dataset"] for record in data]
        assert "common_dataset" in datasets
        assert "unique_ml_dataset" in datasets

        # Verify precedence - should get spreadsheet version of common dataset
        common_record = next(r for r in data if r["dataset"] == "common_dataset")
        assert common_record["description"] == "spreadsheet version"

    def test_combined_explicit_parameter(self, client):
        """Should handle explicit 'combined' parameter same as default"""
        EnvironmentalJusticeRowFactory(data_source=EnvironmentalJusticeRow.DataSourceChoices.SPREADSHEET)
        EnvironmentalJusticeRowFactory(
            dataset="unique_ml_dataset",  # Ensure different dataset
            data_source=EnvironmentalJusticeRow.DataSourceChoices.ML_PRODUCTION,
        )

        # Compare default and explicit combined responses
        default_response = client.get(self.url)
        combined_response = client.get(f"{self.url}?data_source=combined")

        assert default_response.status_code == status.HTTP_200_OK
        assert combined_response.status_code == status.HTTP_200_OK
        assert default_response.json()["results"] == combined_response.json()["results"]

    def test_invalid_data_source(self, client):
        """Should return 400 error for invalid data_source parameter"""
        response = client.get(f"{self.url}?data_source=invalid")
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert "Invalid data_source" in str(response.json())

    def test_sorting_in_combined_view(self, client):
        """Should return combined results sorted by dataset name"""
        # Create records in non-alphabetical order
        EnvironmentalJusticeRowFactory(
            dataset="zebra_dataset", data_source=EnvironmentalJusticeRow.DataSourceChoices.SPREADSHEET
        )
        EnvironmentalJusticeRowFactory(
            dataset="alpha_dataset", data_source=EnvironmentalJusticeRow.DataSourceChoices.ML_PRODUCTION
        )

        response = client.get(self.url)
        assert response.status_code == status.HTTP_200_OK
        data = response.json()["results"]

        # Verify sorting
        datasets = [record["dataset"] for record in data]
        assert datasets == sorted(datasets)

    def test_http_methods_allowed(self, client):
        """Should only allow GET requests"""
        # Test GET (should work)
        get_response = client.get(self.url)
        assert get_response.status_code == status.HTTP_200_OK

        # Test POST (should fail)
        post_response = client.post(self.url, {})
        assert post_response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED

        # Test PUT (should fail)
        put_response = client.put(self.url, {})
        assert put_response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED

        # Test DELETE (should fail)
        delete_response = client.delete(self.url)
        assert delete_response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
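# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the test suite and not the project's
# actual view code (which lives in environmental_justice/views.py). The tests
# above assume the viewset resolves the ?data_source= query parameter roughly
# as below; the class name here is hypothetical and the sketch omits
# serializer/router wiring.
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError


class _SketchEnvironmentalJusticeRowViewSet(viewsets.ReadOnlyModelViewSet):
    """Hypothetical read-only viewset matching the behaviour exercised above."""

    def get_queryset(self):
        source = self.request.query_params.get("data_source", "combined")
        choices = EnvironmentalJusticeRow.DataSourceChoices

        # Single-source filtering: ?data_source=spreadsheet / ml_production / ml_testing
        if source in choices.values:
            return EnvironmentalJusticeRow.objects.filter(data_source=source).order_by("dataset")

        # Combined view (default or ?data_source=combined): spreadsheet rows take
        # precedence over ml_production rows sharing the same dataset; ml_testing
        # rows are excluded.
        if source == "combined":
            spreadsheet = EnvironmentalJusticeRow.objects.filter(data_source=choices.SPREADSHEET)
            ml_only = EnvironmentalJusticeRow.objects.filter(
                data_source=choices.ML_PRODUCTION
            ).exclude(dataset__in=spreadsheet.values("dataset"))
            return (spreadsheet | ml_only).order_by("dataset")

        # Anything else is a client error, surfaced by DRF as HTTP 400.
        raise ValidationError({"data_source": "Invalid data_source"})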