Skip to content

Commit 692a1a2

Browse files
authored
Revert "[7.17] Backport DLS changes (#108330)" (#109349)
This reverts commit 4e08df5 (#108330). This commit also fixes #109273 in 7.17.
1 parent 5d80ef6 commit 692a1a2

File tree

22 files changed

+57
-936
lines changed

22 files changed

+57
-936
lines changed

docs/reference/migration/migrate_7_17.asciidoc

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -80,23 +80,6 @@ To avoid deprecation warnings, remove any SSL truststores that do not
8080
contain any trusted entries.
8181
====
8282

83-
[[deprecation_for_dls_settings]]
84-
.Deprecation for DLS settings
85-
[%collapsible]
86-
====
87-
*Details* +
88-
Two settings available in the latest versions of 7.17 are not available in the next major version.
89-
Newer versions of 7.17 default to stricter Document Level Security (DLS) rules and the following
90-
settings can be used to disable those stricter DLS rules:
91-
`xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled` and
92-
`xpack.security.dls.error_when_validate_query_with_rewrite.enabled`.
93-
Newer versions, of the next major version of {es}, also default to the stricter DLS rules but don't allow
94-
usage of the less strict rules.
95-
96-
*Impact* +
97-
To avoid deprecation warnings, remove these settings from elasticsearch.yml.
98-
====
99-
10083
[discrete]
10184
[[deprecations_717_mapping]]
10285
==== Mapping deprecations

docs/reference/release-notes/7.17.22.asciidoc

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -5,36 +5,6 @@ coming[7.17.22]
55

66
Also see <<breaking-changes-7.17,Breaking changes in 7.17>>.
77

8-
[[breaking-7.17.22]]
9-
[float]
10-
=== Breaking changes
11-
12-
[discrete]
13-
[[breaking_7_17_22_dls_changes]]
14-
==== Stricter Document Level Security (DLS)
15-
16-
[[stricter_dls_7_17_22]]
17-
.Document Level Security (DLS) applies stricter checks for the validate query API and for terms aggregations when min_doc_count is set to 0.
18-
19-
[%collapsible]
20-
====
21-
*Details* +
22-
When Document Level Security (DLS) is applied to terms aggregations and min_doc_count is set to 0, stricter security rules apply.
23-
When Document Level Security (DLS) is applied to the validate query API with the rewrite parameter, stricter security rules apply.
24-
25-
*Impact* +
26-
If needed, test workflows with DLS enabled to ensure that the stricter security rules do not impact your application.
27-
28-
*Remediation* +
29-
Set min_doc_count to a value greater than 0 in terms aggregations or use an account not constrained by DLS for the validate query API calls.
30-
31-
Set `xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled` to `false` in the Elasticsearch configuration
32-
to revert to the previous behavior.
33-
34-
Set `xpack.security.dls.error_when_validate_query_with_rewrite.enabled` to `false` in the Elasticsearch configuration
35-
to revert to the previous behavior.
36-
====
37-
388
[[bug-7.17.22]]
399
[float]
4010
=== Bug fixes

server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
4141
import java.util.Locale;
4242
import java.util.Map;
4343
import java.util.Objects;
44-
import java.util.Queue;
4544
import java.util.Set;
4645
import java.util.regex.Matcher;
4746
import java.util.regex.Pattern;
@@ -331,48 +330,6 @@ public boolean mustVisitAllDocs() {
331330
return false;
332331
}
333332

334-
/**
335-
* Return true if any of the builders is a terms aggregation with min_doc_count=0
336-
*/
337-
public boolean hasZeroMinDocTermsAggregation() {
338-
final Queue<AggregationBuilder> queue = new LinkedList<>(aggregationBuilders);
339-
while (queue.isEmpty() == false) {
340-
final AggregationBuilder current = queue.poll();
341-
if (current == null) {
342-
continue;
343-
}
344-
if (current instanceof TermsAggregationBuilder) {
345-
TermsAggregationBuilder termsBuilder = (TermsAggregationBuilder) current;
346-
if (termsBuilder.minDocCount() == 0) {
347-
return true;
348-
}
349-
}
350-
queue.addAll(current.getSubAggregations());
351-
}
352-
return false;
353-
}
354-
355-
/**
356-
* Force all min_doc_count=0 terms aggregations to exclude deleted docs.
357-
*/
358-
public void forceTermsAggsToExcludeDeletedDocs() {
359-
assert hasZeroMinDocTermsAggregation();
360-
final Queue<AggregationBuilder> queue = new LinkedList<>(aggregationBuilders);
361-
while (queue.isEmpty() == false) {
362-
final AggregationBuilder current = queue.poll();
363-
if (current == null) {
364-
continue;
365-
}
366-
if (current instanceof TermsAggregationBuilder) {
367-
TermsAggregationBuilder termsBuilder = (TermsAggregationBuilder) current;
368-
if (termsBuilder.minDocCount() == 0) {
369-
termsBuilder.excludeDeletedDocs(true);
370-
}
371-
}
372-
queue.addAll(current.getSubAggregations());
373-
}
374-
}
375-
376333
public Builder addAggregator(AggregationBuilder factory) {
377334
if (names.add(factory.name) == false) {
378335
throw new IllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]");

server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java

Lines changed: 22 additions & 130 deletions
Original file line numberDiff line numberDiff line change
@@ -9,19 +9,15 @@
99
package org.elasticsearch.search.aggregations.bucket.terms;
1010

1111
import org.apache.lucene.index.DocValues;
12-
import org.apache.lucene.index.LeafReader;
1312
import org.apache.lucene.index.LeafReaderContext;
1413
import org.apache.lucene.index.SortedDocValues;
1514
import org.apache.lucene.index.SortedSetDocValues;
1615
import org.apache.lucene.util.ArrayUtil;
17-
import org.apache.lucene.util.Bits;
1816
import org.apache.lucene.util.BytesRef;
1917
import org.apache.lucene.util.PriorityQueue;
2018
import org.elasticsearch.common.io.stream.StreamOutput;
21-
import org.elasticsearch.common.util.BigArrays;
2219
import org.elasticsearch.common.util.LongArray;
2320
import org.elasticsearch.common.util.LongHash;
24-
import org.elasticsearch.core.Nullable;
2521
import org.elasticsearch.core.Releasable;
2622
import org.elasticsearch.core.Releasables;
2723
import org.elasticsearch.search.DocValueFormat;
@@ -86,8 +82,7 @@ public GlobalOrdinalsStringTermsAggregator(
8682
SubAggCollectionMode collectionMode,
8783
boolean showTermDocCountError,
8884
CardinalityUpperBound cardinality,
89-
Map<String, Object> metadata,
90-
boolean excludeDeletedDocs
85+
Map<String, Object> metadata
9186
) throws IOException {
9287
super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata);
9388
this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
@@ -96,13 +91,13 @@ public GlobalOrdinalsStringTermsAggregator(
9691
this.lookupGlobalOrd = values::lookupOrd;
9792
this.acceptedGlobalOrdinals = acceptedOrds;
9893
if (remapGlobalOrds) {
99-
this.collectionStrategy = new RemapGlobalOrds(cardinality, excludeDeletedDocs);
94+
this.collectionStrategy = new RemapGlobalOrds(cardinality);
10095
} else {
10196
this.collectionStrategy = cardinality.map(estimate -> {
10297
if (estimate > 1) {
10398
throw new AggregationExecutionException("Dense ords don't know how to collect from many buckets");
10499
}
105-
return new DenseGlobalOrds(excludeDeletedDocs);
100+
return new DenseGlobalOrds();
106101
});
107102
}
108103
}
@@ -279,8 +274,7 @@ static class LowCardinality extends GlobalOrdinalsStringTermsAggregator {
279274
boolean remapGlobalOrds,
280275
SubAggCollectionMode collectionMode,
281276
boolean showTermDocCountError,
282-
Map<String, Object> metadata,
283-
boolean excludeDeletedDocs
277+
Map<String, Object> metadata
284278
) throws IOException {
285279
super(
286280
name,
@@ -298,8 +292,7 @@ static class LowCardinality extends GlobalOrdinalsStringTermsAggregator {
298292
collectionMode,
299293
showTermDocCountError,
300294
CardinalityUpperBound.ONE,
301-
metadata,
302-
excludeDeletedDocs
295+
metadata
303296
);
304297
assert factories == null || factories.countAggregators() == 0;
305298
this.segmentDocCounts = context.bigArrays().newLongArray(1, true);
@@ -448,13 +441,6 @@ interface BucketInfoConsumer {
448441
* bucket ordinal.
449442
*/
450443
class DenseGlobalOrds extends CollectionStrategy {
451-
452-
private final boolean excludeDeletedDocs;
453-
454-
DenseGlobalOrds(boolean excludeDeletedDocs) {
455-
this.excludeDeletedDocs = excludeDeletedDocs;
456-
}
457-
458444
@Override
459445
String describe() {
460446
return "dense";
@@ -485,14 +471,6 @@ long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) {
485471
@Override
486472
void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
487473
assert owningBucketOrd == 0;
488-
if (excludeDeletedDocs) {
489-
forEachExcludeDeletedDocs(consumer);
490-
} else {
491-
forEachAllowDeletedDocs(consumer);
492-
}
493-
}
494-
495-
private void forEachAllowDeletedDocs(BucketInfoConsumer consumer) throws IOException {
496474
for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
497475
if (false == acceptedGlobalOrdinals.test(globalOrd)) {
498476
continue;
@@ -504,39 +482,6 @@ private void forEachAllowDeletedDocs(BucketInfoConsumer consumer) throws IOExcep
504482
}
505483
}
506484

507-
/**
508-
* Excludes deleted docs in the results by cross-checking with liveDocs.
509-
*/
510-
private void forEachExcludeDeletedDocs(BucketInfoConsumer consumer) throws IOException {
511-
try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) {
512-
for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) {
513-
LeafReader reader = ctx.reader();
514-
Bits liveDocs = reader.getLiveDocs();
515-
SortedSetDocValues globalOrds = null;
516-
for (int docId = 0; docId < reader.maxDoc(); ++docId) {
517-
if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted
518-
globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds;
519-
if (globalOrds.advanceExact(docId)) {
520-
for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) {
521-
if (accepted.find(globalOrd) >= 0) {
522-
continue;
523-
}
524-
if (false == acceptedGlobalOrdinals.test(globalOrd)) {
525-
continue;
526-
}
527-
long docCount = bucketDocCount(globalOrd);
528-
if (bucketCountThresholds.getMinDocCount() == 0 || docCount > 0) {
529-
consumer.accept(globalOrd, globalOrd, docCount);
530-
accepted.add(globalOrd);
531-
}
532-
}
533-
}
534-
}
535-
}
536-
}
537-
}
538-
}
539-
540485
@Override
541486
public void close() {}
542487
}
@@ -549,11 +494,9 @@ public void close() {}
549494
*/
550495
private class RemapGlobalOrds extends CollectionStrategy {
551496
private final LongKeyedBucketOrds bucketOrds;
552-
private final boolean excludeDeletedDocs;
553497

554-
private RemapGlobalOrds(CardinalityUpperBound cardinality, boolean excludeDeletedDocs) {
498+
private RemapGlobalOrds(CardinalityUpperBound cardinality) {
555499
bucketOrds = LongKeyedBucketOrds.buildForValueRange(bigArrays(), cardinality, 0, valueCount - 1);
556-
this.excludeDeletedDocs = excludeDeletedDocs;
557500
}
558501

559502
@Override
@@ -587,20 +530,27 @@ long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) {
587530

588531
@Override
589532
void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
590-
if (excludeDeletedDocs) {
591-
forEachExcludeDeletedDocs(owningBucketOrd, consumer);
592-
} else {
593-
forEachAllowDeletedDocs(owningBucketOrd, consumer);
594-
}
595-
}
596-
597-
void forEachAllowDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
598533
if (bucketCountThresholds.getMinDocCount() == 0) {
599534
for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
600535
if (false == acceptedGlobalOrdinals.test(globalOrd)) {
601536
continue;
602537
}
603-
addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, null);
538+
/*
539+
* Use `add` instead of `find` here to assign an ordinal
540+
* even if the global ord wasn't found so we can build
541+
* sub-aggregations without trouble even though we haven't
542+
* hit any documents for them. This is wasteful, but
543+
* settings minDocCount == 0 is wasteful in general.....
544+
*/
545+
long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd);
546+
long docCount;
547+
if (bucketOrd < 0) {
548+
bucketOrd = -1 - bucketOrd;
549+
docCount = bucketDocCount(bucketOrd);
550+
} else {
551+
docCount = 0;
552+
}
553+
consumer.accept(globalOrd, bucketOrd, docCount);
604554
}
605555
} else {
606556
LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
@@ -613,64 +563,6 @@ void forEachAllowDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer)
613563
}
614564
}
615565

616-
/**
617-
* Excludes deleted docs in the results by cross-checking with liveDocs.
618-
*/
619-
void forEachExcludeDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
620-
assert bucketCountThresholds.getMinDocCount() == 0;
621-
try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) {
622-
for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) {
623-
LeafReader reader = ctx.reader();
624-
Bits liveDocs = reader.getLiveDocs();
625-
SortedSetDocValues globalOrds = null;
626-
for (int docId = 0; docId < reader.maxDoc(); ++docId) {
627-
if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted
628-
globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds;
629-
if (globalOrds.advanceExact(docId)) {
630-
for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) {
631-
if (accepted.find(globalOrd) >= 0) {
632-
continue;
633-
}
634-
if (false == acceptedGlobalOrdinals.test(globalOrd)) {
635-
continue;
636-
}
637-
addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, accepted);
638-
}
639-
}
640-
}
641-
}
642-
}
643-
}
644-
}
645-
646-
private void addBucketForMinDocCountZero(
647-
long owningBucketOrd,
648-
long globalOrd,
649-
BucketInfoConsumer consumer,
650-
@Nullable LongHash accepted
651-
) throws IOException {
652-
/*
653-
* Use `add` instead of `find` here to assign an ordinal
654-
* even if the global ord wasn't found so we can build
655-
* sub-aggregations without trouble even though we haven't
656-
* hit any documents for them. This is wasteful, but
657-
* settings minDocCount == 0 is wasteful in general.....
658-
*/
659-
long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd);
660-
long docCount;
661-
if (bucketOrd < 0) {
662-
bucketOrd = -1 - bucketOrd;
663-
docCount = bucketDocCount(bucketOrd);
664-
} else {
665-
docCount = 0;
666-
}
667-
assert globalOrd >= 0;
668-
consumer.accept(globalOrd, bucketOrd, docCount);
669-
if (accepted != null) {
670-
accepted.add(globalOrd);
671-
}
672-
}
673-
674566
@Override
675567
public void close() {
676568
bucketOrds.close();

0 commit comments

Comments
 (0)