diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index d2a7bbb7345d1..40b3679d8fb79 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -60,6 +60,9 @@ static List systemJvmOptions(Settings nodeSettings, final Map FLAT_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS; + private static final Class SCALAR_QUANTIZED_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS; + private static final Class FLOAT_SCORING_SUPPLIER_CLASS; + static { + try { + FLAT_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS = Class.forName( + "org.apache.lucene.codecs.lucene99.Lucene99FlatVectorsWriter$FlatCloseableRandomVectorScorerSupplier" + ); + var lookup = MethodHandles.privateLookupIn(FLAT_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS, MethodHandles.lookup()); + FLOAT_SUPPLIER_HANDLE = lookup.findVarHandle( + FLAT_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS, + "supplier", + RandomVectorScorerSupplier.class + ); + + FLOAT_SCORING_SUPPLIER_CLASS = Class.forName( + "org.apache.lucene.internal.vectorization.Lucene99MemorySegmentFloatVectorScorerSupplier" + ); + lookup = MethodHandles.privateLookupIn(FLOAT_SCORING_SUPPLIER_CLASS, MethodHandles.lookup()); + FLOAT_VECTORS_HANDLE = lookup.findVarHandle(FLOAT_SCORING_SUPPLIER_CLASS, "values", FloatVectorValues.class); + + SCALAR_QUANTIZED_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS = Class.forName( + Lucene99ScalarQuantizedVectorsWriter.class.getCanonicalName() + "$ScalarQuantizedCloseableRandomVectorScorerSupplier" + ); + lookup = MethodHandles.privateLookupIn(SCALAR_QUANTIZED_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS, MethodHandles.lookup()); + BYTE_SUPPLIER_HANDLE = lookup.findVarHandle( + SCALAR_QUANTIZED_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS, + "supplier", + RandomVectorScorerSupplier.class + ); + + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen, check opens", e); + } catch (ReflectiveOperationException e) { + throw new AssertionError(e); + } + } + + public static RandomVectorScorerSupplier getFlatRandomVectorScorerInnerSupplier(CloseableRandomVectorScorerSupplier scorerSupplier) { + if (scorerSupplier.getClass().equals(FLAT_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS)) { + return (RandomVectorScorerSupplier) FLOAT_SUPPLIER_HANDLE.get(scorerSupplier); + } + return null; + } + + public static RandomVectorScorerSupplier getScalarQuantizedRandomVectorScorerInnerSupplier( + CloseableRandomVectorScorerSupplier scorerSupplier + ) { + if (scorerSupplier.getClass().equals(SCALAR_QUANTIZED_CLOSEABLE_RANDOM_VECTOR_SCORER_SUPPLIER_CLASS)) { + return (RandomVectorScorerSupplier) BYTE_SUPPLIER_HANDLE.get(scorerSupplier); + } + return null; + } + + public static HasIndexSlice getFloatScoringSupplierVectorOrNull(RandomVectorScorerSupplier scorerSupplier) { + if (FLOAT_SCORING_SUPPLIER_CLASS.isAssignableFrom(scorerSupplier.getClass())) { + var vectorValues = FLOAT_VECTORS_HANDLE.get(scorerSupplier); + if (vectorValues instanceof HasIndexSlice indexSlice) { + return indexSlice; + } + } + return null; + } + + public static HasIndexSlice getByteScoringSupplierVectorOrNull(RandomVectorScorerSupplier scorerSupplier) { + if (scorerSupplier instanceof QuantizedByteVectorValuesAccess quantizedByteVectorValuesAccess) { + return 
quantizedByteVectorValuesAccess.get(); + } + return null; + } +} diff --git a/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/GPUSupport.java b/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/GPUSupport.java index c21bda894790a..f6e0c58495ef3 100644 --- a/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/GPUSupport.java +++ b/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/GPUSupport.java @@ -59,7 +59,7 @@ public static boolean isSupported(boolean logError) { } } else { if (logError) { - LOG.info("Found compatible GPU [{}] (id: [{}])", gpu.name(), gpu.gpuId()); + LOG.debug("Found compatible GPU [{}] (id: [{}])", gpu.name(), gpu.gpuId()); } return true; } diff --git a/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ES92GpuHnswVectorsWriter.java b/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ES92GpuHnswVectorsWriter.java index 44c43cf3a93fb..a9ec6d6cdf2bd 100644 --- a/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ES92GpuHnswVectorsWriter.java +++ b/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ES92GpuHnswVectorsWriter.java @@ -18,7 +18,6 @@ import org.apache.lucene.codecs.hnsw.FlatVectorsWriter; import org.apache.lucene.codecs.lucene99.Lucene99FlatVectorsWriter; import org.apache.lucene.index.ByteVectorValues; -import org.apache.lucene.index.DocsWithFieldSet; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FloatVectorValues; import org.apache.lucene.index.IndexFileNames; @@ -28,26 +27,23 @@ import org.apache.lucene.index.Sorter; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterIndexInput; -import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.MemorySegmentAccessInput; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.hnsw.HnswGraph; import org.apache.lucene.util.hnsw.HnswGraph.NodesIterator; +import org.apache.lucene.util.hnsw.RandomVectorScorerSupplier; import org.apache.lucene.util.packed.DirectMonotonicWriter; import org.apache.lucene.util.quantization.ScalarQuantizer; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.codec.vectors.ES814ScalarQuantizedVectorsFormat; +import org.elasticsearch.index.codec.vectors.reflect.VectorsFormatReflectionUtils; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -276,6 +272,8 @@ private void generateGpuGraphAndWriteMeta(ResourcesHolder resourcesHolder, Field var deviceGraph = index.getGraph(); var graphSize = deviceGraph.size() * deviceGraph.columns() * Integer.BYTES; if (graphSize < DIRECT_COPY_THRESHOLD_IN_BYTES) { + // If the graph is "small enough", copy it entirely to host memory so we can + // release the associated resource early and increase parallelism. 
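+ // The early resourcesHolder.close() in the block below releases the acquired GPU resources before the host-side
+ // copy of the graph is written out; the holder is still closed by the caller's try-with-resources block afterwards
+ // (see the updated ResourcesHolder javadoc later in this change).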
try (var hostGraph = deviceGraph.toHost()) { resourcesHolder.close(); graph = writeGraph(hostGraph, graphLevelNodeOffsets); } @@ -448,105 +446,205 @@ public NodesIterator getNodesOnLevel(int level) { }; } - @SuppressForbidden(reason = "require usage of Lucene's IOUtils#deleteFilesIgnoringExceptions(...)") - private static void deleteFilesIgnoringExceptions(Directory dir, String fileName) { - org.apache.lucene.util.IOUtils.deleteFilesIgnoringExceptions(dir, fileName); - } - // TODO check with deleted documents @Override // fix sorted index case public void mergeOneField(FieldInfo fieldInfo, MergeState mergeState) throws IOException { - var started = System.nanoTime(); - flatVectorWriter.mergeOneField(fieldInfo, mergeState); - final int numVectors; - String tempRawVectorsFileName = null; - boolean success = false; - // save merged vector values to a temp file - try (IndexOutput out = mergeState.segmentInfo.dir.createTempOutput(mergeState.segmentInfo.name, "vec_", IOContext.DEFAULT)) { - tempRawVectorsFileName = out.getName(); - if (dataType == CuVSMatrix.DataType.BYTE) { - numVectors = writeByteVectorValues(out, getMergedByteVectorValues(fieldInfo, mergeState)); + try (var scorerSupplier = flatVectorWriter.mergeOneFieldToIndex(fieldInfo, mergeState)) { + var started = System.nanoTime(); + int numVectors = scorerSupplier.totalVectorCount(); + if (numVectors < MIN_NUM_VECTORS_FOR_GPU_BUILD) { + // we don't really need real values for the vectors here; + // we just build a mock graph where every node is connected to every other node + generateMockGraphAndWriteMeta(fieldInfo, numVectors); } else { - numVectors = writeFloatVectorValues(fieldInfo, out, MergedVectorValues.mergeFloatVectorValues(fieldInfo, mergeState)); - } - CodecUtil.writeFooter(out); - success = true; - } finally { - if (success == false && tempRawVectorsFileName != null) { - deleteFilesIgnoringExceptions(mergeState.segmentInfo.dir, tempRawVectorsFileName); + if (dataType == CuVSMatrix.DataType.FLOAT) { + var randomScorerSupplier = VectorsFormatReflectionUtils.getFlatRandomVectorScorerInnerSupplier(scorerSupplier); + mergeFloatVectorField(fieldInfo, mergeState, randomScorerSupplier, numVectors); + } else { + // During merging, we use quantized data, so we need to support byte[] too. + // That's how our current formats work: use floats during indexing, and quantized data to build a graph + // during merging. 
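+ // The quantized bytes are wrapped inside the flat writer's CloseableRandomVectorScorerSupplier, so we unwrap the
+ // inner RandomVectorScorerSupplier reflectively (VectorsFormatReflectionUtils) to reach the underlying index slice.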
+ assert dataType == CuVSMatrix.DataType.BYTE; + var randomScorerSupplier = VectorsFormatReflectionUtils.getScalarQuantizedRandomVectorScorerInnerSupplier( + scorerSupplier + ); + mergeByteVectorField(fieldInfo, mergeState, randomScorerSupplier, numVectors); + } + } + var elapsed = System.nanoTime() - started; + logger.debug("Merged [{}] vectors in [{}ms]", numVectors, elapsed / 1_000_000.0); + } catch (Throwable t) { + throw new IOException("Failed to merge GPU index: ", t); } - try (IndexInput in = mergeState.segmentInfo.dir.openInput(tempRawVectorsFileName, IOContext.DEFAULT)) { - var input = FilterIndexInput.unwrapOnlyTest(in); - - if (numVectors >= MIN_NUM_VECTORS_FOR_GPU_BUILD) { - if (input instanceof MemorySegmentAccessInput memorySegmentAccessInput) { - // Direct access to mmapped file - - try ( - var dataset = DatasetUtils.getInstance() - .fromInput(memorySegmentAccessInput, numVectors, fieldInfo.getVectorDimension(), dataType); - var resourcesHolder = new ResourcesHolder( - cuVSResourceManager, - cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) - ) - ) { + } + + private void mergeByteVectorField( + FieldInfo fieldInfo, + MergeState mergeState, + RandomVectorScorerSupplier randomScorerSupplier, + int numVectors + ) throws IOException, InterruptedException { + var vectorValues = randomScorerSupplier == null + ? null + : VectorsFormatReflectionUtils.getByteScoringSupplierVectorOrNull(randomScorerSupplier); + if (vectorValues != null) { + IndexInput slice = vectorValues.getSlice(); + var input = FilterIndexInput.unwrapOnlyTest(slice); + if (input instanceof MemorySegmentAccessInput memorySegmentAccessInput) { + // Direct access to mmapped file + // TODO: strides!! + // for int8_hnsw, the raw vector data has an extra 4 bytes at the end of each vector to encode a correction constant + int rowStride = fieldInfo.getVectorDimension() + 4; + try ( + var dataset = DatasetUtils.getInstance() + .fromInput(memorySegmentAccessInput, numVectors, fieldInfo.getVectorDimension(), rowStride, -1, dataType); + var resourcesHolder = new ResourcesHolder( + cuVSResourceManager, + cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) + ); + // Explicitly copy the dataset to GPU memory. The current (25.10) CAGRA index implementation has + // problems with strides; the explicit copy removes the stride while copying. + // Note that this is _not_ an additional copy: input data needs to be moved to GPU memory anyway, + // we are just doing it explicitly instead of relying on CagraIndex#build to do it. + var deviceDataSet = dataset.toDevice(resourcesHolder.resources()) + ) { + generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, deviceDataSet); + } + } else { + logger.debug( + () -> "Cannot mmap merged raw vectors temporary file. 
IndexInput type [" + input.getClass().getSimpleName() + "]" + ); + + try ( + var resourcesHolder = new ResourcesHolder( + cuVSResourceManager, + cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) + ) + ) { + // Read vector-by-vector + var builder = CuVSMatrix.deviceBuilder( + resourcesHolder.resources(), + numVectors, + fieldInfo.getVectorDimension(), + dataType + ); + + byte[] vector = new byte[fieldInfo.getVectorDimension()]; + for (int i = 0; i < numVectors; ++i) { + input.readBytes(vector, 0, fieldInfo.getVectorDimension()); + builder.addVector(vector); + } + + try (var dataset = builder.build()) { generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, dataset); } - } else { - logger.debug( - () -> "Cannot mmap merged raw vectors temporary file. IndexInput type [" + input.getClass().getSimpleName() + "]" + } + } + } else { + logger.warn("Cannot get merged raw vectors from scorer."); + var byteVectorValues = getMergedByteVectorValues(fieldInfo, mergeState); + try ( + var resourcesHolder = new ResourcesHolder( + cuVSResourceManager, + cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) + ) + ) { + // Read vector-by-vector + final var builder = CuVSMatrix.deviceBuilder( + resourcesHolder.resources(), + numVectors, + fieldInfo.getVectorDimension(), + dataType + ); + final KnnVectorValues.DocIndexIterator iterator = byteVectorValues.iterator(); + for (int docV = iterator.nextDoc(); docV != NO_MORE_DOCS; docV = iterator.nextDoc()) { + builder.addVector(byteVectorValues.vectorValue(iterator.index())); + } + + try (var dataset = builder.build()) { + generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, dataset); + } + } + } + } + + private void mergeFloatVectorField( + FieldInfo fieldInfo, + MergeState mergeState, + RandomVectorScorerSupplier randomScorerSupplier, + final int numVectors + ) throws IOException, InterruptedException { + var vectorValues = randomScorerSupplier == null + ? null + : VectorsFormatReflectionUtils.getFloatScoringSupplierVectorOrNull(randomScorerSupplier); + if (vectorValues != null) { + IndexInput slice = vectorValues.getSlice(); + var input = FilterIndexInput.unwrapOnlyTest(slice); + if (input instanceof MemorySegmentAccessInput memorySegmentAccessInput) { + // Direct access to mmapped file + try ( + var dataset = DatasetUtils.getInstance() + .fromInput(memorySegmentAccessInput, numVectors, fieldInfo.getVectorDimension(), dataType); + var resourcesHolder = new ResourcesHolder( + cuVSResourceManager, + cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) + ) + ) { + generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, dataset); + } + } else { + logger.debug( + () -> "Cannot mmap merged raw vectors temporary file. 
IndexInput type [" + input.getClass().getSimpleName() + "]" + ); + + try ( + var resourcesHolder = new ResourcesHolder( + cuVSResourceManager, + cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) + ) + ) { + // Read vector-by-vector + var builder = CuVSMatrix.deviceBuilder( + resourcesHolder.resources(), + numVectors, + fieldInfo.getVectorDimension(), + dataType ); - try ( - var resourcesHolder = new ResourcesHolder( - cuVSResourceManager, - cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) - ) - ) { - // Read vector-by-vector - var builder = CuVSMatrix.deviceBuilder( - resourcesHolder.resources(), - numVectors, - fieldInfo.getVectorDimension(), - dataType - ); - - // During merging, we use quantized data, so we need to support byte[] too. - // That's how our current formats work: use floats during indexing, and quantized data to build a graph - // during merging. - if (dataType == CuVSMatrix.DataType.FLOAT) { - float[] vector = new float[fieldInfo.getVectorDimension()]; - for (int i = 0; i < numVectors; ++i) { - input.readFloats(vector, 0, fieldInfo.getVectorDimension()); - builder.addVector(vector); - } - } else { - assert dataType == CuVSMatrix.DataType.BYTE; - byte[] vector = new byte[fieldInfo.getVectorDimension()]; - for (int i = 0; i < numVectors; ++i) { - input.readBytes(vector, 0, fieldInfo.getVectorDimension()); - builder.addVector(vector); - } - } - try (var dataset = builder.build()) { - generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, dataset); - } + float[] vector = new float[fieldInfo.getVectorDimension()]; + for (int i = 0; i < numVectors; ++i) { + input.readFloats(vector, 0, fieldInfo.getVectorDimension()); + builder.addVector(vector); + } + + try (var dataset = builder.build()) { + generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, dataset); } } - } else { - // we don't really need real value for vectors here, - // we just build a mock graph where every node is connected to every other node - generateMockGraphAndWriteMeta(fieldInfo, numVectors); } - } catch (Throwable t) { - throw new IOException("Failed to merge GPU index: ", t); - } finally { - deleteFilesIgnoringExceptions(mergeState.segmentInfo.dir, tempRawVectorsFileName); + } else { + logger.warn("Cannot get merged raw vectors from scorer."); + FloatVectorValues floatVectorValues = MergedVectorValues.mergeFloatVectorValues(fieldInfo, mergeState); + try ( + var resourcesHolder = new ResourcesHolder( + cuVSResourceManager, + cuVSResourceManager.acquire(numVectors, fieldInfo.getVectorDimension(), dataType) + ) + ) { + // Read vector-by-vector + var builder = CuVSMatrix.deviceBuilder(resourcesHolder.resources(), numVectors, fieldInfo.getVectorDimension(), dataType); + + final KnnVectorValues.DocIndexIterator iterator = floatVectorValues.iterator(); + for (int docV = iterator.nextDoc(); docV != NO_MORE_DOCS; docV = iterator.nextDoc()) { + float[] vector = floatVectorValues.vectorValue(iterator.index()); + builder.addVector(vector); + } + try (var dataset = builder.build()) { + generateGpuGraphAndWriteMeta(resourcesHolder, fieldInfo, dataset); + } + } } - var elapsed = started - System.nanoTime(); - logger.debug("Merged [{}] vectors in [{}ms]", numVectors, elapsed / 1_000_000.0); } private ByteVectorValues getMergedByteVectorValues(FieldInfo fieldInfo, MergeState mergeState) throws IOException { @@ -557,32 +655,6 @@ private ByteVectorValues getMergedByteVectorValues(FieldInfo fieldInfo, MergeSta return 
MergedQuantizedVectorValues.mergeQuantizedByteVectorValues(fieldInfo, mergeState, quantizer); } - private static int writeByteVectorValues(IndexOutput out, ByteVectorValues vectorValues) throws IOException { - int numVectors = 0; - byte[] vector; - final KnnVectorValues.DocIndexIterator iterator = vectorValues.iterator(); - for (int docV = iterator.nextDoc(); docV != NO_MORE_DOCS; docV = iterator.nextDoc()) { - numVectors++; - vector = vectorValues.vectorValue(iterator.index()); - out.writeBytes(vector, vector.length); - } - return numVectors; - } - - private static int writeFloatVectorValues(FieldInfo fieldInfo, IndexOutput out, FloatVectorValues floatVectorValues) - throws IOException { - int numVectors = 0; - final ByteBuffer buffer = ByteBuffer.allocate(fieldInfo.getVectorDimension() * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN); - final KnnVectorValues.DocIndexIterator iterator = floatVectorValues.iterator(); - for (int docV = iterator.nextDoc(); docV != NO_MORE_DOCS; docV = iterator.nextDoc()) { - numVectors++; - float[] vector = floatVectorValues.vectorValue(iterator.index()); - buffer.asFloatBuffer().put(vector); - out.writeBytes(buffer.array(), buffer.array().length); - } - return numVectors; - } - private void writeMeta( FieldInfo field, long vectorIndexOffset, @@ -687,10 +759,6 @@ public void addValue(int docID, float[] vectorValue) throws IOException { lastDocID = docID; } - public DocsWithFieldSet getDocsWithFieldSet() { - return flatFieldVectorsWriter.getDocsWithFieldSet(); - } - @Override public float[] copyValue(float[] vectorValue) { throw new UnsupportedOperationException(); diff --git a/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ResourcesHolder.java b/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ResourcesHolder.java index 7517cb04713aa..91a1bfa1dd781 100644 --- a/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ResourcesHolder.java +++ b/x-pack/plugin/gpu/src/main/java/org/elasticsearch/xpack/gpu/codec/ResourcesHolder.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.gpu.codec; /** - * Holds an acquired resource, allows to manually release it while ensuring it gets released (closed) via the {@link AutoCloseable} - * pattern. + * Holds an acquired resource and allows it to be released manually, while still ensuring it gets released (closed) + * at the end of a try-with-resources block via the {@link AutoCloseable} pattern. 
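+ * Releasing the resource early must therefore leave the holder safe to close again at the end of the block.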
*/ class ResourcesHolder implements AutoCloseable { diff --git a/x-pack/plugin/gpu/src/yamlRestTest/java/org/elasticsearch/xpack/gpu/GPUClientYamlTestSuiteIT.java b/x-pack/plugin/gpu/src/yamlRestTest/java/org/elasticsearch/xpack/gpu/GPUClientYamlTestSuiteIT.java index c4e7e936b0111..e9211f65bc5c0 100644 --- a/x-pack/plugin/gpu/src/yamlRestTest/java/org/elasticsearch/xpack/gpu/GPUClientYamlTestSuiteIT.java +++ b/x-pack/plugin/gpu/src/yamlRestTest/java/org/elasticsearch/xpack/gpu/GPUClientYamlTestSuiteIT.java @@ -29,7 +29,10 @@ private static ElasticsearchCluster createCluster() { .nodes(1) .module("gpu") .setting("xpack.license.self_generated.type", "trial") - .setting("xpack.security.enabled", "false"); + .setting("xpack.security.enabled", "false") + // temporary until we get access to raw vectors in a future Lucene version + .jvmArg("--add-opens=org.apache.lucene.core/org.apache.lucene.codecs.lucene99=org.elasticsearch.server") + .jvmArg("--add-opens=org.apache.lucene.core/org.apache.lucene.internal.vectorization=org.elasticsearch.server"); var libraryPath = System.getenv("LD_LIBRARY_PATH"); if (libraryPath != null) {
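For context on why the --add-opens flags above are needed: VectorsFormatReflectionUtils reaches into package-private Lucene internals with MethodHandles.privateLookupIn, which only succeeds when the target package is opened to the calling module. The following is a minimal, self-contained sketch of that pattern, not the production code; the PrivateLookupSketch and Holder classes and the "value" field are hypothetical stand-ins for the Lucene classes and fields named in the diff.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

public class PrivateLookupSketch {

    // Hypothetical stand-in for a class with a private field we need to read (in the real change this is a
    // package-private Lucene class such as the flat vectors writer's scorer supplier).
    static final class Holder {
        private final int value = 42;
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        // privateLookupIn only succeeds if Holder's package is open to the caller's module; across module
        // boundaries that is exactly what the --add-opens JVM flags above provide.
        MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(Holder.class, MethodHandles.lookup());
        VarHandle handle = lookup.findVarHandle(Holder.class, "value", int.class);
        int read = (int) handle.get(new Holder());
        System.out.println(read); // prints 42
    }
}

As in VectorsFormatReflectionUtils earlier in this diff, the resulting VarHandles would normally be resolved once in a static initializer and stored in static final fields, so a missing open or a renamed field fails fast at class load rather than at merge time.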