3 changes: 3 additions & 0 deletions pixels-cli/src/main/java/io/pixelsdb/pixels/cli/Main.java
@@ -308,6 +308,9 @@ public static void main(String[] args)
.help("specify the schema name");
argumentParser.addArgument("-t", "--table").required(true)
.help("specify the table name");
argumentParser.addArgument("-c", "--concurrency")
.setDefault("4").required(false)
.help("specify the number of threads used for data stat");

Namespace ns;
try
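
For reference, the new flag keeps its default as the string "4", which the executor later parses with Integer.parseInt. A minimal, stand-alone sketch of that behavior (the ConcurrencyFlagDemo class and the bare parser setup are illustrative, not part of this PR; Pixels wires the argument into its own parser):

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.Namespace;

public class ConcurrencyFlagDemo
{
    public static void main(String[] args) throws ArgumentParserException
    {
        ArgumentParser parser = ArgumentParsers.newFor("stat").build();
        parser.addArgument("-c", "--concurrency")
                .setDefault("4").required(false)
                .help("specify the number of threads used for data stat");

        // Flag omitted: getString returns the string default "4".
        Namespace ns = parser.parseArgs(new String[]{});
        System.out.println(Integer.parseInt(ns.getString("concurrency"))); // 4

        // Flag given: the raw token "8" is stored as a String.
        ns = parser.parseArgs(new String[]{"-c", "8"});
        System.out.println(Integer.parseInt(ns.getString("concurrency"))); // 8
    }
}
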
@@ -33,8 +33,16 @@
import io.trino.jdbc.TrinoDriver;
import net.sourceforge.argparse4j.inf.Namespace;

import java.io.IOException;
import java.sql.*;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;

import static com.google.common.base.Preconditions.checkArgument;

@@ -49,6 +57,7 @@ public void execute(Namespace ns, String command) throws Exception
{
String schemaName = ns.getString("schema");
String tableName = ns.getString("table");
int concurrency = Integer.parseInt(ns.getString("concurrency"));
boolean orderedEnabled = Boolean.parseBoolean(ConfigFactory.Instance().getProperty("executor.ordered.layout.enabled"));
boolean compactEnabled = Boolean.parseBoolean(ConfigFactory.Instance().getProperty("executor.compact.layout.enabled"));

@@ -81,7 +90,7 @@ public void execute(Namespace ns, String command) throws Exception

List<Column> columns = metadataService.getColumns(schemaName, tableName, true);
Map<String, Column> columnMap = new HashMap<>(columns.size());
Map<String, StatsRecorder> columnStatsMap = new HashMap<>(columns.size());
Map<String, StatsRecorder> columnStatsMap = new ConcurrentHashMap<>(columns.size());

for (Column column : columns)
{
@@ -90,51 +99,28 @@ public void execute(Namespace ns, String command) throws Exception
columnMap.put(column.getName(), column);
}

int rowGroupCount = 0;
long rowCount = 0;
for (String filePath : files)
{
Storage storage = StorageFactory.Instance().getStorage(filePath);
PixelsReader pixelsReader = PixelsReaderImpl.newBuilder()
.setPath(filePath).setStorage(storage).setEnableCache(false)
.setCacheOrder(ImmutableList.of()).setPixelsCacheReader(null)
.setPixelsFooterCache(new PixelsFooterCache()).build();
PixelsProto.Footer fileFooter = pixelsReader.getFooter();
int numRowGroup = pixelsReader.getRowGroupNum();
rowGroupCount += numRowGroup;
rowCount += pixelsReader.getNumberOfRows();
List<PixelsProto.Type> types = fileFooter.getTypesList();
for (int i = 0; i < numRowGroup; ++i)
{
PixelsProto.RowGroupFooter rowGroupFooter = pixelsReader.getRowGroupFooter(i);
List<PixelsProto.ColumnChunkIndex> chunkIndices =
rowGroupFooter.getRowGroupIndexEntry().getColumnChunkIndexEntriesList();
for (int j = 0; j < types.size(); ++j)
ExecutorService executor = Executors.newFixedThreadPool(concurrency);
AtomicInteger totalRowGroupCount = new AtomicInteger(0);
AtomicLong totalRowCount = new AtomicLong(0L);
System.out.println("Read File Count: " + files.size() + "\tConcurrency: "+ concurrency);
CompletableFuture.allOf(files.stream().map(filePath ->
CompletableFuture.runAsync(() ->
{
Column column = columnMap.get(types.get(j).getName());
long chunkLength = chunkIndices.get(j).getChunkLength();
column.setSize(chunkLength + column.getSize());
}
}
List<TypeDescription> fields = pixelsReader.getFileSchema().getChildren();
checkArgument(fields.size() == types.size(),
"types.size and fields.size are not consistent");
for (int i = 0; i < fields.size(); ++i)
{
TypeDescription field = fields.get(i);
PixelsProto.Type type = types.get(i);
StatsRecorder statsRecorder = columnStatsMap.get(type.getName());
if (statsRecorder == null)
{
columnStatsMap.put(type.getName(),
StatsRecorder.create(field, fileFooter.getColumnStats(i)));
}
else
{
statsRecorder.merge(StatsRecorder.create(field, fileFooter.getColumnStats(i)));
}
}
pixelsReader.close();
try
{
processFile(filePath, columnMap, columnStatsMap, totalRowGroupCount, totalRowCount);
}
catch (Exception e)
{
System.err.println("Error processing file: " + filePath);
e.printStackTrace();
}
}, executor)
).toArray(CompletableFuture[]::new)).join();
executor.shutdown();
{
long readFileEndTime = System.currentTimeMillis();
System.out.println("Read File Elapsed time: " + (readFileEndTime - startTime) / 1000.0 + "s.");
}

ConfigFactory instance = ConfigFactory.Instance();
@@ -158,15 +144,15 @@ public void execute(Namespace ns, String command) throws Exception

for (Column column : columns)
{
column.setChunkSize(column.getSize() / rowGroupCount);
column.setChunkSize(column.getSize() / totalRowGroupCount.get());
column.setRecordStats(columnStatsMap.get(column.getName())
.serialize().build().toByteString().asReadOnlyByteBuffer());
column.getRecordStats().mark();
metadataService.updateColumn(column);
column.getRecordStats().reset();
}

metadataService.updateRowCount(schemaName, tableName, rowCount);
metadataService.updateRowCount(schemaName, tableName, totalRowCount.get());

/* Set cardinality and null_fraction after the chunk size and column size,
* because chunk size and column size must exist in the metadata when calculating
@@ -188,7 +174,7 @@ public void execute(Namespace ns, String command) throws Exception
if (resultSet.next())
{
long cardinality = resultSet.getLong("cardinality");
double nullFraction = resultSet.getLong("null_count") / (double) rowCount;
double nullFraction = resultSet.getLong("null_count") / (double) totalRowCount.get();
System.out.println(column.getName() + " cardinality: " + cardinality +
", null fraction: " + nullFraction);
column.setCardinality(cardinality);
@@ -207,4 +193,60 @@ public void execute(Namespace ns, String command) throws Exception
long endTime = System.currentTimeMillis();
System.out.println("Elapsed time: " + (endTime - startTime) / 1000.0 + "s.");
}

private void processFile(String filePath,
Map<String, Column> columnMap,
Map<String, StatsRecorder> columnStatsMap,
AtomicInteger totalRowGroupCount,
AtomicLong totalRowCount) throws IOException
{
Storage storage = StorageFactory.Instance().getStorage(filePath);
try (PixelsReader pixelsReader = PixelsReaderImpl.newBuilder()
.setPath(filePath).setStorage(storage).setEnableCache(false)
.setCacheOrder(ImmutableList.of()).setPixelsCacheReader(null)
.setPixelsFooterCache(new PixelsFooterCache()).build())
{
PixelsProto.Footer fileFooter = pixelsReader.getFooter();
int numRowGroup = pixelsReader.getRowGroupNum();
totalRowGroupCount.addAndGet(numRowGroup);
totalRowCount.addAndGet(pixelsReader.getNumberOfRows());
List<PixelsProto.Type> types = fileFooter.getTypesList();
for (int i = 0; i < numRowGroup; ++i)
{
PixelsProto.RowGroupFooter rowGroupFooter = pixelsReader.getRowGroupFooter(i);
List<PixelsProto.ColumnChunkIndex> chunkIndices =
rowGroupFooter.getRowGroupIndexEntry().getColumnChunkIndexEntriesList();
for (int j = 0; j < types.size(); ++j)
{
Column column = columnMap.get(types.get(j).getName());
synchronized (column)
{
long chunkLength = chunkIndices.get(j).getChunkLength();
column.setSize(chunkLength + column.getSize());
}
}
}
List<TypeDescription> fields = pixelsReader.getFileSchema().getChildren();
checkArgument(fields.size() == types.size(),
"types.size and fields.size are not consistent");
for (int i = 0; i < fields.size(); ++i)
{
TypeDescription field = fields.get(i);
PixelsProto.Type type = types.get(i);

PixelsProto.ColumnStatistic currentStat = fileFooter.getColumnStats(i);
columnStatsMap.compute(type.getName(), (k, existingRecorder) ->
{
if (existingRecorder == null)
{
return StatsRecorder.create(field, currentStat);
}
else
{
existingRecorder.merge(StatsRecorder.create(field, currentStat));
return existingRecorder;
}
});
}
}
}
}
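
Stripped of Pixels types, the refactor above is a fan-out/merge: one task per file on a bounded pool, atomic counters for the totals, and ConcurrentHashMap.compute for the per-column merge. A self-contained sketch of that pattern under those assumptions (Stats and statFile are placeholders for StatsRecorder and the footer read; they are not part of this PR):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

public class ParallelStatSketch
{
    // Placeholder for StatsRecorder: only knows how to merge row counts.
    static class Stats
    {
        final long rows;
        Stats(long rows) { this.rows = rows; }
        Stats merge(Stats other) { return new Stats(this.rows + other.rows); }
    }

    // Placeholder for the per-file footer read done by PixelsReader.
    static Stats statFile(String path) { return new Stats(1000L); }

    public static void main(String[] args)
    {
        List<String> files = Arrays.asList("a.pxl", "b.pxl", "c.pxl");
        int concurrency = 4;
        ExecutorService executor = Executors.newFixedThreadPool(concurrency);
        ConcurrentHashMap<String, Stats> columnStats = new ConcurrentHashMap<>();
        AtomicLong totalRows = new AtomicLong(0L);

        // Fan out one task per file, wait for all of them, then stop the pool.
        CompletableFuture.allOf(files.stream().map(path ->
                CompletableFuture.runAsync(() ->
                {
                    Stats s = statFile(path);
                    totalRows.addAndGet(s.rows);
                    // compute() makes the read-merge-write atomic per key, which
                    // is why the HashMap was swapped for a ConcurrentHashMap.
                    columnStats.compute("col_0", (k, prev) ->
                            prev == null ? s : prev.merge(s));
                }, executor)
        ).toArray(CompletableFuture[]::new)).join();
        executor.shutdown();

        System.out.println("rows: " + totalRows.get());
    }
}
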
@@ -21,8 +21,8 @@
package io.pixelsdb.pixels.cli.load;

import io.pixelsdb.pixels.common.exception.MetadataException;
import io.pixelsdb.pixels.common.index.IndexService;
import io.pixelsdb.pixels.common.index.IndexServiceProvider;
import io.pixelsdb.pixels.common.index.service.IndexService;
import io.pixelsdb.pixels.common.index.service.IndexServiceProvider;
import io.pixelsdb.pixels.common.metadata.MetadataService;
import io.pixelsdb.pixels.common.metadata.domain.File;
import io.pixelsdb.pixels.common.metadata.domain.Path;
@@ -23,12 +23,14 @@
import com.google.protobuf.ByteString;
import io.pixelsdb.pixels.common.exception.IndexException;
import io.pixelsdb.pixels.common.exception.MetadataException;
import io.pixelsdb.pixels.common.index.IndexService;
import io.pixelsdb.pixels.common.index.RPCIndexService;
import io.pixelsdb.pixels.common.index.IndexOption;
import io.pixelsdb.pixels.common.index.service.IndexService;
import io.pixelsdb.pixels.common.index.service.RPCIndexService;
import io.pixelsdb.pixels.common.index.RowIdAllocator;
import io.pixelsdb.pixels.common.metadata.domain.File;
import io.pixelsdb.pixels.common.metadata.domain.Path;
import io.pixelsdb.pixels.common.node.BucketCache;
import io.pixelsdb.pixels.common.node.VnodeIdentifier;
import io.pixelsdb.pixels.common.physical.Storage;
import io.pixelsdb.pixels.common.physical.StorageFactory;
import io.pixelsdb.pixels.common.utils.ConfigFactory;
@@ -60,8 +62,7 @@
public class IndexedPixelsConsumer extends AbstractPixelsConsumer
{

// Map: VnodeIdentifier -> Writer state
private final Map<String, PerRetinaNodeWriter> retinaWriters = new ConcurrentHashMap<>();
private final Map<VnodeIdentifier, PerVirtualNodeWriter> retinaWriters = new ConcurrentHashMap<>();
private final BucketCache bucketCache = BucketCache.getInstance();
private final Map<String, IndexService> indexServices = new ConcurrentHashMap<>();
private final int indexServerPort;
@@ -99,9 +100,8 @@ protected void processSourceFile(String originalFilePath) throws IOException, Me
ByteString pkByteString = calculatePrimaryKeyBytes(colsInLine);
// Assume BucketCache has the necessary method and configuration
int bucketId = RetinaUtils.getBucketIdFromByteBuffer(pkByteString);
String retinaName = RetinaUtils.getRetinaHostNameFromBucketId(bucketId);
// 2. Get/Initialize the Writer for this Bucket
PerRetinaNodeWriter retinaNodeWriter = retinaWriters.computeIfAbsent(retinaName, id ->
VnodeIdentifier vnodeIdentifier = RetinaUtils.getVnodeIdentifierFromBucketId(bucketId);
PerVirtualNodeWriter retinaNodeWriter = retinaWriters.computeIfAbsent(vnodeIdentifier, id ->
{
try
{
@@ -132,7 +132,7 @@ protected void processSourceFile(String originalFilePath) throws IOException, Me
{
closePixelsFile(retinaNodeWriter);
// Remove writer to force re-initialization on next use
retinaWriters.remove(retinaName);
retinaWriters.remove(vnodeIdentifier);
}
} catch (IndexException e)
{
@@ -145,7 +145,7 @@ protected void processSourceFile(String originalFilePath) throws IOException, Me
@Override
protected void flushRemainingData() throws IOException, MetadataException
{
for (PerRetinaNodeWriter bucketWriter : retinaWriters.values())
for (PerVirtualNodeWriter bucketWriter : retinaWriters.values())
{
if (bucketWriter.rowCounter > 0)
{
@@ -164,11 +164,10 @@ protected void flushRemainingData() throws IOException, MetadataException
/**
* Initializes a new PixelsWriter and associated File/Path for a given bucket ID.
*/
private PerRetinaNodeWriter initializeRetinaWriter(int bucketId) throws IOException, MetadataException
private PerVirtualNodeWriter initializeRetinaWriter(int bucketId) throws IOException, MetadataException
{
// Use the Node Cache to find the responsible Retina Node
NodeProto.NodeInfo targetNode = bucketCache.getRetinaNodeInfoByBucketId(bucketId);

// Target path selection logic (simple round-robin for the path, but the NodeInfo is bucket-specific)
int targetPathId = GlobalTargetPathId.getAndIncrement() % targetPaths.size();
Path currTargetPath = targetPaths.get(targetPathId);
@@ -187,7 +186,7 @@ private PerRetinaNodeWriter initializeRetinaWriter(int bucketId) throws IOExcept
File currFile = openTmpFile(targetFileName, currTargetPath);
tmpFiles.add(currFile);

return new PerRetinaNodeWriter(pixelsWriter, currFile, currTargetPath, targetNode);
return new PerVirtualNodeWriter(pixelsWriter, currFile, currTargetPath, targetNode, targetNode.getVirtualNodeId());
}

// --- Private Helper Methods ---
@@ -219,7 +218,7 @@ private ByteString calculatePrimaryKeyBytes(String[] colsInLine)
return ByteString.copyFrom((ByteBuffer) indexKeyBuffer.rewind());
}

private void updateIndexEntry(PerRetinaNodeWriter bucketWriter, ByteString pkByteString) throws IndexException
private void updateIndexEntry(PerVirtualNodeWriter bucketWriter, ByteString pkByteString) throws IndexException
{
IndexProto.PrimaryIndexEntry.Builder builder = IndexProto.PrimaryIndexEntry.newBuilder();
builder.getIndexKeyBuilder()
@@ -237,7 +236,7 @@ private void updateIndexEntry(PerRetinaNodeWriter bucketWriter, ByteString pkByt
bucketWriter.indexEntries.add(builder.build());
}

private void flushRowBatch(PerRetinaNodeWriter bucketWriter) throws IOException, IndexException
private void flushRowBatch(PerVirtualNodeWriter bucketWriter) throws IOException, IndexException
{
bucketWriter.pixelsWriter.addRowBatch(bucketWriter.rowBatch);
bucketWriter.rowBatch.reset();
@@ -250,12 +249,12 @@ private void flushRowBatch(PerRetinaNodeWriter bucketWriter) throws IOException,
}

// Push index entries to the corresponding IndexService (determined by targetNode address)
bucketWriter.indexService.putPrimaryIndexEntries(index.getTableId(), index.getId(), bucketWriter.indexEntries);
bucketWriter.indexService.flushIndexEntriesOfFile(index.getTableId(), index.getId(),bucketWriter.currFile.getId(), true);
bucketWriter.indexService.putPrimaryIndexEntries(index.getTableId(), index.getId(), bucketWriter.indexEntries, bucketWriter.option);
bucketWriter.indexService.flushIndexEntriesOfFile(index.getTableId(), index.getId(), bucketWriter.currFile.getId(), true, bucketWriter.option);
bucketWriter.indexEntries.clear();
}

private void closePixelsFile(PerRetinaNodeWriter bucketWriter) throws IOException, IndexException
private void closePixelsFile(PerVirtualNodeWriter bucketWriter) throws IOException, IndexException
{
// Final flush of remaining rows/indexes
if (bucketWriter.rowBatch.size != 0)
@@ -266,7 +265,7 @@ private void closePixelsFile(PerRetinaNodeWriter bucketWriter) throws IOExceptio
closeWriterAndAddFile(bucketWriter.pixelsWriter, bucketWriter.currFile, bucketWriter.currTargetPath, bucketWriter.targetNode);
}

private class PerRetinaNodeWriter
private class PerVirtualNodeWriter
{
PixelsWriter pixelsWriter;
File currFile;
@@ -275,13 +274,15 @@ private class PerRetinaNodeWriter
int rgRowOffset;
int prevRgId;
int rowCounter;
int vNodeId;
IndexOption option;
NodeProto.NodeInfo targetNode;
List<IndexProto.PrimaryIndexEntry> indexEntries = new ArrayList<>();
VectorizedRowBatch rowBatch;
IndexService indexService;
RowIdAllocator rowIdAllocator;

public PerRetinaNodeWriter(PixelsWriter writer, File file, Path path, NodeProto.NodeInfo node)
public PerVirtualNodeWriter(PixelsWriter writer, File file, Path path, NodeProto.NodeInfo node, int vNodeId)
{
this.pixelsWriter = writer;
this.currFile = file;
@@ -292,9 +293,19 @@ public PerRetinaNodeWriter(PixelsWriter writer, File file, Path path, NodeProto.
this.rgRowOffset = 0;
this.rowCounter = 0;
this.rowBatch = schema.createRowBatchWithHiddenColumn(pixelStride, TypeDescription.Mode.NONE);
this.vNodeId = vNodeId;
this.indexService = indexServices.computeIfAbsent(node.getAddress(), nodeInfo ->
RPCIndexService.CreateInstance(nodeInfo, indexServerPort));
this.rowIdAllocator = new RowIdAllocator(index.getTableId(), maxRowNum, this.indexService);
initIndexOption();
}

private void initIndexOption()
{
this.option = IndexOption.builder()
.vNodeId(this.vNodeId)
.build();
}

}
}
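
The writer cache above is now keyed by virtual node instead of host name, so correctness hinges on the key type having value-semantics equals/hashCode. A reduced sketch of that shape (VnodeId, Writer, and the bucket-to-vnode mapping are placeholders for VnodeIdentifier, PerVirtualNodeWriter, and BucketCache; none of them are part of this PR):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class VnodeWriterCacheSketch
{
    // Placeholder for VnodeIdentifier; value semantics make it a usable map key.
    static final class VnodeId
    {
        final int id;
        VnodeId(int id) { this.id = id; }
        @Override public boolean equals(Object o)
        { return o instanceof VnodeId && ((VnodeId) o).id == this.id; }
        @Override public int hashCode() { return Integer.hashCode(id); }
    }

    // Placeholder for PerVirtualNodeWriter and its per-vnode state.
    static class Writer
    {
        final VnodeId vnode;
        Writer(VnodeId vnode) { this.vnode = vnode; }
    }

    private final Map<VnodeId, Writer> writers = new ConcurrentHashMap<>();

    // One writer per virtual node, created lazily and atomically, mirroring
    // retinaWriters.computeIfAbsent(vnodeIdentifier, ...) in the consumer.
    Writer writerFor(int bucketId)
    {
        VnodeId key = new VnodeId(bucketId % 8); // placeholder bucket->vnode mapping
        return writers.computeIfAbsent(key, Writer::new);
    }
}
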
@@ -20,8 +20,6 @@
package io.pixelsdb.pixels.cli.load;

import io.pixelsdb.pixels.common.exception.MetadataException;
import io.pixelsdb.pixels.common.index.IndexServiceProvider;
import io.pixelsdb.pixels.common.index.RowIdAllocator;
import io.pixelsdb.pixels.common.metadata.MetadataService;
import io.pixelsdb.pixels.common.metadata.domain.*;
import io.pixelsdb.pixels.core.TypeDescription;