Skip to content

Commit 44cd867

Browse files
committed
[MINOR] Fix various warnings and formatting issues
1 parent dc9fca9 commit 44cd867

File tree

15 files changed

+1447
-1443
lines changed

15 files changed

+1447
-1443
lines changed

src/main/java/org/apache/sysds/hops/fedplanner/FederatedPlanCostEnumerator.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -282,6 +282,7 @@ private static void enumerateHopDAG(Hop hop, DMLProgram prog, FederatedMemoTable
282282
* generates federated plan variants for both LOUT and FOUT output types,
283283
* and prunes redundant plans before adding them to the memo table.
284284
*/
285+
@SuppressWarnings("unused")
285286
private static void enumerateHop(Hop hop, FederatedMemoTable memoTable, Map<Long, HopCommon> hopCommonTable,
286287
Map<Long, List<Hop>> rewireTable, Map<Long, Privacy> privacyConstraintMap,
287288
Map<Long, FType> fTypeMap, Set<Long> unRefTwriteSet, int numOfWorkers) {
@@ -510,6 +511,7 @@ private static void singleTypeEnumerateChildFedPlan(FedPlanVariants fedPlanVaria
510511
* it generates only
511512
* a single plan for each output type
512513
*/
514+
@SuppressWarnings("unused")
513515
private static void enumerateTransChildFedPlan(FedPlanVariants lOutFedPlanVariants,
514516
FedPlanVariants fOutFedPlanVariants,
515517
List<Hop> childHops, double[][] childCumulativeCost,
@@ -647,6 +649,7 @@ private static FedPlan getMinCostRootFedPlan(Set<Hop> progRootHopSet, FederatedM
647649
* - Re-running BFS with resolved conflicts to ensure all inconsistencies are
648650
* addressed.
649651
*/
652+
@SuppressWarnings("unused")
650653
private static double detectAndResolveConflictFedPlan(FedPlan rootPlan, FederatedMemoTable memoTable) {
651654
// Map to track conflicts: maps a plan ID to its federated output type and list
652655
// of parent plans

src/main/java/org/apache/sysds/hops/fedplanner/FederatedPlanRewireTransTable.java

Lines changed: 1401 additions & 1401 deletions
Large diffs are not rendered by default.

src/main/java/org/apache/sysds/hops/fedplanner/FederatedPlannerFedCostBased.java

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
package org.apache.sysds.hops.fedplanner;
2121

2222
import java.util.Set;
23-
import java.util.Map;
2423

2524
import org.apache.sysds.common.Types.ExecType;
2625
import org.apache.sysds.hops.ipa.FunctionCallGraph;

src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java

Lines changed: 22 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -374,15 +374,16 @@ public Future<FederatedResponse> requestPrivacyConstraints() {
374374
}
375375

376376
public static class GetPrivacyConstraints extends FederatedUDF {
377+
private static final long serialVersionUID = 1637852940793579590L;
377378
private final String filename;
378379

379-
public GetPrivacyConstraints(String filename) {
380-
super(new long[] { }); // Pass empty ID array to parent constructor as this is a static class
380+
public GetPrivacyConstraints(String filename) {
381+
super(new long[] { }); // Pass empty ID array to parent constructor as this is a static class
381382
this.filename = filename;
382-
}
383-
384-
@Override
385-
public FederatedResponse execute(ExecutionContext ec, Data... data) {
383+
}
384+
385+
@Override
386+
public FederatedResponse execute(ExecutionContext ec, Data... data) {
386387
String privacyConstraints = null;
387388
FileSystem fs = null;
388389
MetaDataAll mtd = null;
@@ -416,19 +417,19 @@ public FederatedResponse execute(ExecutionContext ec, Data... data) {
416417
finally {
417418
IOUtilFunctions.closeSilently(fs);
418419
}
419-
}
420-
421-
@Override
422-
public Pair<String, LineageItem> getLineageItem(ExecutionContext ec) {
423-
String opcode = "fedprivconst"; // Appropriate operation code
424-
425-
// Create input LineageItem for the operation
426-
LineageItem[] inputs = new LineageItem[] {
427-
new LineageItem(filename) // Create literal LineageItem by passing only the string
428-
};
429-
430-
// Create appropriate LineageItem (for read operation)
431-
return Pair.of(opcode, new LineageItem(opcode, inputs));
432-
}
433-
}
420+
}
421+
422+
@Override
423+
public Pair<String, LineageItem> getLineageItem(ExecutionContext ec) {
424+
String opcode = "fedprivconst"; // Appropriate operation code
425+
426+
// Create input LineageItem for the operation
427+
LineageItem[] inputs = new LineageItem[] {
428+
new LineageItem(filename) // Create literal LineageItem by passing only the string
429+
};
430+
431+
// Create appropriate LineageItem (for read operation)
432+
return Pair.of(opcode, new LineageItem(opcode, inputs));
433+
}
434+
}
434435
}

src/main/java/org/apache/sysds/runtime/instructions/gpu/context/CSRPointer.java

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121

2222
import jcuda.Pointer;
2323
import jcuda.Sizeof;
24-
import jcuda.cudaDataType;
2524
import jcuda.jcublas.cublasHandle;
2625
import jcuda.jcusparse.*;
2726
import org.apache.commons.logging.Log;
@@ -37,12 +36,8 @@
3736
import static jcuda.runtime.JCuda.*;
3837
import static jcuda.runtime.cudaMemcpyKind.*;
3938
import static jcuda.jcusparse.cusparseIndexType.CUSPARSE_INDEX_32I;
40-
import static jcuda.cudaDataType.CUDA_R_64F;
41-
import static jcuda.cudaDataType.CUDA_R_32F;
4239
import static jcuda.jcusparse.cusparseSpGEMMAlg.CUSPARSE_SPGEMM_DEFAULT;
4340
import static org.apache.sysds.runtime.matrix.data.LibMatrixCUDA.cudaSupportFunctions;
44-
import static jcuda.jcusparse.cusparseOperation.CUSPARSE_OPERATION_TRANSPOSE;
45-
import static jcuda.jcusparse.cusparseOperation.CUSPARSE_OPERATION_NON_TRANSPOSE;
4641

4742
/**
4843
* Compressed Sparse Row (CSR) format for CUDA

src/main/java/org/apache/sysds/runtime/matrix/data/DoublePrecisionCudaSupportFunctions.java

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,17 +44,13 @@
4444
import jcuda.jcusparse.cusparseDnMatDescr;
4545

4646
import static jcuda.jcusparse.cusparseIndexType.CUSPARSE_INDEX_32I;
47-
import static jcuda.jcusparse.cusparseIndexBase.CUSPARSE_INDEX_BASE_ZERO;
4847
import static jcuda.cudaDataType.CUDA_R_64F;
49-
import static jcuda.jcusparse.cusparseSpGEMMAlg.CUSPARSE_SPGEMM_DEFAULT;
5048
import static jcuda.jcusparse.cusparseStatus.CUSPARSE_STATUS_SUCCESS;
5149
import static jcuda.jcusparse.cusparseSpMVAlg.CUSPARSE_SPMV_ALG_DEFAULT;
52-
import static jcuda.jcusparse.cusparseOperation.CUSPARSE_OPERATION_NON_TRANSPOSE;
5350
import static jcuda.jcusparse.cusparseOrder.CUSPARSE_ORDER_COL;
5451
import static jcuda.jcusparse.cusparseSpMMAlg.CUSPARSE_SPMM_ALG_DEFAULT;
5552
import static jcuda.jcusparse.cusparseCsr2CscAlg.CUSPARSE_CSR2CSC_ALG1;
5653
import static jcuda.jcusparse.cusparseSparseToDenseAlg.CUSPARSE_SPARSETODENSE_ALG_DEFAULT;
57-
import static jcuda.jcusparse.cusparseIndexBase.CUSPARSE_INDEX_BASE_ONE;
5854
import static jcuda.jcusparse.cusparseDenseToSparseAlg.CUSPARSE_DENSETOSPARSE_ALG_DEFAULT;
5955

6056
public class DoublePrecisionCudaSupportFunctions implements CudaSupportFunctions {

src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCuDNN.java

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -345,6 +345,7 @@ private static void throwCuDNNDimensionError(long dim1, long dim2, long dim3, lo
345345
* @param output the output matrix allocated on the GPU
346346
* @param algo cudnn algorithm wrapper
347347
*/
348+
@SuppressWarnings("deprecation")
348349
private static void cudnnConv2d(GPUContext gCtx, String instName, Pointer image, Pointer filter, Pointer output,
349350
LibMatrixCuDNNConvolutionAlgorithm algo) {
350351
if(LOG.isTraceEnabled()) {
@@ -458,6 +459,7 @@ public static void conv2dBackwardFilter(GPUContext gCtx, String instName, Matrix
458459
* @param dwPointer output errors
459460
* @param algo cudnn algorithm wrapper
460461
*/
462+
@SuppressWarnings("deprecation")
461463
private static void cudnnConv2dBackwardFilter(GPUContext gCtx, String instName, Pointer imagePointer, Pointer doutPointer,
462464
Pointer dwPointer, LibMatrixCuDNNConvolutionAlgorithm algo) {
463465
if(LOG.isTraceEnabled()) {
@@ -558,6 +560,7 @@ public static void conv2dBackwardData(GPUContext gCtx, String instName, MatrixOb
558560
* @param dx pointer to output errors
559561
* @param algo cudnn algorithm wrapper
560562
*/
563+
@SuppressWarnings("deprecation")
561564
private static void cudnnConv2dBackwardData(GPUContext gCtx, String instName, Pointer w, Pointer dy,
562565
Pointer dx, LibMatrixCuDNNConvolutionAlgorithm algo) {
563566
if(LOG.isTraceEnabled()) {
@@ -625,6 +628,7 @@ public static void pooling(GPUContext gCtx, String instName, MatrixObject image,
625628
}
626629
}
627630

631+
@SuppressWarnings("deprecation")
628632
private static void cudnnPoolingHelper(GPUContext gCtx, String instName, Pointer x,
629633
Pointer y, int N, int C, int H, int W, int K, int R,
630634
int S, int pad_h, int pad_w, int stride_h, int stride_w, int P,
@@ -714,6 +718,7 @@ public static void poolingBackward(GPUContext gCtx, String instName, MatrixObjec
714718
}
715719
}
716720

721+
@SuppressWarnings("deprecation")
717722
private static void cudnnPoolingBackwardHelper(GPUContext gCtx, String instName,
718723
Pointer x, Pointer dy, Pointer y, Pointer dx,
719724
int N, int C, int H, int W, int K, int R,
@@ -751,6 +756,7 @@ private static void cudnnPoolingBackwardHelper(GPUContext gCtx, String instName,
751756
}
752757
}
753758

759+
@SuppressWarnings("deprecation")
754760
private static void cudnnReLU(GPUContext gCtx, String instName, MatrixObject in, Pointer dstData, cudnnTensorDescriptor srcTensorDesc) {
755761
try {
756762
if(LOG.isTraceEnabled()) {
@@ -1076,6 +1082,7 @@ algo.hxDesc, hx, new Pointer(), // dhy = 0
10761082
* @param resultSaveInvVariance (output) running variance accumulated during training phase: shape [1, C, 1, 1]
10771083
* @throws DMLRuntimeException if error occurs
10781084
*/
1085+
@SuppressWarnings("deprecation")
10791086
public static void batchNormalizationForwardTraining(GPUContext gCtx, String instName, MatrixObject image,
10801087
MatrixObject scale, MatrixObject bias, MatrixObject runningMean, MatrixObject runningVar,
10811088
MatrixObject ret, MatrixObject retRunningMean, MatrixObject retRunningVar,
@@ -1132,6 +1139,7 @@ jcuda.jcudnn.cudnnBatchNormMode.CUDNN_BATCHNORM_SPATIAL, one(), zero(),
11321139
* @param epsilon epsilon value used in the batch normalization formula
11331140
* @throws DMLRuntimeException if error occurs
11341141
*/
1142+
@SuppressWarnings("deprecation")
11351143
public static void batchNormalizationForwardInference(GPUContext gCtx, String instName, MatrixObject image,
11361144
MatrixObject scale, MatrixObject bias, MatrixObject runningMean, MatrixObject runningVar,
11371145
MatrixObject ret, double epsilon) throws DMLRuntimeException {
@@ -1179,6 +1187,7 @@ jcuda.jcudnn.cudnnBatchNormMode.CUDNN_BATCHNORM_SPATIAL, one(), zero(),
11791187
* @param resultSaveInvVariance (input) running variance accumulated during training phase: shape [1, C, 1, 1]
11801188
* @throws DMLRuntimeException if error occurs
11811189
*/
1190+
@SuppressWarnings("deprecation")
11821191
public static void batchNormalizationBackward(GPUContext gCtx, String instName, MatrixObject image, MatrixObject dout,
11831192
MatrixObject scale, MatrixObject dX, MatrixObject dScale, MatrixObject dBias,
11841193
double epsilon, MatrixObject resultSaveMean, MatrixObject resultSaveInvVariance) throws DMLRuntimeException {

src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,7 @@ private LibMatrixCuDNNConvolutionAlgorithm(GPUContext gCtx, String instName, int
8585
/**
8686
* Deallocates the tensor and filter descriptors as well as allocated workspace
8787
*/
88+
@SuppressWarnings("deprecation")
8889
@Override
8990
public void close() {
9091
if(nchwTensorDesc != null)
@@ -125,6 +126,7 @@ public void close() {
125126
* @param workspaceLimit maximum intermediate memory to use
126127
* @return algorithm wrapper
127128
*/
129+
@SuppressWarnings("deprecation")
128130
public static LibMatrixCuDNNConvolutionAlgorithm cudnnGetConvolutionForwardAlgorithm(
129131
GPUContext gCtx, String instName, int N, int C, int H, int W, int K, int R, int S,
130132
int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q, long workspaceLimit) {
@@ -168,6 +170,7 @@ public static LibMatrixCuDNNConvolutionAlgorithm cudnnGetConvolutionForwardAlgor
168170
* @param workspaceLimit maximum intermediate memory to use
169171
* @return algorithm wrapper
170172
*/
173+
@SuppressWarnings("deprecation")
171174
public static LibMatrixCuDNNConvolutionAlgorithm cudnnGetConvolutionBackwardFilterAlgorithm(
172175
GPUContext gCtx, String instName, int N, int C, int H, int W, int K, int R, int S,
173176
int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q, long workspaceLimit) {
@@ -227,6 +230,7 @@ public static LibMatrixCuDNNConvolutionAlgorithm cudnnGetConvolutionBackwardFilt
227230
* @param workspaceLimit maximum intermediate memory to use
228231
* @return algorithm wrapper
229232
*/
233+
@SuppressWarnings("deprecation")
230234
public static LibMatrixCuDNNConvolutionAlgorithm cudnnGetConvolutionBackwardDataAlgorithm(
231235
GPUContext gCtx, String instName, int N, int C, int H, int W, int K, int R, int S,
232236
int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q, long workspaceLimit) {
@@ -289,13 +293,15 @@ private static cudnnTensorDescriptor allocateTensorDescriptor(int N, int C, int
289293
return tensorDescriptor;
290294
}
291295

296+
@SuppressWarnings("deprecation")
292297
private static cudnnFilterDescriptor allocateFilterDescriptor(int K, int C, int R, int S) {
293298
cudnnFilterDescriptor filterDesc = new cudnnFilterDescriptor();
294299
cudnnCreateFilterDescriptor(filterDesc);
295300
cudnnSetFilter4dDescriptor(filterDesc, LibMatrixCUDA.CUDNN_DATA_TYPE, CUDNN_TENSOR_NCHW, K, C, R, S);
296301
return filterDesc;
297302
}
298303

304+
@SuppressWarnings("deprecation")
299305
private static cudnnConvolutionDescriptor allocateConvolutionDescriptor(int padding [], int strides []) {
300306
cudnnConvolutionDescriptor convDesc = new cudnnConvolutionDescriptor();
301307
cudnnCreateConvolutionDescriptor(convDesc);

src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCuDNNPoolingDescriptors.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ public class LibMatrixCuDNNPoolingDescriptors implements java.lang.AutoCloseable
4949
public cudnnTensorDescriptor dyDesc;
5050
public cudnnPoolingDescriptor poolingDesc;
5151

52+
@SuppressWarnings("deprecation")
5253
@Override
5354
public void close() {
5455
if(xDesc != null)
@@ -155,6 +156,7 @@ private static cudnnTensorDescriptor allocateTensorDescriptor(int N, int C, int
155156
* @param poolingType type of pooling
156157
* @return cudnn pooling descriptor
157158
*/
159+
@SuppressWarnings("deprecation")
158160
private static cudnnPoolingDescriptor allocatePoolingDescriptor(int R, int S, int pad_h, int pad_w, int stride_h, int stride_w, PoolingType poolingType) {
159161
cudnnPoolingDescriptor poolingDesc = new cudnnPoolingDescriptor();
160162
cudnnCreatePoolingDescriptor(poolingDesc);

src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCuDNNRnnAlgorithm.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -219,6 +219,7 @@ private int getExpectedNumWeights() throws DMLRuntimeException {
219219
return LibMatrixCUDA.toInt(numScalars);
220220
}
221221

222+
@SuppressWarnings("deprecation")
222223
private static cudnnFilterDescriptor allocateFilterDescriptor(int numWeights) {
223224
cudnnFilterDescriptor filterDesc = new cudnnFilterDescriptor();
224225
cudnnCreateFilterDescriptor(filterDesc);
@@ -238,6 +239,7 @@ private static cudnnTensorDescriptor allocateTensorDescriptorWithStride(int firs
238239
}
239240

240241

242+
@SuppressWarnings("deprecation")
241243
@Override
242244
public void close() {
243245
if(dropoutDesc != null)

0 commit comments

Comments (0)