Commit 8f8af61
fix warnings showing up with -Wall (#2692)
1 parent 2333841 commit 8f8af61
30 files changed: +35 −222 lines

mlx/array.cpp
Lines changed: 1 addition & 1 deletion

@@ -241,8 +241,8 @@ array::ArrayDesc::ArrayDesc(
     std::vector<array> inputs)
     : shape(std::move(shape)),
       dtype(dtype),
-      status(Status::unscheduled),
       primitive(std::move(primitive)),
+      status(Status::unscheduled),
       inputs(std::move(inputs)) {
   init();
 }
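
This hunk, like the sort.cpp and allocator.cpp hunks further down, fixes -Wreorder: C++ always constructs members in declaration order, and -Wall warns when the member initializer list is written in a different order. A minimal sketch of the warning and the fix, with illustrative names rather than MLX code:

struct Desc {
  int primitive;
  int status;

  // warning: field 'status' will be initialized after field 'primitive'
  // [-Wreorder]: the list order disagrees with the declaration order.
  // Desc() : status(1), primitive(0) {}

  // Fixed, as in the hunk above: list order matches declaration order.
  Desc() : primitive(0), status(1) {}
};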

mlx/backend/cpu/conv.cpp
Lines changed: 0 additions & 125 deletions

@@ -996,131 +996,6 @@ void explicit_gemm_conv_1D_cpu(
   encoder.add_temporaries(std::move(temps));
 }
 
-void explicit_gemm_conv_2D_cpu(
-    const array& in,
-    const array& wt,
-    array out,
-    const std::vector<int>& padding_lo,
-    const std::vector<int>& padding_hi,
-    const std::vector<int>& wt_strides,
-    const std::vector<int>& wt_dilation,
-    Stream stream) {
-  const int N = in.shape(0); // Batch size, should be the same as out.shape(0)
-  const int iH = in.shape(1); // Input spatial dim
-  const int iW = in.shape(2); // Input spatial dim
-  const int oH = out.shape(1); // Output spatial dim
-  const int oW = out.shape(2); // Output spatial dim
-  const int O = wt.shape(0); // Out channels
-  const int C = wt.shape(3); // In channels
-  const int wH = wt.shape(1); // Weight spatial dim
-  const int wW = wt.shape(2); // Weight spatial dim
-
-  auto conv_dtype = out.dtype();
-  auto& encoder = cpu::get_command_encoder(stream);
-
-  // Pad input
-  Shape padded_shape = {
-      N,
-      iH + padding_lo[0] + padding_hi[0],
-      iW + padding_lo[1] + padding_hi[1],
-      C};
-  array in_padded(padded_shape, conv_dtype, nullptr, {});
-
-  // Fill with zeros
-  std::vector<array> temps;
-  temps.push_back(array(0, conv_dtype));
-  copy_cpu(temps.back(), in_padded, CopyType::Scalar, stream);
-
-  // Pick input slice from padded
-  size_t data_offset = padding_lo[0] * in_padded.strides()[1] +
-      padding_lo[1] * in_padded.strides()[2];
-  array in_padded_slice(in.shape(), in_padded.dtype(), nullptr, {});
-  in_padded_slice.copy_shared_buffer(
-      in_padded,
-      in_padded.strides(),
-      in_padded.flags(),
-      in_padded_slice.size(),
-      data_offset);
-  temps.push_back(in_padded_slice);
-
-  // Copy input values into the slice
-  copy_cpu_inplace(in, in_padded_slice, CopyType::GeneralGeneral, stream);
-
-  // Make strided view
-  Shape strided_shape = {N, oH, oW, wH, wW, C};
-
-  Strides strided_strides = {
-      in_padded.strides()[0],
-      in_padded.strides()[1] * wt_strides[0],
-      in_padded.strides()[2] * wt_strides[1],
-      in_padded.strides()[1],
-      in_padded.strides()[2],
-      in_padded.strides()[3]};
-  auto flags = in_padded.flags();
-
-  array in_strided_view(strided_shape, in_padded.dtype(), nullptr, {});
-  in_strided_view.copy_shared_buffer(
-      in_padded, strided_strides, flags, in_strided_view.size(), 0);
-
-  // Materialize strided view
-  Shape strided_reshape = {N * oH * oW, wH * wW * C};
-  array in_strided(strided_reshape, in_strided_view.dtype(), nullptr, {});
-  copy_cpu(in_strided_view, in_strided, CopyType::General, stream);
-  temps.push_back(in_strided);
-
-  // Check wt dtype and prepare
-  auto gemm_wt = wt;
-  auto gemm_out = out;
-
-  if (wt.dtype() != float32 || !wt.flags().row_contiguous) {
-    auto ctype =
-        wt.flags().row_contiguous ? CopyType::Vector : CopyType::General;
-    gemm_wt = array(wt.shape(), float32, nullptr, {});
-    copy_cpu(wt, gemm_wt, ctype, stream);
-    temps.push_back(gemm_wt);
-  }
-
-  if (out.dtype() != float32) {
-    gemm_out = array(out.shape(), float32, nullptr, {});
-    gemm_out.set_data(allocator::malloc(gemm_out.nbytes()));
-    temps.push_back(gemm_out);
-  }
-
-  encoder.set_input_array(in_strided);
-  encoder.set_input_array(gemm_wt);
-  encoder.set_output_array(gemm_out);
-
-  encoder.dispatch([in_strided_ptr = in_strided.data<float>(),
-                    gemm_wt_ptr = gemm_wt.data<float>(),
-                    gemm_out_ptr = gemm_out.data<float>(),
-                    strided_reshape = std::move(strided_reshape),
-                    O]() {
-    // Perform gemm
-    cblas_sgemm(
-        CblasRowMajor,
-        CblasNoTrans, // no trans A
-        CblasTrans, // transB
-        strided_reshape[0], // M
-        O, // N
-        strided_reshape[1], // K
-        1.0f, // alpha
-        in_strided_ptr,
-        strided_reshape[1], // lda
-        gemm_wt_ptr,
-        strided_reshape[1], // ldb
-        0.0f, // beta
-        gemm_out_ptr,
-        O // ldc
-    );
-  });
-
-  // Copy results if needed
-  if (out.dtype() != float32) {
-    copy_cpu_inplace(gemm_out, out, CopyType::Vector, stream);
-  }
-  encoder.add_temporaries(std::move(temps));
-}
-
 void explicit_gemm_conv_ND_cpu(
     const array& in,
     const array& wt,
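
The deleted explicit_gemm_conv_2D_cpu presumably no longer had any callers (the ND path below covers the 2D case), and under -Wall an uncalled file-local function triggers -Wunused-function; deleting the dead code is the usual fix. A reduced illustration, not MLX code:

namespace {
// warning: 'unused_helper' defined but not used [-Wunused-function]
int unused_helper(int x) {
  return x * 2;
}
} // namespace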

mlx/backend/cpu/eig.cpp
Lines changed: 0 additions & 1 deletion

@@ -46,7 +46,6 @@ void eig_impl(
   int info;
   {
     T work;
-    int iwork;
     geev<T>(
         &jobl,
         &jobr,
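
This removal, like the ones in matmul.cpp, quantized.cpp, and svd.cpp below, silences -Wunused-variable: a local declared but never read. An illustrative sketch, not MLX code:

int eig_stub(int a, int b) {
  int iwork; // warning: unused variable 'iwork' [-Wunused-variable]
  return a + b;
}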

mlx/backend/cpu/masked_mm.cpp
Lines changed: 6 additions & 7 deletions

@@ -215,18 +215,18 @@ void BlockMaskedMM::eval_cpu(const std::vector<array>& inputs, array& out) {
 
   encoder.set_input_array(a);
   encoder.set_input_array(b);
-  const void* a_mask_ptr;
-  const void* b_mask_ptr;
-  const void* out_mask_ptr;
+  const void* a_mask_ptr = nullptr;
+  const void* b_mask_ptr = nullptr;
+  const void* out_mask_ptr = nullptr;
   Shape a_mask_shape;
   Shape b_mask_shape;
   Shape out_mask_shape;
   Strides a_mask_strides;
   Strides b_mask_strides;
   Strides out_mask_strides;
-  bool a_mask_bool;
-  bool b_mask_bool;
-  bool out_mask_bool;
+  bool a_mask_bool = false;
+  bool b_mask_bool = false;
+  bool out_mask_bool = false;
   if (has_op_mask) {
     auto& a_mask = inputs[inputs.size() - 2];
     auto& b_mask = inputs[inputs.size() - 1];
@@ -423,7 +423,6 @@ void GatherMM::eval_cpu(const std::vector<array>& inputs, array& out) {
   auto& rhs_indices = inputs[3];
 
   auto batch_shape = get_batch_dims(out.shape());
-  int batch_ndim = batch_shape.size();
 
   auto batch_shape_A = get_batch_dims(a.shape());
   auto batch_strides_A = get_batch_dims(a.strides());
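
The first hunk addresses -Wmaybe-uninitialized (clang's variant is -Wsometimes-uninitialized): the mask pointers and flags were assigned only inside the has_op_mask branch, so the compiler cannot prove they are set before use, and explicit defaults are the standard fix. The second hunk is another unused-variable removal. A reduced sketch of the first fix, with made-up names:

void consume(const void* p, bool is_bool);

void run(bool has_mask, const void* mask) {
  const void* mask_ptr = nullptr; // default added, as in the hunk
  bool mask_bool = false; // ditto
  if (has_mask) {
    mask_ptr = mask;
    mask_bool = true;
  }
  consume(mask_ptr, mask_bool); // provably initialized on every path
}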

mlx/backend/cpu/matmul.cpp
Lines changed: 0 additions & 1 deletion

@@ -91,7 +91,6 @@ void matmul_general(
   auto [b_transposed, ldb, b] = check_transpose(b_pre);
   size_t M = a.shape(-2);
   size_t N = b.shape(-1);
-  size_t K = a.shape(-1);
   if (M == 0 || N == 0) {
     return;
   }

mlx/backend/cpu/quantized.cpp
Lines changed: 0 additions & 2 deletions

@@ -445,7 +445,6 @@ void mxfp4_qmm(
     int K) {
   constexpr int group_size = 32;
   constexpr int pack_factor = get_pack_factor(4, 8);
-  constexpr int bytes_per_pack = get_bytes_per_pack(4);
   constexpr int packs_in_group = group_size / pack_factor;
 
   for (int m = 0; m < M; m++) {
@@ -487,7 +486,6 @@ void mxfp4_qmm_t(
     int K) {
   constexpr int group_size = 32;
   constexpr int pack_factor = get_pack_factor(4, 8);
-  constexpr int bytes_per_pack = get_bytes_per_pack(4);
   constexpr int packs_in_group = group_size / pack_factor;
 
   for (int m = 0; m < M; m++) {

mlx/backend/cpu/sort.cpp
Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@ struct StridedIterator {
   StridedIterator() = default;
 
   explicit StridedIterator(T* ptr, int64_t stride, difference_type offset = 0)
-      : ptr_(ptr + offset * stride), stride_(stride) {}
+      : stride_(stride), ptr_(ptr + offset * stride) {}
 
   explicit StridedIterator(array& arr, int axis, difference_type offset = 0)
       : StridedIterator(arr.data<T>(), arr.strides()[axis], offset) {}

mlx/backend/cpu/svd.cpp
Lines changed: 0 additions & 2 deletions

@@ -83,8 +83,6 @@ void svd_impl(
 
   auto jobz = (u_ptr) ? "A" : "N";
 
-  // Will contain the number of singular values after the call has returned.
-  int ns = 0;
   T workspace_dimension = 0;
 
   // Will contain the indices of eigenvectors that failed to converge (not

mlx/backend/metal/allocator.cpp
Lines changed: 2 additions & 2 deletions

@@ -32,7 +32,6 @@ namespace metal {
 
 MetalAllocator::MetalAllocator()
     : device_(device(mlx::core::Device::gpu).mtl_device()),
-      residency_set_(device_),
       buffer_cache_(
           vm_page_size,
           [](MTL::Buffer* buf) { return buf->length(); },
@@ -41,7 +40,8 @@ MetalAllocator::MetalAllocator()
             residency_set_.erase(buf);
           }
           buf->release();
-      }) {
+      }),
+      residency_set_(device_) {
   auto pool = metal::new_scoped_memory_pool();
   auto memsize = std::get<size_t>(device_info().at("memory_size"));
   auto max_rec_size =
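
A note on why this reorder is safe: members are always constructed in declaration order regardless of how the initializer list is written, so residency_set_ (presumably declared after buffer_cache_ in the class) was already being initialized last; the hunk only rewrites the list to say so, which is all -Wreorder asks for. The residency_set_.erase(buf) call inside the buffer_cache_ callback is unaffected, since that lambda runs when buffers are evicted, not during construction.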

mlx/backend/metal/allocator.h
Lines changed: 0 additions & 1 deletion

@@ -65,7 +65,6 @@ class MetalAllocator : public allocator::Allocator {
   size_t peak_memory_{0};
   size_t max_pool_size_;
   size_t wired_limit_{0};
-  bool relaxed_{true};
   size_t num_resources_{0};
   size_t resource_limit_{0};
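
The removed relaxed_ field is the kind of member flagged by clang's -Wunused-private-field (enabled by -Wall): private data that no member function ever reads. A minimal illustration with made-up names:

#include <cstddef>

class Alloc {
 public:
  size_t limit() const { return limit_; } // limit_ is read, so no warning
 private:
  size_t limit_{0};
  bool relaxed_{true}; // warning: private field 'relaxed_' is not used
};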
