Skip to content

Commit f05294a

Browse files
authored
Fix clang warnings. (dmlc#9447)
- Static function defined in a header (flagged as unused due to per-translation-unit visibility). - Implicitly-declared copy assignment operator is deprecated when a copy constructor is user-declared. - Unused lambda capture. - Applying std::move to a temporary prevents copy elision.
1 parent 819098a commit f05294a

File tree

5 files changed

+12
-29
lines changed

5 files changed

+12
-29
lines changed

include/xgboost/base.h

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -271,10 +271,11 @@ class GradientPairInt64 {
271271
GradientPairInt64() = default;
272272

273273
// Copy constructor if of same value type, marked as default to be trivially_copyable
274-
GradientPairInt64(const GradientPairInt64 &g) = default;
274+
GradientPairInt64(GradientPairInt64 const &g) = default;
275+
GradientPairInt64 &operator=(GradientPairInt64 const &g) = default;
275276

276-
XGBOOST_DEVICE T GetQuantisedGrad() const { return grad_; }
277-
XGBOOST_DEVICE T GetQuantisedHess() const { return hess_; }
277+
XGBOOST_DEVICE [[nodiscard]] T GetQuantisedGrad() const { return grad_; }
278+
XGBOOST_DEVICE [[nodiscard]] T GetQuantisedHess() const { return hess_; }
278279

279280
XGBOOST_DEVICE GradientPairInt64 &operator+=(const GradientPairInt64 &rhs) {
280281
grad_ += rhs.grad_;
@@ -323,17 +324,6 @@ using omp_ulong = dmlc::omp_ulong; // NOLINT
323324
using bst_omp_uint = dmlc::omp_uint; // NOLINT
324325
/*! \brief Type used for representing version number in binary form.*/
325326
using XGBoostVersionT = int32_t;
326-
327-
/*!
328-
* \brief define compatible keywords in g++
329-
* Used to support g++-4.6 and g++4.7
330-
*/
331-
#if DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__)
332-
#if __GNUC__ == 4 && __GNUC_MINOR__ < 8
333-
#define override
334-
#define final
335-
#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 8
336-
#endif // DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__)
337327
} // namespace xgboost
338328

339329
#endif // XGBOOST_BASE_H_

src/common/math.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -134,12 +134,6 @@ inline float LogSum(Iterator begin, Iterator end) {
134134
return mx + std::log(sum);
135135
}
136136

137-
// comparator functions for sorting pairs in descending order
138-
inline static bool CmpFirst(const std::pair<float, unsigned> &a,
139-
const std::pair<float, unsigned> &b) {
140-
return a.first > b.first;
141-
}
142-
143137
// Redefined here to workaround a VC bug that doesn't support overloading for integer
144138
// types.
145139
template <typename T>

src/data/iterative_dmatrix.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p,
114114
this->info_.num_row_ = accumulated_rows;
115115
this->info_.num_nonzero_ = nnz;
116116

117-
auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() {
117+
auto init_page = [this, &cuts, row_stride, accumulated_rows, get_device]() {
118118
if (!ellpack_) {
119119
// Should be put inside the while loop to protect against empty batch. In
120120
// that case device id is invalid.

src/metric/rank_metric.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,8 @@ struct EvalAMS : public MetricNoCache {
6868
const auto &h_preds = preds.ConstHostVector();
6969
common::ParallelFor(ndata, ctx_->Threads(),
7070
[&](bst_omp_uint i) { rec[i] = std::make_pair(h_preds[i], i); });
71-
common::Sort(ctx_, rec.begin(), rec.end(), common::CmpFirst);
71+
common::Sort(ctx_, rec.begin(), rec.end(),
72+
[](auto const& l, auto const& r) { return l.first > r.first; });
7273
auto ntop = static_cast<unsigned>(ratio_ * ndata);
7374
if (ntop == 0) ntop = ndata;
7475
const double br = 10.0;

src/predictor/gpu_predictor.cu

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ class DeviceModel {
344344
dh::safe_cuda(cudaSetDevice(gpu_id));
345345

346346
// Copy decision trees to device
347-
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
347+
tree_segments = HostDeviceVector<size_t>({}, gpu_id);
348348
auto& h_tree_segments = tree_segments.HostVector();
349349
h_tree_segments.reserve((tree_end - tree_begin) + 1);
350350
size_t sum = 0;
@@ -354,10 +354,8 @@ class DeviceModel {
354354
h_tree_segments.push_back(sum);
355355
}
356356

357-
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
358-
gpu_id));
359-
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
360-
RTreeNodeStat(), gpu_id));
357+
nodes = HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id);
358+
stats = HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), RTreeNodeStat(), gpu_id);
361359
auto d_nodes = nodes.DevicePointer();
362360
auto d_stats = stats.DevicePointer();
363361
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
@@ -371,7 +369,7 @@ class DeviceModel {
371369
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
372370
}
373371

374-
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
372+
tree_group = HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id);
375373
auto& h_tree_group = tree_group.HostVector();
376374
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
377375

@@ -435,7 +433,7 @@ struct ShapSplitCondition {
435433
bool is_missing_branch;
436434

437435
// Does this instance flow down this path?
438-
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
436+
[[nodiscard]] XGBOOST_DEVICE bool EvaluateSplit(float x) const {
439437
// is nan
440438
if (isnan(x)) {
441439
return is_missing_branch;

0 commit comments

Comments
 (0)