Skip to content

Commit b4792fb

Browse files
authored
Merge pull request #799 from mkstoyanov/more_cpp17_features
* added usage of std::string_view * better initialization of many structures * replaced old make-unique and switched many unique_ptr to optional
2 parents b56e151 + 7bb507d commit b4792fb

18 files changed

+140
-177
lines changed

SparseGrids/TasmanianSparseGrid.cpp

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -72,10 +72,10 @@ bool TasmanianSparseGrid::isDpcppEnabled(){
7272
#endif
7373
}
7474

75-
TasmanianSparseGrid::TasmanianSparseGrid() : acceleration(std::make_unique<AccelerationContext>()), using_dynamic_construction(false){}
75+
TasmanianSparseGrid::TasmanianSparseGrid() : acceleration(std::make_unique<AccelerationContext>()){}
7676

7777
TasmanianSparseGrid::TasmanianSparseGrid(const TasmanianSparseGrid &source) :
78-
acceleration(std::make_unique<AccelerationContext>()), using_dynamic_construction(false){
78+
acceleration(std::make_unique<AccelerationContext>()){
7979
copyGrid(&source);
8080
}
8181

@@ -160,7 +160,6 @@ void TasmanianSparseGrid::makeGlobalGrid(int dimensions, int outputs, int depth,
160160
clear();
161161
llimits = level_limits;
162162
base = GridGlobal(acceleration.get(), dimensions, outputs, depth, type, rule, anisotropic_weights, alpha, beta, custom_filename, llimits);
163-
//base = Utils::make_unique<GridGlobal>(acceleration.get(), dimensions, outputs, depth, type, rule, anisotropic_weights, alpha, beta, custom_filename, llimits);
164163
}
165164
void TasmanianSparseGrid::makeGlobalGrid(int dimensions, int outputs, int depth, TypeDepth type, CustomTabulated &&crule,
166165
const int *anisotropic_weights, const int *level_limits){
@@ -191,7 +190,7 @@ void TasmanianSparseGrid::makeSequenceGrid(int dimensions, int outputs, int dept
191190
if (outputs < 0) throw std::invalid_argument("ERROR: makeSequenceGrid() requires non-negative outputs");
192191
if (depth < 0) throw std::invalid_argument("ERROR: makeSequenceGrid() requires non-negative depth");
193192
if (!OneDimensionalMeta::isSequence(rule)){
194-
std::string message = "ERROR: makeSequenceGrid() is called with rule: " + IO::getRuleString(rule) + ", which is not a sequence rule";
193+
std::string message = "ERROR: makeSequenceGrid() is called with rule: " + std::string(IO::getRuleString(rule)) + ", which is not a sequence rule";
195194
throw std::invalid_argument(message);
196195
}
197196
size_t expected_aw_size = (OneDimensionalMeta::isTypeCurved(type)) ? 2*dimensions : dimensions;
@@ -217,7 +216,7 @@ void TasmanianSparseGrid::makeLocalPolynomialGrid(int dimensions, int outputs, i
217216
throw std::invalid_argument(message);
218217
}
219218
if (!OneDimensionalMeta::isLocalPolynomial(rule)){
220-
std::string message = "ERROR: makeLocalPolynomialGrid() is called with rule: " + IO::getRuleString(rule) + ", which is not a local polynomial rule";
219+
std::string message = "ERROR: makeLocalPolynomialGrid() is called with rule: " + std::string(IO::getRuleString(rule)) + ", which is not a local polynomial rule";
221220
throw std::invalid_argument(message);
222221
}
223222
if ((!level_limits.empty()) && (level_limits.size() != (size_t) dimensions)) throw std::invalid_argument("ERROR: makeLocalPolynomialGrid() requires level_limits with either 0 or dimensions entries");

SparseGrids/TasmanianSparseGrid.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2269,7 +2269,7 @@ class TasmanianSparseGrid{
22692269
// must not invalidate aliases when the grid is moved, must be destroyed last for sycl
22702270
std::unique_ptr<AccelerationContext> acceleration;
22712271

2272-
// contains all possible sparse grids with a null-grid (int) as the default
2272+
// contains all possible sparse grids with a null-grid std::monostate as the default
22732273
using grid_variant = std::variant<std::monostate,
22742274
GridGlobal, // index 1
22752275
GridSequence, // index 2
@@ -2283,7 +2283,7 @@ class TasmanianSparseGrid{
22832283
std::vector<int> conformal_asin_power;
22842284
std::vector<int> llimits;
22852285

2286-
bool using_dynamic_construction;
2286+
bool using_dynamic_construction = false;
22872287

22882288
mutable AccelerationDomainTransform acc_domain;
22892289
};

SparseGrids/TasmanianSparseGridWrapC.cpp

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@
3131
#ifndef __TASMANIAN_SPARSE_GRID_WRAPC_CPP
3232
#define __TASMANIAN_SPARSE_GRID_WRAPC_CPP
3333

34+
#include <cstdlib>
35+
3436
#include "TasmanianSparseGrid.hpp"
3537

3638
// ------------ C Interface for use with Python ctypes and potentially other C codes -------------- //
@@ -149,18 +151,22 @@ int tsgGetOrder(void *grid){ return ((TasmanianSparseGrid*) grid)->getOrder(); }
149151
int tsgGetNumDimensions(void *grid){ return ((TasmanianSparseGrid*) grid)->getNumDimensions(); }
150152
int tsgGetNumOutputs(void *grid){ return ((TasmanianSparseGrid*) grid)->getNumOutputs(); }
151153
char* tsgGetRule(void *grid){
152-
std::string cppstring = IO::getRuleString( ((TasmanianSparseGrid*) grid)->getRule() );
153-
char *cstring = new char[cppstring.size() + 1];
154-
for(size_t i=0; i<cppstring.size(); i++) cstring[i] = cppstring[i];
154+
std::string_view cppstring = IO::getRuleString( ((TasmanianSparseGrid*) grid)->getRule() );
155+
char *cstring = (char*) malloc((cppstring.size() + 1) * sizeof(char));
156+
std::copy_n(cppstring.begin(), cppstring.size(), cstring);
155157
cstring[cppstring.size()] = '\0';
156158
return cstring;
157159
}
158-
void tsgCopyRuleChars(void *grid, int buffer_size, char *name, int *num_actual){
159-
std::string cppstring = IO::getRuleString( ((TasmanianSparseGrid*) grid)->getRule() );
160-
size_t max_num = std::min((size_t) buffer_size - 1, cppstring.size());
160+
void tsgCopyRuleChars(void *grid, size_t buffer_size, char *name, size_t *num_actual){
161+
if (buffer_size == 0) {
162+
*num_actual = 0;
163+
return;
164+
}
165+
std::string_view cppstring = IO::getRuleString( ((TasmanianSparseGrid*) grid)->getRule() );
166+
size_t max_num = std::min(buffer_size - 1, cppstring.size());
161167
std::copy_n(cppstring.begin(), max_num, name);
162168
name[max_num] = '\0';
163-
*num_actual = (int) max_num;
169+
*num_actual = max_num;
164170
}
165171
const char* tsgGetCustomRuleDescription(void *grid){ return ((TasmanianSparseGrid*) grid)->getCustomRuleDescription(); }
166172

SparseGrids/tsgAcceleratedDataStructures.hpp

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -117,9 +117,9 @@ class GpuVector{
117117
}
118118

119119
//! \brief Default constructor, creates an empty (null) array.
120-
GpuVector() : num_entries(0), gpu_data(nullptr){}
120+
GpuVector() = default;
121121
//! \brief Construct a vector with \b count number of entries.
122-
GpuVector(AccelerationContext const *acc, size_t count) : num_entries(0), gpu_data(nullptr){ resize(acc, count); }
122+
GpuVector(AccelerationContext const *acc, size_t count){ resize(acc, count); }
123123

124124
/*!
125125
* \brief Same as \b GpuVector(dim1 * dim2), but guards against overflow.
@@ -129,11 +129,11 @@ class GpuVector{
129129
* and both integers are converted to size_t before multiplication which prevents overflow.
130130
* Note: the dimensions \b will \b not be stored, the underlying data is still one dimensional.
131131
*/
132-
GpuVector(AccelerationContext const *acc, int dim1, int dim2) : num_entries(0), gpu_data(nullptr){ resize(acc, Utils::size_mult(dim1, dim2)); }
132+
GpuVector(AccelerationContext const *acc, int dim1, int dim2){ resize(acc, Utils::size_mult(dim1, dim2)); }
133133
//! \brief Create a vector with size that matches \b cpu_data and copy the data to the GPU device.
134-
GpuVector(AccelerationContext const *acc, const std::vector<T> &cpu_data) : num_entries(0), gpu_data(nullptr){ load(acc, cpu_data); }
134+
GpuVector(AccelerationContext const *acc, const std::vector<T> &cpu_data){ load(acc, cpu_data); }
135135
//! \brief Construct a vector and load with date provided on to the cpu.
136-
GpuVector(AccelerationContext const *acc, int dim1, int dim2, T const *cpu_data) : num_entries(0), gpu_data(nullptr){ load(acc, Utils::size_mult(dim1, dim2), cpu_data); }
136+
GpuVector(AccelerationContext const *acc, int dim1, int dim2, T const *cpu_data){ load(acc, Utils::size_mult(dim1, dim2), cpu_data); }
137137
//! \brief Construct a vector by loading from a given range.
138138
template<typename IteratorLike>
139139
GpuVector(AccelerationContext const *acc, IteratorLike ibegin, IteratorLike iend) : GpuVector(){
@@ -222,10 +222,10 @@ class GpuVector{
222222
*/
223223
void load_internal(AccelerationContext const *acc, size_t count, const T* cpu_data);
224224

225-
size_t num_entries; // keep track of the size, update on every call that changes the gpu_data
226-
T *gpu_data; // the GPU array
225+
size_t num_entries = 0; // keep track of the size, update on every call that changes the gpu_data
226+
T *gpu_data = nullptr; // the GPU array
227227
#ifdef Tasmanian_ENABLE_DPCPP
228-
void* sycl_queue;
228+
void* sycl_queue = nullptr;
229229
#endif
230230
};
231231

@@ -779,7 +779,7 @@ struct AccelerationContext{
779779
*/
780780
struct InternalSyclQueue{
781781
//! \brief Default constructor, assume we are not in a testing mode.
782-
InternalSyclQueue() : use_testing(false){}
782+
InternalSyclQueue() = default;
783783
//! \brief Initialize the testing, in which case the internal queue would be used in place of a new queue.
784784
void init_testing(int gpuid);
785785
//! \brief Auto-converts to a non-owning std::unique_ptr.
@@ -788,7 +788,7 @@ struct InternalSyclQueue{
788788
HandleDeleter<AccHandle::Syclqueue>(false));
789789
}
790790
//! \brief Indicates whether this is a testing run.
791-
bool use_testing;
791+
bool use_testing = false;
792792
//! \brief Holds the internal sycl::queue for testing.
793793
std::unique_ptr<int, HandleDeleter<AccHandle::Syclqueue>> test_queue;
794794
};

SparseGrids/tsgEnumerates.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@
5353
#include <optional>
5454
#include <stdexcept>
5555
#include <string>
56+
#include <string_view>
5657
#include <type_traits>
5758
#include <utility>
5859
#include <variant>

SparseGrids/tsgGridFourier.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ GridFourier::GridFourier(AccelerationContext const *acc, GridFourier const *four
8787
max_power(fourier->max_power){
8888

8989
if (fourier->dynamic_values){
90-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(*fourier->dynamic_values);
90+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(*fourier->dynamic_values);
9191
if (num_outputs != fourier->num_outputs) dynamic_values->restrictData(ibegin, iend);
9292
}
9393
}
@@ -633,7 +633,7 @@ void GridFourier::evaluateHierarchicalFunctionsInternalGPU(const T gpu_x[], int
633633
}
634634
template<typename T> void GridFourier::loadGpuNodes() const{
635635
auto& ccache = getGpuCache<T>();
636-
if (!ccache) ccache = Utils::make_unique<CudaFourierData<T>>();
636+
if (!ccache) ccache = CudaFourierData<T>{};
637637
if (!ccache->num_nodes.empty()) return;
638638

639639
std::vector<int> num_nodes(num_dimensions);
@@ -660,7 +660,7 @@ void GridFourier::clearGpuNodes() const{
660660
}
661661
template<typename T> void GridFourier::loadGpuCoefficients() const{
662662
auto& ccache = getGpuCache<T>();
663-
if (!ccache) ccache = Utils::make_unique<CudaFourierData<T>>();
663+
if (!ccache) ccache = CudaFourierData<T>{};
664664
if (!ccache->real.empty()) return;
665665
int num_points = points.getNumIndexes();
666666
size_t num_coeff = Utils::size_mult(num_outputs, num_points);
@@ -850,7 +850,7 @@ void GridFourier::mergeRefinement(){
850850
}
851851

852852
void GridFourier::beginConstruction(){
853-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(num_dimensions, num_outputs);
853+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(num_dimensions, num_outputs);
854854
if (points.empty()){ // if we start dynamic construction from an empty grid
855855
for(int i=0; i<tensors.getNumIndexes(); i++){
856856
const int *t = tensors.getIndex(i);
@@ -869,9 +869,9 @@ void GridFourier::writeConstructionData(std::ostream &os, bool iomode) const{
869869
}
870870
void GridFourier::readConstructionData(std::istream &is, bool iomode){
871871
if (iomode == mode_ascii)
872-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_ascii_type());
872+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_ascii_type());
873873
else
874-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_binary_type());
874+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_binary_type());
875875
int max_level = dynamic_values->getMaxTensor();
876876
if (max_level + 1 > wrapper.getNumLevels())
877877
wrapper = OneDimensionalWrapper(max_level, rule_fourier, 0.0, 0.0);

SparseGrids/tsgGridFourier.hpp

Lines changed: 8 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
namespace TasGrid{
3737

3838
#ifndef __TASMANIAN_DOXYGEN_SKIP
39-
class GridFourier : public BaseCanonicalGrid {
39+
class GridFourier final : public BaseCanonicalGrid {
4040
public:
4141
GridFourier(AccelerationContext const *acc) : BaseCanonicalGrid(acc){};
4242
friend struct GridReaderVersion5<GridFourier>;
@@ -181,27 +181,21 @@ class GridFourier : public BaseCanonicalGrid {
181181

182182
std::vector<int> max_power;
183183

184-
std::unique_ptr<DynamicConstructorDataGlobal> dynamic_values;
185-
186184
template<typename T> void loadGpuNodes() const;
187185
template<typename T> void loadGpuCoefficients() const;
188-
inline std::unique_ptr<CudaFourierData<double>>& getGpuCacheOverload(double) const{ return gpu_cache; }
189-
inline std::unique_ptr<CudaFourierData<float>>& getGpuCacheOverload(float) const{ return gpu_cachef; }
190-
template<typename T> inline std::unique_ptr<CudaFourierData<T>>& getGpuCache() const{
186+
inline std::optional<CudaFourierData<double>>& getGpuCacheOverload(double) const{ return gpu_cache; }
187+
inline std::optional<CudaFourierData<float>>& getGpuCacheOverload(float) const{ return gpu_cachef; }
188+
template<typename T> inline std::optional<CudaFourierData<T>>& getGpuCache() const{
191189
return getGpuCacheOverload(static_cast<T>(0.0));
192190
}
193-
mutable std::unique_ptr<CudaFourierData<double>> gpu_cache;
194-
mutable std::unique_ptr<CudaFourierData<float>> gpu_cachef;
191+
mutable std::optional<CudaFourierData<double>> gpu_cache;
192+
mutable std::optional<CudaFourierData<float>> gpu_cachef;
193+
194+
std::unique_ptr<DynamicConstructorDataGlobal> dynamic_values;
195195
};
196196

197197
// Old version reader
198198
template<> struct GridReaderVersion5<GridFourier>{
199-
template<typename iomode> static std::unique_ptr<GridFourier> read(AccelerationContext const *acc, std::istream &is){
200-
std::unique_ptr<GridFourier> grid = Utils::make_unique<GridFourier>(acc);
201-
read<iomode>(is, grid.get());
202-
return grid;
203-
}
204-
205199
template<typename iomode> static void read(std::istream &is, GridFourier *grid) {
206200
grid->num_dimensions = IO::readNumber<iomode, int>(is);
207201
grid->num_outputs = IO::readNumber<iomode, int>(is);

SparseGrids/tsgGridGlobal.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ GridGlobal::GridGlobal(AccelerationContext const *acc, GridGlobal const *global,
140140
custom((global->rule == rule_customtabulated) ? global->custom : CustomTabulated()){
141141

142142
if (global->dynamic_values){
143-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(*global->dynamic_values);
143+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(*global->dynamic_values);
144144
if (num_outputs != global->num_outputs) dynamic_values->restrictData(ibegin, iend);
145145
}
146146
}
@@ -353,7 +353,7 @@ void GridGlobal::mergeRefinement(){
353353
}
354354

355355
void GridGlobal::beginConstruction(){
356-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(num_dimensions, num_outputs);
356+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(num_dimensions, num_outputs);
357357
if (points.empty()){ // if we start dynamic construction from an empty grid
358358
for(int i=0; i<tensors.getNumIndexes(); i++){
359359
const int *t = tensors.getIndex(i);
@@ -372,9 +372,9 @@ void GridGlobal::writeConstructionData(std::ostream &os, bool iomode) const{
372372
}
373373
void GridGlobal::readConstructionData(std::istream &is, bool iomode){
374374
if (iomode == mode_ascii)
375-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_ascii_type());
375+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_ascii_type());
376376
else
377-
dynamic_values = Utils::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_binary_type());
377+
dynamic_values = std::make_unique<DynamicConstructorDataGlobal>(is, num_dimensions, num_outputs, IO::mode_binary_type());
378378
int max_level = dynamic_values->getMaxTensor();
379379
if (max_level + 1 > wrapper.getNumLevels())
380380
wrapper = OneDimensionalWrapper(custom, max_level, rule, alpha, beta);
@@ -622,13 +622,13 @@ void GridGlobal::evaluateHierarchicalFunctionsGPU(const float gpu_x[], int cpu_n
622622
}
623623
template<typename T> void GridGlobal::loadGpuValues() const{
624624
auto& ccache = getGpuCache<T>();
625-
if (!ccache) ccache = Utils::make_unique<CudaGlobalData<T>>();
625+
if (!ccache) ccache = CudaGlobalData<T>{};
626626
if (ccache->values.empty()) ccache->values.load(acceleration, values.totalSize(), values.data());
627627
}
628628
void GridGlobal::clearGpuValues() const{ if (gpu_cache) gpu_cache->values.clear(); }
629629
template<typename T> void GridGlobal::loadGpuNodes() const{
630630
auto& ccache = getGpuCache<T>();
631-
if (!ccache) ccache = Utils::make_unique<CudaGlobalData<T>>();
631+
if (!ccache) ccache = CudaGlobalData<T>{};
632632
if (!ccache->nodes.empty()) return; // already loaded
633633
// data for stage 1 (Lagrange caching)
634634
ccache->nodes.load(acceleration, (OneDimensionalMeta::isNonNested(rule)) ? wrapper.getAllNodes() : wrapper.getUnique());

SparseGrids/tsgGridGlobal.hpp

Lines changed: 8 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@
3737
namespace TasGrid{
3838

3939
#ifndef __TASMANIAN_DOXYGEN_SKIP
40-
class GridGlobal : public BaseCanonicalGrid{
40+
class GridGlobal final : public BaseCanonicalGrid{
4141
public:
4242
GridGlobal(AccelerationContext const *acc) : BaseCanonicalGrid(acc), rule(rule_none), alpha(0.0), beta(0.0){}
4343
friend struct GridReaderVersion5<GridGlobal>;
@@ -162,29 +162,22 @@ class GridGlobal : public BaseCanonicalGrid{
162162

163163
CustomTabulated custom;
164164

165-
std::unique_ptr<DynamicConstructorDataGlobal> dynamic_values;
166-
167165
template<typename T> void loadGpuNodes() const;
168166
template<typename T> void loadGpuValues() const;
169-
inline std::unique_ptr<CudaGlobalData<double>>& getGpuCacheOverload(double) const{ return gpu_cache; }
170-
inline std::unique_ptr<CudaGlobalData<float>>& getGpuCacheOverload(float) const{ return gpu_cachef; }
171-
template<typename T> inline std::unique_ptr<CudaGlobalData<T>>& getGpuCache() const{
167+
inline std::optional<CudaGlobalData<double>>& getGpuCacheOverload(double) const{ return gpu_cache; }
168+
inline std::optional<CudaGlobalData<float>>& getGpuCacheOverload(float) const{ return gpu_cachef; }
169+
template<typename T> inline std::optional<CudaGlobalData<T>>& getGpuCache() const{
172170
return getGpuCacheOverload(static_cast<T>(0.0));
173171
}
174-
mutable std::unique_ptr<CudaGlobalData<double>> gpu_cache;
175-
mutable std::unique_ptr<CudaGlobalData<float>> gpu_cachef;
172+
mutable std::optional<CudaGlobalData<double>> gpu_cache;
173+
mutable std::optional<CudaGlobalData<float>> gpu_cachef;
174+
175+
std::unique_ptr<DynamicConstructorDataGlobal> dynamic_values;
176176
};
177177

178178
// Old version reader
179179
template<> struct GridReaderVersion5<GridGlobal>{
180-
template<typename iomode> static std::unique_ptr<GridGlobal> read(AccelerationContext const *acc, std::istream &is){
181-
std::unique_ptr<GridGlobal> grid = Utils::make_unique<GridGlobal>(acc);
182-
read<iomode>(is, grid.get());
183-
return grid;
184-
}
185-
186180
template<typename iomode> static void read(std::istream &is, GridGlobal *grid) {
187-
188181
grid->num_dimensions = IO::readNumber<iomode, int>(is);
189182
grid->num_outputs = IO::readNumber<iomode, int>(is);
190183
grid->alpha = IO::readNumber<iomode, double>(is);

0 commit comments

Comments
 (0)