
Commit dde38e0

Remove use of enum for constants
This was a useful trick back in the day, but in C++17 it is no longer needed. The constexpr declaration is clearer in its intent. See for instance: https://stackoverflow.com/questions/37259807/static-constexpr-int-vs-old-fashioned-enum-when-and-why
1 parent 27e2577 commit dde38e0
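
For context, a minimal sketch of the two styles involved here (hypothetical type names, not the actual osm2pgsql code): the old anonymous-enum trick for class-scope integral constants versus the C++17 static constexpr member the commit switches to.

#include <cstddef>

// Old style: an anonymous enum smuggles a compile-time constant into the
// class; it needs no storage and no out-of-class definition, but the
// constant has an unnamed enumeration type instead of a real integer type.
struct old_style_t
{
    enum { max_entries = 1000000 };
};

// C++17 style: the constant keeps its proper type (std::size_t) and, being
// implicitly inline, still needs no separate definition in a .cpp file.
struct new_style_t
{
    static constexpr std::size_t max_entries = 1000000;
};

static_assert(old_style_t::max_entries == new_style_t::max_entries);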

File tree

1 file changed: +28 −29 lines


src/db-copy.hpp

Lines changed: 28 additions & 29 deletions
@@ -15,6 +15,7 @@
 #include "pgsql-params.hpp"
 
 #include <cassert>
+#include <cstddef>
 #include <condition_variable>
 #include <deque>
 #include <future>
@@ -74,12 +75,11 @@ class db_target_descr_t
  */
 class db_deleter_by_id_t
 {
-    enum
-    {
-        // There is a trade-off here between sending as few DELETE SQL as
-        // possible and keeping the size of the deletable vector managable.
-        Max_entries = 1000000
-    };
+    /**
+     * There is a trade-off here between sending as few DELETE SQL as
+     * possible and keeping the size of the deletable vector managable.
+     */
+    static constexpr std::size_t Max_entries = 1000000;
 
 public:
     bool has_data() const noexcept { return !m_deletables.empty(); }
@@ -101,12 +101,11 @@ class db_deleter_by_id_t
  */
 class db_deleter_by_type_and_id_t
 {
-    enum
-    {
-        // There is a trade-off here between sending as few DELETE SQL as
-        // possible and keeping the size of the deletable vector managable.
-        Max_entries = 1000000
-    };
+    /**
+     * There is a trade-off here between sending as few DELETE SQL as
+     * possible and keeping the size of the deletable vector managable.
+     */
+    static constexpr std::size_t Max_entries = 1000000;
 
     struct item_t
     {
@@ -140,23 +139,23 @@ class db_deleter_by_type_and_id_t
 
 struct db_cmd_copy_t
 {
-    enum
-    {
-        /** Size of a single buffer with COPY data for Postgresql.
-         * This is a trade-off between memory usage and sending large chunks
-         * to speed up processing. Currently a one-size fits all value.
-         * Needs more testing and individual values per queue.
-         */
-        Max_buf_size = 10 * 1024 * 1024,
-        /** Maximum length of the queue with COPY data.
-         * In the usual case, PostgreSQL should be faster processing the
-         * data than it can be produced and there should only be one element
-         * in the queue. If PostgreSQL is slower, then the queue will always
-         * be full and it is better to keep the queue smaller to reduce memory
-         * usage. Current value is just assumed to be a reasonable trade off.
-         */
-        Max_buffers = 10
-    };
+    /**
+     * Size of a single buffer with COPY data for Postgresql.
+     * This is a trade-off between memory usage and sending large chunks
+     * to speed up processing. Currently a one-size fits all value.
+     * Needs more testing and individual values per queue.
+     */
+    static constexpr std::size_t Max_buf_size = 10 * 1024 * 1024;
+
+    /**
+     * Maximum length of the queue with COPY data.
+     * In the usual case, PostgreSQL should be faster processing the
+     * data than it can be produced and there should only be one element
+     * in the queue. If PostgreSQL is slower, then the queue will always
+     * be full and it is better to keep the queue smaller to reduce memory
+     * usage. Current value is just assumed to be a reasonable trade off.
+     */
+    static constexpr std::size_t Max_buffers = 10;
 
     /// Name of the target table for the copy operation
     std::shared_ptr<db_target_descr_t> target;
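
As a side note, a small standalone sketch (hypothetical names, not part of this diff) of why the workaround is obsolete: since C++17 a static constexpr data member is implicitly inline, so odr-using it, for example by binding it to a const reference, links without the out-of-class definition that older standards required.

#include <algorithm>
#include <cstddef>
#include <iostream>

struct buffer_limits_t
{
    static constexpr std::size_t max_buf_size = 10 * 1024 * 1024;
};

int main()
{
    std::size_t const wanted = 42 * 1024 * 1024;
    // std::min takes its arguments by const reference, which odr-uses
    // max_buf_size; before C++17 this needed a separate definition such as
    // "constexpr std::size_t buffer_limits_t::max_buf_size;" in a .cpp file.
    std::cout << std::min(wanted, buffer_limits_t::max_buf_size) << '\n';
}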
