-
Notifications
You must be signed in to change notification settings - Fork 639
Expand file tree
/
Copy pathconn_pool.go
More file actions
179 lines (145 loc) · 3.23 KB
/
conn_pool.go
File metadata and controls
179 lines (145 loc) · 3.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
package clickhouse
import (
"context"
"errors"
"sync"
"time"
"github.com/ClickHouse/clickhouse-go/v2/internal/circular"
)
// errQueueEmpty is returned by connPool.Get when the underlying circular
// queue has no connections to hand out.
var errQueueEmpty = errors.New("clickhouse: connection pool queue is empty")
// connPool is a fixed-capacity pool of idle connections with a maximum
// per-connection lifetime. A background goroutine (runDrainPool) periodically
// evicts expired connections.
type connPool struct {
	mu              sync.RWMutex                      // guards conns; also serializes close with in-flight Get/Put
	conns           *circular.Queue[nativeTransport]  // idle connections, fixed capacity
	ticker          *time.Ticker                      // fires the periodic expired-connection sweep
	finish          chan struct{}                     // closed by Close to stop runDrainPool
	finished        chan struct{}                     // closed by runDrainPool on exit; doubles as the "pool closed" flag (see closed())
	maxConnLifetime time.Duration                     // connections older than this are closed instead of reused
}
// newConnPool builds a pool holding at most capacity idle connections, each
// reusable for at most lifetime, and starts the background drain goroutine.
//
// NOTE(review): time.NewTicker panics for a non-positive duration, so lifetime
// must be > 0 — confirm callers guarantee this.
func newConnPool(lifetime time.Duration, capacity int) *connPool {
	p := &connPool{
		maxConnLifetime: lifetime,
		conns:           circular.New[nativeTransport](capacity),
		ticker:          time.NewTicker(lifetime),
		finish:          make(chan struct{}),
		finished:        make(chan struct{}),
	}
	go p.runDrainPool()
	return p
}
// Len reports how many idle connections the pool currently holds.
func (i *connPool) Len() int {
	i.mu.RLock()
	n := i.conns.Len()
	i.mu.RUnlock()
	return n
}
// Cap reports the maximum number of idle connections the pool can hold.
func (i *connPool) Cap() int {
	i.mu.RLock()
	c := i.conns.Cap()
	i.mu.RUnlock()
	return c
}
// Get pulls an idle, non-expired connection from the pool.
// It returns ErrConnectionClosed if the pool has been closed,
// errQueueEmpty if no idle connections remain, or the context's
// cause if ctx is cancelled while scanning. Expired connections
// encountered along the way are closed and skipped.
func (i *connPool) Get(ctx context.Context) (nativeTransport, error) {
	i.mu.Lock()
	defer i.mu.Unlock()

	// Re-check after acquiring the lock: the pool may have closed while
	// we waited for it. While we hold the lock it cannot close under us.
	if i.closed() {
		return nil, ErrConnectionClosed
	}

	// Keep pulling until we find a live connection, the queue runs dry,
	// or the caller's context is cancelled.
	for ctx.Err() == nil {
		conn, ok := i.conns.Pull()
		if !ok {
			// Nothing left to hand out.
			return nil, errQueueEmpty
		}
		if i.isExpired(conn) {
			conn.close()
			continue
		}
		return conn, nil
	}
	return nil, context.Cause(ctx)
}
// Put returns a connection to the pool. The pool takes ownership: a
// connection that is expired, marked bad, arrives after the pool has been
// closed, or does not fit in the full queue is closed rather than re-queued.
func (i *connPool) Put(conn nativeTransport) {
	// Cheap checks first, outside the lock. Order matters: isBad is only
	// consulted for non-expired connections, as before.
	if i.isExpired(conn) || conn.isBad() {
		conn.close()
		return
	}

	i.mu.Lock()
	defer i.mu.Unlock()

	if i.closed() {
		// Fix: previously the connection was silently dropped here without
		// being closed, leaking the underlying transport. The pool owns
		// connections handed to Put, so it must close what it cannot keep.
		conn.close()
		return
	}

	if !i.conns.Push(conn) {
		// Queue is at capacity; drop the connection.
		conn.close()
	}
}
// Close shuts the pool down: it stops the background drain goroutine, waits
// for it to exit, then closes every connection still queued. It is safe to
// call concurrently and repeatedly; late callers wait for the shutdown to
// complete and return nil.
func (i *connPool) Close() error {
	i.mu.Lock()
	select {
	case <-i.finish:
		// Shutdown already initiated by another caller; just wait for the
		// drain goroutine to finish.
		i.mu.Unlock()
		<-i.finished
		return nil
	default:
	}
	close(i.finish)
	// Fix: release the lock BEFORE waiting on i.finished. runDrainPool may
	// already be blocked in i.mu.Lock() inside its ticker branch, and it can
	// only observe i.finish (and eventually close i.finished) after it gets
	// the lock — waiting here while still holding i.mu would deadlock.
	i.mu.Unlock()
	<-i.finished

	i.mu.Lock()
	defer i.mu.Unlock()
	// The pool now reads as closed (i.finished is closed), so this drains
	// every remaining connection, including any Put between the signal and
	// the goroutine's exit.
	i.drainPool()
	return nil
}
// closed reports whether the pool has finished shutting down, i.e. the drain
// goroutine has exited and closed i.finished. Non-blocking.
func (i *connPool) closed() bool {
	select {
	case <-i.finished:
	default:
		return false
	}
	return true
}
// runDrainPool is the pool's background goroutine. On every ticker fire it
// sweeps expired connections out of the queue; when Close signals via
// i.finish it stops the ticker, closes i.finished (marking the pool closed),
// and returns.
func (i *connPool) runDrainPool() {
	defer func() {
		i.ticker.Stop()
		close(i.finished)
	}()
	for {
		select {
		case <-i.finish:
			return
		case <-i.ticker.C:
			i.mu.Lock()
			i.drainPool()
			i.mu.Unlock()
		}
	}
}
// drainPool evicts connections from the queue and closes them. While the
// pool is live it removes only expired connections; once the pool has shut
// down it empties the queue entirely. Callers must hold i.mu.
func (i *connPool) drainPool() {
	if !i.closed() {
		// Live pool: cull only connections past their lifetime.
		for conn := range i.conns.DeleteFunc(i.isExpired) {
			conn.close()
		}
		return
	}
	// Pool is shut down: close everything.
	for conn := range i.conns.Clear() {
		conn.close()
	}
}
// isExpired reports whether conn has reached or passed its expiry time.
func (i *connPool) isExpired(conn nativeTransport) bool {
	// time.Since(t) >= 0 is equivalent to !time.Now().Before(t).
	return time.Since(i.expires(conn)) >= 0
}
// expires returns the instant at which conn stops being reusable:
// its connect time plus the pool's maximum connection lifetime.
func (i *connPool) expires(conn nativeTransport) time.Time {
	connectedAt := conn.connectedAtTime()
	return connectedAt.Add(i.maxConnLifetime)
}