pool.go (forked from processout/grpc-go-pool)

// Package grpcpool provides a pool of gRPC client connections.
package grpcpool

import (
	"context"
	"errors"
	"sync"
	"time"

	"google.golang.org/grpc"
)

var (
	// ErrClosed is the error returned when the client pool is closed
	ErrClosed = errors.New("grpc pool: client pool is closed")
	// ErrTimeout is the error returned when an operation on the pool times out
	ErrTimeout = errors.New("grpc pool: client pool timed out")
	// ErrAlreadyClosed is the error returned when the client conn was already closed
	ErrAlreadyClosed = errors.New("grpc pool: the connection was already closed")
	// ErrFullPool is the error returned when a ClientConn is closed into a full pool
	ErrFullPool = errors.New("grpc pool: closing a ClientConn into a full pool")
)

// Factory is a function type that creates a gRPC client connection
type Factory func() (*grpc.ClientConn, error)

// FactoryWithContext is a function type that creates a gRPC client connection
// and accepts a context, which is passed through from the Get or
// NewWithContext method.
type FactoryWithContext func(context.Context) (*grpc.ClientConn, error)

// Pool is the gRPC client pool
type Pool struct {
	clients         chan *ClientConn
	factory         FactoryWithContext
	idleTimeout     time.Duration
	maxLifeDuration time.Duration
	mu              sync.RWMutex
}

// ClientConn is the wrapper for a gRPC client connection
type ClientConn struct {
	*grpc.ClientConn
	pool          *Pool
	timeUsed      time.Time
	timeInitiated time.Time
	unhealthy     bool
	mu            sync.Mutex
}

// New creates a new client pool with the given initial and maximum capacity,
// and the timeout for idle clients. It returns an error if the initial
// clients could not be created.
func New(factory Factory, init, capacity int, idleTimeout time.Duration,
	maxLifeDuration ...time.Duration) (*Pool, error) {
	return NewWithContext(context.Background(),
		func(ctx context.Context) (*grpc.ClientConn, error) { return factory() },
		init, capacity, idleTimeout, maxLifeDuration...)
}

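// A minimal usage sketch (illustrative only: the target address and the
// insecure dial option are assumptions, not part of this package):
//
//	pool, err := grpcpool.New(func() (*grpc.ClientConn, error) {
//		return grpc.Dial("localhost:50051", grpc.WithInsecure())
//	}, 2, 10, time.Minute)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer pool.Close()
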
// NewWithContext creates a new client pool with the given initial and maximum
// capacity, and the timeout for idle clients. The context is passed to the
// factory while the initial clients are created. It returns an error if the
// initial clients could not be created.
func NewWithContext(ctx context.Context, factory FactoryWithContext, init, capacity int, idleTimeout time.Duration,
	maxLifeDuration ...time.Duration) (*Pool, error) {
	if capacity <= 0 {
		capacity = 1
	}
	if init < 0 {
		init = 0
	}
	if init > capacity {
		init = capacity
	}
	p := &Pool{
		clients:     make(chan *ClientConn, capacity),
		factory:     factory,
		idleTimeout: idleTimeout,
	}
	if len(maxLifeDuration) > 0 {
		p.maxLifeDuration = maxLifeDuration[0]
	}
	for i := 0; i < init; i++ {
		c, err := factory(ctx)
		if err != nil {
			return nil, err
		}
		p.clients <- &ClientConn{
			ClientConn:    c,
			pool:          p,
			timeUsed:      time.Now(),
			timeInitiated: time.Now(),
		}
	}
	// Fill the rest of the pool with empty clients
	for i := 0; i < capacity-init; i++ {
		p.clients <- &ClientConn{
			pool: p,
		}
	}
	return p, nil
}

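// The context-aware variant can thread the caller's context into the dial,
// for example (address and options are again assumptions):
//
//	pool, err := grpcpool.NewWithContext(ctx,
//		func(ctx context.Context) (*grpc.ClientConn, error) {
//			return grpc.DialContext(ctx, "localhost:50051", grpc.WithInsecure())
//		}, 2, 10, time.Minute)
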
func (p *Pool) getClients() <-chan *ClientConn {
	if p == nil {
		return nil
	}
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.clients
}

// Close empties the pool, calling Close on all of its clients.
// It is safe to call Close while there are outstanding clients:
// the pool channel is closed, and subsequent calls to Get return ErrClosed.
func (p *Pool) Close() {
	if p == nil {
		return
	}
	p.mu.Lock()
	clients := p.clients
	p.clients = nil
	p.mu.Unlock()
	if clients == nil {
		return
	}
	close(clients)
	for client := range clients {
		if client.ClientConn == nil {
			continue
		}
		client.ClientConn.Close()
	}
}

// IsClosed returns true if the client pool is closed.
func (p *Pool) IsClosed() bool {
	return p == nil || p.getClients() == nil
}

// put is an internal method that returns a client to the pool
func (p *Pool) put(client *ClientConn) error {
	if p == nil {
		return ErrClosed
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	select {
	case p.clients <- client:
		return nil
	default:
		return ErrFullPool
	}
}

// Get returns the next available client. If the client it receives carries no
// connection, or the connection has been idle for longer than idleTimeout, a
// new connection is created using the factory. Get blocks until a client
// becomes available or the context is done.
func (p *Pool) Get(ctx context.Context) (*ClientConn, error) {
	clients := p.getClients()
	if clients == nil {
		return nil, ErrClosed
	}
	var wrapper *ClientConn
	select {
	case wrapper = <-clients:
		// All good
	case <-ctx.Done():
		return nil, ErrTimeout // returning ctx.Err() here would be more informative
	}
	// If the wrapper was idle too long, close the connection and create a new
	// one. It's safe to assume that there isn't any newer client, as the
	// client we fetched is the first in the channel
	idleTimeout := p.idleTimeout
	if wrapper.ClientConn != nil && idleTimeout > 0 &&
		wrapper.timeUsed.Add(idleTimeout).Before(time.Now()) {
		wrapper.ClientConn.Close()
		wrapper.ClientConn = nil
	}
	var err error
	if wrapper.ClientConn == nil {
		wrapper.ClientConn, err = p.factory(ctx)
		if err != nil {
			// If there was an error, put a placeholder client back into the
			// channel so the pool keeps its capacity
			go p.put(&ClientConn{
				pool: p,
			})
		}
		// This is a new connection, reset its initiated time
		wrapper.timeInitiated = time.Now()
	}
	return wrapper, err
}

// Unhealthy marks the client conn as unhealthy, so that the connection
// gets reset when closed
func (c *ClientConn) Unhealthy() {
	if c == nil {
		return
	}
	c.mu.Lock()
	c.unhealthy = true
	c.mu.Unlock()
}

// Close returns a ClientConn to the pool. It is safe to call multiple times,
// but it returns ErrAlreadyClosed after the first call
func (c *ClientConn) Close() error {
	if c == nil {
		return nil
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.ClientConn == nil {
		return ErrAlreadyClosed
	}
	if c.pool.IsClosed() {
		return ErrClosed
	}
	// If the wrapper connection has become too old, we want to recycle it. To
	// clarify the logic: if the sum of the initialization time and the max
	// life duration is before Now(), the initialization is so old that even
	// adding the maximum duration cannot bring it into the future. This sum
	// therefore corresponds to the cut-off point: if it's in the future we
	// still have time, if it's in the past the connection is too old
	maxDuration := c.pool.maxLifeDuration
	if maxDuration > 0 && c.timeInitiated.Add(maxDuration).Before(time.Now()) {
		c.unhealthy = true
	}
	// We're cloning the wrapper so we can set ClientConn to nil in the one
	// used by the user
	wrapper := &ClientConn{
		pool:       c.pool,
		ClientConn: c.ClientConn,
		timeUsed:   time.Now(),
	}
	if c.unhealthy {
		wrapper.ClientConn.Close()
		wrapper.ClientConn = nil
	} else {
		wrapper.timeInitiated = c.timeInitiated
	}
	if err := c.pool.put(wrapper); err != nil {
		return err
	}
	c.ClientConn = nil // Mark as closed
	return nil
}

// Capacity returns the maximum number of clients the pool can hold
func (p *Pool) Capacity() int {
	if p.IsClosed() {
		return 0
	}
	return cap(p.getClients())
}

// Available returns the number of currently unused clients
func (p *Pool) Available() int {
	if p.IsClosed() {
		return 0
	}
	return len(p.getClients())
}