-
Notifications
You must be signed in to change notification settings - Fork 15
/
redhub.go
180 lines (153 loc) · 5.07 KB
/
redhub.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
package redhub
import (
"bytes"
"sync"
"time"
"github.com/IceFireDB/redhub/pkg/resp"
"github.com/panjf2000/gnet"
)
// Action is the instruction a user callback returns to tell redhub what to
// do with the connection (or the whole server) after the event is handled.
type Action int

const (
	// None indicates that no action should occur following an event.
	None Action = iota
	// Close closes the connection.
	Close
	// Shutdown shutdowns the server.
	Shutdown
)
// Conn wraps the underlying gnet.Conn so user callbacks receive a
// redhub-owned type; all gnet.Conn methods are promoted via embedding.
type Conn struct {
	gnet.Conn
}
// Options configures the server; each field maps one-to-one onto the
// corresponding field of gnet.Options (see ListendAndServe).
type Options struct {
	// Multicore indicates whether the server will be effectively created with multi-cores, if so,
	// then you must take care with synchronizing memory between all event callbacks, otherwise,
	// it will run the server with single thread. The number of threads in the server will be automatically
	// assigned to the value of logical CPUs usable by the current process.
	Multicore bool

	// LockOSThread is used to determine whether each I/O event-loop is associated to an OS thread, it is useful when you
	// need some kind of mechanisms like thread local storage, or invoke certain C libraries (such as graphics lib: GLib)
	// that require thread-level manipulation via cgo, or want all I/O event-loops to actually run in parallel for a
	// potential higher performance.
	LockOSThread bool

	// ReadBufferCap is the maximum number of bytes that can be read from the client when the readable event comes.
	// The default value is 64KB, it can be reduced to avoid starving subsequent client connections.
	//
	// Note that ReadBufferCap will be always converted to the least power of two integer value greater than
	// or equal to its real amount.
	ReadBufferCap int

	// LB represents the load-balancing algorithm used when assigning new connections.
	LB gnet.LoadBalancing

	// NumEventLoop is set up to start the given number of event-loop goroutine.
	// Note: Setting up NumEventLoop will override Multicore.
	NumEventLoop int

	// ReusePort indicates whether to set up the SO_REUSEPORT socket option.
	ReusePort bool

	// Ticker indicates whether the ticker has been set up.
	Ticker bool

	// TCPKeepAlive sets up a duration for (SO_KEEPALIVE) socket option.
	TCPKeepAlive time.Duration

	// TCPNoDelay controls whether the operating system should delay
	// packet transmission in hopes of sending fewer packets (Nagle's algorithm).
	//
	// The default is true (no delay), meaning that data is sent
	// as soon as possible after a Write.
	TCPNoDelay gnet.TCPSocketOpt

	// SocketRecvBuffer sets the maximum socket receive buffer in bytes.
	SocketRecvBuffer int

	// SocketSendBuffer sets the maximum socket send buffer in bytes.
	SocketSendBuffer int

	// Codec encodes and decodes TCP stream.
	Codec gnet.ICodec
}
// NewRedHub builds a redHub event handler wired to the three user callbacks:
// onOpened fires for each new connection, onClosed for each disconnect, and
// handler for every complete RESP command. The result is passed to
// ListendAndServe to start serving.
func NewRedHub(
	onOpened func(c *Conn) (out []byte, action Action),
	onClosed func(c *Conn, err error) (action Action),
	handler func(cmd resp.Command, out []byte) ([]byte, Action),
) *redHub {
	hub := &redHub{
		redHubBufMap: make(map[gnet.Conn]*connBuffer),
		connSync:     sync.RWMutex{},
	}
	hub.onOpened = onOpened
	hub.onClosed = onClosed
	hub.handler = handler
	return hub
}
// redHub implements gnet's event interface (via the embedded
// *gnet.EventServer) and dispatches connection/command events to the
// user-supplied callbacks. Construct it with NewRedHub.
type redHub struct {
	*gnet.EventServer
	// User callbacks; see NewRedHub for their contracts.
	onOpened func(c *Conn) (out []byte, action Action)
	onClosed func(c *Conn, err error) (action Action)
	handler  func(cmd resp.Command, out []byte) ([]byte, Action)
	// redHubBufMap holds one parse buffer per live connection;
	// guarded by connSync.
	redHubBufMap map[gnet.Conn]*connBuffer
	connSync     sync.RWMutex
}
// connBuffer accumulates the raw bytes of a partially received RESP
// command (buf) and the parsed-but-not-yet-executed commands (command)
// for a single connection.
type connBuffer struct {
	buf     bytes.Buffer
	command []resp.Command
}
// OnOpened fires when a new connection is established. It allocates the
// per-connection parse buffer and forwards the event to the user's
// onOpened callback.
//
// Fix: the original discarded both return values of rs.onOpened, so a
// callback could neither emit an initial payload (out) nor request that
// the connection be closed or the server shut down. Both are now
// propagated to gnet.
func (rs *redHub) OnOpened(c gnet.Conn) (out []byte, action gnet.Action) {
	rs.connSync.Lock()
	defer rs.connSync.Unlock()
	rs.redHubBufMap[c] = new(connBuffer)
	var act Action
	out, act = rs.onOpened(&Conn{Conn: c})
	switch act {
	case Close:
		action = gnet.Close
	case Shutdown:
		action = gnet.Shutdown
	}
	return
}
// OnClosed fires when a connection is closed (err is the close reason, if
// any). It releases the per-connection buffer and notifies the user's
// onClosed callback.
//
// Fix: the original discarded the Action returned by rs.onClosed, so a
// callback returning Shutdown was silently ignored; it is now translated
// to the corresponding gnet action.
func (rs *redHub) OnClosed(c gnet.Conn, err error) (action gnet.Action) {
	rs.connSync.Lock()
	defer rs.connSync.Unlock()
	delete(rs.redHubBufMap, c)
	switch rs.onClosed(&Conn{Conn: c}, err) {
	case Close:
		action = gnet.Close
	case Shutdown:
		action = gnet.Shutdown
	}
	return
}
// React fires when the connection has readable data. It appends the frame
// to the connection's parse buffer, extracts complete RESP commands, and —
// once no trailing partial command remains — runs each queued command
// through the user handler, accumulating the replies in out.
//
// Fix: the status switch handled Close but silently dropped Shutdown, so a
// handler asking for server shutdown was ignored; it now maps to
// gnet.Shutdown.
func (rs *redHub) React(frame []byte, c gnet.Conn) (out []byte, action gnet.Action) {
	// Read lock is enough here: we only look up the map; the connBuffer
	// itself is only ever touched by this connection's event loop.
	rs.connSync.RLock()
	defer rs.connSync.RUnlock()
	cb, ok := rs.redHubBufMap[c]
	if !ok {
		out = resp.AppendError(out, "ERR Client is closed")
		return
	}
	cb.buf.Write(frame)
	cmds, lastbyte, err := resp.ReadCommands(cb.buf.Bytes())
	if err != nil {
		// Protocol error: report it to the client and bail out.
		out = resp.AppendError(out, "ERR "+err.Error())
		return
	}
	cb.command = append(cb.command, cmds...)
	cb.buf.Reset()
	if len(lastbyte) == 0 {
		// Every buffered command is complete; drain the queue in order.
		var status Action
		for len(cb.command) > 0 {
			cmd := cb.command[0]
			if len(cb.command) == 1 {
				cb.command = nil
			} else {
				cb.command = cb.command[1:]
			}
			out, status = rs.handler(cmd, out)
			switch status {
			case Close:
				action = gnet.Close
			case Shutdown:
				// Previously ignored: honor a shutdown request.
				action = gnet.Shutdown
			}
		}
	} else {
		// Keep the trailing partial command for the next frame.
		cb.buf.Write(lastbyte)
	}
	return
}
// ListendAndServe starts a blocking gnet server on addr (e.g.
// "tcp://:6380"), translating every redhub Option onto its gnet
// counterpart and using rh as the event handler. It returns whatever error
// gnet.Serve returns.
//
// NOTE(review): the name carries a typo ("Listend") but is exported and
// kept as-is for backward compatibility.
func ListendAndServe(addr string, options Options, rh *redHub) error {
	gnetOpts := gnet.Options{
		Multicore:        options.Multicore,
		LockOSThread:     options.LockOSThread,
		ReadBufferCap:    options.ReadBufferCap,
		LB:               options.LB,
		NumEventLoop:     options.NumEventLoop,
		ReusePort:        options.ReusePort,
		Ticker:           options.Ticker,
		TCPKeepAlive:     options.TCPKeepAlive,
		TCPNoDelay:       options.TCPNoDelay,
		SocketRecvBuffer: options.SocketRecvBuffer,
		SocketSendBuffer: options.SocketSendBuffer,
		Codec:            options.Codec,
	}
	return gnet.Serve(rh, addr, gnet.WithOptions(gnetOpts))
}