diff --git a/control/control_plane.go b/control/control_plane.go
index 8f1fdf7a3..40f521c95 100644
--- a/control/control_plane.go
+++ b/control/control_plane.go
@@ -221,6 +221,9 @@ func NewControlPlane(
 	if err = core.setupSkPidMonitor(); err != nil {
 		log.WithError(err).Warnln("cgroup2 is not enabled; pname routing cannot be used")
 	}
+	if err = core.setupLocalTcpFastRedirect(); err != nil {
+		log.WithError(err).Warnln("failed to set up local tcp fast redirect")
+	}
 	for _, ifname := range global.WanInterface {
 		if err = core.bindWan(ifname, global.AutoConfigKernelParameter); err != nil {
 			return nil, fmt.Errorf("bindWan: %v: %w", ifname, err)
diff --git a/control/control_plane_core.go b/control/control_plane_core.go
index 0c2176f94..f73edfa15 100644
--- a/control/control_plane_core.go
+++ b/control/control_plane_core.go
@@ -15,6 +15,7 @@ import (
 	"sync"
 
 	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/link"
 	ciliumLink "github.com/cilium/ebpf/link"
 	"github.com/daeuniverse/dae/common"
 	"github.com/daeuniverse/dae/common/consts"
@@ -382,6 +383,32 @@ func (c *controlPlaneCore) setupSkPidMonitor() error {
 	return nil
 }
 
+func (c *controlPlaneCore) setupLocalTcpFastRedirect() (err error) {
+	cgroupPath, err := detectCgroupPath()
+	if err != nil {
+		return
+	}
+	cg, err := link.AttachCgroup(link.CgroupOptions{
+		Path: cgroupPath,
+		Program: c.bpf.LocalTcpSockops, // todo@gray: rename
+		Attach: ebpf.AttachCGroupSockOps,
+	})
+	if err != nil {
+		return fmt.Errorf("AttachCgroupSockOps: %w", err)
+	}
+	c.deferFuncs = append(c.deferFuncs, cg.Close)
+
+	if err = link.RawAttachProgram(link.RawAttachProgramOptions{
+		Target: c.bpf.FastSock.FD(),
+		Program: c.bpf.SkMsgFastRedirect,
+		Attach: ebpf.AttachSkMsgVerdict,
+	}); err != nil {
+		return fmt.Errorf("AttachSkMsgVerdict: %w", err)
+	}
+	return nil
+
+}
+
 func (c *controlPlaneCore) bindWan(ifname string, autoConfigKernelParameter bool) error {
 	return c._bindWan(ifname)
 }
diff --git a/control/kern/tproxy.c b/control/kern/tproxy.c
index 709c5fabc..369618c22 100644
--- a/control/kern/tproxy.c
+++ b/control/kern/tproxy.c
@@ -16,7 +16,7 @@
 // #define __DEBUG_ROUTING
 // #define __PRINT_ROUTING_RESULT
 // #define __PRINT_SETUP_PROCESS_CONNNECTION
-// #define __DEBUG
+#define __DEBUG
 // #define __UNROLL_ROUTE_LOOP
 
 #ifndef __DEBUG
@@ -193,6 +193,17 @@ struct {
 	__uint(pinning, LIBBPF_PIN_BY_NAME);
 } routing_tuples_map SEC(".maps");
 
+/* Sockets in the fast_sock map are used for fast-redirecting via
+ * sk_msg/fast_redirect. Sockets are automatically deleted from the map once
+ * closed, so we don't need to worry about stale entries.
+ */
+struct {
+	__uint(type, BPF_MAP_TYPE_SOCKHASH);
+	__type(key, struct tuples_key);
+	__type(value, __u64);
+	__uint(max_entries, 65535);
+} fast_sock SEC(".maps");
+
 // Link to type:
 #define LinkType_None 0
 #define LinkType_Ethernet 1
@@ -1594,4 +1605,83 @@ int tproxy_wan_cg_sendmsg6(struct bpf_sock_addr *ctx)
 	return 1;
 }
 
+SEC("sockops")
+int local_tcp_sockops(struct bpf_sock_ops *skops)
+{
+	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
+	__u32 pid = BPF_CORE_READ(task, pid);
+
+	/* Only local TCP connections have non-zero pids. */
+	if (pid == 0)
+		return 0;
+
+	struct tuples_key tuple = {};
+
+	tuple.l4proto = IPPROTO_TCP;
+	tuple.sport = bpf_htonl(skops->local_port) >> 16;
+	tuple.dport = skops->remote_port >> 16;
+	tuple.sip.u6_addr32[2] = bpf_htonl(0x0000ffff);
+	tuple.sip.u6_addr32[3] = skops->local_ip4;
+	tuple.dip.u6_addr32[2] = bpf_htonl(0x0000ffff);
+	tuple.dip.u6_addr32[3] = skops->remote_ip4;
+
+	switch (skops->op) {
+	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: // dae sockets
+	{
+		struct tuples_key rev_tuple = {};
+
+		rev_tuple.l4proto = IPPROTO_TCP;
+		rev_tuple.sport = tuple.dport;
+		rev_tuple.dport = tuple.sport;
+		__builtin_memcpy(&rev_tuple.sip, &tuple.dip, IPV6_BYTE_LENGTH);
+		__builtin_memcpy(&rev_tuple.dip, &tuple.sip, IPV6_BYTE_LENGTH);
+
+		if (!bpf_map_lookup_elem(&routing_tuples_map, &rev_tuple))
+			break;
+
+		if (!bpf_sock_hash_update(skops, &fast_sock, &tuple, BPF_ANY))
+			bpf_printk("fast_sock added: %pI4:%lu -> %pI4:%lu",
+				   &tuple.sip.u6_addr32[3], bpf_ntohs(tuple.sport),
+				   &tuple.dip.u6_addr32[3], bpf_ntohs(tuple.dport));
+		break;
+	}
+
+	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: // local client sockets
+		if (!bpf_map_lookup_elem(&routing_tuples_map, &tuple))
+			break;
+
+		if (!bpf_sock_hash_update(skops, &fast_sock, &tuple, BPF_ANY))
+			bpf_printk("fast_sock added: %pI4:%lu -> %pI4:%lu",
+				   &tuple.sip.u6_addr32[3], bpf_ntohs(tuple.sport),
+				   &tuple.dip.u6_addr32[3], bpf_ntohs(tuple.dport));
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+SEC("sk_msg/fast_redirect")
+int sk_msg_fast_redirect(struct sk_msg_md *msg)
+{
+	struct tuples_key rev_tuple = {};
+
+	rev_tuple.l4proto = IPPROTO_TCP;
+	rev_tuple.sport = msg->remote_port >> 16;
+	rev_tuple.dport = bpf_htonl(msg->local_port) >> 16;
+	rev_tuple.sip.u6_addr32[2] = bpf_htonl(0x0000ffff);
+	rev_tuple.sip.u6_addr32[3] = msg->remote_ip4;
+	rev_tuple.dip.u6_addr32[2] = bpf_htonl(0x0000ffff);
+	rev_tuple.dip.u6_addr32[3] = msg->local_ip4;
+
+	if (bpf_msg_redirect_hash(msg, &fast_sock, &rev_tuple, BPF_F_INGRESS) == SK_PASS)
+		bpf_printk("tcp fast redirect: %pI4:%lu -> %pI4:%lu",
+			   &rev_tuple.sip.u6_addr32[3], bpf_ntohs(rev_tuple.sport),
+			   &rev_tuple.dip.u6_addr32[3], bpf_ntohs(rev_tuple.dport));
+
+	return SK_PASS;
+}
+
 SEC("license")
 const char __license[] = "Dual BSD/GPL";
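For readers unfamiliar with the sockops/sk_msg attach flow used above, here is a minimal standalone loader sketch built on github.com/cilium/ebpf. It is not part of the patch: the object path "tproxy.o", the cgroup2 mount point, and the collection-based program/map lookups are assumptions for illustration only; dae itself goes through its generated bpf2go bindings (c.bpf.*) and detectCgroupPath() as shown in setupLocalTcpFastRedirect above.

```go
// Standalone sketch: attach a sockops program to the cgroup2 root and an
// sk_msg verdict program to a SOCKHASH map, mirroring the patch's flow.
// Object path, cgroup path and section/map names are assumptions.
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func main() {
	// Load the compiled object containing local_tcp_sockops,
	// sk_msg_fast_redirect and the fast_sock SOCKHASH map.
	coll, err := ebpf.LoadCollection("tproxy.o") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	// 1. Attach the sockops program to the cgroup2 root so it observes
	//    every TCP connection establishment and can populate fast_sock.
	cg, err := link.AttachCgroup(link.CgroupOptions{
		Path:    "/sys/fs/cgroup", // assumed cgroup2 mount point
		Program: coll.Programs["local_tcp_sockops"],
		Attach:  ebpf.AttachCGroupSockOps,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cg.Close()

	// 2. Attach the sk_msg verdict program to the fast_sock map: sendmsg()
	//    on any socket stored in the map goes through bpf_msg_redirect_hash
	//    instead of traversing the local TCP/IP stack.
	if err := link.RawAttachProgram(link.RawAttachProgramOptions{
		Target:  coll.Maps["fast_sock"].FD(),
		Program: coll.Programs["sk_msg_fast_redirect"],
		Attach:  ebpf.AttachSkMsgVerdict,
	}); err != nil {
		log.Fatal(err)
	}

	select {} // keep the attachments alive
}
```

Once both attachments are in place, loopback TCP traffic between a local client and dae's listener is short-circuited by bpf_msg_redirect_hash; with __DEBUG enabled, the bpf_printk lines from the patch can be observed via /sys/kernel/debug/tracing/trace_pipe.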