Zephyr Network Traffic Offloading -- Offload Socket

This article explains how a third-party socket offload is registered with Zephyr and how Zephyr calls into it.

The article "Zephyr Network Traffic Offloading – Adding an Offload Socket" described how to register a third-party socket with Zephyr as an offload socket. This article analyzes the registration flow and how Zephyr dispatches calls to the third-party socket.

As seen in the offload-socket registration process, socket offloading is registered in two parts, the DNS API and the socket API, so the analysis below is split into the same two parts.

Socket DNS offload

The file socket_offload.c implements the DNS-related APIs for offload sockets; they are registered via socket_offload_dns_register:

void socket_offload_dns_register(const struct socket_dns_offload *ops)
{
	dns_offload = ops;
}

These are then exposed to the rest of the stack through the following two APIs:

int socket_offload_getaddrinfo(const char *node, const char *service,
			       const struct zsock_addrinfo *hints,
			       struct zsock_addrinfo **res)
{
	return dns_offload->getaddrinfo(node, service, hints, res);
}

void socket_offload_freeaddrinfo(struct zsock_addrinfo *res)
{
	return dns_offload->freeaddrinfo(res);
}

The call chain on the Zephyr side is as follows:
include\posix\netdb.h getaddrinfo/freeaddrinfo
subsys\net\lib\sockets\getaddrinfo.c zsock_getaddrinfo/zsock_freeaddrinfo
getaddrinfo->zsock_getaddrinfo->socket_offload_getaddrinfo
freeaddrinfo->zsock_freeaddrinfo->socket_offload_freeaddrinfo
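On the vendor side, registration is just a matter of filling in a struct socket_dns_offload and handing it to socket_offload_dns_register. The following is a minimal sketch; vender_dns_getaddrinfo, vender_dns_freeaddrinfo and vender_dns_offload_init are hypothetical names, not part of Zephyr or of any particular driver:

#include <net/socket_offload.h>

/* Hypothetical vendor implementations of the two DNS operations. */
static int vender_dns_getaddrinfo(const char *node, const char *service,
				  const struct zsock_addrinfo *hints,
				  struct zsock_addrinfo **res)
{
	/* Forward the query to the offload device (modem, Wi-Fi chip, ...). */
	return -1; /* placeholder */
}

static void vender_dns_freeaddrinfo(struct zsock_addrinfo *res)
{
	/* Release whatever vender_dns_getaddrinfo allocated for res. */
}

static const struct socket_dns_offload vender_dns_ops = {
	.getaddrinfo = vender_dns_getaddrinfo,
	.freeaddrinfo = vender_dns_freeaddrinfo,
};

/* Typically called from the offload driver's init function. */
void vender_dns_offload_init(void)
{
	socket_offload_dns_register(&vender_dns_ops);
}

After this, any getaddrinfo call from the application ends up in vender_dns_getaddrinfo via the chain shown above.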

Socket API offload

Registration

The support-check function and the create handler are registered through NET_SOCKET_REGISTER:

NET_SOCKET_REGISTER(vender, AF_UNSPEC, vender_is_supported, vender_socket_create);

Expanding NET_SOCKET_REGISTER gives roughly the following:

static const __aligned(__alignof(struct net_socket_register))
	struct net_socket_register __net_socket_register_vender
	__attribute__((section(".net_socket_register.static.__net_socket_register_vender")))
	__attribute__((__used__)) = {
	.family = AF_UNSPEC,
	.is_supported = vender_is_supported,
	.handler = vender_socket_create,
};

That is, a const struct net_socket_register instance named __net_socket_register_vender is defined and placed in the net_socket_register section.
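The two functions referenced by the registration are supplied by the offload driver. As a rough illustration (hypothetical code, not from any particular driver), an offload driver that wants to take over every socket type can simply report support unconditionally:

/* Hypothetical support check: claim every (family, type, proto) combination.
 * A real driver would restrict this to what the offload hardware supports.
 */
static bool vender_is_supported(int family, int type, int proto)
{
	return true;
}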

Socket API

This subsection is closely tied to the fdtable; see the article on Zephyr's file descriptor management module (fdtable) for background.

Creating a socket

The flow for creating a socket is:
socket->zsock_socket->z_impl_zsock_socket

int z_impl_zsock_socket(int family, int type, int proto)
{
	/* Iterate over the entries placed in the net_socket_register section */
	Z_STRUCT_SECTION_FOREACH(net_socket_register, sock_family) {
		if (sock_family->family != family &&
		    sock_family->family != AF_UNSPEC) {
			continue;
		}

		NET_ASSERT(sock_family->is_supported);

		/* Check whether this entry supports the requested socket */
		if (!sock_family->is_supported(family, type, proto)) {
			continue;
		}

		/* Supported: create it, i.e. allocate an fd and register the vtable */
		return sock_family->handler(family, type, proto);
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE)) {
		return zsock_socket_internal(family, type, proto);
	}

	errno = EAFNOSUPPORT;
	return -1;
}
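From the application's point of view nothing special is needed: since the vendor entry above is registered with AF_UNSPEC and claims support for everything, an ordinary socket() call is routed into vender_socket_create. A hypothetical usage snippet (assuming CONFIG_NET_SOCKETS_POSIX_NAMES, see the Standard I/O section below):

#include <net/socket.h>

/* With the vendor offload registered, z_impl_zsock_socket routes this call
 * to vender_socket_create; the return value is an fd managed by the fdtable.
 */
static int open_offloaded_tcp_socket(void)
{
	int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

	if (sock < 0) {
		/* errno was set, e.g. EAFNOSUPPORT if nothing claimed the socket */
		return -1;
	}

	return sock;
}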

Let's look again at the vender_socket_create mentioned earlier: allocating the fd essentially means registering the vtable of the vendor's socket API implementation with that fd.

static const struct socket_op_vtable vender_socket_fd_op_vtable = {
	.fd_vtable = {
		.read = vender_read,
		.write = vender_write,
		.close = vender_close,
		.ioctl = vender_ioctl,
	},
	.bind = vender_bind,
	.connect = vender_connect,
	.listen = vender_listen,
	.accept = vender_socket_accept,
	.sendto = vender_sendto,
	.sendmsg = vender_sendmsg,
	.recvfrom = vender_recvfrom,
	.getsockopt = vender_getsockopt,
	.setsockopt = vender_setsockopt,
};

static int vender_socket_create(int family, int type, int proto)
{
	/* Reserve an fd */
	int fd = z_reserve_fd();
	int sock;

	if (fd < 0) {
		return -1;
	}

	/* This is the actual vendor socket operation */
	sock = vender_socket(family, type, proto);
	if (sock < 0) {
		z_free_fd(fd);
		return -1;
	}

	/* Register vender_socket_fd_op_vtable with the fd; from now on Zephyr's
	 * socket implementation can reach the vendor socket APIs in
	 * vender_socket_fd_op_vtable through the fd's vtable.
	 */
	z_finalize_fd(fd, SD_TO_OBJ(sock),
		      (const struct fd_op_vtable *)
		      &vender_socket_fd_op_vtable);

	return fd;
}
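SD_TO_OBJ is not defined in the snippet above. In existing offload drivers it is typically a tiny driver-side macro that packs the vendor socket descriptor into the fdtable entry's void *obj slot (offset by one so that descriptor 0 does not become a NULL pointer). The form below is only an illustration of that pattern, not a fixed Zephyr API:

/* Illustrative only: map a vendor socket descriptor to/from the fdtable's
 * void *obj slot. The +1/-1 keeps descriptor 0 from turning into NULL.
 */
#define SD_TO_OBJ(sd)   ((void *)((intptr_t)(sd) + 1))
#define OBJ_TO_SD(obj)  ((int)((intptr_t)(obj) - 1))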

Standard I/O

The socket APIs that Zephyr itself provides are all named with a zsock_ prefix, e.g. zsock_socket/zsock_close/zsock_send. When CONFIG_NET_SOCKETS_POSIX_NAMES=y is configured, Zephyr supports the standard POSIX socket names; the user needs to include "include/net/socket.h".
In socket.h, static inline functions with the standard socket names wrap the zsock_ APIs, for example:

static inline int close(int sock)
{
	return zsock_close(sock);
}

static inline int shutdown(int sock, int how)
{
	return zsock_shutdown(sock, how);
}

static inline int bind(int sock, const struct sockaddr *addr, socklen_t addrlen)
{
	return zsock_bind(sock, addr, addrlen);
}

static inline int connect(int sock, const struct sockaddr *addr,
			  socklen_t addrlen)
{
	return zsock_connect(sock, addr, addrlen);
}

However, you will not find write/read/ioctl in "include/net/socket.h". As we saw in socket creation, the standard I/O operations are registered into the fdtable and the socket id handed back is simply an fd, so write/read/ioctl on a socket are the standard write/read/ioctl functions that the fdtable provides.
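For reference, the fdtable-level dispatch looks roughly like the sketch below (a simplified sketch, not the exact Zephyr source): the fd is validated and the call is forwarded through the fd_op_vtable that vender_socket_create registered, which for an offload socket means vender_write.

#include <sys/fdtable.h>
#include <errno.h>

/* Simplified sketch of the fdtable's write(); read() and ioctl() follow the
 * same pattern of looking up the fd entry and forwarding through its vtable.
 */
ssize_t write(int fd, const void *buf, size_t sz)
{
	const struct fd_op_vtable *vtable;
	void *obj = z_get_fd_obj_and_vtable(fd, &vtable);

	if (obj == NULL) {
		errno = EBADF;
		return -1;
	}

	return vtable->write(obj, buf, sz);
}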

Closing a socket

Closing a socket is different: because close is implemented as a static inline in include/net/socket.h, the compiler resolves that symbol first, rather than picking up the fdtable's close at link time:

static inline int close(int sock)
{
	return zsock_close(sock);
}

The actual call chain is close(socket) -> zsock_close -> z_impl_zsock_close, which finally lands in z_impl_zsock_close:

static inline void *get_sock_vtable(
	int sock, const struct socket_op_vtable **vtable)
{
	void *ctx;

	/* Look up the vtable through the fd */
	ctx = z_get_fd_obj_and_vtable(sock,
				      (const struct fd_op_vtable **)vtable);

	if (ctx == NULL) {
		NET_ERR("invalid access on sock %d by thread %p", sock,
			_current);
	}

	return ctx;
}

int z_impl_zsock_close(int sock)
{
	const struct socket_op_vtable *vtable;
	/* Find the vtable */
	void *ctx = get_sock_vtable(sock, &vtable);
	int ret;

	if (ctx == NULL) {
		errno = EBADF;
		return -1;
	}

	NET_DBG("close: ctx=%p, fd=%d", ctx, sock);

	/* Call the vtable's close; for the offload socket this is vender_close */
	ret = vtable->fd_vtable.close(ctx);

	z_free_fd(sock);

	return ret;
}

If you compare z_impl_zsock_close with the close inside the fdtable, they do the same thing. The extra layer exists because zsock_close also contains some userspace handling; userspace is not covered in this article, so it is not expanded further here.

Socket operations

The other socket operations, such as bind/connect/listen/accept/send and the rest of the standard socket APIs, are wrapped the same way close is, and follow the naming and calling pattern
api -> zsock_api -> z_impl_zsock_api. Here we take send as an example:
send->zsock_send->zsock_sendto->z_impl_zsock_sendto

ssize_t z_impl_zsock_sendto(int sock, const void *buf, size_t len, int flags,
			    const struct sockaddr *dest_addr, socklen_t addrlen)
{
	VTABLE_CALL(sendto, sock, buf, len, flags, dest_addr, addrlen);
}

Here we see a macro, VTABLE_CALL; let's expand it:

#define VTABLE_CALL(fn, sock, ...) \
	do { \
		const struct socket_op_vtable *vtable; \
		void *ctx = get_sock_vtable(sock, &vtable); \
		if (ctx == NULL || vtable->fn == NULL) { \
			errno = EBADF; \
			return -1; \
		} \
		/* For sendto, this calls vtable->sendto */ \
		return vtable->fn(ctx, __VA_ARGS__); \
	} while (0)

static inline void *get_sock_vtable(
	int sock, const struct socket_op_vtable **vtable)
{
	void *ctx;

	/* Look up the vtable and obj from the fdtable */
	ctx = z_get_fd_obj_and_vtable(sock,
				      (const struct fd_op_vtable **)vtable);

	if (ctx == NULL) {
		NET_ERR("invalid access on sock %d by thread %p", sock,
			_current);
	}

	return ctx;
}

Now let's go back and look at the fdtable registration. The vtable form the fdtable expects is:

struct fd_op_vtable {
	ssize_t (*read)(void *obj, void *buf, size_t sz);
	ssize_t (*write)(void *obj, const void *buf, size_t sz);
	int (*close)(void *obj);
	int (*ioctl)(void *obj, unsigned int request, va_list args);
};

whereas what we actually registered in vender_socket_create is:

z_finalize_fd(fd, SD_TO_OBJ(sock),
	      (const struct fd_op_vtable *)
	      &vender_socket_fd_op_vtable);

and the type of vender_socket_fd_op_vtable is:

struct socket_op_vtable {
	struct fd_op_vtable fd_vtable;
	int (*bind)(void *obj, const struct sockaddr *addr, socklen_t addrlen);
	int (*connect)(void *obj, const struct sockaddr *addr,
		       socklen_t addrlen);
	int (*listen)(void *obj, int backlog);
	int (*accept)(void *obj, struct sockaddr *addr, socklen_t *addrlen);
	ssize_t (*sendto)(void *obj, const void *buf, size_t len, int flags,
			  const struct sockaddr *dest_addr, socklen_t addrlen);
	ssize_t (*recvfrom)(void *obj, void *buf, size_t max_len, int flags,
			    struct sockaddr *src_addr, socklen_t *addrlen);
	int (*getsockopt)(void *obj, int level, int optname,
			  void *optval, socklen_t *optlen);
	int (*setsockopt)(void *obj, int level, int optname,
			  const void *optval, socklen_t optlen);
	ssize_t (*sendmsg)(void *obj, const struct msghdr *msg, int flags);
	int (*getsockname)(void *obj, struct sockaddr *addr,
			   socklen_t *addrlen);
};

Because z_finalize_fd stores the pointer as a struct fd_op_vtable *, and the first member of struct socket_op_vtable is a struct fd_op_vtable, the fdtable can safely operate on it as a struct fd_op_vtable. When the socket layer later fetches the pointer back from the fdtable, it knows it registered a struct socket_op_vtable, so it casts it back to struct socket_op_vtable and uses its members.
The sendto referenced here is vender_sendto, so the whole chain links up as send -> zsock_send -> zsock_sendto -> z_impl_zsock_sendto -> vender_sendto.
The other socket operation APIs follow much the same flow; you can walk through them using the steps above, so they are not listed one by one here.
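This first-member trick works because C guarantees that a pointer to a struct also points to its first member, so the same address can be viewed either as a struct fd_op_vtable * (by the fdtable) or as a struct socket_op_vtable * (by the socket layer). The standalone sketch below illustrates the pattern with simplified stand-in types (base_vtable/socket_vtable are hypothetical names, not Zephyr types):

#include <stdio.h>

/* Stand-ins for fd_op_vtable / socket_op_vtable */
struct base_vtable {
	int (*close)(void *obj);
};

struct socket_vtable {
	struct base_vtable fd_vtable; /* must be the first member */
	int (*sendto)(void *obj);
};

static int my_close(void *obj)  { return 0; }
static int my_sendto(void *obj) { return 42; }

static const struct socket_vtable sock_ops = {
	.fd_vtable = { .close = my_close },
	.sendto = my_sendto,
};

int main(void)
{
	/* The "fdtable" only sees a pointer to the base vtable ... */
	const struct base_vtable *stored =
		(const struct base_vtable *)&sock_ops;

	/* ... the socket layer knows what it registered and casts back. */
	const struct socket_vtable *recovered =
		(const struct socket_vtable *)stored;

	printf("close=%d sendto=%d\n",
	       stored->close(NULL), recovered->sendto(NULL));
	return 0;
}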