#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
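
/* Sock-diag (NETLINK_SOCK_DIAG) support for AF_UNIX sockets. */

/*
 * UNIX_DIAG_NAME: the address the socket is bound to, if any. The
 * reported length excludes the leading sa_family field; abstract
 * names keep their leading NUL byte.
 */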
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}
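
/*
 * UNIX_DIAG_VFS: the inode number and device of the filesystem object
 * a bound socket is attached to, if there is one.
 */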
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = dentry->d_inode->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}
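
/*
 * UNIX_DIAG_PEER: the inode number of the connected peer socket. The
 * peer's state lock is taken so the inode read is stable.
 */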
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}
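
/*
 * UNIX_DIAG_ICONS, for listening sockets only: an array with one u32
 * per pending (not yet accepted) connection, holding the inode number
 * of the connecting peer.
 */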
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
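
/*
 * UNIX_DIAG_RQLEN: for listeners, the accept backlog and its limit;
 * for other sockets, the byte counts of the receive and send queues.
 */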
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
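
/*
 * Snapshot the socket's inode number under its state lock, then fill
 * the reply outside of it. A zero inode indicates the socket is being
 * released, so such sockets are skipped.
 */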
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}
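
/*
 * Netlink dump callback: walk every hash slot of unix_socket_table,
 * resuming from the slot/offset saved in cb->args, and emit one
 * message per socket whose state matches the udiag_states mask.
 */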
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
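
/*
 * Linear search of the whole socket table by inode number. Returns
 * the socket with a reference held, or NULL if not found.
 */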
static struct sock *unix_lookup_by_ino(int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}
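
/*
 * Answer a request for one socket, looked up by inode number and
 * verified against the supplied cookie. The reply buffer grows in
 * 256-byte steps (up to a page) until sk_diag_fill() fits.
 */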
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);