/*
 * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/list.h>
47 #include <linux/spinlock.h>
48 #include <linux/rcupdate.h>
49 #include <linux/rculist.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/if_arp.h>
53 #include <linux/skbuff.h>
54 #include <linux/can.h>
55 #include <linux/can/core.h>
56 #include <linux/can/skb.h>
57 #include <linux/can/gw.h>
58 #include <net/rtnetlink.h>
59 #include <net/net_namespace.h>
62 #define CAN_GW_VERSION "20170425"
63 #define CAN_GW_NAME "can-gw"
65 MODULE_DESCRIPTION("PF_CAN netlink gateway");
66 MODULE_LICENSE("Dual BSD/GPL");
67 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
68 MODULE_ALIAS(CAN_GW_NAME
);
70 #define CGW_MIN_HOPS 1
71 #define CGW_MAX_HOPS 6
72 #define CGW_DEFAULT_HOPS 1
74 static unsigned int max_hops __read_mostly
= CGW_DEFAULT_HOPS
;
75 module_param(max_hops
, uint
, 0444);
76 MODULE_PARM_DESC(max_hops
,
77 "maximum " CAN_GW_NAME
" routing hops for CAN frames "
78 "(valid values: " __stringify(CGW_MIN_HOPS
) "-"
79 __stringify(CGW_MAX_HOPS
) " hops, "
80 "default: " __stringify(CGW_DEFAULT_HOPS
) ")");
82 static struct notifier_block notifier
;
83 static struct kmem_cache
*cgw_cache __read_mostly
;
85 /* structure that contains the (on-the-fly) CAN frame modifications */
99 void (*modfunc
[MAX_MODFUNCTIONS
])(struct can_frame
*cf
,
102 /* CAN frame checksum calculation after CAN frame modifications */
104 struct cgw_csum_xor
xor;
105 struct cgw_csum_crc8 crc8
;
108 void (*xor)(struct can_frame
*cf
, struct cgw_csum_xor
*xor);
109 void (*crc8
)(struct can_frame
*cf
, struct cgw_csum_crc8
*crc8
);
116 * So far we just support CAN -> CAN routing and frame modifications.
118 * The internal can_can_gw structure contains data and attributes for
119 * a CAN -> CAN gateway job.
122 struct can_filter filter
;
127 /* list entry for CAN gateways jobs */
129 struct hlist_node list
;
136 /* CAN frame data source */
137 struct net_device
*dev
;
140 /* CAN frame data destination */
141 struct net_device
*dev
;
144 struct can_can_gw ccgw
;
152 /* modification functions that are invoked in the hot path in can_can_gw_rcv */
154 #define MODFUNC(func, op) static void func(struct can_frame *cf, \
155 struct cf_mod *mod) { op ; }
157 MODFUNC(mod_and_id
, cf
->can_id
&= mod
->modframe
.and.can_id
)
158 MODFUNC(mod_and_dlc
, cf
->can_dlc
&= mod
->modframe
.and.can_dlc
)
159 MODFUNC(mod_and_data
, *(u64
*)cf
->data
&= *(u64
*)mod
->modframe
.and.data
)
160 MODFUNC(mod_or_id
, cf
->can_id
|= mod
->modframe
.or.can_id
)
161 MODFUNC(mod_or_dlc
, cf
->can_dlc
|= mod
->modframe
.or.can_dlc
)
162 MODFUNC(mod_or_data
, *(u64
*)cf
->data
|= *(u64
*)mod
->modframe
.or.data
)
163 MODFUNC(mod_xor_id
, cf
->can_id
^= mod
->modframe
.xor.can_id
)
164 MODFUNC(mod_xor_dlc
, cf
->can_dlc
^= mod
->modframe
.xor.can_dlc
)
165 MODFUNC(mod_xor_data
, *(u64
*)cf
->data
^= *(u64
*)mod
->modframe
.xor.data
)
166 MODFUNC(mod_set_id
, cf
->can_id
= mod
->modframe
.set
.can_id
)
167 MODFUNC(mod_set_dlc
, cf
->can_dlc
= mod
->modframe
.set
.can_dlc
)
168 MODFUNC(mod_set_data
, *(u64
*)cf
->data
= *(u64
*)mod
->modframe
.set
.data
)
170 static inline void canframecpy(struct can_frame
*dst
, struct can_frame
*src
)
173 * Copy the struct members separately to ensure that no uninitialized
174 * data are copied in the 3 bytes hole of the struct. This is needed
175 * to make easy compares of the data in the struct cf_mod.
178 dst
->can_id
= src
->can_id
;
179 dst
->can_dlc
= src
->can_dlc
;
180 *(u64
*)dst
->data
= *(u64
*)src
->data
;
183 static int cgw_chk_csum_parms(s8 fr
, s8 to
, s8 re
)
186 * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
187 * relative to received dlc -1 .. -8 :
188 * e.g. for received dlc = 8
189 * -1 => index = 7 (data[7])
190 * -3 => index = 5 (data[5])
191 * -8 => index = 0 (data[0])
194 if (fr
> -9 && fr
< 8 &&
/* map a possibly negative (dlc-relative) index to an absolute data index */
static inline int calc_idx(int idx, int rx_dlc)
{
	if (idx < 0)
		return rx_dlc + idx;
	else
		return idx;
}
210 static void cgw_csum_xor_rel(struct can_frame
*cf
, struct cgw_csum_xor
*xor)
212 int from
= calc_idx(xor->from_idx
, cf
->can_dlc
);
213 int to
= calc_idx(xor->to_idx
, cf
->can_dlc
);
214 int res
= calc_idx(xor->result_idx
, cf
->can_dlc
);
215 u8 val
= xor->init_xor_val
;
218 if (from
< 0 || to
< 0 || res
< 0)
222 for (i
= from
; i
<= to
; i
++)
225 for (i
= from
; i
>= to
; i
--)
232 static void cgw_csum_xor_pos(struct can_frame
*cf
, struct cgw_csum_xor
*xor)
234 u8 val
= xor->init_xor_val
;
237 for (i
= xor->from_idx
; i
<= xor->to_idx
; i
++)
240 cf
->data
[xor->result_idx
] = val
;
243 static void cgw_csum_xor_neg(struct can_frame
*cf
, struct cgw_csum_xor
*xor)
245 u8 val
= xor->init_xor_val
;
248 for (i
= xor->from_idx
; i
>= xor->to_idx
; i
--)
251 cf
->data
[xor->result_idx
] = val
;
254 static void cgw_csum_crc8_rel(struct can_frame
*cf
, struct cgw_csum_crc8
*crc8
)
256 int from
= calc_idx(crc8
->from_idx
, cf
->can_dlc
);
257 int to
= calc_idx(crc8
->to_idx
, cf
->can_dlc
);
258 int res
= calc_idx(crc8
->result_idx
, cf
->can_dlc
);
259 u8 crc
= crc8
->init_crc_val
;
262 if (from
< 0 || to
< 0 || res
< 0)
266 for (i
= crc8
->from_idx
; i
<= crc8
->to_idx
; i
++)
267 crc
= crc8
->crctab
[crc
^cf
->data
[i
]];
269 for (i
= crc8
->from_idx
; i
>= crc8
->to_idx
; i
--)
270 crc
= crc8
->crctab
[crc
^cf
->data
[i
]];
273 switch (crc8
->profile
) {
275 case CGW_CRC8PRF_1U8
:
276 crc
= crc8
->crctab
[crc
^crc8
->profile_data
[0]];
279 case CGW_CRC8PRF_16U8
:
280 crc
= crc8
->crctab
[crc
^crc8
->profile_data
[cf
->data
[1] & 0xF]];
283 case CGW_CRC8PRF_SFFID_XOR
:
284 crc
= crc8
->crctab
[crc
^(cf
->can_id
& 0xFF)^
285 (cf
->can_id
>> 8 & 0xFF)];
290 cf
->data
[crc8
->result_idx
] = crc
^crc8
->final_xor_val
;
293 static void cgw_csum_crc8_pos(struct can_frame
*cf
, struct cgw_csum_crc8
*crc8
)
295 u8 crc
= crc8
->init_crc_val
;
298 for (i
= crc8
->from_idx
; i
<= crc8
->to_idx
; i
++)
299 crc
= crc8
->crctab
[crc
^cf
->data
[i
]];
301 switch (crc8
->profile
) {
303 case CGW_CRC8PRF_1U8
:
304 crc
= crc8
->crctab
[crc
^crc8
->profile_data
[0]];
307 case CGW_CRC8PRF_16U8
:
308 crc
= crc8
->crctab
[crc
^crc8
->profile_data
[cf
->data
[1] & 0xF]];
311 case CGW_CRC8PRF_SFFID_XOR
:
312 crc
= crc8
->crctab
[crc
^(cf
->can_id
& 0xFF)^
313 (cf
->can_id
>> 8 & 0xFF)];
317 cf
->data
[crc8
->result_idx
] = crc
^crc8
->final_xor_val
;
320 static void cgw_csum_crc8_neg(struct can_frame
*cf
, struct cgw_csum_crc8
*crc8
)
322 u8 crc
= crc8
->init_crc_val
;
325 for (i
= crc8
->from_idx
; i
>= crc8
->to_idx
; i
--)
326 crc
= crc8
->crctab
[crc
^cf
->data
[i
]];
328 switch (crc8
->profile
) {
330 case CGW_CRC8PRF_1U8
:
331 crc
= crc8
->crctab
[crc
^crc8
->profile_data
[0]];
334 case CGW_CRC8PRF_16U8
:
335 crc
= crc8
->crctab
[crc
^crc8
->profile_data
[cf
->data
[1] & 0xF]];
338 case CGW_CRC8PRF_SFFID_XOR
:
339 crc
= crc8
->crctab
[crc
^(cf
->can_id
& 0xFF)^
340 (cf
->can_id
>> 8 & 0xFF)];
344 cf
->data
[crc8
->result_idx
] = crc
^crc8
->final_xor_val
;
347 /* the receive & process & send function */
348 static void can_can_gw_rcv(struct sk_buff
*skb
, void *data
)
350 struct cgw_job
*gwj
= (struct cgw_job
*)data
;
351 struct can_frame
*cf
;
352 struct sk_buff
*nskb
;
356 * Do not handle CAN frames routed more than 'max_hops' times.
357 * In general we should never catch this delimiter which is intended
358 * to cover a misconfiguration protection (e.g. circular CAN routes).
360 * The Controller Area Network controllers only accept CAN frames with
361 * correct CRCs - which are not visible in the controller registers.
362 * According to skbuff.h documentation the csum_start element for IP
363 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
364 * Only CAN skbs can be processed here which already have this property.
367 #define cgw_hops(skb) ((skb)->csum_start)
369 BUG_ON(skb
->ip_summed
!= CHECKSUM_UNNECESSARY
);
371 if (cgw_hops(skb
) >= max_hops
) {
372 /* indicate deleted frames due to misconfiguration */
373 gwj
->deleted_frames
++;
377 if (!(gwj
->dst
.dev
->flags
& IFF_UP
)) {
378 gwj
->dropped_frames
++;
382 /* is sending the skb back to the incoming interface not allowed? */
383 if (!(gwj
->flags
& CGW_FLAGS_CAN_IIF_TX_OK
) &&
384 can_skb_prv(skb
)->ifindex
== gwj
->dst
.dev
->ifindex
)
388 * clone the given skb, which has not been done in can_rcv()
390 * When there is at least one modification function activated,
391 * we need to copy the skb as we want to modify skb->data.
393 if (gwj
->mod
.modfunc
[0])
394 nskb
= skb_copy(skb
, GFP_ATOMIC
);
396 nskb
= skb_clone(skb
, GFP_ATOMIC
);
399 gwj
->dropped_frames
++;
403 /* put the incremented hop counter in the cloned skb */
404 cgw_hops(nskb
) = cgw_hops(skb
) + 1;
406 /* first processing of this CAN frame -> adjust to private hop limit */
407 if (gwj
->limit_hops
&& cgw_hops(nskb
) == 1)
408 cgw_hops(nskb
) = max_hops
- gwj
->limit_hops
+ 1;
410 nskb
->dev
= gwj
->dst
.dev
;
412 /* pointer to modifiable CAN frame */
413 cf
= (struct can_frame
*)nskb
->data
;
415 /* perform preprocessed modification functions if there are any */
416 while (modidx
< MAX_MODFUNCTIONS
&& gwj
->mod
.modfunc
[modidx
])
417 (*gwj
->mod
.modfunc
[modidx
++])(cf
, &gwj
->mod
);
419 /* check for checksum updates when the CAN frame has been modified */
421 if (gwj
->mod
.csumfunc
.crc8
)
422 (*gwj
->mod
.csumfunc
.crc8
)(cf
, &gwj
->mod
.csum
.crc8
);
424 if (gwj
->mod
.csumfunc
.xor)
425 (*gwj
->mod
.csumfunc
.xor)(cf
, &gwj
->mod
.csum
.xor);
428 /* clear the skb timestamp if not configured the other way */
429 if (!(gwj
->flags
& CGW_FLAGS_CAN_SRC_TSTAMP
))
432 /* send to netdevice */
433 if (can_send(nskb
, gwj
->flags
& CGW_FLAGS_CAN_ECHO
))
434 gwj
->dropped_frames
++;
436 gwj
->handled_frames
++;
439 static inline int cgw_register_filter(struct net
*net
, struct cgw_job
*gwj
)
441 return can_rx_register(net
, gwj
->src
.dev
, gwj
->ccgw
.filter
.can_id
,
442 gwj
->ccgw
.filter
.can_mask
, can_can_gw_rcv
,
446 static inline void cgw_unregister_filter(struct net
*net
, struct cgw_job
*gwj
)
448 can_rx_unregister(net
, gwj
->src
.dev
, gwj
->ccgw
.filter
.can_id
,
449 gwj
->ccgw
.filter
.can_mask
, can_can_gw_rcv
, gwj
);
452 static int cgw_notifier(struct notifier_block
*nb
,
453 unsigned long msg
, void *ptr
)
455 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
456 struct net
*net
= dev_net(dev
);
458 if (dev
->type
!= ARPHRD_CAN
)
461 if (msg
== NETDEV_UNREGISTER
) {
463 struct cgw_job
*gwj
= NULL
;
464 struct hlist_node
*nx
;
468 hlist_for_each_entry_safe(gwj
, nx
, &net
->can
.cgw_list
, list
) {
470 if (gwj
->src
.dev
== dev
|| gwj
->dst
.dev
== dev
) {
471 hlist_del(&gwj
->list
);
472 cgw_unregister_filter(net
, gwj
);
473 kmem_cache_free(cgw_cache
, gwj
);
481 static int cgw_put_job(struct sk_buff
*skb
, struct cgw_job
*gwj
, int type
,
482 u32 pid
, u32 seq
, int flags
)
484 struct cgw_frame_mod mb
;
485 struct rtcanmsg
*rtcan
;
486 struct nlmsghdr
*nlh
;
488 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*rtcan
), flags
);
492 rtcan
= nlmsg_data(nlh
);
493 rtcan
->can_family
= AF_CAN
;
494 rtcan
->gwtype
= gwj
->gwtype
;
495 rtcan
->flags
= gwj
->flags
;
497 /* add statistics if available */
499 if (gwj
->handled_frames
) {
500 if (nla_put_u32(skb
, CGW_HANDLED
, gwj
->handled_frames
) < 0)
504 if (gwj
->dropped_frames
) {
505 if (nla_put_u32(skb
, CGW_DROPPED
, gwj
->dropped_frames
) < 0)
509 if (gwj
->deleted_frames
) {
510 if (nla_put_u32(skb
, CGW_DELETED
, gwj
->deleted_frames
) < 0)
514 /* check non default settings of attributes */
516 if (gwj
->limit_hops
) {
517 if (nla_put_u8(skb
, CGW_LIM_HOPS
, gwj
->limit_hops
) < 0)
521 if (gwj
->mod
.modtype
.and) {
522 memcpy(&mb
.cf
, &gwj
->mod
.modframe
.and, sizeof(mb
.cf
));
523 mb
.modtype
= gwj
->mod
.modtype
.and;
524 if (nla_put(skb
, CGW_MOD_AND
, sizeof(mb
), &mb
) < 0)
528 if (gwj
->mod
.modtype
.or) {
529 memcpy(&mb
.cf
, &gwj
->mod
.modframe
.or, sizeof(mb
.cf
));
530 mb
.modtype
= gwj
->mod
.modtype
.or;
531 if (nla_put(skb
, CGW_MOD_OR
, sizeof(mb
), &mb
) < 0)
535 if (gwj
->mod
.modtype
.xor) {
536 memcpy(&mb
.cf
, &gwj
->mod
.modframe
.xor, sizeof(mb
.cf
));
537 mb
.modtype
= gwj
->mod
.modtype
.xor;
538 if (nla_put(skb
, CGW_MOD_XOR
, sizeof(mb
), &mb
) < 0)
542 if (gwj
->mod
.modtype
.set
) {
543 memcpy(&mb
.cf
, &gwj
->mod
.modframe
.set
, sizeof(mb
.cf
));
544 mb
.modtype
= gwj
->mod
.modtype
.set
;
545 if (nla_put(skb
, CGW_MOD_SET
, sizeof(mb
), &mb
) < 0)
550 if (nla_put_u32(skb
, CGW_MOD_UID
, gwj
->mod
.uid
) < 0)
554 if (gwj
->mod
.csumfunc
.crc8
) {
555 if (nla_put(skb
, CGW_CS_CRC8
, CGW_CS_CRC8_LEN
,
556 &gwj
->mod
.csum
.crc8
) < 0)
560 if (gwj
->mod
.csumfunc
.xor) {
561 if (nla_put(skb
, CGW_CS_XOR
, CGW_CS_XOR_LEN
,
562 &gwj
->mod
.csum
.xor) < 0)
566 if (gwj
->gwtype
== CGW_TYPE_CAN_CAN
) {
568 if (gwj
->ccgw
.filter
.can_id
|| gwj
->ccgw
.filter
.can_mask
) {
569 if (nla_put(skb
, CGW_FILTER
, sizeof(struct can_filter
),
570 &gwj
->ccgw
.filter
) < 0)
574 if (nla_put_u32(skb
, CGW_SRC_IF
, gwj
->ccgw
.src_idx
) < 0)
577 if (nla_put_u32(skb
, CGW_DST_IF
, gwj
->ccgw
.dst_idx
) < 0)
585 nlmsg_cancel(skb
, nlh
);
589 /* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
590 static int cgw_dump_jobs(struct sk_buff
*skb
, struct netlink_callback
*cb
)
592 struct net
*net
= sock_net(skb
->sk
);
593 struct cgw_job
*gwj
= NULL
;
595 int s_idx
= cb
->args
[0];
598 hlist_for_each_entry_rcu(gwj
, &net
->can
.cgw_list
, list
) {
602 if (cgw_put_job(skb
, gwj
, RTM_NEWROUTE
, NETLINK_CB(cb
->skb
).portid
,
603 cb
->nlh
->nlmsg_seq
, NLM_F_MULTI
) < 0)
615 static const struct nla_policy cgw_policy
[CGW_MAX
+1] = {
616 [CGW_MOD_AND
] = { .len
= sizeof(struct cgw_frame_mod
) },
617 [CGW_MOD_OR
] = { .len
= sizeof(struct cgw_frame_mod
) },
618 [CGW_MOD_XOR
] = { .len
= sizeof(struct cgw_frame_mod
) },
619 [CGW_MOD_SET
] = { .len
= sizeof(struct cgw_frame_mod
) },
620 [CGW_CS_XOR
] = { .len
= sizeof(struct cgw_csum_xor
) },
621 [CGW_CS_CRC8
] = { .len
= sizeof(struct cgw_csum_crc8
) },
622 [CGW_SRC_IF
] = { .type
= NLA_U32
},
623 [CGW_DST_IF
] = { .type
= NLA_U32
},
624 [CGW_FILTER
] = { .len
= sizeof(struct can_filter
) },
625 [CGW_LIM_HOPS
] = { .type
= NLA_U8
},
626 [CGW_MOD_UID
] = { .type
= NLA_U32
},
629 /* check for common and gwtype specific attributes */
630 static int cgw_parse_attr(struct nlmsghdr
*nlh
, struct cf_mod
*mod
,
631 u8 gwtype
, void *gwtypeattr
, u8
*limhops
)
633 struct nlattr
*tb
[CGW_MAX
+1];
634 struct cgw_frame_mod mb
;
638 /* initialize modification & checksum data space */
639 memset(mod
, 0, sizeof(*mod
));
641 err
= nlmsg_parse(nlh
, sizeof(struct rtcanmsg
), tb
, CGW_MAX
,
646 if (tb
[CGW_LIM_HOPS
]) {
647 *limhops
= nla_get_u8(tb
[CGW_LIM_HOPS
]);
649 if (*limhops
< 1 || *limhops
> max_hops
)
653 /* check for AND/OR/XOR/SET modifications */
655 if (tb
[CGW_MOD_AND
]) {
656 nla_memcpy(&mb
, tb
[CGW_MOD_AND
], CGW_MODATTR_LEN
);
658 canframecpy(&mod
->modframe
.and, &mb
.cf
);
659 mod
->modtype
.and = mb
.modtype
;
661 if (mb
.modtype
& CGW_MOD_ID
)
662 mod
->modfunc
[modidx
++] = mod_and_id
;
664 if (mb
.modtype
& CGW_MOD_DLC
)
665 mod
->modfunc
[modidx
++] = mod_and_dlc
;
667 if (mb
.modtype
& CGW_MOD_DATA
)
668 mod
->modfunc
[modidx
++] = mod_and_data
;
671 if (tb
[CGW_MOD_OR
]) {
672 nla_memcpy(&mb
, tb
[CGW_MOD_OR
], CGW_MODATTR_LEN
);
674 canframecpy(&mod
->modframe
.or, &mb
.cf
);
675 mod
->modtype
.or = mb
.modtype
;
677 if (mb
.modtype
& CGW_MOD_ID
)
678 mod
->modfunc
[modidx
++] = mod_or_id
;
680 if (mb
.modtype
& CGW_MOD_DLC
)
681 mod
->modfunc
[modidx
++] = mod_or_dlc
;
683 if (mb
.modtype
& CGW_MOD_DATA
)
684 mod
->modfunc
[modidx
++] = mod_or_data
;
687 if (tb
[CGW_MOD_XOR
]) {
688 nla_memcpy(&mb
, tb
[CGW_MOD_XOR
], CGW_MODATTR_LEN
);
690 canframecpy(&mod
->modframe
.xor, &mb
.cf
);
691 mod
->modtype
.xor = mb
.modtype
;
693 if (mb
.modtype
& CGW_MOD_ID
)
694 mod
->modfunc
[modidx
++] = mod_xor_id
;
696 if (mb
.modtype
& CGW_MOD_DLC
)
697 mod
->modfunc
[modidx
++] = mod_xor_dlc
;
699 if (mb
.modtype
& CGW_MOD_DATA
)
700 mod
->modfunc
[modidx
++] = mod_xor_data
;
703 if (tb
[CGW_MOD_SET
]) {
704 nla_memcpy(&mb
, tb
[CGW_MOD_SET
], CGW_MODATTR_LEN
);
706 canframecpy(&mod
->modframe
.set
, &mb
.cf
);
707 mod
->modtype
.set
= mb
.modtype
;
709 if (mb
.modtype
& CGW_MOD_ID
)
710 mod
->modfunc
[modidx
++] = mod_set_id
;
712 if (mb
.modtype
& CGW_MOD_DLC
)
713 mod
->modfunc
[modidx
++] = mod_set_dlc
;
715 if (mb
.modtype
& CGW_MOD_DATA
)
716 mod
->modfunc
[modidx
++] = mod_set_data
;
719 /* check for checksum operations after CAN frame modifications */
722 if (tb
[CGW_CS_CRC8
]) {
723 struct cgw_csum_crc8
*c
= nla_data(tb
[CGW_CS_CRC8
]);
725 err
= cgw_chk_csum_parms(c
->from_idx
, c
->to_idx
,
730 nla_memcpy(&mod
->csum
.crc8
, tb
[CGW_CS_CRC8
],
734 * select dedicated processing function to reduce
735 * runtime operations in receive hot path.
737 if (c
->from_idx
< 0 || c
->to_idx
< 0 ||
739 mod
->csumfunc
.crc8
= cgw_csum_crc8_rel
;
740 else if (c
->from_idx
<= c
->to_idx
)
741 mod
->csumfunc
.crc8
= cgw_csum_crc8_pos
;
743 mod
->csumfunc
.crc8
= cgw_csum_crc8_neg
;
746 if (tb
[CGW_CS_XOR
]) {
747 struct cgw_csum_xor
*c
= nla_data(tb
[CGW_CS_XOR
]);
749 err
= cgw_chk_csum_parms(c
->from_idx
, c
->to_idx
,
754 nla_memcpy(&mod
->csum
.xor, tb
[CGW_CS_XOR
],
758 * select dedicated processing function to reduce
759 * runtime operations in receive hot path.
761 if (c
->from_idx
< 0 || c
->to_idx
< 0 ||
763 mod
->csumfunc
.xor = cgw_csum_xor_rel
;
764 else if (c
->from_idx
<= c
->to_idx
)
765 mod
->csumfunc
.xor = cgw_csum_xor_pos
;
767 mod
->csumfunc
.xor = cgw_csum_xor_neg
;
770 if (tb
[CGW_MOD_UID
]) {
771 nla_memcpy(&mod
->uid
, tb
[CGW_MOD_UID
], sizeof(u32
));
775 if (gwtype
== CGW_TYPE_CAN_CAN
) {
777 /* check CGW_TYPE_CAN_CAN specific attributes */
779 struct can_can_gw
*ccgw
= (struct can_can_gw
*)gwtypeattr
;
780 memset(ccgw
, 0, sizeof(*ccgw
));
782 /* check for can_filter in attributes */
784 nla_memcpy(&ccgw
->filter
, tb
[CGW_FILTER
],
785 sizeof(struct can_filter
));
789 /* specifying two interfaces is mandatory */
790 if (!tb
[CGW_SRC_IF
] || !tb
[CGW_DST_IF
])
793 ccgw
->src_idx
= nla_get_u32(tb
[CGW_SRC_IF
]);
794 ccgw
->dst_idx
= nla_get_u32(tb
[CGW_DST_IF
]);
796 /* both indices set to 0 for flushing all routing entries */
797 if (!ccgw
->src_idx
&& !ccgw
->dst_idx
)
800 /* only one index set to 0 is an error */
801 if (!ccgw
->src_idx
|| !ccgw
->dst_idx
)
805 /* add the checks for other gwtypes here */
810 static int cgw_create_job(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
811 struct netlink_ext_ack
*extack
)
813 struct net
*net
= sock_net(skb
->sk
);
817 struct can_can_gw ccgw
;
821 if (!netlink_capable(skb
, CAP_NET_ADMIN
))
824 if (nlmsg_len(nlh
) < sizeof(*r
))
828 if (r
->can_family
!= AF_CAN
)
829 return -EPFNOSUPPORT
;
831 /* so far we only support CAN -> CAN routings */
832 if (r
->gwtype
!= CGW_TYPE_CAN_CAN
)
835 err
= cgw_parse_attr(nlh
, &mod
, CGW_TYPE_CAN_CAN
, &ccgw
, &limhops
);
843 /* check for updating an existing job with identical uid */
844 hlist_for_each_entry(gwj
, &net
->can
.cgw_list
, list
) {
846 if (gwj
->mod
.uid
!= mod
.uid
)
849 /* interfaces & filters must be identical */
850 if (memcmp(&gwj
->ccgw
, &ccgw
, sizeof(ccgw
)))
853 /* update modifications with disabled softirq & quit */
855 memcpy(&gwj
->mod
, &mod
, sizeof(mod
));
861 /* ifindex == 0 is not allowed for job creation */
862 if (!ccgw
.src_idx
|| !ccgw
.dst_idx
)
865 gwj
= kmem_cache_alloc(cgw_cache
, GFP_KERNEL
);
869 gwj
->handled_frames
= 0;
870 gwj
->dropped_frames
= 0;
871 gwj
->deleted_frames
= 0;
872 gwj
->flags
= r
->flags
;
873 gwj
->gwtype
= r
->gwtype
;
874 gwj
->limit_hops
= limhops
;
876 /* insert already parsed information */
877 memcpy(&gwj
->mod
, &mod
, sizeof(mod
));
878 memcpy(&gwj
->ccgw
, &ccgw
, sizeof(ccgw
));
882 gwj
->src
.dev
= __dev_get_by_index(net
, gwj
->ccgw
.src_idx
);
887 if (gwj
->src
.dev
->type
!= ARPHRD_CAN
)
890 gwj
->dst
.dev
= __dev_get_by_index(net
, gwj
->ccgw
.dst_idx
);
895 if (gwj
->dst
.dev
->type
!= ARPHRD_CAN
)
900 err
= cgw_register_filter(net
, gwj
);
902 hlist_add_head_rcu(&gwj
->list
, &net
->can
.cgw_list
);
905 kmem_cache_free(cgw_cache
, gwj
);
910 static void cgw_remove_all_jobs(struct net
*net
)
912 struct cgw_job
*gwj
= NULL
;
913 struct hlist_node
*nx
;
917 hlist_for_each_entry_safe(gwj
, nx
, &net
->can
.cgw_list
, list
) {
918 hlist_del(&gwj
->list
);
919 cgw_unregister_filter(net
, gwj
);
920 kmem_cache_free(cgw_cache
, gwj
);
924 static int cgw_remove_job(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
925 struct netlink_ext_ack
*extack
)
927 struct net
*net
= sock_net(skb
->sk
);
928 struct cgw_job
*gwj
= NULL
;
929 struct hlist_node
*nx
;
932 struct can_can_gw ccgw
;
936 if (!netlink_capable(skb
, CAP_NET_ADMIN
))
939 if (nlmsg_len(nlh
) < sizeof(*r
))
943 if (r
->can_family
!= AF_CAN
)
944 return -EPFNOSUPPORT
;
946 /* so far we only support CAN -> CAN routings */
947 if (r
->gwtype
!= CGW_TYPE_CAN_CAN
)
950 err
= cgw_parse_attr(nlh
, &mod
, CGW_TYPE_CAN_CAN
, &ccgw
, &limhops
);
954 /* two interface indices both set to 0 => remove all entries */
955 if (!ccgw
.src_idx
&& !ccgw
.dst_idx
) {
956 cgw_remove_all_jobs(net
);
964 /* remove only the first matching entry */
965 hlist_for_each_entry_safe(gwj
, nx
, &net
->can
.cgw_list
, list
) {
967 if (gwj
->flags
!= r
->flags
)
970 if (gwj
->limit_hops
!= limhops
)
973 /* we have a match when uid is enabled and identical */
974 if (gwj
->mod
.uid
|| mod
.uid
) {
975 if (gwj
->mod
.uid
!= mod
.uid
)
978 /* no uid => check for identical modifications */
979 if (memcmp(&gwj
->mod
, &mod
, sizeof(mod
)))
983 /* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
984 if (memcmp(&gwj
->ccgw
, &ccgw
, sizeof(ccgw
)))
987 hlist_del(&gwj
->list
);
988 cgw_unregister_filter(net
, gwj
);
989 kmem_cache_free(cgw_cache
, gwj
);
997 static int __net_init
cangw_pernet_init(struct net
*net
)
999 INIT_HLIST_HEAD(&net
->can
.cgw_list
);
1003 static void __net_exit
cangw_pernet_exit(struct net
*net
)
1006 cgw_remove_all_jobs(net
);
1010 static struct pernet_operations cangw_pernet_ops
= {
1011 .init
= cangw_pernet_init
,
1012 .exit
= cangw_pernet_exit
,
1015 static __init
int cgw_module_init(void)
1019 /* sanitize given module parameter */
1020 max_hops
= clamp_t(unsigned int, max_hops
, CGW_MIN_HOPS
, CGW_MAX_HOPS
);
1022 pr_info("can: netlink gateway (rev " CAN_GW_VERSION
") max_hops=%d\n",
1025 register_pernet_subsys(&cangw_pernet_ops
);
1026 cgw_cache
= kmem_cache_create("can_gw", sizeof(struct cgw_job
),
1033 notifier
.notifier_call
= cgw_notifier
;
1034 register_netdevice_notifier(¬ifier
);
1036 ret
= rtnl_register_module(THIS_MODULE
, PF_CAN
, RTM_GETROUTE
,
1037 NULL
, cgw_dump_jobs
, 0);
1039 unregister_netdevice_notifier(¬ifier
);
1040 kmem_cache_destroy(cgw_cache
);
1044 /* Only the first call to rtnl_register_module can fail */
1045 rtnl_register_module(THIS_MODULE
, PF_CAN
, RTM_NEWROUTE
,
1046 cgw_create_job
, NULL
, 0);
1047 rtnl_register_module(THIS_MODULE
, PF_CAN
, RTM_DELROUTE
,
1048 cgw_remove_job
, NULL
, 0);
1053 static __exit
void cgw_module_exit(void)
1055 rtnl_unregister_all(PF_CAN
);
1057 unregister_netdevice_notifier(¬ifier
);
1059 unregister_pernet_subsys(&cangw_pernet_ops
);
1060 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1062 kmem_cache_destroy(cgw_cache
);
1065 module_init(cgw_module_init
);
1066 module_exit(cgw_module_exit
);