mlxsw: spectrum_router: Add support for nexthop group consolidation for IPv6
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

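/* Top-level router state, one instance per ASIC. It owns the RIF and
 * virtual router arrays, the neighbour and nexthop hash tables used for
 * nexthop group consolidation, and the delayed works that feed neighbour
 * activity back to the kernel.
 */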
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
	struct notifier_block fib_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

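/* Prefix usage is tracked as a bitmap with one bit per possible prefix
 * length. Sizing it for IPv6 (128 bits plus the zero-length prefix,
 * hence the "+ 1") lets the same structure serve both protocols.
 */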
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

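/* LPM trees are a shared hardware resource: mlxsw_sp_lpm_tree_get() first
 * looks for an in-use tree of the same protocol whose prefix usage matches
 * exactly and only creates a new one on a miss. Callers pair it with
 * mlxsw_sp_lpm_tree_hold()/mlxsw_sp_lpm_tree_put() so a tree is released
 * back to the device once its last user is gone.
 */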
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

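/* Virtual routers follow a get/put pattern keyed by kernel table ID:
 * mlxsw_sp_vr_get() reuses the VR already bound to the table or claims a
 * free one, and mlxsw_sp_vr_put() destroys the VR once it has no RIFs and
 * both of its FIBs are empty.
 */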
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

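/* The device reports which neighbour entries were recently active via the
 * RAUHTD register. The handlers below walk the dumped records and call
 * neigh_event_send() on each active entry so the kernel does not age out
 * neighbours whose traffic is forwarded entirely in hardware.
 */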
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}

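/* A single RAUHTD response holds a bounded number of records, so the dump
 * is repeated for as long as the previous response came back completely
 * full. An IPv4 record packs several entries while an IPv6 record holds
 * one, which is why the fullness check is per record type.
 */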
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	return err;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);

out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those that are unresolved
	 * and send ARP on them. This solves the chicken-and-egg problem
	 * where a nexthop is not offloaded until its neighbour is resolved,
	 * but the neighbour is never resolved as long as traffic flows in
	 * hardware through a different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static bool mlxsw_sp_neigh_ipv6_ignore(struct neighbour *n)
{
	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n))
			return;
		mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else {
		WARN_ON_ONCE(1);
	}
}

struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}

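/* A nexthop group mirrors a kernel multipath route: each member nexthop
 * that is resolved ("should_offload") gets an entry in the KVD linear
 * area, and routes using the group point at the base adjacency index.
 * Groups are kept in a hash table so that routes with an identical
 * nexthop set share one group (and one adjacency block) instead of each
 * allocating their own.
 */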
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group {
	void *priv;
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	struct neigh_table *neigh_tbl;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif	nexthops[0].rif
};

static struct fib_info *
mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->priv;
}

struct mlxsw_sp_nexthop_group_cmp_arg {
	enum mlxsw_sp_l3proto proto;
	union {
		struct fib_info *fi;
		struct mlxsw_sp_fib6_entry *fib6_entry;
	};
};

static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
				    const struct in6_addr *gw, int ifindex)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		const struct mlxsw_sp_nexthop *nh;

		nh = &nh_grp->nexthops[i];
		if (nh->ifindex == ifindex &&
		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
			return true;
	}

	return false;
}

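/* IPv6 group consolidation: unlike IPv4, where a group is keyed by the
 * route's struct fib_info, an IPv6 group is compared against a FIB entry
 * by its nexthop set. The two are considered equal when they have the
 * same number of nexthops and every (gateway, ifindex) pair of the
 * entry's rt6 list is present in the group.
 */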
static bool
mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
			    const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	if (nh_grp->count != fib6_entry->nrt6)
		return false;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct in6_addr *gw;
		int ifindex;

		ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
		gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
			return false;
	}

	return true;
}

static int
mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
	case MLXSW_SP_L3_PROTO_IPV6:
		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
						    cmp_arg->fib6_entry);
	default:
		WARN_ON(1);
		return 1;
	}
}

static int
mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->neigh_tbl->family;
}

static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group *nh_grp = data;
	const struct mlxsw_sp_nexthop *nh;
	struct fib_info *fi;
	unsigned int val;
	int i;

	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
	case AF_INET:
		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
		return jhash(&fi, sizeof(fi), seed);
	case AF_INET6:
		val = nh_grp->count;
		for (i = 0; i < nh_grp->count; i++) {
			nh = &nh_grp->nexthops[i];
			val ^= nh->ifindex;
		}
		return jhash(&val, sizeof(val), seed);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u32
mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
	unsigned int val = fib6_entry->nrt6;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct net_device *dev;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		dev = mlxsw_sp_rt6->rt->dst.dev;
		val ^= dev->ifindex;
	}

	return jhash(&val, sizeof(val), seed);
}

static u32
mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
	case MLXSW_SP_L3_PROTO_IPV6:
		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
	default:
		WARN_ON(1);
		return 0;
	}
}

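/* For IPv6 the hash functions deliberately mix only the nexthop count and
 * the ifindexes (an order-independent XOR), so any permutation of the
 * same nexthop set hashes to the same bucket; exact matching, including
 * the gateway addresses, is left to the compare callback above.
 */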
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.hashfn = mlxsw_sp_nexthop_group_hash,
	.obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
	.obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
};

static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
	    !nh_grp->gateway)
		return 0;

	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
	    !nh_grp->gateway)
		return;

	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
	cmp_arg.fi = fi;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
	cmp_arg.fib6_entry = fib6_entry;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}

static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err);

static void
mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
{
	enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
	}
}

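/* Heart of the nexthop offload logic. When the set of resolved nexthops
 * in a group changes, a new adjacency block of the required ECMP size is
 * allocated, member MACs are written to it and all FIB entries using the
 * group are re-pointed in one shot (or, on the first allocation, updated
 * one by one). If no nexthop is resolved or KVD linear space runs out,
 * the entries fall back to trapping packets to the kernel.
 */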
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}

	/* Offload state within the group changed, so update the flags. */
	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);

	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}

static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing)
		nh->should_offload = 1;
	else if (nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}

static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}

2033 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
2034 struct mlxsw_sp_nexthop *nh)
2036 struct mlxsw_sp_neigh_entry *neigh_entry;
2037 struct neighbour *n;
2038 u8 nud_state, dead;
2039 int err;
2041 if (!nh->nh_grp->gateway || nh->neigh_entry)
2042 return 0;
2044 /* Take a reference on the neighbour here, ensuring it is not
2045 * destroyed before the nexthop entry is finished with it.
2046 * The reference is taken either in neigh_lookup() or
2047 * in neigh_create() in case n is not found.
2048 */
2049 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
2050 if (!n) {
2051 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
2052 nh->rif->dev);
2053 if (IS_ERR(n))
2054 return PTR_ERR(n);
2055 neigh_event_send(n, NULL);
2057 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2058 if (!neigh_entry) {
2059 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2060 if (IS_ERR(neigh_entry)) {
2061 err = -EINVAL;
2062 goto err_neigh_entry_create;
2066 /* If this is the first nexthop connected to that neigh, add it to
2067 * the nexthop_neighs_list.
2068 */
2069 if (list_empty(&neigh_entry->nexthop_list))
2070 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
2071 &mlxsw_sp->router->nexthop_neighs_list);
2073 nh->neigh_entry = neigh_entry;
2074 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
2075 read_lock_bh(&n->lock);
2076 nud_state = n->nud_state;
2077 dead = n->dead;
2078 read_unlock_bh(&n->lock);
2079 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
2081 return 0;
2083 err_neigh_entry_create:
2084 neigh_release(n);
2085 return err;
2088 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
2089 struct mlxsw_sp_nexthop *nh)
2091 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2092 struct neighbour *n;
2094 if (!neigh_entry)
2095 return;
2096 n = neigh_entry->key.n;
2098 __mlxsw_sp_nexthop_neigh_update(nh, true);
2099 list_del(&nh->neigh_list_node);
2100 nh->neigh_entry = NULL;
2102 /* If this is the last nexthop connected to that neigh, remove it
2103 * from the nexthop_neighs_list.
2104 */
2105 if (list_empty(&neigh_entry->nexthop_list))
2106 list_del(&neigh_entry->nexthop_neighs_list_node);
2108 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2109 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2111 neigh_release(n);
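/* Reference pairing (a summary): the neighbour reference taken in
 * mlxsw_sp_nexthop_neigh_init() via neigh_lookup() or neigh_create()
 * is dropped by the neigh_release() above, and the neigh entry itself
 * is destroyed once the last nexthop detaches from it and it is no
 * longer connected.
 */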
2114 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
2115 struct mlxsw_sp_nexthop_group *nh_grp,
2116 struct mlxsw_sp_nexthop *nh,
2117 struct fib_nh *fib_nh)
2119 struct net_device *dev = fib_nh->nh_dev;
2120 struct in_device *in_dev;
2121 struct mlxsw_sp_rif *rif;
2122 int err;
2124 nh->nh_grp = nh_grp;
2125 nh->key.fib_nh = fib_nh;
2126 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
2127 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
2128 if (err)
2129 return err;
2131 if (!dev)
2132 return 0;
2134 in_dev = __in_dev_get_rtnl(dev);
2135 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
2136 fib_nh->nh_flags & RTNH_F_LINKDOWN)
2137 return 0;
2139 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
2140 if (!rif)
2141 return 0;
2142 mlxsw_sp_nexthop_rif_init(nh, rif);
2144 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
2145 if (err)
2146 goto err_nexthop_neigh_init;
2148 return 0;
2150 err_nexthop_neigh_init:
2151 mlxsw_sp_nexthop_rif_fini(nh);
2152 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
2153 return err;
2156 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
2157 struct mlxsw_sp_nexthop *nh)
2159 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
2160 mlxsw_sp_nexthop_rif_fini(nh);
2161 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
2164 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
2165 unsigned long event, struct fib_nh *fib_nh)
2167 struct mlxsw_sp_nexthop_key key;
2168 struct mlxsw_sp_nexthop *nh;
2169 struct mlxsw_sp_rif *rif;
2171 if (mlxsw_sp->router->aborted)
2172 return;
2174 key.fib_nh = fib_nh;
2175 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
2176 if (WARN_ON_ONCE(!nh))
2177 return;
2179 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
2180 if (!rif)
2181 return;
2183 switch (event) {
2184 case FIB_EVENT_NH_ADD:
2185 mlxsw_sp_nexthop_rif_init(nh, rif);
2186 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
2187 break;
2188 case FIB_EVENT_NH_DEL:
2189 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
2190 mlxsw_sp_nexthop_rif_fini(nh);
2191 break;
2194 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
2197 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2198 struct mlxsw_sp_rif *rif)
2200 struct mlxsw_sp_nexthop *nh, *tmp;
2202 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
2203 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
2204 mlxsw_sp_nexthop_rif_fini(nh);
2205 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
2209 static struct mlxsw_sp_nexthop_group *
2210 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
2212 struct mlxsw_sp_nexthop_group *nh_grp;
2213 struct mlxsw_sp_nexthop *nh;
2214 struct fib_nh *fib_nh;
2215 size_t alloc_size;
2216 int i;
2217 int err;
2219 alloc_size = sizeof(*nh_grp) +
2220 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
2221 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
2222 if (!nh_grp)
2223 return ERR_PTR(-ENOMEM);
2224 nh_grp->priv = fi;
2225 INIT_LIST_HEAD(&nh_grp->fib_list);
2226 nh_grp->neigh_tbl = &arp_tbl;
2228 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
2229 nh_grp->count = fi->fib_nhs;
2230 fib_info_hold(fi);
2231 for (i = 0; i < nh_grp->count; i++) {
2232 nh = &nh_grp->nexthops[i];
2233 fib_nh = &fi->fib_nh[i];
2234 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
2235 if (err)
2236 goto err_nexthop4_init;
2238 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
2239 if (err)
2240 goto err_nexthop_group_insert;
2241 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
2242 return nh_grp;
2244 err_nexthop_group_insert:
2245 err_nexthop4_init:
2246 for (i--; i >= 0; i--) {
2247 nh = &nh_grp->nexthops[i];
2248 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
2250 fib_info_put(fi);
2251 kfree(nh_grp);
2252 return ERR_PTR(err);
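/* The group and its flexible nexthops[] array are laid out in a single
 * allocation. The open-coded size computation above is equivalent to
 * the struct_size() helper from <linux/overflow.h> available in later
 * kernels (shown for illustration only):
 *
 *	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs),
 *			 GFP_KERNEL);
 */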
2255 static void
2256 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
2257 struct mlxsw_sp_nexthop_group *nh_grp)
2259 struct mlxsw_sp_nexthop *nh;
2260 int i;
2262 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
2263 for (i = 0; i < nh_grp->count; i++) {
2264 nh = &nh_grp->nexthops[i];
2265 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
2267 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
2268 WARN_ON_ONCE(nh_grp->adj_index_valid);
2269 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
2270 kfree(nh_grp);
2273 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
2274 struct mlxsw_sp_fib_entry *fib_entry,
2275 struct fib_info *fi)
2277 struct mlxsw_sp_nexthop_group *nh_grp;
2279 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
2280 if (!nh_grp) {
2281 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
2282 if (IS_ERR(nh_grp))
2283 return PTR_ERR(nh_grp);
2285 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
2286 fib_entry->nh_group = nh_grp;
2287 return 0;
2290 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
2291 struct mlxsw_sp_fib_entry *fib_entry)
2293 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2295 list_del(&fib_entry->nexthop_group_node);
2296 if (!list_empty(&nh_grp->fib_list))
2297 return;
2298 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
2301 static bool
2302 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
2304 struct mlxsw_sp_fib4_entry *fib4_entry;
2306 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
2307 common);
2308 return !fib4_entry->tos;
2311 static bool
2312 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
2314 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
2316 switch (fib_entry->fib_node->fib->proto) {
2317 case MLXSW_SP_L3_PROTO_IPV4:
2318 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
2319 return false;
2320 break;
2321 case MLXSW_SP_L3_PROTO_IPV6:
2322 break;
2325 switch (fib_entry->type) {
2326 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
2327 return !!nh_group->adj_index_valid;
2328 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2329 return !!nh_group->nh_rif;
2330 default:
2331 return false;
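/* Illustrations of the rules above: an IPv4 unicast route with a
 * non-zero TOS is never offloaded; a gateway (remote) route is only
 * offloaded once its group holds a valid adjacency index, i.e. at
 * least one neighbour is resolved; a directly connected (local) route
 * merely requires its nexthop RIF to exist.
 */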
2335 static struct mlxsw_sp_nexthop *
2336 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
2337 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
2339 int i;
2341 for (i = 0; i < nh_grp->count; i++) {
2342 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2343 struct rt6_info *rt = mlxsw_sp_rt6->rt;
2345 if (nh->rif && nh->rif->dev == rt->dst.dev &&
2346 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
2347 &rt->rt6i_gateway))
2348 return nh;
2352 return NULL;
2355 static void
2356 mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
2358 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2359 int i;
2361 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
2362 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
2363 return;
2366 for (i = 0; i < nh_grp->count; i++) {
2367 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2369 if (nh->offloaded)
2370 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
2371 else
2372 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
2376 static void
2377 mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
2379 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2380 int i;
2382 for (i = 0; i < nh_grp->count; i++) {
2383 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2385 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
2389 static void
2390 mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
2392 struct mlxsw_sp_fib6_entry *fib6_entry;
2393 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2395 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
2396 common);
2398 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
2399 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
2400 list)->rt->rt6i_flags |= RTF_OFFLOAD;
2401 return;
2404 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2405 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2406 struct mlxsw_sp_nexthop *nh;
2408 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
2409 if (nh && nh->offloaded)
2410 mlxsw_sp_rt6->rt->rt6i_flags |= RTF_OFFLOAD;
2411 else
2412 mlxsw_sp_rt6->rt->rt6i_flags &= ~RTF_OFFLOAD;
2416 static void
2417 mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
2419 struct mlxsw_sp_fib6_entry *fib6_entry;
2420 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2422 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
2423 common);
2424 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2425 struct rt6_info *rt = mlxsw_sp_rt6->rt;
2427 rt->rt6i_flags &= ~RTF_OFFLOAD;
2431 static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
2433 switch (fib_entry->fib_node->fib->proto) {
2434 case MLXSW_SP_L3_PROTO_IPV4:
2435 mlxsw_sp_fib4_entry_offload_set(fib_entry);
2436 break;
2437 case MLXSW_SP_L3_PROTO_IPV6:
2438 mlxsw_sp_fib6_entry_offload_set(fib_entry);
2439 break;
2443 static void
2444 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
2446 switch (fib_entry->fib_node->fib->proto) {
2447 case MLXSW_SP_L3_PROTO_IPV4:
2448 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
2449 break;
2450 case MLXSW_SP_L3_PROTO_IPV6:
2451 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
2452 break;
2456 static void
2457 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2458 enum mlxsw_reg_ralue_op op, int err)
2460 switch (op) {
2461 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
2462 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
2463 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
2464 if (err)
2465 return;
2466 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
2467 mlxsw_sp_fib_entry_offload_set(fib_entry);
2468 else
2469 mlxsw_sp_fib_entry_offload_unset(fib_entry);
2470 return;
2471 default:
2472 return;
2476 static void
2477 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
2478 const struct mlxsw_sp_fib_entry *fib_entry,
2479 enum mlxsw_reg_ralue_op op)
2481 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
2482 enum mlxsw_reg_ralxx_protocol proto;
2483 u32 *p_dip;
2485 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
2487 switch (fib->proto) {
2488 case MLXSW_SP_L3_PROTO_IPV4:
2489 p_dip = (u32 *) fib_entry->fib_node->key.addr;
2490 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
2491 fib_entry->fib_node->key.prefix_len,
2492 *p_dip);
2493 break;
2494 case MLXSW_SP_L3_PROTO_IPV6:
2495 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
2496 fib_entry->fib_node->key.prefix_len,
2497 fib_entry->fib_node->key.addr);
2498 break;
2502 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
2503 struct mlxsw_sp_fib_entry *fib_entry,
2504 enum mlxsw_reg_ralue_op op)
2506 char ralue_pl[MLXSW_REG_RALUE_LEN];
2507 enum mlxsw_reg_ralue_trap_action trap_action;
2508 u16 trap_id = 0;
2509 u32 adjacency_index = 0;
2510 u16 ecmp_size = 0;
2512 /* In case the nexthop group adjacency index is valid, use it
2513 * with the provided ECMP size. Otherwise, set up a trap and pass
2514 * traffic to the kernel.
2515 */
2516 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2517 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2518 adjacency_index = fib_entry->nh_group->adj_index;
2519 ecmp_size = fib_entry->nh_group->ecmp_size;
2520 } else {
2521 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2522 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2525 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
2526 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
2527 adjacency_index, ecmp_size);
2528 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2531 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
2532 struct mlxsw_sp_fib_entry *fib_entry,
2533 enum mlxsw_reg_ralue_op op)
2535 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
2536 enum mlxsw_reg_ralue_trap_action trap_action;
2537 char ralue_pl[MLXSW_REG_RALUE_LEN];
2538 u16 trap_id = 0;
2539 u16 rif_index = 0;
2541 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2542 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2543 rif_index = rif->rif_index;
2544 } else {
2545 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2546 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2549 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
2550 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2551 rif_index);
2552 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2555 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
2556 struct mlxsw_sp_fib_entry *fib_entry,
2557 enum mlxsw_reg_ralue_op op)
2559 char ralue_pl[MLXSW_REG_RALUE_LEN];
2561 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
2562 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2563 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2566 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2567 struct mlxsw_sp_fib_entry *fib_entry,
2568 enum mlxsw_reg_ralue_op op)
2570 switch (fib_entry->type) {
2571 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
2572 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
2573 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2574 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
2575 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2576 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
2578 return -EINVAL;
2581 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2582 struct mlxsw_sp_fib_entry *fib_entry,
2583 enum mlxsw_reg_ralue_op op)
2585 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
2587 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
2589 return err;
2592 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2593 struct mlxsw_sp_fib_entry *fib_entry)
2595 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2596 MLXSW_REG_RALUE_OP_WRITE_WRITE);
2599 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2600 struct mlxsw_sp_fib_entry *fib_entry)
2602 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2603 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2606 static int
2607 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2608 const struct fib_entry_notifier_info *fen_info,
2609 struct mlxsw_sp_fib_entry *fib_entry)
2611 struct fib_info *fi = fen_info->fi;
2613 switch (fen_info->type) {
2614 case RTN_BROADCAST: /* fall through */
2615 case RTN_LOCAL:
2616 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2617 return 0;
2618 case RTN_UNREACHABLE: /* fall through */
2619 case RTN_BLACKHOLE: /* fall through */
2620 case RTN_PROHIBIT:
2621 /* Packets hitting these routes need to be trapped, but
2622 * can do so with a lower priority than packets directed
2623 * at the host, so use action type local instead of trap.
2624 */
2625 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2626 return 0;
2627 case RTN_UNICAST:
2628 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2629 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2630 else
2631 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2632 return 0;
2633 default:
2634 return -EINVAL;
2638 static struct mlxsw_sp_fib4_entry *
2639 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2640 struct mlxsw_sp_fib_node *fib_node,
2641 const struct fib_entry_notifier_info *fen_info)
2643 struct mlxsw_sp_fib4_entry *fib4_entry;
2644 struct mlxsw_sp_fib_entry *fib_entry;
2645 int err;
2647 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
2648 if (!fib4_entry)
2649 return ERR_PTR(-ENOMEM);
2650 fib_entry = &fib4_entry->common;
2652 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
2653 if (err)
2654 goto err_fib4_entry_type_set;
2656 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
2657 if (err)
2658 goto err_nexthop4_group_get;
2660 fib4_entry->prio = fen_info->fi->fib_priority;
2661 fib4_entry->tb_id = fen_info->tb_id;
2662 fib4_entry->type = fen_info->type;
2663 fib4_entry->tos = fen_info->tos;
2665 fib_entry->fib_node = fib_node;
2667 return fib4_entry;
2669 err_nexthop4_group_get:
2670 err_fib4_entry_type_set:
2671 kfree(fib4_entry);
2672 return ERR_PTR(err);
2675 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2676 struct mlxsw_sp_fib4_entry *fib4_entry)
2678 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
2679 kfree(fib4_entry);
2682 static struct mlxsw_sp_fib_node *
2683 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2684 size_t addr_len, unsigned char prefix_len);
2686 static struct mlxsw_sp_fib4_entry *
2687 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2688 const struct fib_entry_notifier_info *fen_info)
2690 struct mlxsw_sp_fib4_entry *fib4_entry;
2691 struct mlxsw_sp_fib_node *fib_node;
2692 struct mlxsw_sp_fib *fib;
2693 struct mlxsw_sp_vr *vr;
2695 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
2696 if (!vr)
2697 return NULL;
2698 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
2700 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
2701 sizeof(fen_info->dst),
2702 fen_info->dst_len);
2703 if (!fib_node)
2704 return NULL;
2706 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
2707 if (fib4_entry->tb_id == fen_info->tb_id &&
2708 fib4_entry->tos == fen_info->tos &&
2709 fib4_entry->type == fen_info->type &&
2710 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
2711 fen_info->fi) {
2712 return fib4_entry;
2716 return NULL;
2719 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2720 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2721 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2722 .key_len = sizeof(struct mlxsw_sp_fib_key),
2723 .automatic_shrinking = true,
2726 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2727 struct mlxsw_sp_fib_node *fib_node)
2729 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2730 mlxsw_sp_fib_ht_params);
2733 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2734 struct mlxsw_sp_fib_node *fib_node)
2736 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2737 mlxsw_sp_fib_ht_params);
2740 static struct mlxsw_sp_fib_node *
2741 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2742 size_t addr_len, unsigned char prefix_len)
2744 struct mlxsw_sp_fib_key key;
2746 memset(&key, 0, sizeof(key));
2747 memcpy(key.addr, addr, addr_len);
2748 key.prefix_len = prefix_len;
2749 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2752 static struct mlxsw_sp_fib_node *
2753 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
2754 size_t addr_len, unsigned char prefix_len)
2756 struct mlxsw_sp_fib_node *fib_node;
2758 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2759 if (!fib_node)
2760 return NULL;
2762 INIT_LIST_HEAD(&fib_node->entry_list);
2763 list_add(&fib_node->list, &fib->node_list);
2764 memcpy(fib_node->key.addr, addr, addr_len);
2765 fib_node->key.prefix_len = prefix_len;
2767 return fib_node;
2770 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2772 list_del(&fib_node->list);
2773 WARN_ON(!list_empty(&fib_node->entry_list));
2774 kfree(fib_node);
2777 static bool
2778 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2779 const struct mlxsw_sp_fib_entry *fib_entry)
2781 return list_first_entry(&fib_node->entry_list,
2782 struct mlxsw_sp_fib_entry, list) == fib_entry;
2785 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
2786 struct mlxsw_sp_fib *fib,
2787 struct mlxsw_sp_fib_node *fib_node)
2789 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
2790 struct mlxsw_sp_lpm_tree *lpm_tree;
2791 int err;
2793 /* Since the tree is shared between all virtual routers we must
2794 * make sure it contains all the required prefix lengths. This
2795 * can be computed by either adding the new prefix length to the
2796 * existing prefix usage of a bound tree, or by aggregating the
2797 * prefix lengths across all virtual routers and adding the new
2798 * one as well.
2799 */
2800 if (fib->lpm_tree)
2801 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
2802 &fib->lpm_tree->prefix_usage);
2803 else
2804 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
2805 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2807 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2808 fib->proto);
2809 if (IS_ERR(lpm_tree))
2810 return PTR_ERR(lpm_tree);
2812 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
2813 return 0;
2815 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
2816 if (err)
2817 return err;
2819 return 0;
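/* A worked example of the aggregation above, with illustrative values:
 * if the bound tree currently covers prefix lengths {8, 24} and a /16
 * node is linked, the requested usage becomes {8, 16, 24}. A tree
 * matching that usage is fetched and, when it differs from the bound
 * one, the FIB is migrated to it via mlxsw_sp_vrs_lpm_tree_replace().
 */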
2822 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
2823 struct mlxsw_sp_fib *fib)
2825 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
2826 struct mlxsw_sp_lpm_tree *lpm_tree;
2828 /* Aggregate prefix lengths across all virtual routers to make
2829 * sure the LPM tree only contains prefix lengths that are in use.
2830 */
2831 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
2832 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2833 fib->proto);
2834 if (IS_ERR(lpm_tree))
2835 goto err_tree_get;
2836 mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
2838 err_tree_get:
2839 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
2840 return;
2841 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2842 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
2843 fib->lpm_tree = NULL;
2846 static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2848 unsigned char prefix_len = fib_node->key.prefix_len;
2849 struct mlxsw_sp_fib *fib = fib_node->fib;
2851 if (fib->prefix_ref_count[prefix_len]++ == 0)
2852 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2855 static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2857 unsigned char prefix_len = fib_node->key.prefix_len;
2858 struct mlxsw_sp_fib *fib = fib_node->fib;
2860 if (--fib->prefix_ref_count[prefix_len] == 0)
2861 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2864 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2865 struct mlxsw_sp_fib_node *fib_node,
2866 struct mlxsw_sp_fib *fib)
2868 int err;
2870 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2871 if (err)
2872 return err;
2873 fib_node->fib = fib;
2875 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
2876 if (err)
2877 goto err_fib_lpm_tree_link;
2879 mlxsw_sp_fib_node_prefix_inc(fib_node);
2881 return 0;
2883 err_fib_lpm_tree_link:
2884 fib_node->fib = NULL;
2885 mlxsw_sp_fib_node_remove(fib, fib_node);
2886 return err;
2889 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2890 struct mlxsw_sp_fib_node *fib_node)
2892 struct mlxsw_sp_fib *fib = fib_node->fib;
2894 mlxsw_sp_fib_node_prefix_dec(fib_node);
2895 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
2896 fib_node->fib = NULL;
2897 mlxsw_sp_fib_node_remove(fib, fib_node);
2900 static struct mlxsw_sp_fib_node *
2901 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
2902 size_t addr_len, unsigned char prefix_len,
2903 enum mlxsw_sp_l3proto proto)
2905 struct mlxsw_sp_fib_node *fib_node;
2906 struct mlxsw_sp_fib *fib;
2907 struct mlxsw_sp_vr *vr;
2908 int err;
2910 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id);
2911 if (IS_ERR(vr))
2912 return ERR_CAST(vr);
2913 fib = mlxsw_sp_vr_fib(vr, proto);
2915 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
2916 if (fib_node)
2917 return fib_node;
2919 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
2920 if (!fib_node) {
2921 err = -ENOMEM;
2922 goto err_fib_node_create;
2925 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2926 if (err)
2927 goto err_fib_node_init;
2929 return fib_node;
2931 err_fib_node_init:
2932 mlxsw_sp_fib_node_destroy(fib_node);
2933 err_fib_node_create:
2934 mlxsw_sp_vr_put(vr);
2935 return ERR_PTR(err);
2938 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
2939 struct mlxsw_sp_fib_node *fib_node)
2941 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
2943 if (!list_empty(&fib_node->entry_list))
2944 return;
2945 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
2946 mlxsw_sp_fib_node_destroy(fib_node);
2947 mlxsw_sp_vr_put(vr);
2950 static struct mlxsw_sp_fib4_entry *
2951 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2952 const struct mlxsw_sp_fib4_entry *new4_entry)
2954 struct mlxsw_sp_fib4_entry *fib4_entry;
2956 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
2957 if (fib4_entry->tb_id > new4_entry->tb_id)
2958 continue;
2959 if (fib4_entry->tb_id != new4_entry->tb_id)
2960 break;
2961 if (fib4_entry->tos > new4_entry->tos)
2962 continue;
2963 if (fib4_entry->prio >= new4_entry->prio ||
2964 fib4_entry->tos < new4_entry->tos)
2965 return fib4_entry;
2968 return NULL;
2971 static int
2972 mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
2973 struct mlxsw_sp_fib4_entry *new4_entry)
2975 struct mlxsw_sp_fib_node *fib_node;
2977 if (WARN_ON(!fib4_entry))
2978 return -EINVAL;
2980 fib_node = fib4_entry->common.fib_node;
2981 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
2982 common.list) {
2983 if (fib4_entry->tb_id != new4_entry->tb_id ||
2984 fib4_entry->tos != new4_entry->tos ||
2985 fib4_entry->prio != new4_entry->prio)
2986 break;
2989 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
2990 return 0;
2993 static int
2994 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
2995 bool replace, bool append)
2997 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
2998 struct mlxsw_sp_fib4_entry *fib4_entry;
3000 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
3002 if (append)
3003 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
3004 if (replace && WARN_ON(!fib4_entry))
3005 return -EINVAL;
3007 /* Insert the new entry before the replaced one, so that we can
3008 * later remove the latter.
3009 */
3010 if (fib4_entry) {
3011 list_add_tail(&new4_entry->common.list,
3012 &fib4_entry->common.list);
3013 } else {
3014 struct mlxsw_sp_fib4_entry *last;
3016 list_for_each_entry(last, &fib_node->entry_list, common.list) {
3017 if (new4_entry->tb_id > last->tb_id)
3018 break;
3019 fib4_entry = last;
3022 if (fib4_entry)
3023 list_add(&new4_entry->common.list,
3024 &fib4_entry->common.list);
3025 else
3026 list_add(&new4_entry->common.list,
3027 &fib_node->entry_list);
3030 return 0;
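/* The entry list is thus kept sorted by table ID (descending), then
 * TOS (descending), then priority (ascending). For example, with
 * hypothetical entries, inserting {tb_id 254, tos 0, prio 100} into
 *
 *	{tb_id 255, tos 0, prio 0}
 *	{tb_id 254, tos 8, prio 0}
 *	{tb_id 254, tos 0, prio 20}
 *
 * places it last; the head of the list stays the best entry for the
 * prefix and is the only one programmed to the device.
 */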
3033 static void
3034 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
3036 list_del(&fib4_entry->common.list);
3039 static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
3040 struct mlxsw_sp_fib_entry *fib_entry)
3042 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
3044 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
3045 return 0;
3047 /* To prevent packet loss, overwrite the previously offloaded
3048 * entry.
3049 */
3050 if (!list_is_singular(&fib_node->entry_list)) {
3051 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
3052 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
3054 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
3057 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3060 static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
3061 struct mlxsw_sp_fib_entry *fib_entry)
3063 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
3065 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
3066 return;
3068 /* Promote the next entry by overwriting the deleted entry */
3069 if (!list_is_singular(&fib_node->entry_list)) {
3070 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
3071 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
3073 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
3074 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3075 return;
3078 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
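/* Only the first entry of a node is reflected in hardware; the add and
 * del helpers above maintain that invariant. A sketch of the promotion
 * on delete, assuming entries A (head, offloaded) and B behind it:
 *
 *	mlxsw_sp_fib_entry_update(mlxsw_sp, B);
 *	mlxsw_sp_fib_entry_offload_refresh(A,
 *					   MLXSW_REG_RALUE_OP_WRITE_DELETE,
 *					   0);
 *
 * B overwrites the same LPM key in place, so there is no window in
 * which the prefix is missing from the device.
 */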
3081 static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
3082 struct mlxsw_sp_fib4_entry *fib4_entry,
3083 bool replace, bool append)
3085 int err;
3087 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
3088 if (err)
3089 return err;
3091 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
3092 if (err)
3093 goto err_fib_node_entry_add;
3095 return 0;
3097 err_fib_node_entry_add:
3098 mlxsw_sp_fib4_node_list_remove(fib4_entry);
3099 return err;
3102 static void
3103 mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
3104 struct mlxsw_sp_fib4_entry *fib4_entry)
3106 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
3107 mlxsw_sp_fib4_node_list_remove(fib4_entry);
3110 static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
3111 struct mlxsw_sp_fib4_entry *fib4_entry,
3112 bool replace)
3114 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
3115 struct mlxsw_sp_fib4_entry *replaced;
3117 if (!replace)
3118 return;
3120 /* We inserted the new entry before the replaced one */
3121 replaced = list_next_entry(fib4_entry, common.list);
3123 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
3124 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
3125 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3128 static int
3129 mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
3130 const struct fib_entry_notifier_info *fen_info,
3131 bool replace, bool append)
3133 struct mlxsw_sp_fib4_entry *fib4_entry;
3134 struct mlxsw_sp_fib_node *fib_node;
3135 int err;
3137 if (mlxsw_sp->router->aborted)
3138 return 0;
3140 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
3141 &fen_info->dst, sizeof(fen_info->dst),
3142 fen_info->dst_len,
3143 MLXSW_SP_L3_PROTO_IPV4);
3144 if (IS_ERR(fib_node)) {
3145 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
3146 return PTR_ERR(fib_node);
3149 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
3150 if (IS_ERR(fib4_entry)) {
3151 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
3152 err = PTR_ERR(fib4_entry);
3153 goto err_fib4_entry_create;
3156 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
3157 append);
3158 if (err) {
3159 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
3160 goto err_fib4_node_entry_link;
3163 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
3165 return 0;
3167 err_fib4_node_entry_link:
3168 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
3169 err_fib4_entry_create:
3170 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3171 return err;
3174 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
3175 struct fib_entry_notifier_info *fen_info)
3177 struct mlxsw_sp_fib4_entry *fib4_entry;
3178 struct mlxsw_sp_fib_node *fib_node;
3180 if (mlxsw_sp->router->aborted)
3181 return;
3183 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
3184 if (WARN_ON(!fib4_entry))
3185 return;
3186 fib_node = fib4_entry->common.fib_node;
3188 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
3189 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
3190 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3193 static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
3195 /* Packets with a link-local destination IP arriving at the router
3196 * are trapped to the CPU, so there is no need to program specific
3197 * routes for them.
3198 */
3199 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
3200 return true;
3202 /* Multicast routes aren't supported, so ignore them. Neighbour
3203 * Discovery packets are specifically trapped.
3204 */
3205 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
3206 return true;
3208 /* Cloned routes are irrelevant in the forwarding path. */
3209 if (rt->rt6i_flags & RTF_CACHE)
3210 return true;
3212 return false;
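/* Illustrative examples of ignored routes: fe80::/64 (link-local),
 * ff00::/8 (multicast) and RTF_CACHE clones such as those created for
 * PMTU exceptions.
 */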
3215 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
3217 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3219 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
3220 if (!mlxsw_sp_rt6)
3221 return ERR_PTR(-ENOMEM);
3223 /* In case of route replace, the replaced route is deleted with
3224 * no notification. Take a reference to prevent accessing freed
3225 * memory.
3226 */
3227 mlxsw_sp_rt6->rt = rt;
3228 rt6_hold(rt);
3230 return mlxsw_sp_rt6;
3233 #if IS_ENABLED(CONFIG_IPV6)
3234 static void mlxsw_sp_rt6_release(struct rt6_info *rt)
3236 rt6_release(rt);
3238 #else
3239 static void mlxsw_sp_rt6_release(struct rt6_info *rt)
3242 #endif
3244 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3246 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
3247 kfree(mlxsw_sp_rt6);
3250 static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
3252 /* RTF_CACHE routes are ignored */
3253 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
3256 static struct rt6_info *
3257 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
3259 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
3260 list)->rt;
3263 static struct mlxsw_sp_fib6_entry *
3264 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
3265 const struct rt6_info *nrt, bool replace)
3267 struct mlxsw_sp_fib6_entry *fib6_entry;
3269 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
3270 return NULL;
3272 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
3273 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
3275 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
3276 * virtual router.
3277 */
3278 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
3279 continue;
3280 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
3281 break;
3282 if (rt->rt6i_metric < nrt->rt6i_metric)
3283 continue;
3284 if (rt->rt6i_metric == nrt->rt6i_metric &&
3285 mlxsw_sp_fib6_rt_can_mp(rt))
3286 return fib6_entry;
3287 if (rt->rt6i_metric > nrt->rt6i_metric)
3288 break;
3291 return NULL;
3294 static struct mlxsw_sp_rt6 *
3295 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
3296 const struct rt6_info *rt)
3298 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3300 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3301 if (mlxsw_sp_rt6->rt == rt)
3302 return mlxsw_sp_rt6;
3305 return NULL;
3308 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
3309 struct mlxsw_sp_nexthop_group *nh_grp,
3310 struct mlxsw_sp_nexthop *nh,
3311 const struct rt6_info *rt)
3313 struct net_device *dev = rt->dst.dev;
3314 struct mlxsw_sp_rif *rif;
3315 int err;
3317 nh->nh_grp = nh_grp;
3318 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
3320 if (!dev)
3321 return 0;
3322 nh->ifindex = dev->ifindex;
3324 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3325 if (!rif)
3326 return 0;
3327 mlxsw_sp_nexthop_rif_init(nh, rif);
3329 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3330 if (err)
3331 goto err_nexthop_neigh_init;
3333 return 0;
3335 err_nexthop_neigh_init:
3336 mlxsw_sp_nexthop_rif_fini(nh);
3337 return err;
3340 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
3341 struct mlxsw_sp_nexthop *nh)
3343 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3344 mlxsw_sp_nexthop_rif_fini(nh);
3347 static struct mlxsw_sp_nexthop_group *
3348 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
3349 struct mlxsw_sp_fib6_entry *fib6_entry)
3351 struct mlxsw_sp_nexthop_group *nh_grp;
3352 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3353 struct mlxsw_sp_nexthop *nh;
3354 size_t alloc_size;
3355 int i = 0;
3356 int err;
3358 alloc_size = sizeof(*nh_grp) +
3359 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
3360 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3361 if (!nh_grp)
3362 return ERR_PTR(-ENOMEM);
3363 INIT_LIST_HEAD(&nh_grp->fib_list);
3364 #if IS_ENABLED(CONFIG_IPV6)
3365 nh_grp->neigh_tbl = &nd_tbl;
3366 #endif
3367 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
3368 struct mlxsw_sp_rt6, list);
3369 nh_grp->gateway = !!(mlxsw_sp_rt6->rt->rt6i_flags & RTF_GATEWAY);
3370 nh_grp->count = fib6_entry->nrt6;
3371 for (i = 0; i < nh_grp->count; i++) {
3372 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3374 nh = &nh_grp->nexthops[i];
3375 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
3376 if (err)
3377 goto err_nexthop6_init;
3378 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
3381 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3382 if (err)
3383 goto err_nexthop_group_insert;
3385 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3386 return nh_grp;
3388 err_nexthop_group_insert:
3389 err_nexthop6_init:
3390 for (i--; i >= 0; i--) {
3391 nh = &nh_grp->nexthops[i];
3392 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
3394 kfree(nh_grp);
3395 return ERR_PTR(err);
3398 static void
3399 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
3400 struct mlxsw_sp_nexthop_group *nh_grp)
3402 struct mlxsw_sp_nexthop *nh;
3403 int i = nh_grp->count;
3405 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
3406 for (i--; i >= 0; i--) {
3407 nh = &nh_grp->nexthops[i];
3408 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
3410 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3411 WARN_ON(nh_grp->adj_index_valid);
3412 kfree(nh_grp);
3415 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
3416 struct mlxsw_sp_fib6_entry *fib6_entry)
3418 struct mlxsw_sp_nexthop_group *nh_grp;
3420 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
3421 if (!nh_grp) {
3422 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
3423 if (IS_ERR(nh_grp))
3424 return PTR_ERR(nh_grp);
3427 list_add_tail(&fib6_entry->common.nexthop_group_node,
3428 &nh_grp->fib_list);
3429 fib6_entry->common.nh_group = nh_grp;
3431 return 0;
3434 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
3435 struct mlxsw_sp_fib_entry *fib_entry)
3437 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3439 list_del(&fib_entry->nexthop_group_node);
3440 if (!list_empty(&nh_grp->fib_list))
3441 return;
3442 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
3445 static int
3446 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
3447 struct mlxsw_sp_fib6_entry *fib6_entry)
3449 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
3450 int err;
3452 fib6_entry->common.nh_group = NULL;
3453 list_del(&fib6_entry->common.nexthop_group_node);
3455 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
3456 if (err)
3457 goto err_nexthop6_group_get;
3459 /* If this entry is offloaded, the adjacency index currently
3460 * associated with it in the device's table is that of the old
3461 * group. Start using the new one instead.
3462 */
3463 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
3464 if (err)
3465 goto err_fib_node_entry_add;
3467 if (list_empty(&old_nh_grp->fib_list))
3468 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
3470 return 0;
3472 err_fib_node_entry_add:
3473 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
3474 err_nexthop6_group_get:
3475 list_add_tail(&fib6_entry->common.nexthop_group_node,
3476 &old_nh_grp->fib_list);
3477 fib6_entry->common.nh_group = old_nh_grp;
3478 return err;
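/* The update above is make-before-break. Assuming the entry moves from
 * group G1 to group G2:
 *
 *	1. Detach the entry from G1 and get (or create) G2.
 *	2. Rewrite the entry in hardware so that it points at G2's
 *	   adjacency index.
 *	3. Destroy G1, but only if it became unused.
 *
 * On failure the entry is re-attached to G1, whose hardware state was
 * never touched, making the rollback safe.
 */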
3481 static int
3482 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
3483 struct mlxsw_sp_fib6_entry *fib6_entry,
3484 struct rt6_info *rt)
3486 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3487 int err;
3489 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
3490 if (IS_ERR(mlxsw_sp_rt6))
3491 return PTR_ERR(mlxsw_sp_rt6);
3493 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
3494 fib6_entry->nrt6++;
3496 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
3497 if (err)
3498 goto err_nexthop6_group_update;
3500 return 0;
3502 err_nexthop6_group_update:
3503 fib6_entry->nrt6--;
3504 list_del(&mlxsw_sp_rt6->list);
3505 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3506 return err;
3509 static void
3510 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
3511 struct mlxsw_sp_fib6_entry *fib6_entry,
3512 struct rt6_info *rt)
3514 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3516 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
3517 if (WARN_ON(!mlxsw_sp_rt6))
3518 return;
3520 fib6_entry->nrt6--;
3521 list_del(&mlxsw_sp_rt6->list);
3522 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
3523 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3526 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp_fib_entry *fib_entry,
3527 const struct rt6_info *rt)
3529 /* Packets hitting RTF_REJECT routes need to be discarded by the
3530 * stack. We can rely on their destination device not having a
3531 * RIF (it's the loopback device) and can thus use action type
3532 * local, which will cause them to be trapped with a lower
3533 * priority than packets that need to be locally received.
3534 */
3535 if (rt->rt6i_flags & RTF_LOCAL)
3536 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3537 else if (rt->rt6i_flags & RTF_REJECT)
3538 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
3539 else if (rt->rt6i_flags & RTF_GATEWAY)
3540 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
3541 else
3542 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
3545 static void
3546 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
3548 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
3550 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
3551 list) {
3552 fib6_entry->nrt6--;
3553 list_del(&mlxsw_sp_rt6->list);
3554 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3558 static struct mlxsw_sp_fib6_entry *
3559 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
3560 struct mlxsw_sp_fib_node *fib_node,
3561 struct rt6_info *rt)
3563 struct mlxsw_sp_fib6_entry *fib6_entry;
3564 struct mlxsw_sp_fib_entry *fib_entry;
3565 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3566 int err;
3568 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
3569 if (!fib6_entry)
3570 return ERR_PTR(-ENOMEM);
3571 fib_entry = &fib6_entry->common;
3573 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
3574 if (IS_ERR(mlxsw_sp_rt6)) {
3575 err = PTR_ERR(mlxsw_sp_rt6);
3576 goto err_rt6_create;
3579 mlxsw_sp_fib6_entry_type_set(fib_entry, mlxsw_sp_rt6->rt);
3581 INIT_LIST_HEAD(&fib6_entry->rt6_list);
3582 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
3583 fib6_entry->nrt6 = 1;
3584 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
3585 if (err)
3586 goto err_nexthop6_group_get;
3588 fib_entry->fib_node = fib_node;
3590 return fib6_entry;
3592 err_nexthop6_group_get:
3593 list_del(&mlxsw_sp_rt6->list);
3594 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3595 err_rt6_create:
3596 kfree(fib6_entry);
3597 return ERR_PTR(err);
3600 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
3601 struct mlxsw_sp_fib6_entry *fib6_entry)
3603 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
3604 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
3605 WARN_ON(fib6_entry->nrt6);
3606 kfree(fib6_entry);
3609 static struct mlxsw_sp_fib6_entry *
3610 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
3611 const struct rt6_info *nrt, bool replace)
3613 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
3615 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
3616 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
3618 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
3619 continue;
3620 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
3621 break;
3622 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
3623 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
3624 mlxsw_sp_fib6_rt_can_mp(nrt))
3625 return fib6_entry;
3626 if (mlxsw_sp_fib6_rt_can_mp(nrt))
3627 fallback = fallback ?: fib6_entry;
3629 if (rt->rt6i_metric > nrt->rt6i_metric)
3630 return fallback ?: fib6_entry;
3633 return fallback;
3636 static int
3637 mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
3638 bool replace)
3640 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
3641 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
3642 struct mlxsw_sp_fib6_entry *fib6_entry;
3644 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
3646 if (replace && WARN_ON(!fib6_entry))
3647 return -EINVAL;
3649 if (fib6_entry) {
3650 list_add_tail(&new6_entry->common.list,
3651 &fib6_entry->common.list);
3652 } else {
3653 struct mlxsw_sp_fib6_entry *last;
3655 list_for_each_entry(last, &fib_node->entry_list, common.list) {
3656 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
3658 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
3659 break;
3660 fib6_entry = last;
3663 if (fib6_entry)
3664 list_add(&new6_entry->common.list,
3665 &fib6_entry->common.list);
3666 else
3667 list_add(&new6_entry->common.list,
3668 &fib_node->entry_list);
3671 return 0;
3674 static void
3675 mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
3677 list_del(&fib6_entry->common.list);
3680 static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
3681 struct mlxsw_sp_fib6_entry *fib6_entry,
3682 bool replace)
3684 int err;
3686 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
3687 if (err)
3688 return err;
3690 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
3691 if (err)
3692 goto err_fib_node_entry_add;
3694 return 0;
3696 err_fib_node_entry_add:
3697 mlxsw_sp_fib6_node_list_remove(fib6_entry);
3698 return err;
3701 static void
3702 mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
3703 struct mlxsw_sp_fib6_entry *fib6_entry)
3705 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
3706 mlxsw_sp_fib6_node_list_remove(fib6_entry);
3709 static struct mlxsw_sp_fib6_entry *
3710 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3711 const struct rt6_info *rt)
3713 struct mlxsw_sp_fib6_entry *fib6_entry;
3714 struct mlxsw_sp_fib_node *fib_node;
3715 struct mlxsw_sp_fib *fib;
3716 struct mlxsw_sp_vr *vr;
3718 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
3719 if (!vr)
3720 return NULL;
3721 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
3723 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
3724 sizeof(rt->rt6i_dst.addr),
3725 rt->rt6i_dst.plen);
3726 if (!fib_node)
3727 return NULL;
3729 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
3730 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
3732 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
3733 rt->rt6i_metric == iter_rt->rt6i_metric &&
3734 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
3735 return fib6_entry;
3738 return NULL;
3741 static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
3742 struct mlxsw_sp_fib6_entry *fib6_entry,
3743 bool replace)
3745 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
3746 struct mlxsw_sp_fib6_entry *replaced;
3748 if (!replace)
3749 return;
3751 replaced = list_next_entry(fib6_entry, common.list);
3753 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
3754 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
3755 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3758 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
3759 struct rt6_info *rt, bool replace)
3761 struct mlxsw_sp_fib6_entry *fib6_entry;
3762 struct mlxsw_sp_fib_node *fib_node;
3763 int err;
3765 if (mlxsw_sp->router->aborted)
3766 return 0;
3768 if (rt->rt6i_src.plen)
3769 return -EINVAL;
3771 if (mlxsw_sp_fib6_rt_should_ignore(rt))
3772 return 0;
3774 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
3775 &rt->rt6i_dst.addr,
3776 sizeof(rt->rt6i_dst.addr),
3777 rt->rt6i_dst.plen,
3778 MLXSW_SP_L3_PROTO_IPV6);
3779 if (IS_ERR(fib_node))
3780 return PTR_ERR(fib_node);
3782 /* Before creating a new entry, try to append the route to an
3783 * existing multipath entry.
3784 */
3785 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
3786 if (fib6_entry) {
3787 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
3788 if (err)
3789 goto err_fib6_entry_nexthop_add;
3790 return 0;
3793 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
3794 if (IS_ERR(fib6_entry)) {
3795 err = PTR_ERR(fib6_entry);
3796 goto err_fib6_entry_create;
3799 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
3800 if (err)
3801 goto err_fib6_node_entry_link;
3803 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
3805 return 0;
3807 err_fib6_node_entry_link:
3808 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
3809 err_fib6_entry_create:
3810 err_fib6_entry_nexthop_add:
3811 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3812 return err;
3815 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
3816 struct rt6_info *rt)
3818 struct mlxsw_sp_fib6_entry *fib6_entry;
3819 struct mlxsw_sp_fib_node *fib_node;
3821 if (mlxsw_sp->router->aborted)
3822 return;
3824 if (mlxsw_sp_fib6_rt_should_ignore(rt))
3825 return;
3827 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
3828 if (WARN_ON(!fib6_entry))
3829 return;
3831 /* If the route is part of a multipath entry, but is not the last
3832 * one removed, then only shrink its nexthop group.
3833 */
3834 if (!list_is_singular(&fib6_entry->rt6_list)) {
3835 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
3836 return;
3839 fib_node = fib6_entry->common.fib_node;
3841 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
3842 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
3843 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3846 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
3847 enum mlxsw_reg_ralxx_protocol proto,
3848 u8 tree_id)
3850 char ralta_pl[MLXSW_REG_RALTA_LEN];
3851 char ralst_pl[MLXSW_REG_RALST_LEN];
3852 int i, err;
3854 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
3855 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
3856 if (err)
3857 return err;
3859 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
3860 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
3861 if (err)
3862 return err;
3864 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
3865 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
3866 char raltb_pl[MLXSW_REG_RALTB_LEN];
3867 char ralue_pl[MLXSW_REG_RALUE_LEN];
3869 if (!mlxsw_sp_vr_is_used(vr))
3870 continue;
3872 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
3873 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
3874 raltb_pl);
3875 if (err)
3876 return err;
3878 mlxsw_reg_ralue_pack(ralue_pl, proto,
3879 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
3880 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3881 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
3882 ralue_pl);
3883 if (err)
3884 return err;
3887 return 0;
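/* A summary of the register sequence above, applied once per protocol:
 * RALTA allocates an LPM tree, RALST sets its structure, RALTB binds
 * every in-use virtual router to it and RALUE writes a zero-length
 * (default) route with an ip2me action, so that after an abort all
 * routed packets are trapped to the CPU and forwarded by the kernel.
 */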
3890 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
3892 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
3893 int err;
3895 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
3896 MLXSW_SP_LPM_TREE_MIN);
3897 if (err)
3898 return err;
3900 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
3901 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
3902 MLXSW_SP_LPM_TREE_MIN + 1);
3905 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
3906 struct mlxsw_sp_fib_node *fib_node)
3908 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
3910 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
3911 common.list) {
3912 bool do_break = &tmp->common.list == &fib_node->entry_list;
3914 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
3915 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
3916 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3917 /* Break when the entry list is empty and the node was freed.
3918 * Otherwise, we'll access freed memory in the next
3919 * iteration.
3920 */
3921 if (do_break)
3922 break;
3926 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
3927 struct mlxsw_sp_fib_node *fib_node)
3929 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
3931 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
3932 common.list) {
3933 bool do_break = &tmp->common.list == &fib_node->entry_list;
3935 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
3936 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
3937 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3938 if (do_break)
3939 break;
3943 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
3944 struct mlxsw_sp_fib_node *fib_node)
3946 switch (fib_node->fib->proto) {
3947 case MLXSW_SP_L3_PROTO_IPV4:
3948 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
3949 break;
3950 case MLXSW_SP_L3_PROTO_IPV6:
3951 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
3952 break;
3956 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
3957 struct mlxsw_sp_vr *vr,
3958 enum mlxsw_sp_l3proto proto)
3960 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
3961 struct mlxsw_sp_fib_node *fib_node, *tmp;
3963 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
3964 bool do_break = &tmp->list == &fib->node_list;
3966 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
3967 if (do_break)
3968 break;
3972 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
3974 int i;
3976 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
3977 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
3979 if (!mlxsw_sp_vr_is_used(vr))
3980 continue;
3981 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
3983 /* If the virtual router was only used for IPv4, then it's no
3984 * longer used.
3985 */
3986 if (!mlxsw_sp_vr_is_used(vr))
3987 continue;
3988 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
3992 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
3994 int err;
3996 if (mlxsw_sp->router->aborted)
3997 return;
3998 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
3999 mlxsw_sp_router_fib_flush(mlxsw_sp);
4000 mlxsw_sp->router->aborted = true;
4001 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
4002 if (err)
4003 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
4006 struct mlxsw_sp_fib_event_work {
4007 struct work_struct work;
4008 union {
4009 struct fib6_entry_notifier_info fen6_info;
4010 struct fib_entry_notifier_info fen_info;
4011 struct fib_rule_notifier_info fr_info;
4012 struct fib_nh_notifier_info fnh_info;
4014 struct mlxsw_sp *mlxsw_sp;
4015 unsigned long event;
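/* The FIB notifier runs in atomic context, so each event is copied
 * into the structure above and handled later under RTNL. A minimal
 * sketch of the producing side, assuming the queueing pattern this
 * driver uses elsewhere:
 *
 *	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
 *	if (WARN_ON(!fib_work))
 *		return NOTIFY_BAD;
 *	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
 *	fib_work->mlxsw_sp = mlxsw_sp;
 *	fib_work->event = event;
 *	mlxsw_sp_router_fib4_event(fib_work, info);
 *	mlxsw_core_schedule_work(&fib_work->work);
 */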
4018 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
4020 struct mlxsw_sp_fib_event_work *fib_work =
4021 container_of(work, struct mlxsw_sp_fib_event_work, work);
4022 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
4023 struct fib_rule *rule;
4024 bool replace, append;
4025 int err;
4027 /* Protect internal structures from changes */
4028 rtnl_lock();
4029 switch (fib_work->event) {
4030 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4031 case FIB_EVENT_ENTRY_APPEND: /* fall through */
4032 case FIB_EVENT_ENTRY_ADD:
4033 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
4034 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
4035 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
4036 replace, append);
4037 if (err)
4038 mlxsw_sp_router_fib_abort(mlxsw_sp);
4039 fib_info_put(fib_work->fen_info.fi);
4040 break;
4041 case FIB_EVENT_ENTRY_DEL:
4042 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
4043 fib_info_put(fib_work->fen_info.fi);
4044 break;
4045 case FIB_EVENT_RULE_ADD: /* fall through */
4046 case FIB_EVENT_RULE_DEL:
4047 rule = fib_work->fr_info.rule;
4048 if (!fib4_rule_default(rule) && !rule->l3mdev)
4049 mlxsw_sp_router_fib_abort(mlxsw_sp);
4050 fib_rule_put(rule);
4051 break;
4052 case FIB_EVENT_NH_ADD: /* fall through */
4053 case FIB_EVENT_NH_DEL:
4054 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
4055 fib_work->fnh_info.fib_nh);
4056 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
4057 break;
4059 rtnl_unlock();
4060 kfree(fib_work);
4063 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
4065 struct mlxsw_sp_fib_event_work *fib_work =
4066 container_of(work, struct mlxsw_sp_fib_event_work, work);
4067 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
4068 struct fib_rule *rule;
4069 bool replace;
4070 int err;
4072 rtnl_lock();
4073 switch (fib_work->event) {
4074 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4075 case FIB_EVENT_ENTRY_ADD:
4076 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
4077 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
4078 fib_work->fen6_info.rt, replace);
4079 if (err)
4080 mlxsw_sp_router_fib_abort(mlxsw_sp);
4081 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
4082 break;
4083 case FIB_EVENT_ENTRY_DEL:
4084 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
4085 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
4086 break;
4087 case FIB_EVENT_RULE_ADD: /* fall through */
4088 case FIB_EVENT_RULE_DEL:
4089 rule = fib_work->fr_info.rule;
4090 if (!fib6_rule_default(rule) && !rule->l3mdev)
4091 mlxsw_sp_router_fib_abort(mlxsw_sp);
4092 fib_rule_put(rule);
4093 break;
4095 rtnl_unlock();
4096 kfree(fib_work);
4099 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
4100 struct fib_notifier_info *info)
4102 switch (fib_work->event) {
4103 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4104 case FIB_EVENT_ENTRY_APPEND: /* fall through */
4105 case FIB_EVENT_ENTRY_ADD: /* fall through */
4106 case FIB_EVENT_ENTRY_DEL:
4107 memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info));
4108 /* Take reference on fib_info to prevent it from being
4109 * freed while work is queued. Release it afterwards.
4110 */
4111 fib_info_hold(fib_work->fen_info.fi);
4112 break;
4113 case FIB_EVENT_RULE_ADD: /* fall through */
4114 case FIB_EVENT_RULE_DEL:
4115 memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
4116 fib_rule_get(fib_work->fr_info.rule);
4117 break;
4118 case FIB_EVENT_NH_ADD: /* fall through */
4119 case FIB_EVENT_NH_DEL:
4120 memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info));
4121 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
4122 break;
4126 static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
4127 struct fib_notifier_info *info)
4129 switch (fib_work->event) {
4130 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4131 case FIB_EVENT_ENTRY_ADD: /* fall through */
4132 case FIB_EVENT_ENTRY_DEL:
4133 memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info));
4134 rt6_hold(fib_work->fen6_info.rt);
4135 break;
4136 case FIB_EVENT_RULE_ADD: /* fall through */
4137 case FIB_EVENT_RULE_DEL:
4138 memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
4139 fib_rule_get(fib_work->fr_info.rule);
4140 break;
4144 /* Called with rcu_read_lock() */
4145 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
4146 unsigned long event, void *ptr)
4148 struct mlxsw_sp_fib_event_work *fib_work;
4149 struct fib_notifier_info *info = ptr;
4150 struct mlxsw_sp_router *router;
4152 if (!net_eq(info->net, &init_net))
4153 return NOTIFY_DONE;
4155 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
4156 if (WARN_ON(!fib_work))
4157 return NOTIFY_BAD;
4159 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
4160 fib_work->mlxsw_sp = router->mlxsw_sp;
4161 fib_work->event = event;
4163 switch (info->family) {
4164 case AF_INET:
4165 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
4166 mlxsw_sp_router_fib4_event(fib_work, info);
4167 break;
4168 case AF_INET6:
4169 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
4170 mlxsw_sp_router_fib6_event(fib_work, info);
4171 break;
4174 mlxsw_core_schedule_work(&fib_work->work);
4176 return NOTIFY_DONE;
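
mlxsw_sp_router_fib_event() runs under rcu_read_lock() and therefore cannot sleep: it only copies the notification info, takes a reference that keeps the referenced object alive (fib_info_hold(), fib_rule_get(), rt6_hold()), and schedules a work item; the work handlers above then do the real processing under rtnl_lock() and drop the references. A stripped-down sketch of this deferral pattern follows; my_event_work, my_payload_get()/my_payload_put() and my_process_event() are hypothetical stand-ins.

#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event_work {
        struct work_struct work;
        unsigned long event;
        void *payload;          /* reference held while queued */
};

/* Stand-ins for fib_info_hold()/fib_info_put() and for the actual
 * event processing; illustrative only.
 */
static void *my_payload_get(void *p) { return p; }
static void my_payload_put(void *p) { }
static void my_process_event(unsigned long event, void *p) { }

static void my_event_work_fn(struct work_struct *work)
{
        struct my_event_work *w =
                container_of(work, struct my_event_work, work);

        rtnl_lock();                    /* process context: may sleep */
        my_process_event(w->event, w->payload);
        rtnl_unlock();
        my_payload_put(w->payload);     /* release the queued reference */
        kfree(w);
}

/* Called in atomic context, like the FIB notifier above. */
static int my_notifier_cb(struct notifier_block *nb, unsigned long event,
                          void *ptr)
{
        struct my_event_work *w;

        w = kzalloc(sizeof(*w), GFP_ATOMIC);    /* no sleeping allocation */
        if (!w)
                return NOTIFY_BAD;

        INIT_WORK(&w->work, my_event_work_fn);
        w->event = event;
        w->payload = my_payload_get(ptr);       /* keep alive until work runs */
        schedule_work(&w->work);

        return NOTIFY_DONE;
}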
4179 static struct mlxsw_sp_rif *
4180 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
4181 const struct net_device *dev)
4183 int i;
4185 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
4186 if (mlxsw_sp->router->rifs[i] &&
4187 mlxsw_sp->router->rifs[i]->dev == dev)
4188 return mlxsw_sp->router->rifs[i];
4190 return NULL;
4193 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
4195 char ritr_pl[MLXSW_REG_RITR_LEN];
4196 int err;
4198 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
4199 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4200 if (WARN_ON_ONCE(err))
4201 return err;
4203 mlxsw_reg_ritr_enable_set(ritr_pl, false);
4204 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4207 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4208 struct mlxsw_sp_rif *rif)
4210 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
4211 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
4212 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
4215 static bool
4216 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
4217 unsigned long event)
4219 struct inet6_dev *inet6_dev;
4220 bool addr_list_empty = true;
4221 struct in_device *idev;
4223 switch (event) {
4224 case NETDEV_UP:
4225 return rif == NULL;
4226 case NETDEV_DOWN:
4227 idev = __in_dev_get_rtnl(dev);
4228 if (idev && idev->ifa_list)
4229 addr_list_empty = false;
4231 inet6_dev = __in6_dev_get(dev);
4232 if (addr_list_empty && inet6_dev &&
4233 !list_empty(&inet6_dev->addr_list))
4234 addr_list_empty = false;
4236 if (rif && addr_list_empty &&
4237 !netif_is_l3_slave(rif->dev))
4238 return true;
4239 /* It is possible we already removed the RIF ourselves
4240 * if it was assigned to a netdev that is now a bridge
4241 * or LAG slave.
4242 */
4243 return false;
4246 return false;
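/* Summary of the decision above: on NETDEV_UP a RIF should be
 * configured only if none exists yet (first address on the netdev);
 * on NETDEV_DOWN it should be removed only if a RIF still exists,
 * both the IPv4 and IPv6 address lists are empty (last address went
 * away) and the netdev is not an L3 (VRF) slave, whose RIF lifetime
 * is managed through the CHANGEUPPER path further below.
 */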
4249 static enum mlxsw_sp_rif_type
4250 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
4251 const struct net_device *dev)
4253 enum mlxsw_sp_fid_type type;
4255 /* RIF type is derived from the type of the underlying FID */
4256 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
4257 type = MLXSW_SP_FID_TYPE_8021Q;
4258 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
4259 type = MLXSW_SP_FID_TYPE_8021Q;
4260 else if (netif_is_bridge_master(dev))
4261 type = MLXSW_SP_FID_TYPE_8021D;
4262 else
4263 type = MLXSW_SP_FID_TYPE_RFID;
4265 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
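/* The mapping above, in check order:
 *
 *   VLAN upper of a bridge               -> 802.1Q FID
 *   VLAN-aware bridge itself             -> 802.1Q FID
 *   VLAN-unaware bridge                  -> 802.1D FID
 *   anything else (port, LAG, VLAN of
 *   a port or LAG, ...)                  -> rFID (router FID)
 */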
4268 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
4270 int i;
4272 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
4273 if (!mlxsw_sp->router->rifs[i]) {
4274 *p_rif_index = i;
4275 return 0;
4279 return -ENOBUFS;
4282 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
4283 u16 vr_id,
4284 struct net_device *l3_dev)
4286 struct mlxsw_sp_rif *rif;
4288 rif = kzalloc(rif_size, GFP_KERNEL);
4289 if (!rif)
4290 return NULL;
4292 INIT_LIST_HEAD(&rif->nexthop_list);
4293 INIT_LIST_HEAD(&rif->neigh_list);
4294 ether_addr_copy(rif->addr, l3_dev->dev_addr);
4295 rif->mtu = l3_dev->mtu;
4296 rif->vr_id = vr_id;
4297 rif->dev = l3_dev;
4298 rif->rif_index = rif_index;
4300 return rif;
4303 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
4304 u16 rif_index)
4306 return mlxsw_sp->router->rifs[rif_index];
4309 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
4311 return rif->rif_index;
4314 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
4316 return rif->dev->ifindex;
4319 static struct mlxsw_sp_rif *
4320 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
4321 const struct mlxsw_sp_rif_params *params)
4323 u32 tb_id = l3mdev_fib_table(params->dev);
4324 const struct mlxsw_sp_rif_ops *ops;
4325 enum mlxsw_sp_rif_type type;
4326 struct mlxsw_sp_rif *rif;
4327 struct mlxsw_sp_fid *fid;
4328 struct mlxsw_sp_vr *vr;
4329 u16 rif_index;
4330 int err;
4332 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
4333 ops = mlxsw_sp->router->rif_ops_arr[type];
4335 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
4336 if (IS_ERR(vr))
4337 return ERR_CAST(vr);
4339 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
4340 if (err)
4341 goto err_rif_index_alloc;
4343 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
4344 if (!rif) {
4345 err = -ENOMEM;
4346 goto err_rif_alloc;
4348 rif->mlxsw_sp = mlxsw_sp;
4349 rif->ops = ops;
4351 fid = ops->fid_get(rif);
4352 if (IS_ERR(fid)) {
4353 err = PTR_ERR(fid);
4354 goto err_fid_get;
4356 rif->fid = fid;
4358 if (ops->setup)
4359 ops->setup(rif, params);
4361 err = ops->configure(rif);
4362 if (err)
4363 goto err_configure;
4365 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
4366 mlxsw_sp_fid_index(fid), true);
4367 if (err)
4368 goto err_rif_fdb_op;
4370 mlxsw_sp_rif_counters_alloc(rif);
4371 mlxsw_sp_fid_rif_set(fid, rif);
4372 mlxsw_sp->router->rifs[rif_index] = rif;
4373 vr->rif_count++;
4375 return rif;
4377 err_rif_fdb_op:
4378 ops->deconfigure(rif);
4379 err_configure:
4380 mlxsw_sp_fid_put(fid);
4381 err_fid_get:
4382 kfree(rif);
4383 err_rif_alloc:
4384 err_rif_index_alloc:
4385 mlxsw_sp_vr_put(vr);
4386 return ERR_PTR(err);
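/* Construction above and teardown in mlxsw_sp_rif_destroy() below are
 * exact mirrors: each error label unwinds only the steps that had
 * already succeeded, in reverse order, and the destroy path repeats
 * the same sequence for a fully constructed RIF.
 */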
4389 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
4391 const struct mlxsw_sp_rif_ops *ops = rif->ops;
4392 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4393 struct mlxsw_sp_fid *fid = rif->fid;
4394 struct mlxsw_sp_vr *vr;
4396 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
4397 vr = &mlxsw_sp->router->vrs[rif->vr_id];
4399 vr->rif_count--;
4400 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
4401 mlxsw_sp_fid_rif_set(fid, NULL);
4402 mlxsw_sp_rif_counters_free(rif);
4403 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
4404 mlxsw_sp_fid_index(fid), false);
4405 ops->deconfigure(rif);
4406 mlxsw_sp_fid_put(fid);
4407 kfree(rif);
4408 mlxsw_sp_vr_put(vr);
4411 static void
4412 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
4413 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4415 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
4417 params->vid = mlxsw_sp_port_vlan->vid;
4418 params->lag = mlxsw_sp_port->lagged;
4419 if (params->lag)
4420 params->lag_id = mlxsw_sp_port->lag_id;
4421 else
4422 params->system_port = mlxsw_sp_port->local_port;
4425 static int
4426 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
4427 struct net_device *l3_dev)
4429 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
4430 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4431 u16 vid = mlxsw_sp_port_vlan->vid;
4432 struct mlxsw_sp_rif *rif;
4433 struct mlxsw_sp_fid *fid;
4434 int err;
4436 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4437 if (!rif) {
4438 struct mlxsw_sp_rif_params params = {
4439 .dev = l3_dev,
4442 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
4443 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
4444 if (IS_ERR(rif))
4445 return PTR_ERR(rif);
4448 /* FID was already created, just take a reference */
4449 fid = rif->ops->fid_get(rif);
4450 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
4451 if (err)
4452 goto err_fid_port_vid_map;
4454 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
4455 if (err)
4456 goto err_port_vid_learning_set;
4458 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
4459 BR_STATE_FORWARDING);
4460 if (err)
4461 goto err_port_vid_stp_set;
4463 mlxsw_sp_port_vlan->fid = fid;
4465 return 0;
4467 err_port_vid_stp_set:
4468 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4469 err_port_vid_learning_set:
4470 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
4471 err_fid_port_vid_map:
4472 mlxsw_sp_fid_put(fid);
4473 return err;
4476 void
4477 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4479 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
4480 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
4481 u16 vid = mlxsw_sp_port_vlan->vid;
4483 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
4484 return;
4486 mlxsw_sp_port_vlan->fid = NULL;
4487 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
4488 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4489 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
4490 /* If router port holds the last reference on the rFID, then the
4491 * associated Sub-port RIF will be destroyed.
4492 */
4493 mlxsw_sp_fid_put(fid);
4496 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
4497 struct net_device *port_dev,
4498 unsigned long event, u16 vid)
4500 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
4501 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4503 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
4504 if (WARN_ON(!mlxsw_sp_port_vlan))
4505 return -EINVAL;
4507 switch (event) {
4508 case NETDEV_UP:
4509 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
4510 l3_dev);
4511 case NETDEV_DOWN:
4512 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4513 break;
4516 return 0;
4519 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
4520 unsigned long event)
4522 if (netif_is_bridge_port(port_dev) ||
4523 netif_is_lag_port(port_dev) ||
4524 netif_is_ovs_port(port_dev))
4525 return 0;
4527 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
4530 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
4531 struct net_device *lag_dev,
4532 unsigned long event, u16 vid)
4534 struct net_device *port_dev;
4535 struct list_head *iter;
4536 int err;
4538 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
4539 if (mlxsw_sp_port_dev_check(port_dev)) {
4540 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
4541 port_dev,
4542 event, vid);
4543 if (err)
4544 return err;
4548 return 0;
4551 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
4552 unsigned long event)
4554 if (netif_is_bridge_port(lag_dev))
4555 return 0;
4557 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
4560 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
4561 unsigned long event)
4563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
4564 struct mlxsw_sp_rif_params params = {
4565 .dev = l3_dev,
4567 struct mlxsw_sp_rif *rif;
4569 switch (event) {
4570 case NETDEV_UP:
4571 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
4572 if (IS_ERR(rif))
4573 return PTR_ERR(rif);
4574 break;
4575 case NETDEV_DOWN:
4576 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4577 mlxsw_sp_rif_destroy(rif);
4578 break;
4581 return 0;
4584 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
4585 unsigned long event)
4587 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4588 u16 vid = vlan_dev_vlan_id(vlan_dev);
4590 if (netif_is_bridge_port(vlan_dev))
4591 return 0;
4593 if (mlxsw_sp_port_dev_check(real_dev))
4594 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
4595 event, vid);
4596 else if (netif_is_lag_master(real_dev))
4597 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
4598 vid);
4599 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
4600 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
4602 return 0;
4605 static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
4606 unsigned long event)
4608 if (mlxsw_sp_port_dev_check(dev))
4609 return mlxsw_sp_inetaddr_port_event(dev, event);
4610 else if (netif_is_lag_master(dev))
4611 return mlxsw_sp_inetaddr_lag_event(dev, event);
4612 else if (netif_is_bridge_master(dev))
4613 return mlxsw_sp_inetaddr_bridge_event(dev, event);
4614 else if (is_vlan_dev(dev))
4615 return mlxsw_sp_inetaddr_vlan_event(dev, event);
4616 else
4617 return 0;
4620 int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
4621 unsigned long event, void *ptr)
4623 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
4624 struct net_device *dev = ifa->ifa_dev->dev;
4625 struct mlxsw_sp *mlxsw_sp;
4626 struct mlxsw_sp_rif *rif;
4627 int err = 0;
4629 mlxsw_sp = mlxsw_sp_lower_get(dev);
4630 if (!mlxsw_sp)
4631 goto out;
4633 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4634 if (!mlxsw_sp_rif_should_config(rif, dev, event))
4635 goto out;
4637 err = __mlxsw_sp_inetaddr_event(dev, event);
4638 out:
4639 return notifier_from_errno(err);
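/* Unlike mlxsw_sp_inetaddr_event() above, which is invoked with RTNL
 * held and can therefore handle the event inline, the inet6addr
 * notifier chain runs in atomic context. The handler below thus only
 * takes a reference on the netdev (dev_hold()) and defers the RIF
 * update to a work item, which may take rtnl_lock() and sleep.
 */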
4642 struct mlxsw_sp_inet6addr_event_work {
4643 struct work_struct work;
4644 struct net_device *dev;
4645 unsigned long event;
4648 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
4650 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
4651 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
4652 struct net_device *dev = inet6addr_work->dev;
4653 unsigned long event = inet6addr_work->event;
4654 struct mlxsw_sp *mlxsw_sp;
4655 struct mlxsw_sp_rif *rif;
4657 rtnl_lock();
4658 mlxsw_sp = mlxsw_sp_lower_get(dev);
4659 if (!mlxsw_sp)
4660 goto out;
4662 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4663 if (!mlxsw_sp_rif_should_config(rif, dev, event))
4664 goto out;
4666 __mlxsw_sp_inetaddr_event(dev, event);
4667 out:
4668 rtnl_unlock();
4669 dev_put(dev);
4670 kfree(inet6addr_work);
4673 /* Called with rcu_read_lock() */
4674 int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
4675 unsigned long event, void *ptr)
4677 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
4678 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
4679 struct net_device *dev = if6->idev->dev;
4681 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
4682 return NOTIFY_DONE;
4684 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
4685 if (!inet6addr_work)
4686 return NOTIFY_BAD;
4688 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
4689 inet6addr_work->dev = dev;
4690 inet6addr_work->event = event;
4691 dev_hold(dev);
4692 mlxsw_core_schedule_work(&inet6addr_work->work);
4694 return NOTIFY_DONE;
4697 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
4698 const char *mac, int mtu)
4700 char ritr_pl[MLXSW_REG_RITR_LEN];
4701 int err;
4703 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
4704 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4705 if (err)
4706 return err;
4708 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
4709 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
4710 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
4711 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4714 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
4716 struct mlxsw_sp *mlxsw_sp;
4717 struct mlxsw_sp_rif *rif;
4718 u16 fid_index;
4719 int err;
4721 mlxsw_sp = mlxsw_sp_lower_get(dev);
4722 if (!mlxsw_sp)
4723 return 0;
4725 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4726 if (!rif)
4727 return 0;
4728 fid_index = mlxsw_sp_fid_index(rif->fid);
4730 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
4731 if (err)
4732 return err;
4734 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
4735 dev->mtu);
4736 if (err)
4737 goto err_rif_edit;
4739 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
4740 if (err)
4741 goto err_rif_fdb_op;
4743 ether_addr_copy(rif->addr, dev->dev_addr);
4744 rif->mtu = dev->mtu;
4746 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
4748 return 0;
4750 err_rif_fdb_op:
4751 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
4752 err_rif_edit:
4753 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
4754 return err;
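/* The MAC/MTU update above is transactional: remove the old FDB
 * entry, edit the RIF, install the new FDB entry. Each error label
 * re-applies the previously committed state (the old RIF parameters
 * and the old FDB entry), so the RIF is never left half-updated.
 */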
4757 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
4758 struct net_device *l3_dev)
4760 struct mlxsw_sp_rif *rif;
4762 /* If netdev is already associated with a RIF, then we need to
4763 * destroy it and create a new one with the new virtual router ID.
4764 */
4765 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4766 if (rif)
4767 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
4769 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
4772 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
4773 struct net_device *l3_dev)
4775 struct mlxsw_sp_rif *rif;
4777 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4778 if (!rif)
4779 return;
4780 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
4783 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
4784 struct netdev_notifier_changeupper_info *info)
4786 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
4787 int err = 0;
4789 if (!mlxsw_sp)
4790 return 0;
4792 switch (event) {
4793 case NETDEV_PRECHANGEUPPER:
4794 return 0;
4795 case NETDEV_CHANGEUPPER:
4796 if (info->linking)
4797 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
4798 else
4799 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
4800 break;
4803 return err;
4806 static struct mlxsw_sp_rif_subport *
4807 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
4809 return container_of(rif, struct mlxsw_sp_rif_subport, common);
4812 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
4813 const struct mlxsw_sp_rif_params *params)
4815 struct mlxsw_sp_rif_subport *rif_subport;
4817 rif_subport = mlxsw_sp_rif_subport_rif(rif);
4818 rif_subport->vid = params->vid;
4819 rif_subport->lag = params->lag;
4820 if (params->lag)
4821 rif_subport->lag_id = params->lag_id;
4822 else
4823 rif_subport->system_port = params->system_port;
4826 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
4828 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4829 struct mlxsw_sp_rif_subport *rif_subport;
4830 char ritr_pl[MLXSW_REG_RITR_LEN];
4832 rif_subport = mlxsw_sp_rif_subport_rif(rif);
4833 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
4834 rif->rif_index, rif->vr_id, rif->dev->mtu,
4835 rif->dev->dev_addr);
4836 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
4837 rif_subport->lag ? rif_subport->lag_id :
4838 rif_subport->system_port,
4839 rif_subport->vid);
4841 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4844 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
4846 return mlxsw_sp_rif_subport_op(rif, true);
4849 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
4851 mlxsw_sp_rif_subport_op(rif, false);
4854 static struct mlxsw_sp_fid *
4855 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
4857 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
4860 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
4861 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
4862 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
4863 .setup = mlxsw_sp_rif_subport_setup,
4864 .configure = mlxsw_sp_rif_subport_configure,
4865 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
4866 .fid_get = mlxsw_sp_rif_subport_fid_get,
4869 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
4870 enum mlxsw_reg_ritr_if_type type,
4871 u16 vid_fid, bool enable)
4873 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4874 char ritr_pl[MLXSW_REG_RITR_LEN];
4876 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
4877 rif->dev->mtu, rif->dev->dev_addr);
4878 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
4880 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4883 static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
4885 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
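/* The "router port" above is a virtual local port one beyond the
 * last possible front-panel port. It represents the router in the
 * per-FID flood tables: the flood_set calls below add it as a member
 * so that broadcast and multicast traffic flooded in the FID also
 * reaches the routing function.
 */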
4888 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
4890 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4891 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
4892 int err;
4894 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
4895 if (err)
4896 return err;
4898 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
4899 mlxsw_sp_router_port(mlxsw_sp), true);
4900 if (err)
4901 goto err_fid_mc_flood_set;
4903 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
4904 mlxsw_sp_router_port(mlxsw_sp), true);
4905 if (err)
4906 goto err_fid_bc_flood_set;
4908 return 0;
4910 err_fid_bc_flood_set:
4911 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
4912 mlxsw_sp_router_port(mlxsw_sp), false);
4913 err_fid_mc_flood_set:
4914 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
4915 return err;
4918 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
4920 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4921 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
4923 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
4924 mlxsw_sp_router_port(mlxsw_sp), false);
4925 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
4926 mlxsw_sp_router_port(mlxsw_sp), false);
4927 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
4930 static struct mlxsw_sp_fid *
4931 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
4933 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
4935 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
4938 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
4939 .type = MLXSW_SP_RIF_TYPE_VLAN,
4940 .rif_size = sizeof(struct mlxsw_sp_rif),
4941 .configure = mlxsw_sp_rif_vlan_configure,
4942 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
4943 .fid_get = mlxsw_sp_rif_vlan_fid_get,
4946 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
4948 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4949 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
4950 int err;
4952 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
4953 true);
4954 if (err)
4955 return err;
4957 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
4958 mlxsw_sp_router_port(mlxsw_sp), true);
4959 if (err)
4960 goto err_fid_mc_flood_set;
4962 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
4963 mlxsw_sp_router_port(mlxsw_sp), true);
4964 if (err)
4965 goto err_fid_bc_flood_set;
4967 return 0;
4969 err_fid_bc_flood_set:
4970 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
4971 mlxsw_sp_router_port(mlxsw_sp), false);
4972 err_fid_mc_flood_set:
4973 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
4974 return err;
4977 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
4979 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4980 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
4982 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
4983 mlxsw_sp_router_port(mlxsw_sp), false);
4984 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
4985 mlxsw_sp_router_port(mlxsw_sp), false);
4986 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
4989 static struct mlxsw_sp_fid *
4990 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
4992 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
4995 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
4996 .type = MLXSW_SP_RIF_TYPE_FID,
4997 .rif_size = sizeof(struct mlxsw_sp_rif),
4998 .configure = mlxsw_sp_rif_fid_configure,
4999 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
5000 .fid_get = mlxsw_sp_rif_fid_fid_get,
5003 static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
5004 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
5005 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
5006 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
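
The array above completes a small polymorphism scheme: mlxsw_sp_rif_create() picks the ops from the RIF type, sizes the allocation from ops->rif_size, and then drives the optional setup() hook plus configure() and fid_get(). A new RIF type would embed struct mlxsw_sp_rif as its first member (mlxsw_sp_rif_alloc() returns the allocation through that member) and supply its own ops. A hypothetical sketch follows, with callback signatures inferred from the implementations above; MLXSW_SP_RIF_TYPE_FOO and the foo helpers do not exist in the driver.

struct mlxsw_sp_rif_foo {
        struct mlxsw_sp_rif common;     /* must be the first member */
        u16 foo_param;
};

static struct mlxsw_sp_rif_foo *
mlxsw_sp_rif_foo_rif(const struct mlxsw_sp_rif *rif)
{
        return container_of(rif, struct mlxsw_sp_rif_foo, common);
}

static void mlxsw_sp_rif_foo_setup(struct mlxsw_sp_rif *rif,
                                   const struct mlxsw_sp_rif_params *params)
{
        /* Copy type-specific parameters out of "params". */
}

static int mlxsw_sp_rif_foo_configure(struct mlxsw_sp_rif *rif)
{
        /* Program the RITR register for this interface type. */
        return 0;
}

static void mlxsw_sp_rif_foo_deconfigure(struct mlxsw_sp_rif *rif)
{
        /* Undo whatever configure() programmed. */
}

static struct mlxsw_sp_fid *mlxsw_sp_rif_foo_fid_get(struct mlxsw_sp_rif *rif)
{
        /* Return the FID backing this RIF; mirrors the subport case. */
        return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_foo_ops = {
        .type = MLXSW_SP_RIF_TYPE_FOO,  /* hypothetical enum value */
        .rif_size = sizeof(struct mlxsw_sp_rif_foo),
        .setup = mlxsw_sp_rif_foo_setup,
        .configure = mlxsw_sp_rif_foo_configure,
        .deconfigure = mlxsw_sp_rif_foo_deconfigure,
        .fid_get = mlxsw_sp_rif_foo_fid_get,
};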
5009 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
5011 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
5013 mlxsw_sp->router->rifs = kcalloc(max_rifs,
5014 sizeof(struct mlxsw_sp_rif *),
5015 GFP_KERNEL);
5016 if (!mlxsw_sp->router->rifs)
5017 return -ENOMEM;
5019 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
5021 return 0;
5024 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
5026 int i;
5028 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
5029 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
5031 kfree(mlxsw_sp->router->rifs);
5034 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
5036 struct mlxsw_sp_router *router;
5038 /* Flush pending FIB notifications and then flush the device's
5039 * table before requesting another dump. The FIB notification
5040 * block is unregistered, so no need to take RTNL.
5041 */
5042 mlxsw_core_flush_owq();
5043 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5044 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
5047 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
5049 char rgcr_pl[MLXSW_REG_RGCR_LEN];
5050 u64 max_rifs;
5051 int err;
5053 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
5054 return -EIO;
5055 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
5057 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
5058 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
5059 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
5060 if (err)
5061 return err;
5062 return 0;
5065 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
5067 char rgcr_pl[MLXSW_REG_RGCR_LEN];
5069 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
5070 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
5073 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
5075 struct mlxsw_sp_router *router;
5076 int err;
5078 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
5079 if (!router)
5080 return -ENOMEM;
5081 mlxsw_sp->router = router;
5082 router->mlxsw_sp = mlxsw_sp;
5084 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
5085 err = __mlxsw_sp_router_init(mlxsw_sp);
5086 if (err)
5087 goto err_router_init;
5089 err = mlxsw_sp_rifs_init(mlxsw_sp);
5090 if (err)
5091 goto err_rifs_init;
5093 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
5094 &mlxsw_sp_nexthop_ht_params);
5095 if (err)
5096 goto err_nexthop_ht_init;
5098 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
5099 &mlxsw_sp_nexthop_group_ht_params);
5100 if (err)
5101 goto err_nexthop_group_ht_init;
5103 err = mlxsw_sp_lpm_init(mlxsw_sp);
5104 if (err)
5105 goto err_lpm_init;
5107 err = mlxsw_sp_vrs_init(mlxsw_sp);
5108 if (err)
5109 goto err_vrs_init;
5111 err = mlxsw_sp_neigh_init(mlxsw_sp);
5112 if (err)
5113 goto err_neigh_init;
5115 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
5116 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
5117 mlxsw_sp_router_fib_dump_flush);
5118 if (err)
5119 goto err_register_fib_notifier;
5121 return 0;
5123 err_register_fib_notifier:
5124 mlxsw_sp_neigh_fini(mlxsw_sp);
5125 err_neigh_init:
5126 mlxsw_sp_vrs_fini(mlxsw_sp);
5127 err_vrs_init:
5128 mlxsw_sp_lpm_fini(mlxsw_sp);
5129 err_lpm_init:
5130 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
5131 err_nexthop_group_ht_init:
5132 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
5133 err_nexthop_ht_init:
5134 mlxsw_sp_rifs_fini(mlxsw_sp);
5135 err_rifs_init:
5136 __mlxsw_sp_router_fini(mlxsw_sp);
5137 err_router_init:
5138 kfree(mlxsw_sp->router);
5139 return err;
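
mlxsw_sp_router_init() uses the standard kernel goto-unwind ladder: each err_* label is named after the step that failed, and the code below it releases only what was initialized earlier, in reverse order; mlxsw_sp_router_fini() below then repeats the full teardown for a successful init. A minimal sketch of the shape, with hypothetical step_*() and undo_step_*() helpers:

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_step_a(void) { }
static void undo_step_b(void) { }

static int my_init(void)
{
        int err;

        err = step_a();
        if (err)
                return err;             /* nothing to unwind yet */

        err = step_b();
        if (err)
                goto err_step_b;

        err = step_c();
        if (err)
                goto err_step_c;

        return 0;

err_step_c:                             /* step_c failed: undo b, then a */
        undo_step_b();
err_step_b:
        undo_step_a();
        return err;
}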
5142 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
5144 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
5145 mlxsw_sp_neigh_fini(mlxsw_sp);
5146 mlxsw_sp_vrs_fini(mlxsw_sp);
5147 mlxsw_sp_lpm_fini(mlxsw_sp);
5148 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
5149 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
5150 mlxsw_sp_rifs_fini(mlxsw_sp);
5151 __mlxsw_sp_router_fini(mlxsw_sp);
5152 kfree(mlxsw_sp->router);