/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))
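
/*
 * Note: FRAG6_CB() reuses skb->cb as scratch space, so the byte offset of
 * each queued fragment (FRAG6_CB(skb)->offset, set in ip6_frag_queue())
 * travels with the skb while it sits on the reassembly chain.
 */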

/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}
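
/*
 * These two accessors only report per-namespace reassembly state (number of
 * queues and memory charged to them); they are meant for statistics and
 * reporting callers elsewhere in the stack, not for the reassembly fast path.
 */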

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frags.rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
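
/*
 * The function above is essentially an open-coded jhash mix over the nine
 * 32-bit words of (saddr, daddr, id), folded into the INETFRAGS_HASHSZ-sized
 * table; the random seed is rehashed periodically (see ip6frag_secret_interval
 * below) so remote senders cannot predict bucket placement.
 */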

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);
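
/*
 * A reassembly queue is keyed on the (source address, destination address,
 * fragment identification) triple, which is the tuple RFC 2460 defines for
 * matching fragments of one original datagram.
 */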

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);
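
/*
 * ip6_frag_init() is the constructor callback of the shared inet_frag core:
 * it is wired up as ip6_frags.constructor in ipv6_frag_init() below and runs
 * whenever a new queue is created for a (saddr, daddr, id) key.
 */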

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}
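
/*
 * Eviction is the memory-pressure path: ipv6_frag_rcv() below calls
 * ip6_evictor() once the memory charged to this namespace's fragment queues
 * exceeds ip6frag_high_thresh, and every datagram dropped that way is counted
 * as a reassembly failure.
 */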

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	dev = dev_get_by_index(&init_net, fq->iif);
	if (dev == NULL)
		goto out;

	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out;

	/*
	 * But use as source device on which LAST ARRIVED
	 * segment was received. And do not use fq->dev
	 * pointer directly, device might already disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
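
/*
 * This is the per-queue timer handler: the inet_frag core arms a timer with
 * this namespace's ip6frag_time value for every queue, and ip6_frag_expire is
 * installed as ip6_frags.frag_expire in ipv6_frag_init() below.
 */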

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.src = src;
	arg.dst = dst;
	hash = ip6qhashfn(id, src, dst);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		goto oom;

	return container_of(q, struct frag_queue, q);

oom:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
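
	/*
	 * Worked example (illustrative numbers): if the Fragment header
	 * directly follows the basic IPv6 header, then
	 * (u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1) == 8.  With
	 * payload_len == 1456 and frag_off == 0x00b1 (offset field 22, M set)
	 * this gives offset = 22 * 8 = 176 and end = 176 + (1456 - 8) = 1624.
	 */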

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case.
			 */
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one, drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);
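
	/*
	 * Note: fq->q.meat is the number of payload bytes queued so far, so
	 * reassembly above is attempted only once the first and last
	 * fragments have both been seen and every byte in between is present.
	 */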

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason; on success returns 1 and leaves
 *	the offset of the nexthdr field of the reassembled frame in
 *	IP6CB(head)->nhoff.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	return -1;
}
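
/*
 * After ip6_frag_reasm() the skb looks like the sender's original datagram:
 * the Fragment header has been stripped and the preceding headers shifted, so
 * payload_len and the nexthdr chain match what an unfragmented packet would
 * carry.  That is what makes AH ICV verification work on reassembled frames
 * (see the "calculate ICV correctly" note in the changelog above).
 */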

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net;

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}
	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr)))) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	net = dev_net(skb->dev);
	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb->dst));

	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb->dst))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};
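
/*
 * frag_protocol is registered for IPPROTO_FRAGMENT in ipv6_frag_init() below,
 * so every packet whose extension-header chain reaches a Fragment header is
 * handed to ipv6_frag_rcv().
 */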

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_HIGH_THRESH,
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_LOW_THRESH,
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_TIME,
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_SECRET_INTERVAL,
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies
	},
	{ }
};
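
/*
 * These knobs surface under /proc/sys/net/ipv6/, so the reassembly limits can
 * be tuned at runtime, for instance (illustrative values):
 *
 *	sysctl -w net.ipv6.ip6frag_high_thresh=4194304
 *	sysctl -w net.ipv6.ip6frag_time=30
 *
 * ip6frag_time and ip6frag_secret_interval are converted between seconds and
 * jiffies by proc_dointvec_jiffies/sysctl_jiffies.
 */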

static int ip6_frags_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
		table[3].mode &= ~0222;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip6_frags_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	kfree(table);
}

#else
static inline int ip6_frags_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(struct net *net)
{
}
#endif

static int ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = 256 * 1024;
	net->ipv6.frags.low_thresh = 192 * 1024;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_sysctl_register(net);
}
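
/*
 * The two thresholds form a simple hysteresis: new fragments are accepted
 * until the per-namespace memory charge passes high_thresh (256 KB here),
 * after which ip6_evictor()/inet_frag_evictor() drop the oldest queues,
 * roughly down to low_thresh (192 KB), before normal queueing resumes.
 */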

static void ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	register_pernet_subsys(&ip6_frags_ops);

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}