/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
9 #include <linux/stddef.h>
10 #include <linux/spinlock.h>
11 #include <linux/slab.h>
12 #include <asm/unaligned.h>
13 #include <net/caif/caif_layer.h>
14 #include <net/caif/cfsrvl.h>
15 #include <net/caif/cfpkt.h>
17 #define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
18 #define RFM_SEGMENTATION_BIT 0x01
19 #define RFM_HEAD_SIZE 7
/* Forward declarations for the layer callbacks installed in cfrfml_create(). */
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
/*
 * NOTE(review): fragment of the struct cfrfml definition — the extraction
 * dropped the surrounding struct header and the other fields.
 * incomplete_frm buffers a partially reassembled segmented PDU between
 * calls to cfrfml_receive(); it is destroyed in cfrfml_release().
 */
26 struct cfpkt
*incomplete_frm
;
/* The comment below belongs to a spinlock field ("sync") not visible here. */
30 /* Protects serialized processing of packets */
34 static void cfrfml_release(struct kref
*kref
)
36 struct cfsrvl
*srvl
= container_of(kref
, struct cfsrvl
, ref
);
37 struct cfrfml
*rfml
= container_obj(&srvl
->layer
);
39 if (rfml
->incomplete_frm
)
40 cfpkt_destroy(rfml
->incomplete_frm
);
45 struct cflayer
*cfrfml_create(u8 channel_id
, struct dev_info
*dev_info
,
50 kzalloc(sizeof(struct cfrfml
), GFP_ATOMIC
);
53 pr_warn("Out of memory\n");
57 cfsrvl_init(&this->serv
, channel_id
, dev_info
, false);
58 this->serv
.release
= cfrfml_release
;
59 this->serv
.layer
.receive
= cfrfml_receive
;
60 this->serv
.layer
.transmit
= cfrfml_transmit
;
62 /* Round down to closest multiple of 16 */
63 tmp
= (mtu_size
- RFM_HEAD_SIZE
- 6) / 16;
66 this->fragment_size
= tmp
;
67 spin_lock_init(&this->sync
);
68 snprintf(this->serv
.layer
.name
, CAIF_LAYER_NAME_SZ
,
71 return &this->serv
.layer
;
74 static struct cfpkt
*rfm_append(struct cfrfml
*rfml
, char *seghead
,
75 struct cfpkt
*pkt
, int *err
)
79 /* n-th but not last segment */
81 if (cfpkt_extr_head(pkt
, seghead
, 6) < 0)
84 /* Verify correct header */
85 if (memcmp(seghead
, rfml
->seghead
, 6) != 0)
88 tmppkt
= cfpkt_append(rfml
->incomplete_frm
, pkt
,
89 rfml
->pdu_size
+ RFM_HEAD_SIZE
);
91 /* If cfpkt_append failes input pkts are not freed */
100 static int cfrfml_receive(struct cflayer
*layr
, struct cfpkt
*pkt
)
107 struct cfpkt
*tmppkt
= NULL
;
109 caif_assert(layr
->up
!= NULL
);
110 caif_assert(layr
->receive
!= NULL
);
111 rfml
= container_obj(layr
);
112 spin_lock(&rfml
->sync
);
115 if (cfpkt_extr_head(pkt
, &tmp
, 1) < 0)
117 segmented
= tmp
& RFM_SEGMENTATION_BIT
;
120 if (rfml
->incomplete_frm
== NULL
) {
121 /* Initial Segment */
122 if (cfpkt_peek_head(pkt
, rfml
->seghead
, 6) < 0)
125 rfml
->pdu_size
= get_unaligned_le16(rfml
->seghead
+4);
127 if (cfpkt_erroneous(pkt
))
129 rfml
->incomplete_frm
= pkt
;
133 tmppkt
= rfm_append(rfml
, seghead
, pkt
, &err
);
137 if (cfpkt_erroneous(tmppkt
))
140 rfml
->incomplete_frm
= tmppkt
;
143 if (cfpkt_erroneous(tmppkt
))
150 if (rfml
->incomplete_frm
) {
153 tmppkt
= rfm_append(rfml
, seghead
, pkt
, &err
);
157 if (cfpkt_erroneous(tmppkt
))
160 rfml
->incomplete_frm
= NULL
;
164 /* Verify that length is correct */
166 if (rfml
->pdu_size
!= cfpkt_getlen(pkt
) - RFM_HEAD_SIZE
+ 1)
170 err
= rfml
->serv
.layer
.up
->receive(rfml
->serv
.layer
.up
, pkt
);
176 cfpkt_destroy(tmppkt
);
179 if (rfml
->incomplete_frm
)
180 cfpkt_destroy(rfml
->incomplete_frm
);
181 rfml
->incomplete_frm
= NULL
;
183 pr_info("Connection error %d triggered on RFM link\n", err
);
185 /* Trigger connection error upon failure.*/
186 layr
->up
->ctrlcmd(layr
->up
, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND
,
187 rfml
->serv
.dev_info
.id
);
189 spin_unlock(&rfml
->sync
);
194 static int cfrfml_transmit_segment(struct cfrfml
*rfml
, struct cfpkt
*pkt
)
196 caif_assert(cfpkt_getlen(pkt
) < rfml
->fragment_size
);
198 /* Add info for MUX-layer to route the packet out. */
199 cfpkt_info(pkt
)->channel_id
= rfml
->serv
.layer
.id
;
202 * To optimize alignment, we add up the size of CAIF header before
205 cfpkt_info(pkt
)->hdr_len
= RFM_HEAD_SIZE
;
206 cfpkt_info(pkt
)->dev_info
= &rfml
->serv
.dev_info
;
208 return rfml
->serv
.layer
.dn
->transmit(rfml
->serv
.layer
.dn
, pkt
);
/*
 * cfrfml_transmit - split an outgoing PDU into fragment_size segments
 * and transmit each via cfrfml_transmit_segment().
 *
 * NOTE(review): this extraction is fragmentary — several original lines
 * (local declarations, goto targets, err assignments and the closing
 * braces) are missing, and the function is truncated at the end of the
 * visible chunk.  Comments below annotate the visible flow only; the
 * code text is left byte-identical.
 */
211 static int cfrfml_transmit(struct cflayer
*layr
, struct cfpkt
*pkt
)
/* rearpkt holds the split-off tail; frontpkt is the fragment currently
 * being sent and starts out as the whole input packet. */
216 struct cfpkt
*rearpkt
= NULL
;
217 struct cfpkt
*frontpkt
= pkt
;
218 struct cfrfml
*rfml
= container_obj(layr
);
220 caif_assert(layr
->dn
!= NULL
);
221 caif_assert(layr
->dn
->transmit
!= NULL
);
/* Bail out early if the service layer is not ready to send. */
223 if (!cfsrvl_ready(&rfml
->serv
, &err
))
/* Runt packets shorter than the RFM head minus the control byte are a
 * protocol error. */
227 if (cfpkt_getlen(pkt
) <= RFM_HEAD_SIZE
-1)
/* Only oversized packets need the 6-byte file header peeked for
 * replication onto each continuation segment. */
231 if (cfpkt_getlen(pkt
) > rfml
->fragment_size
+ RFM_HEAD_SIZE
)
232 err
= cfpkt_peek_head(pkt
, head
, 6);
/* Segmentation loop: while the remainder is too big, prepend the
 * "more segments" control byte, split off a fragment, and send it. */
237 while (cfpkt_getlen(frontpkt
) > rfml
->fragment_size
+ RFM_HEAD_SIZE
) {
242 if (cfpkt_add_head(frontpkt
, &seg
, 1) < 0)
245 * On OOM error cfpkt_split returns NULL.
247 * NOTE: Segmented pdu is not correctly aligned.
248 * This has negative performance impact.
251 rearpkt
= cfpkt_split(frontpkt
, rfml
->fragment_size
);
255 err
= cfrfml_transmit_segment(rfml
, frontpkt
);
/* The remainder becomes the new front packet; re-add the 6-byte file
 * header so the peer can match continuation segments. */
263 if (frontpkt
== NULL
)
266 if (cfpkt_add_head(frontpkt
, head
, 6) < 0)
/* Final (or only) segment: control byte with segmentation bit clear. */
274 if (cfpkt_add_head(frontpkt
, &seg
, 1) < 0)
277 err
= cfrfml_transmit_segment(rfml
, frontpkt
);
/* Error path: log, signal remote shutdown upwards, and free any
 * packets this function still owns. */
283 pr_info("Connection error %d triggered on RFM link\n", err
);
284 /* Trigger connection error upon failure.*/
286 layr
->up
->ctrlcmd(layr
->up
, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND
,
287 rfml
->serv
.dev_info
.id
);
290 cfpkt_destroy(rearpkt
);
/* The original input packet is freed by the socket layer, so only
 * destroy frontpkt when it is a split-off remainder. */
292 if (frontpkt
&& frontpkt
!= pkt
) {
294 cfpkt_destroy(frontpkt
);
296 * Socket layer will free the original packet,
297 * but this packet may already be sent and
298 * freed. So we have to return 0 in this case
299 * to avoid socket layer to re-free this packet.
300 * The return of shutdown indication will
301 * cause connection to be invalidated anyhow.