/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cffrml.h>

#define container_obj(layr) container_of(layr, struct cfmuxl, layer)

#define CAIF_CTRL_CHANNEL 0
#define UP_CACHE_SIZE 8
#define DN_CACHE_SIZE 8
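
/*
 * The CAIF multiplex layer. Service layers above are kept in srvl_list and
 * looked up by channel/link id; framing layers below are kept in frml_list
 * and looked up by physical interface id. The small up/dn caches avoid a
 * list walk on every packet.
 */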
struct cfmuxl {
	struct cflayer layer;
	struct list_head srvl_list;
	struct list_head frml_list;
	struct cflayer *up_cache[UP_CACHE_SIZE];
	struct cflayer *dn_cache[DN_CACHE_SIZE];
	/*
	 * Set when inserting or removing downwards layers.
	 */
	spinlock_t transmit_lock;

	/*
	 * Set when inserting or removing upwards layers.
	 */
	spinlock_t receive_lock;
};

static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
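
/*
 * Allocate and initialise a mux layer instance. Returns the generic
 * cflayer embedded in it, or NULL if the allocation fails.
 */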
struct cflayer *cfmuxl_create(void)
{
	struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
	if (!this)
		return NULL;
	memset(this, 0, sizeof(*this));
	this->layer.receive = cfmuxl_receive;
	this->layer.transmit = cfmuxl_transmit;
	this->layer.ctrlcmd = cfmuxl_ctrlcmd;
	INIT_LIST_HEAD(&this->srvl_list);
	INIT_LIST_HEAD(&this->frml_list);
	spin_lock_init(&this->transmit_lock);
	spin_lock_init(&this->receive_lock);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
	return &this->layer;
}
int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
{
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	list_add(&up->node, &muxl->srvl_list);
	spin_unlock(&muxl->receive_lock);
	return 0;
}

bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
{
	struct list_head *node;
	struct cflayer *layer;
	struct cfmuxl *muxl = container_obj(layr);
	bool match = false;
	spin_lock(&muxl->receive_lock);

	list_for_each(node, &muxl->srvl_list) {
		layer = list_entry(node, struct cflayer, node);
		if (cfsrvl_phyid_match(layer, phyid)) {
			match = true;
			break;
		}
	}
	spin_unlock(&muxl->receive_lock);
	return match;
}

u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
{
	struct cflayer *up;
	u8 phyid;
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	up = get_up(muxl, channel_id);
	if (up != NULL)
		phyid = cfsrvl_getphyid(up);
	else
		phyid = 0;
	spin_unlock(&muxl->receive_lock);
	return phyid;
}

int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->transmit_lock);
	list_add(&dn->node, &muxl->frml_list);
	spin_unlock(&muxl->transmit_lock);
	return 0;
}
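
/* Linear search for a layer with a matching id; caller holds the lock. */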
static struct cflayer *get_from_id(struct list_head *list, u16 id)
{
	struct list_head *node;
	struct cflayer *layer;
	list_for_each(node, list) {
		layer = list_entry(node, struct cflayer, node);
		if (layer->id == id)
			return layer;
	}
	return NULL;
}

struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *dn;
	spin_lock(&muxl->transmit_lock);
	memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
	dn = get_from_id(&muxl->frml_list, phyid);
	if (dn == NULL) {
		spin_unlock(&muxl->transmit_lock);
		return NULL;
	}
	list_del(&dn->node);
	caif_assert(dn != NULL);
	spin_unlock(&muxl->transmit_lock);
	return dn;
}

/* Invariant: lock is taken */
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	struct cflayer *up;
	int idx = id % UP_CACHE_SIZE;
	up = muxl->up_cache[idx];
	if (up == NULL || up->id != id) {
		up = get_from_id(&muxl->srvl_list, id);
		muxl->up_cache[idx] = up;
	}
	return up;
}

/* Invariant: lock is taken */
static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
{
	struct cflayer *dn;
	int idx = dev_info->id % DN_CACHE_SIZE;
	dn = muxl->dn_cache[idx];
	if (dn == NULL || dn->id != dev_info->id) {
		dn = get_from_id(&muxl->frml_list, dev_info->id);
		muxl->dn_cache[idx] = dn;
	}
	return dn;
}

struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
{
	struct cflayer *up;
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	up = get_up(muxl, id);
	if (up == NULL)
		goto out;
	memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
	list_del(&up->node);
out:
	spin_unlock(&muxl->receive_lock);
	return up;
}
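
/*
 * Receive path: strip the one-byte CAIF link id from the packet head and
 * hand the packet to the matching service layer above, if any.
 */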
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 id;
	struct cflayer *up;
	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
		pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	spin_lock(&muxl->receive_lock);
	up = get_up(muxl, id);
	spin_unlock(&muxl->receive_lock);
	if (up == NULL) {
		pr_info("CAIF: %s(): Received data on unknown link ID = %d "
			"(0x%x) up == NULL", __func__, id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return ERROR, since modem misbehaves and sends out
		 * flow on before linksetup response.
		 */
		return /* CFGLU_EPROT; */ 0;
	}
	ret = up->receive(up, pkt);
	return ret;
}
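
/*
 * Transmit path: prepend the one-byte channel id and pass the packet to
 * the framing layer registered for the packet's physical device.
 */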
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);
	dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
	if (dn == NULL) {
		pr_warning("CAIF: %s(): Send data on unknown phy "
			   "ID = %d (0x%x)\n",
			   __func__, info->dev_info->id, info->dev_info->id);
		return -ENOTCONN;
	}
	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);
	ret = dn->transmit(dn, pkt);
	/* Remove MUX protocol header upon error. */
	if (ret < 0)
		cfpkt_extr_head(pkt, &linkid, 1);
	return ret;
}
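
/*
 * Propagate a control command (e.g. flow on/off) to every service layer
 * bound to the given physical interface.
 */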
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct list_head *node;
	struct cflayer *layer;
	list_for_each(node, &muxl->srvl_list) {
		layer = list_entry(node, struct cflayer, node);
		if (cfsrvl_phyid_match(layer, phyid))
			layer->ctrlcmd(layer, ctrl, phyid);
	}
}