/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
9 #include <linux/stddef.h>
10 #include <linux/spinlock.h>
11 #include <linux/slab.h>
12 #include <linux/rculist.h>
13 #include <net/caif/cfpkt.h>
14 #include <net/caif/cfmuxl.h>
15 #include <net/caif/cfsrvl.h>
16 #include <net/caif/cffrml.h>
18 #define container_obj(layr) container_of(layr, struct cfmuxl, layer)
20 #define CAIF_CTRL_CHANNEL 0
21 #define UP_CACHE_SIZE 8
22 #define DN_CACHE_SIZE 8
26 struct list_head srvl_list
;
27 struct list_head frml_list
;
28 struct cflayer
*up_cache
[UP_CACHE_SIZE
];
29 struct cflayer
*dn_cache
[DN_CACHE_SIZE
];
31 * Set when inserting or removing downwards layers.
33 spinlock_t transmit_lock
;
36 * Set when inserting or removing upwards layers.
38 spinlock_t receive_lock
;
42 static int cfmuxl_receive(struct cflayer
*layr
, struct cfpkt
*pkt
);
43 static int cfmuxl_transmit(struct cflayer
*layr
, struct cfpkt
*pkt
);
44 static void cfmuxl_ctrlcmd(struct cflayer
*layr
, enum caif_ctrlcmd ctrl
,
46 static struct cflayer
*get_up(struct cfmuxl
*muxl
, u16 id
);
48 struct cflayer
*cfmuxl_create(void)
50 struct cfmuxl
*this = kmalloc(sizeof(struct cfmuxl
), GFP_ATOMIC
);
53 memset(this, 0, sizeof(*this));
54 this->layer
.receive
= cfmuxl_receive
;
55 this->layer
.transmit
= cfmuxl_transmit
;
56 this->layer
.ctrlcmd
= cfmuxl_ctrlcmd
;
57 INIT_LIST_HEAD(&this->srvl_list
);
58 INIT_LIST_HEAD(&this->frml_list
);
59 spin_lock_init(&this->transmit_lock
);
60 spin_lock_init(&this->receive_lock
);
61 snprintf(this->layer
.name
, CAIF_LAYER_NAME_SZ
, "mux");
65 int cfmuxl_set_dnlayer(struct cflayer
*layr
, struct cflayer
*dn
, u8 phyid
)
67 struct cfmuxl
*muxl
= (struct cfmuxl
*) layr
;
69 spin_lock_bh(&muxl
->transmit_lock
);
70 list_add_rcu(&dn
->node
, &muxl
->frml_list
);
71 spin_unlock_bh(&muxl
->transmit_lock
);
75 static struct cflayer
*get_from_id(struct list_head
*list
, u16 id
)
78 list_for_each_entry_rcu(lyr
, list
, node
) {
86 int cfmuxl_set_uplayer(struct cflayer
*layr
, struct cflayer
*up
, u8 linkid
)
88 struct cfmuxl
*muxl
= container_obj(layr
);
91 spin_lock_bh(&muxl
->receive_lock
);
93 /* Two entries with same id is wrong, so remove old layer from mux */
94 old
= get_from_id(&muxl
->srvl_list
, linkid
);
96 list_del_rcu(&old
->node
);
98 list_add_rcu(&up
->node
, &muxl
->srvl_list
);
99 spin_unlock_bh(&muxl
->receive_lock
);
104 struct cflayer
*cfmuxl_remove_dnlayer(struct cflayer
*layr
, u8 phyid
)
106 struct cfmuxl
*muxl
= container_obj(layr
);
108 int idx
= phyid
% DN_CACHE_SIZE
;
110 spin_lock_bh(&muxl
->transmit_lock
);
111 rcu_assign_pointer(muxl
->dn_cache
[idx
], NULL
);
112 dn
= get_from_id(&muxl
->frml_list
, phyid
);
116 list_del_rcu(&dn
->node
);
117 caif_assert(dn
!= NULL
);
119 spin_unlock_bh(&muxl
->transmit_lock
);
123 static struct cflayer
*get_up(struct cfmuxl
*muxl
, u16 id
)
126 int idx
= id
% UP_CACHE_SIZE
;
127 up
= rcu_dereference(muxl
->up_cache
[idx
]);
128 if (up
== NULL
|| up
->id
!= id
) {
129 spin_lock_bh(&muxl
->receive_lock
);
130 up
= get_from_id(&muxl
->srvl_list
, id
);
131 rcu_assign_pointer(muxl
->up_cache
[idx
], up
);
132 spin_unlock_bh(&muxl
->receive_lock
);
137 static struct cflayer
*get_dn(struct cfmuxl
*muxl
, struct dev_info
*dev_info
)
140 int idx
= dev_info
->id
% DN_CACHE_SIZE
;
141 dn
= rcu_dereference(muxl
->dn_cache
[idx
]);
142 if (dn
== NULL
|| dn
->id
!= dev_info
->id
) {
143 spin_lock_bh(&muxl
->transmit_lock
);
144 dn
= get_from_id(&muxl
->frml_list
, dev_info
->id
);
145 rcu_assign_pointer(muxl
->dn_cache
[idx
], dn
);
146 spin_unlock_bh(&muxl
->transmit_lock
);
151 struct cflayer
*cfmuxl_remove_uplayer(struct cflayer
*layr
, u8 id
)
154 struct cfmuxl
*muxl
= container_obj(layr
);
155 int idx
= id
% UP_CACHE_SIZE
;
158 pr_warn("Trying to remove control layer\n");
162 spin_lock_bh(&muxl
->receive_lock
);
163 up
= get_from_id(&muxl
->srvl_list
, id
);
167 rcu_assign_pointer(muxl
->up_cache
[idx
], NULL
);
168 list_del_rcu(&up
->node
);
170 spin_unlock_bh(&muxl
->receive_lock
);
174 static int cfmuxl_receive(struct cflayer
*layr
, struct cfpkt
*pkt
)
177 struct cfmuxl
*muxl
= container_obj(layr
);
180 if (cfpkt_extr_head(pkt
, &id
, 1) < 0) {
181 pr_err("erroneous Caif Packet\n");
186 up
= get_up(muxl
, id
);
189 pr_debug("Received data on unknown link ID = %d (0x%x)"
190 " up == NULL", id
, id
);
193 * Don't return ERROR, since modem misbehaves and sends out
194 * flow on before linksetup response.
198 return /* CFGLU_EPROT; */ 0;
201 /* We can't hold rcu_lock during receive, so take a ref count instead */
205 ret
= up
->receive(up
, pkt
);
211 static int cfmuxl_transmit(struct cflayer
*layr
, struct cfpkt
*pkt
)
213 struct cfmuxl
*muxl
= container_obj(layr
);
217 struct caif_payload_info
*info
= cfpkt_info(pkt
);
222 dn
= get_dn(muxl
, info
->dev_info
);
224 pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
225 info
->dev_info
->id
, info
->dev_info
->id
);
232 linkid
= info
->channel_id
;
233 cfpkt_add_head(pkt
, &linkid
, 1);
235 /* We can't hold rcu_lock during receive, so take a ref count instead */
240 err
= dn
->transmit(dn
, pkt
);
246 static void cfmuxl_ctrlcmd(struct cflayer
*layr
, enum caif_ctrlcmd ctrl
,
249 struct cfmuxl
*muxl
= container_obj(layr
);
250 struct cflayer
*layer
;
254 list_for_each_entry_rcu(layer
, &muxl
->srvl_list
, node
) {
256 if (cfsrvl_phyid_match(layer
, phyid
) && layer
->ctrlcmd
) {
258 if ((ctrl
== _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND
||
259 ctrl
== CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND
) &&
262 idx
= layer
->id
% UP_CACHE_SIZE
;
263 spin_lock_bh(&muxl
->receive_lock
);
264 rcu_assign_pointer(muxl
->up_cache
[idx
], NULL
);
265 list_del_rcu(&layer
->node
);
266 spin_unlock_bh(&muxl
->receive_lock
);
268 /* NOTE: ctrlcmd is not allowed to block */
269 layer
->ctrlcmd(layer
, ctrl
, phyid
);