drivers/firewire/core-iso.c
/*
 * Isochronous I/O functionality:
 * - Isochronous DMA context management
 * - Isochronous bus resource management (channels, bandwidth), client side
 *
 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>

#include "core.h"

/*
 * Isochronous DMA context management
 */
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction)
{
	int i, j;
	dma_addr_t address;

	buffer->page_count = page_count;
	buffer->direction = direction;

	buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
				GFP_KERNEL);
	if (buffer->pages == NULL)
		goto out;

	for (i = 0; i < buffer->page_count; i++) {
		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		if (buffer->pages[i] == NULL)
			goto out_pages;

		address = dma_map_page(card->device, buffer->pages[i],
				       0, PAGE_SIZE, direction);
		if (dma_mapping_error(card->device, address)) {
			__free_page(buffer->pages[i]);
			goto out_pages;
		}
		set_page_private(buffer->pages[i], address);
	}

	return 0;

 out_pages:
	for (j = 0; j < i; j++) {
		address = page_private(buffer->pages[j]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, direction);
		__free_page(buffer->pages[j]);
	}
	kfree(buffer->pages);
 out:
	buffer->pages = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
	unsigned long uaddr;
	int i, err;

	uaddr = vma->vm_start;
	for (i = 0; i < buffer->page_count; i++) {
		err = vm_insert_page(vma, uaddr, buffer->pages[i]);
		if (err)
			return err;

		uaddr += PAGE_SIZE;
	}

	return 0;
}
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
			   struct fw_card *card)
{
	int i;
	dma_addr_t address;

	for (i = 0; i < buffer->page_count; i++) {
		address = page_private(buffer->pages[i]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, buffer->direction);
		__free_page(buffer->pages[i]);
	}

	kfree(buffer->pages);
	buffer->pages = NULL;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
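
/*
 * Usage sketch (illustrative only; "my_card" and the page count are
 * placeholders, not values prescribed by this file): a client pairs
 * fw_iso_buffer_init() with fw_iso_buffer_destroy() roughly like this.
 *
 *	struct fw_iso_buffer b;
 *	int err;
 *
 *	err = fw_iso_buffer_init(&b, my_card, 16, DMA_FROM_DEVICE);
 *	if (err)
 *		return err;
 *	... queue packets whose payloads point into the buffer pages ...
 *	fw_iso_buffer_destroy(&b, my_card);
 */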
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = card->driver->allocate_iso_context(card,
						 type, channel, header_size);
	if (IS_ERR(ctx))
		return ctx;

	ctx->card = card;
	ctx->type = type;
	ctx->channel = channel;
	ctx->speed = speed;
	ctx->header_size = header_size;
	ctx->callback = callback;
	ctx->callback_data = callback_data;

	return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
	struct fw_card *card = ctx->card;

	card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags)
{
	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload)
{
	struct fw_card *card = ctx->card;

	return card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
	return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
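
/*
 * Usage sketch (illustrative only): the typical life cycle of an
 * isochronous receive context.  "my_card", "handle_rx", "channel" and the
 * header size are placeholder assumptions, not values taken from this file.
 *
 *	struct fw_iso_context *ctx;
 *
 *	ctx = fw_iso_context_create(my_card, FW_ISO_CONTEXT_RECEIVE, channel,
 *				    SCODE_400, 4, handle_rx, NULL);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	... fw_iso_context_queue() packets describing chunks of an
 *	    fw_iso_buffer ...
 *	fw_iso_context_start(ctx, -1, 0, 0);	(a negative cycle starts ASAP)
 *	...
 *	fw_iso_context_stop(ctx);
 *	fw_iso_context_destroy(ctx);
 */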
/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */

static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
			    int bandwidth, bool allocate, __be32 data[2])
{
	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;

	/*
	 * On a 1394a IRM with low contention, try < 1 is enough.
	 * On a 1394-1995 IRM, we need at least try < 2.
	 * Let's just do try < 5.
	 */
	for (try = 0; try < 5; try++) {
		new = allocate ? old - bandwidth : old + bandwidth;
		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
			return -EBUSY;

		data[0] = cpu_to_be32(old);
		data[1] = cpu_to_be32(new);
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
				data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all bandwidth. */
			return allocate ? -EAGAIN : bandwidth;

		case RCODE_COMPLETE:
			if (be32_to_cpup(data) == old)
				return bandwidth;

			old = be32_to_cpup(data);
			/* Fall through. */
		}
	}

	return -EIO;
}
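
/*
 * For example (numbers purely illustrative): allocating 800 units starts
 * with old = BANDWIDTH_AVAILABLE_INITIAL and proposes new = old - 800 via
 * the compare-swap lock above.  If another node modified
 * BANDWIDTH_AVAILABLE in the meantime, the IRM returns the current
 * register value instead of "old"; the loop adopts that value and
 * retries, at most five times.
 */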
static int manage_channel(struct fw_card *card, int irm_id, int generation,
		u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
	__be32 c, all, old;
	int i, ret = -EIO, retry = 5;

	old = all = allocate ? cpu_to_be32(~0) : 0;

	for (i = 0; i < 32; i++) {
		if (!(channels_mask & 1 << i))
			continue;

		ret = -EBUSY;

		c = cpu_to_be32(1 << (31 - i));
		if ((old & c) != (all & c))
			continue;

		data[0] = old;
		data[1] = old ^ c;
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
					   irm_id, generation, SCODE_100,
					   offset, data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all channels. */
			return allocate ? -EAGAIN : i;

		case RCODE_COMPLETE:
			if (data[0] == old)
				return i;

			old = data[0];

			/* Is the IRM 1394a-2000 compliant? */
			if ((data[0] & c) == (data[1] & c))
				continue;

			/* 1394-1995 IRM, fall through to retry. */
		default:
			if (retry) {
				retry--;
				i--;
			} else {
				ret = -EIO;
			}
		}
	}

	return ret;
}
static void deallocate_channel(struct fw_card *card, int irm_id,
			       int generation, int channel, __be32 buffer[2])
{
	u32 mask;
	u64 offset;

	mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

	manage_channel(card, irm_id, generation, mask, offset, false, buffer);
}
/**
 * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate, __be32 buffer[2])
{
	u32 channels_hi = channels_mask;	/* channels 31...0 */
	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
	int irm_id, ret, c = -EINVAL;

	spin_lock_irq(&card->lock);
	irm_id = card->irm_node->node_id;
	spin_unlock_irq(&card->lock);

	if (channels_hi)
		c = manage_channel(card, irm_id, generation, channels_hi,
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
				allocate, buffer);
	if (channels_lo && c < 0) {
		c = manage_channel(card, irm_id, generation, channels_lo,
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
				allocate, buffer);
		if (c >= 0)
			c += 32;
	}
	*channel = c;

	if (allocate && channels_mask != 0 && c < 0)
		*bandwidth = 0;

	if (*bandwidth == 0)
		return;

	ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
			       allocate, buffer);
	if (ret < 0)
		*bandwidth = 0;

	if (allocate && ret < 0) {
		if (c >= 0)
			deallocate_channel(card, irm_id, generation, c, buffer);
		*channel = ret;
	}
}
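
/*
 * Usage sketch (illustrative only, values made up): requesting one channel
 * out of channels 0..7 plus 800 bandwidth units in the current generation.
 *
 *	int channel = -1;
 *	int bandwidth = 800;
 *	__be32 scratch[2];
 *
 *	fw_iso_resource_manage(card, generation, 0xffULL,
 *			       &channel, &bandwidth, true, scratch);
 *	if (channel < 0 || bandwidth == 0)
 *		... allocation failed; channel == -EAGAIN suggests retrying
 *		    in the next bus generation ...
 */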