/*
 *  Adaptec AAC series RAID controller driver
 *  (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  comminit.c
 *
 * Abstract: This supports the initialization of the host adapter communication interface.
 *    This is a platform dependent module for the PCI cyclone board.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
#include <asm/semaphore.h>

#include "aacraid.h"
struct aac_common aac_config = {
    .irq_mod = 1
};
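
/*
 *  aac_alloc_comm  -  allocate the shared adapter/host comm region
 *  @dev: Adapter to attach the region to
 *  @commaddr: Returned virtual address of the queue header/entry area
 *  @commsize: Size in bytes of the queue header/entry area
 *  @commalign: Required alignment of the queue header/entry area
 *
 *  Allocates one physically contiguous, DMA-able region and carves it up
 *  as: adapter FIBs (4K, page aligned), the aac_init structure, the comm
 *  area (aligned to @commalign), and finally the adapter printf buffer.
 *  The aac_init structure is filled in so the adapter can locate each
 *  piece.  Returns 1 on success and 0 on allocation failure.
 */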
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
    unsigned char *base;
    unsigned long size, align;
    const unsigned long fibsize = 4096;
    const unsigned long printfbufsiz = 256;
    struct aac_init *init;
    dma_addr_t phys;

    size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
    base = pci_alloc_consistent(dev->pdev, size, &phys);

    if (base == NULL) {
        printk(KERN_ERR "aacraid: unable to create mapping.\n");
        return 0;
    }
    dev->comm_addr = (void *)base;
    dev->comm_phys = phys;
    dev->comm_size = size;

    dev->init = (struct aac_init *)(base + fibsize);
    dev->init_pa = phys + fibsize;

    init = dev->init;

    init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
    if (dev->max_fib_size != sizeof(struct hw_fib))
        init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
    init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
    init->fsrev = cpu_to_le32(dev->fsrev);
    /*
     *  Adapter Fibs are the first thing allocated so that they
     *  start page aligned
     */
    dev->aif_base_va = (struct hw_fib *)base;

    init->AdapterFibsVirtualAddress = 0;
    init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
    init->AdapterFibsSize = cpu_to_le32(fibsize);
    init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
    /*
     * Number of 4k pages of host physical memory. The aacraid fw needs
     * this number to be less than 4GB worth of pages. num_physpages is in
     * system page units. New firmware doesn't have any issues with the
     * mapping system, but older firmware did, and had *troubles* dealing
     * with the math overloading past 32 bits, thus we must limit this
     * field.
     *
     * This assumes the memory is mapped zero->n, which isn't
     * always true on real computers. It also has some slight problems
     * with the GART on x86-64. I've btw never tried DMA from PCI space
     * on this platform but don't be surprised if it's problematic.
     */
#ifndef CONFIG_GART_IOMMU
    if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
        init->HostPhysMemPages =
            cpu_to_le32(num_physpages << (PAGE_SHIFT - 12));
    } else
#endif
    {
        init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
    }
    init->InitFlags = 0;
    if (dev->new_comm_interface) {
        init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
        dprintk((KERN_WARNING "aacraid: New Comm Interface enabled\n"));
    }
    init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
    init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
    init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
    /*
     *  Increment the base address by the amount already used
     */
    base = base + fibsize + sizeof(struct aac_init);
    phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
    /*
     *  Align the beginning of Headers to commalign
     */
    align = (commalign - ((unsigned long)(base) & (commalign - 1)));
    base = base + align;
    phys = phys + align;
    /*
     *  Fill in addresses of the Comm Area Headers and Queues
     */
    *commaddr = base;
    init->CommHeaderAddress = cpu_to_le32((u32)phys);
    /*
     *  Increment the base address by the size of the CommArea
     */
    base = base + commsize;
    phys = phys + commsize;
    /*
     *  Place the Printf buffer area after the Fast I/O comm area.
     */
    dev->printfbuf = (void *)base;
    init->printfbuf = cpu_to_le32(phys);
    init->printfbufsiz = cpu_to_le32(printfbufsiz);
    memset(base, 0, printfbufsiz);
    return 1;
}
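
/*
 *  aac_queue_init  -  initialise one communication queue
 *  @dev: Adapter owning the queue
 *  @q: Queue to initialise
 *  @mem: Producer/consumer index pair for this queue in the comm area
 *  @qsize: Number of entries in the queue
 *
 *  Sets up the pending/command lists, wait queues and spinlock for a
 *  single queue and points its producer/consumer indices at the shared
 *  header memory.  Both indices are initialised to @qsize (producer ==
 *  consumer, i.e. the queue starts out empty).
 */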
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
    q->numpending = 0;
    q->dev = dev;
    INIT_LIST_HEAD(&q->pendingq);
    init_waitqueue_head(&q->cmdready);
    INIT_LIST_HEAD(&q->cmdq);
    init_waitqueue_head(&q->qfull);
    spin_lock_init(&q->lockdata);
    q->lock = &q->lockdata;
    q->headers.producer = (__le32 *)mem;
    q->headers.consumer = (__le32 *)(mem + 1);
    *(q->headers.producer) = cpu_to_le32(qsize);
    *(q->headers.consumer) = cpu_to_le32(qsize);
    q->entries = qsize;
}
/**
 *  aac_send_shutdown  -  shutdown an adapter
 *  @dev: Adapter to shutdown
 *
 *  This routine will send a VM_CloseAll (shutdown) request to the adapter.
 */
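/*
 *  Returns 0 on success, -ENOMEM if no FIB could be allocated, or the
 *  status returned by aac_fib_send().  The FIB is released before
 *  returning.
 */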
int aac_send_shutdown(struct aac_dev * dev)
{
    struct fib * fibctx;
    struct aac_close *cmd;
    int status;

    fibctx = aac_fib_alloc(dev);
    if (!fibctx)
        return -ENOMEM;
    aac_fib_init(fibctx);

    cmd = (struct aac_close *) fib_data(fibctx);

    cmd->command = cpu_to_le32(VM_CloseAll);
    cmd->cid = cpu_to_le32(0xffffffff);

    status = aac_fib_send(ContainerCommand,
            fibctx,
            sizeof(struct aac_close),
            FsaNormal,
            -2 /* Timeout silently */, 1,
            NULL, NULL);

    if (status == 0)
        aac_fib_complete(fibctx);
    aac_fib_free(fibctx);
    return status;
}
/**
 *  aac_comm_init  -  Initialise FSA data structures
 *  @dev: Adapter to initialise
 *
 *  Initializes the data structures that are required for the FSA
 *  communication interface to operate.
 *  Returns
 *      0 - if we were able to init the communication interface.
 *      -ENOMEM - if there were errors initing. This is a fatal error.
 */
static int aac_comm_init(struct aac_dev * dev)
{
    unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
    unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
    u32 *headers;
    struct aac_entry * queues;
    unsigned long size;
    struct aac_queue_block * comm = dev->queues;
    /*
     *  Now allocate and initialize the zone structures used as our
     *  pool of FIB context records.  The size of the zone is based
     *  on the system memory size.  We also initialize the mutex used
     *  to protect the zone.
     */
    spin_lock_init(&dev->fib_lock);

    /*
     *  Allocate the physically contiguous space for the communication
     *  queue headers.
     */

    size = hdrsize + queuesize;

    if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
        return -ENOMEM;

    queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
    /* Adapter to Host normal priority Command queue */
    comm->queue[HostNormCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
    queues += HOST_NORM_CMD_ENTRIES;
    headers += 2;

    /* Adapter to Host high priority command queue */
    comm->queue[HostHighCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);

    queues += HOST_HIGH_CMD_ENTRIES;
    headers += 2;

    /* Host to adapter normal priority command queue */
    comm->queue[AdapNormCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);

    queues += ADAP_NORM_CMD_ENTRIES;
    headers += 2;

    /* host to adapter high priority command queue */
    comm->queue[AdapHighCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);

    queues += ADAP_HIGH_CMD_ENTRIES;
    headers += 2;

    /* adapter to host normal priority response queue */
    comm->queue[HostNormRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
    queues += HOST_NORM_RESP_ENTRIES;
    headers += 2;

    /* adapter to host high priority response queue */
    comm->queue[HostHighRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);

    queues += HOST_HIGH_RESP_ENTRIES;
    headers += 2;

    /* host to adapter normal priority response queue */
    comm->queue[AdapNormRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);

    queues += ADAP_NORM_RESP_ENTRIES;
    headers += 2;

    /* host to adapter high priority response queue */
    comm->queue[AdapHighRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
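
    /*
     * The host-to-adapter (Adap*) queues do not use their own lockdata:
     * the Adap* command queues reuse the Host* response queue locks and
     * the Adap* response queues reuse the Host* command queue locks
     * (normal paired with normal, high with high).
     */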
    comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
    comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
    comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
    comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

    return 0;
}
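
/*
 *  aac_init_adapter  -  initialise an adapter
 *  @dev: Adapter to initialise
 *
 *  Negotiates communication parameters with the firmware
 *  (GET_ADAPTER_PROPERTIES and GET_COMM_PREFERRED_SETTINGS), applies any
 *  acbsize/numacb overrides, then allocates the queue block and sets up
 *  the communication queues and the FIB pool.  Returns @dev on success
 *  or NULL on failure.
 */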
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
    u32 status[5];
    struct Scsi_Host * host = dev->scsi_host_ptr;

    /*
     *  Check the preferred comm settings, defaults from template.
     */
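    /*
     * Default SG table size: the number of sgentry elements that fit in a
     * default-sized FIB once the FIB header and the write command are
     * subtracted (the + sizeof(struct sgentry) term accounts for the sg
     * entry already embedded in struct aac_write).
     */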
    dev->max_fib_size = sizeof(struct hw_fib);
    dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
        - sizeof(struct aac_fibhdr)
        - sizeof(struct aac_write) + sizeof(struct sgentry))
            / sizeof(struct sgentry);
    dev->new_comm_interface = 0;
    dev->raw_io_64 = 0;
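
    /*
     * Query the adapter properties.  status[0] == 1 indicates a valid
     * reply; status[1] then carries the AAC_OPT_* capability bits and
     * status[2] the register window size needed by the new comm
     * interface.
     */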
    if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
        0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
            (status[0] == 0x00000001)) {
        if (status[1] & AAC_OPT_NEW_COMM_64)
            dev->raw_io_64 = 1;
        if (status[1] & AAC_OPT_NEW_COMM)
            dev->new_comm_interface = dev->a_ops.adapter_send != 0;
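        /*
         * The new comm interface may need a larger register window than
         * the one mapped at probe time; remap at the size the adapter
         * reported, and fall back to the minimum footprint (with the new
         * interface disabled) if that remap fails.
         */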
        if (dev->new_comm_interface && (status[2] > dev->base_size)) {
            iounmap(dev->regs.sa);
            dev->base_size = status[2];
            dprintk((KERN_DEBUG "ioremap(%lx,%d)\n",
                host->base, status[2]));
            dev->regs.sa = ioremap(host->base, status[2]);
            if (dev->regs.sa == NULL) {
                /* remap failed, go back ... */
                dev->new_comm_interface = 0;
                dev->regs.sa = ioremap(host->base,
                        AAC_MIN_FOOTPRINT_SIZE);
                if (dev->regs.sa == NULL) {
                    printk(KERN_WARNING
                        "aacraid: unable to map adapter.\n");
                    return NULL;
                }
            }
        }
    }
    if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
        0, 0, 0, 0, 0, 0,
        status+0, status+1, status+2, status+3, status+4))
            && (status[0] == 0x00000001)) {
        /*
         *  status[1] >> 16     maximum command size in KB
         *  status[1] & 0xFFFF  maximum FIB size
         *  status[2] >> 16     maximum SG elements to driver
         *  status[2] & 0xFFFF  maximum SG elements from driver
         *  status[3] & 0xFFFF  maximum number FIBs outstanding
         */
        host->max_sectors = (status[1] >> 16) << 1;
        dev->max_fib_size = status[1] & 0xFFFF;
        host->sg_tablesize = status[2] >> 16;
        dev->sg_tablesize = status[2] & 0xFFFF;
        host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
        /*
         *  NOTE:
         *  All these overrides are based on a fixed internal
         *  knowledge and understanding of existing adapters,
         *  acbsize should be set with caution.
         */
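        /*
         * acbsize and numacb are driver-wide tunables defined elsewhere
         * in the driver (exposed as aacraid module parameters): acbsize
         * forces a specific FIB size, and numacb can only lower the queue
         * depth negotiated above.
         */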
        if (acbsize == 512) {
            host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
            dev->max_fib_size = 512;
            dev->sg_tablesize = host->sg_tablesize
                = (512 - sizeof(struct aac_fibhdr)
                - sizeof(struct aac_write) + sizeof(struct sgentry))
                    / sizeof(struct sgentry);
            host->can_queue = AAC_NUM_IO_FIB;
        } else if (acbsize == 2048) {
            host->max_sectors = 512;
            dev->max_fib_size = 2048;
            host->sg_tablesize = 65;
            dev->sg_tablesize = 81;
            host->can_queue = 512 - AAC_NUM_MGT_FIB;
        } else if (acbsize == 4096) {
            host->max_sectors = 1024;
            dev->max_fib_size = 4096;
            host->sg_tablesize = 129;
            dev->sg_tablesize = 166;
            host->can_queue = 256 - AAC_NUM_MGT_FIB;
        } else if (acbsize == 8192) {
            host->max_sectors = 2048;
            dev->max_fib_size = 8192;
            host->sg_tablesize = 257;
            dev->sg_tablesize = 337;
            host->can_queue = 128 - AAC_NUM_MGT_FIB;
        } else if (acbsize > 0) {
            printk("Illegal acbsize=%d ignored\n", acbsize);
        }
    }

    if (numacb > 0) {
        if (numacb < host->can_queue)
            host->can_queue = numacb;
        else
            printk("numacb=%d ignored\n", numacb);
    }
    /*
     *  Ok now init the communication subsystem
     */
    dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
    if (dev->queues == NULL) {
        printk(KERN_ERR "Error could not allocate comm region.\n");
        return NULL;
    }
    memset(dev->queues, 0, sizeof(struct aac_queue_block));

    if (aac_comm_init(dev) < 0) {
        kfree(dev->queues);
        return NULL;
    }
    /*
     *  Initialize the list of fibs
     */
    if (aac_fib_setup(dev) < 0) {
        kfree(dev->queues);
        return NULL;
    }

    INIT_LIST_HEAD(&dev->fib_list);
    init_completion(&dev->aif_completion);

    return dev;
}