/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 * $FreeBSD: src/sys/dev/mpt/mpt_user.c,v 1.4 2009/05/20 17:29:21 imp Exp $
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/device.h>
#include <sys/mpt_ioctl.h>

#include <dev/disk/mpt/mpt.h>
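
/*
 * Local bookkeeping for the userland interface: mpt_user_raid_action_result
 * mirrors the status words returned in a RAID ACTION reply (stashed in the
 * request buffer by the reply handler below), and mpt_page_memory tracks the
 * DMA tag, map and addresses of the bounce buffer that carries configuration
 * page and RAID action data between user space and the IOC.
 */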
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static int	mpt_open(struct dev_open_args *ap);
static int	mpt_close(struct dev_close_args *ap);
static int	mpt_ioctl(struct dev_ioctl_args *ap);

static struct dev_ops mpt_cdevsw = {
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;
int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

void
mpt_user_ready(struct mpt_softc *mpt)
{
}

int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}

void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct dev_open_args *ap)
{

	return (0);
}

static int
mpt_close(struct dev_close_args *ap)
{

	return (0);
}
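
/*
 * Allocate a DMA-safe bounce buffer (capped at 16MB) that user data is
 * copied into or out of around each request; mpt_free_buffer() tears the
 * tag, map and memory back down once the ioctl completes.
 */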
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}
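
/*
 * The helpers below wrap mpt_issue_cfg_req() for the ioctl paths: a header
 * read fetches only the CONFIG_PAGE_HEADER for a page, while the page
 * read/write variants transfer the full page through the bounce buffer and
 * report the IOC status back to the caller.  On a timeout the request is
 * deliberately leaked, since the chip still owns it.
 */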
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}
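
/*
 * Writes are only allowed for pages whose attribute marks them as
 * changeable or persistent; anything else is rejected with EINVAL before
 * a request is issued.
 */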
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	u_int hdr_attr;
	int error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
		    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if 0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */
	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if 0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	mpt_free_request(mpt, req);
	return (0);
}
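
/*
 * Reply handler registered in mpt_user_attach().  For RAID ACTION replies
 * it copies the action/volume status into the result area reserved in the
 * request buffer, then either wakes the sleeping ioctl or frees a request
 * that had already timed out.
 */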
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD);
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
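/*
 * Concretely, the result area lives at req->req_vbuf + MPT_RQSL(mpt),
 * which is where both mpt_user_reply_handler() and mpt_user_raid_action()
 * look for it.
 */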
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
    struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		/*
		 * The ternary must be parenthesized: '|' binds tighter than
		 * '?:', so without the inner parentheses the OR'd flags would
		 * become the condition and the SGE flags would collapse to
		 * just the direction bit.
		 */
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}
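
/*
 * On amd64, 32-bit consumers pass in structures containing 32-bit user
 * pointers; PTRIN/PTROUT convert those to and from kernel void pointers so
 * the MPTIO_*32 ioctls can share the native code paths below.
 */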
#ifdef __amd64__
#define	PTRIN(p)	((void *)(uintptr_t)(p))
#define	PTROUT(v)	((u_int32_t)(uintptr_t)(v))
#endif
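
/*
 * Expected calling sequence from userland (e.g. mptutil(8)): issue
 * MPTIO_READ_CFG_HEADER (or the extended variant) to learn a page's header,
 * place that header at the start of the user buffer, and then issue
 * MPTIO_READ_CFG_PAGE or MPTIO_WRITE_CFG_PAGE with len covering the whole
 * page.  MPTIO_RAID_ACTION optionally carries an action buffer the same way.
 */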
static int
mpt_ioctl(struct dev_ioctl_args *ap)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	u_long cmd = ap->a_cmd;
	caddr_t arg = ap->a_data;
	struct cdev *kdev = ap->a_head.a_dev;
	int error;

	mpt = kdev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}