isci: cleanup tag macros
drivers/scsi/isci/host.h
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_

#include "remote_device.h"
#include "phy.h"
#include "isci.h"
#include "remote_node_table.h"
#include "registers.h"
#include "scu_unsolicited_frame.h"
#include "unsolicited_frame_control.h"
#include "probe_roms.h"

struct scic_sds_request;
struct scu_task_context;

/**
 * struct scic_power_control -
 *
 * This structure defines the fields for managing power control for direct
 * attached disk devices.
 */
struct scic_power_control {
	/**
	 * This field is set when the power control timer is running and cleared when
	 * it is not.
	 */
	bool timer_started;

	/**
	 * Timer to control when the direct attached disks can consume power.
	 */
	struct sci_timer timer;

	/**
	 * This field is used to keep track of how many phys are put into the
	 * requesters field.
	 */
	u8 phys_waiting;

	/**
	 * This field is used to keep track of how many phys have been granted
	 * permission to consume power.
	 */
	u8 phys_granted_power;

	/**
	 * This field is an array of phys that we are waiting on. The phys are direct
	 * mapped into requesters via struct scic_sds_phy.phy_index
	 */
	struct scic_sds_phy *requesters[SCI_MAX_PHYS];

};

struct scic_sds_port_configuration_agent;
typedef void (*port_config_fn)(struct scic_sds_controller *,
			       struct scic_sds_port_configuration_agent *,
			       struct scic_sds_port *, struct scic_sds_phy *);

struct scic_sds_port_configuration_agent {
	u16 phy_configured_mask;
	u16 phy_ready_mask;
	struct {
		u8 min_index;
		u8 max_index;
	} phy_valid_port_range[SCI_MAX_PHYS];
	bool timer_pending;
	port_config_fn link_up_handler;
	port_config_fn link_down_handler;
	struct sci_timer timer;
};

/**
 * struct scic_sds_controller -
 *
 * This structure represents the SCU controller object.
 */
struct scic_sds_controller {
	/**
	 * This field contains the information for the base controller state
	 * machine.
	 */
	struct sci_base_state_machine sm;

	/**
	 * Timer for controller start/stop operations.
	 */
	struct sci_timer timer;

	/**
	 * This field contains the user parameters to be utilized for this
	 * core controller object.
	 */
	union scic_user_parameters user_parameters;

	/**
	 * This field contains the OEM parameters to be utilized for this
	 * core controller object.
	 */
	union scic_oem_parameters oem_parameters;

	/**
	 * This field contains the port configuration agent for this controller.
	 */
	struct scic_sds_port_configuration_agent port_agent;

	/**
	 * This field is the array of device objects that are currently constructed
	 * for this controller object. This table is used as a fast lookup of device
	 * objects that need to handle device completion notifications from the
	 * hardware. The table is RNi based.
	 */
	struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];

	/**
	 * This field is the array of IO request objects that are currently active for
	 * this controller object. This table is used as a fast lookup of the io
	 * request objects that need to handle completion queue notifications. The
	 * table is TCi based.
	 */
	struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS];

	/**
	 * This field is the free RNi data structure.
	 */
	struct scic_remote_node_table available_remote_nodes;

	/**
	 * This field is the struct scic_power_control data used to control when direct
	 * attached devices can consume power.
	 */
	struct scic_power_control power_control;

	/* sequence number per tci */
	u8 io_request_sequence[SCI_MAX_IO_REQUESTS];

	/**
	 * This field is the array of sequence values for the RNi. These are used
	 * to control io request build to io request start operations. The sequence
	 * value is recorded into an io request when it is built and is checked on
	 * the io request start operation to make sure that there was not a device
	 * hot plug between the build and start operation.
	 */
	u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES];

	/**
	 * This field is a pointer to the memory allocated by the driver for the task
	 * context table. This data is shared between the hardware and software.
	 */
	struct scu_task_context *task_context_table;

	/**
	 * This field is a pointer to the memory allocated by the driver for the
	 * remote node context table. This table is shared between the hardware and
	 * software.
	 */
	union scu_remote_node_context *remote_node_context_table;

	/**
	 * This field is a pointer to the completion queue. This memory is
	 * written to by the hardware and read by the software.
	 */
	u32 *completion_queue;

	/**
	 * This field is the software copy of the completion queue get pointer. The
	 * controller object writes this value to the hardware after processing the
	 * completion entries.
	 */
	u32 completion_queue_get;

	/**
	 * This field is the minimum of the number of hardware supported port entries
	 * and the software requested port entries.
	 */
	u32 logical_port_entries;

	/**
	 * This field is the minimum of the number of devices supported by the hardware
	 * and the number of devices requested by the software.
	 */
	u32 remote_node_entries;

	/**
	 * This field is the minimum of the number of IO requests supported by the
	 * hardware and the number of IO requests requested by the software.
	 */
	u32 task_context_entries;

	/**
	 * This object contains all of the unsolicited frame specific
	 * data utilized by the core controller.
	 */
	struct scic_sds_unsolicited_frame_control uf_control;

	/* Phy Startup Data */
	/**
	 * Timer for controller phy request startup. On controller start the
	 * controller will start each PHY individually in order of phy index.
	 */
	struct sci_timer phy_timer;

	/**
	 * This field is set when the phy_timer is running and is cleared when
	 * the phy_timer is stopped.
	 */
	bool phy_startup_timer_pending;

	/**
	 * This field is the index of the next phy start. It is initialized to 0 and
	 * increments for each phy index that is started.
	 */
	u32 next_phy_to_start;

	/**
	 * This field controls the invalid link up notifications to the SCI_USER. If
	 * an invalid_link_up notification is reported a bit for the PHY index is set
	 * so further notifications are not made. Once the PHY object reports link up
	 * and is made part of a port then this bit for the PHY index is cleared.
	 */
	u8 invalid_phy_mask;

	/**
	 * This field saves the current interrupt coalescing number of the controller.
	 */
	u16 interrupt_coalesce_number;

	/**
	 * This field saves the current interrupt coalescing timeout value in microseconds.
	 */
	u32 interrupt_coalesce_timeout;

	/**
	 * This field is a pointer to the memory mapped register space for the
	 * struct smu_registers.
	 */
	struct smu_registers __iomem *smu_registers;

	/**
	 * This field is a pointer to the memory mapped register space for the
	 * struct scu_registers.
	 */
	struct scu_registers __iomem *scu_registers;

};

struct isci_host {
	struct scic_sds_controller sci;
	u16 tci_head;
	u16 tci_tail;
	u16 tci_pool[SCI_MAX_IO_REQUESTS];

	union scic_oem_parameters oem_parameters;

	int id; /* unique within a given pci device */
	struct dma_pool *dma_pool;
	struct isci_phy phys[SCI_MAX_PHYS];
	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
	struct sas_ha_struct sas_ha;

	int can_queue;
	spinlock_t queue_lock;
	spinlock_t state_lock;

	struct pci_dev *pdev;

	enum isci_status status;
	#define IHOST_START_PENDING 0
	#define IHOST_STOP_PENDING 1
	unsigned long flags;
	wait_queue_head_t eventq;
	struct Scsi_Host *shost;
	struct tasklet_struct completion_tasklet;
	struct list_head requests_to_complete;
	struct list_head requests_to_errorback;
	spinlock_t scic_lock;

	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
};

/**
 * enum scic_sds_controller_states - This enumeration depicts all the states
 * for the common controller state machine.
 */
enum scic_sds_controller_states {
	/**
	 * Simply the initial state for the base controller state machine.
	 */
	SCIC_INITIAL = 0,

	/**
	 * This state indicates that the controller is reset. The memory for
	 * the controller is in its initial state, but the controller requires
	 * initialization.
	 * This state is entered from the INITIAL state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_RESET,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of initialization. In this state no new IO operations
	 * are permitted.
	 * This state is entered from the RESET state.
	 */
	SCIC_INITIALIZING,

	/**
	 * This state indicates that the controller has been successfully
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 */
	SCIC_INITIALIZED,

	/**
	 * This state indicates the controller is in the process of becoming
	 * ready (i.e. starting). In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZED state.
	 */
	SCIC_STARTING,

	/**
	 * This state indicates the controller is now ready. Thus, the user
	 * is able to perform IO operations on the controller.
	 * This state is entered from the STARTING state.
	 */
	SCIC_READY,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of resetting. Thus, the user is unable to perform
	 * IO operations on the controller. A reset is considered destructive in
	 * most cases.
	 * This state is entered from the READY state.
	 * This state is entered from the FAILED state.
	 * This state is entered from the STOPPED state.
	 */
	SCIC_RESETTING,

	/**
	 * This state indicates that the controller is in the process of stopping.
	 * In this state no new IO operations are permitted, but existing IO
	 * operations are allowed to complete.
	 * This state is entered from the READY state.
	 */
	SCIC_STOPPING,

	/**
	 * This state indicates that the controller has successfully been stopped.
	 * In this state no new IO operations are permitted.
	 * This state is entered from the STOPPING state.
	 */
	SCIC_STOPPED,

	/**
	 * This state indicates that the controller could not successfully be
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 * This state is entered from the STARTING state.
	 * This state is entered from the STOPPING state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_FAILED,
};

/**
 * struct isci_pci_info - This class represents the pci function containing the
 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
 * the PCI function.
 */
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)

struct isci_pci_info {
	struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
	struct isci_host *hosts[SCI_MAX_CONTROLLERS];
	struct isci_orom *orom;
};

static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

#define for_each_isci_host(id, ihost, pdev) \
	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
	     ihost = to_pci_info(pdev)->hosts[++id])
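
/*
 * Illustrative use of for_each_isci_host() (not part of the original header):
 * walk every controller bound to a PCI function, stopping at the first empty
 * slot. 'id' and 'ihost' are hypothetical caller-supplied locals.
 *
 *	int id;
 *	struct isci_host *ihost;
 *
 *	for_each_isci_host(id, ihost, pdev)
 *		dev_dbg(&pdev->dev, "controller %d present\n", ihost->id);
 */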

static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
{
	return isci_host->status;
}

static inline void isci_host_change_state(struct isci_host *isci_host,
					  enum isci_status status)
{
	unsigned long flags;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host = %p, state = 0x%x",
		__func__,
		isci_host,
		status);
	spin_lock_irqsave(&isci_host->state_lock, flags);
	isci_host->status = status;
	spin_unlock_irqrestore(&isci_host->state_lock, flags);
}

static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&isci_host->queue_lock, flags);
	if ((isci_host->can_queue - num) < 0) {
		dev_dbg(&isci_host->pdev->dev,
			"%s: isci_host->can_queue = %d\n",
			__func__,
			isci_host->can_queue);
		ret = -SAS_QUEUE_FULL;

	} else
		isci_host->can_queue -= num;

	spin_unlock_irqrestore(&isci_host->queue_lock, flags);

	return ret;
}

static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
{
	unsigned long flags;

	spin_lock_irqsave(&isci_host->queue_lock, flags);
	isci_host->can_queue += num;
	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
}
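
/*
 * Illustrative pairing of the two helpers above (not part of the original
 * header): reserve a queue slot before submitting an I/O and give it back if
 * the submission fails. 'submit_one_io' is a hypothetical helper used only
 * for the sketch.
 *
 *	if (isci_host_can_queue(ihost, 1))
 *		return -SAS_QUEUE_FULL;
 *	if (submit_one_io(ihost) != SCI_SUCCESS)
 *		isci_host_can_dequeue(ihost, 1);
 */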

static inline void wait_for_start(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
}

static inline void wait_for_stop(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
}

static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
}

static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
}

static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
{
	return dev->port->ha->lldd_ha;
}

static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
{
	/* XXX delete after merging scic_sds_controller and isci_host */
	struct isci_host *ihost = container_of(scic, typeof(*ihost), sci);

	return ihost;
}

/**
 * INCREMENT_QUEUE_GET() -
 *
 * This macro will increment the specified index and, if the index wraps
 * to 0, toggle the cycle bit.
 */
#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
	{ \
		if ((index) + 1 == entry_count) { \
			(index) = 0; \
			(cycle) = (cycle) ^ (bit_toggle); \
		} else { \
			index = index + 1; \
		} \
	}
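
/*
 * Illustrative use of INCREMENT_QUEUE_GET() (not part of the original
 * header): advance a completion queue get index and flip the cycle bit on
 * wrap. The entry-count and cycle-bit arguments shown here are placeholders
 * for whatever values the completion processing code actually passes.
 *
 *	u32 get_index = 0, get_cycle = 0;
 *
 *	INCREMENT_QUEUE_GET(get_index, get_cycle,
 *			    COMPLETION_QUEUE_ENTRY_COUNT,	(assumed)
 *			    COMPLETION_QUEUE_CYCLE_BIT);	(assumed)
 */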

/**
 * scic_sds_controller_get_protocol_engine_group() -
 *
 * This macro returns the protocol engine group for this controller object.
 * Presently we only support protocol engine group 0 so just return that
 */
#define scic_sds_controller_get_protocol_engine_group(controller) 0

/* see scic_controller_io_tag_allocate|free for how seq and tci are built */
#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)

/* these are returned by the hardware, so sanitize them */
#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
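
/*
 * Illustrative round trip through the tag macros above (not part of the
 * original header): a tag carries the sequence number in the upper bits
 * (above bit 12) and the task context index (TCi) in the low bits, so a
 * sanitized tag can be split and rebuilt losslessly.
 *
 *	u8  seq = ISCI_TAG_SEQ(tag);
 *	u16 tci = ISCI_TAG_TCI(tag);
 *	u16 again = ISCI_TAG(seq, tci);	(equals tag for any sanitized tag)
 */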

/* expander attached sata devices require 3 rnc slots */
static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);

	if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
	    !sci_dev->is_direct_attached)
		return SCU_STP_REMOTE_NODE_COUNT;
	return SCU_SSP_REMOTE_NODE_COUNT;
}

/**
 * scic_sds_controller_set_invalid_phy() -
 *
 * This macro will set the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define scic_sds_controller_set_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))

/**
 * scic_sds_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define scic_sds_controller_clear_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
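
/*
 * Illustrative use of the invalid-phy-mask macros above (not part of the
 * original header): report an invalid link up once per phy, then clear the
 * bit when the phy later joins a port.
 *
 *	if (!((scic)->invalid_phy_mask & (1 << (sci_phy)->phy_index)))
 *		scic_sds_controller_set_invalid_phy(scic, sci_phy);
 *	...
 *	scic_sds_controller_clear_invalid_phy(scic, sci_phy);	(on link up)
 */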

static inline struct device *scic_to_dev(struct scic_sds_controller *scic)
{
	return &scic_to_ihost(scic)->pdev->dev;
}

static inline struct device *sciphy_to_dev(struct scic_sds_phy *sci_phy)
{
	struct isci_phy *iphy = sci_phy_to_iphy(sci_phy);

	if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
		return NULL;

	return &iphy->isci_port->isci_host->pdev->dev;
}

static inline struct device *sciport_to_dev(struct scic_sds_port *sci_port)
{
	struct isci_port *iport = sci_port_to_iport(sci_port);

	if (!iport || !iport->isci_host)
		return NULL;

	return &iport->isci_host->pdev->dev;
}

static inline struct device *scirdev_to_dev(struct scic_sds_remote_device *sci_dev)
{
	struct isci_remote_device *idev =
			container_of(sci_dev, typeof(*idev), sci);

	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
		return NULL;

	return &idev->isci_port->isci_host->pdev->dev;
}

enum {
	ISCI_SI_REVA0,
	ISCI_SI_REVA2,
	ISCI_SI_REVB0,
	ISCI_SI_REVC0
};

extern int isci_si_rev;

static inline bool is_a0(void)
{
	return isci_si_rev == ISCI_SI_REVA0;
}

static inline bool is_a2(void)
{
	return isci_si_rev == ISCI_SI_REVA2;
}

static inline bool is_b0(void)
{
	return isci_si_rev == ISCI_SI_REVB0;
}

static inline bool is_c0(void)
{
	return isci_si_rev > ISCI_SI_REVB0;
}

void scic_sds_controller_post_request(struct scic_sds_controller *scic,
				      u32 request);
void scic_sds_controller_release_frame(struct scic_sds_controller *scic,
				       u32 frame_index);
void scic_sds_controller_copy_sata_response(void *response_buffer,
					    void *frame_header,
					    void *frame_buffer);
enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic,
								  struct scic_sds_remote_device *sci_dev,
								  u16 *node_id);
void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id);
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id);

struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag);

struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag);

void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_up(
	struct scic_sds_controller *scic,
	struct scic_sds_port *sci_port,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_down(
	struct scic_sds_controller *scic,
	struct scic_sds_port *sci_port,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_remote_device_stopped(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev);

void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *this_request);

void scic_sds_controller_register_setup(struct scic_sds_controller *scic);

enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);

int isci_host_init(struct isci_host *);

void isci_host_init_controller_names(
	struct isci_host *isci_host,
	unsigned int controller_idx);

void isci_host_deinit(
	struct isci_host *);

void isci_host_port_link_up(
	struct isci_host *,
	struct scic_sds_port *,
	struct scic_sds_phy *);
int isci_host_dev_found(struct domain_device *);

void isci_host_remote_device_start_complete(
	struct isci_host *,
	struct isci_remote_device *,
	enum sci_status);

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic);

enum sci_status scic_controller_start_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *io_request,
	u16 io_tag);

enum sci_task_status scic_controller_start_task(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *task_request,
	u16 io_tag);

enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *request);

enum sci_status scic_controller_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *io_request);

u16 scic_controller_allocate_io_tag(
	struct scic_sds_controller *scic);

enum sci_status scic_controller_free_io_tag(
	struct scic_sds_controller *scic,
	u16 io_tag);

void scic_sds_port_configuration_agent_construct(
	struct scic_sds_port_configuration_agent *port_agent);

enum sci_status scic_sds_port_configuration_agent_initialize(
	struct scic_sds_controller *controller,
	struct scic_sds_port_configuration_agent *port_agent);
#endif