#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/atomic.h>
struct fw_iso_context;
/* bitfields within the PHY registers */
#define PHY_LINK_ACTIVE         0x80
#define PHY_CONTENDER           0x40
#define PHY_BUS_RESET           0x40
#define PHY_BUS_SHORT_RESET     0x40

#define BANDWIDTH_AVAILABLE_INITIAL     4915
#define BROADCAST_CHANNEL_INITIAL       (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID         (1 << 30)
struct fw_card_driver {
        /*
         * Enable the given card with the given initial config rom.
         * This function is expected to activate the card, and either
         * enable the PHY or set the link_on bit and initiate a bus
         * reset.
         */
        int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
        int (*update_phy_reg)(struct fw_card *card, int address,
                              int clear_bits, int set_bits);
        /*
         * Update the config rom for an enabled card.  This function
         * should change the config rom that is presented on the bus
         * and initiate a bus reset.
         */
        int (*set_config_rom)(struct fw_card *card,
                              u32 *config_rom, size_t length);
        void (*send_request)(struct fw_card *card, struct fw_packet *packet);
        void (*send_response)(struct fw_card *card, struct fw_packet *packet);
        /* Calling cancel is valid once a packet has been submitted. */
        int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
        /*
         * Allow the specified node ID to do direct DMA out of and into
         * host memory.  The card will disable this for all nodes when
         * a bus reset happens, so the driver needs to re-enable it after
         * each bus reset.  Returns 0 on success, -ENODEV if the card
         * doesn't support this, -ESTALE if the generation doesn't
         * match.
         */
        int (*enable_phys_dma)(struct fw_card *card,
                               int node_id, int generation);
        u64 (*get_bus_time)(struct fw_card *card);
        struct fw_iso_context *
        (*allocate_iso_context)(struct fw_card *card,
                                int type, int channel, size_t header_size);
        void (*free_iso_context)(struct fw_iso_context *ctx);
        int (*start_iso)(struct fw_iso_context *ctx,
                         s32 cycle, u32 sync, u32 tags);
        int (*queue_iso)(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload);
        int (*stop_iso)(struct fw_iso_context *ctx);
};
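
/*
 * Illustrative sketch (not a definition from this header): a link-layer
 * backend typically fills in these hooks in a static instance and hands it
 * to fw_card_initialize() below.  The member names are the real ones from
 * struct fw_card_driver; the my_ohci_* functions are hypothetical stand-ins
 * for a driver's own implementations.
 *
 *	static const struct fw_card_driver my_ohci_driver = {
 *		.enable			= my_ohci_enable,
 *		.update_phy_reg		= my_ohci_update_phy_reg,
 *		.set_config_rom		= my_ohci_set_config_rom,
 *		.send_request		= my_ohci_send_request,
 *		.send_response		= my_ohci_send_response,
 *		.cancel_packet		= my_ohci_cancel_packet,
 *		.enable_phys_dma	= my_ohci_enable_phys_dma,
 *	};
 */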
void fw_card_initialize(struct fw_card *card,
                        const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
                u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(u32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
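
/*
 * Illustrative usage only: a typical card driver registers with the core at
 * probe time and unregisters on removal.  "my_card", "my_ohci_driver",
 * "pdev", "max_receive", "link_speed" and "guid" are hypothetical
 * driver-local names, not part of this header.
 *
 *	fw_card_initialize(&my_card, &my_ohci_driver, &pdev->dev);
 *	err = fw_card_add(&my_card, max_receive, link_speed, guid);
 *	if (err < 0)
 *		return err;
 *
 * and on removal:
 *
 *	fw_core_remove_card(&my_card);
 */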
extern const struct file_operations fw_device_ops;
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;
struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
                            bool allocate, __be32 buffer[2]);
        FW_NODE_INITIATED_RESET,
        u8 initiated_reset:1;

        u8 phy_speed:2;  /* As in the self ID packet. */
        u8 max_speed:2;  /* Minimum of all phy-speeds on the path from the
                          * local node to this node. */
        u8 max_depth:4;  /* Maximum depth to any leaf node */
        u8 max_hops:4;   /* Max hops in this sub tree */

        /* For serializing node topology into a list. */
        struct list_head link;

        /* Upper layer specific data. */

        struct fw_node *ports[0];
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
        atomic_inc(&node->ref_count);

        return node;
}
static inline void fw_node_put(struct fw_node *node)
{
        if (atomic_dec_and_test(&node->ref_count))
                kfree(node);
}
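
/*
 * Illustrative use of the reference counting helpers above: code that keeps
 * a node pointer beyond the current topology update takes its own reference
 * and drops it when done ("my_ref" is just an example variable, not defined
 * here):
 *
 *	struct fw_node *my_ref = fw_node_get(node);
 *	...
 *	fw_node_put(my_ref);
 */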
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
                              int generation, int self_id_count, u32 *self_ids);
void fw_destroy_nodes(struct fw_card *card);
/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
        return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
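
/*
 * For example, is_next_generation(0, 255) is true because the 8-bit
 * generation counter wraps from 255 back to 0, whereas
 * is_next_generation(2, 0) is false since a generation was skipped.
 */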
#define TCODE_IS_READ_REQUEST(tcode)    (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)    (((tcode) &  1) != 0)
#define TCODE_IS_REQUEST(tcode)         (((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)        (((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)   (((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)  (((tcode) & 12) != 0)
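
/*
 * Worked example: for a block read request (tcode 0x5), TCODE_IS_READ_REQUEST,
 * TCODE_IS_BLOCK_PACKET and TCODE_IS_REQUEST evaluate to true, while
 * TCODE_IS_RESPONSE and TCODE_HAS_REQUEST_DATA are false (a read request
 * carries no data payload of its own).
 */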
#define LOCAL_BUS 0xffc0
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
                      int rcode, void *payload, size_t length);
void fw_flush_transactions(struct fw_card *card);
void fw_send_phy_config(struct fw_card *card,
                        int node_id, int generation, int gap_count);
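
/*
 * Illustrative call (the variable names are hypothetical): bus management
 * code that has chosen a root node and a gap count for the current
 * generation announces them in a PHY configuration packet, roughly:
 *
 *	fw_send_phy_config(card, new_root_id, generation, gap_count);
 */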
#endif /* _FIREWIRE_CORE_H */