/*
 *	Functions to handle I2O memory
 *
 *	Pulled from the inlines in i2o headers and uninlined
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 */
13 #include <linux/module.h>
14 #include <linux/i2o.h>
15 #include <linux/delay.h>
16 #include <linux/string.h>
17 #include <linux/slab.h>
/* Protects our 32/64bit mask switching */
static DEFINE_MUTEX(mem_lock);
24 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
25 * @c: I2O controller for which the calculation should be done
26 * @body_size: maximum body size used for message in 32-bit words.
28 * Return the maximum number of SG elements in a SG list.
30 u16
i2o_sg_tablesize(struct i2o_controller
*c
, u16 body_size
)
32 i2o_status_block
*sb
= c
->status_block
.virt
;
34 (sb
->inbound_frame_size
- sizeof(struct i2o_message
) / 4) -
39 * for 64-bit a SG attribute element must be added and each
40 * SG element needs 12 bytes instead of 8.
47 if (c
->short_req
&& (sg_count
> 8))
52 EXPORT_SYMBOL_GPL(i2o_sg_tablesize
);
56 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
58 * @ptr: pointer to the data which should be mapped
59 * @size: size of data in bytes
60 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
61 * @sg_ptr: pointer to the SG list inside the I2O message
63 * This function does all necessary DMA handling and also writes the I2O
64 * SGL elements into the I2O message. For details on DMA handling see also
65 * dma_map_single(). The pointer sg_ptr will only be set to the end of the
66 * SG list if the allocation was successful.
68 * Returns DMA address which must be checked for failures using
69 * dma_mapping_error().
71 dma_addr_t
i2o_dma_map_single(struct i2o_controller
*c
, void *ptr
,
73 enum dma_data_direction direction
,
82 sg_flags
= 0xd4000000;
85 sg_flags
= 0xd0000000;
91 dma_addr
= dma_map_single(&c
->pdev
->dev
, ptr
, size
, direction
);
92 if (!dma_mapping_error(&c
->pdev
->dev
, dma_addr
)) {
93 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
94 if ((sizeof(dma_addr_t
) > 4) && c
->pae_support
) {
95 *mptr
++ = cpu_to_le32(0x7C020002);
96 *mptr
++ = cpu_to_le32(PAGE_SIZE
);
100 *mptr
++ = cpu_to_le32(sg_flags
| size
);
101 *mptr
++ = cpu_to_le32(i2o_dma_low(dma_addr
));
102 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
103 if ((sizeof(dma_addr_t
) > 4) && c
->pae_support
)
104 *mptr
++ = cpu_to_le32(i2o_dma_high(dma_addr
));
110 EXPORT_SYMBOL_GPL(i2o_dma_map_single
);
113 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
115 * @sg: SG list to be mapped
116 * @sg_count: number of elements in the SG list
117 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
118 * @sg_ptr: pointer to the SG list inside the I2O message
120 * This function does all necessary DMA handling and also writes the I2O
121 * SGL elements into the I2O message. For details on DMA handling see also
122 * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
123 * list if the allocation was successful.
125 * Returns 0 on failure or 1 on success.
127 int i2o_dma_map_sg(struct i2o_controller
*c
, struct scatterlist
*sg
,
128 int sg_count
, enum dma_data_direction direction
, u32
** sg_ptr
)
135 sg_flags
= 0x14000000;
137 case DMA_FROM_DEVICE
:
138 sg_flags
= 0x10000000;
144 sg_count
= dma_map_sg(&c
->pdev
->dev
, sg
, sg_count
, direction
);
148 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
149 if ((sizeof(dma_addr_t
) > 4) && c
->pae_support
) {
150 *mptr
++ = cpu_to_le32(0x7C020002);
151 *mptr
++ = cpu_to_le32(PAGE_SIZE
);
155 while (sg_count
-- > 0) {
157 sg_flags
|= 0xC0000000;
158 *mptr
++ = cpu_to_le32(sg_flags
| sg_dma_len(sg
));
159 *mptr
++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg
)));
160 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
161 if ((sizeof(dma_addr_t
) > 4) && c
->pae_support
)
162 *mptr
++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg
)));
170 EXPORT_SYMBOL_GPL(i2o_dma_map_sg
);
173 * i2o_dma_alloc - Allocate DMA memory
174 * @dev: struct device pointer to the PCI device of the I2O controller
175 * @addr: i2o_dma struct which should get the DMA buffer
176 * @len: length of the new DMA memory
178 * Allocate a coherent DMA memory and write the pointers into addr.
180 * Returns 0 on success or -ENOMEM on failure.
182 int i2o_dma_alloc(struct device
*dev
, struct i2o_dma
*addr
, size_t len
)
184 struct pci_dev
*pdev
= to_pci_dev(dev
);
187 mutex_lock(&mem_lock
);
188 if ((sizeof(dma_addr_t
) > 4) && (pdev
->dma_mask
== DMA_64BIT_MASK
)) {
190 if (pci_set_dma_mask(pdev
, DMA_32BIT_MASK
)) {
191 mutex_unlock(&mem_lock
);
196 addr
->virt
= dma_alloc_coherent(dev
, len
, &addr
->phys
, GFP_KERNEL
);
198 if ((sizeof(dma_addr_t
) > 4) && dma_64
)
199 if (pci_set_dma_mask(pdev
, DMA_64BIT_MASK
))
200 printk(KERN_WARNING
"i2o: unable to set 64-bit DMA");
201 mutex_unlock(&mem_lock
);
206 memset(addr
->virt
, 0, len
);
211 EXPORT_SYMBOL_GPL(i2o_dma_alloc
);
215 * i2o_dma_free - Free DMA memory
216 * @dev: struct device pointer to the PCI device of the I2O controller
217 * @addr: i2o_dma struct which contains the DMA buffer
219 * Free a coherent DMA memory and set virtual address of addr to NULL.
221 void i2o_dma_free(struct device
*dev
, struct i2o_dma
*addr
)
225 dma_free_coherent(dev
, addr
->len
, addr
->virt
,
232 EXPORT_SYMBOL_GPL(i2o_dma_free
);
236 * i2o_dma_realloc - Realloc DMA memory
237 * @dev: struct device pointer to the PCI device of the I2O controller
238 * @addr: pointer to a i2o_dma struct DMA buffer
239 * @len: new length of memory
241 * If there was something allocated in the addr, free it first. If len > 0
242 * than try to allocate it and write the addresses back to the addr
243 * structure. If len == 0 set the virtual address to NULL.
245 * Returns the 0 on success or negative error code on failure.
247 int i2o_dma_realloc(struct device
*dev
, struct i2o_dma
*addr
, size_t len
)
249 i2o_dma_free(dev
, addr
);
252 return i2o_dma_alloc(dev
, addr
, len
);
256 EXPORT_SYMBOL_GPL(i2o_dma_realloc
);
259 * i2o_pool_alloc - Allocate an slab cache and mempool
260 * @mempool: pointer to struct i2o_pool to write data into.
261 * @name: name which is used to identify cache
262 * @size: size of each object
263 * @min_nr: minimum number of objects
265 * First allocates a slab cache with name and size. Then allocates a
266 * mempool which uses the slab cache for allocation and freeing.
268 * Returns 0 on success or negative error code on failure.
270 int i2o_pool_alloc(struct i2o_pool
*pool
, const char *name
,
271 size_t size
, int min_nr
)
273 pool
->name
= kmalloc(strlen(name
) + 1, GFP_KERNEL
);
276 strcpy(pool
->name
, name
);
279 kmem_cache_create(pool
->name
, size
, 0, SLAB_HWCACHE_ALIGN
, NULL
);
283 pool
->mempool
= mempool_create_slab_pool(min_nr
, pool
->slab
);
290 kmem_cache_destroy(pool
->slab
);
298 EXPORT_SYMBOL_GPL(i2o_pool_alloc
);
301 * i2o_pool_free - Free slab cache and mempool again
302 * @mempool: pointer to struct i2o_pool which should be freed
304 * Note that you have to return all objects to the mempool again before
305 * calling i2o_pool_free().
307 void i2o_pool_free(struct i2o_pool
*pool
)
309 mempool_destroy(pool
->mempool
);
310 kmem_cache_destroy(pool
->slab
);
313 EXPORT_SYMBOL_GPL(i2o_pool_free
);