7790 Want support for XXV710
unleashed.git: usr/src/uts/common/io/i40e/core/i40e_hmc.c

/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 sd_index,
					      enum i40e_sd_entry_type type,
					      u64 direct_mode_sz)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	enum i40e_memory_type mem_type;
	bool dma_mem_alloc_done = FALSE;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = TRUE;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
				&sd_entry->u.pd_table.pd_entry_virt_mem,
				sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
		} else {
			i40e_memcpy(&sd_entry->u.bp.addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (I40E_SUCCESS != ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
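
/*
 * Usage sketch, not part of the original driver: creating a direct-mode
 * segment descriptor passes I40E_SD_TYPE_DIRECT together with the 2M
 * backing-page size. I40E_HMC_DIRECT_BP_SIZE comes from i40e_hmc.h;
 * "hmc_info" stands for a hypothetical, already-initialized HMC info
 * structure.
 *
 *	ret_code = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
 *	    I40E_SD_TYPE_DIRECT, I40E_HMC_DIRECT_BP_SIZE);
 *	if (ret_code != I40E_SUCCESS)
 *		return (ret_code);
 */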

/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary, and zeroed.
 *	2. It should be 4K in size.
 **/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 pd_index,
					      struct i40e_dma_mem *rsrc_pg)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = TRUE;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = FALSE;
		}

		i40e_memcpy(&pd_entry->bp.addr, page,
			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
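		/*
		 * The backing page is allocated with
		 * I40E_HMC_PD_BP_BUF_ALIGNMENT (4K), so the low bits of
		 * page->pa are zero and bit 0 is free to carry the page
		 * descriptor's valid flag.
		 */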
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
			    I40E_NONDMA_TO_DMA);

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = TRUE;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
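
/*
 * Illustrative sketch, not from the original source: each paged segment
 * descriptor covers I40E_HMC_PD_CNT_IN_SD (512) page descriptors, so
 * pd_index 1027 decomposes into sd_idx = 1027 / 512 = 2 and
 * rel_pd_idx = 1027 % 512 = 3. Passing a NULL rsrc_pg lets the function
 * allocate the 4K backing page itself:
 *
 *	ret_code = i40e_add_pd_table_entry(hw, hmc_info, 1027, NULL);
 */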

/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in
 *	   the sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in
 *	   FV cache.
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after
 *	   this function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = FALSE;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
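
/*
 * Note on ordering, implied by the code above rather than stated in the
 * original file: the PD slot is zeroed and invalidated through PMPDINV
 * before the backing DMA memory is released, so the hardware never holds
 * a reference to freed memory. A caller-owned rsrc_pg page is left for
 * the caller to free.
 */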

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
					     u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = FALSE;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
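
/*
 * Hypothetical caller sketch: i40e_prep_remove_sd_bp() drops the backing
 * page ref count and returns I40E_ERR_NOT_READY while the page is still
 * referenced; only once it succeeds should a PF clear the SD entry and
 * free the memory.
 *
 *	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
 *		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
 */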

/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from a sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = FALSE;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from a sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
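
/*
 * The paged variant pairs up the same way as the direct-mode functions
 * above; a hypothetical caller sketch:
 *
 *	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
 *		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
 */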