Staging: Add ServerEngines benet 10Gb ethernet driver
[linux-2.6/mini2440.git] / drivers / staging / benet / hwlib / hwlib.h
blob 0cffe8fe774a7c828b0c6d4d338079a922be234f
/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#ifndef __hwlib_h__
#define __hwlib_h__
#include <linux/module.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "regmap.h"		/* srcgen array map output */

#include "asyncmesg.h"
#include "fwcmd_opcodes.h"
#include "post_codes.h"
#include "fwcmd_mcc.h"

#include "fwcmd_types_bmap.h"
#include "fwcmd_common_bmap.h"
#include "fwcmd_eth_bmap.h"
#include "bestatus.h"
/*
 * Macros for reading/writing a protection domain or CSR registers
 * in BladeEngine.
 */
#define PD_READ(_fo_, _field_)	\
	ioread32((_fo_)->db_va + \
		AMAP_BYTE_OFFSET(PROTECTION_DOMAIN_DBMAP, _field_))

#define PD_WRITE(_fo_, _field_, _value_) \
	iowrite32((_value_), (_fo_)->db_va + \
		AMAP_BYTE_OFFSET(PROTECTION_DOMAIN_DBMAP, _field_))

#define CSR_READ(_fo_, _field_)	\
	ioread32((_fo_)->csr_va + \
		AMAP_BYTE_OFFSET(BLADE_ENGINE_CSRMAP, _field_))

#define CSR_WRITE(_fo_, _field_, _value_) \
	iowrite32((_value_), (_fo_)->csr_va + \
		AMAP_BYTE_OFFSET(BLADE_ENGINE_CSRMAP, _field_))

#define PCICFG0_READ(_fo_, _field_) \
	ioread32((_fo_)->pci_va + \
		AMAP_BYTE_OFFSET(PCICFG0_CSRMAP, _field_))

#define PCICFG0_WRITE(_fo_, _field_, _value_) \
	iowrite32((_value_), (_fo_)->pci_va + \
		AMAP_BYTE_OFFSET(PCICFG0_CSRMAP, _field_))

#define PCICFG1_READ(_fo_, _field_) \
	ioread32((_fo_)->pci_va + \
		AMAP_BYTE_OFFSET(PCICFG1_CSRMAP, _field_))

#define PCICFG1_WRITE(_fo_, _field_, _value_) \
	iowrite32((_value_), (_fo_)->pci_va + \
		AMAP_BYTE_OFFSET(PCICFG1_CSRMAP, _field_))
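/*
 * Example (a sketch only; "cq_db" is a hypothetical field name from the
 * generated PROTECTION_DOMAIN_DBMAP register map): ring a protection
 * domain doorbell and read it back through the mapped BAR.
 *
 *	PD_WRITE(pfob, cq_db, doorbell_value);
 *	val = PD_READ(pfob, cq_db);
 */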
#ifdef BE_DEBUG
#define ASSERT(c) BUG_ON(!(c));
#else
#define ASSERT(c)
#endif
/* debug levels */
enum BE_DEBUG_LEVELS {
	DL_ALWAYS = 0,		/* cannot be masked */
	DL_ERR = 0x1,		/* errors that should never happen */
	DL_WARN = 0x2,		/* something questionable.
				   recoverable errors */
	DL_NOTE = 0x4,		/* infrequent, important debug info */
	DL_INFO = 0x8,		/* debug information */
	DL_VERBOSE = 0x10,	/* detailed info, such as buffer traces */
	BE_DL_MIN_VALUE = 0x1,	/* this is the min value used */
	BE_DL_MAX_VALUE = 0x80	/* this is the highest value used */
};
extern unsigned int trace_level;

#define TRACE(lm, fmt, args...)  {				\
		if (trace_level & lm) {				\
			printk(KERN_NOTICE "BE: %s:%d \n" fmt,	\
			__FILE__ , __LINE__ , ## args);		\
		}						\
	}

static inline unsigned int be_trace_set_level(unsigned int level)
{
	unsigned int old_level = trace_level;
	trace_level = level;
	return old_level;
}

#define be_trace_get_level()	trace_level
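/*
 * Example (illustrative): temporarily widen tracing around a suspect
 * path, then restore the caller's level.
 *
 *	unsigned int old_level = be_trace_set_level(DL_ERR | DL_INFO);
 *	TRACE(DL_INFO, "posted %d WRBs", num_posted);
 *	be_trace_set_level(old_level);
 */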
/*
 * Returns number of pages spanned by the size of data
 * starting at the given address.
 */
#define PAGES_SPANNED(_address, _size) \
	((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
		(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
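/*
 * Worked example with 4K pages: a buffer starting 0x10 bytes into a page
 * with _size == PAGE_SIZE spans two pages, since
 * (0x10 + 4096 + 4095) >> 12 == 2.
 */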
/*
 * circular subtract.
 * Returns a - b assuming a circular number system, where a and b are
 * in range (0, max-1). If a == b, zero is returned so the
 * highest value possible with this subtraction is max-1.
 */
static inline u32 be_subc(u32 a, u32 b, u32 max)
{
	ASSERT(a <= max && b <= max);
	ASSERT(max > 0);
	return a >= b ? (a - b) : (max - b + a);
}

/* circular add; wraps a + b back into the range (0, max-1) */
static inline u32 be_addc(u32 a, u32 b, u32 max)
{
	ASSERT(a < max);
	ASSERT(max > 0);
	return (max - a > b) ? (a + b) : (b + a - max);
}
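/*
 * Worked example for a 4-entry ring (max == 4): be_addc(3, 2, 4) == 1
 * and be_subc(1, 3, 4) == 2, i.e. producer/consumer indices wrap
 * around instead of overflowing.
 */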
/* descriptor for a physically contiguous memory used for ring */
struct ring_desc {
	u32 length;	/* length in bytes */
	void *va;	/* virtual address */
	u64 pa;		/* bus address */
};
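/*
 * Example (a sketch; assumes cpu_addr/dma_handle came from a coherent
 * DMA allocation made elsewhere by the caller):
 *
 *	struct ring_desc rd = {
 *		.length = ring_bytes,
 *		.va = cpu_addr,
 *		.pa = dma_handle,
 *	};
 */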
/*
 * This structure stores information about a ring shared between hardware
 * and software. Each ring is allocated by the driver in the uncached
 * extension and mapped into BladeEngine's unified table.
 */
struct mp_ring {
	u32 pages;	/* queue size in pages */
	u32 id;		/* queue id assigned by beklib */
	u32 num;	/* number of elements in queue */
	u32 cidx;	/* consumer index */
	u32 pidx;	/* producer index -- not used by most rings */
	u32 itemSize;	/* size in bytes of one object */

	void *va;	/* The virtual address of the ring.
			   This should be last to allow 32 & 64
			   bit debugger extensions to work. */
};
/*----------- amap bit field get / set macros and functions -----------*/
/*
 * Structures defined in the map header files (under fw/amap/) with names
 * in the format BE_<name>_AMAP are pseudo structures with members
 * of type u8. These structures are templates that are used in
 * conjunction with the structures with names in the format
 * <name>_AMAP to calculate the bit masks and bit offsets to get or set
 * bit fields in structures. The structures <name>_AMAP are arrays
 * of 32-bit words and have the correct size. The following macros
 * provide convenient ways to get and set the various members
 * in the structures without using structures with bit fields.
 * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR
 * macros to extract and set various members.
 */
/*
 * Returns a bit mask for the register that is NOT shifted into location.
 * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
 */
static inline u32 amap_mask(u32 bit_size)
{
	return bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1;
}

#define AMAP_BIT_MASK(_struct_, _register_) \
	amap_mask(AMAP_BIT_SIZE(_struct_, _register_))
/*
 * Non-optimized set bits function. First clears the bits and then assigns them.
 * This does not require knowledge of the particular DWORD you are setting.
 * e.g. AMAP_SET_BITS_PTR(struct, field1, &contextMemory, 123);
 */
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *)ptr;
	*(dw + dw_offset) &= ~(mask << offset);
	*(dw + dw_offset) |= (mask & value) << offset;
}

#define AMAP_SET_BITS_PTR(_struct_, _register_, _structPtr_, _value_)	\
	amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, _register_),	\
		AMAP_BIT_MASK(_struct_, _register_),			\
		AMAP_BIT_OFFSET(_struct_, _register_), _value_)
/*
 * Non-optimized routine that gets the bits without knowing the correct DWORD.
 * e.g. fieldValue = AMAP_GET_BITS_PTR(struct, field1, &contextMemory);
 */
static inline u32
amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *)ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS_PTR(_struct_, _register_, _structPtr_)		\
	amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, _register_),	\
		AMAP_BIT_MASK(_struct_, _register_),			\
		AMAP_BIT_OFFSET(_struct_, _register_))
/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
#define AMAP_BIT_OFFSET(_struct_, _register_) \
	(((size_t)&(((struct BE_ ## _struct_ ## _AMAP *)0)->_register_)) % 32)

/* Returns 0-n representing byte offset of bitfield within the structure. */
#define AMAP_BYTE_OFFSET(_struct_, _register_) \
	(((size_t)&(((struct BE_ ## _struct_ ## _AMAP *)0)->_register_)) / 8)

/* Returns 0-n representing DWORD offset of bitfield within the structure. */
#define AMAP_WORD_OFFSET(_struct_, _register_) \
	(AMAP_BYTE_OFFSET(_struct_, _register_) / 4)
/*
 * Gets a pointer to a field within a structure.
 * The field must be byte aligned.
 */
#define AMAP_GET_PTR(_struct_, _register_, _structPtr_) \
	(void *) ((u8 *)(_structPtr_) + AMAP_BYTE_OFFSET(_struct_, _register_))

/* Returns size of bitfield in bits. */
#define AMAP_BIT_SIZE(_struct_, _register_) \
	sizeof(((struct BE_ ## _struct_ ## _AMAP *)0)->_register_)

/* Returns size of the AMAP structure in bytes. */
#define AMAP_BYTE_SIZE(_struct_) (sizeof(struct BE_ ## _struct_ ## _AMAP)/8)

/* Returns size of the AMAP structure in DWORDS. */
#define AMAP_WORD_SIZE(_struct_) (AMAP_BYTE_SIZE(_struct_)/4)
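/*
 * Example (hypothetical struct/field names; real names come from the
 * generated map headers): set a bit field in a context buffer and read
 * it back without declaring C bit fields.
 *
 *	AMAP_SET_BITS_PTR(ETH_WRB, crc, ctx_va, 1);
 *	crc_enabled = AMAP_GET_BITS_PTR(ETH_WRB, crc, ctx_va);
 */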
struct be_mcc_wrb_response_copy {
	u16 length;		/* bytes in response */
	u16 fwcmd_offset;	/* offset within the wrb of the response */
	void *va;		/* user's va to copy response into */
};
typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
				struct MCC_WRB_AMAP *optional_wrb);

struct be_mcc_wrb_context {
	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
						   completion */
	void *internal_cb_context;	/* Parameter to pass
					   to completion function */

	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
	void *cb_context;	/* Parameter to pass to completion function */

	int *users_final_status;	/* pointer to a local
					   variable for synchronous
					   commands */
	struct MCC_WRB_AMAP *wrb;	/* pointer to original wrb for embedded
					   commands only */
	struct list_head next;	/* links context structs together in
				   free list */

	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
						   embedded response to user's va */

#if defined(BE_DEBUG)
	u16 subsystem, opcode;	/* Track this FWCMD for debug builds. */
	struct MCC_WRB_AMAP *ring_wrb;
	u32 consumed_count;
#endif
};
/*
 * Represents a function object for network or storage. This
 * is used to manage per-function resources like MCC CQs, etc.
 */
struct be_function_object {
	u32 magic;		/*!< magic for detecting memory corruption. */

	/* PCI BAR mapped addresses */
	u8 __iomem *csr_va;	/* CSR */
	u8 __iomem *db_va;	/* Door Bell */
	u8 __iomem *pci_va;	/* PCI config space */
	u32 emulate;		/* if set, MPU is not available.
				   Emulate everything. */
	u32 pend_queue_driving;	/* if set, drive the queued WRBs
				   after releasing the WRB lock */

	spinlock_t post_lock;	/* lock for verifying one thread posting wrbs */
	spinlock_t cq_lock;	/* lock for verifying one thread
				   processing cq */
	spinlock_t mcc_context_lock;	/* lock for protecting mcc
					   context free list */
	unsigned long post_irq;
	unsigned long cq_irq;

	u32 type;
	u32 pci_function_number;

	struct be_mcc_object *mcc;	/* mcc rings. */

	struct {
		struct MCC_MAILBOX_AMAP *va;	/* VA to the mailbox */
		u64 pa;				/* PA to the mailbox */
		u32 length;			/* byte length of mailbox */

		/* One default context struct used for posting at
		 * least one MCC_WRB
		 */
		struct be_mcc_wrb_context default_context;
		bool default_context_allocated;
	} mailbox;

	struct {
		/* Wake-on-LAN configuration. */
		u32 wol_bitmask;	/* bits 0,1,2,3 are set if
					   corresponding index is enabled */
	} config;

	struct BE_FIRMWARE_CONFIG fw_config;
};
/*
 * Represents an Event Queue.
 */
struct be_eq_object {
	u32 magic;
	atomic_t ref_count;

	struct be_function_object *parent_function;

	struct list_head eq_list;
	struct list_head cq_list_head;

	u32 eq_id;
	void *cb_context;
};
/*
 * Manages a completion queue.
 */
struct be_cq_object {
	u32 magic;
	atomic_t ref_count;

	struct be_function_object *parent_function;
	struct be_eq_object *eq_object;

	struct list_head cq_list;
	struct list_head cqlist_for_eq;

	void *va;
	u32 num_entries;

	void *cb_context;

	u32 cq_id;
};
/*
 * Manages an ethernet send queue.
 */
struct be_ethsq_object {
	u32 magic;

	struct list_head list;

	struct be_function_object *parent_function;
	struct be_cq_object *cq_object;
	u32 bid;
};
/*
 * @brief
 * Manages an ethernet receive queue.
 */
struct be_ethrq_object {
	u32 magic;
	struct list_head list;
	struct be_function_object *parent_function;
	u32 rid;
	struct be_cq_object *cq_object;
	struct be_cq_object *rss_cq_object[4];
};
/*
 * Manages an MCC.
 */
typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
				void *event);
struct be_mcc_object {
	u32 magic;

	struct be_function_object *parent_function;
	struct list_head mcc_list;

	struct be_cq_object *cq_object;

	/* Async event callback for MCC CQ. */
	mcc_async_event_callback async_cb;
	void *async_context;

	struct {
		struct be_mcc_wrb_context *base;
		u32 num;
		struct list_head list_head;
	} wrb_context;

	struct {
		struct ring_desc *rd;
		struct mp_ring ring;
	} sq;

	struct {
		struct mp_ring ring;
	} cq;

	u32 processing;		/* flag indicating that one thread
				   is processing CQ */
	u32 rearm;		/* doorbell rearm setting to make sure
				   the active processing thread rearms the
				   CQ if any of the threads requested it. */

	struct list_head backlog;
	u32 backlog_length;
	u32 driving_backlog;
	u32 consumed_index;
};
/* Queue context header -- the required software information for
 * queueing a WRB.
 */
struct be_queue_driver_context {
	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
						   completion */
	void *internal_cb_context;	/* Parameter to pass
					   to completion function */

	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
	void *cb_context;	/* Parameter to pass to completion function */

	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
						   embedded response to user's va */
	void *optional_fwcmd_va;
	struct list_head list;
	u32 bytes;
};
/*
 * Common MCC WRB header that all commands require.
 */
struct be_mcc_wrb_header {
	u8 rsvd[AMAP_BYTE_OFFSET(MCC_WRB, payload)];
};
/*
 * All non-embedded commands supported by hwlib functions only allow
 * 1 SGE. This queue context handles them all.
 */
struct be_nonembedded_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct MCC_SGE_AMAP sge[1];
};
/*
 * ------------------------------------------------------------------------
 * This section contains the specific queue struct for each command.
 * The user could always provide a be_generic_q_ctxt but this is a
 * rather large struct. By using the specific struct, memory consumption
 * can be reduced.
 * ------------------------------------------------------------------------
 */
struct be_link_status_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
};

struct be_multicast_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
};

struct be_vlan_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
};

struct be_promiscuous_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_ETH_PROMISCUOUS fwcmd;
};

struct be_force_failover_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
};

struct be_rxf_filter_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
};

struct be_eq_modify_delay_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
};
/*
 * The generic context is the largest size that would be required.
 * It is the software context plus an entire WRB.
 */
struct be_generic_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct MCC_WRB_PAYLOAD_AMAP payload;
};
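/*
 * Example (a sketch): a caller that only issues a VLAN configuration
 * command can pass the small struct be_vlan_q_ctxt instead of the
 * worst-case struct be_generic_q_ctxt, saving the size difference
 * per outstanding command.
 *
 *	struct be_vlan_q_ctxt q_ctxt;
 *	...
 *	be_rxf_vlan_config(pfob, false, num, tags, NULL, NULL, &q_ctxt);
 */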
/*
 * Types for the BE_QUEUE_CONTEXT object.
 */
#define BE_QUEUE_INVALID	(0)
#define BE_QUEUE_LINK_STATUS	(0xA006)
#define BE_QUEUE_ETH_STATS	(0xA007)
#define BE_QUEUE_TPM_STATS	(0xA008)
#define BE_QUEUE_TCP_STATS	(0xA009)
#define BE_QUEUE_MULTICAST	(0xA00A)
#define BE_QUEUE_VLAN		(0xA00B)
#define BE_QUEUE_RSS		(0xA00C)
#define BE_QUEUE_FORCE_FAILOVER	(0xA00D)
#define BE_QUEUE_PROMISCUOUS	(0xA00E)
#define BE_QUEUE_WAKE_ON_LAN	(0xA00F)
#define BE_QUEUE_NOP		(0xA010)
/* --- BE_FUNCTION_ENUM --- */
#define BE_FUNCTION_TYPE_ISCSI		(0)
#define BE_FUNCTION_TYPE_NETWORK	(1)
#define BE_FUNCTION_TYPE_ARM		(2)

/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
#define BE_ETH_TX_RING_TYPE_FORWARDING	(1)	/* Ether ring for forwarding */
#define BE_ETH_TX_RING_TYPE_STANDARD	(2)	/* Ether ring for sending
						   network packets. */
#define BE_ETH_TX_RING_TYPE_BOUND	(3)	/* Ethernet ring for sending
						   network packets, bound
						   to a physical port. */
/*
 * ----------------------------------------------------------------------
 * API MACROS
 * ----------------------------------------------------------------------
 */
#define BE_FWCMD_NAME(_short_name_)	struct FWCMD_##_short_name_
#define BE_OPCODE_NAME(_short_name_)	OPCODE_##_short_name_
#define BE_SUBSYSTEM_NAME(_short_name_)	SUBSYSTEM_##_short_name_


#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_)	\
	((BE_FWCMD_NAME(_short_name_) *)				\
	be_function_prepare_embedded_fwcmd(_pfob_, _wrb_,	\
		sizeof(BE_FWCMD_NAME(_short_name_)),		\
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
		BE_OPCODE_NAME(_short_name_),				\
		BE_SUBSYSTEM_NAME(_short_name_)))

#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
	((BE_FWCMD_NAME(_short_name_) *)				\
	be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
		sizeof(BE_FWCMD_NAME(_short_name_)),		\
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
		BE_OPCODE_NAME(_short_name_),				\
		BE_SUBSYSTEM_NAME(_short_name_)))
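/*
 * Example (a sketch; error handling omitted): embed a link-status query
 * FWCMD in the next free WRB. The short name expands to the matching
 * FWCMD_/OPCODE_/SUBSYSTEM_ identifiers via the macros above.
 *
 *	struct MCC_WRB_AMAP *wrb = be_function_peek_mcc_wrb(pfob);
 *	struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY *fwcmd =
 *		BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb,
 *					  COMMON_NTWK_LINK_STATUS_QUERY);
 */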
int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
	u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
	struct be_function_object *pfob);

int be_function_object_destroy(struct be_function_object *pfob);
int be_function_cleanup(struct be_function_object *pfob);


int be_function_get_fw_version(struct be_function_object *pfob,
	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
	mcc_wrb_cqe_callback cb, void *cb_context);


int be_eq_modify_delay(struct be_function_object *pfob,
	u32 num_eq, struct be_eq_object **eq_array,
	u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
	void *cb_context,
	struct be_eq_modify_delay_q_ctxt *q_ctxt);


int be_eq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 eqe_size, u32 num_entries,
	u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);

int be_eq_destroy(struct be_eq_object *eq);

int be_cq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length,
	bool solicited_eventable, bool no_delay,
	u32 wm_thresh, struct be_eq_object *eq_object,
	struct be_cq_object *cq_object);

int be_cq_destroy(struct be_cq_object *cq);

int be_mcc_ring_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length,
	struct be_mcc_wrb_context *context_array,
	u32 num_context_entries,
	struct be_cq_object *cq, struct be_mcc_object *mcc);
int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);

int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);

int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
	mcc_async_event_callback cb, void *cb_context);

int be_pci_soft_reset(struct be_function_object *pfob);


int be_drive_POST(struct be_function_object *pfob);


int be_eth_sq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length_in_bytes,
	u32 type, u32 ulp, struct be_cq_object *cq_object,
	struct be_ethsq_object *eth_sq);

struct be_eth_sq_parameters {
	u32 port;
	u32 rsvd0[2];
};

int be_eth_sq_create_ex(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length_in_bytes,
	u32 type, u32 ulp, struct be_cq_object *cq_object,
	struct be_eth_sq_parameters *ex_parameters,
	struct be_ethsq_object *eth_sq);
int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);

int be_eth_set_flow_control(struct be_function_object *pfob,
	bool txfc_enable, bool rxfc_enable);

int be_eth_get_flow_control(struct be_function_object *pfob,
	bool *txfc_enable, bool *rxfc_enable);
int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);

int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);

int be_eth_set_frame_size(struct be_function_object *pfob,
	u32 *tx_frame_size, u32 *rx_frame_size);

int be_eth_rq_create(struct be_function_object *pfob,
	struct ring_desc *rd, struct be_cq_object *cq_object,
	struct be_cq_object *bcmc_cq_object,
	struct be_ethrq_object *eth_rq);

int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);

int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
	mcc_wrb_cqe_callback cb, void *cb_context);
int be_eth_rq_set_frag_size(struct be_function_object *pfob,
	u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
int be_eth_rq_get_frag_size(struct be_function_object *pfob,
	u32 *frag_size_bytes);

void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb,
	u32 payload_length, u32 request_length,
	u32 response_length, u32 opcode, u32 subsystem);
void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
	u32 payload_length, u32 request_length, u32 response_length,
	u32 opcode, u32 subsystem);


struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob);

int be_rxf_mac_address_read_write(struct be_function_object *pfob,
	bool port1, bool mac1, bool mgmt,
	bool write, bool permanent, u8 *mac_address,
	mcc_wrb_cqe_callback cb,
	void *cb_context);

int be_rxf_multicast_config(struct be_function_object *pfob,
	bool promiscuous, u32 num, u8 *mac_table,
	mcc_wrb_cqe_callback cb,
	void *cb_context,
	struct be_multicast_q_ctxt *q_ctxt);

int be_rxf_vlan_config(struct be_function_object *pfob,
	bool promiscuous, u32 num, u16 *vlan_tag_array,
	mcc_wrb_cqe_callback cb, void *cb_context,
	struct be_vlan_q_ctxt *q_ctxt);


int be_rxf_link_status(struct be_function_object *pfob,
	struct BE_LINK_STATUS *link_status,
	mcc_wrb_cqe_callback cb,
	void *cb_context,
	struct be_link_status_q_ctxt *q_ctxt);


int be_rxf_query_eth_statistics(struct be_function_object *pfob,
	struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
	u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
	void *cb_context,
	struct be_nonembedded_q_ctxt *q_ctxt);

int be_rxf_promiscuous(struct be_function_object *pfob,
	bool enable_port0, bool enable_port1,
	mcc_wrb_cqe_callback cb, void *cb_context,
	struct be_promiscuous_q_ctxt *q_ctxt);


int be_rxf_filter_config(struct be_function_object *pfob,
	struct NTWK_RX_FILTER_SETTINGS *settings,
	mcc_wrb_cqe_callback cb,
	void *cb_context,
	struct be_rxf_filter_q_ctxt *q_ctxt);
/*
 * ------------------------------------------------------
 * internal functions used by hwlib
 * ------------------------------------------------------
 */

int be_function_ring_destroy(struct be_function_object *pfob,
	u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
	void *cb_context,
	mcc_wrb_cqe_callback internal_cb,
	void *internal_callback_context);

int be_function_post_mcc_wrb(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb,
	struct be_generic_q_ctxt *q_ctxt,
	mcc_wrb_cqe_callback cb, void *cb_context,
	mcc_wrb_cqe_callback internal_cb,
	void *internal_cb_context, void *optional_fwcmd_va,
	struct be_mcc_wrb_response_copy *response_copy);

int be_function_queue_mcc_wrb(struct be_function_object *pfob,
	struct be_generic_q_ctxt *q_ctxt);
/*
 * ------------------------------------------------------
 * MCC QUEUE
 * ------------------------------------------------------
 */

int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);


struct MCC_WRB_AMAP *
_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);

struct be_mcc_wrb_context *
_be_mcc_allocate_wrb_context(struct be_function_object *pfob);

void _be_mcc_free_wrb_context(struct be_function_object *pfob,
	struct be_mcc_wrb_context *context);

int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);

int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
	struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);

void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
/*
 * ------------------------------------------------------
 * Ring Sizes
 * ------------------------------------------------------
 */
static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
{
	ASSERT(encoding != 1);	/* 1 is rsvd */
	ASSERT(encoding < 16);
	ASSERT(object_size > 0);

	if (encoding == 0)	/* 32k deep */
		encoding = 16;

	return (1 << (encoding - 1)) * object_size;
}

static inline
u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
{
	u32 count, encoding;

	ASSERT(object_size > 0);
	ASSERT(length_in_bytes % object_size == 0);

	count = length_in_bytes / object_size;

	ASSERT(count > 1);
	ASSERT(count <= 32 * 1024);
	ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */

	encoding = __ilog2_u32(count) + 1;

	if (encoding == 16)
		encoding = 0;	/* 32k deep */

	return encoding;
}
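/*
 * Worked round trip: a ring of 256 sixteen-byte objects is 4096 bytes;
 * be_ring_length_to_encoding(4096, 16) returns __ilog2_u32(256) + 1 == 9,
 * and be_ring_encoding_to_length(9, 16) gives (1 << 8) * 16 == 4096.
 */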
void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
	u32 max_num);

#endif /* __hwlib_h__ */