/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
/* Deferred-work handlers defined later in this file. */
static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);
28 /* supported MTU list */
29 const u32 fjes_support_mtu
[] = {
30 FJES_MTU_DEFINE(8 * 1024),
31 FJES_MTU_DEFINE(16 * 1024),
32 FJES_MTU_DEFINE(32 * 1024),
33 FJES_MTU_DEFINE(64 * 1024),
37 u32
fjes_hw_rd32(struct fjes_hw
*hw
, u32 reg
)
42 value
= readl(&base
[reg
]);
47 static u8
*fjes_hw_iomap(struct fjes_hw
*hw
)
51 if (!request_mem_region(hw
->hw_res
.start
, hw
->hw_res
.size
,
53 pr_err("request_mem_region failed\n");
57 base
= (u8
*)ioremap_nocache(hw
->hw_res
.start
, hw
->hw_res
.size
);
62 static void fjes_hw_iounmap(struct fjes_hw
*hw
)
65 release_mem_region(hw
->hw_res
.start
, hw
->hw_res
.size
);
68 int fjes_hw_reset(struct fjes_hw
*hw
)
75 wr32(XSCT_DCTL
, dctl
.reg
);
77 timeout
= FJES_DEVICE_RESET_TIMEOUT
* 1000;
78 dctl
.reg
= rd32(XSCT_DCTL
);
79 while ((dctl
.bits
.reset
== 1) && (timeout
> 0)) {
81 dctl
.reg
= rd32(XSCT_DCTL
);
85 return timeout
> 0 ? 0 : -EIO
;
88 static int fjes_hw_get_max_epid(struct fjes_hw
*hw
)
90 union REG_MAX_EP info
;
92 info
.reg
= rd32(XSCT_MAX_EP
);
94 return info
.bits
.maxep
;
97 static int fjes_hw_get_my_epid(struct fjes_hw
*hw
)
99 union REG_OWNER_EPID info
;
101 info
.reg
= rd32(XSCT_OWNER_EPID
);
103 return info
.bits
.epid
;
106 static int fjes_hw_alloc_shared_status_region(struct fjes_hw
*hw
)
110 size
= sizeof(struct fjes_device_shared_info
) +
111 (sizeof(u8
) * hw
->max_epid
);
112 hw
->hw_info
.share
= kzalloc(size
, GFP_KERNEL
);
113 if (!hw
->hw_info
.share
)
116 hw
->hw_info
.share
->epnum
= hw
->max_epid
;
121 static void fjes_hw_free_shared_status_region(struct fjes_hw
*hw
)
123 kfree(hw
->hw_info
.share
);
124 hw
->hw_info
.share
= NULL
;
127 static int fjes_hw_alloc_epbuf(struct epbuf_handler
*epbh
)
131 mem
= vzalloc(EP_BUFFER_SIZE
);
136 epbh
->size
= EP_BUFFER_SIZE
;
138 epbh
->info
= (union ep_buffer_info
*)mem
;
139 epbh
->ring
= (u8
*)(mem
+ sizeof(union ep_buffer_info
));
144 static void fjes_hw_free_epbuf(struct epbuf_handler
*epbh
)
154 void fjes_hw_setup_epbuf(struct epbuf_handler
*epbh
, u8
*mac_addr
, u32 mtu
)
156 union ep_buffer_info
*info
= epbh
->info
;
157 u16 vlan_id
[EP_BUFFER_SUPPORT_VLAN_MAX
];
160 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++)
161 vlan_id
[i
] = info
->v1i
.vlan_id
[i
];
163 memset(info
, 0, sizeof(union ep_buffer_info
));
165 info
->v1i
.version
= 0; /* version 0 */
167 for (i
= 0; i
< ETH_ALEN
; i
++)
168 info
->v1i
.mac_addr
[i
] = mac_addr
[i
];
173 info
->v1i
.info_size
= sizeof(union ep_buffer_info
);
174 info
->v1i
.buffer_size
= epbh
->size
- info
->v1i
.info_size
;
176 info
->v1i
.frame_max
= FJES_MTU_TO_FRAME_SIZE(mtu
);
177 info
->v1i
.count_max
=
178 EP_RING_NUM(info
->v1i
.buffer_size
, info
->v1i
.frame_max
);
180 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++)
181 info
->v1i
.vlan_id
[i
] = vlan_id
[i
];
183 info
->v1i
.rx_status
|= FJES_RX_MTU_CHANGING_DONE
;
187 fjes_hw_init_command_registers(struct fjes_hw
*hw
,
188 struct fjes_device_command_param
*param
)
190 /* Request Buffer length */
191 wr32(XSCT_REQBL
, (__le32
)(param
->req_len
));
192 /* Response Buffer Length */
193 wr32(XSCT_RESPBL
, (__le32
)(param
->res_len
));
195 /* Request Buffer Address */
197 (__le32
)(param
->req_start
& GENMASK_ULL(31, 0)));
199 (__le32
)((param
->req_start
& GENMASK_ULL(63, 32)) >> 32));
201 /* Response Buffer Address */
203 (__le32
)(param
->res_start
& GENMASK_ULL(31, 0)));
205 (__le32
)((param
->res_start
& GENMASK_ULL(63, 32)) >> 32));
207 /* Share status address */
209 (__le32
)(param
->share_start
& GENMASK_ULL(31, 0)));
211 (__le32
)((param
->share_start
& GENMASK_ULL(63, 32)) >> 32));
214 static int fjes_hw_setup(struct fjes_hw
*hw
)
216 u8 mac
[ETH_ALEN
] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
217 struct fjes_device_command_param param
;
218 struct ep_share_mem_info
*buf_pair
;
225 hw
->hw_info
.max_epid
= &hw
->max_epid
;
226 hw
->hw_info
.my_epid
= &hw
->my_epid
;
228 buf
= kcalloc(hw
->max_epid
, sizeof(struct ep_share_mem_info
),
233 hw
->ep_shm_info
= (struct ep_share_mem_info
*)buf
;
235 mem_size
= FJES_DEV_REQ_BUF_SIZE(hw
->max_epid
);
236 hw
->hw_info
.req_buf
= kzalloc(mem_size
, GFP_KERNEL
);
237 if (!(hw
->hw_info
.req_buf
))
240 hw
->hw_info
.req_buf_size
= mem_size
;
242 mem_size
= FJES_DEV_RES_BUF_SIZE(hw
->max_epid
);
243 hw
->hw_info
.res_buf
= kzalloc(mem_size
, GFP_KERNEL
);
244 if (!(hw
->hw_info
.res_buf
))
247 hw
->hw_info
.res_buf_size
= mem_size
;
249 result
= fjes_hw_alloc_shared_status_region(hw
);
253 hw
->hw_info
.buffer_share_bit
= 0;
254 hw
->hw_info
.buffer_unshare_reserve_bit
= 0;
256 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
257 if (epidx
!= hw
->my_epid
) {
258 buf_pair
= &hw
->ep_shm_info
[epidx
];
260 result
= fjes_hw_alloc_epbuf(&buf_pair
->tx
);
264 result
= fjes_hw_alloc_epbuf(&buf_pair
->rx
);
268 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
269 fjes_hw_setup_epbuf(&buf_pair
->tx
, mac
,
270 fjes_support_mtu
[0]);
271 fjes_hw_setup_epbuf(&buf_pair
->rx
, mac
,
272 fjes_support_mtu
[0]);
273 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
277 memset(¶m
, 0, sizeof(param
));
279 param
.req_len
= hw
->hw_info
.req_buf_size
;
280 param
.req_start
= __pa(hw
->hw_info
.req_buf
);
281 param
.res_len
= hw
->hw_info
.res_buf_size
;
282 param
.res_start
= __pa(hw
->hw_info
.res_buf
);
284 param
.share_start
= __pa(hw
->hw_info
.share
->ep_status
);
286 fjes_hw_init_command_registers(hw
, ¶m
);
291 static void fjes_hw_cleanup(struct fjes_hw
*hw
)
295 if (!hw
->ep_shm_info
)
298 fjes_hw_free_shared_status_region(hw
);
300 kfree(hw
->hw_info
.req_buf
);
301 hw
->hw_info
.req_buf
= NULL
;
303 kfree(hw
->hw_info
.res_buf
);
304 hw
->hw_info
.res_buf
= NULL
;
306 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
307 if (epidx
== hw
->my_epid
)
309 fjes_hw_free_epbuf(&hw
->ep_shm_info
[epidx
].tx
);
310 fjes_hw_free_epbuf(&hw
->ep_shm_info
[epidx
].rx
);
313 kfree(hw
->ep_shm_info
);
314 hw
->ep_shm_info
= NULL
;
317 int fjes_hw_init(struct fjes_hw
*hw
)
321 hw
->base
= fjes_hw_iomap(hw
);
325 ret
= fjes_hw_reset(hw
);
329 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, true);
331 INIT_WORK(&hw
->update_zone_task
, fjes_hw_update_zone_task
);
332 INIT_WORK(&hw
->epstop_task
, fjes_hw_epstop_task
);
334 mutex_init(&hw
->hw_info
.lock
);
335 spin_lock_init(&hw
->rx_status_lock
);
337 hw
->max_epid
= fjes_hw_get_max_epid(hw
);
338 hw
->my_epid
= fjes_hw_get_my_epid(hw
);
340 if ((hw
->max_epid
== 0) || (hw
->my_epid
>= hw
->max_epid
))
343 ret
= fjes_hw_setup(hw
);
348 void fjes_hw_exit(struct fjes_hw
*hw
)
353 ret
= fjes_hw_reset(hw
);
355 pr_err("%s: reset error", __func__
);
363 cancel_work_sync(&hw
->update_zone_task
);
364 cancel_work_sync(&hw
->epstop_task
);
367 static enum fjes_dev_command_response_e
368 fjes_hw_issue_request_command(struct fjes_hw
*hw
,
369 enum fjes_dev_command_request_type type
)
371 enum fjes_dev_command_response_e ret
= FJES_CMD_STATUS_UNKNOWN
;
377 cr
.bits
.req_start
= 1;
378 cr
.bits
.req_code
= type
;
379 wr32(XSCT_CR
, cr
.reg
);
380 cr
.reg
= rd32(XSCT_CR
);
382 if (cr
.bits
.error
== 0) {
383 timeout
= FJES_COMMAND_REQ_TIMEOUT
* 1000;
384 cs
.reg
= rd32(XSCT_CS
);
386 while ((cs
.bits
.complete
!= 1) && timeout
> 0) {
388 cs
.reg
= rd32(XSCT_CS
);
392 if (cs
.bits
.complete
== 1)
393 ret
= FJES_CMD_STATUS_NORMAL
;
394 else if (timeout
<= 0)
395 ret
= FJES_CMD_STATUS_TIMEOUT
;
398 switch (cr
.bits
.err_info
) {
399 case FJES_CMD_REQ_ERR_INFO_PARAM
:
400 ret
= FJES_CMD_STATUS_ERROR_PARAM
;
402 case FJES_CMD_REQ_ERR_INFO_STATUS
:
403 ret
= FJES_CMD_STATUS_ERROR_STATUS
;
406 ret
= FJES_CMD_STATUS_UNKNOWN
;
414 int fjes_hw_request_info(struct fjes_hw
*hw
)
416 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
417 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
418 enum fjes_dev_command_response_e ret
;
421 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
422 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
424 req_buf
->info
.length
= FJES_DEV_COMMAND_INFO_REQ_LEN
;
426 res_buf
->info
.length
= 0;
427 res_buf
->info
.code
= 0;
429 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_INFO
);
433 if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw
->hw_info
.max_epid
)) !=
434 res_buf
->info
.length
) {
436 } else if (ret
== FJES_CMD_STATUS_NORMAL
) {
437 switch (res_buf
->info
.code
) {
438 case FJES_CMD_REQ_RES_CODE_NORMAL
:
447 case FJES_CMD_STATUS_UNKNOWN
:
450 case FJES_CMD_STATUS_TIMEOUT
:
453 case FJES_CMD_STATUS_ERROR_PARAM
:
456 case FJES_CMD_STATUS_ERROR_STATUS
:
468 int fjes_hw_register_buff_addr(struct fjes_hw
*hw
, int dest_epid
,
469 struct ep_share_mem_info
*buf_pair
)
471 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
472 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
473 enum fjes_dev_command_response_e ret
;
480 if (test_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
))
483 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
484 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
486 req_buf
->share_buffer
.length
= FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
489 req_buf
->share_buffer
.epid
= dest_epid
;
492 req_buf
->share_buffer
.buffer
[idx
++] = buf_pair
->tx
.size
;
493 page_count
= buf_pair
->tx
.size
/ EP_BUFFER_INFO_SIZE
;
494 for (i
= 0; i
< page_count
; i
++) {
495 addr
= ((u8
*)(buf_pair
->tx
.buffer
)) +
496 (i
* EP_BUFFER_INFO_SIZE
);
497 req_buf
->share_buffer
.buffer
[idx
++] =
498 (__le64
)(page_to_phys(vmalloc_to_page(addr
)) +
499 offset_in_page(addr
));
502 req_buf
->share_buffer
.buffer
[idx
++] = buf_pair
->rx
.size
;
503 page_count
= buf_pair
->rx
.size
/ EP_BUFFER_INFO_SIZE
;
504 for (i
= 0; i
< page_count
; i
++) {
505 addr
= ((u8
*)(buf_pair
->rx
.buffer
)) +
506 (i
* EP_BUFFER_INFO_SIZE
);
507 req_buf
->share_buffer
.buffer
[idx
++] =
508 (__le64
)(page_to_phys(vmalloc_to_page(addr
)) +
509 offset_in_page(addr
));
512 res_buf
->share_buffer
.length
= 0;
513 res_buf
->share_buffer
.code
= 0;
515 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_SHARE_BUFFER
);
517 timeout
= FJES_COMMAND_REQ_BUFF_TIMEOUT
* 1000;
518 while ((ret
== FJES_CMD_STATUS_NORMAL
) &&
519 (res_buf
->share_buffer
.length
==
520 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN
) &&
521 (res_buf
->share_buffer
.code
== FJES_CMD_REQ_RES_CODE_BUSY
) &&
523 msleep(200 + hw
->my_epid
* 20);
524 timeout
-= (200 + hw
->my_epid
* 20);
526 res_buf
->share_buffer
.length
= 0;
527 res_buf
->share_buffer
.code
= 0;
529 ret
= fjes_hw_issue_request_command(
530 hw
, FJES_CMD_REQ_SHARE_BUFFER
);
535 if (res_buf
->share_buffer
.length
!=
536 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN
)
538 else if (ret
== FJES_CMD_STATUS_NORMAL
) {
539 switch (res_buf
->share_buffer
.code
) {
540 case FJES_CMD_REQ_RES_CODE_NORMAL
:
542 set_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
);
544 case FJES_CMD_REQ_RES_CODE_BUSY
:
553 case FJES_CMD_STATUS_UNKNOWN
:
556 case FJES_CMD_STATUS_TIMEOUT
:
559 case FJES_CMD_STATUS_ERROR_PARAM
:
560 case FJES_CMD_STATUS_ERROR_STATUS
:
570 int fjes_hw_unregister_buff_addr(struct fjes_hw
*hw
, int dest_epid
)
572 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
573 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
574 struct fjes_device_shared_info
*share
= hw
->hw_info
.share
;
575 enum fjes_dev_command_response_e ret
;
582 if (!req_buf
|| !res_buf
|| !share
)
585 if (!test_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
))
588 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
589 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
591 req_buf
->unshare_buffer
.length
=
592 FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN
;
593 req_buf
->unshare_buffer
.epid
= dest_epid
;
595 res_buf
->unshare_buffer
.length
= 0;
596 res_buf
->unshare_buffer
.code
= 0;
598 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_UNSHARE_BUFFER
);
600 timeout
= FJES_COMMAND_REQ_BUFF_TIMEOUT
* 1000;
601 while ((ret
== FJES_CMD_STATUS_NORMAL
) &&
602 (res_buf
->unshare_buffer
.length
==
603 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN
) &&
604 (res_buf
->unshare_buffer
.code
==
605 FJES_CMD_REQ_RES_CODE_BUSY
) &&
607 msleep(200 + hw
->my_epid
* 20);
608 timeout
-= (200 + hw
->my_epid
* 20);
610 res_buf
->unshare_buffer
.length
= 0;
611 res_buf
->unshare_buffer
.code
= 0;
614 fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_UNSHARE_BUFFER
);
619 if (res_buf
->unshare_buffer
.length
!=
620 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN
) {
622 } else if (ret
== FJES_CMD_STATUS_NORMAL
) {
623 switch (res_buf
->unshare_buffer
.code
) {
624 case FJES_CMD_REQ_RES_CODE_NORMAL
:
626 clear_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
);
628 case FJES_CMD_REQ_RES_CODE_BUSY
:
637 case FJES_CMD_STATUS_UNKNOWN
:
640 case FJES_CMD_STATUS_TIMEOUT
:
643 case FJES_CMD_STATUS_ERROR_PARAM
:
644 case FJES_CMD_STATUS_ERROR_STATUS
:
654 int fjes_hw_raise_interrupt(struct fjes_hw
*hw
, int dest_epid
,
655 enum REG_ICTL_MASK mask
)
657 u32 ig
= mask
| dest_epid
;
659 wr32(XSCT_IG
, cpu_to_le32(ig
));
664 u32
fjes_hw_capture_interrupt_status(struct fjes_hw
*hw
)
668 cur_is
= rd32(XSCT_IS
);
673 void fjes_hw_set_irqmask(struct fjes_hw
*hw
,
674 enum REG_ICTL_MASK intr_mask
, bool mask
)
677 wr32(XSCT_IMS
, intr_mask
);
679 wr32(XSCT_IMC
, intr_mask
);
682 bool fjes_hw_epid_is_same_zone(struct fjes_hw
*hw
, int epid
)
684 if (epid
>= hw
->max_epid
)
687 if ((hw
->ep_shm_info
[epid
].es_status
!=
688 FJES_ZONING_STATUS_ENABLE
) ||
689 (hw
->ep_shm_info
[hw
->my_epid
].zone
==
690 FJES_ZONING_ZONE_TYPE_NONE
))
693 return (hw
->ep_shm_info
[epid
].zone
==
694 hw
->ep_shm_info
[hw
->my_epid
].zone
);
697 int fjes_hw_epid_is_shared(struct fjes_device_shared_info
*share
,
702 if (dest_epid
< share
->epnum
)
703 value
= share
->ep_status
[dest_epid
];
708 static bool fjes_hw_epid_is_stop_requested(struct fjes_hw
*hw
, int src_epid
)
710 return test_bit(src_epid
, &hw
->txrx_stop_req_bit
);
713 static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw
*hw
, int src_epid
)
715 return (hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
&
716 FJES_RX_STOP_REQ_DONE
);
719 enum ep_partner_status
720 fjes_hw_get_partner_ep_status(struct fjes_hw
*hw
, int epid
)
722 enum ep_partner_status status
;
724 if (fjes_hw_epid_is_shared(hw
->hw_info
.share
, epid
)) {
725 if (fjes_hw_epid_is_stop_requested(hw
, epid
)) {
726 status
= EP_PARTNER_WAITING
;
728 if (fjes_hw_epid_is_stop_process_done(hw
, epid
))
729 status
= EP_PARTNER_COMPLETE
;
731 status
= EP_PARTNER_SHARED
;
734 status
= EP_PARTNER_UNSHARE
;
740 void fjes_hw_raise_epstop(struct fjes_hw
*hw
)
742 enum ep_partner_status status
;
746 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
747 if (epidx
== hw
->my_epid
)
750 status
= fjes_hw_get_partner_ep_status(hw
, epidx
);
752 case EP_PARTNER_SHARED
:
753 fjes_hw_raise_interrupt(hw
, epidx
,
754 REG_ICTL_MASK_TXRX_STOP_REQ
);
755 hw
->ep_shm_info
[epidx
].ep_stats
.send_intr_unshare
+= 1;
761 set_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
762 set_bit(epidx
, &hw
->txrx_stop_req_bit
);
764 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
765 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
|=
766 FJES_RX_STOP_REQ_REQUEST
;
767 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
771 int fjes_hw_wait_epstop(struct fjes_hw
*hw
)
773 enum ep_partner_status status
;
774 union ep_buffer_info
*info
;
778 while (hw
->hw_info
.buffer_unshare_reserve_bit
&&
779 (wait_time
< FJES_COMMAND_EPSTOP_WAIT_TIMEOUT
* 1000)) {
780 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
781 if (epidx
== hw
->my_epid
)
783 status
= fjes_hw_epid_is_shared(hw
->hw_info
.share
,
785 info
= hw
->ep_shm_info
[epidx
].rx
.info
;
787 (info
->v1i
.rx_status
&
788 FJES_RX_STOP_REQ_DONE
)) &&
790 &hw
->hw_info
.buffer_unshare_reserve_bit
)) {
792 &hw
->hw_info
.buffer_unshare_reserve_bit
);
800 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
801 if (epidx
== hw
->my_epid
)
803 if (test_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
))
805 &hw
->hw_info
.buffer_unshare_reserve_bit
);
808 return (wait_time
< FJES_COMMAND_EPSTOP_WAIT_TIMEOUT
* 1000)
812 bool fjes_hw_check_epbuf_version(struct epbuf_handler
*epbh
, u32 version
)
814 union ep_buffer_info
*info
= epbh
->info
;
816 return (info
->common
.version
== version
);
819 bool fjes_hw_check_mtu(struct epbuf_handler
*epbh
, u32 mtu
)
821 union ep_buffer_info
*info
= epbh
->info
;
823 return ((info
->v1i
.frame_max
== FJES_MTU_TO_FRAME_SIZE(mtu
)) &&
824 info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
);
827 bool fjes_hw_check_vlan_id(struct epbuf_handler
*epbh
, u16 vlan_id
)
829 union ep_buffer_info
*info
= epbh
->info
;
836 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++) {
837 if (vlan_id
== info
->v1i
.vlan_id
[i
]) {
846 bool fjes_hw_set_vlan_id(struct epbuf_handler
*epbh
, u16 vlan_id
)
848 union ep_buffer_info
*info
= epbh
->info
;
851 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++) {
852 if (info
->v1i
.vlan_id
[i
] == 0) {
853 info
->v1i
.vlan_id
[i
] = vlan_id
;
860 void fjes_hw_del_vlan_id(struct epbuf_handler
*epbh
, u16 vlan_id
)
862 union ep_buffer_info
*info
= epbh
->info
;
866 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++) {
867 if (vlan_id
== info
->v1i
.vlan_id
[i
])
868 info
->v1i
.vlan_id
[i
] = 0;
873 bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler
*epbh
)
875 union ep_buffer_info
*info
= epbh
->info
;
877 if (!(info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
))
880 if (info
->v1i
.count_max
== 0)
883 return EP_RING_EMPTY(info
->v1i
.head
, info
->v1i
.tail
,
884 info
->v1i
.count_max
);
887 void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler
*epbh
,
890 union ep_buffer_info
*info
= epbh
->info
;
891 struct esmem_frame
*ring_frame
;
894 ring_frame
= (struct esmem_frame
*)&(epbh
->ring
[EP_RING_INDEX
896 info
->v1i
.count_max
) *
897 info
->v1i
.frame_max
]);
899 *psize
= (size_t)ring_frame
->frame_size
;
901 frame
= ring_frame
->frame_data
;
906 void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler
*epbh
)
908 union ep_buffer_info
*info
= epbh
->info
;
910 if (fjes_hw_epbuf_rx_is_empty(epbh
))
913 EP_RING_INDEX_INC(epbh
->info
->v1i
.head
, info
->v1i
.count_max
);
916 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler
*epbh
,
917 void *frame
, size_t size
)
919 union ep_buffer_info
*info
= epbh
->info
;
920 struct esmem_frame
*ring_frame
;
922 if (EP_RING_FULL(info
->v1i
.head
, info
->v1i
.tail
, info
->v1i
.count_max
))
925 ring_frame
= (struct esmem_frame
*)&(epbh
->ring
[EP_RING_INDEX
927 info
->v1i
.count_max
) *
928 info
->v1i
.frame_max
]);
930 ring_frame
->frame_size
= size
;
931 memcpy((void *)(ring_frame
->frame_data
), (void *)frame
, size
);
933 EP_RING_INDEX_INC(epbh
->info
->v1i
.tail
, info
->v1i
.count_max
);
938 static void fjes_hw_update_zone_task(struct work_struct
*work
)
940 struct fjes_hw
*hw
= container_of(work
,
941 struct fjes_hw
, update_zone_task
);
943 struct my_s
{u8 es_status
; u8 zone
; } *info
;
944 union fjes_device_command_res
*res_buf
;
945 enum ep_partner_status pstatus
;
947 struct fjes_adapter
*adapter
;
948 struct net_device
*netdev
;
951 ulong unshare_bit
= 0;
958 adapter
= (struct fjes_adapter
*)hw
->back
;
959 netdev
= adapter
->netdev
;
960 res_buf
= hw
->hw_info
.res_buf
;
961 info
= (struct my_s
*)&res_buf
->info
.info
;
963 mutex_lock(&hw
->hw_info
.lock
);
965 ret
= fjes_hw_request_info(hw
);
970 if (!work_pending(&adapter
->force_close_task
)) {
971 adapter
->force_reset
= true;
972 schedule_work(&adapter
->force_close_task
);
978 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
979 if (epidx
== hw
->my_epid
) {
980 hw
->ep_shm_info
[epidx
].es_status
=
981 info
[epidx
].es_status
;
982 hw
->ep_shm_info
[epidx
].zone
=
987 pstatus
= fjes_hw_get_partner_ep_status(hw
, epidx
);
989 case EP_PARTNER_UNSHARE
:
991 if ((info
[epidx
].zone
!=
992 FJES_ZONING_ZONE_TYPE_NONE
) &&
993 (info
[epidx
].es_status
==
994 FJES_ZONING_STATUS_ENABLE
) &&
996 info
[hw
->my_epid
].zone
))
997 set_bit(epidx
, &share_bit
);
999 set_bit(epidx
, &unshare_bit
);
1002 case EP_PARTNER_COMPLETE
:
1003 case EP_PARTNER_WAITING
:
1004 if ((info
[epidx
].zone
==
1005 FJES_ZONING_ZONE_TYPE_NONE
) ||
1006 (info
[epidx
].es_status
!=
1007 FJES_ZONING_STATUS_ENABLE
) ||
1008 (info
[epidx
].zone
!=
1009 info
[hw
->my_epid
].zone
)) {
1011 &adapter
->unshare_watch_bitmask
);
1013 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1017 case EP_PARTNER_SHARED
:
1018 if ((info
[epidx
].zone
==
1019 FJES_ZONING_ZONE_TYPE_NONE
) ||
1020 (info
[epidx
].es_status
!=
1021 FJES_ZONING_STATUS_ENABLE
) ||
1022 (info
[epidx
].zone
!=
1023 info
[hw
->my_epid
].zone
))
1024 set_bit(epidx
, &irq_bit
);
1028 hw
->ep_shm_info
[epidx
].es_status
=
1029 info
[epidx
].es_status
;
1030 hw
->ep_shm_info
[epidx
].zone
= info
[epidx
].zone
;
1035 mutex_unlock(&hw
->hw_info
.lock
);
1037 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1038 if (epidx
== hw
->my_epid
)
1041 if (test_bit(epidx
, &share_bit
)) {
1042 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1043 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
1044 netdev
->dev_addr
, netdev
->mtu
);
1045 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1047 mutex_lock(&hw
->hw_info
.lock
);
1049 ret
= fjes_hw_register_buff_addr(
1050 hw
, epidx
, &hw
->ep_shm_info
[epidx
]);
1058 if (!work_pending(&adapter
->force_close_task
)) {
1059 adapter
->force_reset
= true;
1061 &adapter
->force_close_task
);
1065 mutex_unlock(&hw
->hw_info
.lock
);
1067 hw
->ep_shm_info
[epidx
].ep_stats
1068 .com_regist_buf_exec
+= 1;
1071 if (test_bit(epidx
, &unshare_bit
)) {
1072 mutex_lock(&hw
->hw_info
.lock
);
1074 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1082 if (!work_pending(&adapter
->force_close_task
)) {
1083 adapter
->force_reset
= true;
1085 &adapter
->force_close_task
);
1090 mutex_unlock(&hw
->hw_info
.lock
);
1092 hw
->ep_shm_info
[epidx
].ep_stats
1093 .com_unregist_buf_exec
+= 1;
1096 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1097 fjes_hw_setup_epbuf(
1098 &hw
->ep_shm_info
[epidx
].tx
,
1099 netdev
->dev_addr
, netdev
->mtu
);
1100 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1105 if (test_bit(epidx
, &irq_bit
)) {
1106 fjes_hw_raise_interrupt(hw
, epidx
,
1107 REG_ICTL_MASK_TXRX_STOP_REQ
);
1109 hw
->ep_shm_info
[epidx
].ep_stats
.send_intr_unshare
+= 1;
1111 set_bit(epidx
, &hw
->txrx_stop_req_bit
);
1112 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1113 hw
->ep_shm_info
[epidx
].tx
.
1114 info
->v1i
.rx_status
|=
1115 FJES_RX_STOP_REQ_REQUEST
;
1116 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1117 set_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
1121 if (irq_bit
|| adapter
->unshare_watch_bitmask
) {
1122 if (!work_pending(&adapter
->unshare_watch_task
))
1123 queue_work(adapter
->control_wq
,
1124 &adapter
->unshare_watch_task
);
1128 static void fjes_hw_epstop_task(struct work_struct
*work
)
1130 struct fjes_hw
*hw
= container_of(work
, struct fjes_hw
, epstop_task
);
1131 struct fjes_adapter
*adapter
= (struct fjes_adapter
*)hw
->back
;
1132 unsigned long flags
;
1137 while ((remain_bit
= hw
->epstop_req_bit
)) {
1138 for (epid_bit
= 0; remain_bit
; remain_bit
>>= 1, epid_bit
++) {
1139 if (remain_bit
& 1) {
1140 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1141 hw
->ep_shm_info
[epid_bit
].
1142 tx
.info
->v1i
.rx_status
|=
1143 FJES_RX_STOP_REQ_DONE
;
1144 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1147 clear_bit(epid_bit
, &hw
->epstop_req_bit
);
1149 &adapter
->unshare_watch_bitmask
);
1151 if (!work_pending(&adapter
->unshare_watch_task
))
1153 adapter
->control_wq
,
1154 &adapter
->unshare_watch_task
);