#------------------------------------------------------------------------------
# ConvertAsm.py: Automatically generated from CopyMem.asm
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2009, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#  VOID *
#  EFIAPI
#  InternalMemCopyMem (
#    IN VOID   *Destination,
#    IN VOID   *Source,
#    IN UINTN  Count
#    )
#------------------------------------------------------------------------------
36 ASM_GLOBAL ASM_PFX(InternalMemCopyMem)
\r
37 ASM_PFX(InternalMemCopyMem):
\r
40 movq %rdx, %rsi # rsi <- Source
\r
41 movq %rcx, %rdi # rdi <- Destination
\r
42 leaq -1(%rsi,%r8,), %r9 # r9 <- Last byte of Source
\r
44 movq %rdi, %rax # rax <- Destination as return value
\r
45 jae L0 # Copy forward if Source > Destination
\r
46 cmpq %rdi, %r9 # Overlapped?
\r
47 jae L_CopyBackward # Copy backward if overlapped
\r
50 subq %rdi, %rcx # rcx <- -rdi
\r
51 andq $15, %rcx # rcx + rsi should be 16 bytes aligned
\r
52 jz L1 # skip if rcx == 0
\r
60 shrq $4, %rcx # rcx <- # of DQwords to copy
\r
62 movdqu %xmm0, 0x18(%rsp) # save xmm0 on stack
\r
64 movdqu (%rsi), %xmm0 # rsi may not be 16-byte aligned
\r
65 movntdq %xmm0, (%rdi) # rdi should be 16-byte aligned
\r
70 movdqa 0x18(%rsp), %xmm0 # restore xmm0
\r
71 jmp L_CopyBytes # copy remaining bytes
\r
73 movq %r9, %rsi # rsi <- Last byte of Source
\r
74 leaq -1(%rdi, %r8,), %rdi # rdi <- Last byte of Destination
\r