commit 87868c2418fb74357757e3b739ce5b76b17a8929
Author: Adhemerval Zanella <azanella@linux.vnet.ibm.com>
Date: Wed Jun 25 11:54:31 2014 -0500
PowerPC: Align power7 memcpy using VSX to quadword
This patch changes power7 memcpy to use VSX instructions only when
memory is aligned to a quadword. This avoids unaligned kernel traps
on non-cacheable memory (for instance, memory-mapped I/O).
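
(As a minimal C sketch of the idea, not glibc code and with a hypothetical
helper name: the aligned VSX path is only taken when SRC and DST share the
same offset within a 16-byte quadword, so that aligning DST to a quadword
boundary aligns SRC at the same time.)

    #include <stdint.h>

    /* Nonzero when the quadword-aligned VSX loop may be used: both
       pointers can reach 16-byte alignment together.  */
    static inline int
    vsx_path_ok (const void *dst, const void *src)
    {
      return ((uintptr_t) dst & 15) == ((uintptr_t) src & 15);
    }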
diff --git a/sysdeps/powerpc/powerpc32/power7/memcpy.S b/sysdeps/powerpc/powerpc32/power7/memcpy.S
index 52c2a6b..e540fea 100644
--- a/sysdeps/powerpc/powerpc32/power7/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/power7/memcpy.S
@@ -38,8 +38,8 @@ EALIGN (memcpy, 5, 0)
ble cr1, L(copy_LT_32) /* If move < 32 bytes use short move
code. */
- andi. 11,3,7 /* Check alignment of DST. */
- clrlwi 10,4,29 /* Check alignment of SRC. */
+ andi. 11,3,15 /* Check alignment of DST. */
+ clrlwi 10,4,28 /* Check alignment of SRC. */
cmplw cr6,10,11 /* SRC and DST alignments match? */
mr 12,4
mr 31,5
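
The two changed instructions widen the alignment test from a doubleword to
a quadword: andi. 11,3,15 keeps the low 4 bits of the DST address, and
clrlwi 10,4,28 clears the 28 high-order bits of the SRC address, which
likewise keeps the low 4 bits.  A C model of clrlwi (a hypothetical helper,
for illustration only):

    #include <stdint.h>

    /* clrlwi rD,rS,n (i.e. rlwinm rD,rS,0,n,31) clears the n high-order
       bits, keeping the low (32 - n) bits.  n = 29 kept addr % 8; the
       patched n = 28 keeps addr % 16.  */
    static inline uint32_t
    clrlwi (uint32_t rs, unsigned n)
    {
      return n == 0 ? rs : rs & (0xFFFFFFFFu >> n);
    }
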
diff --git a/sysdeps/powerpc/powerpc64/power7/memcpy.S b/sysdeps/powerpc/powerpc64/power7/memcpy.S
index bbfd381..58d9b12 100644
--- a/sysdeps/powerpc/powerpc64/power7/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/power7/memcpy.S
@@ -36,16 +36,11 @@ EALIGN (memcpy, 5, 0)
ble cr1, L(copy_LT_32) /* If move < 32 bytes use short move
code. */
-#ifdef __LITTLE_ENDIAN__
-/* In little-endian mode, power7 takes an alignment trap on any lxvd2x
- or stxvd2x crossing a 32-byte boundary, so ensure the aligned_copy
- loop is only used for quadword aligned copies. */
+/* Align copies using VSX instructions to quadword. It is to avoid alignment
+ traps when memcpy is used on non-cacheable memory (for instance, memory
+ mapped I/O). */
andi. 10,3,15
clrldi 11,4,60
-#else
- andi. 10,3,7 /* Check alignment of DST. */
- clrldi 11,4,61 /* Check alignment of SRC. */
-#endif
cmpld cr6,10,11 /* SRC and DST alignments match? */
mr dst,3
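
The 64-bit variant does the same widening with clrldi: clearing 60
high-order bits keeps the low 4 bits of the address (its offset within a
quadword), where the removed big-endian-only code cleared 61 bits to keep
just 3.  A C model (a hypothetical helper, for illustration only):

    #include <stdint.h>

    /* clrldi rD,rS,n clears the n high-order bits of a 64-bit register,
       keeping the low (64 - n) bits.  n = 60 yields addr % 16.  */
    static inline uint64_t
    clrldi (uint64_t rs, unsigned n)
    {
      return n == 0 ? rs : rs & (UINT64_MAX >> n);
    }
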
@@ -53,13 +48,9 @@ EALIGN (memcpy, 5, 0)
beq L(aligned_copy)
mtocrf 0x01,0
-#ifdef __LITTLE_ENDIAN__
clrldi 0,0,60
-#else
- clrldi 0,0,61
-#endif
-/* Get the DST and SRC aligned to 8 bytes (16 for little-endian). */
+/* Get the DST and SRC aligned to 16 bytes. */
1:
bf 31,2f
lbz 6,0(src)
@@ -79,14 +70,12 @@ EALIGN (memcpy, 5, 0)
stw 6,0(dst)
addi dst,dst,4
8:
-#ifdef __LITTLE_ENDIAN__
bf 28,16f
ld 6,0(src)
addi src,src,8
std 6,0(dst)
addi dst,dst,8
16:
-#endif
subf cnt,0,cnt
/* Main aligned copy loop. Copies 128 bytes at a time. */
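
Because DST must now reach a 16-byte boundary, the alignment prologue above
consumes up to 15 bytes, dispatching on the bits of the residual count
(bit by bit: 1, 2, 4, then the now-unconditional 8-byte ld/std step).  A C
rendering of that dispatch (hypothetical helper, illustration only):

    #include <string.h>

    /* r = number of prologue bytes (0..15); copy 1/2/4/8-byte pieces
       according to its set bits, mirroring the bf 31/30/29/28 tests.  */
    static inline void
    copy_prologue (unsigned char **d, const unsigned char **s, unsigned r)
    {
      for (unsigned step = 1; step <= 8; step <<= 1)
        if (r & step)
          {
            memcpy (*d, *s, step);
            *d += step;
            *s += step;
          }
    }
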
@@ -298,9 +287,6 @@ L(copy_LE_8):
.align 4
L(copy_GE_32_unaligned):
clrldi 0,0,60 /* Number of bytes until the 1st dst quadword. */
-#ifndef __LITTLE_ENDIAN__
- andi. 10,3,15 /* Check alignment of DST (against quadwords). */
-#endif
srdi 9,cnt,4 /* Number of full quadwords remaining. */
beq L(copy_GE_32_unaligned_cont)
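
Taken together, the patched code has this shape: a scalar prologue that
copies until DST reaches a 16-byte boundary (SRC reaches it at the same
time, because this path is only entered when the two offsets match), an
aligned bulk loop, and a scalar tail.  A hedged C sketch of that shape
(illustration only; the real bulk loop uses lxvd2x/stxvd2x, represented
here by a 16-byte memcpy):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static void *
    copy_quadword_aligned (void *dst, const void *src, size_t n)
    {
      unsigned char *d = dst;
      const unsigned char *s = src;

      /* Prologue: byte copies until dst sits on a 16-byte boundary.  */
      while (n > 0 && ((uintptr_t) d & 15) != 0)
        {
          *d++ = *s++;
          n--;
        }

      /* Bulk: both pointers are now quadword aligned, so a VSX
         load/store here cannot take an alignment trap, even on a
         non-cacheable mapping.  */
      while (n >= 16)
        {
          memcpy (d, s, 16);   /* stands in for lxvd2x/stxvd2x */
          d += 16;
          s += 16;
          n -= 16;
        }

      /* Tail: remaining bytes.  */
      while (n > 0)
        {
          *d++ = *s++;
          n--;
        }
      return dst;
    }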