    Backport of
    commit 8bedcb5f03c62bf6001396dafdd82fbd4da7c2db
    Author: Adhemerval Zanella <azanella@linux.vnet.ibm.com>
    Date:   Wed Jan 7 07:18:30 2015 -0500
    
        powerpc: Optimized strcmp for POWER8/PPC64
    
        This patch adds an optimized POWER8 strcmp using unaligned accesses.
        The algorithm first checks the initial 16 bytes, then aligns the first
        source argument and uses unaligned loads on the second argument only.
        Additional checks for page boundaries are done for the unaligned cases.
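
    To make the strategy above concrete, here is a minimal C sketch of the
    same flow: guard against a 4 KiB page cross, then compare unaligned
    doublewords, using a software stand-in for the cmpb instruction to spot
    a terminator or a mismatch.  The names (strcmp_sketch, cmpb_model,
    strcmp_bytes) are illustrative only; PAGE_SIZE and ITER_SIZE mirror the
    constants mentioned in the assembly comments.  The real code isolates
    the differing byte with cntlzd and never leaves registers, while the
    sketch simply falls back to a byte loop:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096u
    #define ITER_SIZE 32u        /* bytes compared before aligning s1 */

    /* Software model of the 'cmpb' instruction: 0xff in every byte lane
       where the corresponding bytes of A and B are equal.  */
    static uint64_t
    cmpb_model (uint64_t a, uint64_t b)
    {
      uint64_t r = 0;
      for (int i = 0; i < 64; i += 8)
        if (((a >> i) & 0xff) == ((b >> i) & 0xff))
          r |= (uint64_t) 0xff << i;
      return r;
    }

    /* Byte-by-byte comparison, used near page boundaries and as the
       sketch's fallback once an interesting doubleword is found.  */
    static int
    strcmp_bytes (const char *s1, const char *s2)
    {
      while (*s1 != '\0' && *s1 == *s2)
        s1++, s2++;
      return (unsigned char) *s1 - (unsigned char) *s2;
    }

    int
    strcmp_sketch (const char *s1, const char *s2)
    {
      /* Page-cross guard from the assembly:
         ((uintptr_t) s % PAGE_SIZE) > (PAGE_SIZE - ITER_SIZE).  */
      if ((uintptr_t) s1 % PAGE_SIZE > PAGE_SIZE - ITER_SIZE
          || (uintptr_t) s2 % PAGE_SIZE > PAGE_SIZE - ITER_SIZE)
        return strcmp_bytes (s1, s2);

      /* Compare the first ITER_SIZE bytes as unaligned doublewords.  Like
         the assembly, this may read past the terminator; the guard above
         keeps the read inside the current 4 KiB page (out of bounds in
         ISO C, but safe on the hardware the real code targets).  */
      for (size_t off = 0; off < ITER_SIZE; off += 8)
        {
          uint64_t w1, w2;
          memcpy (&w1, s1 + off, 8);     /* unaligned load of s1 */
          memcpy (&w2, s2 + off, 8);     /* unaligned load of s2 */
          uint64_t zero_mask = cmpb_model (w1, 0);   /* '\0' present?  */
          uint64_t eq_mask   = cmpb_model (w1, w2);  /* bytes equal?  */
          /* Same test as 'orc. r9,r12,r11' in the assembly.  */
          if ((zero_mask | ~eq_mask) != 0)
            return strcmp_bytes (s1 + off, s2 + off);
        }

      /* The real code now aligns s1 and keeps looping; the sketch just
         finishes byte by byte.  */
      return strcmp_bytes (s1 + ITER_SIZE, s2 + ITER_SIZE);
    }

    For the inputs it handles, the sketch returns the same sign as strcmp;
    it is a model of the control flow only, not a stand-in for the assembly.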
    
        ChangeLog:
    	2015-01-13  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>
    
     	* sysdeps/powerpc/powerpc64/multiarch/Makefile [sysdep_routines]:
    	Add strcmp-power8 object.
    	* sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
    	(__libc_ifunc_impl_list): Add __strcmp_power8 implementation.
    	* sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S: New file.
    	* sysdeps/powerpc/powerpc64/multiarch/strcmp.c (strcmp): Add
    	__strcmp_power8 implementation.
    	* sysdeps/powerpc/powerpc64/power8/strcmp.S: New file.
    	* NEWS: Update.
    
    and its dependency:
    commit e23d3d2690bf63207b1a47e83a94693daebbbfe5
    Author: Vidya Ranganathan <vidya@linux.vnet.ibm.com>
    Date:   Fri Jun 6 07:56:07 2014 -0500
    
        PowerPC: Optimized strcmp for PPC64/POWER7
    
        Optimization is achieved on 8-byte aligned strings with doubleword
        comparison using the cmpb instruction.  Unaligned strings are handled
        with an unrolled byte-comparison loop for a gain on POWER7.
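
    The aligned fast path leans on two recurring cmpb-based tests: a
    doubleword contains the terminating '\0' exactly when cmpb(dword, 0) is
    non-zero, and two doublewords are identical exactly when cmpb(a, b) is
    all ones (hence the cmpdi ..., -1 checks in the assembly below).  A
    small self-contained C model of those two predicates, with illustrative
    names (cmpb_model, dword_has_nul, dwords_equal):

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>
    #include <stdio.h>

    /* Software model of 'cmpb': 0xff in each byte lane where A and B match.  */
    static uint64_t
    cmpb_model (uint64_t a, uint64_t b)
    {
      uint64_t r = 0;
      for (int i = 0; i < 64; i += 8)
        if (((a >> i) & 0xff) == ((b >> i) & 0xff))
          r |= (uint64_t) 0xff << i;
      return r;
    }

    /* 'cmpb rX, dword, 0' followed by 'cmpdi cr7, rX, 0'.  */
    static bool
    dword_has_nul (uint64_t w)
    {
      return cmpb_model (w, 0) != 0;
    }

    /* 'cmpb rX, dw1, dw2' followed by 'cmpdi cr7, rX, -1'.  */
    static bool
    dwords_equal (uint64_t a, uint64_t b)
    {
      return cmpb_model (a, b) == (uint64_t) -1;
    }

    int
    main (void)
    {
      uint64_t w1, w2;
      memcpy (&w1, "power7!", 8);        /* 7 chars plus the terminator */
      memcpy (&w2, "power7!x", 8);       /* last byte differs */
      printf ("has_nul=%d equal=%d\n",
              dword_has_nul (w1), dwords_equal (w1, w2));
      return 0;                          /* prints: has_nul=1 equal=0 */
    }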
    
        ChangeLog:
        2014-06-11  Vidya Ranganathan  <vidya@linux.vnet.ibm.com>
    
           * sysdeps/powerpc/powerpc64/power7/strcmp.S: New file: Optimization.
           * sysdeps/powerpc/powerpc64/multiarch/strcmp.c: New file:
           multiarch strcmp for PPC64.
           * sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S: New file.
           * sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S: New file.
           * sysdeps/powerpc/powerpc64/multiarch/Makefile: Add strcmp
           multiarch optimizations.
           * sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c:
           (__libc_ifunc_impl_list): Likewise.
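
    The multiarch glue listed above (ifunc-impl-list.c plus the new
    strcmp.c) selects an implementation at load time from the AT_HWCAP and
    AT_HWCAP2 auxiliary-vector bits.  The stand-alone sketch below walks
    the same selection order in user space; the fallback #defines merely
    mirror the kernel's asm/cputable.h values so the example compiles on
    its own, and are an assumption rather than part of this patch:

    #include <stdio.h>
    #include <sys/auxv.h>                /* getauxval, AT_HWCAP, AT_HWCAP2 */

    #ifndef PPC_FEATURE_HAS_VSX
    # define PPC_FEATURE_HAS_VSX     0x00000080
    #endif
    #ifndef PPC_FEATURE2_ARCH_2_07
    # define PPC_FEATURE2_ARCH_2_07  0x80000000
    #endif

    int
    main (void)
    {
      unsigned long hwcap  = getauxval (AT_HWCAP);
      unsigned long hwcap2 = getauxval (AT_HWCAP2);

      /* Same decision order as the libc_ifunc resolver added in
         sysdeps/powerpc/powerpc64/multiarch/strcmp.c.  */
      const char *impl =
        (hwcap2 & PPC_FEATURE2_ARCH_2_07) ? "__strcmp_power8"
        : (hwcap & PPC_FEATURE_HAS_VSX)   ? "__strcmp_power7"
        : "__strcmp_ppc";

      printf ("strcmp IFUNC would resolve to %s\n", impl);
      return 0;
    }

    On a POWER8 system this prints __strcmp_power8, which matches the
    variant the libc_ifunc resolver in the diff below installs.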

diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index ef39917..27c8b65 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -7,9 +7,10 @@ sysdep_routines += memcpy-power7 memcpy-a2 memcpy-power6 memcpy-cell \
                   memrchr-power7 memrchr-ppc64 rawmemchr-power7 \
                   stpcpy-power8 stpcpy-power7 stpcpy-ppc64 \
                   strcat-power8 strcat-power7 strcat-ppc64 \
+                  strcmp-power8 strcmp-power7 strcmp-ppc64 \
                   strcpy-power8 strcpy-power7 strcpy-ppc64 \
                   stpncpy-power8 stpncpy-power7 stpncpy-ppc64 \
-                  strncpy-power8 strncpy-power7 strncpy-ppc64 
+                  strncpy-power8 strncpy-power7 strncpy-ppc64 \
                   strncat-power7 \
                   rawmemchr-ppc64 strlen-power7 strlen-ppc64 strnlen-power7 \
                   strnlen-ppc64 strcasecmp-power7 strcasecmp_l-power7 \
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 23bf5dc..2b38c71 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -277,5 +277,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, stpncpy, 1,
 			     __stpncpy_ppc))
 
+  /* Support sysdeps/powerpc/powerpc64/multiarch/strcmp.c.  */
+  IFUNC_IMPL (i, name, strcmp,
+	      IFUNC_IMPL_ADD (array, i, strcmp,
+			      hwcap2 & PPC_FEATURE2_ARCH_2_07,
+			      __strcmp_power8)
+	      IFUNC_IMPL_ADD (array, i, strcmp,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strcmp_power7)
+	      IFUNC_IMPL_ADD (array, i, strcmp, 1,
+			     __strcmp_ppc))
   return i;
 }
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S
new file mode 100644
index 0000000..790ce8d
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S
@@ -0,0 +1,40 @@
+/* Optimized strcmp implementation for POWER7.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+  .section ".text";						\
+  ENTRY_2(__strcmp_power7)					\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  BODY_LABEL(__strcmp_power7):					\
+  cfi_startproc;						\
+  LOCALENTRY(__strcmp_power7)
+
+#undef END
+#define END(name)						\
+  cfi_endproc;							\
+  TRACEBACK(__strcmp_power7)					\
+  END_2(__strcmp_power7)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power7/strcmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S
new file mode 100644
index 0000000..dc4bfac
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S
@@ -0,0 +1,40 @@
+/* Optimized strcmp implementation for POWER8/PPC64.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+  .section ".text";						\
+  ENTRY_2(__strcmp_power8)					\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  BODY_LABEL(__strcmp_power8):					\
+  cfi_startproc;						\
+  LOCALENTRY(__strcmp_power8)
+
+#undef END
+#define END(name)						\
+  cfi_endproc;							\
+  TRACEBACK(__strcmp_power8)					\
+  END_2(__strcmp_power8)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strcmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S
new file mode 100644
index 0000000..93d1277
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S
@@ -0,0 +1,43 @@
+/* Default strcmp implementation for PowerPC64.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if defined SHARED && !defined NOT_IN_libc
+# undef EALIGN
+# define EALIGN(name, alignt, words)				\
+  .section ".text";						\
+  ENTRY_2(__strcmp_ppc)						\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  BODY_LABEL(__strcmp_ppc):					\
+  cfi_startproc;						\
+  LOCALENTRY(__strcmp_ppc)
+
+# undef END
+# define END(name)						\
+  cfi_endproc;							\
+  TRACEBACK(__strcmp_ppc)					\
+  END_2(__strcmp_ppc)
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+    .globl __GI_strcmp; __GI_strcmp = __strcmp_ppc
+#endif
+
+#include <sysdeps/powerpc/powerpc64/strcmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
new file mode 100644
index 0000000..c711969
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
@@ -0,0 +1,34 @@
+/* Multiple versions of strcmp. PowerPC64 version.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if defined SHARED && !defined NOT_IN_libc
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (strcmp) __strcmp_ppc attribute_hidden;
+extern __typeof (strcmp) __strcmp_power7 attribute_hidden;
+extern __typeof (strcmp) __strcmp_power8 attribute_hidden;
+
+libc_ifunc (strcmp,
+            (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+              ? __strcmp_power8 :
+              (hwcap & PPC_FEATURE_HAS_VSX)
+              ? __strcmp_power7
+            : __strcmp_ppc);
+#endif
diff --git a/sysdeps/powerpc/powerpc64/power7/strcmp.S b/sysdeps/powerpc/powerpc64/power7/strcmp.S
new file mode 100644
index 0000000..f16a9d8
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power7/strcmp.S
@@ -0,0 +1,195 @@
+/* Optimized strcmp implementation for Power7 using 'cmpb' instruction
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* The optimization is achieved here through cmpb instruction.
+   8byte aligned strings are processed with double word comparision
+   and unaligned strings are handled effectively with loop unrolling
+   technique  */
+
+#include <sysdep.h>
+
+/* int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])  */
+
+EALIGN (strcmp, 4, 0)
+	CALL_MCOUNT 2
+
+	or r9, r3, r4
+	rldicl. r10, r9, 0, 61	/* are s1 and s2 8 byte aligned..?  */
+	bne cr0, L(process_unaligned_bytes)
+
+/* process input parameters on double word aligned boundary  */
+	ld r9, 0(r4)		/* load s2 at offset=0  */
+	li r10, 0		/* load mask=0  */
+	cmpb r10, r9, r10	/* compare bytes at s2 with mask  */
+	cmpdi cr7, r10, 0	/* is NULL found ..? is end of string HIT  */
+	bne cr7, L(process_unaligned_bytes)	/* process byte by byte  */
+
+	ld r10, 0(r3)		/* load s1 at offset=0  */
+	li r8, 0		/* load mask=0  */
+	cmpb r8, r10, r8	/* compare bytes at s1 with mask  */
+	cmpdi cr7, r8, 0	/* is NULL found ..? is end of string HIT  */
+	bne cr7, L(process_unaligned_bytes)	/* process byte by byte  */
+
+/*s1 and s2 does not contain NULL now , so compare all 8 bytes in a GO  */
+	cmpb r9, r10, r9	/* compare s1 and s2  */
+	cmpdi cr7, r9, -1	/* compare result with 0xFFFFFFFFFFFFFFFF  */
+	bne cr7, L(process_unaligned_bytes)	/* s1,s2 mismatch found  */
+
+	addi r5, r3, 8		/* save next offset of s2  */
+	addi r11, r4, 8		/* save next offset of s1  */
+	ld r8, 8(r4)		/* load s2 at offset=8  */
+	li r9, 0		/* load mask=0  */
+	cmpb r9, r8, r9		/* compare bytes at s2 with mask  */
+	cmpdi cr7, r9, 0	/* NULL found ..?  */
+	bne cr7, L(processBytes)/* update input and process bytes one by one  */
+
+	mr r9, r4		/* save s2  */
+	li r10, 0		/* load mask=0  */
+
+	ld r7, 8(r3)		/* load s1 at offset=8  */
+	cmpb r6, r7, r10	/* compare bytes at s1 with mask  */
+	cmpdi cr7, r6, 0	/* is NULL found  */
+	bne cr7, L(processBytes)/* mismatch, so process one by one  */
+
+L(unrollDword):
+	cmpb r8, r7, r8		/* compare s1 and s2  */
+	cmpdi cr7, r8, -1	/* compare result with 0xFFFFFFFFFFFFFFFF  */
+	bne cr7, L(processBytes)/* mismatch with s1 and s2  */
+
+	addi r5, r3, 16		/* save offset=16 of s1  */
+	addi r4, r9, 16		/* save offset=16 of s2  */
+	ld r8, 16(r9)		/* load s2 at offset=16  */
+	cmpb r7, r8, r10	/* compare bytes at s2 with mask  */
+	cmpdi cr7, r7, 0	/* NULL found  ..?  */
+	bne cr7, L(update2processBytes)
+
+	ld r7, 16(r3)		/* load s1 at offset=16  */
+	cmpb r6, r7, r10	/* check s1 for end of string  */
+	cmpdi cr7, r6, 0	/* end of s1 ?,then handle byte by byte  */
+	bne 7,L(update2processBytes)
+
+	cmpb r8, r7, r8		/* compare s1 and s2 double words  */
+	cmpdi cr7, r8, -1	/* compare results with 0xFFFFFFFFFFFFFFFF  */
+	bne cr7,L(update2processBytes)
+
+	addi r5, r3, 24		/* update s1 to offset=24  */
+	addi r4, r9, 24		/* update s2 to offset=24  */
+
+	ld r8, 24(r9)		/* load s2  */
+	cmpb r7, r8, r10	/* compare s2 for NULL  */
+	cmpdi cr7, r7, 0	/* verify if s2 is ending now  */
+	bne cr7,L(update2processBytes)
+
+	ld r7, 24(r3)		/* load s1 at offset=24  */
+	cmpb r6, r7, r10	/* verify for NULL  */
+	cmpdi cr7, r6, 0	/* is NULL found  */
+	bne cr7, L(update2processBytes)
+
+	cmpb r8, r7, r8		/* compare s1 and s2  */
+	cmpdi cr7, r8, -1	/* are s1 and s2 same ..?  */
+	bne cr7, L(update2processBytes)
+
+	addi r7, r9, 32		/* update s2 to next double word  */
+	addi r3, r3, 32		/* update s1 to next double word  */
+
+	ld r8, 32(r9)		/* load s2  */
+	mr r4, r7		/* save s2  */
+	cmpb r6, r8, r10	/* compare s2 with NULL  */
+	cmpdi cr7, r6, 0	/* end of s2 ..? */
+	bne cr7, L(process_unaligned_bytes)
+
+	ld r6, 0(r3)		/* load and compare s1 for NULL  */
+	cmpb r5, r6, r10
+	cmpdi cr7, r5, 0
+	bne cr7, L(process_unaligned_bytes)
+
+	cmpb r8, r6, r8		/* compare s1 and s2  */
+	cmpdi cr7, r8, -1
+	bne cr7, L(process_unaligned_bytes)
+
+	addi r5, r3, 8		/* increment s1 and d2 here  */
+	addi r11, r9, 40
+
+	ld r8, 40(r9)		/* process s2 now  */
+	cmpb r9, r8, r10
+	cmpdi cr7, r9, 0
+	bne cr7, L(processBytes)
+
+	mr r9, r7
+	ld r7, 8(r3)		/* process s1 now  */
+	cmpb r6, r7, r10
+	cmpdi cr7, r6, 0
+	beq cr7, L(unrollDword)	/* unroll to compare s1 and s2  */
+
+L(processBytes):
+	mr r4, r11		/* update input params  */
+	mr r3, r5
+
+	.p2align 4
+L(process_unaligned_bytes):
+	lbz r9, 0(r3)		/* load byte from s1  */
+	lbz r10, 0(r4)		/* load byte from s2  */
+	cmpdi cr7, r9, 0	/* compare *s1 with NULL  */
+	beq cr7, L(diffOfNULL)	/* if *s1 is NULL , return *s1 - *s2  */
+	cmplw cr7, r9, r10	/* compare *s1 and *s2  */
+	bne cr7, L(ComputeDiff)	/* branch to compute difference and return  */
+
+	lbz r9, 1(r3)		/* load next byte from s1  */
+	lbz r10, 1(r4)		/* load next byte from s2  */
+	cmpdi cr7, r9, 0	/* compare *s1 with NULL  */
+	beq cr7, L(diffOfNULL)	/* if *s1 is NULL , return *s1 - *s2  */
+	cmplw cr7, r9, r10	/* compare *s1 and *s2  */
+	bne cr7, L(ComputeDiff)	/* branch to compute difference and return  */
+
+	lbz r9, 2(r3)		/* unroll 3rd byte here  */
+	lbz r10, 2(r4)
+	cmpdi cr7, r9, 0
+	beq cr7, L(diffOfNULL)
+	cmplw cr7, r9, r10
+	bne 7, L(ComputeDiff)
+
+	lbz r9, 3(r3)		/* unroll 4th byte now  */
+	lbz r10, 3(r4)
+	addi r3, r3, 4		/* increment s1 by unroll factor  */
+	cmpdi cr7, r9, 0
+	cmplw cr6, 9, r10
+	beq cr7, L(diffOfNULL)
+	addi r4, r4, 4		/* increment s2 by unroll factor  */
+	beq cr6, L(process_unaligned_bytes)	/* unroll byte processing  */
+
+	.p2align 4
+L(ComputeDiff):
+	extsw r9, r9
+	subf r10, r10, r9	/* compute s1 - s2  */
+	extsw r3, r10
+	blr			/* return  */
+
+	.p2align 4
+L(diffOfNULL):
+	li r9, 0
+	subf r10, r10, r9	/* compute s1 - s2  */
+	extsw r3, r10		/* sign extend result  */
+	blr			/* return  */
+
+	.p2align 4
+L(update2processBytes):
+	mr r3, r5		/* update and proceed  */
+	b L(process_unaligned_bytes)
+
+END (strcmp)
+libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/power8/strcmp.S b/sysdeps/powerpc/powerpc64/power8/strcmp.S
new file mode 100644
index 0000000..223d891
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strcmp.S
@@ -0,0 +1,257 @@
+/* Optimized strcmp implementation for PowerPC64/POWER8.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* Implements the function
+
+   size_t [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
+
+   The implementation uses unaligned doubleword access to avoid specialized
+   code paths depending of data alignment.  Although recent powerpc64 uses
+   64K as default, the page cross handling assumes minimum page size of
+   4k.  */
+
+EALIGN (strcmp, 4, 0)
+	li	r0,0
+
+	/* Check if [s1]+32 or [s2]+32 will cross a 4K page boundary using
+	   the code:
+
+	    (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
+
+	   with PAGE_SIZE being 4096 and ITER_SIZE begin 32.  */
+
+	rldicl	r7,r3,0,52
+	rldicl	r9,r4,0,52
+	cmpldi	cr7,r7,4096-32
+	bgt	cr7,L(pagecross_check)
+	cmpldi	cr5,r9,4096-32
+	bgt	cr5,L(pagecross_check)
+
+	/* For short string up to 32 bytes, load both s1 and s2 using
+	   unaligned dwords and compare.  */
+	ld	r8,0(r3)
+	ld	r10,0(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	ld	r8,8(r3)
+	ld	r10,8(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	ld	r8,16(r3)
+	ld	r10,16(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	ld	r8,24(r3)
+	ld	r10,24(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	addi	r7,r3,32
+	addi	r4,r4,32
+
+L(align_8b):
+	/* Now it has checked for first 32 bytes, align source1 to doubleword
+	   and adjust source2 address.  */
+	rldicl	r9,r7,0,61	/* source1 alignment to doubleword  */
+	subf	r4,r9,r4	/* Adjust source2 address based on source1
+				   alignment.  */
+	rldicr	r7,r7,0,60	/* Align source1 to doubleword.  */
+
+	/* At this point, source1 alignment is 0 and source2 alignment is
+	   between 0 and 7.  Check is source2 alignment is 0, meaning both
+	   sources have the same alignment.  */
+	andi.	r9,r4,0x7
+	bne	cr0,L(loop_diff_align)
+
+	/* If both source1 and source2 are doubleword aligned, there is no
+	   need for page boundary cross checks.  */
+
+	ld	r8,0(r7)
+	ld	r10,0(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	.align 4
+L(loop_equal_align):
+	ld	r8,8(r7)
+	ld	r10,8(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	ld	r8,16(r7)
+	ld	r10,16(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	ldu	r8,24(r7)
+	ldu	r10,24(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+
+	b	L(loop_equal_align)
+
+	/* A zero byte was found in r8 (s1 dword), r9 contains the cmpb
+	   result and r10 the dword from s2.  To code isolate the byte
+	   up to end (including the '\0'), masking with 0xFF the remaining
+	   ones:
+
+           #if __LITTLE_ENDIAN__
+	     (__builtin_ffsl (x) - 1) = counting trailing zero bits
+	     r9 = (__builtin_ffsl (r9) - 1) + 8;
+	     r9 = -1UL << r9
+	   #else
+	     r9  = __builtin_clzl (r9) + 8;
+	     r9  = -1UL >> r9
+	   #endif
+	     r8  = r8  | r9
+	     r10 = r10 | r9  */
+
+#ifdef __LITTLE_ENDIAN__
+	nor 	r9,r9,r9
+L(different_nocmpb):
+	neg	r3,r9
+	and	r9,r9,r3
+	cntlzd	r9,r9
+	subfic	r9,r9,63
+#else
+	not	r9,r9
+L(different_nocmpb):
+	cntlzd	r9,r9
+	subfic	r9,r9,56
+#endif
+	srd	r3,r8,r9
+	srd	r10,r10,r9
+	rldicl	r10,r10,0,56
+	rldicl	r3,r3,0,56
+	subf	r3,r10,r3
+	extsw	r3,r3
+	blr
+
+	.align	4
+L(pagecross_check):
+	subfic	r9,r9,4096
+	subfic	r7,r7,4096
+	cmpld	cr7,r7,r9
+	bge	cr7,L(pagecross)
+	mr	r7,r9
+
+	/* If unaligned 16 bytes reads across a 4K page boundary, it uses
+	   a simple byte a byte comparison until the page alignment for s1
+	   is reached.  */
+L(pagecross):
+	add	r7,r3,r7
+	subf	r9,r3,r7
+	mtctr	r9
+
+	.align	4
+L(pagecross_loop):
+	/* Loads a byte from s1 and s2, compare if *s1 is equal to *s2
+	   and if *s1 is '\0'.  */
+	lbz	r9,0(r3)
+	lbz	r10,0(r4)
+	addi	r3,r3,1
+	addi	r4,r4,1
+	cmplw	cr7,r9,r10
+	cmpdi	cr5,r9,r0
+	bne	cr7,L(pagecross_ne)
+	beq	cr5,L(pagecross_nullfound)
+	bdnz	L(pagecross_loop)
+	b	L(align_8b)
+
+	.align	4
+	/* The unaligned read of source2 will cross a 4K page boundary,
+	   and the different byte or NULL maybe be in the remaining page
+	   bytes. Since it can not use the unaligned load, the algorithm
+	   reads and compares 8 bytes to keep source1 doubleword aligned.  */
+L(check_source2_byte):
+	li	r9,8
+	mtctr	r9
+
+	.align	4
+L(check_source2_byte_loop):
+	lbz	r9,0(r7)
+	lbz	r10,0(r4)
+	addi	r7,r7,1
+	addi	r4,r4,1
+	cmplw	cr7,r9,10
+	cmpdi	r5,r9,0
+	bne	cr7,L(pagecross_ne)
+	beq	cr5,L(pagecross_nullfound)
+	bdnz	L(check_source2_byte_loop)
+
+	/* If source2 is unaligned to doubleword, the code needs to check
+	   on each interation if the unaligned doubleword access will cross
+	   a 4k page boundary.  */
+	.align	5
+L(loop_unaligned):
+	ld	r8,0(r7)
+	ld	r10,0(r4)
+	cmpb	r12,r8,r0
+	cmpb	r11,r8,r10
+	orc.	r9,r12,r11
+	bne	cr0,L(different_nocmpb)
+	addi	r7,r7,8
+	addi	r4,r4,8
+
+L(loop_diff_align):
+	/* Check if [src2]+8 cross a 4k page boundary:
+
+	     srcin2 % PAGE_SIZE > (PAGE_SIZE - 8)
+
+	     with PAGE_SIZE being 4096.  */
+	rldicl	r9,r4,0,52
+	cmpldi	cr7,r9,4088
+	ble	cr7,L(loop_unaligned)
+	b	L(check_source2_byte)
+
+	.align	4
+L(pagecross_ne):
+	extsw	r3,r9
+	mr	r9,r10
+L(pagecross_retdiff):
+	subf	r9,r9,r3
+	extsw	r3,r9
+	blr
+
+	.align	4
+L(pagecross_nullfound):
+	li	r3,0
+	b	L(pagecross_retdiff)
+END (strcmp)
+libc_hidden_builtin_def (strcmp)