Diffstat (limited to 'gnu/packages')
-rw-r--r--  gnu/packages/patches/glibc-2.27-git-fixes.patch  | 155
1 file changed, 155 insertions, 0 deletions
diff --git a/gnu/packages/patches/glibc-2.27-git-fixes.patch b/gnu/packages/patches/glibc-2.27-git-fixes.patch
index c28bac222a..4ed67c7c25 100644
--- a/gnu/packages/patches/glibc-2.27-git-fixes.patch
+++ b/gnu/packages/patches/glibc-2.27-git-fixes.patch
@@ -10,6 +10,7 @@ f36553bf6a4f69070f99badbdab5802b43e6e211
7c6304182b9f422b782ace1cdd3efbde056aec36
78a90c2f74a2012dd3eff302189e47ff6779a757
1e52d8e65a58c49a48549053a1b89c06240e0c6c
+55ad82e45c313454de657931898e974a7a036cad
From 56170e064e2b21ce204f0817733e92f1730541ea Mon Sep 17 00:00:00 2001
From: Igor Gnatenko <ignatenko@redhat.com>
@@ -545,3 +546,157 @@ index 0000000000..353e36507d
+}
+
+#include <support/test-driver.c>
+
+From 55ad82e45c313454de657931898e974a7a036cad Mon Sep 17 00:00:00 2001
+From: Andrew Senkevich <andrew.n.senkevich@gmail.com>
+Date: Fri, 23 Mar 2018 16:19:45 +0100
+Subject: [PATCH] Fix i386 memmove issue (bug 22644).
+
+ [BZ #22644]
+ * sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
+ branch conditions.
+ * string/test-memmove.c (do_test2): New testcase.
+
+(cherry picked from commit cd66c0e584c6d692bc8347b5e72723d02b8a8ada)
+
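On 32-bit x86 a user-space buffer can legitimately sit at or above address 0x80000000, and the SSE2 memmove decides its copy direction with `cmp` followed by signed branches (`jg`/`jle`), which treat such addresses as negative and pick the wrong path. A minimal sketch of the effect, with made-up addresses that mirror the test case below; this is illustrative only and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* Hypothetical addresses: src ends just below the 2 GiB boundary,
         dst just above it, as in the do_test2 layout further down.  */
      uint32_t src = 0x7fffff80u;
      uint32_t dst = 0x80000020u;

      /* Unsigned compare (what `ja`/`jbe` implement): dst is above src.  */
      printf ("unsigned: dst > src is %d\n", dst > src);

      /* Signed compare (what `jg`/`jle` implement): 0x80000020 is negative
         as a 32-bit signed value, so dst appears to be below src and the
         copy runs in the wrong direction.  */
      printf ("signed:   dst > src is %d\n", (int32_t) dst > (int32_t) src);

      return 0;
    }

With the patched `ja`/`jbe` conditions the raw 32-bit values are compared, so buffers above 2 GiB behave like any others.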
+diff --git a/string/test-memmove.c b/string/test-memmove.c
+index edc7a4c3bf..64e3651ba4 100644
+--- a/string/test-memmove.c
++++ b/string/test-memmove.c
+@@ -24,6 +24,7 @@
+ # define TEST_NAME "memmove"
+ #endif
+ #include "test-string.h"
++#include <support/test-driver.h>
+
+ char *simple_memmove (char *, const char *, size_t);
+
+@@ -245,6 +246,60 @@ do_random_tests (void)
+ }
+ }
+
++static void
++do_test2 (void)
++{
++ size_t size = 0x20000000;
++ uint32_t * large_buf;
++
++ large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
++ MAP_PRIVATE | MAP_ANON, -1, 0);
++
++ if (large_buf == MAP_FAILED)
++ error (EXIT_UNSUPPORTED, errno, "Large mmap failed");
++
++ if ((uintptr_t) large_buf > 0x80000000 - 128
++ || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
++ {
++ error (0, 0, "Large mmap allocated improperly");
++ ret = EXIT_UNSUPPORTED;
++ munmap ((void *) large_buf, size);
++ return;
++ }
++
++ size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
++ size_t arr_size = bytes_move / sizeof (uint32_t);
++ size_t i;
++
++ FOR_EACH_IMPL (impl, 0)
++ {
++ for (i = 0; i < arr_size; i++)
++ large_buf[i] = (uint32_t) i;
++
++ uint32_t * dst = &large_buf[33];
++
++#ifdef TEST_BCOPY
++ CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
++#else
++ CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
++#endif
++
++ for (i = 0; i < arr_size; i++)
++ {
++ if (dst[i] != (uint32_t) i)
++ {
++ error (0, 0,
++ "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
++ impl->name, dst, large_buf, i);
++ ret = 1;
++ break;
++ }
++ }
++ }
++
++ munmap ((void *) large_buf, size);
++}
++
+ int
+ test_main (void)
+ {
+@@ -284,6 +339,9 @@ test_main (void)
+ }
+
+ do_random_tests ();
++
++ do_test2 ();
++
+ return ret;
+ }
+
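The new do_test2 reproduces the problem by asking mmap for 512 MiB at hint 0x70000000, so that an overlapping copy of the first 256 MiB ends exactly at the 2 GiB boundary; if the kernel places the mapping elsewhere, the test bails out with EXIT_UNSUPPORTED. A hedged sketch of the address layout the test expects, assuming the hint is honoured (none of these values are promised by the patch itself):

    #include <stdio.h>

    int
    main (void)
    {
      unsigned long large_buf  = 0x70000000ul;              /* mmap hint      */
      unsigned long size       = 0x20000000ul;              /* 512 MiB mapped */
      unsigned long bytes_move = 0x80000000ul - large_buf;  /* 256 MiB copy   */
      unsigned long dst        = large_buf + 33 * 4;        /* &large_buf[33] */

      printf ("mapping   : %#010lx .. %#010lx\n", large_buf, large_buf + size - 1);
      printf ("src region: %#010lx .. %#010lx\n", large_buf, large_buf + bytes_move - 1);
      printf ("dst region: %#010lx .. %#010lx\n", dst, dst + bytes_move - 1);

      /* The destination range ends above 0x80000000 while overlapping the
         source, which is exactly the situation where the signed branch
         conditions misjudge the pointer order (bug 22644).  */
      return 0;
    }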
+diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+index 9c3bbe7e17..9aa17de99c 100644
+--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
++++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
+ cmp %edx, %eax
+
+ # ifdef USE_AS_MEMMOVE
+- jg L(check_forward)
++ ja L(check_forward)
+
+ L(mm_len_0_or_more_backward):
+ /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
+@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
+ jbe L(mm_len_0_16_bytes_backward)
+
+ cmpl $32, %ecx
+- jg L(mm_len_32_or_more_backward)
++ ja L(mm_len_32_or_more_backward)
+
+ /* Copy [0..32] and return. */
+ movdqu (%eax), %xmm0
+@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
+
+ L(mm_len_32_or_more_backward):
+ cmpl $64, %ecx
+- jg L(mm_len_64_or_more_backward)
++ ja L(mm_len_64_or_more_backward)
+
+ /* Copy [0..64] and return. */
+ movdqu (%eax), %xmm0
+@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
+
+ L(mm_len_64_or_more_backward):
+ cmpl $128, %ecx
+- jg L(mm_len_128_or_more_backward)
++ ja L(mm_len_128_or_more_backward)
+
+ /* Copy [0..128] and return. */
+ movdqu (%eax), %xmm0
+@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
+ add %ecx, %eax
+ cmp %edx, %eax
+ movl SRC(%esp), %eax
+- jle L(forward)
++ jbe L(forward)
+ PUSH (%esi)
+ PUSH (%edi)
+ PUSH (%ebx)
+@@ -269,7 +269,7 @@ L(check_forward):
+ add %edx, %ecx
+ cmp %eax, %ecx
+ movl LEN(%esp), %ecx
+- jle L(forward)
++ jbe L(forward)
+
+ /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+ separately. */
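The assembler hunks above only swap signed conditional jumps for their unsigned counterparts: after a `cmp` on addresses or lengths, `ja`/`jbe` read the flags that are meaningful for unsigned operands, whereas `jg`/`jle` interpret them as signed. A rough C picture of the direction choice these branches implement; the function and its structure are invented for illustration and are not the actual glibc source:

    #include <stddef.h>
    #include <stdint.h>

    static void *
    my_memmove (void *dstp, const void *srcp, size_t len)
    {
      unsigned char *dst = dstp;
      const unsigned char *src = srcp;

      /* Addresses and lengths are unsigned quantities, so this comparison
         must be unsigned (the `ja`/`jbe` case), never signed.  If dst is
         below src, the subtraction wraps to a large value and the forward
         path is taken, which is safe there.  */
      if ((uintptr_t) dst - (uintptr_t) src >= len)
        /* No harmful overlap: copy ascending (the L(forward) path).  */
        for (size_t i = 0; i < len; i++)
          dst[i] = src[i];
      else
        /* dst overlaps the tail of src: copy descending (the
           backward paths).  */
        for (size_t i = len; i-- > 0; )
          dst[i] = src[i];

      return dstp;
    }

    int
    main (void)
    {
      char buf[32] = "0123456789abcdef";
      my_memmove (buf + 2, buf, 10);   /* overlapping move, backward path */
      return 0;
    }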