author     Evgeniy Stepanov <eugeni.stepanov@gmail.com>   2014-03-03 13:52:36 +0000
committer  Evgeniy Stepanov <eugeni.stepanov@gmail.com>   2014-03-03 13:52:36 +0000
commit     2b4d4bcd7e42d4419b6e720efde7143bd120fff9 (patch)
tree       6baeb0bd14777b3c6476d953015ccf4f19ad6031
parent     fce1ac5069f9cff3605f8c7b0203bac7bf580fd5 (diff)
[msan] Tests for X86 SIMD bitshift intrinsic support.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@202713 91177308-0d34-0410-b5e6-96231b3b80d8
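
What these tests pin down is MSan's shadow propagation rule for shifts: shifting by a fully initialized count shifts the operand's shadow by the same amount (the zero bits shifted in are initialized), while a count with any uninitialized bit poisons every bit of the result. Below is a minimal model of that rule for a single 16-bit lane, for illustration only; it is not MSan's implementation, and ShadowedU16/shl_model are hypothetical names.

#include <cstdint>

// One 16-bit lane plus its MSan-style shadow: a set shadow bit means
// "this bit of value is uninitialized (poisoned)".
struct ShadowedU16 {
  uint16_t value;
  uint16_t shadow;
};

// Left shift under the propagation rule the new tests assert.
ShadowedU16 shl_model(ShadowedU16 x, unsigned count, bool count_poisoned) {
  uint16_t v = static_cast<uint16_t>(x.value << count);
  if (count_poisoned)
    return {v, 0xFFFF};  // uninitialized count poisons the whole result
  // The shadow moves with the data; shifted-in zeros are initialized.
  return {v, static_cast<uint16_t>(x.shadow << count)};
}

For example, *GetPoisoned<U2>() | 3 carries shadow 0xFFFC (bits 0-1 initialized); after a shift by 2 the shadow is 0xFFF0, so u[0] itself is poisoned but u[0] | (~7U) is not, which is exactly what the sse2_left_scalar test checks.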
-rw-r--r--  lib/msan/tests/msan_test.cc | 96
1 file changed, 96 insertions(+), 0 deletions(-)
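
The tests cover three flavors of x86 shift intrinsic: _mm_slli_epi16 (SSE2, one scalar count applied to all lanes), _mm_sll_epi16 (SSE2, count read from the low 64 bits of a vector operand, which is why the tests may leave its high half poisoned), and _mm_sllv_epi32 (AVX2, an independent count per 32-bit lane). A standalone sketch of the low-64-bit behavior that the sse2_left test relies on, assuming an SSE2-capable toolchain (build with, e.g., -msse2):

#include <cstdio>
#include <emmintrin.h>

int main() {
  __m128i x = _mm_set1_epi16(1);
  // _mm_sll_epi16 ignores the high 64 bits of the count vector; only
  // the low 64 bits (here, 3) determine the shift.
  __m128i c = _mm_set_epi64x(-1 /* ignored */, 3);
  __m128i r = _mm_sll_epi16(x, c);
  unsigned short out[8];
  _mm_storeu_si128(reinterpret_cast<__m128i *>(out), r);
  printf("%d\n", (int)out[0]);  // prints 8, i.e. 1 << 3
  return 0;
}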
diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc
index 81b96e55f..fb7011eab 100644
--- a/lib/msan/tests/msan_test.cc
+++ b/lib/msan/tests/msan_test.cc
@@ -62,6 +62,10 @@
# define MSAN_HAS_M128 0
#endif
+#ifdef __AVX2__
+# include <immintrin.h>
+#endif
+
static const int kPageSize = 4096;
typedef unsigned char U1;
@@ -3272,6 +3276,98 @@ TEST(MemorySanitizer, UnalignedStore64) {
EXPECT_POISONED_O(x[11], origin);
}
+namespace {
+typedef U2 V8x16 __attribute__((__vector_size__(16)));
+typedef U4 V4x32 __attribute__((__vector_size__(16)));
+typedef U8 V2x64 __attribute__((__vector_size__(16)));
+typedef U4 V8x32 __attribute__((__vector_size__(32)));
+typedef U8 V4x64 __attribute__((__vector_size__(32)));
+
+// _mm_slli_epi16 shifts every 16-bit lane left by the same scalar count.
+V8x16 shift_sse2_left_scalar(V8x16 x, U4 y) {
+ return _mm_slli_epi16(x, y);
+}
+// _mm_sll_epi16 takes its shift count from the low 64 bits of a vector operand.
+V8x16 shift_sse2_left(V8x16 x, V8x16 y) {
+ return _mm_sll_epi16(x, y);
+}
+
+TEST(VectorShiftTest, sse2_left_scalar) {
+ V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+ 4, 5, 6, 7};
+ V8x16 u = shift_sse2_left_scalar(v, 2);
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_NOT_POISONED(u[0] | (~7U));
+ EXPECT_NOT_POISONED(u[1] | (~31U));
+ u[0] = u[1] = 0;
+ EXPECT_NOT_POISONED(u);
+}
+
+TEST(VectorShiftTest, sse2_left_scalar_by_uninit) {
+ V8x16 v = {0, 1, 2, 3, 4, 5, 6, 7};
+ V8x16 u = shift_sse2_left_scalar(v, *GetPoisoned<U4>());
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_POISONED(u[2]);
+ EXPECT_POISONED(u[3]);
+ EXPECT_POISONED(u[4]);
+ EXPECT_POISONED(u[5]);
+ EXPECT_POISONED(u[6]);
+ EXPECT_POISONED(u[7]);
+}
+
+TEST(VectorShiftTest, sse2_left) {
+ V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+ 4, 5, 6, 7};
+ // Top 64 bits of shift count don't affect the result.
+ V2x64 s = {2, *GetPoisoned<U8>()};
+ V8x16 u = shift_sse2_left(v, s);
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_NOT_POISONED(u[0] | (~7U));
+ EXPECT_NOT_POISONED(u[1] | (~31U));
+ u[0] = u[1] = 0;
+ EXPECT_NOT_POISONED(u);
+}
+
+TEST(VectorShiftTest, sse2_left_by_uninit) {
+ V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+ 4, 5, 6, 7};
+ V2x64 s = {*GetPoisoned<U8>(), *GetPoisoned<U8>()};
+ V8x16 u = shift_sse2_left(v, s);
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_POISONED(u[2]);
+ EXPECT_POISONED(u[3]);
+ EXPECT_POISONED(u[4]);
+ EXPECT_POISONED(u[5]);
+ EXPECT_POISONED(u[6]);
+ EXPECT_POISONED(u[7]);
+}
+
+#ifdef __AVX2__
+// _mm_sllv_epi32 is a variable (per-lane) vector shift that is only
+// available starting with AVX2.
+V4x32 shift_avx2_left(V4x32 x, V4x32 y) {
+  return _mm_sllv_epi32(x, y);
+}
+TEST(VectorShiftTest, avx2_left) {
+ V4x32 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3};
+ V4x32 s = {2, *GetPoisoned<U4>(), 3, *GetPoisoned<U4>()};
+ V4x32 u = shift_avx2_left(v, s);
+ EXPECT_POISONED(u[0]);
+ EXPECT_NOT_POISONED(u[0] | (~7U));
+ EXPECT_POISONED(u[1]);
+ EXPECT_POISONED(u[1] | (~31U));
+ EXPECT_NOT_POISONED(u[2]);
+ EXPECT_POISONED(u[3]);
+ EXPECT_POISONED(u[3] | (~31U));
+}
+#endif // __AVX2__
+} // namespace
+
+
TEST(MemorySanitizerDr, StoreInDSOTest) {
if (!__msan_has_dynamic_component()) return;
char* s = new char[10];