author     Tom Stellard <tstellar@redhat.com>    2018-11-30 00:08:50 +0000
committer  Tom Stellard <tstellar@redhat.com>    2018-11-30 00:08:50 +0000
commit     86a400a82de5dad7241841fa086e4bb17b88c483 (patch)
tree       1b2e41c1232eef1bf1fd7ae0b935a7ebfaa3f7e7
parent     6655e514ce4f9dbf2bd6689007308a72b5bda8d7 (diff)
------------------------------------------------------------------------
r347556 | nemanjai | 2018-11-26 06:35:38 -0800 (Mon, 26 Nov 2018) | 11 lines

[PowerPC] Vector load/store builtins overstate alignment of pointers

A number of builtins in altivec.h load/store vectors from pointers to
scalar types. Currently they just cast the pointer to a vector pointer,
but expressions like that have the alignment of the target type. Of
course, the input pointer did not have that alignment so this triggers
UBSan (and rightly so).

This resolves https://bugs.llvm.org/show_bug.cgi?id=39704

Differential revision: https://reviews.llvm.org/D54787
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/cfe/branches/release_70@347935 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Headers/altivec.h                 63
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c   84
-rw-r--r--  test/CodeGen/builtins-ppc-quadword.c  32
-rw-r--r--  test/CodeGen/builtins-ppc-vsx.c       36
4 files changed, 115 insertions, 100 deletions
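As context for the header diff below, here is a minimal standalone sketch of the pattern the patch adopts. The names demo_unaligned_vsi, load_through_cast and load_unaligned are illustrative only and do not appear in altivec.h: a plain cast to a vector pointer tells the compiler the operand is 16-byte aligned, while a typedef carrying __attribute__((aligned(1))) keeps the dereference at the alignment the scalar pointer actually guarantees, which is what keeps -fsanitize=alignment quiet.

/* Sketch only; assumes a PowerPC target compiled with AltiVec/VSX enabled. */
#include <altivec.h>

/* Hypothetical 1-byte-aligned vector type, mirroring the typedefs the patch adds. */
typedef __vector signed int demo_unaligned_vsi __attribute__((aligned(1)));

__vector signed int load_through_cast(signed int *p, signed long long off) {
  /* Old pattern: the cast claims 16-byte alignment the caller never promised,
     so UBSan flags the load whenever p + off is not 16-byte aligned. */
  return *(__vector signed int *)(p + off);
}

__vector signed int load_unaligned(signed int *p, signed long long off) {
  /* New pattern: the aligned(1) typedef turns this into a 1-byte-aligned load,
     matching what a plain scalar pointer actually guarantees. */
  return *(demo_unaligned_vsi *)(p + off);
}

The CHECK lines updated in the tests below reflect exactly this change: the emitted IR loads and stores drop from "align 16" to "align 1".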
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 90fd477d9b..eaa56cb47b 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -16353,67 +16353,82 @@ vec_revb(vector unsigned __int128 __a) {
/* vec_xl */
+typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
+typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
+typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
+typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
+typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
+typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
+typedef vector float unaligned_vec_float __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
signed char *__ptr) {
- return *(vector signed char *)(__ptr + __offset);
+ return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
vec_xl(signed long long __offset, unsigned char *__ptr) {
- return *(vector unsigned char *)(__ptr + __offset);
+ return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
signed short *__ptr) {
- return *(vector signed short *)(__ptr + __offset);
+ return *(unaligned_vec_sshort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned short
vec_xl(signed long long __offset, unsigned short *__ptr) {
- return *(vector unsigned short *)(__ptr + __offset);
+ return *(unaligned_vec_ushort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
signed int *__ptr) {
- return *(vector signed int *)(__ptr + __offset);
+ return *(unaligned_vec_sint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
unsigned int *__ptr) {
- return *(vector unsigned int *)(__ptr + __offset);
+ return *(unaligned_vec_uint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
float *__ptr) {
- return *(vector float *)(__ptr + __offset);
+ return *(unaligned_vec_float *)(__ptr + __offset);
}
#ifdef __VSX__
+typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
+typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
+typedef vector double unaligned_vec_double __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed long long
vec_xl(signed long long __offset, signed long long *__ptr) {
- return *(vector signed long long *)(__ptr + __offset);
+ return *(unaligned_vec_sll *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned long long
vec_xl(signed long long __offset, unsigned long long *__ptr) {
- return *(vector unsigned long long *)(__ptr + __offset);
+ return *(unaligned_vec_ull *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
double *__ptr) {
- return *(vector double *)(__ptr + __offset);
+ return *(unaligned_vec_double *)(__ptr + __offset);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
+typedef vector unsigned __int128 unaligned_vec_ui128
+ __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
vec_xl(signed long long __offset, signed __int128 *__ptr) {
- return *(vector signed __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_si128 *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned __int128
vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
- return *(vector unsigned __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_ui128 *)(__ptr + __offset);
}
#endif
@@ -16498,62 +16513,62 @@ vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
signed long long __offset,
signed char *__ptr) {
- *(vector signed char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
signed long long __offset,
unsigned char *__ptr) {
- *(vector unsigned char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
signed long long __offset,
signed short *__ptr) {
- *(vector signed short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
signed long long __offset,
unsigned short *__ptr) {
- *(vector unsigned short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
signed long long __offset,
signed int *__ptr) {
- *(vector signed int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
signed long long __offset,
unsigned int *__ptr) {
- *(vector unsigned int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec,
signed long long __offset,
float *__ptr) {
- *(vector float *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_float *)(__ptr + __offset) = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
signed long long __offset,
signed long long *__ptr) {
- *(vector signed long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
signed long long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec,
signed long long __offset,
double *__ptr) {
- *(vector double *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_double *)(__ptr + __offset) = __vec;
}
#endif
@@ -16561,13 +16576,13 @@ static inline __ATTRS_o_ai void vec_xst(vector double __vec,
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
signed long long __offset,
signed __int128 *__ptr) {
- *(vector signed __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
signed long long __offset,
unsigned __int128 *__ptr) {
- *(vector unsigned __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
}
#endif
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index 99cf3c2538..8c22de4e1e 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -9338,32 +9338,32 @@ void test9() {
// CHECK-LABEL: define void @test9
// CHECK-LE-LABEL: define void @test9
res_vsc = vec_xl(param_sll, &param_sc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
res_vuc = vec_xl(param_sll, &param_uc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
res_vs = vec_xl(param_sll, &param_s);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
res_vus = vec_xl(param_sll, &param_us);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
res_vi = vec_xl(param_sll, &param_i);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
res_vui = vec_xl(param_sll, &param_ui);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
res_vf = vec_xl(param_sll, &param_f);
- // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
}
/* ------------------------------ vec_xst ----------------------------------- */
@@ -9371,32 +9371,32 @@ void test10() {
// CHECK-LABEL: define void @test10
// CHECK-LE-LABEL: define void @test10
vec_xst(vsc, param_sll, &param_sc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
vec_xst(vuc, param_sll, &param_uc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
vec_xst(vs, param_sll, &param_s);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
vec_xst(vus, param_sll, &param_us);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
vec_xst(vi, param_sll, &param_i);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
vec_xst(vui, param_sll, &param_ui);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
vec_xst(vf, param_sll, &param_f);
- // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 1
}
/* ----------------------------- vec_xl_be ---------------------------------- */
@@ -9404,35 +9404,35 @@ void test11() {
// CHECK-LABEL: define void @test11
// CHECK-LE-LABEL: define void @test11
res_vsc = vec_xl_be(param_sll, &param_sc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
res_vuc = vec_xl_be(param_sll, &param_uc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
res_vs = vec_xl_be(param_sll, &param_s);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
res_vus = vec_xl_be(param_sll, &param_us);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
res_vi = vec_xl_be(param_sll, &param_i);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
res_vui = vec_xl_be(param_sll, &param_ui);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
res_vf = vec_xl_be(param_sll, &param_f);
- // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
}
@@ -9441,34 +9441,34 @@ void test12() {
// CHECK-LABEL: define void @test12
// CHECK-LE-LABEL: define void @test12
vec_xst_be(vsc, param_sll, &param_sc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vuc, param_sll, &param_uc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vs, param_sll, &param_s);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vus, param_sll, &param_us);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vi, param_sll, &param_i);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vui, param_sll, &param_ui);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vf, param_sll, &param_f);
- // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}})
}
diff --git a/test/CodeGen/builtins-ppc-quadword.c b/test/CodeGen/builtins-ppc-quadword.c
index 7d014db613..868fb183a6 100644
--- a/test/CodeGen/builtins-ppc-quadword.c
+++ b/test/CodeGen/builtins-ppc-quadword.c
@@ -205,45 +205,45 @@ void test1() {
/* vec_xl */
res_vlll = vec_xl(param_sll, &param_lll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
res_vulll = vec_xl(param_sll, &param_ulll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
/* vec_xst */
vec_xst(vlll, param_sll, &param_lll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
vec_xst(vulll, param_sll, &param_ulll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
/* vec_xl_be */
res_vlll = vec_xl_be(param_sll, &param_lll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
res_vulll = vec_xl_be(param_sll, &param_ulll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
/* vec_xst_be */
vec_xst_be(vlll, param_sll, &param_lll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
vec_xst_be(vulll, param_sll, &param_ulll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
}
diff --git a/test/CodeGen/builtins-ppc-vsx.c b/test/CodeGen/builtins-ppc-vsx.c
index 848d24d4fb..29b7149e1e 100644
--- a/test/CodeGen/builtins-ppc-vsx.c
+++ b/test/CodeGen/builtins-ppc-vsx.c
@@ -1637,51 +1637,51 @@ res_vsll = vec_slo(vsll, vsc);
// CHECK-LE: @llvm.ppc.altivec.vsro
res_vsll = vec_xl(sll, asll);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
res_vull = vec_xl(sll, aull);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
res_vd = vec_xl(sll, ad);
-// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
-// CHECK-LE: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 1
+// CHECK-LE: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 1
vec_xst(vsll, sll, asll);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
vec_xst(vull, sll, aull);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
vec_xst(vd, sll, ad);
-// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
-// CHECK-LE: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 1
+// CHECK-LE: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 1
res_vsll = vec_xl_be(sll, asll);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
res_vull = vec_xl_be(sll, aull);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
res_vd = vec_xl_be(sll, ad);
-// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
vec_xst_be(vsll, sll, asll);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vull, sll, aull);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vd, sll, ad);
-// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
res_vf = vec_neg(vf);