; No assertions yet because the test case crashes MSan.
;
; Test MemorySanitizer instrumentation for the Arm NEON VST{2,3,4} and
; VST1x{2,3,4} store instructions, including floating-point parameters.
;
; RUN: opt < %s -passes=msan -S | FileCheck %s
;
; UNSUPPORTED: {{.*}}
;
; Generated with:
; grep call clang/test/CodeGen/aarch64-neon-intrinsics.c \
;   | grep 'neon[.]st' \
;   | sed -r 's/^\/\/ CHECK:[ ]*//' \
;   | cut -d ' ' -f 1 --complement \
;   | sed -r 's/[[][[]TMP[0-9]+[]][]]/%A/' \
;   | sed -r 's/[[][[]TMP[0-9]+[]][]]/%B/' \
;   | sed -r 's/[[][[]TMP[0-9]+[]][]]/%C/' \
;   | sed -r 's/[[][[]TMP[0-9]+[]][]]/%D/' \
;   | sort \
;   | uniq \
;   | while read x; \
;     do \
;       y=`echo "$x" \
;           | sed -r 's/@llvm[.]aarch64[.]neon[.]/@/' \
;           | sed -r 's/[.]p0//' \
;           | tr '.' '_'`; \
;       echo "define $y sanitize_memory {"; \
;       echo "  call $x"; \
;       echo "  ret void"; \
;       echo "}"; \
;       echo; \
;     done

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-android9001"

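; Once MSan no longer crashes on these intrinsics, CHECK lines should verify
; that the shadow of each vector operand is propagated to the shadow memory
; corresponding to the destination pointer %a.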
; -----------------------------------------------------------------------------------------------------------------------------------------------

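; llvm.aarch64.neon.st1x2: store two vectors back-to-back (no interleaving) at %a.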
define void @st1x2_v1f64(<1 x double> %A, <1 x double> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> %A, <1 x double> %B, ptr %a)
  ret void
}

define void @st1x2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> %A, <1 x i64> %B, ptr %a)
  ret void
}

define void @st1x2_v2f64(<2 x double> %A, <2 x double> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> %A, <2 x double> %B, ptr %a)
  ret void
}

define void @st1x2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> %A, <2 x i64> %B, ptr %a)
  ret void
}

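; llvm.aarch64.neon.st1x3: store three vectors back-to-back (no interleaving) at %a.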
define void @st1x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %a)
  ret void
}

define void @st1x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %a)
  ret void
}

define void @st1x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %a)
  ret void
}

define void @st1x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %a)
  ret void
}

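; llvm.aarch64.neon.st1x4: store four vectors back-to-back (no interleaving) at %a.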
define void @st1x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %a)
  ret void
}

define void @st1x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %a)
  ret void
}

define void @st1x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %a)
  ret void
}

define void @st1x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %a)
  ret void
}

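; llvm.aarch64.neon.st2: interleave and store the elements of two vectors at %a.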
define void @st2_v16i8(<16 x i8> %A, <16 x i8> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> %A, <16 x i8> %B, ptr %a)
  ret void
}

define void @st2_v1f64(<1 x double> %A, <1 x double> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double> %A, <1 x double> %B, ptr %a)
  ret void
}

define void @st2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> %A, <1 x i64> %B, ptr %a)
  ret void
}

define void @st2_v2f32(<2 x float> %A, <2 x float> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float> %A, <2 x float> %B, ptr %a)
  ret void
}

define void @st2_v2f64(<2 x double> %A, <2 x double> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double> %A, <2 x double> %B, ptr %a)
  ret void
}

define void @st2_v2i32(<2 x i32> %A, <2 x i32> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> %A, <2 x i32> %B, ptr %a)
  ret void
}

define void @st2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> %A, <2 x i64> %B, ptr %a)
  ret void
}

define void @st2_v4f16(<4 x half> %A, <4 x half> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half> %A, <4 x half> %B, ptr %a)
  ret void
}

define void @st2_v4f32(<4 x float> %A, <4 x float> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %A, <4 x float> %B, ptr %a)
  ret void
}

define void @st2_v4i16(<4 x i16> %A, <4 x i16> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> %A, <4 x i16> %B, ptr %a)
  ret void
}

define void @st2_v4i32(<4 x i32> %A, <4 x i32> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %A, <4 x i32> %B, ptr %a)
  ret void
}

define void @st2_v8f16(<8 x half> %A, <8 x half> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v8f16.p0(<8 x half> %A, <8 x half> %B, ptr %a)
  ret void
}

define void @st2_v8i16(<8 x i16> %A, <8 x i16> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> %A, <8 x i16> %B, ptr %a)
  ret void
}

define void @st2_v8i8(<8 x i8> %A, <8 x i8> %B, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %A, <8 x i8> %B, ptr %a)
  ret void
}

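; llvm.aarch64.neon.st3: interleave and store the elements of three vectors at %a.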
define void @st3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %a)
  ret void
}

define void @st3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %a)
  ret void
}

define void @st3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %a)
  ret void
}

define void @st3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %a)
  ret void
}

define void @st3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %a)
  ret void
}

define void @st3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %a)
  ret void
}

define void @st3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %a)
  ret void
}

define void @st3_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v4f16.p0(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr %a)
  ret void
}

define void @st3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %a)
  ret void
}

define void @st3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %a)
  ret void
}

define void @st3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %a)
  ret void
}

define void @st3_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v8f16.p0(<8 x half> %A, <8 x half> %B, <8 x half> %C, ptr %a)
  ret void
}

define void @st3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %a)
  ret void
}

define void @st3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %a)
  ret void
}

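; llvm.aarch64.neon.st4: interleave and store the elements of four vectors at %a.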
define void @st4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %a)
  ret void
}

define void @st4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %a)
  ret void
}

define void @st4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %a)
  ret void
}

define void @st4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %a)
  ret void
}

define void @st4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %a)
  ret void
}

define void @st4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %a)
  ret void
}

define void @st4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %a)
  ret void
}

define void @st4_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, <4 x half> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v4f16.p0(<4 x half> %A, <4 x half> %B, <4 x half> %C, <4 x half> %D, ptr %a)
  ret void
}

define void @st4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %a)
  ret void
}

define void @st4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %a)
  ret void
}

define void @st4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %a)
  ret void
}

define void @st4_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, <8 x half> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v8f16.p0(<8 x half> %A, <8 x half> %B, <8 x half> %C, <8 x half> %D, ptr %a)
  ret void
}

define void @st4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %a)
  ret void
}

define void @st4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %a) sanitize_memory {
  call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %a)
  ret void
}