@@ -5363,15 +5363,15 @@ define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512-LABEL: test_mm_store_pd1:
 ; X86-AVX512: # %bb.0:
 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_mm_store_pd1:
@@ -5385,14 +5385,14 @@ define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
 ; X64-AVX1: # %bb.0:
 ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512-LABEL: test_mm_store_pd1:
 ; X64-AVX512: # %bb.0:
 ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
 %arg0 = bitcast double* %a0 to <2 x double>*
 %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
@@ -5489,15 +5489,15 @@ define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
 ; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
 ; X86-AVX1-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512-LABEL: test_mm_store1_pd:
 ; X86-AVX512: # %bb.0:
 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
 ; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
 ; X86-AVX512-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_mm_store1_pd:
@@ -5511,14 +5511,14 @@ define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
 ; X64-AVX1: # %bb.0:
 ; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
 ; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX1-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512-LABEL: test_mm_store1_pd:
 ; X64-AVX512: # %bb.0:
 ; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
 ; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX512-NEXT: retq # encoding: [0xc3]
 %arg0 = bitcast double* %a0 to <2 x double>*
 %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
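For reference, a minimal C sketch of the pattern these tests exercise (buffer name and value are illustrative, and the alignment attribute is GCC/Clang-specific): _mm_store_pd1 and _mm_store1_pd both broadcast the low double of the vector into both lanes and perform a 16-byte-aligned 128-bit store, which is why the checks expect a vmovddup followed by a single aligned vector store.

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    /* 16-byte-aligned destination, as both intrinsics require. */
    double buf[2] __attribute__((aligned(16)));
    __m128d v = _mm_set_sd(3.0); /* low lane = 3.0, high lane = 0.0 */

    /* Duplicates the low double into both elements and stores them
       with one aligned 128-bit store (vmovddup + vmovaps in the AVX
       output checked above). */
    _mm_store_pd1(buf, v);
    printf("%f %f\n", buf[0], buf[1]); /* 3.000000 3.000000 */

    /* _mm_store1_pd is the same operation under its other name. */
    _mm_store1_pd(buf, v);
    printf("%f %f\n", buf[0], buf[1]); /* 3.000000 3.000000 */
    return 0;
}

The vmovapd-to-vmovaps change in the diff only swaps the store's domain (double to single); both are aligned 128-bit moves with identical semantics here, and the ps form has the shorter VEX prefix byte (0xf8 vs 0xf9).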