
Integer halves shuffle pattern produces worse codegen with canonical IR #122425

Open · arsenm opened this issue Jan 10, 2025 · 2 comments

arsenm (Contributor) commented Jan 10, 2025

While investigating a regression related to a shufflevector lowered as integer operations, I noticed this code gets worse codegen with canonical IR.

For old AMDGPU targets without v_perm_b32, there is an extra instruction. With v_perm_b32 the result is net neutral (though I still prefer the non-canonical output: it takes fewer steps in codegen, since it does not rely on the SDWA pass to clean up an extra instruction).

X86 also has one additional instruction in the canonical case:

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefix=GFX7 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 < %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -mtriple=x86_64 -mattr=+avx2 < %s | FileCheck -check-prefix=X86 %s

define i32 @shuffle_as_int(i32 %arg) {
; GFX7-LABEL: shuffle_as_int:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT:    v_alignbit_b32 v0, v1, v0, 16
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: shuffle_as_int:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    s_mov_b32 s4, 0x7060706
; GFX8-NEXT:    v_perm_b32 v0, v0, v0, s4
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: shuffle_as_int:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    s_mov_b32 s4, 0x7060706
; GFX9-NEXT:    v_perm_b32 v0, v0, v0, s4
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; X86-LABEL: shuffle_as_int:
; X86:       # %bb.0:
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    shrl $16, %eax
; X86-NEXT:    shldl $16, %edi, %eax
; X86-NEXT:    retq
  %arg.lshr16 = lshr i32 %arg, 16
  %arg.hi16.in.lo = and i32 %arg.lshr16, 65535
  %zero.low16 = shl i32 %arg.lshr16, 16
  %or = or i32 %arg.hi16.in.lo, %zero.low16
  ret i32 %or
}

; Same function run through instcombine: the and of %arg.lshr16 folds away
; (lshr already zeroed the high bits) and shl(lshr(%arg, 16), 16) becomes
; and(%arg, -65536). Codegen is worse.
define i32 @shuffle_as_int_canonical(i32 %arg) {
; GFX7-LABEL: shuffle_as_int_canonical:
; GFX7:       ; %bb.0:
; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
; GFX7-NEXT:    v_or_b32_e32 v0, v1, v0
; GFX7-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: shuffle_as_int_canonical:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v0
; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: shuffle_as_int_canonical:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
; GFX9-NEXT:    s_mov_b32 s4, 0xffff0000
; GFX9-NEXT:    v_and_or_b32 v0, v0, s4, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; X86-LABEL: shuffle_as_int_canonical:
; X86:       # %bb.0:
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    shrl $16, %eax
; X86-NEXT:    andl $-65536, %edi # imm = 0xFFFF0000
; X86-NEXT:    orl %edi, %eax
; X86-NEXT:    retq
  %arg.lshr16 = lshr i32 %arg, 16
  %zero.low16 = and i32 %arg, -65536
  %or = or disjoint i32 %arg.lshr16, %zero.low16
  ret i32 %or
}
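
For reference, here is a minimal sketch of the vector shuffle this integer pattern corresponds to (the @shuffle_as_vector_sketch name and the <2 x i16> framing are illustrative, not part of the test above): both 16-bit halves of the result take the high half of %arg, i.e. a <1, 1> shuffle of the i32 viewed as <2 x i16>, assuming little-endian lane order as on the targets tested here.

define i32 @shuffle_as_vector_sketch(i32 %arg) {
  ; View the i32 as two i16 halves (element 0 is the low half on
  ; little-endian targets).
  %vec = bitcast i32 %arg to <2 x i16>
  ; Broadcast the high half into both lanes.
  %shuf = shufflevector <2 x i16> %vec, <2 x i16> poison, <2 x i32> <i32 1, i32 1>
  %ret = bitcast <2 x i16> %shuf to i32
  ret i32 %ret
}

Running opt -passes=instcombine on @shuffle_as_int yields @shuffle_as_int_canonical above.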
@llvmbot (Member) commented Jan 10, 2025

@llvm/issue-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

@llvmbot (Member) commented Jan 10, 2025

@llvm/issue-subscribers-backend-x86

Author: Matt Arsenault (arsenm)