diff --git a/articles/simd-rect/page.mmd b/articles/simd-rect/page.mmd
index 4df1b8e..4150bc7 100644
--- a/articles/simd-rect/page.mmd
+++ b/articles/simd-rect/page.mmd
@@ -19,15 +19,15 @@ pub fn RectSIMD(comptime T: type) type {
         }
 
         pub fn isRectWithin(self: @This(), a: @This()) bool {
-            const q = @shuffle(T, a.xyxy, self.xyxy, [16]i32{ -1, -2, 0, 1, -1, -2, 2, 1, -1, -2, 0, 3, -1, -2, 2, 3 });
-            const w = @shuffle(T, a.xyxy, self.xyxy, [16]i32{ 0, 1, -3, -4, 2, 1, -3, -4, 0, 3, -3, -4, 2, 3, -3, -4 });
+            const q = @shuffle(T, a.xyxy, self.xyxy, [8]i32{ 0, 1, 2, 3, -1, -2, -1, -2 });
+            const w = @shuffle(T, a.xyxy, self.xyxy, [8]i32{ -3, -4, -3, -4, 0, 1, 2, 3 });
             return @reduce(.And, q <= w);
         }
 
         // todo: Handle zero area cases?
         pub fn isRectIntersecting(self: @This(), a: @This()) bool {
-            const q = @shuffle(T, a.xyxy, self.xyxy, [4]i32{ 0, -1, -2, 1 });
-            const w = @shuffle(T, a.xyxy, self.xyxy, [4]i32{ -3, 2, 3, -4 });
+            const q = @shuffle(T, a.xyxy, self.xyxy, [4]i32{ 0, 1, -1, -2 });
+            const w = @shuffle(T, a.xyxy, self.xyxy, [4]i32{ -3, -4, 2, 3 });
             return @reduce(.And, q <= w);
         }
     };
@@ -53,26 +53,21 @@ For 32bit floating point:
 
 "example.RectSIMD(f32).isRectWithin":
         vmovaps xmm2, xmmword ptr [rsi]
-        vmovaps xmm0, xmmword ptr [rdi]
-        vmovaps xmm1, xmm2
-        vmovaps xmm3, xmm0
-        vmovdqa64 zmm2, zmmword ptr [rip + .LCPI2_0]
-        vmovaps zmm0, zmm3
-        vpermt2ps zmm0, zmm2, zmm1
-        vmovdqa64 zmm2, zmmword ptr [rip + .LCPI2_1]
-        vpermt2ps zmm1, zmm2, zmm3
-        vcmpleps k0, zmm0, zmm1
-        kortestw k0, k0
+        vmovaps xmm0, xmm2
+        vmovddup xmm1, qword ptr [rdi]
+        vinsertf128 ymm0, ymm0, xmm1, 1
+        vmovddup xmm3, qword ptr [rdi + 8]
+        vmovaps xmm1, xmm3
+        vinsertf128 ymm1, ymm1, xmm2, 1
+        vcmpleps k0, ymm0, ymm1
+        kortestb k0, k0
         setb al
 
 "example.RectSIMD(f32).isRectIntersecting":
-        vmovaps xmm1, xmmword ptr [rsi]
-        vmovaps xmm3, xmmword ptr [rdi]
-        vmovdqa xmm2, xmmword ptr [rip + .LCPI3_0]
-        vmovaps xmm0, xmm1
-        vpermt2ps xmm0, xmm2, xmm3
-        vmovdqa xmm2, xmmword ptr [rip + .LCPI3_1]
-        vpermt2ps xmm1, xmm2, xmm3
+        vmovapd xmm2, xmmword ptr [rsi]
+        vmovapd xmm1, xmmword ptr [rdi]
+        vunpcklpd xmm0, xmm2, xmm1
+        vunpckhpd xmm1, xmm1, xmm2
         vcmpleps k0, xmm0, xmm1
         kmovd eax, k0
         sub al, 15
@@ -93,34 +88,28 @@ For 32bit signed integers it fares amazing too:
 
 "example.RectSIMD(i32).isRectWithin":
         vmovdqa xmm2, xmmword ptr [rsi]
-        vmovdqa xmm0, xmmword ptr [rdi]
-        vmovaps xmm1, xmm2
-        vmovaps xmm3, xmm0
-        vmovdqa64 zmm2, zmmword ptr [rip + .LCPI2_0]
-        vmovaps zmm0, zmm3
-        vpermt2d zmm0, zmm2, zmm1
-        vmovdqa64 zmm2, zmmword ptr [rip + .LCPI2_1]
-        vpermt2d zmm1, zmm2, zmm3
-        vpcmpled k0, zmm0, zmm1
-        kortestw k0, k0
+        vmovaps xmm0, xmm2
+        vpbroadcastq xmm1, qword ptr [rdi]
+        vinserti128 ymm0, ymm0, xmm1, 1
+        vpbroadcastq xmm3, qword ptr [rdi + 8]
+        vmovaps xmm1, xmm3
+        vinserti128 ymm1, ymm1, xmm2, 1
+        vpcmpled k0, ymm0, ymm1
+        kortestb k0, k0
         setb al
 
 "example.RectSIMD(i32).isRectIntersecting":
-        vmovdqa xmm1, xmmword ptr [rsi]
-        vmovdqa xmm3, xmmword ptr [rdi]
-        vpbroadcastq xmm0, qword ptr [rsi]
-        vmovdqa xmm2, xmmword ptr [rip + .LCPI3_0]
-        vpermt2d xmm0, xmm2, xmm3
-        vpbroadcastq xmm3, qword ptr [rdi + 8]
-        vmovdqa xmm2, xmmword ptr [rip + .LCPI3_1]
-        vpermt2d xmm1, xmm2, xmm3
+        vmovdqa xmm2, xmmword ptr [rsi]
+        vmovdqa xmm1, xmmword ptr [rdi]
+        vpunpcklqdq xmm0, xmm2, xmm1
+        vpunpckhqdq xmm1, xmm1, xmm2
         vpcmpled k0, xmm0, xmm1
         kmovd eax, k0
         sub al, 15
         sete al
 ```
 
-64bit floating point, now even on AVX512 some ops are not done at once:
+64bit floating point:
 ```asm
 "example.RectSIMD(f64).isPointWithin":
         vmovaps xmm3, xmm0
@@ -135,49 +124,34 @@ For 32bit signed integers it fares amazing too:
         pop rbp
 
 "example.RectSIMD(f64).isRectWithin":
-        vmovapd ymm3, ymmword ptr [rsi]
-        vmovapd ymm5, ymmword ptr [rdi]
-        vblendpd ymm0, ymm5, ymm3, 8
-        vperm2f128 ymm4, ymm5, ymm3, 32
-        vblendpd ymm1, ymm4, ymm0, 10
-        vmovaps ymm0, ymm1
-        vblendpd ymm1, ymm5, ymm3, 12
-        vinsertf64x4 zmm0, zmm0, ymm1, 1
-        vblendpd ymm1, ymm5, ymm3, 4
-        vblendpd ymm2, ymm1, ymm4, 10
-        vmovaps ymm1, ymm4
-        vinsertf64x4 zmm2, zmm1, ymm2, 1
-        vperm2f128 ymm4, ymm3, ymm5, 49
-        vblendpd ymm1, ymm3, ymm5, 4
-        vblendpd ymm6, ymm1, ymm4, 10
-        vmovaps ymm1, ymm6
-        vinsertf64x4 zmm1, zmm1, ymm4, 1
-        vblendpd ymm6, ymm3, ymm5, 8
-        vblendpd ymm4, ymm4, ymm6, 10
-        vblendpd ymm5, ymm3, ymm5, 12
-        vmovaps ymm3, ymm5
-        vinsertf64x4 zmm3, zmm3, ymm4, 1
-        vcmplepd k1, zmm2, zmm3
+        vmovapd ymm2, ymmword ptr [rsi]
+        vmovapd ymm1, ymmword ptr [rdi]
+        vmovaps ymm0, ymm2
+        vpermpd ymm3, ymm1, 68
+        vinsertf64x4 zmm0, zmm0, ymm3, 1
+        vpermpd ymm3, ymm1, 238
+        vmovaps ymm1, ymm3
+        vinsertf64x4 zmm1, zmm1, ymm2, 1
         vcmplepd k0, zmm0, zmm1
-        kunpckbw k0, k0, k1
-        kortestw k0, k0
+        kortestb k0, k0
         setb al
 
 "example.RectSIMD(f64).isRectIntersecting":
-        vmovapd ymm3, ymmword ptr [rsi]
+        vmovapd ymm2, ymmword ptr [rsi]
         vmovapd ymm1, ymmword ptr [rdi]
-        vmovdqa ymm2, ymmword ptr [rip + .LCPI3_0]
-        vmovaps ymm0, ymm3
-        vpermt2pd ymm0, ymm2, ymm1
-        vmovdqa ymm2, ymmword ptr [rip + .LCPI3_1]
-        vpermt2pd ymm1, ymm2, ymm3
+        vperm2f128 ymm0, ymm2, ymm1, 32
+        vperm2f128 ymm1, ymm1, ymm2, 49
         vcmplepd k0, ymm0, ymm1
         kmovd eax, k0
         sub al, 15
         sete al
 ```
 
-So, selection of coordinate data type plays quite big role, especially when extensions are not provided.
+AVX512 makes it so that there is no big penalty for double-precision types, which is nice. Note that permutation masks are also supplied alongside the code, which increases binary size; with inlining this could become quite substantial if per-object %rip-relative addressing is used.
+
+### Edits ###
+- Reordered to use packed vectors without swizzling when possible.
+- Eliminated redundant computations.
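For readers decoding the new `@shuffle` masks: in Zig, a negative mask element `-n` selects element `n - 1` from the second vector operand. Below is a minimal scalar sketch of what the reworked `isRectIntersecting` shuffles compute, assuming `xyxy` stores `{ x0, y0, x1, y1 }` (min corner first, max corner second); the plain `Rect` type here is illustrative, not part of the patched article.

```zig
const std = @import("std");

// With operands (a.xyxy, self.xyxy), the new masks build:
//   q = { a.x0, a.y0, self.x0, self.y0 }  <- mask { 0, 1, -1, -2 }
//   w = { self.x1, self.y1, a.x1, a.y1 }  <- mask { -3, -4, 2, 3 }
// so @reduce(.And, q <= w) is exactly the classic AABB overlap test:
fn Rect(comptime T: type) type {
    return struct {
        x0: T, y0: T, x1: T, y1: T,

        fn isRectIntersecting(self: @This(), a: @This()) bool {
            return a.x0 <= self.x1 and a.y0 <= self.y1 and
                self.x0 <= a.x1 and self.y0 <= a.y1;
        }
    };
}

test "scalar overlap test agrees with intuition" {
    const R = Rect(f32);
    const r = R{ .x0 = 0, .y0 = 0, .x1 = 2, .y1 = 2 };
    const s = R{ .x0 = 1, .y0 = 1, .x1 = 3, .y1 = 3 }; // overlaps r
    const t = R{ .x0 = 5, .y0 = 5, .x1 = 6, .y1 = 6 }; // disjoint from r
    try std.testing.expect(r.isRectIntersecting(s));
    try std.testing.expect(!r.isRectIntersecting(t));
}
```

The reordering matters because grouping both low corners into `q` and both high corners into `w` lets the compiler lower each shuffle to a single unpack (`vunpcklpd`/`vunpckhpd`) instead of loading a permutation mask from memory.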