Sub-optimal use of the 'haddps' instruction in a (vectorized) (4x4)' * (4x1) product
Submitted by Wouter Vermaelen
Assigned to Nobody
Link to original bugzilla bug (#312)
Version: 3.0
Platform: x86 - SSE
Description
The following two routines are functionally equivalent, but the second one generates better x86 SSE code. The main (and only) difference is the more effective use of the 'haddps' instruction.
// Reference version: computes r = m^T * v through Eigen's expression
// templates. The assembly the compiler emits for this routine is shown
// further below; it reduces each of the four dot products independently.
void func1(Matrix4f& m, Vector4f& v, Vector4f& r)
{
r = m.transpose() * v;  // four dot products: v against each column of m
}
// Hand-vectorized equivalent of func1: four dot products of v against the
// columns of m, reduced pairwise with the SSE3 horizontal add 'haddps'.
// NOTE(review): &m(0, c) is treated as the start of column c — this assumes
// Eigen's default column-major storage (confirm); _mm_load_ps/_mm_store_ps
// additionally require 16-byte-aligned data.
void func2(Matrix4f& m, Vector4f& v, Vector4f& r)
{
__m128 v0 = _mm_load_ps(&v(0));        // v0 = [v0 v1 v2 v3]
__m128 m0 = _mm_load_ps(&m(0, 0));     // column 0 of m
__m128 m1 = _mm_load_ps(&m(0, 1));     // column 1 of m
__m128 m2 = _mm_load_ps(&m(0, 2));     // column 2 of m
__m128 m3 = _mm_load_ps(&m(0, 3));     // column 3 of m
__m128 t0 = _mm_mul_ps(v0, m0);        // element-wise products; each ti
__m128 t1 = _mm_mul_ps(v0, m1);        // holds the four partial terms of
__m128 t2 = _mm_mul_ps(v0, m2);        // one dot product, still unsummed
__m128 t3 = _mm_mul_ps(v0, m3);
__m128 t01 = _mm_hadd_ps(t0, t1);      // pairwise sums of t0 and t1
__m128 t23 = _mm_hadd_ps(t2, t3);      // pairwise sums of t2 and t3
__m128 t0123 = _mm_hadd_ps(t01, t23);  // t0123 = [dot0 dot1 dot2 dot3]
_mm_store_ps(&r(0), t0123);            // r = m^T * v in one vector store
}
# Compiler output for func1 (Eigen expression-template version).
# Each of the four dot products is reduced independently (two haddps each),
# the scalar results are spilled one at a time to the stack, and the full
# vector is then reloaded — 8 haddps plus a round-trip through memory.
# Registers per the SysV AMD64 calling convention: %rdi = &m, %rsi = &v,
# %rdx = &r.
func1(Eigen::Matrix<float, 4, 4, 0, 4, 4>&, Eigen::Matrix<float, 4, 1, 0, 4, 1>&, Eigen::Matrix<float, 4, 1, 0, 4, 1>&):
movaps (%rsi), %xmm0        # xmm0 = v
movaps (%rdi), %xmm1        # xmm1 = first 4 floats of m
mulps %xmm0, %xmm1          # element-wise products for dot 0
haddps %xmm1, %xmm1         # first horizontal reduction step
haddps %xmm1, %xmm1         # second step: every lane now holds dot 0
movss %xmm1, -24(%rsp)      # spill scalar result 0 to the stack
movaps 16(%rdi), %xmm1      # next 4 floats of m
mulps %xmm0, %xmm1          # products for dot 1
haddps %xmm1, %xmm1
haddps %xmm1, %xmm1
movss %xmm1, -20(%rsp)      # spill scalar result 1
movaps 32(%rdi), %xmm1      # next 4 floats of m
mulps %xmm0, %xmm1          # products for dot 2
mulps 48(%rdi), %xmm0       # products for dot 3 (clobbers the copy of v)
haddps %xmm1, %xmm1
haddps %xmm0, %xmm0
haddps %xmm1, %xmm1
haddps %xmm0, %xmm0
movss %xmm1, -16(%rsp)      # spill scalar result 2
movss %xmm0, -12(%rsp)      # spill scalar result 3
movaps -24(%rsp), %xmm0     # reload the four scalars as one vector
movaps %xmm0, (%rdx)        # store the result into r
ret
# Compiler output for func2 (hand-written intrinsics version).
# The four reductions are interleaved, so only 3 haddps instructions and a
# single full-vector store are needed — no scalar spills to the stack.
# Registers per the SysV AMD64 calling convention: %rdi = &m, %rsi = &v,
# %rdx = &r.
func2(Eigen::Matrix<float, 4, 4, 0, 4, 4>&, Eigen::Matrix<float, 4, 1, 0, 4, 1>&, Eigen::Matrix<float, 4, 1, 0, 4, 1>&):
movaps (%rsi), %xmm2        # xmm2 = v
movaps (%rdi), %xmm0        # first 4 floats of m
movaps 16(%rdi), %xmm3      # next 4 floats of m
mulps %xmm2, %xmm0          # products for dot 0
mulps %xmm2, %xmm3          # products for dot 1
movaps 32(%rdi), %xmm1      # next 4 floats of m
haddps %xmm3, %xmm0         # partial combine of dots 0 and 1
mulps %xmm2, %xmm1          # products for dot 2
mulps 48(%rdi), %xmm2       # products for dot 3 (clobbers the copy of v)
haddps %xmm2, %xmm1         # partial combine of dots 2 and 3
haddps %xmm1, %xmm0         # final: xmm0 = [dot0 dot1 dot2 dot3]
movaps %xmm0, (%rdx)        # store the result into r
ret