Diffstat (limited to 'pixman/pixman/pixman-mmx.c')
-rw-r--r-- | pixman/pixman/pixman-mmx.c | 546
1 file changed, 455 insertions(+), 91 deletions(-)
diff --git a/pixman/pixman/pixman-mmx.c b/pixman/pixman/pixman-mmx.c
index 9fd1a76f9..b14201a4e 100644
--- a/pixman/pixman/pixman-mmx.c
+++ b/pixman/pixman/pixman-mmx.c
@@ -33,9 +33,13 @@
 #include <config.h>
 #endif
 
-#if defined USE_X86_MMX || defined USE_ARM_IWMMXT
+#if defined USE_X86_MMX || defined USE_ARM_IWMMXT || defined USE_LOONGSON_MMI
 
+#ifdef USE_LOONGSON_MMI
+#include <loongson-mmintrin.h>
+#else
 #include <mmintrin.h>
+#endif
 
 #include "pixman-private.h"
 #include "pixman-combine32.h"
@@ -63,6 +67,19 @@ _mm_empty (void)
 /* We have to compile with -msse to use xmmintrin.h, but that causes SSE
  * instructions to be generated that we don't want. Just duplicate the
  * functions we want to use.  */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_pi8 (__m64 __A)
+{
+    int ret;
+
+    asm ("pmovmskb %1, %0\n\t"
+        : "=r" (ret)
+        : "y" (__A)
+    );
+
+    return ret;
+}
+
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mulhi_pu16 (__m64 __A, __m64 __B)
 {
@@ -125,11 +142,14 @@ _mm_shuffle_pi16 (__m64 __A, int8_t const __N)
  * uint64_t and __m64 values, then define USE_CVT_INTRINSICS.
  * If __m64 and uint64_t values can just be cast to each other directly,
  * then define USE_M64_CASTS.
+ * If __m64 is a double datatype, then define USE_M64_DOUBLE.
  */
 #ifdef _MSC_VER
 # define M64_MEMBER m64_u64
 #elif defined(__ICC)
 # define USE_CVT_INTRINSICS
+#elif defined(USE_LOONGSON_MMI)
+# define USE_M64_DOUBLE
 #elif defined(__GNUC__)
 # define USE_M64_CASTS
 #elif defined(__SUNPRO_C)
@@ -147,7 +167,7 @@ _mm_shuffle_pi16 (__m64 __A, int8_t const __N)
 # endif
 #endif
 
-#if defined(USE_M64_CASTS) || defined(USE_CVT_INTRINSICS)
+#if defined(USE_M64_CASTS) || defined(USE_CVT_INTRINSICS) || defined(USE_M64_DOUBLE)
 typedef uint64_t mmxdatafield;
 #else
 typedef __m64 mmxdatafield;
@@ -159,15 +179,21 @@ typedef struct
     mmxdatafield mmx_4x0080;
     mmxdatafield mmx_565_rgb;
     mmxdatafield mmx_565_unpack_multiplier;
+    mmxdatafield mmx_565_pack_multiplier;
     mmxdatafield mmx_565_r;
     mmxdatafield mmx_565_g;
     mmxdatafield mmx_565_b;
+    mmxdatafield mmx_packed_565_rb;
+    mmxdatafield mmx_packed_565_g;
+#ifndef USE_LOONGSON_MMI
    mmxdatafield mmx_mask_0;
     mmxdatafield mmx_mask_1;
     mmxdatafield mmx_mask_2;
     mmxdatafield mmx_mask_3;
+#endif
     mmxdatafield mmx_full_alpha;
     mmxdatafield mmx_4x0101;
+    mmxdatafield mmx_ff000000;
 } mmx_data_t;
 
 #if defined(_MSC_VER)
@@ -184,21 +210,29 @@ static const mmx_data_t c =
     MMXDATA_INIT (.mmx_4x0080,                   0x0080008000800080),
     MMXDATA_INIT (.mmx_565_rgb,                  0x000001f0003f001f),
     MMXDATA_INIT (.mmx_565_unpack_multiplier,    0x0000008404100840),
+    MMXDATA_INIT (.mmx_565_pack_multiplier,      0x2000000420000004),
     MMXDATA_INIT (.mmx_565_r,                    0x000000f800000000),
     MMXDATA_INIT (.mmx_565_g,                    0x0000000000fc0000),
     MMXDATA_INIT (.mmx_565_b,                    0x00000000000000f8),
+    MMXDATA_INIT (.mmx_packed_565_rb,            0x00f800f800f800f8),
+    MMXDATA_INIT (.mmx_packed_565_g,             0x0000fc000000fc00),
+#ifndef USE_LOONGSON_MMI
     MMXDATA_INIT (.mmx_mask_0,                   0xffffffffffff0000),
     MMXDATA_INIT (.mmx_mask_1,                   0xffffffff0000ffff),
     MMXDATA_INIT (.mmx_mask_2,                   0xffff0000ffffffff),
     MMXDATA_INIT (.mmx_mask_3,                   0x0000ffffffffffff),
+#endif
     MMXDATA_INIT (.mmx_full_alpha,               0x00ff000000000000),
     MMXDATA_INIT (.mmx_4x0101,                   0x0101010101010101),
+    MMXDATA_INIT (.mmx_ff000000,                 0xff000000ff000000),
 };
 
 #ifdef USE_CVT_INTRINSICS
 #    define MC(x) to_m64 (c.mmx_ ## x)
 #elif defined(USE_M64_CASTS)
 #    define MC(x) ((__m64)c.mmx_ ## x)
+#elif defined(USE_M64_DOUBLE)
+#    define MC(x) (*(__m64 *)&c.mmx_ ## x)
 #else
 #    define MC(x) c.mmx_ ## x
 #endif
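Note on the new USE_M64_DOUBLE case: on Loongson, __m64 is a floating-point (double) typedef, so the table of uint64_t constants cannot be value-cast to __m64 the way USE_M64_CASTS does; MC() instead reinterprets the stored bits through a pointer. A minimal stand-alone model of that distinction (the names here are illustrative, not from the source):

    #include <stdint.h>

    typedef double m64_like;    /* stand-in for Loongson's __m64 */

    static const uint64_t mmx_full_alpha = 0x00ff000000000000ULL;

    /* (m64_like) mmx_full_alpha would convert the *value* to about 7.2e16;
     * what MC() wants is the same 64 bits viewed under the other type: */
    static inline m64_like mc_full_alpha (void)
    {
        return *(const m64_like *) &mmx_full_alpha;
    }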
@@ -213,6 +247,8 @@ to_m64 (uint64_t x)
 
     res.M64_MEMBER = x;
     return res;
+#elif defined USE_M64_DOUBLE
+    return *(__m64 *)&x;
 #else /* USE_M64_CASTS */
     return (__m64)x;
 #endif
@@ -226,6 +262,8 @@ to_uint64 (__m64 x)
 #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
     uint64_t res = x.M64_MEMBER;
     return res;
+#elif defined USE_M64_DOUBLE
+    return *(uint64_t *)&x;
 #else /* USE_M64_CASTS */
     return (uint64_t)x;
 #endif
@@ -324,7 +362,7 @@ in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
 
 /* Elemental unaligned loads */
 
-static force_inline __m64 ldq_u(uint64_t *p)
+static force_inline __m64 ldq_u(__m64 *p)
 {
 #ifdef USE_X86_MMX
     /* x86's alignment restrictions are very relaxed. */
@@ -337,7 +375,7 @@ static force_inline __m64 ldq_u(uint64_t *p)
     aligned_p = (__m64 *)((uintptr_t)p & ~7);
     return (__m64) _mm_align_si64 (aligned_p[0], aligned_p[1], align);
 #else
-    struct __una_u64 { uint64_t x __attribute__((packed)); };
+    struct __una_u64 { __m64 x __attribute__((packed)); };
     const struct __una_u64 *ptr = (const struct __una_u64 *) p;
     return (__m64) ptr->x;
 #endif
@@ -356,16 +394,35 @@ static force_inline uint32_t ldl_u(const uint32_t *p)
 }
 
 static force_inline __m64
+load (const uint32_t *v)
+{
+#ifdef USE_LOONGSON_MMI
+    __m64 ret;
+    asm ("lwc1 %0, %1\n\t"
+        : "=f" (ret)
+        : "m" (*v)
+    );
+    return ret;
+#else
+    return _mm_cvtsi32_si64 (*v);
+#endif
+}
+
+static force_inline __m64
 load8888 (const uint32_t *v)
 {
-    return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (*v), _mm_setzero_si64 ());
+#ifdef USE_LOONGSON_MMI
+    return _mm_unpacklo_pi8_f (*(__m32 *)v, _mm_setzero_si64 ());
+#else
+    return _mm_unpacklo_pi8 (load (v), _mm_setzero_si64 ());
+#endif
 }
 
 static force_inline __m64
 load8888u (const uint32_t *v)
 {
-    uint32_t l = ldl_u(v);
-    return load8888(&l);
+    uint32_t l = ldl_u (v);
+    return load8888 (&l);
 }
 
 static force_inline __m64
@@ -375,10 +432,52 @@ pack8888 (__m64 lo, __m64 hi)
 }
 
 static force_inline void
-store8888 (uint32_t *dest, __m64 v)
+store (uint32_t *dest, __m64 v)
 {
-    v = pack8888 (v, _mm_setzero_si64());
+#ifdef USE_LOONGSON_MMI
+    asm ("swc1 %1, %0\n\t"
+        : "=m" (*dest)
+        : "f" (v)
+        : "memory"
+    );
+#else
     *dest = _mm_cvtsi64_si32 (v);
+#endif
+}
+
+static force_inline void
+store8888 (uint32_t *dest, __m64 v)
+{
+    v = pack8888 (v, _mm_setzero_si64 ());
+    store (dest, v);
+}
+
+static force_inline pixman_bool_t
+is_equal (__m64 a, __m64 b)
+{
+#ifdef USE_LOONGSON_MMI
+    /* __m64 is double, we can compare directly. */
+    return a == b;
+#else
+    return _mm_movemask_pi8 (_mm_cmpeq_pi8 (a, b)) == 0xff;
+#endif
+}
+
+static force_inline pixman_bool_t
+is_opaque (__m64 v)
+{
+#ifdef USE_LOONGSON_MMI
+    return is_equal (_mm_and_si64 (v, MC (full_alpha)), MC (full_alpha));
+#else
+    __m64 ffs = _mm_cmpeq_pi8 (v, v);
+    return (_mm_movemask_pi8 (_mm_cmpeq_pi8 (v, ffs)) & 0x40);
+#endif
+}
+
+static force_inline pixman_bool_t
+is_zero (__m64 v)
+{
+    return is_equal (v, _mm_setzero_si64 ());
 }
 
 /* Expand 16 bits positioned at @pos (0-3) of a mmx register into
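Note on the x86 is_opaque() test: load8888() expands a packed 0xAARRGGBB pixel into four 16-bit lanes (BB, GG, RR, AA from low to high), so alpha's significant byte is byte 6 of the register, and bit 0x40 of _mm_movemask_pi8's result is set exactly when that byte compared equal to 0xff. A scalar sketch of the same predicate, treating the register as a uint64_t:

    #include <stdint.h>

    /* v models an expanded pixel: lanes 00BB 00GG 00RR 00AA, low lane first */
    static inline int is_opaque_scalar (uint64_t v)
    {
        return ((v >> 48) & 0xff) == 0xff;  /* byte 6: alpha's low byte */
    }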
@@ -402,7 +501,11 @@ expand565 (__m64 pixel, int pos)
     __m64 t1, t2;
 
     /* move pixel to low 16 bit and zero the rest */
+#ifdef USE_LOONGSON_MMI
+    p = loongson_extract_pi16 (p, pos);
+#else
     p = shift (shift (p, (3 - pos) * 16), -48);
+#endif
 
     t1 = shift (p, 36 - 11);
     t2 = shift (p, 16 - 5);
@@ -441,6 +544,15 @@ pack_565 (__m64 pixel, __m64 target, int pos)
     g = _mm_and_si64 (p, MC (565_g));
     b = _mm_and_si64 (p, MC (565_b));
 
+#ifdef USE_LOONGSON_MMI
+    r = shift (r, -(32 - 8));
+    g = shift (g, -(16 - 3));
+    b = shift (b, -(0 + 3));
+
+    p = _mm_or_si64 (r, g);
+    p = _mm_or_si64 (p, b);
+    return loongson_insert_pi16 (t, p, pos);
+#else
     r = shift (r, -(32 - 8) + pos * 16);
     g = shift (g, -(16 - 3) + pos * 16);
     b = shift (b, -(0 + 3) + pos * 16);
@@ -458,6 +570,32 @@ pack_565 (__m64 pixel, __m64 target, int pos)
 
     p = _mm_or_si64 (g, p);
     return _mm_or_si64 (b, p);
+#endif
+}
+
+static force_inline __m64
+pack_4xpacked565 (__m64 a, __m64 b)
+{
+    __m64 rb0 = _mm_and_si64 (a, MC (packed_565_rb));
+    __m64 rb1 = _mm_and_si64 (b, MC (packed_565_rb));
+
+    __m64 t0 = _mm_madd_pi16 (rb0, MC (565_pack_multiplier));
+    __m64 t1 = _mm_madd_pi16 (rb1, MC (565_pack_multiplier));
+
+    __m64 g0 = _mm_and_si64 (a, MC (packed_565_g));
+    __m64 g1 = _mm_and_si64 (b, MC (packed_565_g));
+
+    t0 = _mm_or_si64 (t0, g0);
+    t1 = _mm_or_si64 (t1, g1);
+
+    t0 = shift(t0, -5);
+#ifdef USE_ARM_IWMMXT
+    t1 = shift(t1, -5);
+    return _mm_packs_pu32 (t0, t1);
+#else
+    t1 = shift(t1, -5 + 16);
+    return _mm_shuffle_pi16 (_mm_or_si64 (t0, t1), _MM_SHUFFLE (3, 1, 2, 0));
+#endif
 }
 
 #ifndef _MSC_VER
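Note on the pack_4xpacked565 arithmetic: masking a packed pixel with 0x00f800f8 leaves the top five blue bits in the low 16-bit half and the top five red bits in the high half, and one _mm_madd_pi16 against the (0x0004, 0x2000) multiplier pair computes b<<2 + r<<13 per pixel; after OR-ing in the 0xfc00 green bits, a right shift by 5 leaves r at bits 15-11, g at 10-5, b at 4-0. A scalar model of the per-pixel math (a sketch):

    #include <stdint.h>

    static inline uint16_t pack565_scalar (uint32_t s)
    {
        uint32_t b = s & 0xf8;            /* low lane of the rb mask  */
        uint32_t r = (s >> 16) & 0xf8;    /* high lane                */
        uint32_t t = b * 4 + r * 0x2000;  /* what pmaddwd computes    */

        t |= s & 0x0000fc00;              /* merge the six green bits */
        return (uint16_t) (t >> 5);       /* rrrrrggggggbbbbb         */
    }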
@@ -482,23 +620,20 @@ pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
 
 /* --------------- MMX code patch for fbcompose.c --------------------- */
 
-static force_inline uint32_t
+static force_inline __m64
 combine (const uint32_t *src, const uint32_t *mask)
 {
-    uint32_t ssrc = *src;
+    __m64 vsrc = load8888 (src);
 
     if (mask)
     {
        __m64 m = load8888 (mask);
-       __m64 s = load8888 (&ssrc);
 
        m = expand_alpha (m);
-       s = pix_multiply (s, m);
-
-       store8888 (&ssrc, s);
+       vsrc = pix_multiply (vsrc, m);
     }
 
-    return ssrc;
+    return vsrc;
 }
 
 static void
@@ -513,19 +648,16 @@ mmx_combine_over_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       uint32_t ssrc = combine (src, mask);
-       uint32_t a = ssrc >> 24;
+       __m64 vsrc = combine (src, mask);
 
-       if (a == 0xff)
+       if (is_opaque (vsrc))
        {
-           *dest = ssrc;
+           store8888 (dest, vsrc);
        }
-       else if (ssrc)
+       else if (!is_zero (vsrc))
        {
-           __m64 s, sa;
-           s = load8888 (&ssrc);
-           sa = expand_alpha (s);
-           store8888 (dest, over (s, sa, load8888 (dest)));
+           __m64 sa = expand_alpha (vsrc);
+           store8888 (dest, over (vsrc, sa, load8888 (dest)));
        }
 
        ++dest;
@@ -549,11 +681,11 @@ mmx_combine_over_reverse_u (pixman_implementation_t *imp,
     while (dest < end)
     {
        __m64 d, da;
-       uint32_t s = combine (src, mask);
+       __m64 s = combine (src, mask);
 
        d = load8888 (dest);
        da = expand_alpha (d);
-       store8888 (dest, over (d, da, load8888 (&s)));
+       store8888 (dest, over (d, da, s));
 
        ++dest;
        ++src;
@@ -575,10 +707,9 @@ mmx_combine_in_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 x, a;
-       uint32_t ssrc = combine (src, mask);
+       __m64 a;
+       __m64 x = combine (src, mask);
 
-       x = load8888 (&ssrc);
        a = load8888 (dest);
        a = expand_alpha (a);
        x = pix_multiply (x, a);
@@ -605,11 +736,10 @@ mmx_combine_in_reverse_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 x, a;
-       uint32_t ssrc = combine (src, mask);
+       __m64 a = combine (src, mask);
+       __m64 x;
 
        x = load8888 (dest);
-       a = load8888 (&ssrc);
        a = expand_alpha (a);
        x = pix_multiply (x, a);
        store8888 (dest, x);
@@ -634,10 +764,9 @@ mmx_combine_out_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 x, a;
-       uint32_t ssrc = combine (src, mask);
+       __m64 a;
+       __m64 x = combine (src, mask);
 
-       x = load8888 (&ssrc);
        a = load8888 (dest);
        a = expand_alpha (a);
        a = negate (a);
@@ -664,11 +793,10 @@ mmx_combine_out_reverse_u (pixman_implementation_t *imp,
 
     while (dest < end)
    {
-       __m64 x, a;
-       uint32_t ssrc = combine (src, mask);
+       __m64 a = combine (src, mask);
+       __m64 x;
 
        x = load8888 (dest);
-       a = load8888 (&ssrc);
        a = expand_alpha (a);
        a = negate (a);
        x = pix_multiply (x, a);
@@ -695,10 +823,9 @@ mmx_combine_atop_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 s, da, d, sia;
-       uint32_t ssrc = combine (src, mask);
+       __m64 da, d, sia;
+       __m64 s = combine (src, mask);
 
-       s = load8888 (&ssrc);
        d = load8888 (dest);
        sia = expand_alpha (s);
        sia = negate (sia);
@@ -728,10 +855,9 @@ mmx_combine_atop_reverse_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 s, dia, d, sa;
-       uint32_t ssrc = combine (src, mask);
+       __m64 dia, d, sa;
+       __m64 s = combine (src, mask);
 
-       s = load8888 (&ssrc);
        d = load8888 (dest);
        sa = expand_alpha (s);
        dia = expand_alpha (d);
@@ -759,10 +885,9 @@ mmx_combine_xor_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 s, dia, d, sia;
-       uint32_t ssrc = combine (src, mask);
+       __m64 dia, d, sia;
+       __m64 s = combine (src, mask);
 
-       s = load8888 (&ssrc);
        d = load8888 (dest);
        sia = expand_alpha (s);
        dia = expand_alpha (d);
@@ -791,10 +916,9 @@ mmx_combine_add_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       __m64 s, d;
-       uint32_t ssrc = combine (src, mask);
+       __m64 d;
+       __m64 s = combine (src, mask);
 
-       s = load8888 (&ssrc);
        d = load8888 (dest);
        s = pix_add (s, d);
        store8888 (dest, s);
@@ -819,12 +943,14 @@ mmx_combine_saturate_u (pixman_implementation_t *imp,
 
     while (dest < end)
     {
-       uint32_t s = combine (src, mask);
+       uint32_t s, sa, da;
        uint32_t d = *dest;
-       __m64 ms = load8888 (&s);
-       __m64 md = load8888 (&d);
-       uint32_t sa = s >> 24;
-       uint32_t da = ~d >> 24;
+       __m64 ms = combine (src, mask);
+       __m64 md = load8888 (dest);
+
+       store8888(&s, ms);
+       da = ~d >> 24;
+       sa = s >> 24;
 
        if (sa > da)
        {
@@ -1440,7 +1566,7 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 
        while (w >= 2)
        {
-           __m64 vs = ldq_u((uint64_t *)src);
+           __m64 vs = ldq_u ((__m64 *)src);
            __m64 vd = *(__m64 *)dst;
            __m64 vsrc0 = expand8888 (vs, 0);
            __m64 vsrc1 = expand8888 (vs, 1);
@@ -1522,14 +1648,14 @@ mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
            __m64 vd6 = *(__m64 *)(dst + 12);
            __m64 vd7 = *(__m64 *)(dst + 14);
 
-           __m64 vs0 = ldq_u((uint64_t *)(src + 0));
-           __m64 vs1 = ldq_u((uint64_t *)(src + 2));
-           __m64 vs2 = ldq_u((uint64_t *)(src + 4));
-           __m64 vs3 = ldq_u((uint64_t *)(src + 6));
-           __m64 vs4 = ldq_u((uint64_t *)(src + 8));
-           __m64 vs5 = ldq_u((uint64_t *)(src + 10));
-           __m64 vs6 = ldq_u((uint64_t *)(src + 12));
-           __m64 vs7 = ldq_u((uint64_t *)(src + 14));
+           __m64 vs0 = ldq_u ((__m64 *)(src + 0));
+           __m64 vs1 = ldq_u ((__m64 *)(src + 2));
+           __m64 vs2 = ldq_u ((__m64 *)(src + 4));
+           __m64 vs3 = ldq_u ((__m64 *)(src + 6));
+           __m64 vs4 = ldq_u ((__m64 *)(src + 8));
+           __m64 vs5 = ldq_u ((__m64 *)(src + 10));
+           __m64 vs6 = ldq_u ((__m64 *)(src + 12));
+           __m64 vs7 = ldq_u ((__m64 *)(src + 14));
 
            vd0 = pack8888 (
                in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
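Note on the combine() rewrite: the source pixel now stays in an __m64 for the whole loop, so the per-pixel fast-outs in mmx_combine_over_u become register tests (is_opaque/is_zero) instead of integer compares on a stored-back uint32_t. The loop's shape in scalar form (a sketch assuming premultiplied alpha; in_scalar mirrors pixman's rounding multiply):

    #include <stdint.h>

    /* per-channel p * a / 255, two channels at a time, using the
     * (t + (t >> 8)) >> 8 rounding trick pixman uses */
    static uint32_t in_scalar (uint32_t p, uint32_t a)
    {
        uint32_t rb = (p & 0x00ff00ff) * a + 0x00800080;
        uint32_t ag = ((p >> 8) & 0x00ff00ff) * a + 0x00800080;

        rb = ((rb + ((rb >> 8) & 0x00ff00ff)) >> 8) & 0x00ff00ff;
        ag = ((ag + ((ag >> 8) & 0x00ff00ff)) >> 8) & 0x00ff00ff;
        return rb | (ag << 8);
    }

    static void combine_over_scalar (uint32_t *dest, const uint32_t *src, int n)
    {
        int i;

        for (i = 0; i < n; i++)
        {
            uint32_t s = src[i];

            if ((s >> 24) == 0xff)         /* is_opaque */
                dest[i] = s;
            else if (s != 0)               /* !is_zero  */
                dest[i] = s + in_scalar (dest[i], 255 - (s >> 24));
        }
    }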
@@ -1996,6 +2122,60 @@ pixman_fill_mmx (uint32_t *bits,
 }
 
 static void
+mmx_composite_src_x888_0565 (pixman_implementation_t *imp,
+                             pixman_composite_info_t *info)
+{
+    PIXMAN_COMPOSITE_ARGS (info);
+    uint16_t *dst_line, *dst;
+    uint32_t *src_line, *src, s;
+    int dst_stride, src_stride;
+    int32_t w;
+
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+
+    while (height--)
+    {
+       dst = dst_line;
+       dst_line += dst_stride;
+       src = src_line;
+       src_line += src_stride;
+       w = width;
+
+       while (w && (unsigned long)dst & 7)
+       {
+           s = *src++;
+           *dst = CONVERT_8888_TO_0565 (s);
+           dst++;
+           w--;
+       }
+
+       while (w >= 4)
+       {
+           __m64 vdest;
+           __m64 vsrc0 = ldq_u ((__m64 *)(src + 0));
+           __m64 vsrc1 = ldq_u ((__m64 *)(src + 2));
+
+           vdest = pack_4xpacked565 (vsrc0, vsrc1);
+
+           *(__m64 *)dst = vdest;
+
+           w -= 4;
+           src += 4;
+           dst += 4;
+       }
+
+       while (w)
+       {
+           s = *src++;
+           *dst = CONVERT_8888_TO_0565 (s);
+           dst++;
+           w--;
+       }
+    }
+}
+
+static void
 mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
                             pixman_composite_info_t *info)
 {
@@ -2123,7 +2303,7 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
     int dst_stride, mask_stride;
     int32_t w;
     __m64 vsrc, vsrca, tmp;
-    uint64_t srcsrcsrcsrc, src16;
+    __m64 srcsrcsrcsrc;
 
     CHECKPOINT ();
@@ -2140,11 +2320,7 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
     vsrca = expand_alpha (vsrc);
 
     tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0);
-    src16 = to_uint64 (tmp);
-
-    srcsrcsrcsrc =
-       (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
-       (uint64_t)src16 << 16 | (uint64_t)src16;
+    srcsrcsrcsrc = expand_alpha_rev (tmp);
 
     while (height--)
     {
@@ -2188,7 +2364,7 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
 
            if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
            {
-               *(uint64_t *)dst = srcsrcsrcsrc;
+               *(__m64 *)dst = srcsrcsrcsrc;
            }
            else if (m0 | m1 | m2 | m3)
            {
@@ -2809,7 +2985,7 @@ mmx_composite_add_8_8 (pixman_implementation_t *imp,
 
        while (w >= 8)
        {
-           *(__m64*)dst = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
+           *(__m64*)dst = _mm_adds_pu8 (ldq_u ((__m64 *)src), *(__m64*)dst);
            dst += 8;
            src += 8;
            w -= 8;
@@ -2837,7 +3013,6 @@ mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    __m64 dst64;
     uint32_t *dst_line, *dst;
     uint32_t *src_line, *src;
     int dst_stride, src_stride;
@@ -2858,8 +3033,8 @@ mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
 
        while (w && (unsigned long)dst & 7)
        {
-           *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
-                                                  _mm_cvtsi32_si64 (*dst)));
+           store (dst, _mm_adds_pu8 (load ((const uint32_t *)src),
+                                     load ((const uint32_t *)dst)));
            dst++;
            src++;
            w--;
@@ -2867,8 +3042,7 @@ mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
 
        while (w >= 2)
        {
-           dst64 = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
-           *(uint64_t*)dst = to_uint64 (dst64);
+           *(__m64 *)dst = _mm_adds_pu8 (ldq_u ((__m64 *)src), *(__m64*)dst);
            dst += 2;
            src += 2;
            w -= 2;
@@ -2876,8 +3050,8 @@ mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
 
        if (w)
        {
-           *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
-                                                  _mm_cvtsi32_si64 (*dst)));
+           store (dst, _mm_adds_pu8 (load ((const uint32_t *)src),
+                                     load ((const uint32_t *)dst)));
        }
     }
 
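Note on the new mmx_composite_src_x888_0565 fast path: it follows the standard head/body/tail shape, converting single pixels until dst reaches 8-byte alignment, then four pixels per pack_4xpacked565, then single pixels again. The control flow in scalar form (a sketch; to565() is a stand-in for CONVERT_8888_TO_0565):

    #include <stdint.h>

    static inline uint16_t to565 (uint32_t s)
    {
        return ((s >> 8) & 0xf800) | ((s >> 5) & 0x07e0) | ((s >> 3) & 0x001f);
    }

    static void src_x888_0565_sketch (uint16_t *dst, const uint32_t *src, int w)
    {
        while (w && ((uintptr_t) dst & 7))   /* head: align the store side */
        {
            *dst++ = to565 (*src++);
            w--;
        }

        while (w >= 4)                       /* body: one __m64 store per 4 px */
        {
            dst[0] = to565 (src[0]);
            dst[1] = to565 (src[1]);
            dst[2] = to565 (src[2]);
            dst[3] = to565 (src[3]);
            dst += 4; src += 4; w -= 4;
        }

        while (w)                            /* tail: leftover pixels */
        {
            *dst++ = to565 (*src++);
            w--;
        }
    }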
@@ -2958,7 +3132,7 @@ pixman_blt_mmx (uint32_t *src_bits,
 
        while (w >= 4 && ((unsigned long)d & 7))
        {
-           *(uint32_t *)d = ldl_u((uint32_t *)s);
+           *(uint32_t *)d = ldl_u ((uint32_t *)s);
 
            w -= 4;
            s += 4;
@@ -2992,14 +3166,14 @@ pixman_blt_mmx (uint32_t *src_bits,
              "%mm0", "%mm1", "%mm2", "%mm3",
              "%mm4", "%mm5", "%mm6", "%mm7");
 #else
-           __m64 v0 = ldq_u((uint64_t *)(s + 0));
-           __m64 v1 = ldq_u((uint64_t *)(s + 8));
-           __m64 v2 = ldq_u((uint64_t *)(s + 16));
-           __m64 v3 = ldq_u((uint64_t *)(s + 24));
-           __m64 v4 = ldq_u((uint64_t *)(s + 32));
-           __m64 v5 = ldq_u((uint64_t *)(s + 40));
-           __m64 v6 = ldq_u((uint64_t *)(s + 48));
-           __m64 v7 = ldq_u((uint64_t *)(s + 56));
+           __m64 v0 = ldq_u ((__m64 *)(s + 0));
+           __m64 v1 = ldq_u ((__m64 *)(s + 8));
+           __m64 v2 = ldq_u ((__m64 *)(s + 16));
+           __m64 v3 = ldq_u ((__m64 *)(s + 24));
+           __m64 v4 = ldq_u ((__m64 *)(s + 32));
+           __m64 v5 = ldq_u ((__m64 *)(s + 40));
+           __m64 v6 = ldq_u ((__m64 *)(s + 48));
+           __m64 v7 = ldq_u ((__m64 *)(s + 56));
            *(__m64 *)(d + 0)  = v0;
            *(__m64 *)(d + 8)  = v1;
            *(__m64 *)(d + 16) = v2;
@@ -3016,7 +3190,7 @@ pixman_blt_mmx (uint32_t *src_bits,
 
        while (w >= 4)
        {
-           *(uint32_t *)d = ldl_u((uint32_t *)s);
+           *(uint32_t *)d = ldl_u ((uint32_t *)s);
 
            w -= 4;
            s += 4;
@@ -3109,6 +3283,190 @@ mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
     _mm_empty ();
 }
 
+static uint32_t *
+mmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
+{
+    int w = iter->width;
+    uint32_t *dst = iter->buffer;
+    uint32_t *src = (uint32_t *)iter->bits;
+
+    iter->bits += iter->stride;
+
+    while (w && ((unsigned long)dst) & 7)
+    {
+       *dst++ = (*src++) | 0xff000000;
+       w--;
+    }
+
+    while (w >= 8)
+    {
+       __m64 vsrc1 = ldq_u ((__m64 *)(src + 0));
+       __m64 vsrc2 = ldq_u ((__m64 *)(src + 2));
+       __m64 vsrc3 = ldq_u ((__m64 *)(src + 4));
+       __m64 vsrc4 = ldq_u ((__m64 *)(src + 6));
+
+       *(__m64 *)(dst + 0) = _mm_or_si64 (vsrc1, MC (ff000000));
+       *(__m64 *)(dst + 2) = _mm_or_si64 (vsrc2, MC (ff000000));
+       *(__m64 *)(dst + 4) = _mm_or_si64 (vsrc3, MC (ff000000));
+       *(__m64 *)(dst + 6) = _mm_or_si64 (vsrc4, MC (ff000000));
+
+       dst += 8;
+       src += 8;
+       w -= 8;
+    }
+
+    while (w)
+    {
+       *dst++ = (*src++) | 0xff000000;
+       w--;
+    }
+
+    return iter->buffer;
+}
+
+static uint32_t *
+mmx_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask)
+{
+    int w = iter->width;
+    uint32_t *dst = iter->buffer;
+    uint16_t *src = (uint16_t *)iter->bits;
+
+    iter->bits += iter->stride;
+
+    while (w && ((unsigned long)dst) & 0x0f)
+    {
+       uint16_t s = *src++;
+
+       *dst++ = CONVERT_0565_TO_8888 (s);
+       w--;
+    }
+
+    while (w >= 4)
+    {
+       __m64 vsrc = ldq_u ((__m64 *)src);
+
+       __m64 mm0 = expand565 (vsrc, 0);
+       __m64 mm1 = expand565 (vsrc, 1);
+       __m64 mm2 = expand565 (vsrc, 2);
+       __m64 mm3 = expand565 (vsrc, 3);
+
+       *(__m64 *)(dst + 0) = _mm_or_si64 (pack8888 (mm0, mm1), MC (ff000000));
+       *(__m64 *)(dst + 2) = _mm_or_si64 (pack8888 (mm2, mm3), MC (ff000000));
+
+       dst += 4;
+       src += 4;
+       w -= 4;
+    }
+
+    while (w)
+    {
+       uint16_t s = *src++;
+
+       *dst++ = CONVERT_0565_TO_8888 (s);
+       w--;
+    }
+
+    return iter->buffer;
+}
+
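Note on mmx_fetch_r5g6b5: it widens four r5g6b5 pixels per ldq_u via expand565/pack8888 and forces alpha with the new MC (ff000000) constant. Per pixel this is the usual bit-replicating 565-to-8888 conversion (a sketch; see CONVERT_0565_TO_8888 in pixman-private.h for the authoritative macro):

    #include <stdint.h>

    static inline uint32_t convert_0565_to_8888 (uint16_t s)
    {
        uint32_t r = (s >> 11) & 0x1f;
        uint32_t g = (s >> 5)  & 0x3f;
        uint32_t b = s & 0x1f;

        r = (r << 3) | (r >> 2);   /* replicate high bits into the low end */
        g = (g << 2) | (g >> 4);
        b = (b << 3) | (b >> 2);

        return 0xff000000 | (r << 16) | (g << 8) | b;
    }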
+static uint32_t *
+mmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
+{
+    int w = iter->width;
+    uint32_t *dst = iter->buffer;
+    uint8_t *src = iter->bits;
+
+    iter->bits += iter->stride;
+
+    while (w && (((unsigned long)dst) & 15))
+    {
+       *dst++ = *(src++) << 24;
+       w--;
+    }
+
+    while (w >= 8)
+    {
+       __m64 mm0 = ldq_u ((__m64 *)src);
+
+       __m64 mm1 = _mm_unpacklo_pi8 (_mm_setzero_si64(), mm0);
+       __m64 mm2 = _mm_unpackhi_pi8 (_mm_setzero_si64(), mm0);
+       __m64 mm3 = _mm_unpacklo_pi16 (_mm_setzero_si64(), mm1);
+       __m64 mm4 = _mm_unpackhi_pi16 (_mm_setzero_si64(), mm1);
+       __m64 mm5 = _mm_unpacklo_pi16 (_mm_setzero_si64(), mm2);
+       __m64 mm6 = _mm_unpackhi_pi16 (_mm_setzero_si64(), mm2);
+
+       *(__m64 *)(dst + 0) = mm3;
+       *(__m64 *)(dst + 2) = mm4;
+       *(__m64 *)(dst + 4) = mm5;
+       *(__m64 *)(dst + 6) = mm6;
+
+       dst += 8;
+       src += 8;
+       w -= 8;
+    }
+
+    while (w)
+    {
+       *dst++ = *(src++) << 24;
+       w--;
+    }
+
+    return iter->buffer;
+}
+
+typedef struct
+{
+    pixman_format_code_t format;
+    pixman_iter_get_scanline_t get_scanline;
+} fetcher_info_t;
+
+static const fetcher_info_t fetchers[] =
+{
+    { PIXMAN_x8r8g8b8, mmx_fetch_x8r8g8b8 },
+    { PIXMAN_r5g6b5, mmx_fetch_r5g6b5 },
+    { PIXMAN_a8, mmx_fetch_a8 },
+    { PIXMAN_null }
+};
+
+static void
+mmx_src_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter)
+{
+    pixman_image_t *image = iter->image;
+    int x = iter->x;
+    int y = iter->y;
+    int width = iter->width;
+    int height = iter->height;
+
+#define FLAGS \
+    (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE)
+
+    if ((iter->flags & ITER_NARROW) &&
+       (image->common.flags & FLAGS) == FLAGS &&
+       x >= 0 && y >= 0 &&
+       x + width <= image->bits.width &&
+       y + height <= image->bits.height)
+    {
+       const fetcher_info_t *f;
+
+       for (f = &fetchers[0]; f->format != PIXMAN_null; f++)
+       {
+           if (image->common.extended_format_code == f->format)
+           {
+               uint8_t *b = (uint8_t *)image->bits.bits;
+               int s = image->bits.rowstride * 4;
+
+               iter->bits = b + s * iter->y + x * PIXMAN_FORMAT_BPP (f->format) / 8;
+               iter->stride = s;
+
+               iter->get_scanline = f->get_scanline;
+               return;
+           }
+       }
+    }
+
+    imp->delegate->src_iter_init (imp->delegate, iter);
+}
+
 static const pixman_fast_path_t mmx_fast_paths[] =
 {
     PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, mmx_composite_over_n_8_0565 ),
@@ -3160,6 +3518,10 @@ static const pixman_fast_path_t mmx_fast_paths[] =
     PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, mmx_composite_add_8_8 ),
     PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, mmx_composite_add_n_8_8 ),
 
+    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, mmx_composite_src_x888_0565 ),
+    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, mmx_composite_src_x888_0565 ),
+    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, mmx_composite_src_x888_0565 ),
+    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, mmx_composite_src_x888_0565 ),
     PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, mmx_composite_src_n_8_8888 ),
     PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, mmx_composite_src_n_8_8888 ),
     PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, mmx_composite_src_n_8_8888 ),
@@ -3260,7 +3622,9 @@ _pixman_implementation_create_mmx (pixman_implementation_t *fallback)
     imp->blt = mmx_blt;
     imp->fill = mmx_fill;
 
+    imp->src_iter_init = mmx_src_iter_init;
+
     return imp;
 }
 
-#endif /* USE_X86_MMX || USE_ARM_IWMMXT */
+#endif /* USE_X86_MMX || USE_ARM_IWMMXT || USE_LOONGSON_MMI */
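Note on reaching the new paths: once the fast-path entries and the src_iter_init hook are registered, ordinary clients hit them through the public API. A hypothetical smoke test (dimensions and pixel values are illustrative only):

    #include <stdint.h>
    #include <pixman.h>

    int main (void)
    {
        static uint32_t src[8 * 8];          /* x8r8g8b8, stride 32 bytes */
        static uint32_t dst[8 * 4];          /* r5g6b5,   stride 16 bytes */
        int i;

        for (i = 0; i < 8 * 8; i++)
            src[i] = 0xff123456;

        pixman_image_t *s = pixman_image_create_bits (PIXMAN_x8r8g8b8,
                                                      8, 8, src, 8 * 4);
        pixman_image_t *d = pixman_image_create_bits (PIXMAN_r5g6b5,
                                                      8, 8, dst, 8 * 2);

        /* should select mmx_composite_src_x888_0565 on an MMX/MMI build */
        pixman_image_composite32 (PIXMAN_OP_SRC, s, NULL, d,
                                  0, 0, 0, 0, 0, 0, 8, 8);

        pixman_image_unref (s);
        pixman_image_unref (d);
        return 0;
    }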