author    marha <marha@users.sourceforge.net>  2012-05-29 08:45:04 +0200
committer marha <marha@users.sourceforge.net>  2012-05-29 08:45:04 +0200
commit    ce592e25d6303a8f4a8aa0e2918e0f72a35bf175
tree      845b108f004259a6e4f72ade3dd5e0521ab3952b /pixman
parent    eef864d36de97903007f04fad9fa43afe297745b
fontconfig libX11 mesa pixman xserver git update 29 May 2012
Diffstat (limited to 'pixman')
 pixman/configure.ac               |   4
 pixman/pixman/loongson-mmintrin.h |  21
 pixman/pixman/pixman-fast-path.c  |  44
 pixman/pixman/pixman-mmx.c        | 296
 pixman/test/composite.c           |  19
 pixman/test/lowlevel-blt-bench.c  |   1
 pixman/test/scaling-crash-test.c  |   7
 7 files changed, 347 insertions(+), 45 deletions(-)
diff --git a/pixman/configure.ac b/pixman/configure.ac
index 027a1680e..5c60b13c9 100644
--- a/pixman/configure.ac
+++ b/pixman/configure.ac
@@ -53,8 +53,8 @@ AC_PREREQ([2.57])
#
m4_define([pixman_major], 0)
-m4_define([pixman_minor], 25)
-m4_define([pixman_micro], 7)
+m4_define([pixman_minor], 27)
+m4_define([pixman_micro], 1)
m4_define([pixman_version],[pixman_major.pixman_minor.pixman_micro])
diff --git a/pixman/pixman/loongson-mmintrin.h b/pixman/pixman/loongson-mmintrin.h
index 8295ba01c..1a114fe0f 100644
--- a/pixman/pixman/loongson-mmintrin.h
+++ b/pixman/pixman/loongson-mmintrin.h
@@ -77,6 +77,17 @@ _mm_and_si64 (__m64 __m1, __m64 __m2)
return ret;
}
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+ asm("pcmpeqw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+ return ret;
+}
+
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_empty (void)
{
@@ -150,6 +161,16 @@ _mm_shuffle_pi16 (__m64 __m, int64_t __n)
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_pi16 (__m64 __m, int64_t __count)
+{
+ __m64 ret;
+ asm("psllh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+ return ret;
+}
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_si64 (__m64 __m, int64_t __count)
{
__m64 ret;
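
The two new wrappers map MMX intrinsics onto Loongson MMI instructions. In MIPS naming, "w" is a 32-bit word and "h" a 16-bit halfword, so pcmpeqw and psllh are the expected opcodes for _mm_cmpeq_pi32 and _mm_slli_pi16. For the lane-wise semantics, a minimal scalar sketch; the ref_* helpers are illustrative names, not pixman or toolchain API:

#include <stdint.h>

/* _mm_cmpeq_pi32: each 32-bit lane becomes all-ones when equal, else zero.
 * pixman uses cmpeq (x, x) below to synthesize an all-ones alpha mask. */
static uint64_t
ref_cmpeq_pi32 (uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    int lane;

    for (lane = 0; lane < 2; lane++)
    {
        uint32_t x = (uint32_t) (a >> (32 * lane));
        uint32_t y = (uint32_t) (b >> (32 * lane));

        r |= (uint64_t) (x == y ? 0xffffffffu : 0u) << (32 * lane);
    }
    return r;
}

/* _mm_slli_pi16: shift each of the four 16-bit lanes left independently,
 * without letting bits spill into the neighbouring lane. */
static uint64_t
ref_slli_pi16 (uint64_t m, int count)
{
    uint64_t r = 0;
    int lane;

    for (lane = 0; lane < 4; lane++)
    {
        uint16_t x = (uint16_t) (m >> (16 * lane));

        r |= (uint64_t) (uint16_t) (x << count) << (16 * lane);
    }
    return r;
}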
diff --git a/pixman/pixman/pixman-fast-path.c b/pixman/pixman/pixman-fast-path.c
index 0a134eda3..e79b06970 100644
--- a/pixman/pixman/pixman-fast-path.c
+++ b/pixman/pixman/pixman-fast-path.c
@@ -810,6 +810,48 @@ fast_composite_add_8_8 (pixman_implementation_t *imp,
}
static void
+fast_composite_add_0565_0565 (pixman_implementation_t *imp,
+ pixman_composite_info_t *info)
+{
+ PIXMAN_COMPOSITE_ARGS (info);
+ uint16_t *dst_line, *dst;
+ uint32_t d;
+ uint16_t *src_line, *src;
+ uint32_t s;
+ int dst_stride, src_stride;
+ int32_t w;
+
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w--)
+ {
+ s = *src++;
+ if (s)
+ {
+ d = *dst;
+ s = CONVERT_0565_TO_8888 (s);
+ if (d)
+ {
+ d = CONVERT_0565_TO_8888 (d);
+ UN8x4_ADD_UN8x4 (s, d);
+ }
+ *dst = CONVERT_8888_TO_0565 (s);
+ }
+ dst++;
+ }
+ }
+}
+
+static void
fast_composite_add_8888_8888 (pixman_implementation_t *imp,
pixman_composite_info_t *info)
{
@@ -1836,6 +1878,8 @@ static const pixman_fast_path_t c_fast_paths[] =
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
+ PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, fast_composite_add_0565_0565),
+ PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, fast_composite_add_0565_0565),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
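
The scalar loop in fast_composite_add_0565_0565 leans on pixman's real CONVERT_0565_TO_8888 / CONVERT_8888_TO_0565 macros and the saturating UN8x4_ADD_UN8x4. A self-contained sketch of the same round trip, with hypothetical stand-ins (to_8888, to_0565 and add_sat_8888 are illustrative names, not the pixman macros):

#include <stdint.h>

/* Widen each 565 channel to 8 bits by replicating the high bits
 * into the freshly opened low bits. */
static uint32_t
to_8888 (uint16_t p)
{
    uint32_t r = (p >> 11) & 0x1f;
    uint32_t g = (p >> 5) & 0x3f;
    uint32_t b = p & 0x1f;

    r = (r << 3) | (r >> 2);
    g = (g << 2) | (g >> 4);
    b = (b << 3) | (b >> 2);

    return (r << 16) | (g << 8) | b;
}

/* Truncate each 8-bit channel back to its 565 width. */
static uint16_t
to_0565 (uint32_t p)
{
    return (uint16_t) (((p >> 8) & 0xf800) |
                       ((p >> 5) & 0x07e0) |
                       ((p >> 3) & 0x001f));
}

/* Per-channel saturating add, the scalar analogue of UN8x4_ADD_UN8x4. */
static uint32_t
add_sat_8888 (uint32_t s, uint32_t d)
{
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t sum = ((s >> shift) & 0xff) + ((d >> shift) & 0xff);

        result |= (sum > 0xff ? 0xffu : sum) << shift;
    }
    return result;
}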
diff --git a/pixman/pixman/pixman-mmx.c b/pixman/pixman/pixman-mmx.c
index 01a2bc93b..6e292c6b2 100644
--- a/pixman/pixman/pixman-mmx.c
+++ b/pixman/pixman/pixman-mmx.c
@@ -185,6 +185,9 @@ typedef struct
mmxdatafield mmx_565_b;
mmxdatafield mmx_packed_565_rb;
mmxdatafield mmx_packed_565_g;
+ mmxdatafield mmx_expand_565_g;
+ mmxdatafield mmx_expand_565_b;
+ mmxdatafield mmx_expand_565_r;
#ifndef USE_LOONGSON_MMI
mmxdatafield mmx_mask_0;
mmxdatafield mmx_mask_1;
@@ -216,6 +219,9 @@ static const mmx_data_t c =
MMXDATA_INIT (.mmx_565_b, 0x00000000000000f8),
MMXDATA_INIT (.mmx_packed_565_rb, 0x00f800f800f800f8),
MMXDATA_INIT (.mmx_packed_565_g, 0x0000fc000000fc00),
+ MMXDATA_INIT (.mmx_expand_565_g, 0x07e007e007e007e0),
+ MMXDATA_INIT (.mmx_expand_565_b, 0x001f001f001f001f),
+ MMXDATA_INIT (.mmx_expand_565_r, 0xf800f800f800f800),
#ifndef USE_LOONGSON_MMI
MMXDATA_INIT (.mmx_mask_0, 0xffffffffffff0000),
MMXDATA_INIT (.mmx_mask_1, 0xffffffff0000ffff),
@@ -518,6 +524,36 @@ expand565 (__m64 pixel, int pos)
return _mm_srli_pi16 (pixel, 8);
}
+/* Expand 4 16-bit pixels in an MMX register into two MMX registers of
+ *
+ *    AARRGGBBAARRGGBB
+ */
+static force_inline void
+expand_4xpacked565 (__m64 vin, __m64 *vout0, __m64 *vout1, int full_alpha)
+{
+ __m64 t0, t1, alpha = _mm_setzero_si64 ();
+ __m64 r = _mm_and_si64 (vin, MC (expand_565_r));
+ __m64 g = _mm_and_si64 (vin, MC (expand_565_g));
+ __m64 b = _mm_and_si64 (vin, MC (expand_565_b));
+ if (full_alpha)
+ alpha = _mm_cmpeq_pi32 (alpha, alpha);
+
+ /* Replicate high bits into empty low bits. */
+ r = _mm_or_si64 (_mm_srli_pi16 (r, 8), _mm_srli_pi16 (r, 13));
+ g = _mm_or_si64 (_mm_srli_pi16 (g, 3), _mm_srli_pi16 (g, 9));
+ b = _mm_or_si64 (_mm_slli_pi16 (b, 3), _mm_srli_pi16 (b, 2));
+
+ r = _mm_packs_pu16 (r, _mm_setzero_si64 ()); /* 00 00 00 00 R3 R2 R1 R0 */
+ g = _mm_packs_pu16 (g, _mm_setzero_si64 ()); /* 00 00 00 00 G3 G2 G1 G0 */
+ b = _mm_packs_pu16 (b, _mm_setzero_si64 ()); /* 00 00 00 00 B3 B2 B1 B0 */
+
+ t1 = _mm_unpacklo_pi8 (r, alpha); /* A3 R3 A2 R2 A1 R1 A0 R0 */
+ t0 = _mm_unpacklo_pi8 (b, g); /* G3 B3 G2 B2 G1 B1 G0 B0 */
+
+ *vout0 = _mm_unpacklo_pi16 (t0, t1); /* A1 R1 G1 B1 A0 R0 G0 B0 */
+ *vout1 = _mm_unpackhi_pi16 (t0, t1); /* A3 R3 G3 B3 A2 R2 G2 B2 */
+}
+
static force_inline __m64
expand8888 (__m64 in, int pos)
{
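
The shift pairs in expand_4xpacked565 implement the usual replicate-high-bits widening without moving any channel out of its 16-bit lane: with red masked to bits 11-15, (lane >> 8) | (lane >> 13) equals (red << 3) | (red >> 2); green (bits 5-10 after masking) uses >> 3 and >> 9 to get (green << 2) | (green >> 4); blue (bits 0-4) uses << 3 and >> 2 directly. A small illustrative check of the red case, not pixman code:

#include <assert.h>
#include <stdint.h>

static void
check_red_replication (void)
{
    uint32_t red;

    for (red = 0; red < 32; red++)
    {
        /* A 16-bit lane already masked with 0xf800. */
        uint16_t lane = (uint16_t) (red << 11);
        uint16_t via_shifts = (lane >> 8) | (lane >> 13);
        uint16_t expected = (uint16_t) ((red << 3) | (red >> 2));

        assert (via_shifts == expected);    /* e.g. red 0x1f expands to 0xff */
    }
}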
@@ -533,6 +569,17 @@ expandx888 (__m64 in, int pos)
return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha));
}
+static force_inline void
+expand_4x565 (__m64 vin, __m64 *vout0, __m64 *vout1, __m64 *vout2, __m64 *vout3, int full_alpha)
+{
+ __m64 v0, v1;
+ expand_4xpacked565 (vin, &v0, &v1, full_alpha);
+ *vout0 = expand8888 (v0, 0);
+ *vout1 = expand8888 (v0, 1);
+ *vout2 = expand8888 (v1, 0);
+ *vout3 = expand8888 (v1, 1);
+}
+
static force_inline __m64
pack_565 (__m64 pixel, __m64 target, int pos)
{
@@ -598,14 +645,14 @@ pack_4xpacked565 (__m64 a, __m64 b)
#endif
}
+#ifndef _MSC_VER
+
static force_inline __m64
pack_4x565 (__m64 v0, __m64 v1, __m64 v2, __m64 v3)
{
return pack_4xpacked565 (pack8888 (v0, v1), pack8888 (v2, v3));
}
-#ifndef _MSC_VER
-
static force_inline __m64
pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
{
@@ -617,6 +664,11 @@ pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
#else
+/* MSVC only handles a "pass by register" of up to three SSE intrinsics */
+
+#define pack_4x565(v0, v1, v2, v3) \
+ pack_4xpacked565 (pack8888 (v0, v1), pack8888 (v2, v3))
+
#define pix_add_mul(x, a, y, b) \
( x = pix_multiply (x, a), \
y = pix_multiply (y, b), \
@@ -1403,11 +1455,14 @@ mmx_composite_over_n_0565 (pixman_implementation_t *imp,
while (w >= 4)
{
__m64 vdest = *(__m64 *)dst;
+ __m64 v0, v1, v2, v3;
+
+ expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
- __m64 v0 = over (vsrc, vsrca, expand565 (vdest, 0));
- __m64 v1 = over (vsrc, vsrca, expand565 (vdest, 1));
- __m64 v2 = over (vsrc, vsrca, expand565 (vdest, 2));
- __m64 v3 = over (vsrc, vsrca, expand565 (vdest, 3));
+ v0 = over (vsrc, vsrca, v0);
+ v1 = over (vsrc, vsrca, v1);
+ v2 = over (vsrc, vsrca, v2);
+ v3 = over (vsrc, vsrca, v3);
*(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);
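
The over () helper computes the premultiplied OVER operator per channel: result = src + dest * (255 - src_alpha) / 255. A scalar sketch under that assumption; div_255 is the usual exact rounded division by 255, and with valid premultiplied alpha the sum cannot exceed 255 (illustrative helpers, not the pixman internals):

#include <stdint.h>

/* Exact, rounded x / 255 for x in [0, 255 * 255]. */
static uint8_t
div_255 (uint32_t x)
{
    x += 0x80;
    return (uint8_t) ((x + (x >> 8)) >> 8);
}

/* One channel of src OVER dest, both premultiplied by alpha. */
static uint8_t
over_channel (uint8_t src, uint8_t src_alpha, uint8_t dest)
{
    return (uint8_t) (src + div_255 ((uint32_t) dest * (255 - src_alpha)));
}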
@@ -1823,16 +1878,19 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
while (w >= 4)
{
__m64 vdest = *(__m64 *)dst;
+ __m64 v0, v1, v2, v3;
+
+ expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
__m64 vsrc0 = load8888 ((src + 0));
__m64 vsrc1 = load8888 ((src + 1));
__m64 vsrc2 = load8888 ((src + 2));
__m64 vsrc3 = load8888 ((src + 3));
- __m64 v0 = over (vsrc0, expand_alpha (vsrc0), expand565 (vdest, 0));
- __m64 v1 = over (vsrc1, expand_alpha (vsrc1), expand565 (vdest, 1));
- __m64 v2 = over (vsrc2, expand_alpha (vsrc2), expand565 (vdest, 2));
- __m64 v3 = over (vsrc3, expand_alpha (vsrc3), expand565 (vdest, 3));
+ v0 = over (vsrc0, expand_alpha (vsrc0), v0);
+ v1 = over (vsrc1, expand_alpha (vsrc1), v1);
+ v2 = over (vsrc2, expand_alpha (vsrc2), v2);
+ v3 = over (vsrc3, expand_alpha (vsrc3), v3);
*(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);
@@ -2174,6 +2232,8 @@ mmx_composite_src_x888_0565 (pixman_implementation_t *imp,
w--;
}
}
+
+ _mm_empty ();
}
static void
@@ -2370,19 +2430,21 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
else if (m0 | m1 | m2 | m3)
{
__m64 vdest = *(__m64 *)dst;
+ __m64 v0, v1, v2, v3;
+
+ expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
__m64 vm0 = to_m64 (m0);
- __m64 v0 = in_over (vsrc, vsrca, expand_alpha_rev (vm0),
- expand565 (vdest, 0));
+ v0 = in_over (vsrc, vsrca, expand_alpha_rev (vm0), v0);
+
__m64 vm1 = to_m64 (m1);
- __m64 v1 = in_over (vsrc, vsrca, expand_alpha_rev (vm1),
- expand565 (vdest, 1));
+ v1 = in_over (vsrc, vsrca, expand_alpha_rev (vm1), v1);
+
__m64 vm2 = to_m64 (m2);
- __m64 v2 = in_over (vsrc, vsrca, expand_alpha_rev (vm2),
- expand565 (vdest, 2));
+ v2 = in_over (vsrc, vsrca, expand_alpha_rev (vm2), v2);
+
__m64 vm3 = to_m64 (m3);
- __m64 v3 = in_over (vsrc, vsrca, expand_alpha_rev (vm3),
- expand565 (vdest, 3));
+ v3 = in_over (vsrc, vsrca, expand_alpha_rev (vm3), v3);
*(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);
}
@@ -2491,11 +2553,19 @@ mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
else if (s0 | s1 | s2 | s3)
{
__m64 vdest = *(__m64 *)dst;
+ __m64 v0, v1, v2, v3;
+
+ __m64 vsrc0 = load8888 (&s0);
+ __m64 vsrc1 = load8888 (&s1);
+ __m64 vsrc2 = load8888 (&s2);
+ __m64 vsrc3 = load8888 (&s3);
- __m64 v0 = over_rev_non_pre (load8888 (&s0), expand565 (vdest, 0));
- __m64 v1 = over_rev_non_pre (load8888 (&s1), expand565 (vdest, 1));
- __m64 v2 = over_rev_non_pre (load8888 (&s2), expand565 (vdest, 2));
- __m64 v3 = over_rev_non_pre (load8888 (&s3), expand565 (vdest, 3));
+ expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
+
+ v0 = over_rev_non_pre (vsrc0, v0);
+ v1 = over_rev_non_pre (vsrc1, v1);
+ v2 = over_rev_non_pre (vsrc2, v2);
+ v3 = over_rev_non_pre (vsrc3, v3);
*(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);
}
@@ -2671,11 +2741,14 @@ mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
if ((m0 | m1 | m2 | m3))
{
__m64 vdest = *(__m64 *)q;
+ __m64 v0, v1, v2, v3;
+
+ expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
- __m64 v0 = in_over (vsrc, vsrca, load8888 (&m0), expand565 (vdest, 0));
- __m64 v1 = in_over (vsrc, vsrca, load8888 (&m1), expand565 (vdest, 1));
- __m64 v2 = in_over (vsrc, vsrca, load8888 (&m2), expand565 (vdest, 2));
- __m64 v3 = in_over (vsrc, vsrca, load8888 (&m3), expand565 (vdest, 3));
+ v0 = in_over (vsrc, vsrca, load8888 (&m0), v0);
+ v1 = in_over (vsrc, vsrca, load8888 (&m1), v1);
+ v2 = in_over (vsrc, vsrca, load8888 (&m2), v2);
+ v3 = in_over (vsrc, vsrca, load8888 (&m3), v3);
*(__m64 *)q = pack_4x565 (v0, v1, v2, v3);
}
@@ -3006,6 +3079,90 @@ mmx_composite_add_8_8 (pixman_implementation_t *imp,
}
static void
+mmx_composite_add_0565_0565 (pixman_implementation_t *imp,
+ pixman_composite_info_t *info)
+{
+ PIXMAN_COMPOSITE_ARGS (info);
+ uint16_t *dst_line, *dst;
+ uint32_t d;
+ uint16_t *src_line, *src;
+ uint32_t s;
+ int dst_stride, src_stride;
+ int32_t w;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 7)
+ {
+ s = *src++;
+ if (s)
+ {
+ d = *dst;
+ s = CONVERT_0565_TO_8888 (s);
+ if (d)
+ {
+ d = CONVERT_0565_TO_8888 (d);
+ UN8x4_ADD_UN8x4 (s, d);
+ }
+ *dst = CONVERT_8888_TO_0565 (s);
+ }
+ dst++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ __m64 vdest = *(__m64 *)dst;
+ __m64 vsrc = ldq_u ((__m64 *)src);
+ __m64 vd0, vd1;
+ __m64 vs0, vs1;
+
+ expand_4xpacked565 (vdest, &vd0, &vd1, 0);
+ expand_4xpacked565 (vsrc, &vs0, &vs1, 0);
+
+ vd0 = _mm_adds_pu8 (vd0, vs0);
+ vd1 = _mm_adds_pu8 (vd1, vs1);
+
+ *(__m64 *)dst = pack_4xpacked565 (vd0, vd1);
+
+ dst += 4;
+ src += 4;
+ w -= 4;
+ }
+
+ while (w--)
+ {
+ s = *src++;
+ if (s)
+ {
+ d = *dst;
+ s = CONVERT_0565_TO_8888 (s);
+ if (d)
+ {
+ d = CONVERT_0565_TO_8888 (d);
+ UN8x4_ADD_UN8x4 (s, d);
+ }
+ *dst = CONVERT_8888_TO_0565 (s);
+ }
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
pixman_composite_info_t *info)
{
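
The 4-pixel loop in mmx_composite_add_0565_0565 widens both operands to a8r8g8b8 before adding because packed 565 words cannot simply be added in place: a carry out of one field corrupts its neighbour. Once widened, _mm_adds_pu8 (paddusb) provides the saturating per-byte add in a single instruction. An illustrative demonstration of the carry problem:

#include <assert.h>
#include <stdint.h>

static void
carry_demo (void)
{
    uint16_t a = 0x001f;    /* blue = 31, green = red = 0 */
    uint16_t b = 0x0001;    /* blue = 1 */
    uint16_t wrong = (uint16_t) (a + b);

    /* 0x001f + 0x0001 = 0x0020: the carry leaked out of blue into the
     * low bit of green instead of saturating blue at 31. */
    assert (wrong == 0x0020);
    assert ((wrong & 0x07e0) == 0x0020 && (wrong & 0x001f) == 0);
}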
@@ -3280,6 +3437,75 @@ mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
_mm_empty ();
}
+static void
+mmx_composite_over_reverse_n_8888 (pixman_implementation_t *imp,
+ pixman_composite_info_t *info)
+{
+ PIXMAN_COMPOSITE_ARGS (info);
+ uint32_t src;
+ uint32_t *dst_line, *dst;
+ int32_t w;
+ int dst_stride;
+ __m64 vsrc;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+
+ vsrc = load8888 (&src);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ __m64 vdest = load8888 (dst);
+
+ store8888 (dst, over (vdest, expand_alpha (vdest), vsrc));
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 2)
+ {
+ __m64 vdest = *(__m64 *)dst;
+ __m64 dest0 = expand8888 (vdest, 0);
+ __m64 dest1 = expand8888 (vdest, 1);
+
+
+ dest0 = over (dest0, expand_alpha (dest0), vsrc);
+ dest1 = over (dest1, expand_alpha (dest1), vsrc);
+
+ *(__m64 *)dst = pack8888 (dest0, dest1);
+
+ dst += 2;
+ w -= 2;
+ }
+
+ CHECKPOINT ();
+
+ if (w)
+ {
+ __m64 vdest = load8888 (dst);
+
+ store8888 (dst, over (vdest, expand_alpha (vdest), vsrc));
+ }
+ }
+
+ _mm_empty ();
+}
+
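+
OVER_REVERSE keeps the destination on top: per channel, result = dest + src * (255 - dest_alpha) / 255, which is why the loop above calls over () with the roles swapped, passing vdest and its expanded alpha first and the solid vsrc last. A scalar sketch under the same premultiplied-alpha assumption (illustrative helper, with the same rounded division by 255 as before):

#include <stdint.h>

/* One channel of dest OVER src: the solid source only shows through
 * where the destination is not already opaque. */
static uint8_t
over_reverse_channel (uint8_t src, uint8_t dest, uint8_t dest_alpha)
{
    uint32_t t = (uint32_t) src * (255 - dest_alpha) + 0x80;

    return (uint8_t) (dest + ((t + (t >> 8)) >> 8));
}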
static uint32_t *
mmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
{
@@ -3318,6 +3544,7 @@ mmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
w--;
}
+ _mm_empty ();
return iter->buffer;
}
@@ -3341,14 +3568,12 @@ mmx_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask)
while (w >= 4)
{
__m64 vsrc = ldq_u ((__m64 *)src);
+ __m64 mm0, mm1;
- __m64 mm0 = expand565 (vsrc, 0);
- __m64 mm1 = expand565 (vsrc, 1);
- __m64 mm2 = expand565 (vsrc, 2);
- __m64 mm3 = expand565 (vsrc, 3);
+ expand_4xpacked565 (vsrc, &mm0, &mm1, 1);
- *(__m64 *)(dst + 0) = _mm_or_si64 (pack8888 (mm0, mm1), MC (ff000000));
- *(__m64 *)(dst + 2) = _mm_or_si64 (pack8888 (mm2, mm3), MC (ff000000));
+ *(__m64 *)(dst + 0) = mm0;
+ *(__m64 *)(dst + 2) = mm1;
dst += 4;
src += 4;
@@ -3363,6 +3588,7 @@ mmx_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask)
w--;
}
+ _mm_empty ();
return iter->buffer;
}
@@ -3408,6 +3634,7 @@ mmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
w--;
}
+ _mm_empty ();
return iter->buffer;
}
@@ -3510,6 +3737,11 @@ static const pixman_fast_path_t mmx_fast_paths[] =
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, mmx_composite_over_8888_8888 ),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, mmx_composite_over_8888_0565 ),
+ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, mmx_composite_over_reverse_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, mmx_composite_over_reverse_n_8888),
+
+ PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, mmx_composite_add_0565_0565 ),
+ PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, mmx_composite_add_0565_0565 ),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, mmx_composite_add_8888_8888 ),
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, mmx_composite_add_8888_8888 ),
PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, mmx_composite_add_8_8 ),
diff --git a/pixman/test/composite.c b/pixman/test/composite.c
index 48bde9c7d..94b482551 100644
--- a/pixman/test/composite.c
+++ b/pixman/test/composite.c
@@ -525,15 +525,16 @@ composite_test (image_t *dst,
int testno)
{
pixman_color_t fill;
- pixman_rectangle16_t rect;
color_t expected, tdst, tsrc, tmsk;
pixel_checker_t checker;
+ pixman_image_t *solid;
/* Initialize dst */
compute_pixman_color (dst->color, &fill);
- rect.x = rect.y = 0;
- rect.width = rect.height = dst->size;
- pixman_image_fill_rectangles (PIXMAN_OP_SRC, dst->image, &fill, 1, &rect);
+ solid = pixman_image_create_solid_fill (&fill);
+ pixman_image_composite32 (PIXMAN_OP_SRC, solid, NULL, dst->image,
+ 0, 0, 0, 0, 0, 0, dst->size, dst->size);
+ pixman_image_unref (solid);
if (mask)
{
@@ -644,16 +645,16 @@ image_init (image_t *info,
if (info->size)
{
- pixman_rectangle16_t rect;
+ pixman_image_t *solid;
info->image = pixman_image_create_bits (info->format->format,
info->size, info->size,
NULL, 0);
- rect.x = rect.y = 0;
- rect.width = rect.height = info->size;
- pixman_image_fill_rectangles (PIXMAN_OP_SRC, info->image, &fill,
- 1, &rect);
+ solid = pixman_image_create_solid_fill (&fill);
+ pixman_image_composite32 (PIXMAN_OP_SRC, solid, NULL, info->image,
+ 0, 0, 0, 0, 0, 0, info->size, info->size);
+ pixman_image_unref (solid);
if (sizes[size] & REPEAT)
{
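
The tests now initialize images by compositing a solid-fill source with PIXMAN_OP_SRC rather than calling pixman_image_fill_rectangles, routing initialization through the same composite entry points the tests exercise. A minimal standalone sketch of the pattern; fill_with_solid is a hypothetical wrapper around the public pixman API:

#include <pixman.h>

/* Fill an entire image with one color by compositing a solid-fill
 * image over it with PIXMAN_OP_SRC. */
static void
fill_with_solid (pixman_image_t *dest, pixman_color_t *color)
{
    pixman_image_t *solid = pixman_image_create_solid_fill (color);

    pixman_image_composite32 (PIXMAN_OP_SRC, solid, NULL, dest,
                              0, 0, 0, 0, 0, 0,
                              pixman_image_get_width (dest),
                              pixman_image_get_height (dest));
    pixman_image_unref (solid);
}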
diff --git a/pixman/test/lowlevel-blt-bench.c b/pixman/test/lowlevel-blt-bench.c
index 8a39a4615..b44b9f819 100644
--- a/pixman/test/lowlevel-blt-bench.c
+++ b/pixman/test/lowlevel-blt-bench.c
@@ -661,6 +661,7 @@ tests_tbl[] =
{ "outrev_n_8888_1555_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_a1r5g5b5 },
{ "outrev_n_8888_x888_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_x8r8g8b8 },
{ "outrev_n_8888_8888_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_a8r8g8b8 },
+ { "over_reverse_n_8888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER_REVERSE, PIXMAN_null, 0, PIXMAN_a8r8g8b8 },
};
int
diff --git a/pixman/test/scaling-crash-test.c b/pixman/test/scaling-crash-test.c
index ed57ae024..0dac892b5 100644
--- a/pixman/test/scaling-crash-test.c
+++ b/pixman/test/scaling-crash-test.c
@@ -32,8 +32,8 @@ run_test (int32_t dst_width,
pixman_transform_t transform;
uint32_t * srcbuf;
uint32_t * dstbuf;
- pixman_box32_t box = { 0, 0, src_width, src_height };
pixman_color_t color_cc = { 0xcccc, 0xcccc, 0xcccc, 0xcccc };
+ pixman_image_t * solid;
int result;
int i;
@@ -62,7 +62,10 @@ run_test (int32_t dst_width,
PIXMAN_a8r8g8b8, src_width, src_height,
srcbuf + (src_width + 10) * 5 + 5, (src_width + 10) * 4);
- pixman_image_fill_boxes (PIXMAN_OP_SRC, src_img, &color_cc, 1, &box);
+ solid = pixman_image_create_solid_fill (&color_cc);
+ pixman_image_composite32 (PIXMAN_OP_SRC, solid, NULL, src_img,
+ 0, 0, 0, 0, 0, 0, src_width, src_height);
+ pixman_image_unref (solid);
dst_img = pixman_image_create_bits (
PIXMAN_a8r8g8b8, dst_width, dst_height, dstbuf, dst_width * 4);