author    | marha <marha@users.sourceforge.net> | 2010-02-14 13:21:40 +0000
committer | marha <marha@users.sourceforge.net> | 2010-02-14 13:21:40 +0000
commit    | 26f62ef5ccd04fa3a55632d808ca6e1061cfb983
tree      | d55c1b9efb2e44c83dc85961d195c5c997577300 /pixman
parent    | 4268e59d0c18a7bfc584189d3418765407d05bbf
download  | vcxsrv-26f62ef5ccd04fa3a55632d808ca6e1061cfb983.tar.gz vcxsrv-26f62ef5ccd04fa3a55632d808ca6e1061cfb983.tar.bz2 vcxsrv-26f62ef5ccd04fa3a55632d808ca6e1061cfb983.zip
Updated to pixman-0.17.6
Diffstat (limited to 'pixman')
-rw-r--r-- | pixman/Makefile.am | 2
-rw-r--r-- | pixman/Makefile.in | 2
-rw-r--r-- | pixman/configure | 24
-rw-r--r-- | pixman/configure.ac | 2
-rw-r--r-- | pixman/pixman/pixman-arm-neon.c | 93
-rw-r--r-- | pixman/pixman/pixman-arm-simd.c | 34
-rw-r--r-- | pixman/pixman/pixman-cpu.c | 10
-rw-r--r-- | pixman/pixman/pixman-fast-path.c | 394
-rw-r--r-- | pixman/pixman/pixman-image.c | 10
-rw-r--r-- | pixman/pixman/pixman-mmx.c | 164
-rw-r--r-- | pixman/pixman/pixman-private.h | 73
-rw-r--r-- | pixman/pixman/pixman-region.c | 37
-rw-r--r-- | pixman/pixman/pixman-solid-fill.c | 35
-rw-r--r-- | pixman/pixman/pixman-sse2.c | 185
-rw-r--r-- | pixman/pixman/pixman-utils.c | 231
-rw-r--r-- | pixman/pixman/pixman-version.h | 4
-rw-r--r-- | pixman/pixman/pixman.c | 157
-rw-r--r-- | pixman/pixman/pixman.h | 32
-rw-r--r-- | pixman/test/fetch-test.c | 6
19 files changed, 836 insertions(+), 659 deletions(-)
diff --git a/pixman/Makefile.am b/pixman/Makefile.am index 9fd3a375a..63b08c1fb 100644 --- a/pixman/Makefile.am +++ b/pixman/Makefile.am @@ -77,7 +77,7 @@ ensure-prev: false; \ fi -release-check: ensure-prev release-verify-newer release-remove-old check distcheck +release-check: ensure-prev release-verify-newer release-remove-old distcheck release-tag: git tag -u $(GPGKEY) -m "$(PACKAGE) $(VERSION) release" $(PACKAGE)-$(VERSION) diff --git a/pixman/Makefile.in b/pixman/Makefile.in index 35109d3b0..8017fe87d 100644 --- a/pixman/Makefile.in +++ b/pixman/Makefile.in @@ -756,7 +756,7 @@ ensure-prev: false; \ fi -release-check: ensure-prev release-verify-newer release-remove-old check distcheck +release-check: ensure-prev release-verify-newer release-remove-old distcheck release-tag: git tag -u $(GPGKEY) -m "$(PACKAGE) $(VERSION) release" $(PACKAGE)-$(VERSION) diff --git a/pixman/configure b/pixman/configure index 38551e427..68ec9c13d 100644 --- a/pixman/configure +++ b/pixman/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.63 for pixman 0.17.4. +# Generated by GNU Autoconf 2.63 for pixman 0.17.6. # # Report bugs to <"sandmann@daimi.au.dk">. # @@ -750,8 +750,8 @@ SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='pixman' PACKAGE_TARNAME='pixman' -PACKAGE_VERSION='0.17.4' -PACKAGE_STRING='pixman 0.17.4' +PACKAGE_VERSION='0.17.6' +PACKAGE_STRING='pixman 0.17.6' PACKAGE_BUGREPORT='"sandmann@daimi.au.dk"' # Factoring default headers for most tests. @@ -1525,7 +1525,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures pixman 0.17.4 to adapt to many kinds of systems. +\`configure' configures pixman 0.17.6 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1595,7 +1595,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of pixman 0.17.4:";; + short | recursive ) echo "Configuration of pixman 0.17.6:";; esac cat <<\_ACEOF @@ -1714,7 +1714,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -pixman configure 0.17.4 +pixman configure 0.17.6 generated by GNU Autoconf 2.63 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, @@ -1728,7 +1728,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by pixman $as_me 0.17.4, which was +It was created by pixman $as_me 0.17.6, which was generated by GNU Autoconf 2.63. Invocation command line was $ $0 $@ @@ -2444,7 +2444,7 @@ fi # Define the identity of the package. PACKAGE='pixman' - VERSION='0.17.4' + VERSION='0.17.6' cat >>confdefs.h <<_ACEOF @@ -21154,13 +21154,13 @@ fi -LT_VERSION_INFO="17:4:17" +LT_VERSION_INFO="17:6:17" PIXMAN_VERSION_MAJOR=0 PIXMAN_VERSION_MINOR=17 -PIXMAN_VERSION_MICRO=4 +PIXMAN_VERSION_MICRO=6 @@ -22802,7 +22802,7 @@ exec 6>&1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by pixman $as_me 0.17.4, which was +This file was extended by pixman $as_me 0.17.6, which was generated by GNU Autoconf 2.63. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -22865,7 +22865,7 @@ Report bugs to <bug-autoconf@gnu.org>." 
_ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_version="\\ -pixman config.status 0.17.4 +pixman config.status 0.17.6 configured by $0, generated by GNU Autoconf 2.63, with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" diff --git a/pixman/configure.ac b/pixman/configure.ac index 2828648d8..9ccd7d0f8 100644 --- a/pixman/configure.ac +++ b/pixman/configure.ac @@ -54,7 +54,7 @@ AC_PREREQ([2.57]) m4_define([pixman_major], 0) m4_define([pixman_minor], 17) -m4_define([pixman_micro], 4) +m4_define([pixman_micro], 6) m4_define([pixman_version],[pixman_major.pixman_minor.pixman_micro]) diff --git a/pixman/pixman/pixman-arm-neon.c b/pixman/pixman/pixman-arm-neon.c index efeabebd5..26f7267c4 100644 --- a/pixman/pixman/pixman-arm-neon.c +++ b/pixman/pixman/pixman-arm-neon.c @@ -385,52 +385,53 @@ pixman_blt_neon (uint32_t *src_bits, static const pixman_fast_path_t arm_neon_fast_path_array[] = { - { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_0565_0565 }, - { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_0565_0565 }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_8888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_8888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_8888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_8888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_src_0565_8888 }, - { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_src_0565_8888 }, - { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_a8b8g8r8, neon_composite_src_0565_8888 }, - { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_src_0565_8888 }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_src_8888_8888 }, - { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_src_8888_8888 }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_src_8888_8888 }, - { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_src_8888_8888 }, - { PIXMAN_OP_SRC, PIXMAN_r8g8b8, PIXMAN_null, PIXMAN_r8g8b8, neon_composite_src_0888_0888 }, - { PIXMAN_OP_SRC, PIXMAN_b8g8r8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_src_0888_8888_rev }, - { PIXMAN_OP_SRC, PIXMAN_b8g8r8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_0888_0565_rev }, - { PIXMAN_OP_SRC, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_a8r8g8b8, neon_composite_src_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, neon_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, neon_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_n_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_n_8888 }, - { PIXMAN_OP_OVER, 
PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, neon_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, neon_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, neon_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, neon_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, neon_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_over_8888_8888 }, - { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_n_8_8 }, - { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_a8, PIXMAN_a8, neon_composite_add_8_8_8 }, - { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, neon_composite_add_8888_8888_8888 }, - { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, neon_composite_add_8000_8000 }, - { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_add_8888_8888 }, - { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, neon_composite_add_8888_8888 }, + PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, neon_composite_src_0565_0565), + PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, neon_composite_src_0565_0565), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565), + PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565), + PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565), + PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, neon_composite_src_0565_8888), + PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, neon_composite_src_0565_8888), + PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, neon_composite_src_0565_8888), + PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, neon_composite_src_0565_8888), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888), + PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888), + PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888), + PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, neon_composite_src_0888_0888), + PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, x8r8g8b8, neon_composite_src_0888_8888_rev), + PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, r5g6b5, neon_composite_src_0888_0565_rev), + PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8r8g8b8, neon_composite_src_pixbuf_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, neon_composite_over_n_8_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, neon_composite_over_n_8_0565), + PIXMAN_STD_FAST_PATH 
(OVER, solid, a8, a8r8g8b8, neon_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, neon_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, neon_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, neon_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, neon_composite_over_n_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, neon_composite_over_n_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, neon_composite_over_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, neon_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, neon_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, neon_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, neon_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, neon_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, neon_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_over_8888_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, neon_composite_over_8888_0565), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, neon_composite_over_8888_0565), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, neon_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, neon_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, neon_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, neon_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, neon_composite_add_n_8_8), + PIXMAN_STD_FAST_PATH (ADD, a8, a8, a8, neon_composite_add_8_8_8), + PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_add_8888_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, neon_composite_add_8000_8000), + PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, neon_composite_add_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, neon_composite_add_8888_8888), + { PIXMAN_OP_NONE }, }; diff --git a/pixman/pixman/pixman-arm-simd.c b/pixman/pixman/pixman-arm-simd.c index 479524690..dd8dc5c0a 100644 --- a/pixman/pixman/pixman-arm-simd.c +++ b/pixman/pixman/pixman-arm-simd.c @@ -47,7 +47,7 @@ arm_composite_add_8000_8000 (pixman_implementation_t * impl, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint8_t s, d; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); @@ -119,7 +119,7 @@ arm_composite_over_8888_8888 (pixman_implementation_t * impl, uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint32_t component_half = 0x800080; uint32_t upper_component_mask = 0xff00ff00; uint32_t alpha_mask = 0xff; @@ -213,7 +213,7 @@ arm_composite_over_8888_n_8888 (pixman_implementation_t * impl, uint32_t *src_line, *src; uint32_t mask; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint32_t component_half = 0x800080; uint32_t alpha_mask = 0xff; @@ -322,7 +322,7 @@ arm_composite_over_n_8_8888 (pixman_implementation_t * impl, uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -421,19 +421,19 @@ arm_composite_over_n_8_8888 (pixman_implementation_t * 
impl, static const pixman_fast_path_t arm_simd_fast_path_array[] = { - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, arm_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, arm_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, arm_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, arm_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888 }, - - { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, arm_composite_add_8000_8000 }, - - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888 }, + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, arm_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, arm_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, arm_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, arm_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, arm_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, arm_composite_over_8888_n_8888), + + PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, arm_composite_add_8000_8000), + + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, arm_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, arm_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, arm_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, arm_composite_over_n_8_8888), { PIXMAN_OP_NONE }, }; diff --git a/pixman/pixman/pixman-cpu.c b/pixman/pixman/pixman-cpu.c index 5d5469bb8..d727ddb19 100644 --- a/pixman/pixman/pixman-cpu.c +++ b/pixman/pixman/pixman-cpu.c @@ -253,8 +253,6 @@ pixman_arm_read_auxv () if (aux.a_type == AT_HWCAP) { uint32_t hwcap = aux.a_un.a_val; - if (getenv ("ARM_FORCE_HWCAP")) - hwcap = strtoul (getenv ("ARM_FORCE_HWCAP"), NULL, 0); /* hardcode these values to avoid depending on specific * versions of the hwcap header, e.g. HWCAP_NEON */ @@ -266,8 +264,6 @@ pixman_arm_read_auxv () else if (aux.a_type == AT_PLATFORM) { const char *plat = (const char*) aux.a_un.a_val; - if (getenv ("ARM_FORCE_PLATFORM")) - plat = getenv ("ARM_FORCE_PLATFORM"); if (strncmp (plat, "v7l", 3) == 0) { arm_has_v7 = TRUE; @@ -280,12 +276,6 @@ pixman_arm_read_auxv () } } close (fd); - - /* if we don't have 2.6.29, we have to do this hack; set - * the env var to trust HWCAP. 
- */ - if (!getenv ("ARM_TRUST_HWCAP") && arm_has_v7) - arm_has_neon = TRUE; } arm_tests_initialized = TRUE; diff --git a/pixman/pixman/pixman-fast-path.c b/pixman/pixman/pixman-fast-path.c index 75a0c1e07..170e9d69d 100644 --- a/pixman/pixman/pixman-fast-path.c +++ b/pixman/pixman/pixman-fast-path.c @@ -125,7 +125,7 @@ fast_composite_over_x888_8_8888 (pixman_implementation_t *imp, int src_stride, mask_stride, dst_stride; uint8_t m; uint32_t s, d; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); @@ -183,7 +183,7 @@ fast_composite_in_n_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint16_t t; src = _pixman_image_get_solid (src_image, dest_image->bits.format); @@ -260,7 +260,7 @@ fast_composite_in_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint8_t s; uint16_t t; @@ -308,7 +308,7 @@ fast_composite_over_n_8_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst, d; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -366,7 +366,7 @@ fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, uint32_t *dst_line, *dst, d; uint32_t *mask_line, *mask, ma; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -423,7 +423,7 @@ fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, uint32_t *dst_line, *dst, d; uint32_t *mask_line, *mask, ma; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -490,7 +490,7 @@ fast_composite_over_n_8_0888 (pixman_implementation_t *imp, uint32_t d; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -555,7 +555,7 @@ fast_composite_over_n_8_0565 (pixman_implementation_t *imp, uint32_t d; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -622,7 +622,7 @@ fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp, uint32_t d; uint32_t *mask_line, *mask, ma; int dst_stride, mask_stride; - uint16_t w; + int32_t w; src = _pixman_image_get_solid (src_image, dst_image->bits.format); @@ -697,7 +697,7 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp, uint32_t *src_line, *src, s; int dst_stride, src_stride; uint8_t a; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); @@ -744,7 +744,7 @@ fast_composite_over_8888_0888 (pixman_implementation_t *imp, uint32_t *src_line, *src, s; uint8_t a; int dst_stride, src_stride; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); @@ -796,7 +796,7 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp, uint32_t *src_line, *src, s; uint8_t a; int dst_stride, src_stride; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE 
(src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); @@ -849,7 +849,7 @@ fast_composite_src_x888_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst; uint32_t *src_line, *src, s; int dst_stride, src_stride; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); @@ -889,7 +889,7 @@ fast_composite_add_8000_8000 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint8_t s, d; uint16_t t; @@ -940,7 +940,7 @@ fast_composite_add_8888_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint32_t s, d; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); @@ -990,7 +990,7 @@ fast_composite_add_n_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t src; uint8_t sa; @@ -1033,10 +1033,10 @@ fast_composite_add_n_8_8 (pixman_implementation_t *imp, #define UPDATE_BITMASK(n) ((n) << 1) #endif -#define TEST_BIT(p, n) \ - (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31)) -#define SET_BIT(p, n) \ - do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0); +#define TEST_BIT(p, n) \ + (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31)) +#define SET_BIT(p, n) \ + do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0); static void fast_composite_add_1000_1000 (pixman_implementation_t *imp, @@ -1347,86 +1347,158 @@ fast_composite_src_8888_x888 (pixman_implementation_t *imp, static const pixman_fast_path_t c_fast_paths[] = { - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, fast_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, fast_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r8g8b8, fast_composite_over_n_8_0888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b8g8r8, fast_composite_over_n_8_0888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_a8r8g8b8, fast_composite_over_n_1_8888, }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_x8r8g8b8, fast_composite_over_n_1_8888, }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_a8b8g8r8, fast_composite_over_n_1_8888, }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_x8b8g8r8, fast_composite_over_n_1_8888, }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_r5g6b5, fast_composite_over_n_1_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_b5g6r5, fast_composite_over_n_1_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, fast_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_x8r8g8b8, fast_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_r5g6b5, fast_composite_over_n_8888_0565_ca }, - { PIXMAN_OP_OVER, 
PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_a8b8g8r8, fast_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_x8b8g8r8, fast_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_b5g6r5, fast_composite_over_n_8888_0565_ca }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_x888_8_8888, }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_x888_8_8888, }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_x888_8_8888, }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_x888_8_8888, }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_over_8888_8888, }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_over_8888_8888, }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_over_8888_0565, }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_over_8888_8888, }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_over_8888_8888, }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_over_8888_0565, }, - { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_add_8888_8888, }, - { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_add_8888_8888, }, - { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_add_8000_8000, }, - { PIXMAN_OP_ADD, PIXMAN_a1, PIXMAN_null, PIXMAN_a1, fast_composite_add_1000_1000, }, - { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, fast_composite_add_n_8888_8888_ca }, - { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_add_n_8_8, }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_solid_fill }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_solid_fill }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_solid_fill }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_solid_fill }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8, fast_composite_solid_fill }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_solid_fill }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888 }, - { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888 }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888 }, - { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888 }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565 }, - { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565 }, - { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_in_8_8, }, - { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_in_n_8_8 }, + PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, 
r8g8b8, fast_composite_over_n_8_0888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565), + PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8000_8000), + PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1000_1000), + PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8), + PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill), + PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill), + PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill), + PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill), + PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill), + PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_8888_x888), + PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_8888_x888), + PIXMAN_STD_FAST_PATH (SRC, 
a8b8g8r8, null, x8b8g8r8, fast_composite_src_8888_x888), + PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_8888_x888), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565), + PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565), + PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565), + PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8), + PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8), + { PIXMAN_OP_NONE }, }; +static force_inline pixman_bool_t +repeat (pixman_repeat_t repeat, int *c, int size) +{ + if (repeat == PIXMAN_REPEAT_NONE) + { + if (*c < 0 || *c >= size) + return FALSE; + } + else if (repeat == PIXMAN_REPEAT_NORMAL) + { + while (*c >= size) + *c -= size; + while (*c < 0) + *c += size; + } + else if (repeat == PIXMAN_REPEAT_PAD) + { + *c = CLIP (*c, 0, size - 1); + } + else /* REFLECT */ + { + *c = MOD (*c, size * 2); + if (*c >= size) + *c = size * 2 - *c - 1; + } + return TRUE; +} + +static force_inline uint32_t +fetch_nearest (pixman_repeat_t src_repeat, + pixman_format_code_t format, + uint32_t *src, int x, int src_width) +{ + if (repeat (src_repeat, &x, src_width)) + { + if (format == PIXMAN_x8r8g8b8) + return *(src + x) | 0xff000000; + else + return *(src + x); + } + else + { + return 0; + } +} + +static force_inline void +combine_over (uint32_t s, uint32_t *dst) +{ + if (s) + { + uint8_t ia = 0xff - (s >> 24); + + if (ia) + UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s); + else + *dst = s; + } +} + +static force_inline void +combine_src (uint32_t s, uint32_t *dst) +{ + *dst = s; +} + static void -fast_composite_src_scale_nearest (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) +fast_composite_scaled_nearest (pixman_implementation_t *imp, + pixman_op_t op, + pixman_image_t * src_image, + pixman_image_t * mask_image, + pixman_image_t * dst_image, + int32_t src_x, + int32_t src_y, + int32_t mask_x, + int32_t mask_y, + int32_t dest_x, + int32_t dest_y, + int32_t width, + int32_t height) { - uint32_t *dst; - uint32_t *src; - int dst_stride, src_stride; - int i, j; + uint32_t *dst_line; + uint32_t *src_line; + int dst_stride, src_stride; + int src_width, src_height; + pixman_repeat_t src_repeat; + pixman_fixed_t unit_x, unit_y; + pixman_format_code_t src_format; pixman_vector_t v; + pixman_fixed_t vy; - PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst, 1); + PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); /* pass in 0 instead of src_x and src_y because src_x and src_y need to be - * transformed from destination space to source space */ - PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src, 1); + * transformed from destination space to source space + */ + PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1); /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; @@ -1436,81 +1508,86 @@ fast_composite_src_scale_nearest (pixman_implementation_t *imp, if (!pixman_transform_point_3d (src_image->common.transform, &v)) return; + unit_x = src_image->common.transform->matrix[0][0]; + unit_y = 
src_image->common.transform->matrix[1][1]; + /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ v.vector[0] -= pixman_fixed_e; v.vector[1] -= pixman_fixed_e; - for (j = 0; j < height; j++) + src_height = src_image->bits.height; + src_width = src_image->bits.width; + src_repeat = src_image->common.repeat; + src_format = src_image->bits.format; + + vy = v.vector[1]; + while (height--) { - pixman_fixed_t vx = v.vector[0]; - pixman_fixed_t vy = v.vector[1]; + pixman_fixed_t vx = v.vector[0]; + int y = pixman_fixed_to_int (vy); + uint32_t *dst = dst_line; - for (i = 0; i < width; ++i) + dst_line += dst_stride; + + /* adjust the y location by a unit vector in the y direction + * this is equivalent to transforming y+1 of the destination point to source space */ + vy += unit_y; + + if (!repeat (src_repeat, &y, src_height)) { - pixman_bool_t inside_bounds; - uint32_t result; - int x, y; - x = vx >> 16; - y = vy >> 16; - - /* apply the repeat function */ - switch (src_image->common.repeat) - { - case PIXMAN_REPEAT_NORMAL: - x = MOD (x, src_image->bits.width); - y = MOD (y, src_image->bits.height); - inside_bounds = TRUE; - break; - - case PIXMAN_REPEAT_PAD: - x = CLIP (x, 0, src_image->bits.width - 1); - y = CLIP (y, 0, src_image->bits.height - 1); - inside_bounds = TRUE; - break; - - case PIXMAN_REPEAT_REFLECT: - x = MOD (x, src_image->bits.width * 2); - if (x >= src_image->bits.width) - x = src_image->bits.width * 2 - x - 1; - y = MOD (y, src_image->bits.height * 2); - if (y >= src_image->bits.height) - y = src_image->bits.height * 2 - y - 1; - inside_bounds = TRUE; - break; - - case PIXMAN_REPEAT_NONE: - default: - inside_bounds = - (x >= 0 && - x < src_image->bits.width && - y >= 0 && - y < src_image->bits.height); - break; - } + if (op == PIXMAN_OP_SRC) + memset (dst, 0, sizeof (*dst) * width); + } + else + { + int w = width; + + uint32_t *src = src_line + y * src_stride; - if (inside_bounds) + while (w >= 2) { - /* XXX: we should move this multiplication out of the loop */ - result = *(src + y * src_stride + x); + uint32_t s1, s2; + int x1, x2; + + x1 = pixman_fixed_to_int (vx); + vx += unit_x; + + x2 = pixman_fixed_to_int (vx); + vx += unit_x; + + w -= 2; + + s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width); + s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width); + + if (op == PIXMAN_OP_OVER) + { + combine_over (s1, dst++); + combine_over (s2, dst++); + } + else + { + combine_src (s1, dst++); + combine_src (s2, dst++); + } } - else + + while (w--) { - result = 0; - } - *(dst + i) = result; + uint32_t s; + int x; - /* adjust the x location by a unit vector in the x direction: - * this is equivalent to transforming x+1 of the destination - * point to source space - */ - vx += src_image->common.transform->matrix[0][0]; + x = pixman_fixed_to_int (vx); + vx += unit_x; + + s = fetch_nearest (src_repeat, src_format, src, x, src_width); + + if (op == PIXMAN_OP_OVER) + combine_over (s, dst++); + else + combine_src (s, dst++); + } } - /* adjust the y location by a unit vector in the y direction - * this is equivalent to transforming y+1 of the destination point - * to source space - */ - v.vector[1] += src_image->common.transform->matrix[1][1]; - dst += dst_stride; } } @@ -1532,11 +1609,11 @@ fast_path_composite (pixman_implementation_t *imp, if (src->type == BITS && src->common.transform && !mask - && op == PIXMAN_OP_SRC + && (op == PIXMAN_OP_SRC || op == PIXMAN_OP_OVER) && !src->common.alpha_map && !dest->common.alpha_map && (src->common.filter == 
PIXMAN_FILTER_NEAREST) - && PIXMAN_FORMAT_BPP (dest->bits.format) == 32 - && src->bits.format == dest->bits.format + && (dest->bits.format == PIXMAN_a8r8g8b8 || dest->bits.format == PIXMAN_x8r8g8b8) + && (src->bits.format == PIXMAN_a8r8g8b8 || src->bits.format == PIXMAN_x8r8g8b8) && !src->bits.read_func && !src->bits.write_func && !dest->bits.read_func && !dest->bits.write_func) { @@ -1553,7 +1630,7 @@ fast_path_composite (pixman_implementation_t *imp, mask_x, mask_y, dest_x, dest_y, width, height, - fast_composite_src_scale_nearest); + fast_composite_scaled_nearest); return; } } @@ -1694,4 +1771,3 @@ _pixman_implementation_create_fast_path (void) return imp; } - diff --git a/pixman/pixman/pixman-image.c b/pixman/pixman/pixman-image.c index 1619165ab..6036c568e 100644 --- a/pixman/pixman/pixman-image.c +++ b/pixman/pixman/pixman-image.c @@ -231,6 +231,12 @@ pixman_image_set_destroy_function (pixman_image_t * image, image->common.destroy_data = data; } +PIXMAN_EXPORT void * +pixman_image_get_destroy_data (pixman_image_t *image) +{ + return image->common.destroy_data; +} + void _pixman_image_reset_clip_region (pixman_image_t *image) { @@ -247,7 +253,7 @@ _pixman_image_validate (pixman_image_t *image) } if (image->common.alpha_map) - _pixman_image_validate (image->common.alpha_map); + _pixman_image_validate ((pixman_image_t *)image->common.alpha_map); } PIXMAN_EXPORT pixman_bool_t @@ -591,7 +597,7 @@ _pixman_image_is_opaque (pixman_image_t *image) break; case SOLID: - if (ALPHA_8 (image->solid.color) != 0xff) + if (image->solid.color.alpha != 0xffff) return FALSE; break; diff --git a/pixman/pixman/pixman-mmx.c b/pixman/pixman/pixman-mmx.c index b1591d967..a4affa57c 100644 --- a/pixman/pixman/pixman-mmx.c +++ b/pixman/pixman/pixman-mmx.c @@ -1102,7 +1102,7 @@ mmx_composite_over_n_8888 (pixman_implementation_t *imp, { uint32_t src; uint32_t *dst_line, *dst; - uint16_t w; + int32_t w; int dst_stride; __m64 vsrc, vsrca; @@ -1181,7 +1181,7 @@ mmx_composite_over_n_0565 (pixman_implementation_t *imp, { uint32_t src; uint16_t *dst_line, *dst; - uint16_t w; + int32_t w; int dst_stride; __m64 vsrc, vsrca; @@ -1376,7 +1376,7 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp, uint32_t mask; __m64 vmask; int dst_stride, src_stride; - uint16_t w; + int32_t w; __m64 srca; CHECKPOINT (); @@ -1461,7 +1461,7 @@ mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp, uint32_t mask; __m64 vmask; int dst_stride, src_stride; - uint16_t w; + int32_t w; __m64 srca; CHECKPOINT (); @@ -1596,7 +1596,7 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp, uint32_t s; int dst_stride, src_stride; uint8_t a; - uint16_t w; + int32_t w; CHECKPOINT (); @@ -1652,7 +1652,7 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; CHECKPOINT (); @@ -1756,7 +1756,7 @@ mmx_composite_over_n_8_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; __m64 vsrc, vsrca; uint64_t srcsrc; @@ -2030,7 +2030,7 @@ mmx_composite_src_n_8_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; __m64 vsrc, vsrca; uint64_t srcsrc; @@ -2165,7 +2165,7 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; __m64 vsrc, 
vsrca, tmp; uint64_t srcsrcsrcsrc, src16; @@ -2305,7 +2305,7 @@ mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; CHECKPOINT (); @@ -2425,7 +2425,7 @@ mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; CHECKPOINT (); @@ -2633,7 +2633,7 @@ mmx_composite_in_n_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t src; uint8_t sa; __m64 vsrc, vsrca; @@ -2715,7 +2715,7 @@ mmx_composite_in_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int src_stride, dst_stride; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); @@ -2780,7 +2780,7 @@ mmx_composite_add_n_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t src; uint8_t sa; __m64 vsrc, vsrca; @@ -2860,7 +2860,7 @@ mmx_composite_add_8000_8000 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint8_t s, d; uint16_t t; @@ -2934,7 +2934,7 @@ mmx_composite_add_8888_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; CHECKPOINT (); @@ -3165,7 +3165,7 @@ mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp, uint32_t *dst, *dst_line; uint8_t *mask, *mask_line; int src_stride, mask_stride, dst_stride; - uint16_t w; + int32_t w; PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); @@ -3216,72 +3216,74 @@ mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp, static const pixman_fast_path_t mmx_fast_paths[] = { - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, mmx_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, mmx_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_r5g6b5, mmx_composite_over_n_8888_0565_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_a8b8g8r8, mmx_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_x8b8g8r8, mmx_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_b5g6r5, mmx_composite_over_n_8888_0565_ca }, - { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_a8r8g8b8, mmx_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, 
PIXMAN_x8r8g8b8, mmx_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_r5g6b5, mmx_composite_over_pixbuf_0565 }, - { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_a8b8g8r8, mmx_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_x8b8g8r8, mmx_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_b5g6r5, mmx_composite_over_pixbuf_0565 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, mmx_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, mmx_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, mmx_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, mmx_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, mmx_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, mmx_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, mmx_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, mmx_composite_over_8888_n_8888 }, + PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, mmx_composite_over_n_8_0565 ), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, mmx_composite_over_n_8_0565 ), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, mmx_composite_over_n_8_8888 ), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, mmx_composite_over_n_8_8888 ), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, mmx_composite_over_n_8_8888 ), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, mmx_composite_over_n_8_8888 ), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, mmx_composite_over_n_8888_8888_ca ), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, mmx_composite_over_n_8888_8888_ca ), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, mmx_composite_over_n_8888_0565_ca ), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, mmx_composite_over_n_8888_8888_ca ), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, mmx_composite_over_n_8888_8888_ca ), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, mmx_composite_over_n_8888_0565_ca ), + PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, mmx_composite_over_pixbuf_8888 ), + PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, mmx_composite_over_pixbuf_8888 ), + PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, r5g6b5, mmx_composite_over_pixbuf_0565 ), + PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, mmx_composite_over_pixbuf_8888 ), + PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, mmx_composite_over_pixbuf_8888 ), + PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, mmx_composite_over_pixbuf_0565 ), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, mmx_composite_over_x888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, mmx_composite_over_x888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, a8b8g8r8, mmx_composite_over_x888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, mmx_composite_over_x888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, mmx_composite_over_8888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, mmx_composite_over_8888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, mmx_composite_over_8888_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, 
a8b8g8r8, solid, x8b8g8r8, mmx_composite_over_8888_n_8888 ), #if 0 - /* FIXME: This code is commented out since it's apparently not actually faster than the generic code. */ - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888 }, + /* FIXME: This code is commented out since it's apparently + * not actually faster than the generic code. + */ + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, mmx_composite_over_x888_8_8888 ), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, mmx_composite_over_x888_8_8888 ), + PIXMAN_STD_FAST_PATH (OVER, x8b8r8g8, a8, x8b8g8r8, mmx_composite_over_x888_8_8888 ), + PIXMAN_STD_FAST_PATH (OVER, x8b8r8g8, a8, a8r8g8b8, mmx_composite_over_x888_8_8888 ), #endif - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_over_n_0565 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area }, - - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_over_8888_0565 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, mmx_composite_over_8888_0565 }, - - { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_add_8888_8888 }, - { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_add_8888_8888 }, - { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_add_8000_8000 }, - { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_add_n_8_8 }, - - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, 
mmx_composite_copy_area }, - - { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_in_8_8 }, - { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_in_n_8_8 }, + PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, mmx_composite_over_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, mmx_composite_over_n_8888 ), + PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, mmx_composite_over_n_0565 ), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ), + + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, mmx_composite_over_8888_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, mmx_composite_over_8888_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, mmx_composite_over_8888_0565 ), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, mmx_composite_over_8888_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, mmx_composite_over_8888_8888 ), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, mmx_composite_over_8888_0565 ), + + PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, mmx_composite_add_8888_8888 ), + PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, mmx_composite_add_8888_8888 ), + PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, mmx_composite_add_8000_8000 ), + PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, mmx_composite_add_n_8_8 ), + + PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, mmx_composite_src_n_8_8888 ), + PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, mmx_composite_src_n_8_8888 ), + PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, mmx_composite_src_n_8_8888 ), + PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, mmx_composite_src_n_8_8888 ), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, mmx_composite_copy_area ), + PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, mmx_composite_copy_area ), + + PIXMAN_STD_FAST_PATH (IN, a8, null, a8, mmx_composite_in_8_8 ), + PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, mmx_composite_in_n_8_8 ), { PIXMAN_OP_NONE }, }; diff --git a/pixman/pixman/pixman-private.h b/pixman/pixman/pixman-private.h index c99f2a23c..c99b10179 100644 --- a/pixman/pixman/pixman-private.h +++ b/pixman/pixman/pixman-private.h @@ -114,7 +114,10 @@ struct source_image struct solid_fill { source_image_t common; - uint32_t color; /* FIXME: shouldn't this be a pixman_color_t? 
*/ + pixman_color_t color; + + uint32_t color_32; + uint64_t color_64; }; struct gradient @@ -558,20 +561,80 @@ _pixman_choose_implementation (void); */ #define PIXMAN_null PIXMAN_FORMAT (0, 0, 0, 0, 0, 0) #define PIXMAN_solid PIXMAN_FORMAT (0, 1, 0, 0, 0, 0) -#define PIXMAN_a8r8g8b8_ca PIXMAN_FORMAT (0, 2, 0, 0, 0, 0) -#define PIXMAN_a8b8g8r8_ca PIXMAN_FORMAT (0, 3, 0, 0, 0, 0) -#define PIXMAN_pixbuf PIXMAN_FORMAT (0, 4, 0, 0, 0, 0) -#define PIXMAN_rpixbuf PIXMAN_FORMAT (0, 5, 0, 0, 0, 0) +#define PIXMAN_pixbuf PIXMAN_FORMAT (0, 2, 0, 0, 0, 0) +#define PIXMAN_rpixbuf PIXMAN_FORMAT (0, 3, 0, 0, 0, 0) +#define PIXMAN_unknown PIXMAN_FORMAT (0, 4, 0, 0, 0, 0) + +#define FAST_PATH_ID_TRANSFORM (1 << 0) +#define FAST_PATH_NO_ALPHA_MAP (1 << 1) +#define FAST_PATH_NO_CONVOLUTION_FILTER (1 << 2) +#define FAST_PATH_NO_PAD_REPEAT (1 << 3) +#define FAST_PATH_NO_REFLECT_REPEAT (1 << 4) +#define FAST_PATH_NO_ACCESSORS (1 << 5) +#define FAST_PATH_NO_WIDE_FORMAT (1 << 6) +#define FAST_PATH_reserved (1 << 7) +#define FAST_PATH_COMPONENT_ALPHA (1 << 8) +#define FAST_PATH_UNIFIED_ALPHA (1 << 9) + +#define _FAST_PATH_STANDARD_FLAGS \ + (FAST_PATH_ID_TRANSFORM | \ + FAST_PATH_NO_ALPHA_MAP | \ + FAST_PATH_NO_CONVOLUTION_FILTER | \ + FAST_PATH_NO_PAD_REPEAT | \ + FAST_PATH_NO_REFLECT_REPEAT | \ + FAST_PATH_NO_ACCESSORS | \ + FAST_PATH_NO_WIDE_FORMAT) + +#define FAST_PATH_STD_SRC_FLAGS \ + _FAST_PATH_STANDARD_FLAGS +#define FAST_PATH_STD_MASK_U_FLAGS \ + (_FAST_PATH_STANDARD_FLAGS | \ + FAST_PATH_UNIFIED_ALPHA) +#define FAST_PATH_STD_MASK_CA_FLAGS \ + (_FAST_PATH_STANDARD_FLAGS | \ + FAST_PATH_COMPONENT_ALPHA) +#define FAST_PATH_STD_DEST_FLAGS \ + (FAST_PATH_NO_ACCESSORS | \ + FAST_PATH_NO_WIDE_FORMAT) typedef struct { pixman_op_t op; pixman_format_code_t src_format; + uint32_t src_flags; pixman_format_code_t mask_format; + uint32_t mask_flags; pixman_format_code_t dest_format; + uint32_t dest_flags; pixman_composite_func_t func; } pixman_fast_path_t; +#define FAST_PATH(op, src, src_flags, mask, mask_flags, dest, dest_flags, func) \ + PIXMAN_OP_ ## op, \ + PIXMAN_ ## src, \ + src_flags, \ + PIXMAN_ ## mask, \ + mask_flags, \ + PIXMAN_ ## dest, \ + dest_flags, \ + func + +#define PIXMAN_STD_FAST_PATH(op, src, mask, dest, func) \ + { FAST_PATH ( \ + op, \ + src, FAST_PATH_STD_SRC_FLAGS, \ + mask, (PIXMAN_ ## mask) ? FAST_PATH_STD_MASK_U_FLAGS : 0, \ + dest, FAST_PATH_STD_DEST_FLAGS, \ + func) } + +#define PIXMAN_STD_FAST_PATH_CA(op, src, mask, dest, func) \ + { FAST_PATH ( \ + op, \ + src, FAST_PATH_STD_SRC_FLAGS, \ + mask, FAST_PATH_STD_MASK_CA_FLAGS, \ + dest, FAST_PATH_STD_DEST_FLAGS, \ + func) } + /* Memory allocation helpers */ void * pixman_malloc_ab (unsigned int n, unsigned int b); diff --git a/pixman/pixman/pixman-region.c b/pixman/pixman/pixman-region.c index 8ce5deb77..9f7515c78 100644 --- a/pixman/pixman/pixman-region.c +++ b/pixman/pixman/pixman-region.c @@ -68,32 +68,21 @@ /* Turn on debugging depending on what type of release this is */ - -#if ((PIXMAN_VERSION_MICRO % 2) == 1) -/* Random git checkout. - * - * Those are often used for performance work, so we don't turn on the - * full self-checking, but we do turn on the asserts. - */ -# define FATAL_BUGS -# define noSELF_CHECKS -#elif ((PIXMAN_VERSION_MINOR % 2) == 0) -/* Stable release. - * - * We don't want assertions because the X server should stay alive - * if possible. We also don't want self-checks for performance-reasons. 
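(For reference only, not part of the patch: the fast-path tables in pixman-mmx.c and pixman-sse2.c now use the PIXMAN_STD_FAST_PATH macro defined in the pixman-private.h hunk above, so each table entry carries per-image flag requirements next to each format. A rough preprocessor expansion of one such entry looks like this; PIXMAN_null is the all-zero format code, so a null mask contributes no flag requirements.)

/* PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, mmx_composite_over_n_8888)
 * expands, roughly, to the following pixman_fast_path_t initializer: */
{
    PIXMAN_OP_OVER,
    PIXMAN_solid,    FAST_PATH_STD_SRC_FLAGS,
    PIXMAN_null,     0,                        /* PIXMAN_null == 0, so no mask flags required */
    PIXMAN_a8r8g8b8, FAST_PATH_STD_DEST_FLAGS,
    mmx_composite_over_n_8888
},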
+#if (((PIXMAN_VERSION_MICRO % 2) == 0) && ((PIXMAN_VERSION_MINOR % 2) == 1)) +/* This is a development snapshot, so we want self-checking in order to + * catch as many bugs as possible. However, we don't turn on the asserts + * because that just leads to the X server crashing which leads to + * people not running the snapshots. */ # define noFATAL_BUGS -# define noSELF_CHECKS +# define SELF_CHECKS #else -/* Development snapshot. - * - * These are the things that get shipped in development distributions - * such as Rawhide. We want both self-checking and fatal assertions - * to catch as many bugs as possible. +/* This is either a stable release or a random git checkout. We don't + * want self checks in either case for performance reasons. (Random + * git checkouts are often used for performance work */ -# define FATAL_BUGS -# define SELF_CHECKS +# define noFATAL_BUGS +# define noSELF_CHECKS #endif #ifndef FATAL_BUGS @@ -110,7 +99,7 @@ log_region_error (const char *function, const char *message) { static int n_messages = 0; - if (n_messages < 50) + if (n_messages < 5) { fprintf (stderr, "*** BUG ***\n" @@ -2470,7 +2459,7 @@ PREFIX (_selfcheck) (region_type_t *reg) PIXMAN_EXPORT pixman_bool_t PREFIX (_init_rects) (region_type_t *region, - box_type_t *boxes, int count) + const box_type_t *boxes, int count) { box_type_t *rects; int displacement; diff --git a/pixman/pixman/pixman-solid-fill.c b/pixman/pixman/pixman-solid-fill.c index 38675dca8..48c999a0e 100644 --- a/pixman/pixman/pixman-solid-fill.c +++ b/pixman/pixman/pixman-solid-fill.c @@ -36,7 +36,7 @@ solid_fill_get_scanline_32 (pixman_image_t *image, uint32_t mask_bits) { uint32_t *end = buffer + width; - register uint32_t color = ((solid_fill_t *)image)->color; + uint32_t color = image->solid.color_32; while (buffer < end) *(buffer++) = color; @@ -44,6 +44,23 @@ solid_fill_get_scanline_32 (pixman_image_t *image, return; } +static void +solid_fill_get_scanline_64 (pixman_image_t *image, + int x, + int y, + int width, + uint32_t * buffer, + const uint32_t *mask, + uint32_t mask_bits) +{ + uint64_t *b = (uint64_t *)buffer; + uint64_t *e = b + width; + uint64_t color = image->solid.color_64; + + while (b < e) + *(b++) = color; +} + static source_image_class_t solid_fill_classify (pixman_image_t *image, int x, @@ -58,7 +75,7 @@ static void solid_fill_property_changed (pixman_image_t *image) { image->common.get_scanline_32 = solid_fill_get_scanline_32; - image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64; + image->common.get_scanline_64 = solid_fill_get_scanline_64; } static uint32_t @@ -71,6 +88,16 @@ color_to_uint32 (const pixman_color_t *color) (color->blue >> 8); } +static uint64_t +color_to_uint64 (const pixman_color_t *color) +{ + return + ((uint64_t)color->alpha << 48) | + ((uint64_t)color->red << 32) | + ((uint64_t)color->green << 16) | + ((uint64_t)color->blue); +} + PIXMAN_EXPORT pixman_image_t * pixman_image_create_solid_fill (pixman_color_t *color) { @@ -80,7 +107,9 @@ pixman_image_create_solid_fill (pixman_color_t *color) return NULL; img->type = SOLID; - img->solid.color = color_to_uint32 (color); + img->solid.color = *color; + img->solid.color_32 = color_to_uint32 (color); + img->solid.color_64 = color_to_uint64 (color); img->source.class = SOURCE_IMAGE_CLASS_UNKNOWN; img->common.classify = solid_fill_classify; diff --git a/pixman/pixman/pixman-sse2.c b/pixman/pixman/pixman-sse2.c index 1f8aa6e38..2bade7476 100644 --- a/pixman/pixman/pixman-sse2.c +++ b/pixman/pixman/pixman-sse2.c @@ -998,7 +998,7 @@ 
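(Worked example, not part of the patch: with the pixman-solid-fill.c change above, a solid image now precomputes its color in both 8-bit and 16-bit per channel form at creation time. The snippet below simply mirrors the color_to_uint32()/color_to_uint64() packing shown in the hunk, using an arbitrary sample color.)

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    /* 16 bits per channel, as in pixman_color_t */
    uint16_t red = 0x8000, green = 0x4000, blue = 0xc000, alpha = 0xffff;

    /* color_32: top 8 bits of each channel, packed a8r8g8b8 */
    uint32_t c32 = ((uint32_t) (alpha >> 8) << 24) | ((uint32_t) (red >> 8) << 16) |
                   ((uint32_t) (green >> 8) << 8)  |  (uint32_t) (blue >> 8);

    /* color_64: full 16-bit channels, packed a16r16g16b16 */
    uint64_t c64 = ((uint64_t) alpha << 48) | ((uint64_t) red << 32) |
                   ((uint64_t) green << 16) |  (uint64_t) blue;

    printf ("color_32 = 0x%08x\n", c32);                        /* 0xff8040c0         */
    printf ("color_64 = 0x%016llx\n", (unsigned long long) c64); /* 0xffff80004000c000 */
    return 0;
}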
core_combine_reverse_out_u_sse2 (uint32_t* pd, pix_multiply_1x64 ( unpack_32_1x64 (d), negate_1x64 ( expand_alpha_1x64 (unpack_32_1x64 (s))))); - + if (pm) pm++; ps++; @@ -2652,8 +2652,8 @@ create_mask_2x32_64 (uint32_t mask0, /* Work around a code generation bug in Sun Studio 12. */ #if defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) -# define create_mask_2x32_128(mask0, mask1) \ - (_mm_set_epi32 ((mask0), (mask1), (mask0), (mask1))) +# define create_mask_2x32_128(mask0, mask1) \ + (_mm_set_epi32 ((mask0), (mask1), (mask0), (mask1))) #else static force_inline __m128i create_mask_2x32_128 (uint32_t mask0, @@ -2950,7 +2950,7 @@ sse2_composite_over_n_8888 (pixman_implementation_t *imp, { uint32_t src; uint32_t *dst_line, *dst, d; - uint16_t w; + int32_t w; int dst_stride; __m128i xmm_src, xmm_alpha; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; @@ -3041,7 +3041,7 @@ sse2_composite_over_n_0565 (pixman_implementation_t *imp, { uint32_t src; uint16_t *dst_line, *dst, d; - uint16_t w; + int32_t w; int dst_stride; __m128i xmm_src, xmm_alpha; __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; @@ -3152,7 +3152,7 @@ sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, src = _pixman_image_get_solid (src_image, dst_image->bits.format); srca = src >> 24; - + if (src == 0) return; @@ -3187,7 +3187,7 @@ sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, if (m) { d = *pd; - + mmx_mask = unpack_32_1x64 (m); mmx_dest = unpack_32_1x64 (d); @@ -3226,7 +3226,7 @@ sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); xmm_mask_hi = pack_2x128_128 (xmm_mask_lo, xmm_mask_hi); - + save_128_aligned ( (__m128i*)pd, _mm_adds_epu8 (xmm_mask_hi, xmm_dst)); } @@ -3421,7 +3421,7 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint32_t *src_line, *src; uint32_t mask; - uint16_t w; + int32_t w; int dst_stride, src_stride; __m128i xmm_mask; @@ -3539,7 +3539,7 @@ sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp, uint32_t *src_line, *src; uint32_t mask; int dst_stride, src_stride; - uint16_t w; + int32_t w; __m128i xmm_mask, xmm_alpha; __m128i xmm_src, xmm_src_lo, xmm_src_hi; @@ -3707,7 +3707,7 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst, d; uint32_t *src_line, *src, s; int dst_stride, src_stride; - uint16_t w; + int32_t w; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src, xmm_src_lo, xmm_src_hi; @@ -3837,7 +3837,7 @@ sse2_composite_over_n_8_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t m, d; __m128i xmm_src, xmm_alpha, xmm_def; @@ -4120,7 +4120,7 @@ sse2_composite_src_n_8_8888 (pixman_implementation_t *imp, uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t m; __m128i xmm_src, xmm_def; @@ -4266,7 +4266,7 @@ sse2_composite_over_n_8_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst, d; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t m; __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest; @@ -4429,7 +4429,7 @@ sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp, uint16_t *dst_line, *dst, d; uint32_t *src_line, *src, s; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint32_t opaque, zero; __m64 ms; @@ -4575,7 +4575,7 @@ sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp, uint32_t 
*dst_line, *dst, d; uint32_t *src_line, *src, s; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint32_t opaque, zero; __m128i xmm_src_lo, xmm_src_hi; @@ -4861,9 +4861,10 @@ sse2_composite_in_n_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w, d, m; + uint32_t d, m; uint32_t src; uint8_t sa; + int32_t w; __m128i xmm_alpha; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; @@ -4976,7 +4977,7 @@ sse2_composite_in_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int src_stride, dst_stride; - uint16_t w; + int32_t w; uint32_t s, d; __m128i xmm_src, xmm_src_lo, xmm_src_hi; @@ -5074,7 +5075,7 @@ sse2_composite_add_n_8_8 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; - uint16_t w; + int32_t w; uint32_t src; uint8_t sa; uint32_t m, d; @@ -5190,7 +5191,7 @@ sse2_composite_add_8000_8000 (pixman_implementation_t *imp, uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; - uint16_t w; + int32_t w; uint16_t t; PIXMAN_IMAGE_GET_LINE ( @@ -5468,7 +5469,7 @@ sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp, uint8_t *mask, *mask_line; uint32_t m; int src_stride, mask_stride, dst_stride; - uint16_t w; + int32_t w; __m64 ms; __m128i xmm_src, xmm_src_lo, xmm_src_hi; @@ -5615,7 +5616,7 @@ sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp, uint8_t *mask, *mask_line; uint32_t m; int src_stride, mask_stride, dst_stride; - uint16_t w; + int32_t w; __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; @@ -5712,7 +5713,7 @@ sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp, expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - + in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); @@ -5766,73 +5767,77 @@ sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp, static const pixman_fast_path_t sse2_fast_paths[] = { - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, sse2_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, sse2_composite_over_n_8_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_over_n_0565 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_over_8888_0565 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, sse2_composite_over_8888_0565 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, 
PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_8888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_x888_8_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_r5g6b5, sse2_composite_over_n_8888_0565_ca }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_b5g6r5, sse2_composite_over_n_8888_0565_ca }, - { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888 }, - { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_r5g6b5, sse2_composite_over_pixbuf_0565 }, - { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_b5g6r5, sse2_composite_over_pixbuf_0565 }, - { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area }, - { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area }, - - { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, sse2_composite_add_n_8888_8888_ca }, - { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_add_8000_8000 }, - { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888 }, - { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888 }, - { 
PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, sse2_composite_add_n_8_8 }, - - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888 }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_copy_area }, - { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, sse2_composite_copy_area }, - - { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_in_8_8 }, - { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, sse2_composite_in_n_8_8 }, + /* PIXMAN_OP_OVER */ + PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, sse2_composite_over_n_8_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, sse2_composite_over_n_8_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, sse2_composite_over_n_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, sse2_composite_over_n_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, sse2_composite_over_n_0565), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, sse2_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, sse2_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, sse2_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, sse2_composite_over_8888_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, sse2_composite_over_8888_0565), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, sse2_composite_over_8888_0565), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, sse2_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, sse2_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, sse2_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, sse2_composite_over_n_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, sse2_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, sse2_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, sse2_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, sse2_composite_over_8888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, sse2_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, sse2_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, sse2_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, sse2_composite_over_x888_8_8888), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, sse2_composite_over_x888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, sse2_composite_over_x888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, 
x8b8g8r8, solid, a8b8g8r8, sse2_composite_over_x888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, sse2_composite_over_x888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, sse2_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, sse2_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, sse2_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, sse2_composite_over_8888_n_8888), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, sse2_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, sse2_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, sse2_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, sse2_composite_over_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, sse2_composite_over_n_8888_0565_ca), + PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, sse2_composite_over_n_8888_0565_ca), + PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, sse2_composite_over_pixbuf_8888), + PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, sse2_composite_over_pixbuf_8888), + PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, sse2_composite_over_pixbuf_8888), + PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, sse2_composite_over_pixbuf_8888), + PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, r5g6b5, sse2_composite_over_pixbuf_0565), + PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, sse2_composite_over_pixbuf_0565), + PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), + + /* PIXMAN_OP_ADD */ + PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, sse2_composite_add_n_8888_8888_ca), + PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, sse2_composite_add_8000_8000), + PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, sse2_composite_add_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, sse2_composite_add_8888_8888), + PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, sse2_composite_add_n_8_8), + + /* PIXMAN_OP_SRC */ + PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, sse2_composite_src_n_8_8888), + PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, sse2_composite_src_n_8_8888), + PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, sse2_composite_src_n_8_8888), + PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, sse2_composite_src_n_8_8888), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, sse2_composite_copy_area), + PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, sse2_composite_copy_area), + + /* PIXMAN_OP_IN */ + PIXMAN_STD_FAST_PATH (IN, a8, null, a8, sse2_composite_in_8_8), + PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, sse2_composite_in_n_8_8), { PIXMAN_OP_NONE }, }; diff --git a/pixman/pixman/pixman-utils.c b/pixman/pixman/pixman-utils.c index a80a226fa..5441b6b8c 100644 --- a/pixman/pixman/pixman-utils.c 
+++ b/pixman/pixman/pixman-utils.c @@ -498,102 +498,56 @@ _pixman_walk_composite_region (pixman_implementation_t *imp, } } -static const pixman_fast_path_t * -get_fast_path (const pixman_fast_path_t *fast_paths, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int src_x, - int src_y, - int mask_x, - int mask_y) +static void +get_image_info (pixman_image_t *image, + pixman_format_code_t *code, + uint32_t *flags) { - pixman_format_code_t src_format, mask_format, dest_format; - const pixman_fast_path_t *info; - - /* Check for pixbufs */ - if (mask_image && mask_image->type == BITS && - (mask_image->bits.format == PIXMAN_a8r8g8b8 || mask_image->bits.format == PIXMAN_a8b8g8r8) && - (src_image->type == BITS && src_image->bits.bits == mask_image->bits.bits) && - (src_image->common.repeat == mask_image->common.repeat) && - (src_x == mask_x && src_y == mask_y)) + *flags = 0; + + if (!image) { - if (src_image->bits.format == PIXMAN_x8b8g8r8) - src_format = mask_format = PIXMAN_pixbuf; - else if (src_image->bits.format == PIXMAN_x8r8g8b8) - src_format = mask_format = PIXMAN_rpixbuf; - else - return NULL; + *code = PIXMAN_null; } else { - /* Source */ - if (_pixman_image_is_solid (src_image)) - { - src_format = PIXMAN_solid; - } - else if (src_image->type == BITS) - { - src_format = src_image->bits.format; - } - else - { - return NULL; - } + if (!image->common.transform) + *flags |= FAST_PATH_ID_TRANSFORM; - /* Mask */ - if (!mask_image) - { - mask_format = PIXMAN_null; - } - else if (mask_image->common.component_alpha) - { - if (mask_image->type == BITS) - { - /* These are the *only* component_alpha formats - * we support for fast paths - */ - if (mask_image->bits.format == PIXMAN_a8r8g8b8) - mask_format = PIXMAN_a8r8g8b8_ca; - else if (mask_image->bits.format == PIXMAN_a8b8g8r8) - mask_format = PIXMAN_a8b8g8r8_ca; - else - return NULL; - } - else - { - return NULL; - } - } - else if (_pixman_image_is_solid (mask_image)) - { - mask_format = PIXMAN_solid; - } - else if (mask_image->common.type == BITS) + if (!image->common.alpha_map) + *flags |= FAST_PATH_NO_ALPHA_MAP; + + if (image->common.filter != PIXMAN_FILTER_CONVOLUTION) + *flags |= FAST_PATH_NO_CONVOLUTION_FILTER; + + if (image->common.repeat != PIXMAN_REPEAT_PAD) + *flags |= FAST_PATH_NO_PAD_REPEAT; + + if (image->common.repeat != PIXMAN_REPEAT_REFLECT) + *flags |= FAST_PATH_NO_REFLECT_REPEAT; + + *flags |= (FAST_PATH_NO_ACCESSORS | FAST_PATH_NO_WIDE_FORMAT); + if (image->type == BITS) { - mask_format = mask_image->bits.format; + if (image->bits.read_func || image->bits.write_func) + *flags &= ~FAST_PATH_NO_ACCESSORS; + + if (PIXMAN_FORMAT_IS_WIDE (image->bits.format)) + *flags &= ~FAST_PATH_NO_WIDE_FORMAT; } + + if (image->common.component_alpha) + *flags |= FAST_PATH_COMPONENT_ALPHA; else - { - return NULL; - } - } + *flags |= FAST_PATH_UNIFIED_ALPHA; - dest_format = dst_image->bits.format; - - for (info = fast_paths; info->op != PIXMAN_OP_NONE; ++info) - { - if (info->op == op && - info->src_format == src_format && - info->mask_format == mask_format && - info->dest_format == dest_format) - { - return info; - } + if (_pixman_image_is_solid (image)) + *code = PIXMAN_solid; + else if (image->common.type == BITS) + *code = image->bits.format; + else + *code = PIXMAN_unknown; } - - return NULL; } static force_inline pixman_bool_t @@ -655,82 +609,46 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths, int32_t width, int32_t height) { - pixman_composite_func_t func = NULL; - 
pixman_bool_t src_repeat = - src->common.repeat == PIXMAN_REPEAT_NORMAL; - pixman_bool_t mask_repeat = - mask && mask->common.repeat == PIXMAN_REPEAT_NORMAL; + pixman_format_code_t src_format, mask_format, dest_format; + uint32_t src_flags, mask_flags, dest_flags; + pixman_composite_func_t func; + const pixman_fast_path_t *info; pixman_bool_t result; - pixman_bool_t has_fast_path; - has_fast_path = !dest->common.alpha_map && - !dest->bits.read_func && - !dest->bits.write_func; - - if (has_fast_path) - { - has_fast_path = !src->common.transform && - !src->common.alpha_map && - src->common.filter != PIXMAN_FILTER_CONVOLUTION && - src->common.repeat != PIXMAN_REPEAT_PAD && - src->common.repeat != PIXMAN_REPEAT_REFLECT; - if (has_fast_path && src->type == BITS) - { - has_fast_path = !src->bits.read_func && - !src->bits.write_func && - !PIXMAN_FORMAT_IS_WIDE (src->bits.format); - } - } - - if (mask && has_fast_path) + get_image_info (src, &src_format, &src_flags); + get_image_info (mask, &mask_format, &mask_flags); + get_image_info (dest, &dest_format, &dest_flags); + + /* Check for pixbufs */ + if ((mask_format == PIXMAN_a8r8g8b8 || mask_format == PIXMAN_a8b8g8r8) && + (src->type == BITS && src->bits.bits == mask->bits.bits) && + (src->common.repeat == mask->common.repeat) && + (src_x == mask_x && src_y == mask_y)) { - has_fast_path = - !mask->common.transform && - !mask->common.alpha_map && - !mask->bits.read_func && - !mask->bits.write_func && - mask->common.filter != PIXMAN_FILTER_CONVOLUTION && - mask->common.repeat != PIXMAN_REPEAT_PAD && - mask->common.repeat != PIXMAN_REPEAT_REFLECT && - !PIXMAN_FORMAT_IS_WIDE (mask->bits.format); + if (src_format == PIXMAN_x8b8g8r8) + src_format = mask_format = PIXMAN_pixbuf; + else if (src_format == PIXMAN_x8r8g8b8) + src_format = mask_format = PIXMAN_rpixbuf; } - if (has_fast_path) + func = NULL; + for (info = paths; info->op != PIXMAN_OP_NONE; ++info) { - const pixman_fast_path_t *info; - - if ((info = get_fast_path (paths, op, src, mask, dest, src_x, src_y, mask_x, mask_y))) + if (info->op == op && + (info->src_format == src_format) && + (info->src_flags & src_flags) == info->src_flags && + (info->mask_format == mask_format) && + (info->mask_flags & mask_flags) == info->mask_flags && + (info->dest_format == dest_format) && + (info->dest_flags & dest_flags) == info->dest_flags) { func = info->func; - - if (info->src_format == PIXMAN_solid) - src_repeat = FALSE; - - if (info->mask_format == PIXMAN_solid) - mask_repeat = FALSE; - - if ((src_repeat && - src->bits.width == 1 && - src->bits.height == 1) || - (mask_repeat && - mask->bits.width == 1 && - mask->bits.height == 1)) - { - /* If src or mask are repeating 1x1 images and src_repeat or - * mask_repeat are still TRUE, it means the fast path we - * selected does not actually handle repeating images. - * - * So rather than calling the "fast path" with a zillion - * 1x1 requests, we just fall back to the general code (which - * does do something sensible with 1x1 repeating images). 
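(Sketch, not part of the patch: the rewritten dispatch in pixman-utils.c above replaces the old per-image if-chains with a bitmask subset test; an entry matches when every flag it requires is present in the flags get_image_info() computed for the image, and extra image flags are ignored. The snippet below demonstrates that test with hypothetical flag values named after the pixman-private.h definitions.)

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the FAST_PATH_* bits defined above. */
#define NO_CONVOLUTION (1 << 2)
#define NO_ACCESSORS   (1 << 5)
#define UNIFIED_ALPHA  (1 << 9)

int main (void)
{
    uint32_t required = NO_CONVOLUTION | NO_ACCESSORS;                  /* what the path demands */
    uint32_t image    = NO_CONVOLUTION | NO_ACCESSORS | UNIFIED_ALPHA;  /* what the image offers */

    /* Same subset test as in the fast-path loop:
     * (info->src_flags & src_flags) == info->src_flags */
    printf ("%s\n", (required & image) == required ? "match" : "no match");
    return 0;
}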
- */ - func = NULL; - } + break; } } result = FALSE; - + if (func) { pixman_region32_t region; @@ -746,6 +664,21 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths, src, mask, extents, src_x, src_y, mask_x, mask_y, dest_x, dest_y)) { + pixman_bool_t src_repeat, mask_repeat; + + src_repeat = + src->type == BITS && + src_flags & FAST_PATH_ID_TRANSFORM && + src->common.repeat == PIXMAN_REPEAT_NORMAL && + src_format != PIXMAN_solid; + + mask_repeat = + mask && + mask->type == BITS && + mask_flags & FAST_PATH_ID_TRANSFORM && + mask->common.repeat == PIXMAN_REPEAT_NORMAL && + mask_format != PIXMAN_solid; + walk_region_internal (imp, op, src, mask, dest, src_x, src_y, mask_x, mask_y, diff --git a/pixman/pixman/pixman-version.h b/pixman/pixman/pixman-version.h index dbd175ca4..bcf39c8a8 100644 --- a/pixman/pixman/pixman-version.h +++ b/pixman/pixman/pixman-version.h @@ -33,9 +33,9 @@ #define PIXMAN_VERSION_MAJOR 0 #define PIXMAN_VERSION_MINOR 17 -#define PIXMAN_VERSION_MICRO 4 +#define PIXMAN_VERSION_MICRO 6 -#define PIXMAN_VERSION_STRING "0.17.4" +#define PIXMAN_VERSION_STRING "0.17.6" #define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \ ((major) * 10000) \ diff --git a/pixman/pixman/pixman.c b/pixman/pixman/pixman.c index 0edd967cf..9ab875d7c 100644 --- a/pixman/pixman/pixman.c +++ b/pixman/pixman/pixman.c @@ -28,6 +28,8 @@ #endif #include "pixman-private.h" +#include <stdlib.h> + /* * Operator optimizations based on source or destination opacity */ @@ -109,8 +111,8 @@ pixman_optimize_operator (pixman_op_t op, static void apply_workaround (pixman_image_t *image, - int16_t * x, - int16_t * y, + int32_t * x, + int32_t * y, uint32_t ** save_bits, int * save_dx, int * save_dy) @@ -166,6 +168,24 @@ pixman_image_composite (pixman_op_t op, uint16_t width, uint16_t height) { + pixman_image_composite32 (op, src, mask, dest, src_x, src_y, + mask_x, mask_y, dest_x, dest_y, width, height); +} + +PIXMAN_EXPORT void +pixman_image_composite32 (pixman_op_t op, + pixman_image_t * src, + pixman_image_t * mask, + pixman_image_t * dest, + int32_t src_x, + int32_t src_y, + int32_t mask_x, + int32_t mask_y, + int32_t dest_x, + int32_t dest_y, + int32_t width, + int32_t height) +{ uint32_t *src_bits; int src_dx, src_dy; uint32_t *mask_bits; @@ -323,6 +343,45 @@ pixman_image_fill_rectangles (pixman_op_t op, int n_rects, const pixman_rectangle16_t *rects) { + pixman_box32_t stack_boxes[6]; + pixman_box32_t *boxes; + pixman_bool_t result; + int i; + + if (n_rects > 6) + { + boxes = pixman_malloc_ab (sizeof (pixman_box32_t), n_rects); + if (boxes == NULL) + return FALSE; + } + else + { + boxes = stack_boxes; + } + + for (i = 0; i < n_rects; ++i) + { + boxes[i].x1 = rects[i].x; + boxes[i].y1 = rects[i].y; + boxes[i].x2 = boxes[i].x1 + rects[i].width; + boxes[i].y2 = boxes[i].y1 + rects[i].height; + } + + result = pixman_image_fill_boxes (op, dest, color, n_rects, boxes); + + if (boxes != stack_boxes) + free (boxes); + + return result; +} + +PIXMAN_EXPORT pixman_bool_t +pixman_image_fill_boxes (pixman_op_t op, + pixman_image_t * dest, + pixman_color_t * color, + int n_boxes, + const pixman_box32_t *boxes) +{ pixman_image_t *solid; pixman_color_t c; int i; @@ -331,71 +390,69 @@ pixman_image_fill_rectangles (pixman_op_t op, if (color->alpha == 0xffff) { - if (op == PIXMAN_OP_OVER) - op = PIXMAN_OP_SRC; + if (op == PIXMAN_OP_OVER) + op = PIXMAN_OP_SRC; } if (op == PIXMAN_OP_CLEAR) { - c.red = 0; - c.green = 0; - c.blue = 0; - c.alpha = 0; + c.red = 0; + c.green = 0; + c.blue = 0; + c.alpha = 0; - color = &c; + color = 
&c; - op = PIXMAN_OP_SRC; + op = PIXMAN_OP_SRC; } if (op == PIXMAN_OP_SRC) { - uint32_t pixel; - - if (color_to_pixel (color, &pixel, dest->bits.format)) - { - for (i = 0; i < n_rects; ++i) - { - pixman_region32_t fill_region; - int n_boxes, j; - pixman_box32_t *boxes; - - pixman_region32_init_rect (&fill_region, rects[i].x, rects[i].y, rects[i].width, rects[i].height); - - if (dest->common.have_clip_region) - { - if (!pixman_region32_intersect (&fill_region, - &fill_region, - &dest->common.clip_region)) - return FALSE; - } - - boxes = pixman_region32_rectangles (&fill_region, &n_boxes); - for (j = 0; j < n_boxes; ++j) - { - const pixman_box32_t *box = &(boxes[j]); - pixman_fill (dest->bits.bits, dest->bits.rowstride, PIXMAN_FORMAT_BPP (dest->bits.format), - box->x1, box->y1, box->x2 - box->x1, box->y2 - box->y1, - pixel); - } - - pixman_region32_fini (&fill_region); - } - return TRUE; - } + uint32_t pixel; + + if (color_to_pixel (color, &pixel, dest->bits.format)) + { + pixman_region32_t fill_region; + int n_rects, j; + pixman_box32_t *rects; + + if (!pixman_region32_init_rects (&fill_region, boxes, n_boxes)) + return FALSE; + + if (dest->common.have_clip_region) + { + if (!pixman_region32_intersect (&fill_region, + &fill_region, + &dest->common.clip_region)) + return FALSE; + } + + rects = pixman_region32_rectangles (&fill_region, &n_rects); + for (j = 0; j < n_rects; ++j) + { + const pixman_box32_t *rect = &(rects[j]); + pixman_fill (dest->bits.bits, dest->bits.rowstride, PIXMAN_FORMAT_BPP (dest->bits.format), + rect->x1, rect->y1, rect->x2 - rect->x1, rect->y2 - rect->y1, + pixel); + } + + pixman_region32_fini (&fill_region); + return TRUE; + } } solid = pixman_image_create_solid_fill (color); if (!solid) - return FALSE; + return FALSE; - for (i = 0; i < n_rects; ++i) + for (i = 0; i < n_boxes; ++i) { - const pixman_rectangle16_t *rect = &(rects[i]); + const pixman_box32_t *box = &(boxes[i]); - pixman_image_composite (op, solid, NULL, dest, - 0, 0, 0, 0, - rect->x, rect->y, - rect->width, rect->height); + pixman_image_composite32 (op, solid, NULL, dest, + 0, 0, 0, 0, + box->x1, box->y1, + box->x2 - box->x1, box->y2 - box->y1); } pixman_image_unref (solid); diff --git a/pixman/pixman/pixman.h b/pixman/pixman/pixman.h index ba2b242ad..03a233e00 100644 --- a/pixman/pixman/pixman.h +++ b/pixman/pixman/pixman.h @@ -71,6 +71,10 @@ SOFTWARE. 
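(Illustrative usage only, assuming a freshly created destination image: the pixman.c changes above add pixman_image_fill_boxes() and pixman_image_composite32(); the older 16-bit entry points now just convert rectangles to boxes (x2 = x + width, y2 = y + height) and forward to these.)

#include <pixman.h>

int main (void)
{
    pixman_image_t *dest = pixman_image_create_bits (PIXMAN_a8r8g8b8,
                                                     200, 200, NULL, 0);
    pixman_color_t  blue = { 0x0000, 0x0000, 0xffff, 0xffff };  /* r, g, b, a */
    pixman_box32_t  box  = { 10, 10, 110, 60 };                 /* x1, y1, x2, y2 */

    /* Fill an explicit list of 32-bit boxes directly. */
    pixman_image_fill_boxes (PIXMAN_OP_SRC, dest, &blue, 1, &box);

    pixman_image_t *src = pixman_image_create_solid_fill (&blue);

    /* 32-bit coordinate variant that pixman_image_composite() forwards to. */
    pixman_image_composite32 (PIXMAN_OP_OVER, src, NULL, dest,
                              0, 0, 0, 0, 10, 10, 100, 50);

    pixman_image_unref (src);
    pixman_image_unref (dest);
    return 0;
}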
#include <pixman-version.h> +#ifdef __cplusplus +extern "C" { +#endif + /* * Standard integers */ @@ -393,7 +397,7 @@ void pixman_region_init_rect (pixman_region16_t *reg unsigned int width, unsigned int height); pixman_bool_t pixman_region_init_rects (pixman_region16_t *region, - pixman_box16_t *boxes, + const pixman_box16_t *boxes, int count); void pixman_region_init_with_extents (pixman_region16_t *region, pixman_box16_t *extents); @@ -429,7 +433,7 @@ pixman_bool_t pixman_region_contains_point (pixman_region16_t *reg int x, int y, pixman_box16_t *box); -pixman_region_overlap_t pixman_region_contains_rectangle (pixman_region16_t *pixman_region16_t, +pixman_region_overlap_t pixman_region_contains_rectangle (pixman_region16_t *region, pixman_box16_t *prect); pixman_bool_t pixman_region_not_empty (pixman_region16_t *region); pixman_box16_t * pixman_region_extents (pixman_region16_t *region); @@ -480,7 +484,7 @@ void pixman_region32_init_rect (pixman_region32_t *r unsigned int width, unsigned int height); pixman_bool_t pixman_region32_init_rects (pixman_region32_t *region, - pixman_box32_t *boxes, + const pixman_box32_t *boxes, int count); void pixman_region32_init_with_extents (pixman_region32_t *region, pixman_box32_t *extents); @@ -722,6 +726,7 @@ pixman_bool_t pixman_image_unref (pixman_image_t void pixman_image_set_destroy_function (pixman_image_t *image, pixman_image_destroy_func_t function, void *data); +void * pixman_image_get_destroy_data (pixman_image_t *image); /* Set properties */ pixman_bool_t pixman_image_set_clip_region (pixman_image_t *image, @@ -761,6 +766,11 @@ pixman_bool_t pixman_image_fill_rectangles (pixman_op_t op, pixman_color_t *color, int n_rects, const pixman_rectangle16_t *rects); +pixman_bool_t pixman_image_fill_boxes (pixman_op_t op, + pixman_image_t *dest, + pixman_color_t *color, + int n_boxes, + const pixman_box32_t *boxes); /* Composite */ pixman_bool_t pixman_compute_composite_region (pixman_region16_t *region, @@ -787,6 +797,18 @@ void pixman_image_composite (pixman_op_t op, int16_t dest_y, uint16_t width, uint16_t height); +void pixman_image_composite32 (pixman_op_t op, + pixman_image_t *src, + pixman_image_t *mask, + pixman_image_t *dest, + int32_t src_x, + int32_t src_y, + int32_t mask_x, + int32_t mask_y, + int32_t dest_x, + int32_t dest_y, + int32_t width, + int32_t height); /* Old X servers rely on out-of-bounds accesses when they are asked * to composite with a window as the source. They create a pixman image @@ -892,4 +914,8 @@ void pixman_rasterize_trapezoid (pixman_image_t *image, int x_off, int y_off); +#ifdef __cplusplus +} +#endif + #endif /* PIXMAN_H__ */ diff --git a/pixman/test/fetch-test.c b/pixman/test/fetch-test.c index c7b32ff3b..2ca16ddbf 100644 --- a/pixman/test/fetch-test.c +++ b/pixman/test/fetch-test.c @@ -6,7 +6,7 @@ #define SIZE 1024 -static const pixman_indexed_t mono_palette = +static pixman_indexed_t mono_palette = { .rgba = { 0x00000000, 0x00ffffff }, }; @@ -21,7 +21,7 @@ typedef struct { pixman_indexed_t *indexed; } testcase_t; -static const testcase_t testcases[] = +static testcase_t testcases[] = { { .format = PIXMAN_a8r8g8b8, @@ -100,7 +100,7 @@ static const testcase_t testcases[] = }, }; -const int n_test_cases = sizeof(testcases)/sizeof(testcases[0]); +int n_test_cases = sizeof(testcases)/sizeof(testcases[0]); static uint32_t |