author     marha <marha@users.sourceforge.net>   2009-11-20 14:53:20 +0000
committer  marha <marha@users.sourceforge.net>   2009-11-20 14:53:20 +0000
commit     9e11b82359e90c0963f56c4695a4dcc8731e45c3 (patch)
tree       ffa3a53246352ca44ccfc4d90e7cca993ad0b02e /pixman/pixman
parent     607c11e20aca5c9f13d5e6ab80153b10c249162b (diff)
parent     4ac4a5b7ce8cc8f195d69a42da10d386eaa5c056 (diff)
download   vcxsrv-9e11b82359e90c0963f56c4695a4dcc8731e45c3.tar.gz
           vcxsrv-9e11b82359e90c0963f56c4695a4dcc8731e45c3.tar.bz2
           vcxsrv-9e11b82359e90c0963f56c4695a4dcc8731e45c3.zip
svn merge ^/branches/released
Diffstat (limited to 'pixman/pixman')
-rw-r--r--   pixman/pixman/Makefile.am             |   20
-rw-r--r--   pixman/pixman/Makefile.in             |  341
-rw-r--r--   pixman/pixman/makefile                |    1
-rw-r--r--   pixman/pixman/pixman-access.c         |  166
-rw-r--r--   pixman/pixman/pixman-arm-neon-asm.S   | 1051
-rw-r--r--   pixman/pixman/pixman-arm-neon-asm.h   |  787
-rw-r--r--   pixman/pixman/pixman-arm-neon.c       | 2886
-rw-r--r--   pixman/pixman/pixman-arm-simd.c       |   28
-rw-r--r--   pixman/pixman/pixman-bits-image.c     |  350
-rw-r--r--   pixman/pixman/pixman-edge.c           |    3
-rw-r--r--   pixman/pixman/pixman-fast-path.c      |  373
-rw-r--r--   pixman/pixman/pixman-general.c        |   26
-rw-r--r--   pixman/pixman/pixman-image.c          |    6
-rw-r--r--   pixman/pixman/pixman-implementation.c |    2
-rw-r--r--   pixman/pixman/pixman-mmx.c            | 6832
-rw-r--r--   pixman/pixman/pixman-private.h        |   40
-rw-r--r--   pixman/pixman/pixman-sse2.c           |  589
-rw-r--r--   pixman/pixman/pixman-utils.c          |  215
-rw-r--r--   pixman/pixman/pixman-version.h        |    4
-rw-r--r--   pixman/pixman/pixman.h                |   29
20 files changed, 6927 insertions, 6822 deletions
diff --git a/pixman/pixman/Makefile.am b/pixman/pixman/Makefile.am
index e19fa6e7f..cd0102313 100644
--- a/pixman/pixman/Makefile.am
+++ b/pixman/pixman/Makefile.am
@@ -1,7 +1,6 @@
lib_LTLIBRARIES = libpixman-1.la
libpixman_1_la_LDFLAGS = -version-info $(LT_VERSION_INFO) -no-undefined
libpixman_1_la_LIBADD = @DEP_LIBS@ -lm
-libpixman_1_la_CFLAGS = -DPIXMAN_DISABLE_DEPRECATED
libpixman_1_la_SOURCES = \
pixman.h \
pixman-accessor.h \
@@ -64,6 +63,8 @@ libpixman_mmx_la_CFLAGS = $(DEP_CFLAGS) $(MMX_CFLAGS)
libpixman_mmx_la_LIBADD = $(DEP_LIBS)
libpixman_1_la_LDFLAGS += $(MMX_LDFLAGS)
libpixman_1_la_LIBADD += libpixman-mmx.la
+
+$(libpixman_mmx_la_SOURCES:.c=.s) : ASM_CFLAGS=$(MMX_CFLAGS)
endif
# vmx code
@@ -75,6 +76,8 @@ libpixman_vmx_la_SOURCES = \
libpixman_vmx_la_CFLAGS = $(DEP_CFLAGS) $(VMX_CFLAGS)
libpixman_vmx_la_LIBADD = $(DEP_LIBS)
libpixman_1_la_LIBADD += libpixman-vmx.la
+
+$(libpixman_vmx_la_SOURCES:.c=.s) : ASM_CFLAGS=$(VMX_CFLAGS)
endif
# sse2 code
@@ -86,6 +89,8 @@ libpixman_sse2_la_CFLAGS = $(DEP_CFLAGS) $(SSE2_CFLAGS)
libpixman_sse2_la_LIBADD = $(DEP_LIBS)
libpixman_1_la_LDFLAGS += $(SSE2_LDFLAGS)
libpixman_1_la_LIBADD += libpixman-sse2.la
+
+$(libpixman_sse2_la_SOURCES:.c=.s) : ASM_CFLAGS=$(SSE2_CFLAGS)
endif
# arm simd code
@@ -96,16 +101,23 @@ libpixman_arm_simd_la_SOURCES = \
libpixman_arm_simd_la_CFLAGS = $(DEP_CFLAGS) $(ARM_SIMD_CFLAGS)
libpixman_arm_simd_la_LIBADD = $(DEP_LIBS)
libpixman_1_la_LIBADD += libpixman-arm-simd.la
+
+$(libpixman_arm_simd_la_SOURCES:.c=.s) : ASM_CFLAGS=$(ARM_SIMD_CFLAGS)
endif
# arm neon code
if USE_ARM_NEON
noinst_LTLIBRARIES += libpixman-arm-neon.la
libpixman_arm_neon_la_SOURCES = \
- pixman-arm-neon.c
-libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS) $(ARM_NEON_CFLAGS)
+ pixman-arm-neon.c \
+ pixman-arm-neon-asm.S \
+ pixman-arm-neon-asm.h
+libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS)
libpixman_arm_neon_la_LIBADD = $(DEP_LIBS)
libpixman_1_la_LIBADD += libpixman-arm-neon.la
-endif
+$(libpixman_arm_neon_la_SOURCES:.c=.s) : ASM_CFLAGS=
+endif
+.c.s : $(libpixmaninclude_HEADERS) $(BUILT_SOURCES)
+ $(CC) $(CFLAGS) $(ASM_CFLAGS) -DHAVE_CONFIG_H -I$(srcdir) -I$(builddir) -I$(top_builddir) -S -o $@ $<
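
Two things change in this Makefile.am: the -DPIXMAN_DISABLE_DEPRECATED define is dropped from the library build, and a .c.s suffix rule is added so a developer can ask make for an assembly listing of any SIMD source compiled with its matching flags (the rule simply runs $(CC) ... -S, as shown above). The dropped define is conventionally consumed in a public header as a visibility guard for deprecated declarations; the snippet below is only a sketch of that pattern with a made-up function name, not a quote from pixman.h:

    /* hypothetical header excerpt: building with -DPIXMAN_DISABLE_DEPRECATED
     * hides the declarations, so the library cannot call them internally */
    #ifndef PIXMAN_DISABLE_DEPRECATED
    void some_deprecated_entry_point (void);   /* illustrative name only */
    #endif
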
diff --git a/pixman/pixman/Makefile.in b/pixman/pixman/Makefile.in
index 1ca63f731..a0e4f9e7b 100644
--- a/pixman/pixman/Makefile.in
+++ b/pixman/pixman/Makefile.in
@@ -1,4 +1,4 @@
-# Makefile.in generated by automake 1.10.2 from Makefile.am.
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
@@ -77,37 +77,28 @@ libLTLIBRARIES_INSTALL = $(INSTALL)
LTLIBRARIES = $(lib_LTLIBRARIES) $(noinst_LTLIBRARIES)
libpixman_1_la_DEPENDENCIES = $(am__append_3) $(am__append_5) \
$(am__append_8) $(am__append_10) $(am__append_12)
-am_libpixman_1_la_OBJECTS = libpixman_1_la-pixman-access.lo \
- libpixman_1_la-pixman-access-accessors.lo \
- libpixman_1_la-pixman-cpu.lo \
- libpixman_1_la-pixman-gradient-walker.lo \
- libpixman_1_la-pixman-region16.lo \
- libpixman_1_la-pixman-region32.lo \
- libpixman_1_la-pixman-image.lo \
- libpixman_1_la-pixman-implementation.lo \
- libpixman_1_la-pixman-combine32.lo \
- libpixman_1_la-pixman-combine64.lo \
- libpixman_1_la-pixman-general.lo libpixman_1_la-pixman.lo \
- libpixman_1_la-pixman-fast-path.lo \
- libpixman_1_la-pixman-solid-fill.lo \
- libpixman_1_la-pixman-conical-gradient.lo \
- libpixman_1_la-pixman-linear-gradient.lo \
- libpixman_1_la-pixman-radial-gradient.lo \
- libpixman_1_la-pixman-bits-image.lo \
- libpixman_1_la-pixman-utils.lo libpixman_1_la-pixman-edge.lo \
- libpixman_1_la-pixman-edge-accessors.lo \
- libpixman_1_la-pixman-trap.lo libpixman_1_la-pixman-timer.lo \
- libpixman_1_la-pixman-matrix.lo
+am_libpixman_1_la_OBJECTS = pixman-access.lo \
+ pixman-access-accessors.lo pixman-cpu.lo \
+ pixman-gradient-walker.lo pixman-region16.lo \
+ pixman-region32.lo pixman-image.lo pixman-implementation.lo \
+ pixman-combine32.lo pixman-combine64.lo pixman-general.lo \
+ pixman.lo pixman-fast-path.lo pixman-solid-fill.lo \
+ pixman-conical-gradient.lo pixman-linear-gradient.lo \
+ pixman-radial-gradient.lo pixman-bits-image.lo pixman-utils.lo \
+ pixman-edge.lo pixman-edge-accessors.lo pixman-trap.lo \
+ pixman-timer.lo pixman-matrix.lo
libpixman_1_la_OBJECTS = $(am_libpixman_1_la_OBJECTS)
libpixman_1_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CCLD) $(libpixman_1_la_CFLAGS) \
- $(CFLAGS) $(libpixman_1_la_LDFLAGS) $(LDFLAGS) -o $@
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(libpixman_1_la_LDFLAGS) $(LDFLAGS) -o $@
am__DEPENDENCIES_1 =
@USE_ARM_NEON_TRUE@libpixman_arm_neon_la_DEPENDENCIES = \
@USE_ARM_NEON_TRUE@ $(am__DEPENDENCIES_1)
-am__libpixman_arm_neon_la_SOURCES_DIST = pixman-arm-neon.c
+am__libpixman_arm_neon_la_SOURCES_DIST = pixman-arm-neon.c \
+ pixman-arm-neon-asm.S pixman-arm-neon-asm.h
@USE_ARM_NEON_TRUE@am_libpixman_arm_neon_la_OBJECTS = \
-@USE_ARM_NEON_TRUE@ libpixman_arm_neon_la-pixman-arm-neon.lo
+@USE_ARM_NEON_TRUE@ libpixman_arm_neon_la-pixman-arm-neon.lo \
+@USE_ARM_NEON_TRUE@ pixman-arm-neon-asm.lo
libpixman_arm_neon_la_OBJECTS = $(am_libpixman_arm_neon_la_OBJECTS)
libpixman_arm_neon_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CCLD) \
@@ -156,6 +147,11 @@ libpixman_vmx_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
+CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+LTCPPASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
@@ -182,30 +178,37 @@ DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AR = @AR@
-ARM_NEON_CFLAGS = @ARM_NEON_CFLAGS@
ARM_SIMD_CFLAGS = @ARM_SIMD_CFLAGS@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DEP_CFLAGS = @DEP_CFLAGS@
DEP_LIBS = @DEP_LIBS@
DSYMUTIL = @DSYMUTIL@
-DUMPBIN = @DUMPBIN@
+ECHO = @ECHO@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
-FGREP = @FGREP@
+F77 = @F77@
+FFLAGS = @FFLAGS@
GREP = @GREP@
GTK_CFLAGS = @GTK_CFLAGS@
GTK_LIBS = @GTK_LIBS@
@@ -214,12 +217,10 @@ INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
-LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
LT_VERSION_INFO = @LT_VERSION_INFO@
@@ -227,12 +228,8 @@ MAKEINFO = @MAKEINFO@
MKDIR_P = @MKDIR_P@
MMX_CFLAGS = @MMX_CFLAGS@
MMX_LDFLAGS = @MMX_LDFLAGS@
-NM = @NM@
NMEDIT = @NMEDIT@
-OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
-OTOOL = @OTOOL@
-OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
@@ -260,7 +257,8 @@ abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_CC = @ac_ct_CC@
-ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
@@ -291,7 +289,6 @@ libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
-lt_ECHO = @lt_ECHO@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
@@ -312,7 +309,6 @@ libpixman_1_la_LDFLAGS = -version-info $(LT_VERSION_INFO) \
-no-undefined $(am__append_2) $(am__append_7)
libpixman_1_la_LIBADD = @DEP_LIBS@ -lm $(am__append_3) $(am__append_5) \
$(am__append_8) $(am__append_10) $(am__append_12)
-libpixman_1_la_CFLAGS = -DPIXMAN_DISABLE_DEPRECATED
libpixman_1_la_SOURCES = \
pixman.h \
pixman-accessor.h \
@@ -377,27 +373,29 @@ CLEANFILES = pixman-combine32.c pixman-combine64.c pixman-combine32.h pixman-com
@USE_ARM_SIMD_TRUE@libpixman_arm_simd_la_CFLAGS = $(DEP_CFLAGS) $(ARM_SIMD_CFLAGS)
@USE_ARM_SIMD_TRUE@libpixman_arm_simd_la_LIBADD = $(DEP_LIBS)
@USE_ARM_NEON_TRUE@libpixman_arm_neon_la_SOURCES = \
-@USE_ARM_NEON_TRUE@ pixman-arm-neon.c
+@USE_ARM_NEON_TRUE@ pixman-arm-neon.c \
+@USE_ARM_NEON_TRUE@ pixman-arm-neon-asm.S \
+@USE_ARM_NEON_TRUE@ pixman-arm-neon-asm.h
-@USE_ARM_NEON_TRUE@libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS) $(ARM_NEON_CFLAGS)
+@USE_ARM_NEON_TRUE@libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS)
@USE_ARM_NEON_TRUE@libpixman_arm_neon_la_LIBADD = $(DEP_LIBS)
all: $(BUILT_SOURCES)
$(MAKE) $(AM_MAKEFLAGS) all-am
.SUFFIXES:
-.SUFFIXES: .c .lo .o .obj
+.SUFFIXES: .S .c .lo .o .obj .s
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+ && exit 0; \
exit 1;; \
esac; \
done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu pixman/Makefile'; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign pixman/Makefile'; \
cd $(top_srcdir) && \
- $(AUTOMAKE) --gnu pixman/Makefile
+ $(AUTOMAKE) --foreign pixman/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
@@ -472,35 +470,57 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-access-accessors.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-access.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-bits-image.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-combine32.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-combine64.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-conical-gradient.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-cpu.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-edge-accessors.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-edge.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-fast-path.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-general.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-gradient-walker.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-image.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-implementation.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-linear-gradient.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-matrix.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-radial-gradient.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-region16.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-region32.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-solid-fill.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-timer.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-trap.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman-utils.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_1_la-pixman.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_arm_neon_la-pixman-arm-neon.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_arm_simd_la-pixman-arm-simd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_mmx_la-pixman-mmx.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_sse2_la-pixman-sse2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpixman_vmx_la-pixman-vmx.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-access-accessors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-access.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-arm-neon-asm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-bits-image.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-combine32.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-combine64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-conical-gradient.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-cpu.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-edge-accessors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-edge.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-fast-path.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-general.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-gradient-walker.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-image.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-implementation.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-linear-gradient.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-matrix.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-radial-gradient.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-region16.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-region32.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-solid-fill.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-timer.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-trap.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman-utils.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pixman.Plo@am__quote@
+
+.S.o:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ $<
+
+.S.obj:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCCAS_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.S.lo:
+@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(LTCPPASCOMPILE) -c -o $@ $<
.c.o:
@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -523,174 +543,6 @@ distclean-compile:
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
-libpixman_1_la-pixman-access.lo: pixman-access.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-access.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-access.Tpo -c -o libpixman_1_la-pixman-access.lo `test -f 'pixman-access.c' || echo '$(srcdir)/'`pixman-access.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-access.Tpo $(DEPDIR)/libpixman_1_la-pixman-access.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-access.c' object='libpixman_1_la-pixman-access.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-access.lo `test -f 'pixman-access.c' || echo '$(srcdir)/'`pixman-access.c
-
-libpixman_1_la-pixman-access-accessors.lo: pixman-access-accessors.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-access-accessors.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-access-accessors.Tpo -c -o libpixman_1_la-pixman-access-accessors.lo `test -f 'pixman-access-accessors.c' || echo '$(srcdir)/'`pixman-access-accessors.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-access-accessors.Tpo $(DEPDIR)/libpixman_1_la-pixman-access-accessors.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-access-accessors.c' object='libpixman_1_la-pixman-access-accessors.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-access-accessors.lo `test -f 'pixman-access-accessors.c' || echo '$(srcdir)/'`pixman-access-accessors.c
-
-libpixman_1_la-pixman-cpu.lo: pixman-cpu.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-cpu.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-cpu.Tpo -c -o libpixman_1_la-pixman-cpu.lo `test -f 'pixman-cpu.c' || echo '$(srcdir)/'`pixman-cpu.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-cpu.Tpo $(DEPDIR)/libpixman_1_la-pixman-cpu.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-cpu.c' object='libpixman_1_la-pixman-cpu.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-cpu.lo `test -f 'pixman-cpu.c' || echo '$(srcdir)/'`pixman-cpu.c
-
-libpixman_1_la-pixman-gradient-walker.lo: pixman-gradient-walker.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-gradient-walker.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-gradient-walker.Tpo -c -o libpixman_1_la-pixman-gradient-walker.lo `test -f 'pixman-gradient-walker.c' || echo '$(srcdir)/'`pixman-gradient-walker.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-gradient-walker.Tpo $(DEPDIR)/libpixman_1_la-pixman-gradient-walker.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-gradient-walker.c' object='libpixman_1_la-pixman-gradient-walker.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-gradient-walker.lo `test -f 'pixman-gradient-walker.c' || echo '$(srcdir)/'`pixman-gradient-walker.c
-
-libpixman_1_la-pixman-region16.lo: pixman-region16.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-region16.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-region16.Tpo -c -o libpixman_1_la-pixman-region16.lo `test -f 'pixman-region16.c' || echo '$(srcdir)/'`pixman-region16.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-region16.Tpo $(DEPDIR)/libpixman_1_la-pixman-region16.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-region16.c' object='libpixman_1_la-pixman-region16.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-region16.lo `test -f 'pixman-region16.c' || echo '$(srcdir)/'`pixman-region16.c
-
-libpixman_1_la-pixman-region32.lo: pixman-region32.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-region32.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-region32.Tpo -c -o libpixman_1_la-pixman-region32.lo `test -f 'pixman-region32.c' || echo '$(srcdir)/'`pixman-region32.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-region32.Tpo $(DEPDIR)/libpixman_1_la-pixman-region32.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-region32.c' object='libpixman_1_la-pixman-region32.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-region32.lo `test -f 'pixman-region32.c' || echo '$(srcdir)/'`pixman-region32.c
-
-libpixman_1_la-pixman-image.lo: pixman-image.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-image.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-image.Tpo -c -o libpixman_1_la-pixman-image.lo `test -f 'pixman-image.c' || echo '$(srcdir)/'`pixman-image.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-image.Tpo $(DEPDIR)/libpixman_1_la-pixman-image.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-image.c' object='libpixman_1_la-pixman-image.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-image.lo `test -f 'pixman-image.c' || echo '$(srcdir)/'`pixman-image.c
-
-libpixman_1_la-pixman-implementation.lo: pixman-implementation.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-implementation.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-implementation.Tpo -c -o libpixman_1_la-pixman-implementation.lo `test -f 'pixman-implementation.c' || echo '$(srcdir)/'`pixman-implementation.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-implementation.Tpo $(DEPDIR)/libpixman_1_la-pixman-implementation.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-implementation.c' object='libpixman_1_la-pixman-implementation.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-implementation.lo `test -f 'pixman-implementation.c' || echo '$(srcdir)/'`pixman-implementation.c
-
-libpixman_1_la-pixman-combine32.lo: pixman-combine32.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-combine32.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-combine32.Tpo -c -o libpixman_1_la-pixman-combine32.lo `test -f 'pixman-combine32.c' || echo '$(srcdir)/'`pixman-combine32.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-combine32.Tpo $(DEPDIR)/libpixman_1_la-pixman-combine32.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-combine32.c' object='libpixman_1_la-pixman-combine32.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-combine32.lo `test -f 'pixman-combine32.c' || echo '$(srcdir)/'`pixman-combine32.c
-
-libpixman_1_la-pixman-combine64.lo: pixman-combine64.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-combine64.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-combine64.Tpo -c -o libpixman_1_la-pixman-combine64.lo `test -f 'pixman-combine64.c' || echo '$(srcdir)/'`pixman-combine64.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-combine64.Tpo $(DEPDIR)/libpixman_1_la-pixman-combine64.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-combine64.c' object='libpixman_1_la-pixman-combine64.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-combine64.lo `test -f 'pixman-combine64.c' || echo '$(srcdir)/'`pixman-combine64.c
-
-libpixman_1_la-pixman-general.lo: pixman-general.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-general.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-general.Tpo -c -o libpixman_1_la-pixman-general.lo `test -f 'pixman-general.c' || echo '$(srcdir)/'`pixman-general.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-general.Tpo $(DEPDIR)/libpixman_1_la-pixman-general.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-general.c' object='libpixman_1_la-pixman-general.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-general.lo `test -f 'pixman-general.c' || echo '$(srcdir)/'`pixman-general.c
-
-libpixman_1_la-pixman.lo: pixman.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman.Tpo -c -o libpixman_1_la-pixman.lo `test -f 'pixman.c' || echo '$(srcdir)/'`pixman.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman.Tpo $(DEPDIR)/libpixman_1_la-pixman.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman.c' object='libpixman_1_la-pixman.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman.lo `test -f 'pixman.c' || echo '$(srcdir)/'`pixman.c
-
-libpixman_1_la-pixman-fast-path.lo: pixman-fast-path.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-fast-path.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-fast-path.Tpo -c -o libpixman_1_la-pixman-fast-path.lo `test -f 'pixman-fast-path.c' || echo '$(srcdir)/'`pixman-fast-path.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-fast-path.Tpo $(DEPDIR)/libpixman_1_la-pixman-fast-path.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-fast-path.c' object='libpixman_1_la-pixman-fast-path.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-fast-path.lo `test -f 'pixman-fast-path.c' || echo '$(srcdir)/'`pixman-fast-path.c
-
-libpixman_1_la-pixman-solid-fill.lo: pixman-solid-fill.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-solid-fill.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-solid-fill.Tpo -c -o libpixman_1_la-pixman-solid-fill.lo `test -f 'pixman-solid-fill.c' || echo '$(srcdir)/'`pixman-solid-fill.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-solid-fill.Tpo $(DEPDIR)/libpixman_1_la-pixman-solid-fill.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-solid-fill.c' object='libpixman_1_la-pixman-solid-fill.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-solid-fill.lo `test -f 'pixman-solid-fill.c' || echo '$(srcdir)/'`pixman-solid-fill.c
-
-libpixman_1_la-pixman-conical-gradient.lo: pixman-conical-gradient.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-conical-gradient.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-conical-gradient.Tpo -c -o libpixman_1_la-pixman-conical-gradient.lo `test -f 'pixman-conical-gradient.c' || echo '$(srcdir)/'`pixman-conical-gradient.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-conical-gradient.Tpo $(DEPDIR)/libpixman_1_la-pixman-conical-gradient.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-conical-gradient.c' object='libpixman_1_la-pixman-conical-gradient.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-conical-gradient.lo `test -f 'pixman-conical-gradient.c' || echo '$(srcdir)/'`pixman-conical-gradient.c
-
-libpixman_1_la-pixman-linear-gradient.lo: pixman-linear-gradient.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-linear-gradient.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-linear-gradient.Tpo -c -o libpixman_1_la-pixman-linear-gradient.lo `test -f 'pixman-linear-gradient.c' || echo '$(srcdir)/'`pixman-linear-gradient.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-linear-gradient.Tpo $(DEPDIR)/libpixman_1_la-pixman-linear-gradient.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-linear-gradient.c' object='libpixman_1_la-pixman-linear-gradient.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-linear-gradient.lo `test -f 'pixman-linear-gradient.c' || echo '$(srcdir)/'`pixman-linear-gradient.c
-
-libpixman_1_la-pixman-radial-gradient.lo: pixman-radial-gradient.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-radial-gradient.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-radial-gradient.Tpo -c -o libpixman_1_la-pixman-radial-gradient.lo `test -f 'pixman-radial-gradient.c' || echo '$(srcdir)/'`pixman-radial-gradient.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-radial-gradient.Tpo $(DEPDIR)/libpixman_1_la-pixman-radial-gradient.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-radial-gradient.c' object='libpixman_1_la-pixman-radial-gradient.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-radial-gradient.lo `test -f 'pixman-radial-gradient.c' || echo '$(srcdir)/'`pixman-radial-gradient.c
-
-libpixman_1_la-pixman-bits-image.lo: pixman-bits-image.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-bits-image.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-bits-image.Tpo -c -o libpixman_1_la-pixman-bits-image.lo `test -f 'pixman-bits-image.c' || echo '$(srcdir)/'`pixman-bits-image.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-bits-image.Tpo $(DEPDIR)/libpixman_1_la-pixman-bits-image.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-bits-image.c' object='libpixman_1_la-pixman-bits-image.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-bits-image.lo `test -f 'pixman-bits-image.c' || echo '$(srcdir)/'`pixman-bits-image.c
-
-libpixman_1_la-pixman-utils.lo: pixman-utils.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-utils.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-utils.Tpo -c -o libpixman_1_la-pixman-utils.lo `test -f 'pixman-utils.c' || echo '$(srcdir)/'`pixman-utils.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-utils.Tpo $(DEPDIR)/libpixman_1_la-pixman-utils.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-utils.c' object='libpixman_1_la-pixman-utils.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-utils.lo `test -f 'pixman-utils.c' || echo '$(srcdir)/'`pixman-utils.c
-
-libpixman_1_la-pixman-edge.lo: pixman-edge.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-edge.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-edge.Tpo -c -o libpixman_1_la-pixman-edge.lo `test -f 'pixman-edge.c' || echo '$(srcdir)/'`pixman-edge.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-edge.Tpo $(DEPDIR)/libpixman_1_la-pixman-edge.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-edge.c' object='libpixman_1_la-pixman-edge.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-edge.lo `test -f 'pixman-edge.c' || echo '$(srcdir)/'`pixman-edge.c
-
-libpixman_1_la-pixman-edge-accessors.lo: pixman-edge-accessors.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-edge-accessors.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-edge-accessors.Tpo -c -o libpixman_1_la-pixman-edge-accessors.lo `test -f 'pixman-edge-accessors.c' || echo '$(srcdir)/'`pixman-edge-accessors.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-edge-accessors.Tpo $(DEPDIR)/libpixman_1_la-pixman-edge-accessors.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-edge-accessors.c' object='libpixman_1_la-pixman-edge-accessors.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-edge-accessors.lo `test -f 'pixman-edge-accessors.c' || echo '$(srcdir)/'`pixman-edge-accessors.c
-
-libpixman_1_la-pixman-trap.lo: pixman-trap.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-trap.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-trap.Tpo -c -o libpixman_1_la-pixman-trap.lo `test -f 'pixman-trap.c' || echo '$(srcdir)/'`pixman-trap.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-trap.Tpo $(DEPDIR)/libpixman_1_la-pixman-trap.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-trap.c' object='libpixman_1_la-pixman-trap.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-trap.lo `test -f 'pixman-trap.c' || echo '$(srcdir)/'`pixman-trap.c
-
-libpixman_1_la-pixman-timer.lo: pixman-timer.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-timer.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-timer.Tpo -c -o libpixman_1_la-pixman-timer.lo `test -f 'pixman-timer.c' || echo '$(srcdir)/'`pixman-timer.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-timer.Tpo $(DEPDIR)/libpixman_1_la-pixman-timer.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-timer.c' object='libpixman_1_la-pixman-timer.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-timer.lo `test -f 'pixman-timer.c' || echo '$(srcdir)/'`pixman-timer.c
-
-libpixman_1_la-pixman-matrix.lo: pixman-matrix.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -MT libpixman_1_la-pixman-matrix.lo -MD -MP -MF $(DEPDIR)/libpixman_1_la-pixman-matrix.Tpo -c -o libpixman_1_la-pixman-matrix.lo `test -f 'pixman-matrix.c' || echo '$(srcdir)/'`pixman-matrix.c
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_1_la-pixman-matrix.Tpo $(DEPDIR)/libpixman_1_la-pixman-matrix.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pixman-matrix.c' object='libpixman_1_la-pixman-matrix.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_1_la_CFLAGS) $(CFLAGS) -c -o libpixman_1_la-pixman-matrix.lo `test -f 'pixman-matrix.c' || echo '$(srcdir)/'`pixman-matrix.c
-
libpixman_arm_neon_la-pixman-arm-neon.lo: pixman-arm-neon.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpixman_arm_neon_la_CFLAGS) $(CFLAGS) -MT libpixman_arm_neon_la-pixman-arm-neon.lo -MD -MP -MF $(DEPDIR)/libpixman_arm_neon_la-pixman-arm-neon.Tpo -c -o libpixman_arm_neon_la-pixman-arm-neon.lo `test -f 'pixman-arm-neon.c' || echo '$(srcdir)/'`pixman-arm-neon.c
@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libpixman_arm_neon_la-pixman-arm-neon.Tpo $(DEPDIR)/libpixman_arm_neon_la-pixman-arm-neon.Plo
@@ -754,7 +606,7 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
mkid -fID $$unique
tags: TAGS
@@ -945,6 +797,19 @@ pixman-combine64.c : pixman-combine.c.template pixman-combine64.h make-combine.p
$(PERL) $(srcdir)/make-combine.pl 16 < $(srcdir)/pixman-combine.c.template > $@ || ($(RM) $@; exit 1)
pixman-combine64.h : pixman-combine.h.template make-combine.pl
$(PERL) $(srcdir)/make-combine.pl 16 < $(srcdir)/pixman-combine.h.template > $@ || ($(RM) $@; exit 1)
+
+@USE_MMX_TRUE@$(libpixman_mmx_la_SOURCES:.c=.s) : ASM_CFLAGS=$(MMX_CFLAGS)
+
+@USE_VMX_TRUE@$(libpixman_vmx_la_SOURCES:.c=.s) : ASM_CFLAGS=$(VMX_CFLAGS)
+
+@USE_SSE2_TRUE@$(libpixman_sse2_la_SOURCES:.c=.s) : ASM_CFLAGS=$(SSE2_CFLAGS)
+
+@USE_ARM_SIMD_TRUE@$(libpixman_arm_simd_la_SOURCES:.c=.s) : ASM_CFLAGS=$(ARM_SIMD_CFLAGS)
+
+@USE_ARM_NEON_TRUE@$(libpixman_arm_neon_la_SOURCES:.c=.s) : ASM_CFLAGS=
+
+.c.s : $(libpixmaninclude_HEADERS) $(BUILT_SOURCES)
+ $(CC) $(CFLAGS) $(ASM_CFLAGS) -DHAVE_CONFIG_H -I$(srcdir) -I$(builddir) -I$(top_builddir) -S -o $@ $<
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
diff --git a/pixman/pixman/makefile b/pixman/pixman/makefile
index d7380bad7..8209dc394 100644
--- a/pixman/pixman/makefile
+++ b/pixman/pixman/makefile
@@ -1,5 +1,4 @@
LIBRARY = libpixman-1
-DEFINES += PIXMAN_DISABLE_DEPRECATED
CSRCS = \
pixman-access.c \
diff --git a/pixman/pixman/pixman-access.c b/pixman/pixman/pixman-access.c
index d9fd38c15..389cf2a76 100644
--- a/pixman/pixman/pixman-access.c
+++ b/pixman/pixman/pixman-access.c
@@ -180,11 +180,11 @@ fetch_scanline_b8g8r8a8 (pixman_image_t *image,
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
-
+
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
-
+
*buffer++ = (((p & 0xff000000) >> 24) |
((p & 0x00ff0000) >> 8) |
((p & 0x0000ff00) << 8) |
@@ -731,23 +731,27 @@ fetch_scanline_b2g3r3 (pixman_image_t *image,
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
-
+
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
-
- b = (((p & 0xc0) ) |
- ((p & 0xc0) >> 2) |
- ((p & 0xc0) >> 4) |
- ((p & 0xc0) >> 6));
-
- g = ((p & 0x38) | ((p & 0x38) >> 3) | ((p & 0x30) << 2)) << 8;
-
- r = (((p & 0x07) ) |
- ((p & 0x07) << 3) |
- ((p & 0x06) << 6)) << 16;
-
+
+ b = p & 0xc0;
+ b |= b >> 2;
+ b |= b >> 4;
+ b &= 0xff;
+
+ g = (p & 0x38) << 10;
+ g |= g >> 3;
+ g |= g >> 6;
+ g &= 0xff00;
+
+ r = (p & 0x7) << 21;
+ r |= r >> 3;
+ r |= r >> 6;
+ r &= 0xff0000;
+
*buffer++ = 0xff000000 | r | g | b;
}
}
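
The rewritten b2g3r3 fetch above expands each channel by moving the field to the top of its destination byte and OR-ing in right-shifted copies of itself (bit replication), replacing the previous OR-of-masks form. A minimal standalone sketch of the same arithmetic, with a hypothetical helper name chosen only for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Expand one b2g3r3 pixel (bit layout bbgggrrr) to 0xAARRGGBB,
     * exactly as the new fetcher does. */
    static uint32_t
    expand_b2g3r3 (uint8_t p)
    {
        uint32_t r, g, b;

        b = p & 0xc0;          /* blue already sits in bits 7:6 */
        b |= b >> 2;
        b |= b >> 4;
        b &= 0xff;

        g = (p & 0x38) << 10;  /* green moved to bits 15:13 */
        g |= g >> 3;
        g |= g >> 6;
        g &= 0xff00;

        r = (p & 0x7) << 21;   /* red moved to bits 23:21 */
        r |= r >> 3;
        r |= r >> 6;
        r &= 0xff0000;

        return 0xff000000 | r | g | b;
    }

    int
    main (void)
    {
        assert (expand_b2g3r3 (0xff) == 0xffffffff);  /* full channels -> white */
        assert (expand_b2g3r3 (0x00) == 0xff000000);  /* zero -> opaque black   */
        return 0;
    }

Replication guarantees that an all-ones field expands to 0xff, a zero field to 0x00, and intermediate values scale monotonically.
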
@@ -798,7 +802,7 @@ fetch_scanline_a2b2g2r2 (pixman_image_t *image,
uint32_t a, r, g, b;
a = ((p & 0xc0) * 0x55) << 18;
- b = ((p & 0x30) * 0x55) >> 6;
+ b = ((p & 0x30) * 0x55) >> 4;
g = ((p & 0x0c) * 0x55) << 6;
r = ((p & 0x03) * 0x55) << 16;
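
The one-character change above is the actual fix: in a2b2g2r2 the blue field occupies bits 5:4, so (p & 0x30) is one of 0x00/0x10/0x20/0x30; multiplying by 0x55 and shifting right by 4 yields 0x00/0x55/0xaa/0xff, the standard 2-to-8-bit expansion, whereas the previous >> 6 could never reach full intensity. A tiny self-check (hypothetical test code, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
        for (uint32_t field = 0; field < 4; field++)
        {
            uint32_t p = field << 4;                 /* blue field of a2b2g2r2 */
            uint32_t b = ((p & 0x30) * 0x55) >> 4;   /* fixed shift */

            assert (b == field * 0x55);              /* 0x00, 0x55, 0xaa, 0xff */
        }
        return 0;
    }
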
@@ -840,20 +844,22 @@ fetch_scanline_x4a4 (pixman_image_t *image,
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
-
+
while (pixel < end)
{
uint8_t p = READ (image, pixel++) & 0xf;
-
+
*buffer++ = (p | (p << 4)) << 24;
}
}
-#define FETCH_8(img,l,o) (READ (img, (uint8_t *)(l) + ((o) >> 2)))
+#define FETCH_8(img,l,o) (READ (img, (((uint8_t *)(l)) + ((o) >> 3))))
#ifdef WORDS_BIGENDIAN
-#define FETCH_4(img,l,o) ((o) & 2 ? FETCH_8 (img,l,o) & 0xf : FETCH_8 (img,l,o) >> 4)
+#define FETCH_4(img,l,o) \
+ (((4 * (o)) & 4) ? (FETCH_8 (img,l, 4 * (o)) & 0xf) : (FETCH_8 (img,l,(4 * (o))) >> 4))
#else
-#define FETCH_4(img,l,o) ((o) & 2 ? FETCH_8 (img,l,o) >> 4 : FETCH_8 (img,l,o) & 0xf)
+#define FETCH_4(img,l,o) \
+ (((4 * (o)) & 4) ? (FETCH_8 (img, l, 4 * (o)) >> 4) : (FETCH_8 (img, l, (4 * (o))) & 0xf))
#endif
static void
@@ -867,13 +873,13 @@ fetch_scanline_a4 (pixman_image_t *image,
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
-
+
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
-
+
p |= p << 4;
-
+
*buffer++ = p << 24;
}
}
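
With this change FETCH_8 takes a bit offset (hence the >> 3) and FETCH_4 first converts its nibble index into a bit offset with 4 * (o), so callers such as fetch_scanline_a4 keep passing plain pixel indices like i + x. A standalone little-endian sketch of the new addressing; READ is replaced here by a plain dereference, which is an assumption for illustration rather than pixman's accessor machinery:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define READ(img, ptr)   (*(ptr))   /* stand-in for pixman's READ() */
    #define FETCH_8(img,l,o) (READ (img, (((uint8_t *)(l)) + ((o) >> 3))))
    /* little-endian FETCH_4 as added by the patch */
    #define FETCH_4(img,l,o) \
        (((4 * (o)) & 4) ? (FETCH_8 (img, l, 4 * (o)) >> 4) : (FETCH_8 (img, l, (4 * (o))) & 0xf))

    int
    main (void)
    {
        /* nibbles 0..3 of a little-endian 4bpp scanline: 0x21 0x43 -> 1, 2, 3, 4 */
        uint8_t line[2] = { 0x21, 0x43 };

        assert (FETCH_4 (NULL, line, 0) == 1);
        assert (FETCH_4 (NULL, line, 1) == 2);
        assert (FETCH_4 (NULL, line, 2) == 3);
        assert (FETCH_4 (NULL, line, 3) == 4);
        return 0;
    }
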
@@ -923,7 +929,7 @@ fetch_scanline_b1g2r1 (pixman_image_t *image,
b = ((p & 0x8) * 0xff) >> 3;
g = ((p & 0x6) * 0x55) << 7;
r = ((p & 0x1) * 0xff) << 16;
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -940,16 +946,16 @@ fetch_scanline_a1r1g1b1 (pixman_image_t *image,
uint32_t a, r, g, b;
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
-
+
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
-
+
a = ((p & 0x8) * 0xff) << 21;
r = ((p & 0x4) * 0xff) << 14;
g = ((p & 0x2) * 0xff) << 7;
b = ((p & 0x1) * 0xff);
-
+
*buffer++ = a | r | g | b;
}
}
@@ -965,17 +971,17 @@ fetch_scanline_a1b1g1r1 (pixman_image_t *image,
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
-
+
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
uint32_t a, r, g, b;
-
+
a = ((p & 0x8) * 0xff) << 21;
- r = ((p & 0x4) * 0xff) >> 3;
+ b = ((p & 0x4) * 0xff) >> 2;
g = ((p & 0x2) * 0xff) << 7;
- b = ((p & 0x1) * 0xff) << 16;
-
+ r = ((p & 0x1) * 0xff) << 16;
+
*buffer++ = a | r | g | b;
}
}
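
In a1b1g1r1 the nibble is laid out alpha, blue, green, red from the high bit down, so bit 2 (0x4) is blue and bit 0 (0x1) is red; the old code expanded those two bits into each other's byte of the result. A worked example of the fixed expansion (standalone sketch, hypothetical function name):

    #include <assert.h>
    #include <stdint.h>

    /* Expand one a1b1g1r1 nibble to 0xAARRGGBB, following the fixed fetcher. */
    static uint32_t
    expand_a1b1g1r1 (uint32_t p)
    {
        uint32_t a, r, g, b;

        a = ((p & 0x8) * 0xff) << 21;   /* bit 3 -> alpha byte */
        b = ((p & 0x4) * 0xff) >> 2;    /* bit 2 -> blue byte  */
        g = ((p & 0x2) * 0xff) << 7;    /* bit 1 -> green byte */
        r = ((p & 0x1) * 0xff) << 16;   /* bit 0 -> red byte   */

        return a | r | g | b;
    }

    int
    main (void)
    {
        /* a=1, b=1, g=0, r=1 -> opaque magenta */
        assert (expand_a1b1g1r1 (0xd) == 0xffff00ff);
        return 0;
    }
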
@@ -1546,23 +1552,25 @@ fetch_pixel_b2g3r3 (bits_image_t *image,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
- uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+ uint32_t p = READ (image, (uint8_t *) bits + offset);
uint32_t r, g, b;
-
- b = ((pixel & 0xc0) |
- ((pixel & 0xc0) >> 2) |
- ((pixel & 0xc0) >> 4) |
- ((pixel & 0xc0) >> 6));
-
- g = ((pixel & 0x38) |
- ((pixel & 0x38) >> 3) |
- ((pixel & 0x30) << 2)) << 8;
-
- r = ((pixel & 0x07) |
- ((pixel & 0x07) << 3) |
- ((pixel & 0x06) << 6)) << 16;
-
- return (0xff000000 | r | g | b);
+
+ b = p & 0xc0;
+ b |= b >> 2;
+ b |= b >> 4;
+ b &= 0xff;
+
+ g = (p & 0x38) << 10;
+ g |= g >> 3;
+ g |= g >> 6;
+ g &= 0xff00;
+
+ r = (p & 0x7) << 21;
+ r |= r >> 3;
+ r |= r >> 6;
+ r &= 0xff0000;
+
+ return 0xff000000 | r | g | b;
}
static uint32_t
@@ -1592,7 +1600,7 @@ fetch_pixel_a2b2g2r2 (bits_image_t *image,
uint32_t a, r, g, b;
a = ((pixel & 0xc0) * 0x55) << 18;
- b = ((pixel & 0x30) * 0x55) >> 6;
+ b = ((pixel & 0x30) * 0x55) >> 4;
g = ((pixel & 0x0c) * 0x55) << 6;
r = ((pixel & 0x03) * 0x55) << 16;
@@ -1674,12 +1682,12 @@ fetch_pixel_a1r1g1b1 (bits_image_t *image,
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = FETCH_4 (image, bits, offset);
uint32_t a, r, g, b;
-
+
a = ((pixel & 0x8) * 0xff) << 21;
r = ((pixel & 0x4) * 0xff) << 14;
g = ((pixel & 0x2) * 0xff) << 7;
b = ((pixel & 0x1) * 0xff);
-
+
return a | r | g | b;
}
@@ -1691,12 +1699,12 @@ fetch_pixel_a1b1g1r1 (bits_image_t *image,
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = FETCH_4 (image, bits, offset);
uint32_t a, r, g, b;
-
+
a = ((pixel & 0x8) * 0xff) << 21;
- r = ((pixel & 0x4) * 0xff) >> 3;
+ b = ((pixel & 0x4) * 0xff) >> 2;
g = ((pixel & 0x2) * 0xff) << 7;
- b = ((pixel & 0x1) * 0xff) << 16;
-
+ r = ((pixel & 0x1) * 0xff) << 16;
+
return a | r | g | b;
}
@@ -1708,7 +1716,7 @@ fetch_pixel_c4 (bits_image_t *image,
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = FETCH_4 (image, bits, offset);
const pixman_indexed_t * indexed = image->indexed;
-
+
return indexed->rgba[pixel];
}
@@ -2425,22 +2433,32 @@ store_scanline_x4a4 (bits_image_t * image,
uint32_t *bits = image->bits + image->rowstride * y;
uint8_t *pixel = ((uint8_t *) bits) + x;
int i;
-
+
for (i = 0; i < width; ++i)
WRITE (image, pixel++, values[i] >> 28);
}
#define STORE_8(img,l,o,v) (WRITE (img, (uint8_t *)(l) + ((o) >> 3), (v)))
#ifdef WORDS_BIGENDIAN
-#define STORE_4(img,l,o,v) \
- STORE_8 (img,l,o,((o) & 4 ? \
- (FETCH_8 (img,l,o) & 0xf0) | (v) : \
- (FETCH_8 (img,l,o) & 0x0f) | ((v) << 4)))
+
+#define STORE_4(img,l,o,v) \
+ do \
+ { \
+ int bo = 4 * (o); \
+ STORE_8 (img, l, bo, (bo & 4 ? \
+ (FETCH_8 (img, l, bo) & 0xf0) | (v) : \
+ (FETCH_8 (img, l, bo) & 0x0f) | ((v) << 4))); \
+ } while (0)
#else
-#define STORE_4(img,l,o,v) \
- STORE_8 (img,l,o,((o) & 4 ? \
- (FETCH_8 (img,l,o) & 0x0f) | ((v) << 4) : \
- (FETCH_8 (img,l,o) & 0xf0) | (v)))
+
+#define STORE_4(img,l,o,v) \
+ do \
+ { \
+ int bo = 4 * (o); \
+ STORE_8 (img, l, bo, (bo & 4 ? \
+ (FETCH_8 (img, l, bo) & 0x0f) | ((v) << 4) : \
+ (FETCH_8 (img, l, bo) & 0xf0) | (v))); \
+ } while (0)
#endif
static void
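
STORE_4 now mirrors the new FETCH_4: the nibble index is converted to a bit offset once (bo = 4 * (o)), and the do { } while (0) wrapper keeps the macro usable as a single statement in the store loops below. A little-endian round-trip sketch, again with plain pointer access standing in for pixman's READ/WRITE wrappers:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define READ(img, ptr)      (*(ptr))
    #define WRITE(img, ptr, v)  (*(ptr) = (v))
    #define FETCH_8(img,l,o)    (READ (img, (((uint8_t *)(l)) + ((o) >> 3))))
    #define STORE_8(img,l,o,v)  (WRITE (img, (uint8_t *)(l) + ((o) >> 3), (v)))

    /* little-endian STORE_4 as added by the patch */
    #define STORE_4(img,l,o,v)                                                 \
        do                                                                     \
        {                                                                      \
            int bo = 4 * (o);                                                  \
            STORE_8 (img, l, bo, (bo & 4 ?                                     \
                                  (FETCH_8 (img, l, bo) & 0x0f) | ((v) << 4) : \
                                  (FETCH_8 (img, l, bo) & 0xf0) | (v)));       \
        } while (0)

    int
    main (void)
    {
        uint8_t line[2] = { 0, 0 };

        STORE_4 (NULL, line, 0, 0x1);
        STORE_4 (NULL, line, 1, 0x2);
        STORE_4 (NULL, line, 2, 0x3);
        STORE_4 (NULL, line, 3, 0x4);

        /* the inverse of the FETCH_4 example: nibbles land as 0x21 0x43 */
        assert (line[0] == 0x21 && line[1] == 0x43);
        return 0;
    }
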
@@ -2452,7 +2470,7 @@ store_scanline_a4 (bits_image_t * image,
{
uint32_t *bits = image->bits + image->rowstride * y;
int i;
-
+
for (i = 0; i < width; ++i)
STORE_4 (image, bits, i + x, values[i] >> 28);
}
@@ -2488,11 +2506,11 @@ store_scanline_b1g2r1 (bits_image_t * image,
{
uint32_t *bits = image->bits + image->rowstride * y;
int i;
-
+
for (i = 0; i < width; ++i)
{
uint32_t pixel;
-
+
SPLIT (values[i]);
pixel = (((b >> 4) & 0x8) |
((g >> 5) & 0x6) |
@@ -2510,16 +2528,17 @@ store_scanline_a1r1g1b1 (bits_image_t * image,
{
uint32_t *bits = image->bits + image->rowstride * y;
int i;
-
+
for (i = 0; i < width; ++i)
{
uint32_t pixel;
-
+
SPLIT_A (values[i]);
pixel = (((a >> 4) & 0x8) |
((r >> 5) & 0x4) |
((g >> 6) & 0x2) |
((b >> 7) ));
+
STORE_4 (image, bits, i + x, pixel);
}
}
@@ -2533,16 +2552,17 @@ store_scanline_a1b1g1r1 (bits_image_t * image,
{
uint32_t *bits = image->bits + image->rowstride * y;
int i;
-
+
for (i = 0; i < width; ++i)
{
uint32_t pixel;
-
+
SPLIT_A (values[i]);
pixel = (((a >> 4) & 0x8) |
((b >> 5) & 0x4) |
((g >> 6) & 0x2) |
((r >> 7) ));
+
STORE_4 (image, bits, i + x, pixel);
}
}
diff --git a/pixman/pixman/pixman-arm-neon-asm.S b/pixman/pixman/pixman-arm-neon-asm.S
new file mode 100644
index 000000000..e8ccf77a9
--- /dev/null
+++ b/pixman/pixman/pixman-arm-neon-asm.S
@@ -0,0 +1,1051 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Nokia Corporation not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. Nokia Corporation makes no
+ * representations about the suitability of this software for any purpose.
+ * It is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+/*
+ * This file contains implementations of NEON optimized pixel processing
+ * functions. There is no full and detailed tutorial, but some functions
+ * (those which expose some new or interesting features) are
+ * extensively commented and can be used as examples.
+ *
+ * You may want to have a look at the comments for the following functions:
+ * - pixman_composite_over_8888_0565_asm_neon
+ * - pixman_composite_over_n_8_0565_asm_neon
+ */
+
+/* Prevent the stack from becoming executable for no reason... */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+ .text
+ .fpu neon
+ .altmacro
+
+#include "pixman-arm-neon-asm.h"
+
+/* Global configuration options and preferences */
+
+/*
+ * The code can optionally make use of unaligned memory accesses to improve
+ * performance of handling leading/trailing pixels for each scanline.
+ * The configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0, for
+ * example on Linux, if unaligned memory accesses are not configured to
+ * generate exceptions.
+ */
+.set RESPECT_STRICT_ALIGNMENT, 1
+
+/*
+ * Set default prefetch type. There is a choice between the following options:
+ *
+ * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
+ * as a NOP to work around some HW bugs or for whatever other reason)
+ *
+ * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
+ * advanced prefetch introduces heavy overhead)
+ *
+ * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
+ * which can run ARM and NEON instructions simultaneously so that extra ARM
+ * instructions do not add (many) extra cycles, but improve prefetch efficiency)
+ *
+ * Note: some types of functions can't support advanced prefetch and fall back
+ * to the simple one (those which handle 24bpp pixels)
+ */
+.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
+
+/* Prefetch distance in pixels for simple prefetch */
+.set PREFETCH_DISTANCE_SIMPLE, 64
+
+/*
+ * Implementation of pixman_composite_over_8888_0565_asm_neon
+ *
+ * This function takes an a8r8g8b8 source buffer, an r5g6b5 destination buffer
+ * and performs the OVER compositing operation. fast_composite_over_8888_0565
+ * from pixman-fast-path.c does the same in C and can be used as a reference.
+ *
+ * First we need to have some NEON assembly code which can do the actual
+ * operation on the pixels and provide it to the template macro.
+ *
+ * Template macro quite conveniently takes care of emitting all the necessary
+ * code for memory reading and writing (including quite tricky cases of
+ * handling unaligned leading/trailing pixels), so we only need to deal with
+ * the data in NEON registers.
+ *
+ * The generally recommended NEON register allocation is the following:
+ * d0, d1, d2, d3 - contain loaded source pixel data
+ * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
+ * d24, d25, d26, d27 - contain loaded mask pixel data (if mask is used)
+ * d28, d29, d30, d31 - place for storing the result (destination pixels)
+ *
+ * As can be seen above, four 64-bit NEON registers are used for keeping
+ * intermediate pixel data and up to 8 pixels can be processed in one step
+ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
+ *
+ * This particular function uses the following registers allocation:
+ * d0, d1, d2, d3 - contain loaded source pixel data
+ * d4, d5 - contain loaded destination pixels (they are needed)
+ * d28, d29 - place for storing the result (destination pixels)
+ */
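As a point of reference before the assembly, the per-pixel math of this operation can be sketched in C roughly as follows; mul_un8 and over_8888_0565_pixel are hypothetical helper names used only for illustration, not pixman's actual fast path:

    #include <stdint.h>

    /* Rounded approximation of (x * y) / 255, as used throughout pixman. */
    static uint8_t
    mul_un8 (uint8_t x, uint8_t y)
    {
        uint32_t t = (uint32_t)x * y + 0x80;
        return (uint8_t)((t + (t >> 8)) >> 8);
    }

    /* Illustrative only: one a8r8g8b8 source pixel OVER one r5g6b5
     * destination pixel, i.e. dst = src + (1 - src.alpha) * dst.        */
    static uint16_t
    over_8888_0565_pixel (uint32_t src, uint16_t dst)
    {
        /* expand r5g6b5 to 8 bits per channel by bit replication,
         * like the vshrn/vsri sequence in the head macro below          */
        uint8_t dr = (uint8_t)(((dst >> 11) << 3) | ((dst >> 13) & 0x7));
        uint8_t dg = (uint8_t)(((dst >>  5) << 2) | ((dst >>  9) & 0x3));
        uint8_t db = (uint8_t)(( dst        << 3) | ((dst >>  2) & 0x7));

        uint8_t sa = (uint8_t)(src >> 24);
        uint8_t sr = (uint8_t)(src >> 16);
        uint8_t sg = (uint8_t)(src >>  8);
        uint8_t sb = (uint8_t) src;
        uint8_t ia = (uint8_t)(255 - sa);   /* vmvn: inverted source alpha */

        uint32_t r = sr + mul_un8 (dr, ia); /* vmull/vrshr/vraddhn + vqadd */
        uint32_t g = sg + mul_un8 (dg, ia);
        uint32_t b = sb + mul_un8 (db, ia);
        if (r > 255) r = 255;
        if (g > 255) g = 255;
        if (b > 255) b = 255;

        /* repack to r5g6b5 (the vshll/vsri pairs in the tail macro) */
        return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
    }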
+
+/*
+ * Step one. We need to have some code to do arithmetic on pixel data.
+ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
+ * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
+ * perform all the needed calculations and write the result to {d28, d29}.
+ * The rationale for having two macros and not just one will be explained
+ * later. In practice, any single monolithic function which does the work can
+ * be split into two parts in any arbitrary way without affecting correctness.
+ *
+ * There is one special trick here too. The common template macro can
+ * optionally make our life a bit easier by deinterleaving the R, G, B, A
+ * color components for 32bpp pixel formats (and this feature is used in
+ * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
+ * instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we
+ * actually use d0 register for blue channel (a vector of eight 8-bit
+ * values), d1 register for green, d2 for red and d3 for alpha. This
+ * simple conversion can also be done with a few NEON instructions:
+ *
+ * Packed to planar conversion:
+ * vuzp.8 d0, d1
+ * vuzp.8 d2, d3
+ * vuzp.8 d1, d3
+ * vuzp.8 d0, d2
+ *
+ * Planar to packed conversion:
+ * vzip.8 d0, d2
+ * vzip.8 d1, d3
+ * vzip.8 d2, d3
+ * vzip.8 d0, d1
+ *
+ * But pixels can be loaded directly in planar format using the VLD4.8 NEON
+ * instruction. It is 1 cycle slower than VLD1.32, so this is not always
+ * desirable, that's why deinterleaving is optional.
+ *
+ * But anyway, here is the code:
+ */
+.macro pixman_composite_over_8888_0565_process_pixblock_head
+ /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
+ and put data into d6 - red, d7 - green, d30 - blue */
+ vshrn.u16 d6, q2, #8
+ vshrn.u16 d7, q2, #3
+ vsli.u16 q2, q2, #5
+ vsri.u8 d6, d6, #5
+ vmvn.8 d3, d3 /* invert source alpha */
+ vsri.u8 d7, d7, #6
+ vshrn.u16 d30, q2, #2
+ /* now do alpha blending, storing results in 8-bit planar format
+ into d16 - red, d19 - green, d18 - blue */
+ vmull.u8 q10, d3, d6
+ vmull.u8 q11, d3, d7
+ vmull.u8 q12, d3, d30
+ vrshr.u16 q13, q10, #8
+ vrshr.u16 q3, q11, #8
+ vrshr.u16 q15, q12, #8
+ vraddhn.u16 d20, q10, q13
+ vraddhn.u16 d23, q11, q3
+ vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail
+ /* ... continue alpha blending */
+ vqadd.u8 d16, d2, d20
+ vqadd.u8 q9, q0, q11
+ /* convert the result to r5g6b5 and store it into {d28, d29} */
+ vshll.u8 q14, d16, #8
+ vshll.u8 q8, d19, #8
+ vshll.u8 q9, d18, #8
+ vsri.u16 q14, q8, #5
+ vsri.u16 q14, q9, #11
+.endm
+
+/*
+ * OK, now we have almost everything that we need. Using the above two
+ * macros, the work can be done. But now we want to optimize
+ * it a bit. ARM Cortex-A8 is an in-order core, and benefits a great deal
+ * from good code scheduling and software pipelining.
+ *
+ * Let's construct some code, which will run in the core main loop.
+ * Some pseudo-code of the main loop will look like this:
+ * head
+ * while (...) {
+ * tail
+ * head
+ * }
+ * tail
+ *
+ * It may look a bit weird, but this setup makes it possible to hide
+ * instruction latencies better and also to utilize the dual-issue capability
+ * more efficiently (by pairing load-store and ALU instructions).
+ *
+ * So what we need now is a '*_tail_head' macro, which will be used
+ * in the core main loop. A trivial straightforward implementation
+ * of this macro would look like this:
+ *
+ * pixman_composite_over_8888_0565_process_pixblock_tail
+ * vst1.16 {d28, d29}, [DST_W, :128]!
+ * vld1.16 {d4, d5}, [DST_R, :128]!
+ * vld4.32 {d0, d1, d2, d3}, [SRC]!
+ * pixman_composite_over_8888_0565_process_pixblock_head
+ * cache_preload 8, 8
+ *
+ * Now it also contains some VLD/VST instructions. We simply can't move from
+ * processing one block of pixels to the next one with just arithmetic.
+ * The previously processed data needs to be written to memory and new
+ * data needs to be fetched. Fortunately, this main loop does not deal
+ * with partial leading/trailing pixels and can load/store a full block
+ * of pixels in bulk. Additionally, the destination buffer is already
+ * 16-byte aligned here (which is good for performance).
+ *
+ * New things here are DST_R, DST_W, SRC and MASK identifiers. These
+ * are the aliases for ARM registers which are used as pointers for
+ * accessing data. We maintain separate pointers for reading and writing
+ * destination buffer (DST_R and DST_W).
+ *
+ * Another new thing is the 'cache_preload' macro. It is used for prefetching
+ * data into the CPU L2 cache and improves performance when dealing with large
+ * images which are far larger than the cache size. It takes one argument
+ * (actually two, but they need to be the same here) - the number of pixels
+ * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
+ * details about this macro. Moreover, if good performance is needed,
+ * the code from this macro needs to be copied into the '*_tail_head' macro
+ * and mixed with the rest of the code for optimal instruction scheduling.
+ * We are actually doing it below.
+ *
+ * Now after all the explanations, here is the optimized code.
+ * Different instruction streams (originating from '*_head', '*_tail'
+ * and 'cache_preload' macro) use different indentation levels for
+ * better readability. Actually taking the code from one of these
+ * indentation levels and ignoring a few VLD/VST instructions would
+ * result in exactly the code from '*_head', '*_tail' or 'cache_preload'
+ * macro!
+ */
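The loop shape itself can be sketched in C; process_head and process_tail below are just stand-ins for the '*_head' and '*_tail' macros, purely for illustration:

    #include <stdio.h>

    /* Stand-ins for the '*_head' and '*_tail' macros (illustrative only). */
    static void process_head (int block) { printf ("head %d\n", block); }
    static void process_tail (int block) { printf ("tail %d\n", block); }

    /* The "head (tail_head)* tail" pattern from the comment above:
     * finishing block i is interleaved with starting block i + 1.        */
    static void
    pipelined_loop (int nblocks)
    {
        int i;

        if (nblocks <= 0)
            return;

        process_head (0);                  /* head                      */
        for (i = 0; i < nblocks - 1; i++)
        {
            process_tail (i);              /* tail of block i ...       */
            process_head (i + 1);          /* ... fused with next head  */
        }
        process_tail (nblocks - 1);        /* final tail                */
    }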
+
+#if 1
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+ vqadd.u8 d16, d2, d20
+ vld1.16 {d4, d5}, [DST_R, :128]!
+ vqadd.u8 q9, q0, q11
+ vshrn.u16 d6, q2, #8
+ vld4.8 {d0, d1, d2, d3}, [SRC]!
+ vshrn.u16 d7, q2, #3
+ vsli.u16 q2, q2, #5
+ vshll.u8 q14, d16, #8
+ PF add PF_X, PF_X, #8
+ vshll.u8 q8, d19, #8
+ PF tst PF_CTL, #0xF
+ vsri.u8 d6, d6, #5
+ PF addne PF_X, PF_X, #8
+ vmvn.8 d3, d3
+ PF subne PF_CTL, PF_CTL, #1
+ vsri.u8 d7, d7, #6
+ vshrn.u16 d30, q2, #2
+ vmull.u8 q10, d3, d6
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+ vmull.u8 q11, d3, d7
+ vmull.u8 q12, d3, d30
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+ vsri.u16 q14, q8, #5
+ PF cmp PF_X, ORIG_W
+ vshll.u8 q9, d18, #8
+ vrshr.u16 q13, q10, #8
+ PF subge PF_X, PF_X, ORIG_W
+ vrshr.u16 q3, q11, #8
+ vrshr.u16 q15, q12, #8
+ PF subges PF_CTL, PF_CTL, #0x10
+ vsri.u16 q14, q9, #11
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+ vraddhn.u16 d20, q10, q13
+ vraddhn.u16 d23, q11, q3
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+ vraddhn.u16 d22, q12, q15
+ vst1.16 {d28, d29}, [DST_W, :128]!
+.endm
+
+#else
+
+/* If we did not care much about the performance, we would just use this... */
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+ pixman_composite_over_8888_0565_process_pixblock_tail
+ vst1.16 {d28, d29}, [DST_W, :128]!
+ vld1.16 {d4, d5}, [DST_R, :128]!
+ vld4.32 {d0, d1, d2, d3}, [SRC]!
+ pixman_composite_over_8888_0565_process_pixblock_head
+ cache_preload 8, 8
+.endm
+
+#endif
+
+/*
+ * And now the final part. We are using the 'generate_composite_function'
+ * macro to put all the stuff together. We specify the name of the function
+ * which we want to get, the number of bits per pixel for the source, mask and
+ * destination (0 if unused, like mask in this case). Next come some bit
+ * flags:
+ * FLAG_DST_READWRITE - tells that the destination buffer is both read
+ * and written, for write-only buffer we would use
+ * FLAG_DST_WRITEONLY flag instead
+ * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
+ * and separate color channels for 32bpp format.
+ * The next things are:
+ * - the number of pixels processed per iteration (8 in this case, because
+ *   that's the maximum that can fit into four 64-bit NEON registers).
+ * - prefetch distance, measured in pixel blocks. In this case it is 5 blocks
+ *   of 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal
+ * prefetch distance can be selected by running some benchmarks.
+ *
+ * After that we specify some macros; here these are 'default_init' and
+ * 'default_cleanup', which are empty (but it is possible to have custom
+ * init/cleanup macros to be able to save/restore some extra NEON registers
+ * like d8-d15 or do anything else) followed by
+ * 'pixman_composite_over_8888_0565_process_pixblock_head',
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
+ * which we got implemented above.
+ *
+ * The last part is the NEON registers allocation scheme.
+ */
+generate_composite_function \
+ pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_over_8888_0565_process_pixblock_head, \
+ pixman_composite_over_8888_0565_process_pixblock_tail, \
+ pixman_composite_over_8888_0565_process_pixblock_tail_head, \
+ 28, /* dst_w_basereg */ \
+ 4, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 24 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_0565_process_pixblock_head
+ vshll.u8 q8, d1, #8
+ vshll.u8 q14, d2, #8
+ vshll.u8 q9, d0, #8
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail
+ vsri.u16 q14, q8, #5
+ vsri.u16 q14, q9, #11
+.endm
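In C terms, the head/tail pair above just keeps the top 5/6/5 bits of each color channel and drops alpha; a rough one-pixel sketch (illustrative helper name, not pixman's):

    #include <stdint.h>

    /* Illustrative only: what the vshll/vsri pairs compute for one pixel -
     * keep the top 5/6/5 bits of R/G/B and drop the alpha channel.        */
    static uint16_t
    convert_8888_to_0565 (uint32_t src)
    {
        uint32_t r = (src >> 16) & 0xff;
        uint32_t g = (src >>  8) & 0xff;
        uint32_t b =  src        & 0xff;

        return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
    }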
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
+ vsri.u16 q14, q8, #5
+ PF add PF_X, PF_X, #8
+ PF tst PF_CTL, #0xF
+ vld4.8 {d0, d1, d2, d3}, [SRC]!
+ PF addne PF_X, PF_X, #8
+ PF subne PF_CTL, PF_CTL, #1
+ vsri.u16 q14, q9, #11
+ PF cmp PF_X, ORIG_W
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+ vshll.u8 q8, d1, #8
+ vst1.16 {d28, d29}, [DST_W, :128]!
+ PF subge PF_X, PF_X, ORIG_W
+ PF subges PF_CTL, PF_CTL, #0x10
+ vshll.u8 q14, d2, #8
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+ vshll.u8 q9, d0, #8
+.endm
+
+generate_composite_function \
+ pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
+ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 10, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_src_8888_0565_process_pixblock_head, \
+ pixman_composite_src_8888_0565_process_pixblock_tail, \
+ pixman_composite_src_8888_0565_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8000_8000_process_pixblock_head
+ vqadd.u8 q14, q0, q2
+ vqadd.u8 q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8000_8000_process_pixblock_tail
+.endm
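The ADD operator implemented here is a per-byte saturating addition; roughly, this is what a single lane of vqadd.u8 computes (illustrative helper, not pixman's):

    #include <stdint.h>

    /* Illustrative only: unsigned 8-bit addition that clamps at 255
     * instead of wrapping around, like one lane of vqadd.u8.          */
    static uint8_t
    add_un8_saturate (uint8_t a, uint8_t b)
    {
        uint32_t sum = (uint32_t)a + b;
        return (uint8_t)(sum > 255 ? 255 : sum);
    }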
+
+.macro pixman_composite_add_8000_8000_process_pixblock_tail_head
+ vld1.8 {d0, d1, d2, d3}, [SRC]!
+ PF add PF_X, PF_X, #32
+ PF tst PF_CTL, #0xF
+ vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ PF addne PF_X, PF_X, #32
+ PF subne PF_CTL, PF_CTL, #1
+ vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
+ PF cmp PF_X, ORIG_W
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+ PF subge PF_X, PF_X, ORIG_W
+ PF subges PF_CTL, PF_CTL, #0x10
+ vqadd.u8 q14, q0, q2
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+ vqadd.u8 q15, q1, q3
+.endm
+
+generate_composite_function \
+ pixman_composite_add_8000_8000_asm_neon, 8, 0, 8, \
+ FLAG_DST_READWRITE, \
+ 32, /* number of pixels, processed in a single block */ \
+ 10, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_add_8000_8000_process_pixblock_head, \
+ pixman_composite_add_8000_8000_process_pixblock_tail, \
+ pixman_composite_add_8000_8000_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_8888_process_pixblock_head
+ vmvn.8 d24, d3 /* get inverted alpha */
+ /* do alpha blending */
+ vmull.u8 q8, d24, d4
+ vmull.u8 q9, d24, d5
+ vmull.u8 q10, d24, d6
+ vmull.u8 q11, d24, d7
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail
+ vrshr.u16 q14, q8, #8
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ vraddhn.u16 d30, q12, q10
+ vraddhn.u16 d31, q13, q11
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
+ vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ vrshr.u16 q14, q8, #8
+ PF add PF_X, PF_X, #8
+ PF tst PF_CTL, #0xF
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ PF addne PF_X, PF_X, #8
+ PF subne PF_CTL, PF_CTL, #1
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ PF cmp PF_X, ORIG_W
+ vraddhn.u16 d30, q12, q10
+ vraddhn.u16 d31, q13, q11
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+ vld4.8 {d0, d1, d2, d3}, [SRC]!
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+ vmvn.8 d22, d3
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+ vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
+ PF subge PF_X, PF_X, ORIG_W
+ vmull.u8 q8, d22, d4
+ PF subges PF_CTL, PF_CTL, #0x10
+ vmull.u8 q9, d22, d5
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+ vmull.u8 q10, d22, d6
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+ vmull.u8 q11, d22, d7
+.endm
+
+generate_composite_function \
+ pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_over_8888_8888_process_pixblock_head, \
+ pixman_composite_over_8888_8888_process_pixblock_tail, \
+ pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_0565_process_pixblock_head
+ /* in */
+ vmull.u8 q0, d24, d8
+ vmull.u8 q1, d24, d9
+ vmull.u8 q6, d24, d10
+ vmull.u8 q7, d24, d11
+ vrshr.u16 q10, q0, #8
+ vrshr.u16 q11, q1, #8
+ vrshr.u16 q12, q6, #8
+ vrshr.u16 q13, q7, #8
+ vraddhn.u16 d0, q0, q10
+ vraddhn.u16 d1, q1, q11
+ vraddhn.u16 d2, q6, q12
+ vraddhn.u16 d3, q7, q13
+
+ vshrn.u16 d6, q2, #8
+ vshrn.u16 d7, q2, #3
+ vsli.u16 q2, q2, #5
+ vsri.u8 d6, d6, #5
+ vmvn.8 d3, d3
+ vsri.u8 d7, d7, #6
+ vshrn.u16 d30, q2, #2
+ /* now do alpha blending */
+ vmull.u8 q10, d3, d6
+ vmull.u8 q11, d3, d7
+ vmull.u8 q12, d3, d30
+ vrshr.u16 q13, q10, #8
+ vrshr.u16 q3, q11, #8
+ vrshr.u16 q15, q12, #8
+ vraddhn.u16 d20, q10, q13
+ vraddhn.u16 d23, q11, q3
+ vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_n_8_0565_process_pixblock_tail
+ vqadd.u8 d16, d2, d20
+ vqadd.u8 q9, q0, q11
+ /* convert to r5g6b5 */
+ vshll.u8 q14, d16, #8
+ vshll.u8 q8, d19, #8
+ vshll.u8 q9, d18, #8
+ vsri.u16 q14, q8, #5
+ vsri.u16 q14, q9, #11
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_n_8_0565_process_pixblock_tail_head
+ pixman_composite_over_n_8_0565_process_pixblock_tail
+ vst1.16 {d28, d29}, [DST_W, :128]!
+ vld1.16 {d4, d5}, [DST_R, :128]!
+ vld1.8 {d24}, [MASK]!
+ cache_preload 8, 8
+ pixman_composite_over_n_8_0565_process_pixblock_head
+.endm
+
+/*
+ * This function needs a special initialization of solid mask.
+ * Solid source pixel data is fetched from stack at ARGS_STACK_OFFSET
+ * offset, split into color components and replicated in d8-d11
+ * registers. Additionally, this function needs all the NEON registers,
+ * so it has to save d8-d15 registers which are callee saved according
+ * to ABI. These registers are restored from 'cleanup' macro. All the
+ * other NEON registers are caller saved, so can be clobbered freely
+ * without introducing any problems.
+ */
+.macro pixman_composite_over_n_8_0565_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vpush {d8-d15}
+ vld1.32 {d11[0]}, [DUMMY]
+ vdup.8 d8, d11[0]
+ vdup.8 d9, d11[1]
+ vdup.8 d10, d11[2]
+ vdup.8 d11, d11[3]
+.endm
+
+.macro pixman_composite_over_n_8_0565_cleanup
+ vpop {d8-d15}
+.endm
+
+generate_composite_function \
+ pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
+ FLAG_DST_READWRITE, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ pixman_composite_over_n_8_0565_init, \
+ pixman_composite_over_n_8_0565_cleanup, \
+ pixman_composite_over_n_8_0565_process_pixblock_head, \
+ pixman_composite_over_n_8_0565_process_pixblock_tail, \
+ pixman_composite_over_n_8_0565_process_pixblock_tail_head
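Taken together, the per-pixel math of this function can be sketched in C as below; the sketch reuses the hypothetical mul_un8 and over_8888_0565_pixel helpers shown earlier and is not pixman's actual code:

    #include <stdint.h>

    /* Illustrative only: the solid a8r8g8b8 color 'src' is multiplied by
     * the 8-bit mask value (the "in" step done with vmull/vrshr/vraddhn),
     * then composited OVER the r5g6b5 destination pixel.                  */
    static uint16_t
    over_n_8_0565_pixel (uint32_t src, uint8_t mask, uint16_t dst)
    {
        uint32_t a = mul_un8 ((uint8_t)(src >> 24), mask);
        uint32_t r = mul_un8 ((uint8_t)(src >> 16), mask);
        uint32_t g = mul_un8 ((uint8_t)(src >>  8), mask);
        uint32_t b = mul_un8 ((uint8_t) src,        mask);
        uint32_t in = (a << 24) | (r << 16) | (g << 8) | b;

        return over_8888_0565_pixel (in, dst);   /* sketched earlier */
    }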
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0565_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
+ vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
+ vld1.16 {d0, d1, d2, d3}, [SRC]!
+ cache_preload 16, 16
+.endm
+
+generate_composite_function \
+ pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
+ FLAG_DST_WRITEONLY, \
+ 16, /* number of pixels, processed in a single block */ \
+ 10, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_src_0565_0565_process_pixblock_head, \
+ pixman_composite_src_0565_0565_process_pixblock_tail, \
+ pixman_composite_src_0565_0565_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail_head
+ vst1.8 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vld1.32 {d0[0]}, [DUMMY]
+ vsli.u64 d0, d0, #8
+ vsli.u64 d0, d0, #16
+ vsli.u64 d0, d0, #32
+ vmov d1, d0
+ vmov q1, q0
+.endm
+
+.macro pixman_composite_src_n_8_cleanup
+.endm
+
+generate_composite_function \
+ pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
+ FLAG_DST_WRITEONLY, \
+ 32, /* number of pixels, processed in a single block */ \
+ 0, /* prefetch distance */ \
+ pixman_composite_src_n_8_init, \
+ pixman_composite_src_n_8_cleanup, \
+ pixman_composite_src_n_8_process_pixblock_head, \
+ pixman_composite_src_n_8_process_pixblock_tail, \
+ pixman_composite_src_n_8_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail_head
+ vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_0565_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vld1.32 {d0[0]}, [DUMMY]
+ vsli.u64 d0, d0, #16
+ vsli.u64 d0, d0, #32
+ vmov d1, d0
+ vmov q1, q0
+.endm
+
+.macro pixman_composite_src_n_0565_cleanup
+.endm
+
+generate_composite_function \
+ pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
+ FLAG_DST_WRITEONLY, \
+ 16, /* number of pixels, processed in a single block */ \
+ 0, /* prefetch distance */ \
+ pixman_composite_src_n_0565_init, \
+ pixman_composite_src_n_0565_cleanup, \
+ pixman_composite_src_n_0565_process_pixblock_head, \
+ pixman_composite_src_n_0565_process_pixblock_tail, \
+ pixman_composite_src_n_0565_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail_head
+ vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8888_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vld1.32 {d0[0]}, [DUMMY]
+ vsli.u64 d0, d0, #32
+ vmov d1, d0
+ vmov q1, q0
+.endm
+
+.macro pixman_composite_src_n_8888_cleanup
+.endm
+
+generate_composite_function \
+ pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
+ FLAG_DST_WRITEONLY, \
+ 8, /* number of pixels, processed in a single block */ \
+ 0, /* prefetch distance */ \
+ pixman_composite_src_n_8888_init, \
+ pixman_composite_src_n_8888_cleanup, \
+ pixman_composite_src_n_8888_process_pixblock_head, \
+ pixman_composite_src_n_8888_process_pixblock_tail, \
+ pixman_composite_src_n_8888_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
+ vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+ vld1.32 {d0, d1, d2, d3}, [SRC]!
+ cache_preload 8, 8
+.endm
+
+generate_composite_function \
+ pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
+ FLAG_DST_WRITEONLY, \
+ 8, /* number of pixels, processed in a single block */ \
+ 10, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_src_8888_8888_process_pixblock_head, \
+ pixman_composite_src_8888_8888_process_pixblock_tail, \
+ pixman_composite_src_8888_8888_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_head
+ /* expecting deinterleaved source data in {d8, d9, d10, d11} */
+ /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
+ /* and destination data in {d4, d5, d6, d7} */
+ /* mask is in d24 (d25, d26, d27 are unused) */
+
+ /* in */
+ vmull.u8 q0, d24, d8
+ vmull.u8 q1, d24, d9
+ vmull.u8 q6, d24, d10
+ vmull.u8 q7, d24, d11
+ vrshr.u16 q10, q0, #8
+ vrshr.u16 q11, q1, #8
+ vrshr.u16 q12, q6, #8
+ vrshr.u16 q13, q7, #8
+ vraddhn.u16 d0, q0, q10
+ vraddhn.u16 d1, q1, q11
+ vraddhn.u16 d2, q6, q12
+ vraddhn.u16 d3, q7, q13
+ vmvn.8 d24, d3 /* get inverted alpha */
+ /* source: d0 - blue, d1 - green, d2 - red, d3 - alpha */
+ /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
+ /* now do alpha blending */
+ vmull.u8 q8, d24, d4
+ vmull.u8 q9, d24, d5
+ vmull.u8 q10, d24, d6
+ vmull.u8 q11, d24, d7
+.endm
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail
+ vrshr.u16 q14, q8, #8
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ vraddhn.u16 d30, q12, q10
+ vraddhn.u16 d31, q13, q11
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
+ pixman_composite_over_n_8_8888_process_pixblock_tail
+ vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
+ vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ vld1.8 {d24}, [MASK]!
+ cache_preload 8, 8
+ pixman_composite_over_n_8_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_n_8_8888_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vpush {d8-d15}
+ vld1.32 {d11[0]}, [DUMMY]
+ vdup.8 d8, d11[0]
+ vdup.8 d9, d11[1]
+ vdup.8 d10, d11[2]
+ vdup.8 d11, d11[3]
+.endm
+
+.macro pixman_composite_over_n_8_8888_cleanup
+ vpop {d8-d15}
+.endm
+
+generate_composite_function \
+ pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ pixman_composite_over_n_8_8888_init, \
+ pixman_composite_over_n_8_8888_cleanup, \
+ pixman_composite_over_n_8_8888_process_pixblock_head, \
+ pixman_composite_over_n_8_8888_process_pixblock_tail, \
+ pixman_composite_over_n_8_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_n_8_8_process_pixblock_head
+ /* expecting source data in {d8, d9, d10, d11} */
+ /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
+ /* and destination data in {d4, d5, d6, d7} */
+ /* mask is in d24, d25, d26, d27 */
+ vmull.u8 q0, d24, d11
+ vmull.u8 q1, d25, d11
+ vmull.u8 q6, d26, d11
+ vmull.u8 q7, d27, d11
+ vrshr.u16 q10, q0, #8
+ vrshr.u16 q11, q1, #8
+ vrshr.u16 q12, q6, #8
+ vrshr.u16 q13, q7, #8
+ vraddhn.u16 d0, q0, q10
+ vraddhn.u16 d1, q1, q11
+ vraddhn.u16 d2, q6, q12
+ vraddhn.u16 d3, q7, q13
+ vqadd.u8 q14, q0, q2
+ vqadd.u8 q15, q1, q3
+.endm
+
+.macro pixman_composite_add_n_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
+ pixman_composite_add_n_8_8_process_pixblock_tail
+ vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
+ vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ vld1.8 {d24, d25, d26, d27}, [MASK]!
+ cache_preload 32, 32
+ pixman_composite_add_n_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_n_8_8_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vpush {d8-d15}
+ vld1.32 {d11[0]}, [DUMMY]
+ vdup.8 d11, d11[3]
+.endm
+
+.macro pixman_composite_add_n_8_8_cleanup
+ vpop {d8-d15}
+.endm
+
+generate_composite_function \
+ pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
+ FLAG_DST_READWRITE, \
+ 32, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ pixman_composite_add_n_8_8_init, \
+ pixman_composite_add_n_8_8_cleanup, \
+ pixman_composite_add_n_8_8_process_pixblock_head, \
+ pixman_composite_add_n_8_8_process_pixblock_tail, \
+ pixman_composite_add_n_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8_8_8_process_pixblock_head
+ /* expecting source data in {d0, d1, d2, d3} */
+ /* destination data in {d4, d5, d6, d7} */
+ /* mask in {d24, d25, d26, d27} */
+ vmull.u8 q8, d24, d0
+ vmull.u8 q9, d25, d1
+ vmull.u8 q10, d26, d2
+ vmull.u8 q11, d27, d3
+ vrshr.u16 q0, q8, #8
+ vrshr.u16 q1, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ vraddhn.u16 d0, q0, q8
+ vraddhn.u16 d1, q1, q9
+ vraddhn.u16 d2, q12, q10
+ vraddhn.u16 d3, q13, q11
+ vqadd.u8 q14, q0, q2
+ vqadd.u8 q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
+ pixman_composite_add_8_8_8_process_pixblock_tail
+ vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
+ vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ vld1.8 {d24, d25, d26, d27}, [MASK]!
+ vld1.8 {d0, d1, d2, d3}, [SRC]!
+ cache_preload 32, 32
+ pixman_composite_add_8_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_8_8_8_init
+.endm
+
+.macro pixman_composite_add_8_8_8_cleanup
+.endm
+
+generate_composite_function \
+ pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
+ FLAG_DST_READWRITE, \
+ 32, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ pixman_composite_add_8_8_8_init, \
+ pixman_composite_add_8_8_8_cleanup, \
+ pixman_composite_add_8_8_8_process_pixblock_head, \
+ pixman_composite_add_8_8_8_process_pixblock_tail, \
+ pixman_composite_add_8_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_head
+ /* expecting source data in {d0, d1, d2, d3} */
+ /* destination data in {d4, d5, d6, d7} */
+ /* solid mask is in d15 */
+
+ /* 'in' */
+ vmull.u8 q8, d15, d3
+ vmull.u8 q6, d15, d2
+ vmull.u8 q5, d15, d1
+ vmull.u8 q4, d15, d0
+ vrshr.u16 q13, q8, #8
+ vrshr.u16 q12, q6, #8
+ vrshr.u16 q11, q5, #8
+ vrshr.u16 q10, q4, #8
+ vraddhn.u16 d3, q8, q13
+ vraddhn.u16 d2, q6, q12
+ vraddhn.u16 d1, q5, q11
+ vraddhn.u16 d0, q4, q10
+ vmvn.8 d24, d3 /* get inverted alpha */
+ /* now do alpha blending */
+ vmull.u8 q8, d24, d4
+ vmull.u8 q9, d24, d5
+ vmull.u8 q10, d24, d6
+ vmull.u8 q11, d24, d7
+.endm
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
+ vrshr.u16 q14, q8, #8
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ vraddhn.u16 d30, q12, q10
+ vraddhn.u16 d31, q13, q11
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+ vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ pixman_composite_over_8888_n_8888_process_pixblock_tail
+ vld4.8 {d0, d1, d2, d3}, [SRC]!
+ cache_preload 8, 8
+ pixman_composite_over_8888_n_8888_process_pixblock_head
+ vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_over_8888_n_8888_init
+ add DUMMY, sp, #48
+ vpush {d8-d15}
+ vld1.32 {d15[0]}, [DUMMY]
+ vdup.8 d15, d15[3]
+.endm
+
+.macro pixman_composite_over_8888_n_8888_cleanup
+ vpop {d8-d15}
+.endm
+
+generate_composite_function \
+ pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ pixman_composite_over_8888_n_8888_init, \
+ pixman_composite_over_8888_n_8888_cleanup, \
+ pixman_composite_over_8888_n_8888_process_pixblock_head, \
+ pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+ pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_0888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
+ vst3.8 {d0, d1, d2}, [DST_W]!
+ vld3.8 {d0, d1, d2}, [SRC]!
+ cache_preload 8, 8
+.endm
+
+generate_composite_function \
+ pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
+ FLAG_DST_WRITEONLY, \
+ 8, /* number of pixels, processed in a single block */ \
+ 10, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_src_0888_0888_process_pixblock_head, \
+ pixman_composite_src_0888_0888_process_pixblock_tail, \
+ pixman_composite_src_0888_0888_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
diff --git a/pixman/pixman/pixman-arm-neon-asm.h b/pixman/pixman/pixman-arm-neon-asm.h
new file mode 100644
index 000000000..e7be5cddf
--- /dev/null
+++ b/pixman/pixman/pixman-arm-neon-asm.h
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Nokia Corporation not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. Nokia Corporation makes no
+ * representations about the suitability of this software for any purpose.
+ * It is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+/*
+ * This file contains a macro ('generate_composite_function') which can
+ * construct 2D image processing functions, based on a common template.
+ * Any combinations of source, destination and mask images with 8bpp,
+ * 16bpp, 24bpp, 32bpp color formats are supported.
+ *
+ * This macro takes care of:
+ * - handling of leading and trailing unaligned pixels
+ * - doing most of the work related to L2 cache preload
+ * - encouraging the use of software pipelining for better instruction
+ *   scheduling
+ *
+ * The user of this macro has to provide some configuration parameters
+ * (bit depths for the images, prefetch distance, etc.) and a set of
+ * macros, which should implement basic code chunks responsible for
+ * pixel processing. See the 'pixman-arm-neon-asm.S' file for the usage
+ * examples.
+ *
+ * TODO:
+ * - try overlapped pixel method (from Ian Rickards) when processing
+ * exactly two blocks of pixels
+ * - maybe add an option to do reverse scanline processing
+ */
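A rough structural analogue of a generated function, written in C purely for illustration, is sketched below; it uses a trivial 8bpp saturating add as the per-pixel operation, with 16-pixel blocks standing in for pixblock_size:

    #include <stdint.h>

    #define BLOCK 16  /* stands in for pixblock_size */

    /* Illustrative skeleton only: leading pixels until the destination is
     * block-aligned, then full blocks, then the trailing remainder - the
     * same three phases the assembly template generates.                  */
    static void
    composite_add_8_scanline (uint8_t *dst, const uint8_t *src, int width)
    {
        int i = 0;

        /* leading pixels: advance until dst is 16-byte aligned            */
        while (i < width && ((uintptr_t)(dst + i) & (BLOCK - 1)) != 0)
        {
            unsigned s = dst[i] + src[i];
            dst[i] = (uint8_t)(s > 255 ? 255 : s);
            i++;
        }

        /* full blocks: what the pipelined main loop handles               */
        for (; i + BLOCK <= width; i += BLOCK)
        {
            int j;
            for (j = 0; j < BLOCK; j++)
            {
                unsigned s = dst[i + j] + src[i + j];
                dst[i + j] = (uint8_t)(s > 255 ? 255 : s);
            }
        }

        /* trailing pixels: the 'process_trailing_pixels' part             */
        for (; i < width; i++)
        {
            unsigned s = dst[i] + src[i];
            dst[i] = (uint8_t)(s > 255 ? 255 : s);
        }
    }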
+
+/*
+ * Bit flags for 'generate_composite_function' macro which are used
+ * to tune generated functions behavior.
+ */
+.set FLAG_DST_WRITEONLY, 0
+.set FLAG_DST_READWRITE, 1
+.set FLAG_DEINTERLEAVE_32BPP, 2
+
+/*
+ * Offset in stack where mask and source pointer/stride can be accessed
+ * from 'init' macro. This is useful for doing special handling for solid mask.
+ */
+.set ARGS_STACK_OFFSET, 40
+
+/*
+ * Constants for selecting preferable prefetch type.
+ */
+.set PREFETCH_TYPE_NONE, 0 /* No prefetch at all */
+.set PREFETCH_TYPE_SIMPLE, 1 /* A simple, fixed-distance-ahead prefetch */
+.set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */
+
+/*
+ * Definitions of supplementary pixld/pixst macros (for partial load/store of
+ * pixel data).
+ */
+
+.macro pixldst1 op, elem_size, reg1, mem_operand, abits
+.if abits > 0
+ op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
+.else
+ op&.&elem_size {d&reg1}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
+.if abits > 0
+ op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
+.else
+ op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
+.if abits > 0
+ op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
+.else
+ op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
+ op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
+ op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
+.endm
+
+.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
+ op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
+.if numbytes == 32
+ pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
+ %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif numbytes == 16
+ pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
+.elseif numbytes == 8
+ pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
+.elseif numbytes == 4
+ .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
+ pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
+ .elseif elem_size == 16
+ pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
+ pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
+ .else
+ pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
+ .endif
+.elseif numbytes == 2
+ .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
+ pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
+ .else
+ pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
+ .endif
+.elseif numbytes == 1
+ pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
+.else
+ .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
+ %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+ pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+ pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixst numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
+ %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+ pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+ pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixld_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+ pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+ pixld numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro pixst_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+ pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+ pixst numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro vuzp8 reg1, reg2
+ vuzp.8 d&reg1, d&reg2
+.endm
+
+.macro vzip8 reg1, reg2
+ vzip.8 d&reg1, d&reg2
+.endm
+
+/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixdeinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ vuzp8 %(basereg+0), %(basereg+1)
+ vuzp8 %(basereg+2), %(basereg+3)
+ vuzp8 %(basereg+1), %(basereg+3)
+ vuzp8 %(basereg+0), %(basereg+2)
+.endif
+.endm
+
+/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ vzip8 %(basereg+0), %(basereg+2)
+ vzip8 %(basereg+1), %(basereg+3)
+ vzip8 %(basereg+2), %(basereg+3)
+ vzip8 %(basereg+0), %(basereg+1)
+.endif
+.endm
+
+/*
+ * This is a macro for implementing cache preload. The main idea is that
+ * cache preload logic is mostly independent of the rest of the pixel
+ * processing code. It starts at the top left pixel and moves forward
+ * across pixels and can jump across scanlines. Prefetch distance is
+ * handled in an 'incremental' way: it starts from 0 and advances to the
+ * optimal distance over time. After reaching optimal prefetch distance,
+ * it is kept constant. There are some checks which prevent prefetching
+ * unneeded pixel lines below the image (but it still can prefetch a bit
+ * more data on the right side of the image - not a big issue and it may
+ * actually be helpful when rendering text glyphs). An additional trick is
+ * the use of the LDR instruction for prefetch instead of PLD when moving to
+ * the next line; the point is that we have a high chance of getting a TLB
+ * miss in this case, and PLD would be useless.
+ *
+ * This sounds like it may introduce a noticeable overhead (when working with
+ * fully cached data). But in reality, due to having a separate pipeline and
+ * instruction queue for NEON unit in ARM Cortex-A8, normal ARM code can
+ * execute simultaneously with NEON and be completely shadowed by it. Thus
+ * we get no performance overhead at all (*). This looks like a very nice
+ * feature of Cortex-A8, if used wisely. We don't have a hardware prefetcher,
+ * but can still implement some rather advanced prefetch logic in software
+ * for almost zero cost!
+ *
+ * (*) The overhead of the prefetcher is visible when running some trivial
+ * pixel processing like a simple copy. Anyway, having prefetch is a must
+ * when working with graphics data.
+ */
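As a hedged C analogue of the behaviour just described (the real logic lives in the PF-guarded ARM instructions below; bytes-per-pixel scaling is ignored for simplicity):

    #include <stdint.h>

    /* Illustrative only: the prefetch position pf_x runs ahead of the
     * current pixel; while the 'boost' budget (low bits of pf_ctl) lasts,
     * it advances by an extra block so the distance grows, and once it
     * runs past the scanline it wraps to the start of the next line.      */
    static void
    advance_prefetch (const uint8_t **pf_src, int *pf_x, int *pf_ctl,
                      int block, int width, int stride)
    {
        *pf_x += block;                       /* std_increment             */
        if ((*pf_ctl & 0xf) != 0)             /* still growing distance?   */
        {
            *pf_x += block;                   /* boost_increment           */
            *pf_ctl -= 1;
        }

        __builtin_prefetch (*pf_src + *pf_x); /* the PLD (GCC builtin)     */

        if (*pf_x >= width)                   /* past end of scanline      */
        {
            *pf_x -= width;
            *pf_src += stride;                /* touch next line (the LDR) */
            *pf_ctl -= 0x10;                  /* one fewer line remaining  */
        }
    }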
+.macro PF a, x:vararg
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
+ a x
+.endif
+.endm
+
+.macro cache_preload std_increment, boost_increment
+.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
+.if regs_shortage
+ PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
+.endif
+.if std_increment != 0
+ PF add PF_X, PF_X, #std_increment
+.endif
+ PF tst PF_CTL, #0xF
+ PF addne PF_X, PF_X, #boost_increment
+ PF subne PF_CTL, PF_CTL, #1
+ PF cmp PF_X, ORIG_W
+.if src_bpp_shift >= 0
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+.endif
+.if dst_r_bpp != 0
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+.endif
+.if mask_bpp_shift >= 0
+ PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
+.endif
+ PF subge PF_X, PF_X, ORIG_W
+ PF subges PF_CTL, PF_CTL, #0x10
+.if src_bpp_shift >= 0
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+.endif
+.if dst_r_bpp != 0
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+.endif
+.if mask_bpp_shift >= 0
+ PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
+.endif
+.endif
+.endm
+
+.macro cache_preload_simple
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
+.if src_bpp > 0
+ pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
+.endif
+.if dst_r_bpp > 0
+ pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
+.endif
+.if mask_bpp > 0
+ pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
+.endif
+.endif
+.endm
+
+/*
+ * Macro which is used to process leading pixels until destination
+ * pointer is properly aligned (at 16 bytes boundary). When destination
+ * buffer uses 16bpp format, this is unnecessary, or even pointless.
+ */
+.macro ensure_destination_ptr_alignment process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+.if dst_w_bpp != 24
+ tst DST_R, #0xF
+ beq 2f
+
+.irp lowbit, 1, 2, 4, 8, 16
+local skip1
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+ tst DST_R, #lowbit
+ beq 1f
+.endif
+ pixld (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
+ pixld (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
+.if dst_r_bpp > 0
+ pixld_a (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
+.else
+ add DST_R, DST_R, #lowbit
+.endif
+ PF add PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
+ sub W, W, #(lowbit * 8 / dst_w_bpp)
+1:
+.endif
+.endr
+ pixdeinterleave src_bpp, src_basereg
+ pixdeinterleave mask_bpp, mask_basereg
+ pixdeinterleave dst_r_bpp, dst_r_basereg
+
+ process_pixblock_head
+ cache_preload 0, pixblock_size
+ cache_preload_simple
+ process_pixblock_tail
+
+ pixinterleave dst_w_bpp, dst_w_basereg
+.irp lowbit, 1, 2, 4, 8, 16
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+ tst DST_W, #lowbit
+ beq 1f
+.endif
+ pixst_a (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
+1:
+.endif
+.endr
+.endif
+2:
+.endm
+
+/*
+ * Special code for processing up to (pixblock_size - 1) remaining
+ * trailing pixels. As SIMD processing performs operations on
+ * pixblock_size pixels, anything smaller than this has to be loaded
+ * and stored in a special way. Loading and storing of pixel data is
+ * performed in such a way that we fill some 'slots' in the NEON
+ * registers (some slots naturally are unused), then perform compositing
+ * operation as usual. In the end, the data is taken from these 'slots'
+ * and saved to memory.
+ *
+ * cache_preload_flag - allows prefetch to be suppressed if
+ * set to 0
+ * dst_aligned_flag - selects whether destination buffer
+ * is aligned
+ */
+.macro process_trailing_pixels cache_preload_flag, \
+ dst_aligned_flag, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+ tst W, #(pixblock_size - 1)
+ beq 2f
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+ tst W, #chunk_size
+ beq 1f
+ pixld chunk_size, src_bpp, src_basereg, SRC
+ pixld chunk_size, mask_bpp, mask_basereg, MASK
+.if dst_aligned_flag != 0
+ pixld_a chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.else
+ pixld chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.endif
+.if cache_preload_flag != 0
+ PF add PF_X, PF_X, #chunk_size
+.endif
+1:
+.endif
+.endr
+ pixdeinterleave src_bpp, src_basereg
+ pixdeinterleave mask_bpp, mask_basereg
+ pixdeinterleave dst_r_bpp, dst_r_basereg
+
+ process_pixblock_head
+.if cache_preload_flag != 0
+ cache_preload 0, pixblock_size
+ cache_preload_simple
+.endif
+ process_pixblock_tail
+ pixinterleave dst_w_bpp, dst_w_basereg
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+ tst W, #chunk_size
+ beq 1f
+.if dst_aligned_flag != 0
+ pixst_a chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.else
+ pixst chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.endif
+1:
+.endif
+.endr
+2:
+.endm
+
+/*
+ * Macro, which performs all the needed operations to switch to the next
+ * scanline and start the next loop iteration unless all the scanlines
+ * are already processed.
+ */
+.macro advance_to_next_scanline start_of_loop_label
+.if regs_shortage
+ ldrd W, [sp] /* load W and H (width and height) from stack */
+.else
+ mov W, ORIG_W
+.endif
+ add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
+.if src_bpp != 0
+ add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
+.endif
+.if mask_bpp != 0
+ add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
+.endif
+.if (dst_w_bpp != 24)
+ sub DST_W, DST_W, W, lsl #dst_bpp_shift
+.endif
+.if (src_bpp != 24) && (src_bpp != 0)
+ sub SRC, SRC, W, lsl #src_bpp_shift
+.endif
+.if (mask_bpp != 24) && (mask_bpp != 0)
+ sub MASK, MASK, W, lsl #mask_bpp_shift
+.endif
+ subs H, H, #1
+ mov DST_R, DST_W
+.if regs_shortage
+ str H, [sp, #4] /* save updated height to stack */
+.endif
+ bge start_of_loop_label
+.endm
+
+/*
+ * Registers are allocated in the following way by default:
+ * d0, d1, d2, d3 - reserved for loading source pixel data
+ * d4, d5, d6, d7 - reserved for loading destination pixel data
+ * d24, d25, d26, d27 - reserved for loading mask pixel data
+ * d28, d29, d30, d31 - final destination pixel data for writeback to memory
+ */
+.macro generate_composite_function fname, \
+ src_bpp_, \
+ mask_bpp_, \
+ dst_w_bpp_, \
+ flags, \
+ pixblock_size_, \
+ prefetch_distance, \
+ init, \
+ cleanup, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head, \
+ dst_w_basereg_ = 28, \
+ dst_r_basereg_ = 4, \
+ src_basereg_ = 0, \
+ mask_basereg_ = 24
+
+ .func fname
+ .global fname
+ /* For ELF format also set function visibility to hidden */
+#ifdef __ELF__
+ .hidden fname
+ .type fname, %function
+#endif
+fname:
+ push {r4-r12, lr} /* save all registers */
+
+/*
+ * Select prefetch type for this function. If prefetch distance is
+ * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch
+ * has to be used instead of ADVANCED.
+ */
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
+.if prefetch_distance == 0
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
+ ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
+.endif
+
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+ .set src_bpp, src_bpp_
+ .set mask_bpp, mask_bpp_
+ .set dst_w_bpp, dst_w_bpp_
+ .set pixblock_size, pixblock_size_
+ .set dst_w_basereg, dst_w_basereg_
+ .set dst_r_basereg, dst_r_basereg_
+ .set src_basereg, src_basereg_
+ .set mask_basereg, mask_basereg_
+
+/*
+ * Assign symbolic names to registers
+ */
+ W .req r0 /* width (is updated during processing) */
+ H .req r1 /* height (is updated during processing) */
+ DST_W .req r2 /* destination buffer pointer for writes */
+ DST_STRIDE .req r3 /* destination image stride */
+ SRC .req r4 /* source buffer pointer */
+ SRC_STRIDE .req r5 /* source image stride */
+ DST_R .req r6 /* destination buffer pointer for reads */
+
+ MASK .req r7 /* mask pointer */
+ MASK_STRIDE .req r8 /* mask stride */
+
+ PF_CTL .req r9 /* combined lines counter and prefetch */
+ /* distance increment counter */
+ PF_X .req r10 /* pixel index in a scanline for current */
+ /* prefetch position */
+ PF_SRC .req r11 /* pointer to source scanline start */
+ /* for prefetch purposes */
+ PF_DST .req r12 /* pointer to destination scanline start */
+ /* for prefetch purposes */
+ PF_MASK .req r14 /* pointer to mask scanline start */
+ /* for prefetch purposes */
+/*
+ * Check whether we have enough registers for all the local variables.
+ * If we don't have enough registers, original width and height are
+ * kept on top of stack (and 'regs_shortage' variable is set to indicate
+ * this for the rest of code). Even if there are enough registers, the
+ * allocation scheme may be a bit different depending on whether source
+ * or mask is not used.
+ */
+.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
+ ORIG_W .req r10 /* saved original width */
+ DUMMY .req r12 /* temporary register */
+ .set regs_shortage, 0
+.elseif mask_bpp == 0
+ ORIG_W .req r7 /* saved original width */
+ DUMMY .req r8 /* temporary register */
+ .set regs_shortage, 0
+.elseif src_bpp == 0
+ ORIG_W .req r4 /* saved original width */
+ DUMMY .req r5 /* temporary register */
+ .set regs_shortage, 0
+.else
+ ORIG_W .req r1 /* saved original width */
+ DUMMY .req r1 /* temporary register */
+ .set regs_shortage, 1
+.endif
+
+ .set mask_bpp_shift, -1
+.if src_bpp == 32
+ .set src_bpp_shift, 2
+.elseif src_bpp == 24
+ .set src_bpp_shift, 0
+.elseif src_bpp == 16
+ .set src_bpp_shift, 1
+.elseif src_bpp == 8
+ .set src_bpp_shift, 0
+.elseif src_bpp == 0
+ .set src_bpp_shift, -1
+.else
+ .error "requested src bpp (src_bpp) is not supported"
+.endif
+.if mask_bpp == 32
+ .set mask_bpp_shift, 2
+.elseif mask_bpp == 24
+ .set mask_bpp_shift, 0
+.elseif mask_bpp == 8
+ .set mask_bpp_shift, 0
+.elseif mask_bpp == 0
+ .set mask_bpp_shift, -1
+.else
+ .error "requested mask bpp (mask_bpp) is not supported"
+.endif
+.if dst_w_bpp == 32
+ .set dst_bpp_shift, 2
+.elseif dst_w_bpp == 24
+ .set dst_bpp_shift, 0
+.elseif dst_w_bpp == 16
+ .set dst_bpp_shift, 1
+.elseif dst_w_bpp == 8
+ .set dst_bpp_shift, 0
+.else
+ .error "requested dst bpp (dst_w_bpp) is not supported"
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+ .set dst_r_bpp, dst_w_bpp
+.else
+ .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+ .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+ .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+.if prefetch_distance < 0 || prefetch_distance > 15
+ .error "invalid prefetch distance (prefetch_distance)"
+.endif
+
+.if src_bpp > 0
+ ldr SRC, [sp, #40]
+.endif
+.if mask_bpp > 0
+ ldr MASK, [sp, #48]
+.endif
+ PF mov PF_X, #0
+.if src_bpp > 0
+ ldr SRC_STRIDE, [sp, #44]
+.endif
+.if mask_bpp > 0
+ ldr MASK_STRIDE, [sp, #52]
+.endif
+ mov DST_R, DST_W
+
+.if src_bpp == 24
+ sub SRC_STRIDE, SRC_STRIDE, W
+ sub SRC_STRIDE, SRC_STRIDE, W, lsl #1
+.endif
+.if mask_bpp == 24
+ sub MASK_STRIDE, MASK_STRIDE, W
+ sub MASK_STRIDE, MASK_STRIDE, W, lsl #1
+.endif
+.if dst_w_bpp == 24
+ sub DST_STRIDE, DST_STRIDE, W
+ sub DST_STRIDE, DST_STRIDE, W, lsl #1
+.endif
+
+/*
+ * Setup advanced prefetcher initial state
+ */
+ PF mov PF_SRC, SRC
+ PF mov PF_DST, DST_R
+ PF mov PF_MASK, MASK
+ /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
+ PF mov PF_CTL, H, lsl #4
+ PF add PF_CTL, #(prefetch_distance - 0x10)
+
+ init
+.if regs_shortage
+ push {r0, r1}
+.endif
+ subs H, H, #1
+.if regs_shortage
+ str H, [sp, #4] /* save updated height to stack */
+.else
+ mov ORIG_W, W
+.endif
+ blt 9f
+ cmp W, #(pixblock_size * 2)
+ blt 8f
+/*
+ * This is the start of the pipelined loop, which is optimized for
+ * long scanlines.
+ */
+0:
+ ensure_destination_ptr_alignment process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+
+ /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+ pixld_a pixblock_size, dst_r_bpp, \
+ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+ pixld pixblock_size, src_bpp, \
+ (src_basereg - pixblock_size * src_bpp / 64), SRC
+ pixld pixblock_size, mask_bpp, \
+ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+ PF add PF_X, PF_X, #pixblock_size
+ process_pixblock_head
+ cache_preload 0, pixblock_size
+ cache_preload_simple
+ subs W, W, #(pixblock_size * 2)
+ blt 2f
+1:
+ process_pixblock_tail_head
+ cache_preload_simple
+ subs W, W, #pixblock_size
+ bge 1b
+2:
+ process_pixblock_tail
+ pixst_a pixblock_size, dst_w_bpp, \
+ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+
+ /* Process the remaining trailing pixels in the scanline */
+ process_trailing_pixels 1, 1, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+ advance_to_next_scanline 0b
+
+.if regs_shortage
+ pop {r0, r1}
+.endif
+ cleanup
+ pop {r4-r12, pc} /* exit */
+/*
+ * This is the start of the loop, designed to process images with small width
+ * (less than pixblock_size * 2 pixels). In this case neither pipelining
+ * nor prefetch is used.
+ */
+8:
+ /* Process exactly pixblock_size pixels if needed */
+ tst W, #pixblock_size
+ beq 1f
+ pixld pixblock_size, dst_r_bpp, \
+ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+ pixld pixblock_size, src_bpp, \
+ (src_basereg - pixblock_size * src_bpp / 64), SRC
+ pixld pixblock_size, mask_bpp, \
+ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+ process_pixblock_head
+ process_pixblock_tail
+ pixst pixblock_size, dst_w_bpp, \
+ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+1:
+ /* Process the remaining trailing pixels in the scanline */
+ process_trailing_pixels 0, 0, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+ advance_to_next_scanline 8b
+9:
+.if regs_shortage
+ pop {r0, r1}
+.endif
+ cleanup
+ pop {r4-r12, pc} /* exit */
+
+ .unreq SRC
+ .unreq MASK
+ .unreq DST_R
+ .unreq DST_W
+ .unreq ORIG_W
+ .unreq W
+ .unreq H
+ .unreq SRC_STRIDE
+ .unreq DST_STRIDE
+ .unreq MASK_STRIDE
+ .unreq PF_CTL
+ .unreq PF_X
+ .unreq PF_SRC
+ .unreq PF_DST
+ .unreq PF_MASK
+ .unreq DUMMY
+ .endfunc
+.endm
+
+.macro default_init
+.endm
+
+.macro default_cleanup
+.endm
diff --git a/pixman/pixman/pixman-arm-neon.c b/pixman/pixman/pixman-arm-neon.c
index 8a2d72ea3..2ed8b4bdf 100644
--- a/pixman/pixman/pixman-arm-neon.c
+++ b/pixman/pixman/pixman-arm-neon.c
@@ -30,1670 +30,223 @@
#include <config.h>
#endif
-#include <arm_neon.h>
#include <string.h>
#include "pixman-private.h"
-/* Deal with an intrinsic that is defined differently in GCC */
-#if !defined(__ARMCC_VERSION) && !defined(__pld)
-#define __pld(_x) __builtin_prefetch (_x)
-#endif
-
-static force_inline uint8x8x4_t
-unpack0565 (uint16x8_t rgb)
-{
- uint16x8_t gb, b;
- uint8x8x4_t res;
-
- res.val[3] = vdup_n_u8 (0);
- gb = vshrq_n_u16 (rgb, 5);
- b = vshrq_n_u16 (rgb, 5 + 6);
-
- res.val[0] = vmovn_u16 (rgb); /* get low 5 bits */
- res.val[1] = vmovn_u16 (gb); /* get mid 6 bits */
- res.val[2] = vmovn_u16 (b); /* get top 5 bits */
-
- res.val[0] = vshl_n_u8 (res.val[0], 3); /* shift to top */
- res.val[1] = vshl_n_u8 (res.val[1], 2); /* shift to top */
- res.val[2] = vshl_n_u8 (res.val[2], 3); /* shift to top */
-
- res.val[0] = vsri_n_u8 (res.val[0], res.val[0], 5);
- res.val[1] = vsri_n_u8 (res.val[1], res.val[1], 6);
- res.val[2] = vsri_n_u8 (res.val[2], res.val[2], 5);
-
- return res;
-}
-
-#ifdef USE_GCC_INLINE_ASM
-/* Some versions of gcc have problems with vshll_n_u8 intrinsic (Bug 23576) */
-#define vshll_n_u8(a, n) ({ uint16x8_t r; \
- asm ("vshll.u8 %q0, %P1, %2\n" : "=w" (r) : "w" (a), "i" (n)); r; })
-#endif
-
-static force_inline uint16x8_t
-pack0565 (uint8x8x4_t s)
-{
- uint16x8_t rgb, val_g, val_r;
-
- rgb = vshll_n_u8 (s.val[2], 8);
- val_g = vshll_n_u8 (s.val[1], 8);
- val_r = vshll_n_u8 (s.val[0], 8);
- rgb = vsriq_n_u16 (rgb, val_g, 5);
- rgb = vsriq_n_u16 (rgb, val_r, 5 + 6);
-
- return rgb;
-}
-
-static force_inline uint8x8_t
-neon2mul (uint8x8_t x,
- uint8x8_t alpha)
-{
- uint16x8_t tmp, tmp2;
- uint8x8_t res;
-
- tmp = vmull_u8 (x, alpha);
- tmp2 = vrshrq_n_u16 (tmp, 8);
- res = vraddhn_u16 (tmp, tmp2);
-
- return res;
-}
-
-static force_inline uint8x8x4_t
-neon8mul (uint8x8x4_t x,
- uint8x8_t alpha)
-{
- uint16x8x4_t tmp;
- uint8x8x4_t res;
- uint16x8_t qtmp1, qtmp2;
-
- tmp.val[0] = vmull_u8 (x.val[0], alpha);
- tmp.val[1] = vmull_u8 (x.val[1], alpha);
- tmp.val[2] = vmull_u8 (x.val[2], alpha);
- tmp.val[3] = vmull_u8 (x.val[3], alpha);
-
- qtmp1 = vrshrq_n_u16 (tmp.val[0], 8);
- qtmp2 = vrshrq_n_u16 (tmp.val[1], 8);
- res.val[0] = vraddhn_u16 (tmp.val[0], qtmp1);
- qtmp1 = vrshrq_n_u16 (tmp.val[2], 8);
- res.val[1] = vraddhn_u16 (tmp.val[1], qtmp2);
- qtmp2 = vrshrq_n_u16 (tmp.val[3], 8);
- res.val[2] = vraddhn_u16 (tmp.val[2], qtmp1);
- res.val[3] = vraddhn_u16 (tmp.val[3], qtmp2);
-
- return res;
-}
-
-static force_inline uint8x8x4_t
-neon8qadd (uint8x8x4_t x,
- uint8x8x4_t y)
-{
- uint8x8x4_t res;
-
- res.val[0] = vqadd_u8 (x.val[0], y.val[0]);
- res.val[1] = vqadd_u8 (x.val[1], y.val[1]);
- res.val[2] = vqadd_u8 (x.val[2], y.val[2]);
- res.val[3] = vqadd_u8 (x.val[3], y.val[3]);
-
- return res;
-}
-
-static void
-neon_composite_add_8000_8000 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint8_t *dst_line, *dst;
- uint8_t *src_line, *src;
- int dst_stride, src_stride;
- uint16_t w;
-
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
-
- if (width >= 8)
- {
- /* Use overlapping 8-pixel method */
- while (height--)
- {
- uint8_t *keep_dst = 0;
- uint8x8_t sval, dval, temp;
-
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
-#ifndef USE_GCC_INLINE_ASM
- sval = vld1_u8 ((void *)src);
- dval = vld1_u8 ((void *)dst);
- keep_dst = dst;
-
- temp = vqadd_u8 (dval, sval);
-
- src += (w & 7);
- dst += (w & 7);
- w -= (w & 7);
-
- while (w)
- {
- sval = vld1_u8 ((void *)src);
- dval = vld1_u8 ((void *)dst);
-
- vst1_u8 ((void *)keep_dst, temp);
- keep_dst = dst;
-
- temp = vqadd_u8 (dval, sval);
-
- src += 8;
- dst += 8;
- w -= 8;
- }
-
- vst1_u8 ((void *)keep_dst, temp);
-#else
- asm volatile (
-/* avoid using d8-d15 (q4-q7) aapcs callee-save registers */
- "vld1.8 {d0}, [%[src]]\n\t"
- "vld1.8 {d4}, [%[dst]]\n\t"
- "mov %[keep_dst], %[dst]\n\t"
-
- "and ip, %[w], #7\n\t"
- "add %[src], %[src], ip\n\t"
- "add %[dst], %[dst], ip\n\t"
- "subs %[w], %[w], ip\n\t"
- "b 9f\n\t"
-/* LOOP */
- "2:\n\t"
- "vld1.8 {d0}, [%[src]]!\n\t"
- "vld1.8 {d4}, [%[dst]]!\n\t"
- "vst1.8 {d20}, [%[keep_dst]]\n\t"
- "sub %[keep_dst], %[dst], #8\n\t"
- "subs %[w], %[w], #8\n\t"
- "9:\n\t"
- "vqadd.u8 d20, d0, d4\n\t"
-
- "bne 2b\n\t"
-
- "1:\n\t"
- "vst1.8 {d20}, [%[keep_dst]]\n\t"
-
- : [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "=r" (keep_dst)
- :
- : "ip", "cc", "memory", "d0", "d4",
- "d20"
- );
-#endif
- }
- }
- else
- {
- const uint8_t nil = 0;
- const uint8x8_t vnil = vld1_dup_u8 (&nil);
-
- while (height--)
- {
- uint8x8_t sval = vnil, dval = vnil;
- uint8_t *dst4 = 0, *dst2 = 0;
-
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- if (w & 4)
- {
- sval = vreinterpret_u8_u32 (
- vld1_lane_u32 ((void *)src, vreinterpret_u32_u8 (sval), 1));
- dval = vreinterpret_u8_u32 (
- vld1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (dval), 1));
-
- dst4 = dst;
- src += 4;
- dst += 4;
- }
-
- if (w & 2)
- {
- sval = vreinterpret_u8_u16 (
- vld1_lane_u16 ((void *)src, vreinterpret_u16_u8 (sval), 1));
- dval = vreinterpret_u8_u16 (
- vld1_lane_u16 ((void *)dst, vreinterpret_u16_u8 (dval), 1));
-
- dst2 = dst;
- src += 2;
- dst += 2;
- }
-
- if (w & 1)
- {
- sval = vld1_lane_u8 (src, sval, 1);
- dval = vld1_lane_u8 (dst, dval, 1);
- }
-
- dval = vqadd_u8 (dval, sval);
-
- if (w & 1)
- vst1_lane_u8 (dst, dval, 1);
-
- if (w & 2)
- vst1_lane_u16 ((void *)dst2, vreinterpret_u16_u8 (dval), 1);
-
- if (w & 4)
- vst1_lane_u32 ((void *)dst4, vreinterpret_u32_u8 (dval), 1);
- }
- }
-}
-
-static void
-neon_composite_over_8888_8888 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- int dst_stride, src_stride;
- uint32_t w;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
- if (width >= 8)
- {
- /* Use overlapping 8-pixel method */
- while (height--)
- {
- uint32_t *keep_dst = 0;
- uint8x8x4_t sval, dval, temp;
-
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
-#ifndef USE_GCC_INLINE_ASM
- sval = vld4_u8 ((void *)src);
- dval = vld4_u8 ((void *)dst);
- keep_dst = dst;
-
- temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
- temp = neon8qadd (sval, temp);
-
- src += (w & 7);
- dst += (w & 7);
- w -= (w & 7);
-
- while (w)
- {
- sval = vld4_u8 ((void *)src);
- dval = vld4_u8 ((void *)dst);
-
- vst4_u8 ((void *)keep_dst, temp);
- keep_dst = dst;
-
- temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
- temp = neon8qadd (sval, temp);
-
- src += 8;
- dst += 8;
- w -= 8;
- }
-
- vst4_u8 ((void *)keep_dst, temp);
-#else
- asm volatile (
-/* avoid using d8-d15 (q4-q7) aapcs callee-save registers */
- "vld4.8 {d0-d3}, [%[src]]\n\t"
- "vld4.8 {d4-d7}, [%[dst]]\n\t"
- "mov %[keep_dst], %[dst]\n\t"
-
- "and ip, %[w], #7\n\t"
- "add %[src], %[src], ip, LSL#2\n\t"
- "add %[dst], %[dst], ip, LSL#2\n\t"
- "subs %[w], %[w], ip\n\t"
- "b 9f\n\t"
-/* LOOP */
- "2:\n\t"
- "vld4.8 {d0-d3}, [%[src]]!\n\t"
- "vld4.8 {d4-d7}, [%[dst]]!\n\t"
- "vst4.8 {d20-d23}, [%[keep_dst]]\n\t"
- "sub %[keep_dst], %[dst], #8*4\n\t"
- "subs %[w], %[w], #8\n\t"
- "9:\n\t"
- "vmvn.8 d31, d3\n\t"
- "vmull.u8 q10, d31, d4\n\t"
- "vmull.u8 q11, d31, d5\n\t"
- "vmull.u8 q12, d31, d6\n\t"
- "vmull.u8 q13, d31, d7\n\t"
- "vrshr.u16 q8, q10, #8\n\t"
- "vrshr.u16 q9, q11, #8\n\t"
- "vraddhn.u16 d20, q10, q8\n\t"
- "vraddhn.u16 d21, q11, q9\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vraddhn.u16 d22, q12, q8\n\t"
- "vraddhn.u16 d23, q13, q9\n\t"
-/* result in d20-d23 */
- "vqadd.u8 d20, d0, d20\n\t"
- "vqadd.u8 d21, d1, d21\n\t"
- "vqadd.u8 d22, d2, d22\n\t"
- "vqadd.u8 d23, d3, d23\n\t"
-
- "bne 2b\n\t"
-
- "1:\n\t"
- "vst4.8 {d20-d23}, [%[keep_dst]]\n\t"
-
- : [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "=r" (keep_dst)
- :
- : "ip", "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23"
- );
-#endif
- }
- }
- else
- {
- uint8x8_t alpha_selector = vreinterpret_u8_u64 (
- vcreate_u64 (0x0707070703030303ULL));
-
- /* Handle width < 8 */
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w >= 2)
- {
- uint8x8_t sval, dval;
-
- /* two 32-bit pixels packed into D-reg; ad-hoc vectorization */
- sval = vreinterpret_u8_u32 (vld1_u32 ((void *)src));
- dval = vreinterpret_u8_u32 (vld1_u32 ((void *)dst));
- dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
- vst1_u8 ((void *)dst, vqadd_u8 (sval, dval));
-
- src += 2;
- dst += 2;
- w -= 2;
- }
-
- if (w)
- {
- uint8x8_t sval, dval;
-
- /* single 32-bit pixel in lane 0 */
- sval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)src)); /* only interested in lane 0 */
- dval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)dst)); /* only interested in lane 0 */
- dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
- vst1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (vqadd_u8 (sval, dval)), 0);
- }
- }
- }
-}
-
-static void
-neon_composite_over_8888_n_8888 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- uint32_t mask;
- int dst_stride, src_stride;
- uint32_t w;
- uint8x8_t mask_alpha;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
- mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
- mask_alpha = vdup_n_u8 ((mask) >> 24);
-
- if (width >= 8)
- {
- /* Use overlapping 8-pixel method */
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- uint32_t *keep_dst = 0;
-
-#ifndef USE_GCC_INLINE_ASM
- uint8x8x4_t sval, dval, temp;
-
- sval = vld4_u8 ((void *)src);
- dval = vld4_u8 ((void *)dst);
- keep_dst = dst;
-
- sval = neon8mul (sval, mask_alpha);
- temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
- temp = neon8qadd (sval, temp);
-
- src += (w & 7);
- dst += (w & 7);
- w -= (w & 7);
-
- while (w)
- {
- sval = vld4_u8 ((void *)src);
- dval = vld4_u8 ((void *)dst);
-
- vst4_u8 ((void *)keep_dst, temp);
- keep_dst = dst;
-
- sval = neon8mul (sval, mask_alpha);
- temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
- temp = neon8qadd (sval, temp);
-
- src += 8;
- dst += 8;
- w -= 8;
- }
- vst4_u8 ((void *)keep_dst, temp);
-#else
- asm volatile (
-/* avoid using d8-d15 (q4-q7) aapcs callee-save registers */
- "vdup.32 d30, %[mask]\n\t"
- "vdup.8 d30, d30[3]\n\t"
-
- "vld4.8 {d0-d3}, [%[src]]\n\t"
- "vld4.8 {d4-d7}, [%[dst]]\n\t"
- "mov %[keep_dst], %[dst]\n\t"
-
- "and ip, %[w], #7\n\t"
- "add %[src], %[src], ip, LSL#2\n\t"
- "add %[dst], %[dst], ip, LSL#2\n\t"
- "subs %[w], %[w], ip\n\t"
- "b 9f\n\t"
-/* LOOP */
- "2:\n\t"
- "vld4.8 {d0-d3}, [%[src]]!\n\t"
- "vld4.8 {d4-d7}, [%[dst]]!\n\t"
- "vst4.8 {d20-d23}, [%[keep_dst]]\n\t"
- "sub %[keep_dst], %[dst], #8*4\n\t"
- "subs %[w], %[w], #8\n\t"
-
- "9:\n\t"
- "vmull.u8 q10, d30, d0\n\t"
- "vmull.u8 q11, d30, d1\n\t"
- "vmull.u8 q12, d30, d2\n\t"
- "vmull.u8 q13, d30, d3\n\t"
- "vrshr.u16 q8, q10, #8\n\t"
- "vrshr.u16 q9, q11, #8\n\t"
- "vraddhn.u16 d0, q10, q8\n\t"
- "vraddhn.u16 d1, q11, q9\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d3, q13, q9\n\t"
- "vraddhn.u16 d2, q12, q8\n\t"
-
- "vmvn.8 d31, d3\n\t"
- "vmull.u8 q10, d31, d4\n\t"
- "vmull.u8 q11, d31, d5\n\t"
- "vmull.u8 q12, d31, d6\n\t"
- "vmull.u8 q13, d31, d7\n\t"
- "vrshr.u16 q8, q10, #8\n\t"
- "vrshr.u16 q9, q11, #8\n\t"
- "vraddhn.u16 d20, q10, q8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d21, q11, q9\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vraddhn.u16 d22, q12, q8\n\t"
- "vraddhn.u16 d23, q13, q9\n\t"
-
-/* result in d20-d23 */
- "vqadd.u8 d20, d0, d20\n\t"
- "vqadd.u8 d21, d1, d21\n\t"
- "vqadd.u8 d22, d2, d22\n\t"
- "vqadd.u8 d23, d3, d23\n\t"
-
- "bne 2b\n\t"
-
- "1:\n\t"
- "vst4.8 {d20-d23}, [%[keep_dst]]\n\t"
-
- : [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "=r" (keep_dst)
- : [mask] "r" (mask)
- : "ip", "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27",
- "d30", "d31"
- );
-#endif
- }
- }
- else
- {
- uint8x8_t alpha_selector = vreinterpret_u8_u64 (vcreate_u64 (0x0707070703030303ULL));
-
- /* Handle width < 8 */
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w >= 2)
- {
- uint8x8_t sval, dval;
-
- sval = vreinterpret_u8_u32 (vld1_u32 ((void *)src));
- dval = vreinterpret_u8_u32 (vld1_u32 ((void *)dst));
-
- /* sval * const alpha_mul */
- sval = neon2mul (sval, mask_alpha);
-
- /* dval * 255-(src alpha) */
- dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
-
- vst1_u8 ((void *)dst, vqadd_u8 (sval, dval));
-
- src += 2;
- dst += 2;
- w -= 2;
- }
-
- if (w)
- {
- uint8x8_t sval, dval;
-
- sval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)src));
- dval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)dst));
-
- /* sval * const alpha_mul */
- sval = neon2mul (sval, mask_alpha);
-
- /* dval * 255-(src alpha) */
- dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
-
- vst1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (vqadd_u8 (sval, dval)), 0);
- }
- }
- }
+#define BIND_SRC_NULL_DST(name, src_type, src_cnt, dst_type, dst_cnt) \
+void \
+pixman_##name##_asm_neon (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ src_type *src, \
+ int32_t src_stride); \
+ \
+static void \
+neon_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ src_type *src_line; \
+ int32_t dst_stride, src_stride; \
+ \
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
+ src_stride, src_line, src_cnt); \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ \
+ pixman_##name##_asm_neon (width, height, \
+ dst_line, dst_stride, \
+ src_line, src_stride); \
}
-static void
-neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint16_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint32_t w;
- uint8x8_t sval2;
- uint8x8x4_t sval8;
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- srca = src >> 24;
- if (src == 0)
- return;
-
- sval2=vreinterpret_u8_u32 (vdup_n_u32 (src));
- sval8.val[0]=vdup_lane_u8 (sval2,0);
- sval8.val[1]=vdup_lane_u8 (sval2,1);
- sval8.val[2]=vdup_lane_u8 (sval2,2);
- sval8.val[3]=vdup_lane_u8 (sval2,3);
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- if (width>=8)
- {
- /* Use overlapping 8-pixel method, modified to avoid rewritten dest being reused */
- while (height--)
- {
- uint16_t *keep_dst=0;
-
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
-#ifndef USE_GCC_INLINE_ASM
- uint8x8_t alpha;
- uint16x8_t dval, temp;
- uint8x8x4_t sval8temp;
-
- alpha = vld1_u8 ((void *)mask);
- dval = vld1q_u16 ((void *)dst);
- keep_dst = dst;
-
- sval8temp = neon8mul (sval8, alpha);
- temp = pack0565 (neon8qadd (sval8temp, neon8mul (unpack0565 (dval), vmvn_u8 (sval8temp.val[3]))));
-
- mask += (w & 7);
- dst += (w & 7);
- w -= (w & 7);
-
- while (w)
- {
- dval = vld1q_u16 ((void *)dst);
- alpha = vld1_u8 ((void *)mask);
-
- vst1q_u16 ((void *)keep_dst, temp);
- keep_dst = dst;
-
- sval8temp = neon8mul (sval8, alpha);
- temp = pack0565 (neon8qadd (sval8temp, neon8mul (unpack0565 (dval), vmvn_u8 (sval8temp.val[3]))));
-
- mask+=8;
- dst+=8;
- w-=8;
- }
- vst1q_u16 ((void *)keep_dst, temp);
-#else
- asm volatile (
- "vdup.32 d0, %[src]\n\t"
- "vdup.8 d1, d0[1]\n\t"
- "vdup.8 d2, d0[2]\n\t"
- "vdup.8 d3, d0[3]\n\t"
- "vdup.8 d0, d0[0]\n\t"
-
- "vld1.8 {q12}, [%[dst]]\n\t"
- "vld1.8 {d31}, [%[mask]]\n\t"
- "mov %[keep_dst], %[dst]\n\t"
-
- "and ip, %[w], #7\n\t"
- "add %[mask], %[mask], ip\n\t"
- "add %[dst], %[dst], ip, LSL#1\n\t"
- "subs %[w], %[w], ip\n\t"
- "b 9f\n\t"
-/* LOOP */
- "2:\n\t"
-
- "vld1.16 {q12}, [%[dst]]!\n\t"
- "vld1.8 {d31}, [%[mask]]!\n\t"
- "vst1.16 {q10}, [%[keep_dst]]\n\t"
- "sub %[keep_dst], %[dst], #8*2\n\t"
- "subs %[w], %[w], #8\n\t"
- "9:\n\t"
-/* expand 0565 q12 to 8888 {d4-d7} */
- "vmovn.u16 d4, q12\t\n"
- "vshr.u16 q11, q12, #5\t\n"
- "vshr.u16 q10, q12, #6+5\t\n"
- "vmovn.u16 d5, q11\t\n"
- "vmovn.u16 d6, q10\t\n"
- "vshl.u8 d4, d4, #3\t\n"
- "vshl.u8 d5, d5, #2\t\n"
- "vshl.u8 d6, d6, #3\t\n"
- "vsri.u8 d4, d4, #5\t\n"
- "vsri.u8 d5, d5, #6\t\n"
- "vsri.u8 d6, d6, #5\t\n"
-
- "vmull.u8 q10, d31, d0\n\t"
- "vmull.u8 q11, d31, d1\n\t"
- "vmull.u8 q12, d31, d2\n\t"
- "vmull.u8 q13, d31, d3\n\t"
- "vrshr.u16 q8, q10, #8\n\t"
- "vrshr.u16 q9, q11, #8\n\t"
- "vraddhn.u16 d20, q10, q8\n\t"
- "vraddhn.u16 d21, q11, q9\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d23, q13, q9\n\t"
- "vraddhn.u16 d22, q12, q8\n\t"
-
-/* duplicate in 4/2/1 & 8pix vsns */
- "vmvn.8 d30, d23\n\t"
- "vmull.u8 q14, d30, d6\n\t"
- "vmull.u8 q13, d30, d5\n\t"
- "vmull.u8 q12, d30, d4\n\t"
- "vrshr.u16 q8, q14, #8\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vraddhn.u16 d6, q14, q8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d5, q13, q9\n\t"
- "vqadd.u8 d6, d6, d22\n\t" /* moved up */
- "vraddhn.u16 d4, q12, q8\n\t"
-/* intentionally don't calculate alpha */
-/* result in d4-d6 */
-
-/* "vqadd.u8 d6, d6, d22\n\t" ** moved up */
- "vqadd.u8 d5, d5, d21\n\t"
- "vqadd.u8 d4, d4, d20\n\t"
-
-/* pack 8888 {d20-d23} to 0565 q10 */
- "vshll.u8 q10, d6, #8\n\t"
- "vshll.u8 q3, d5, #8\n\t"
- "vshll.u8 q2, d4, #8\n\t"
- "vsri.u16 q10, q3, #5\t\n"
- "vsri.u16 q10, q2, #11\t\n"
-
- "bne 2b\n\t"
-
- "1:\n\t"
- "vst1.16 {q10}, [%[keep_dst]]\n\t"
-
- : [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [keep_dst] "=r" (keep_dst)
- : [src] "r" (src)
- : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
- "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
- "d30","d31"
- );
-#endif
- }
- }
- else
- {
- while (height--)
- {
- void *dst4=0, *dst2=0;
-
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
-
-#if 1 /* #ifndef USE_GCC_INLINE_ASM */
- uint8x8_t alpha;
- uint16x8_t dval, temp;
- uint8x8x4_t sval8temp;
-
- if (w&4)
- {
- alpha = vreinterpret_u8_u32 (vld1_lane_u32 ((void *)mask, vreinterpret_u32_u8 (alpha),1));
- dval = vreinterpretq_u16_u64 (vld1q_lane_u64 ((void *)dst, vreinterpretq_u64_u16 (dval),1));
- dst4=dst;
- mask+=4;
- dst+=4;
- }
- if (w&2)
- {
- alpha = vreinterpret_u8_u16 (vld1_lane_u16 ((void *)mask, vreinterpret_u16_u8 (alpha),1));
- dval = vreinterpretq_u16_u32 (vld1q_lane_u32 ((void *)dst, vreinterpretq_u32_u16 (dval),1));
- dst2=dst;
- mask+=2;
- dst+=2;
- }
- if (w&1)
- {
- alpha = vld1_lane_u8 ((void *)mask, alpha,1);
- dval = vld1q_lane_u16 ((void *)dst, dval,1);
- }
-
- sval8temp = neon8mul (sval8, alpha);
- temp = pack0565 (neon8qadd (sval8temp, neon8mul (unpack0565 (dval), vmvn_u8 (sval8temp.val[3]))));
-
- if (w&1)
- vst1q_lane_u16 ((void *)dst, temp,1);
- if (w&2)
- vst1q_lane_u32 ((void *)dst2, vreinterpretq_u32_u16 (temp),1);
- if (w&4)
- vst1q_lane_u64 ((void *)dst4, vreinterpretq_u64_u16 (temp),1);
-#else
- /* this code has some bug (does not pass blitters-test) */
- asm volatile (
- "vdup.32 d0, %[src]\n\t"
- "vdup.8 d1, d0[1]\n\t"
- "vdup.8 d2, d0[2]\n\t"
- "vdup.8 d3, d0[3]\n\t"
- "vdup.8 d0, d0[0]\n\t"
-
- "tst %[w], #4\t\n"
- "beq skip_load4\t\n"
-
- "vld1.64 {d25}, [%[dst]]\n\t"
- "vld1.32 {d31[1]}, [%[mask]]\n\t"
- "mov %[dst4], %[dst]\t\n"
- "add %[mask], %[mask], #4\t\n"
- "add %[dst], %[dst], #4*2\t\n"
-
- "skip_load4:\t\n"
- "tst %[w], #2\t\n"
- "beq skip_load2\t\n"
- "vld1.32 {d24[1]}, [%[dst]]\n\t"
- "vld1.16 {d31[1]}, [%[mask]]\n\t"
- "mov %[dst2], %[dst]\t\n"
- "add %[mask], %[mask], #2\t\n"
- "add %[dst], %[dst], #2*2\t\n"
-
- "skip_load2:\t\n"
- "tst %[w], #1\t\n"
- "beq skip_load1\t\n"
- "vld1.16 {d24[1]}, [%[dst]]\n\t"
- "vld1.8 {d31[1]}, [%[mask]]\n\t"
-
- "skip_load1:\t\n"
-/* expand 0565 q12 to 8888 {d4-d7} */
- "vmovn.u16 d4, q12\t\n"
- "vshr.u16 q11, q12, #5\t\n"
- "vshr.u16 q10, q12, #6+5\t\n"
- "vmovn.u16 d5, q11\t\n"
- "vmovn.u16 d6, q10\t\n"
- "vshl.u8 d4, d4, #3\t\n"
- "vshl.u8 d5, d5, #2\t\n"
- "vshl.u8 d6, d6, #3\t\n"
- "vsri.u8 d4, d4, #5\t\n"
- "vsri.u8 d5, d5, #6\t\n"
- "vsri.u8 d6, d6, #5\t\n"
-
- "vmull.u8 q10, d31, d0\n\t"
- "vmull.u8 q11, d31, d1\n\t"
- "vmull.u8 q12, d31, d2\n\t"
- "vmull.u8 q13, d31, d3\n\t"
- "vrshr.u16 q8, q10, #8\n\t"
- "vrshr.u16 q9, q11, #8\n\t"
- "vraddhn.u16 d20, q10, q8\n\t"
- "vraddhn.u16 d21, q11, q9\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d23, q13, q9\n\t"
- "vraddhn.u16 d22, q12, q8\n\t"
-
-/* duplicate in 4/2/1 & 8pix vsns */
- "vmvn.8 d30, d23\n\t"
- "vmull.u8 q14, d30, d6\n\t"
- "vmull.u8 q13, d30, d5\n\t"
- "vmull.u8 q12, d30, d4\n\t"
- "vrshr.u16 q8, q14, #8\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vraddhn.u16 d6, q14, q8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d5, q13, q9\n\t"
- "vqadd.u8 d6, d6, d22\n\t" /* moved up */
- "vraddhn.u16 d4, q12, q8\n\t"
-/* intentionally don't calculate alpha */
-/* result in d4-d6 */
-
-/* "vqadd.u8 d6, d6, d22\n\t" ** moved up */
- "vqadd.u8 d5, d5, d21\n\t"
- "vqadd.u8 d4, d4, d20\n\t"
-
-/* pack 8888 {d20-d23} to 0565 q10 */
- "vshll.u8 q10, d6, #8\n\t"
- "vshll.u8 q3, d5, #8\n\t"
- "vshll.u8 q2, d4, #8\n\t"
- "vsri.u16 q10, q3, #5\t\n"
- "vsri.u16 q10, q2, #11\t\n"
-
- "tst %[w], #1\n\t"
- "beq skip_store1\t\n"
- "vst1.16 {d20[1]}, [%[dst]]\t\n"
- "skip_store1:\t\n"
- "tst %[w], #2\n\t"
- "beq skip_store2\t\n"
- "vst1.32 {d20[1]}, [%[dst2]]\t\n"
- "skip_store2:\t\n"
- "tst %[w], #4\n\t"
- "beq skip_store4\t\n"
- "vst1.16 {d21}, [%[dst4]]\t\n"
- "skip_store4:\t\n"
-
- : [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [dst4] "+r" (dst4), [dst2] "+r" (dst2)
- : [src] "r" (src)
- : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
- "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
- "d30","d31"
- );
-#endif
- }
- }
+#define BIND_N_MASK_DST(name, mask_type, mask_cnt, dst_type, dst_cnt) \
+void \
+pixman_##name##_asm_neon (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ uint32_t src, \
+ int32_t unused, \
+ mask_type *mask, \
+ int32_t mask_stride); \
+ \
+static void \
+neon_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ mask_type *mask_line; \
+ int32_t dst_stride, mask_stride; \
+ uint32_t src; \
+ \
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format); \
+ \
+ if (src == 0) \
+ return; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \
+ mask_stride, mask_line, mask_cnt); \
+ \
+ pixman_##name##_asm_neon (width, height, \
+ dst_line, dst_stride, \
+ src, 0, \
+ mask_line, mask_stride); \
}
-static void
-neon_composite_over_n_8_8888 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint32_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint32_t w;
- uint8x8_t sval2;
- uint8x8x4_t sval8;
- uint8x8_t mask_selector = vreinterpret_u8_u64 (vcreate_u64 (0x0101010100000000ULL));
- uint8x8_t alpha_selector = vreinterpret_u8_u64 (vcreate_u64 (0x0707070703030303ULL));
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- /* bail out if fully transparent */
- srca = src >> 24;
- if (src == 0)
- return;
-
- sval2 = vreinterpret_u8_u32 (vdup_n_u32 (src));
- sval8.val[0] = vdup_lane_u8 (sval2, 0);
- sval8.val[1] = vdup_lane_u8 (sval2, 1);
- sval8.val[2] = vdup_lane_u8 (sval2, 2);
- sval8.val[3] = vdup_lane_u8 (sval2, 3);
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- if (width >= 8)
- {
- /* Use overlapping 8-pixel method, modified to avoid
- * rewritten dest being reused
- */
- while (height--)
- {
- uint32_t *keep_dst = 0;
-
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
-#ifndef USE_GCC_INLINE_ASM
- uint8x8_t alpha;
- uint8x8x4_t dval, temp;
-
- alpha = vld1_u8 ((void *)mask);
- dval = vld4_u8 ((void *)dst);
- keep_dst = dst;
-
- temp = neon8mul (sval8, alpha);
- dval = neon8mul (dval, vmvn_u8 (temp.val[3]));
- temp = neon8qadd (temp, dval);
-
- mask += (w & 7);
- dst += (w & 7);
- w -= (w & 7);
-
- while (w)
- {
- alpha = vld1_u8 ((void *)mask);
- dval = vld4_u8 ((void *)dst);
-
- vst4_u8 ((void *)keep_dst, temp);
- keep_dst = dst;
-
- temp = neon8mul (sval8, alpha);
- dval = neon8mul (dval, vmvn_u8 (temp.val[3]));
- temp = neon8qadd (temp, dval);
-
- mask += 8;
- dst += 8;
- w -= 8;
- }
- vst4_u8 ((void *)keep_dst, temp);
-#else
- asm volatile (
- "vdup.32 d0, %[src]\n\t"
- "vdup.8 d1, d0[1]\n\t"
- "vdup.8 d2, d0[2]\n\t"
- "vdup.8 d3, d0[3]\n\t"
- "vdup.8 d0, d0[0]\n\t"
-
- "vld4.8 {d4-d7}, [%[dst]]\n\t"
- "vld1.8 {d31}, [%[mask]]\n\t"
- "mov %[keep_dst], %[dst]\n\t"
-
- "and ip, %[w], #7\n\t"
- "add %[mask], %[mask], ip\n\t"
- "add %[dst], %[dst], ip, LSL#2\n\t"
- "subs %[w], %[w], ip\n\t"
- "b 9f\n\t"
-/* LOOP */
- "2:\n\t"
- "vld4.8 {d4-d7}, [%[dst]]!\n\t"
- "vld1.8 {d31}, [%[mask]]!\n\t"
- "vst4.8 {d20-d23}, [%[keep_dst]]\n\t"
- "sub %[keep_dst], %[dst], #8*4\n\t"
- "subs %[w], %[w], #8\n\t"
- "9:\n\t"
-
- "vmull.u8 q10, d31, d0\n\t"
- "vmull.u8 q11, d31, d1\n\t"
- "vmull.u8 q12, d31, d2\n\t"
- "vmull.u8 q13, d31, d3\n\t"
- "vrshr.u16 q8, q10, #8\n\t"
- "vrshr.u16 q9, q11, #8\n\t"
- "vraddhn.u16 d20, q10, q8\n\t"
- "vraddhn.u16 d21, q11, q9\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vrshr.u16 q8, q12, #8\n\t"
- "vraddhn.u16 d23, q13, q9\n\t"
- "vraddhn.u16 d22, q12, q8\n\t"
-
- "vmvn.8 d30, d23\n\t"
- "vmull.u8 q12, d30, d4\n\t"
- "vmull.u8 q13, d30, d5\n\t"
- "vmull.u8 q14, d30, d6\n\t"
- "vmull.u8 q15, d30, d7\n\t"
-
- "vrshr.u16 q8, q12, #8\n\t"
- "vrshr.u16 q9, q13, #8\n\t"
- "vraddhn.u16 d4, q12, q8\n\t"
- "vrshr.u16 q8, q14, #8\n\t"
- "vraddhn.u16 d5, q13, q9\n\t"
- "vrshr.u16 q9, q15, #8\n\t"
- "vraddhn.u16 d6, q14, q8\n\t"
- "vraddhn.u16 d7, q15, q9\n\t"
-/* result in d4-d7 */
-
- "vqadd.u8 d20, d4, d20\n\t"
- "vqadd.u8 d21, d5, d21\n\t"
- "vqadd.u8 d22, d6, d22\n\t"
- "vqadd.u8 d23, d7, d23\n\t"
-
- "bne 2b\n\t"
-
- "1:\n\t"
- "vst4.8 {d20-d23}, [%[keep_dst]]\n\t"
-
- : [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [keep_dst] "=r" (keep_dst)
- : [src] "r" (src)
- : "ip", "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
- "d30", "d31"
- );
-#endif
- }
- }
- else
- {
- while (height--)
- {
- uint8x8_t alpha;
-
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- while (w >= 2)
- {
- uint8x8_t dval, temp, res;
-
- alpha = vtbl1_u8 (
- vreinterpret_u8_u16 (vld1_dup_u16 ((void *)mask)), mask_selector);
- dval = vld1_u8 ((void *)dst);
-
- temp = neon2mul (sval2, alpha);
- res = vqadd_u8 (
- temp, neon2mul (dval, vtbl1_u8 (vmvn_u8 (temp), alpha_selector)));
-
- vst1_u8 ((void *)dst, res);
-
- mask += 2;
- dst += 2;
- w -= 2;
- }
-
- if (w)
- {
- uint8x8_t dval, temp, res;
-
- alpha = vtbl1_u8 (vld1_dup_u8 ((void *)mask), mask_selector);
- dval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)dst));
-
- temp = neon2mul (sval2, alpha);
- res = vqadd_u8 (
- temp, neon2mul (dval, vtbl1_u8 (vmvn_u8 (temp), alpha_selector)));
-
- vst1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (res), 0);
- }
- }
- }
-}
-
-static void
-neon_composite_add_8888_8_8 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint8_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint32_t w;
- uint32_t src;
- uint8x8_t sa;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
- sa = vdup_n_u8 ((src) >> 24);
-
- if (width >= 8)
- {
- /* Use overlapping 8-pixel method, modified to avoid rewritten dest being reused */
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- uint8x8_t mval, dval, res;
- uint8_t *keep_dst;
-
- mval = vld1_u8 ((void *)mask);
- dval = vld1_u8 ((void *)dst);
- keep_dst = dst;
-
- res = vqadd_u8 (neon2mul (mval, sa), dval);
-
- mask += (w & 7);
- dst += (w & 7);
- w -= w & 7;
-
- while (w)
- {
- mval = vld1_u8 ((void *)mask);
- dval = vld1_u8 ((void *)dst);
- vst1_u8 ((void *)keep_dst, res);
- keep_dst = dst;
-
- res = vqadd_u8 (neon2mul (mval, sa), dval);
-
- mask += 8;
- dst += 8;
- w -= 8;
- }
- vst1_u8 ((void *)keep_dst, res);
- }
- }
- else
- {
- /* Use 4/2/1 load/store method to handle 1-7 pixels */
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- uint8x8_t mval = sa, dval = sa, res;
- uint8_t *dst4 = 0, *dst2 = 0;
-
- if (w & 4)
- {
- mval = vreinterpret_u8_u32 (
- vld1_lane_u32 ((void *)mask, vreinterpret_u32_u8 (mval), 1));
- dval = vreinterpret_u8_u32 (
- vld1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (dval), 1));
-
- dst4 = dst;
- mask += 4;
- dst += 4;
- }
-
- if (w & 2)
- {
- mval = vreinterpret_u8_u16 (
- vld1_lane_u16 ((void *)mask, vreinterpret_u16_u8 (mval), 1));
- dval = vreinterpret_u8_u16 (
- vld1_lane_u16 ((void *)dst, vreinterpret_u16_u8 (dval), 1));
- dst2 = dst;
- mask += 2;
- dst += 2;
- }
-
- if (w & 1)
- {
- mval = vld1_lane_u8 (mask, mval, 1);
- dval = vld1_lane_u8 (dst, dval, 1);
- }
-
- res = vqadd_u8 (neon2mul (mval, sa), dval);
-
- if (w & 1)
- vst1_lane_u8 (dst, res, 1);
- if (w & 2)
- vst1_lane_u16 ((void *)dst2, vreinterpret_u16_u8 (res), 1);
- if (w & 4)
- vst1_lane_u32 ((void *)dst4, vreinterpret_u32_u8 (res), 1);
- }
- }
+#define BIND_SRC_N_DST(name, src_type, src_cnt, dst_type, dst_cnt) \
+void \
+pixman_##name##_asm_neon (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ src_type *src, \
+ int32_t src_stride, \
+ uint32_t mask); \
+ \
+static void \
+neon_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ src_type *src_line; \
+ int32_t dst_stride, src_stride; \
+ uint32_t mask; \
+ \
+ mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);\
+ \
+ if (mask == 0) \
+ return; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
+ src_stride, src_line, src_cnt); \
+ \
+ pixman_##name##_asm_neon (width, height, \
+ dst_line, dst_stride, \
+ src_line, src_stride, \
+ mask); \
}
-#ifdef USE_GCC_INLINE_ASM
-
-static void
-neon_composite_src_16_16 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint16_t *dst_line, *src_line;
- uint32_t dst_stride, src_stride;
-
- if (!height || !width)
- return;
-
- /* We simply copy 16-bit-aligned pixels from one place to another. */
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
- /* Preload the first input scanline */
- {
- uint16_t *src_ptr = src_line;
- uint32_t count = width;
-
- asm volatile (
- "0: @ loop \n"
- " subs %[count], %[count], #32 \n"
- " pld [%[src]] \n"
- " add %[src], %[src], #64 \n"
- " bgt 0b \n"
-
- /* Clobbered input registers marked as input/outputs */
- : [src] "+r" (src_ptr), [count] "+r" (count)
- : /* no unclobbered inputs */
- : "cc"
- );
- }
-
- while (height--)
- {
- uint16_t *dst_ptr = dst_line;
- uint16_t *src_ptr = src_line;
- uint32_t count = width;
- uint32_t tmp = 0;
-
- /* Uses multi-register access and preloading to maximise bandwidth.
- * Each pixel is one halfword, so a quadword contains 8px.
- * Preload frequency assumed a 64-byte cacheline.
- */
- asm volatile (
- " cmp %[count], #64 \n"
- " blt 1f @ skip oversized fragments \n"
- "0: @ start with eight quadwords at a time \n"
- /* preload from next scanline */
- " pld [%[src], %[src_stride], LSL #1] \n"
- " sub %[count], %[count], #64 \n"
- " vld1.16 {d16, d17, d18, d19}, [%[src]]! \n"
- " vld1.16 {d20, d21, d22, d23}, [%[src]]! \n"
- /* preload from next scanline */
- " pld [%[src], %[src_stride], LSL #1] \n"
- " vld1.16 {d24, d25, d26, d27}, [%[src]]! \n"
- " vld1.16 {d28, d29, d30, d31}, [%[src]]! \n"
- " cmp %[count], #64 \n"
- " vst1.16 {d16, d17, d18, d19}, [%[dst]]! \n"
- " vst1.16 {d20, d21, d22, d23}, [%[dst]]! \n"
- " vst1.16 {d24, d25, d26, d27}, [%[dst]]! \n"
- " vst1.16 {d28, d29, d30, d31}, [%[dst]]! \n"
- " bge 0b \n"
- " cmp %[count], #0 \n"
- " beq 7f @ aligned fastpath \n"
- "1: @ four quadwords \n"
- " tst %[count], #32 \n"
- " beq 2f @ skip oversized fragment \n"
- /* preload from next scanline */
- " pld [%[src], %[src_stride], LSL #1] \n"
- " vld1.16 {d16, d17, d18, d19}, [%[src]]! \n"
- " vld1.16 {d20, d21, d22, d23}, [%[src]]! \n"
- " vst1.16 {d16, d17, d18, d19}, [%[dst]]! \n"
- " vst1.16 {d20, d21, d22, d23}, [%[dst]]! \n"
- "2: @ two quadwords \n"
- " tst %[count], #16 \n"
- " beq 3f @ skip oversized fragment \n"
- /* preload from next scanline */
- " pld [%[src], %[src_stride], LSL #1] \n"
- " vld1.16 {d16, d17, d18, d19}, [%[src]]! \n"
- " vst1.16 {d16, d17, d18, d19}, [%[dst]]! \n"
- "3: @ one quadword \n"
- " tst %[count], #8 \n"
- " beq 4f @ skip oversized fragment \n"
- " vld1.16 {d16, d17}, [%[src]]! \n"
- " vst1.16 {d16, d17}, [%[dst]]! \n"
- "4: @ one doubleword \n"
- " tst %[count], #4 \n"
- " beq 5f @ skip oversized fragment \n"
- " vld1.16 {d16}, [%[src]]! \n"
- " vst1.16 {d16}, [%[dst]]! \n"
- "5: @ one word \n"
- " tst %[count], #2 \n"
- " beq 6f @ skip oversized fragment \n"
- " ldr %[tmp], [%[src]], #4 \n"
- " str %[tmp], [%[dst]], #4 \n"
- "6: @ one halfword \n"
- " tst %[count], #1 \n"
- " beq 7f @ skip oversized fragment \n"
- " ldrh %[tmp], [%[src]] \n"
- " strh %[tmp], [%[dst]] \n"
- "7: @ end \n"
-
- /* Clobbered input registers marked as input/outputs */
- : [dst] "+r" (dst_ptr), [src] "+r" (src_ptr),
- [count] "+r" (count), [tmp] "+r" (tmp)
-
- /* Unclobbered input */
- : [src_stride] "r" (src_stride)
-
- /* Clobbered vector registers */
- : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory"
- );
-
- src_line += src_stride;
- dst_line += dst_stride;
- }
+#define BIND_SRC_MASK_DST(name, src_type, src_cnt, mask_type, mask_cnt, \
+ dst_type, dst_cnt) \
+void \
+pixman_##name##_asm_neon (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ src_type *src, \
+ int32_t src_stride, \
+ mask_type *mask, \
+ int32_t mask_stride); \
+ \
+static void \
+neon_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ src_type *src_line; \
+ mask_type *mask_line; \
+ int32_t dst_stride, src_stride, mask_stride; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
+ src_stride, src_line, src_cnt); \
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \
+ mask_stride, mask_line, mask_cnt); \
+ \
+ pixman_##name##_asm_neon (width, height, \
+ dst_line, dst_stride, \
+ src_line, src_stride, \
+ mask_line, mask_stride); \
}
-#endif /* USE_GCC_INLINE_ASM */
-
-static void
-neon_composite_src_24_16 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint16_t *dst_line;
- uint32_t *src_line;
- uint32_t dst_stride, src_stride;
-
- if (!width || !height)
- return;
-
- /* We simply copy pixels from one place to another,
- * assuming that the source's alpha is opaque.
- */
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
- /* Preload the first input scanline */
- {
- uint8_t *src_ptr = (uint8_t*) src_line;
- uint32_t count = (width + 15) / 16;
-#ifdef USE_GCC_INLINE_ASM
- asm volatile (
- "0: @ loop \n"
- " subs %[count], %[count], #1 \n"
- " pld [%[src]] \n"
- " add %[src], %[src], #64 \n"
- " bgt 0b \n"
+BIND_SRC_NULL_DST(composite_src_8888_8888, uint32_t, 1, uint32_t, 1)
+BIND_SRC_NULL_DST(composite_src_0565_0565, uint16_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(composite_src_0888_0888, uint8_t, 3, uint8_t, 3)
+BIND_SRC_NULL_DST(composite_src_8888_0565, uint32_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(composite_add_8000_8000, uint8_t, 1, uint8_t, 1)
- /* Clobbered input registers marked as input/outputs */
- : [src] "+r" (src_ptr), [count] "+r" (count)
- : /* no unclobbered inputs */
- : "cc"
- );
-#else
- do
- {
- __pld (src_ptr);
- src_ptr += 64;
- }
- while (--count);
-#endif
- }
-
- while (height--)
- {
- uint16_t *dst_ptr = dst_line;
- uint32_t *src_ptr = src_line;
- uint32_t count = width;
- const uint32_t rb_mask = 0x1F;
- const uint32_t g_mask = 0x3F;
+BIND_SRC_NULL_DST(composite_over_8888_0565, uint32_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(composite_over_8888_8888, uint32_t, 1, uint32_t, 1)
- /* If you're going to complain about a goto, take a long hard look
- * at the massive blocks of assembler this skips over. ;-)
- */
- if (count < 8)
- goto small_stuff;
+BIND_N_MASK_DST(composite_over_n_8_0565, uint8_t, 1, uint16_t, 1)
+BIND_N_MASK_DST(composite_over_n_8_8888, uint8_t, 1, uint32_t, 1)
+BIND_N_MASK_DST(composite_add_n_8_8, uint8_t, 1, uint8_t, 1)
-#ifdef USE_GCC_INLINE_ASM
+BIND_SRC_N_DST(composite_over_8888_n_8888, uint32_t, 1, uint32_t, 1)
- /* This is not as aggressive as the RGB565-source case.
- * Generally the source is in cached RAM when the formats are
- * different, so we use preload.
- *
- * We don't need to blend, so we are not reading from the
- * uncached framebuffer.
- */
- asm volatile (
- " cmp %[count], #16 \n"
- " blt 1f @ skip oversized fragments \n"
- "0: @ start with sixteen pixels at a time \n"
- " sub %[count], %[count], #16 \n"
- " pld [%[src], %[src_stride], lsl #2] @ preload from next scanline \n"
- " vld4.8 {d0, d1, d2, d3}, [%[src]]! @ d3 is alpha and ignored, d2-0 are rgb. \n"
- " vld4.8 {d4, d5, d6, d7}, [%[src]]! @ d7 is alpha and ignored, d6-4 are rgb. \n"
- " vshll.u8 q8, d2, #8 @ expand first red for repacking \n"
- " vshll.u8 q10, d1, #8 @ expand first green for repacking \n"
- " vshll.u8 q11, d0, #8 @ expand first blue for repacking \n"
- " vshll.u8 q9, d6, #8 @ expand second red for repacking \n"
- " vsri.u16 q8, q10, #5 @ insert first green after red \n"
- " vshll.u8 q10, d5, #8 @ expand second green for repacking \n"
- " vsri.u16 q8, q11, #11 @ insert first blue after green \n"
- " vshll.u8 q11, d4, #8 @ expand second blue for repacking \n"
- " vsri.u16 q9, q10, #5 @ insert second green after red \n"
- " vsri.u16 q9, q11, #11 @ insert second blue after green \n"
- " cmp %[count], #16 \n"
- " vst1.16 {d16, d17, d18, d19}, [%[dst]]! @ store 16 pixels \n"
- " bge 0b \n"
- "1: @ end of main loop \n"
- " cmp %[count], #8 @ can we still do an 8-pixel block? \n"
- " blt 2f \n"
- " sub %[count], %[count], #8 \n"
- " pld [%[src], %[src_stride], lsl #2] @ preload from next scanline \n"
- " vld4.8 {d0, d1, d2, d3}, [%[src]]! @ d3 is alpha and ignored, d2-0 are rgb. \n"
- " vshll.u8 q8, d2, #8 @ expand first red for repacking \n"
- " vshll.u8 q10, d1, #8 @ expand first green for repacking \n"
- " vshll.u8 q11, d0, #8 @ expand first blue for repacking \n"
- " vsri.u16 q8, q10, #5 @ insert first green after red \n"
- " vsri.u16 q8, q11, #11 @ insert first blue after green \n"
- " vst1.16 {d16, d17}, [%[dst]]! @ store 8 pixels \n"
- "2: @ end \n"
+BIND_SRC_MASK_DST(composite_add_8_8_8, uint8_t, 1, uint8_t, 1, uint8_t, 1)
- /* Clobbered input and working registers marked as input/outputs */
- : [dst] "+r" (dst_ptr), [src] "+r" (src_ptr), [count] "+r" (count)
+void
+pixman_composite_src_n_8_asm_neon (int32_t w,
+ int32_t h,
+ uint8_t *dst,
+ int32_t dst_stride,
+ uint8_t src);
- /* Unclobbered input */
- : [src_stride] "r" (src_stride)
+void
+pixman_composite_src_n_0565_asm_neon (int32_t w,
+ int32_t h,
+ uint16_t *dst,
+ int32_t dst_stride,
+ uint16_t src);
- /* Clobbered vector registers */
-
- /* NB: these are the quad aliases of the
- * double registers used in the asm
- */
- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d16", "d17",
- "d18", "d19", "d20", "d21", "d22", "d23", "cc", "memory"
- );
-#else
- /* A copy of the above code, in intrinsics-form. */
- while (count >= 16)
- {
- uint8x8x4_t pixel_set_a, pixel_set_b;
- uint16x8_t red_a, green_a, blue_a;
- uint16x8_t red_b, green_b, blue_b;
- uint16x8_t dest_pixels_a, dest_pixels_b;
-
- count -= 16;
- __pld (src_ptr + src_stride);
- pixel_set_a = vld4_u8 ((uint8_t*)(src_ptr));
- pixel_set_b = vld4_u8 ((uint8_t*)(src_ptr + 8));
- src_ptr += 16;
-
- red_a = vshll_n_u8 (pixel_set_a.val[2], 8);
- green_a = vshll_n_u8 (pixel_set_a.val[1], 8);
- blue_a = vshll_n_u8 (pixel_set_a.val[0], 8);
-
- red_b = vshll_n_u8 (pixel_set_b.val[2], 8);
- green_b = vshll_n_u8 (pixel_set_b.val[1], 8);
- blue_b = vshll_n_u8 (pixel_set_b.val[0], 8);
-
- dest_pixels_a = vsriq_n_u16 (red_a, green_a, 5);
- dest_pixels_b = vsriq_n_u16 (red_b, green_b, 5);
-
- dest_pixels_a = vsriq_n_u16 (dest_pixels_a, blue_a, 11);
- dest_pixels_b = vsriq_n_u16 (dest_pixels_b, blue_b, 11);
-
- /* There doesn't seem to be an intrinsic for the
- * double-quadword variant
- */
- vst1q_u16 (dst_ptr, dest_pixels_a);
- vst1q_u16 (dst_ptr + 8, dest_pixels_b);
- dst_ptr += 16;
- }
-
- /* 8-pixel loop */
- if (count >= 8)
- {
- uint8x8x4_t pixel_set_a;
- uint16x8_t red_a, green_a, blue_a;
- uint16x8_t dest_pixels_a;
-
- __pld (src_ptr + src_stride);
- count -= 8;
- pixel_set_a = vld4_u8 ((uint8_t*)(src_ptr));
- src_ptr += 8;
-
- red_a = vshll_n_u8 (pixel_set_a.val[2], 8);
- green_a = vshll_n_u8 (pixel_set_a.val[1], 8);
- blue_a = vshll_n_u8 (pixel_set_a.val[0], 8);
-
- dest_pixels_a = vsriq_n_u16 (red_a, green_a, 5);
- dest_pixels_a = vsriq_n_u16 (dest_pixels_a, blue_a, 11);
-
- vst1q_u16 (dst_ptr, dest_pixels_a);
- dst_ptr += 8;
- }
-
-#endif /* USE_GCC_INLINE_ASM */
-
- small_stuff:
- if (count)
- __pld (src_ptr + src_stride);
-
- while (count >= 2)
- {
- uint32_t src_pixel_a = *src_ptr++;
- uint32_t src_pixel_b = *src_ptr++;
-
- /* ARM is really good at shift-then-ALU ops. */
- /* This should be a total of six shift-ANDs and five shift-ORs. */
- uint32_t dst_pixels_a;
- uint32_t dst_pixels_b;
-
- dst_pixels_a = ((src_pixel_a >> 3) & rb_mask);
- dst_pixels_a |= ((src_pixel_a >> 10) & g_mask) << 5;
- dst_pixels_a |= ((src_pixel_a >> 19) & rb_mask) << 11;
-
- dst_pixels_b = ((src_pixel_b >> 3) & rb_mask);
- dst_pixels_b |= ((src_pixel_b >> 10) & g_mask) << 5;
- dst_pixels_b |= ((src_pixel_b >> 19) & rb_mask) << 11;
-
- /* little-endian mode only */
- *((uint32_t*) dst_ptr) = dst_pixels_a | (dst_pixels_b << 16);
- dst_ptr += 2;
- count -= 2;
- }
-
- if (count)
- {
- uint32_t src_pixel = *src_ptr++;
-
- /* ARM is really good at shift-then-ALU ops.
- * This block should end up as three shift-ANDs
- * and two shift-ORs.
- */
- uint32_t tmp_blue = (src_pixel >> 3) & rb_mask;
- uint32_t tmp_green = (src_pixel >> 10) & g_mask;
- uint32_t tmp_red = (src_pixel >> 19) & rb_mask;
- uint16_t dst_pixel = (tmp_red << 11) | (tmp_green << 5) | tmp_blue;
-
- *dst_ptr++ = dst_pixel;
- count--;
- }
-
- src_line += src_stride;
- dst_line += dst_stride;
- }
-}
+void
+pixman_composite_src_n_8888_asm_neon (int32_t w,
+ int32_t h,
+ uint32_t *dst,
+ int32_t dst_stride,
+ uint32_t src);
static pixman_bool_t
pixman_fill_neon (uint32_t *bits,
@@ -1705,936 +258,70 @@ pixman_fill_neon (uint32_t *bits,
int height,
uint32_t _xor)
{
- uint32_t byte_stride, color;
- char *dst;
-
/* stride is always multiple of 32bit units in pixman */
- byte_stride = stride * sizeof(uint32_t);
+ uint32_t byte_stride = stride * sizeof(uint32_t);
switch (bpp)
{
case 8:
- dst = ((char *) bits) + y * byte_stride + x;
- _xor &= 0xff;
- color = _xor << 24 | _xor << 16 | _xor << 8 | _xor;
- break;
-
+ pixman_composite_src_n_8_asm_neon (
+ width,
+ height,
+ (uint8_t *)(((char *) bits) + y * byte_stride + x),
+ byte_stride,
+ _xor & 0xff);
+ return TRUE;
case 16:
- dst = ((char *) bits) + y * byte_stride + x * 2;
- _xor &= 0xffff;
- color = _xor << 16 | _xor;
- width *= 2; /* width to bytes */
- break;
-
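+ /* The stride is apparently passed in destination pixel units
+  * (matching the BIND_* wrappers above), hence byte_stride / 2 here
+  * and byte_stride / 4 in the 32 bpp case below.
+  */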
+ pixman_composite_src_n_0565_asm_neon (
+ width,
+ height,
+ (uint16_t *)(((char *) bits) + y * byte_stride + x * 2),
+ byte_stride / 2,
+ _xor & 0xffff);
+ return TRUE;
case 32:
- dst = ((char *) bits) + y * byte_stride + x * 4;
- color = _xor;
- width *= 4; /* width to bytes */
- break;
-
+ pixman_composite_src_n_8888_asm_neon (
+ width,
+ height,
+ (uint32_t *)(((char *) bits) + y * byte_stride + x * 4),
+ byte_stride / 4,
+ _xor);
+ return TRUE;
default:
return FALSE;
}
-
-#ifdef USE_GCC_INLINE_ASM
- if (width < 16)
- {
- /* We have a special case for such small widths that don't allow
- * us to use wide 128-bit stores anyway. We don't waste time
- * trying to align writes, since there are only very few of them anyway
- */
- asm volatile (
- "cmp %[height], #0\n"/* Check if empty fill */
- "beq 3f\n"
- "vdup.32 d0, %[color]\n"/* Fill the color to neon req */
-
- /* Check if we have a such width that can easily be handled by single
- * operation for each scanline. This significantly reduces the number
- * of test/branch instructions for each scanline
- */
- "cmp %[width], #8\n"
- "beq 4f\n"
- "cmp %[width], #4\n"
- "beq 5f\n"
- "cmp %[width], #2\n"
- "beq 6f\n"
-
- /* Loop starts here for each scanline */
- "1:\n"
- "mov r4, %[dst]\n" /* Starting address of the current line */
- "tst %[width], #8\n"
- "beq 2f\n"
- "vst1.8 {d0}, [r4]!\n"
- "2:\n"
- "tst %[width], #4\n"
- "beq 2f\n"
- "str %[color], [r4], #4\n"
- "2:\n"
- "tst %[width], #2\n"
- "beq 2f\n"
- "strh %[color], [r4], #2\n"
- "2:\n"
- "tst %[width], #1\n"
- "beq 2f\n"
- "strb %[color], [r4], #1\n"
- "2:\n"
-
- "subs %[height], %[height], #1\n"
- "add %[dst], %[dst], %[byte_stride]\n"
- "bne 1b\n"
- "b 3f\n"
-
- /* Special fillers for those widths that we can do with single operation */
- "4:\n"
- "subs %[height], %[height], #1\n"
- "vst1.8 {d0}, [%[dst]]\n"
- "add %[dst], %[dst], %[byte_stride]\n"
- "bne 4b\n"
- "b 3f\n"
-
- "5:\n"
- "subs %[height], %[height], #1\n"
- "str %[color], [%[dst]]\n"
- "add %[dst], %[dst], %[byte_stride]\n"
- "bne 5b\n"
- "b 3f\n"
-
- "6:\n"
- "subs %[height], %[height], #1\n"
- "strh %[color], [%[dst]]\n"
- "add %[dst], %[dst], %[byte_stride]\n"
- "bne 6b\n"
-
- "3:\n"
- : [height] "+r" (height), [dst] "+r" (dst)
- : [color] "r" (color), [width] "r" (width),
- [byte_stride] "r" (byte_stride)
- : "memory", "cc", "d0", "r4");
- }
- else
- {
- asm volatile (
- "cmp %[height], #0\n"/* Check if empty fill */
- "beq 5f\n"
- "vdup.32 q0, %[color]\n"/* Fill the color to neon req */
-
- /* Loop starts here for each scanline */
- "1:\n"
- "mov r4, %[dst]\n"/* Starting address of the current line */
- "mov r5, %[width]\n"/* We're going to write this many bytes */
- "ands r6, r4, #15\n"/* Are we at the 128-bit aligned address? */
- "beq 2f\n"/* Jump to the best case */
-
- /* We're not 128-bit aligned: However, we know that we can get to the
- next aligned location, since the fill is at least 16 bytes wide */
- "rsb r6, r6, #16\n" /* We would need to go forward this much */
- "sub r5, r5, r6\n"/* Update bytes left */
- "tst r6, #1\n"
- "beq 6f\n"
- "vst1.8 {d0[0]}, [r4]!\n"/* Store byte, now we are word aligned */
- "6:\n"
- "tst r6, #2\n"
- "beq 6f\n"
- "vst1.16 {d0[0]}, [r4, :16]!\n"/* Store half word, now we are 16-bit aligned */
- "6:\n"
- "tst r6, #4\n"
- "beq 6f\n"
- "vst1.32 {d0[0]}, [r4, :32]!\n"/* Store word, now we're 32-bit aligned */
- "6:\n"
- "tst r6, #8\n"
- "beq 2f\n"
- "vst1.64 {d0}, [r4, :64]!\n"/* Store qword now we're 64-bit aligned */
-
- /* The good case: We're 128-bit aligned for this scanline */
- "2:\n"
- "and r6, r5, #15\n"/* Number of tailing bytes */
- "cmp r5, r6\n"/* Do we have at least one qword to write? */
- "beq 6f\n"/* No, we just write the tail */
- "lsr r5, r5, #4\n"/* This many full qwords to write */
-
- /* The main block: Do 128-bit aligned writes */
- "3:\n"
- "subs r5, r5, #1\n"
- "vst1.64 {d0, d1}, [r4, :128]!\n"
- "bne 3b\n"
-
- /* Handle the tailing bytes: Do 64, 32, 16 and 8-bit aligned writes as needed.
- We know that we're currently at 128-bit aligned address, so we can just
- pick the biggest operations that the remaining write width allows */
- "6:\n"
- "cmp r6, #0\n"
- "beq 4f\n"
- "tst r6, #8\n"
- "beq 6f\n"
- "vst1.64 {d0}, [r4, :64]!\n"
- "6:\n"
- "tst r6, #4\n"
- "beq 6f\n"
- "vst1.32 {d0[0]}, [r4, :32]!\n"
- "6:\n"
- "tst r6, #2\n"
- "beq 6f\n"
- "vst1.16 {d0[0]}, [r4, :16]!\n"
- "6:\n"
- "tst r6, #1\n"
- "beq 4f\n"
- "vst1.8 {d0[0]}, [r4]!\n"
- "4:\n"
-
- /* Handle the next scanline */
- "subs %[height], %[height], #1\n"
- "add %[dst], %[dst], %[byte_stride]\n"
- "bne 1b\n"
- "5:\n"
- : [height] "+r" (height), [dst] "+r" (dst)
- : [color] "r" (color), [width] "r" (width),
- [byte_stride] "r" (byte_stride)
- : "memory", "cc", "d0", "d1", "r4", "r5", "r6");
- }
- return TRUE;
-
-#else
-
- /* TODO: intrinsic version for armcc */
- return FALSE;
-
-#endif
-}
-
-/* TODO: is there a more generic way of doing this being introduced? */
-#define NEON_SCANLINE_BUFFER_PIXELS (1024)
-
-static inline void
-neon_quadword_copy (void * dst,
- void * src,
- uint32_t count, /* of quadwords */
- uint32_t trailer_count /* of bytes */)
-{
- uint8_t *t_dst = dst, *t_src = src;
-
- /* Uses aligned multi-register loads to maximise read bandwidth
- * on uncached memory such as framebuffers
- * The accesses do not have the aligned qualifiers, so that the copy
- * may convert between aligned-uncached and unaligned-cached memory.
- * It is assumed that the CPU can infer alignedness from the address.
- */
-
-#ifdef USE_GCC_INLINE_ASM
-
- asm volatile (
- " cmp %[count], #8 \n"
- " blt 1f @ skip oversized fragments \n"
- "0: @ start with eight quadwords at a time \n"
- " sub %[count], %[count], #8 \n"
- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n"
- " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n"
- " vld1.8 {d24, d25, d26, d27}, [%[src]]! \n"
- " vld1.8 {d28, d29, d30, d31}, [%[src]]! \n"
- " cmp %[count], #8 \n"
- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n"
- " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n"
- " vst1.8 {d24, d25, d26, d27}, [%[dst]]! \n"
- " vst1.8 {d28, d29, d30, d31}, [%[dst]]! \n"
- " bge 0b \n"
- "1: @ four quadwords \n"
- " tst %[count], #4 \n"
- " beq 2f @ skip oversized fragment \n"
- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n"
- " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n"
- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n"
- " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n"
- "2: @ two quadwords \n"
- " tst %[count], #2 \n"
- " beq 3f @ skip oversized fragment \n"
- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n"
- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n"
- "3: @ one quadword \n"
- " tst %[count], #1 \n"
- " beq 4f @ skip oversized fragment \n"
- " vld1.8 {d16, d17}, [%[src]]! \n"
- " vst1.8 {d16, d17}, [%[dst]]! \n"
- "4: @ end \n"
-
- /* Clobbered input registers marked as input/outputs */
- : [dst] "+r" (t_dst), [src] "+r" (t_src), [count] "+r" (count)
-
- /* No unclobbered inputs */
- :
-
- /* Clobbered vector registers */
- : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25",
- "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory");
-
-#else
-
- while (count >= 8)
- {
- uint8x16x4_t t1 = vld4q_u8 (t_src);
- uint8x16x4_t t2 = vld4q_u8 (t_src + sizeof(uint8x16x4_t));
-
- t_src += sizeof(uint8x16x4_t) * 2;
- vst4q_u8 (t_dst, t1);
- vst4q_u8 (t_dst + sizeof(uint8x16x4_t), t2);
- t_dst += sizeof(uint8x16x4_t) * 2;
- count -= 8;
- }
-
- if (count & 4)
- {
- uint8x16x4_t t1 = vld4q_u8 (t_src);
-
- t_src += sizeof(uint8x16x4_t);
- vst4q_u8 (t_dst, t1);
- t_dst += sizeof(uint8x16x4_t);
- }
-
- if (count & 2)
- {
- uint8x8x4_t t1 = vld4_u8 (t_src);
-
- t_src += sizeof(uint8x8x4_t);
- vst4_u8 (t_dst, t1);
- t_dst += sizeof(uint8x8x4_t);
- }
-
- if (count & 1)
- {
- uint8x16_t t1 = vld1q_u8 (t_src);
-
- t_src += sizeof(uint8x16_t);
- vst1q_u8 (t_dst, t1);
- t_dst += sizeof(uint8x16_t);
- }
-
-#endif /* !USE_GCC_INLINE_ASM */
-
- if (trailer_count)
- {
- if (trailer_count & 8)
- {
- uint8x8_t t1 = vld1_u8 (t_src);
-
- t_src += sizeof(uint8x8_t);
- vst1_u8 (t_dst, t1);
- t_dst += sizeof(uint8x8_t);
- }
-
- if (trailer_count & 4)
- {
- *((uint32_t*) t_dst) = *((uint32_t*) t_src);
-
- t_dst += 4;
- t_src += 4;
- }
-
- if (trailer_count & 2)
- {
- *((uint16_t*) t_dst) = *((uint16_t*) t_src);
-
- t_dst += 2;
- t_src += 2;
- }
-
- if (trailer_count & 1)
- {
- *t_dst++ = *t_src++;
- }
- }
-}
-
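/* Illustrative sketch, not part of the patch: how callers of the removed
 * neon_quadword_copy() above split a byte length into the (quadword count,
 * trailing bytes) pair it expects, mirroring what pixman_blt_neon() does
 * further down.  The wrapper name is hypothetical.
 */
#include <stdint.h>

static void
copy_bytes_as_quadwords (void *dst, void *src, uint32_t byte_width)
{
    uint32_t quadwords = byte_width / 16;   /* full 16-byte blocks   */
    uint32_t trailer   = byte_width % 16;   /* 0..15 left-over bytes */

    neon_quadword_copy (dst, src, quadwords, trailer);
}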
-static inline void
-solid_over_565_8_pix_neon (uint32_t glyph_colour,
- uint16_t *dest,
- uint8_t * in_mask,
- uint32_t dest_stride, /* bytes, not elements */
- uint32_t mask_stride,
- uint32_t count /* 8-pixel groups */)
-{
- /* Inner loop of glyph blitter (solid colour, alpha mask) */
-
-#ifdef USE_GCC_INLINE_ASM
-
- asm volatile (
- " vld4.8 {d20[], d21[], d22[], d23[]}, [%[glyph_colour]] @ splat solid colour components \n"
- "0: @ loop \n"
- " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n"
- " vld1.8 {d17}, [%[in_mask]] @ load alpha mask of glyph \n"
- " vmull.u8 q9, d17, d23 @ apply glyph colour alpha to mask \n"
- " vshrn.u16 d17, q9, #8 @ reformat it to match original mask \n"
- " vmvn d18, d17 @ we need the inverse mask for the background \n"
- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
- " vshrn.u16 d4, q0, #3 @ unpack green \n"
- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
- " vmull.u8 q1, d2, d18 @ apply inverse mask to background red... \n"
- " vmull.u8 q2, d4, d18 @ ...green... \n"
- " vmull.u8 q3, d6, d18 @ ...blue \n"
- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
- " vmlal.u8 q1, d17, d22 @ add masked foreground red... \n"
- " vmlal.u8 q2, d17, d21 @ ...green... \n"
- " vmlal.u8 q3, d17, d20 @ ...blue \n"
- " add %[in_mask], %[in_mask], %[mask_stride] @ advance mask pointer, while we wait \n"
- " vsri.16 q1, q2, #5 @ pack green behind red \n"
- " vsri.16 q1, q3, #11 @ pack blue into pixels \n"
- " vst1.16 {d2, d3}, [%[dest]] @ store composited pixels \n"
- " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n"
- " bne 0b @ next please \n"
-
- /* Clobbered registers marked as input/outputs */
- : [dest] "+r" (dest), [in_mask] "+r" (in_mask), [count] "+r" (count)
-
- /* Inputs */
- : [dest_stride] "r" (dest_stride), [mask_stride] "r" (mask_stride), [glyph_colour] "r" (&glyph_colour)
-
- /* Clobbers, including the inputs we modify, and potentially lots of memory */
- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d19",
- "d20", "d21", "d22", "d23", "d24", "d25", "cc", "memory"
- );
-
-#else
-
- uint8x8x4_t solid_colour = vld4_dup_u8 ((uint8_t*) &glyph_colour);
-
- while (count--)
- {
- uint16x8_t pixels = vld1q_u16 (dest);
- uint8x8_t mask = vshrn_n_u16 (vmull_u8 (solid_colour.val[3], vld1_u8 (in_mask)), 8);
- uint8x8_t mask_image = vmvn_u8 (mask);
-
- uint8x8_t t_red = vshrn_n_u16 (pixels, 8);
- uint8x8_t t_green = vshrn_n_u16 (pixels, 3);
- uint8x8_t t_blue = vshrn_n_u16 (vsli_n_u8 (pixels, pixels, 5), 2);
-
- uint16x8_t s_red = vmull_u8 (vsri_n_u8 (t_red, t_red, 5), mask_image);
- uint16x8_t s_green = vmull_u8 (vsri_n_u8 (t_green, t_green, 6), mask_image);
- uint16x8_t s_blue = vmull_u8 (t_blue, mask_image);
-
- s_red = vmlal (s_red, mask, solid_colour.val[2]);
- s_green = vmlal (s_green, mask, solid_colour.val[1]);
- s_blue = vmlal (s_blue, mask, solid_colour.val[0]);
-
- pixels = vsri_n_u16 (s_red, s_green, 5);
- pixels = vsri_n_u16 (pixels, s_blue, 11);
- vst1q_u16 (dest, pixels);
-
- dest += dest_stride;
- mask += mask_stride;
- }
-
-#endif
}
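/* Illustrative sketch, not part of the patch: the scalar equivalent of the
 * r5g6b5 unpacking done in the loops above with vshrn/vsli/vsri (bit
 * replication to extend the 5- and 6-bit channels to 8 bits) and of the
 * repacking performed by the final vsri.16 steps.
 */
#include <stdint.h>

static void
unpack_0565 (uint16_t p, uint8_t *r, uint8_t *g, uint8_t *b)
{
    uint8_t r5 = (p >> 11) & 0x1f;
    uint8_t g6 = (p >> 5)  & 0x3f;
    uint8_t b5 =  p        & 0x1f;

    *r = (r5 << 3) | (r5 >> 2);   /* extend 5 bits to 8 by replication */
    *g = (g6 << 2) | (g6 >> 4);   /* extend 6 bits to 8 */
    *b = (b5 << 3) | (b5 >> 2);
}

static uint16_t
pack_0565 (uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t) (((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}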
-#if 0 /* this is broken currently */
-static void
-neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint16_t *dst_line, *aligned_line;
- uint8_t *mask_line;
- uint32_t dst_stride, mask_stride;
- uint32_t kernel_count, copy_count, copy_tail;
- uint8_t kernel_offset, copy_offset;
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- /* bail out if fully transparent or degenerate */
- srca = src >> 24;
- if (src == 0)
- return;
-
- if (width == 0 || height == 0)
- return;
-
- if (width > NEON_SCANLINE_BUFFER_PIXELS)
- {
- /* split the blit, so we can use a fixed-size scanline buffer
- * TODO: there must be a more elegant way of doing this.
- */
- int x;
- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
- {
- neon_composite_over_n_8_0565 (
- impl, op,
- src_image, mask_image, dst_image,
- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
- }
-
- return;
- }
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- /* keep within minimum number of aligned quadwords on width
- * while also keeping the minimum number of columns to process
- */
- {
- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
-
- /* the fast copy should be quadword aligned */
- copy_offset = dst_line - ((uint16_t*) aligned_left);
- aligned_line = dst_line - copy_offset;
- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
- copy_tail = 0;
-
- if (aligned_right - aligned_left > ceiling_length)
- {
- /* unaligned routine is tightest */
- kernel_count = (uint32_t) (ceiling_length >> 4);
- kernel_offset = copy_offset;
- }
- else
- {
- /* aligned routine is equally tight, so it is safer to align */
- kernel_count = copy_count;
- kernel_offset = 0;
- }
-
- /* We should avoid reading beyond scanline ends for safety */
- if (aligned_line < (dst_line - dest_x) ||
- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
- {
- /* switch to precise read */
- copy_offset = kernel_offset = 0;
- aligned_line = dst_line;
- kernel_count = (uint32_t) (ceiling_length >> 4);
- copy_count = (width * sizeof(*dst_line)) >> 4;
- copy_tail = (width * sizeof(*dst_line)) & 0xF;
- }
- }
-
- {
- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
- uint8_t glyph_line[NEON_SCANLINE_BUFFER_PIXELS + 8];
- int y = height;
-
- /* row-major order */
- /* left edge, middle block, right edge */
- for ( ; y--; mask_line += mask_stride, aligned_line += dst_stride, dst_line += dst_stride)
- {
- /* We don't want to overrun the edges of the glyph,
- * so realign the edge data into known buffers
- */
- neon_quadword_copy (glyph_line + copy_offset, mask_line, width >> 4, width & 0xF);
-
- /* Uncached framebuffer access is really, really slow
- * if we do it piecemeal. It should be much faster if we
- * grab it all at once. One scanline should easily fit in
- * L1 cache, so this should not waste RAM bandwidth.
- */
- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
-
- /* Apply the actual filter */
- solid_over_565_8_pix_neon (
- src, scan_line + kernel_offset,
- glyph_line + kernel_offset, 8 * sizeof(*dst_line),
- 8, kernel_count);
-
- /* Copy the modified scanline back */
- neon_quadword_copy (dst_line, scan_line + copy_offset,
- width >> 3, (width & 7) * 2);
- }
- }
-}
-#endif
-
-#ifdef USE_GCC_INLINE_ASM
-
-static inline void
-plain_over_565_8_pix_neon (uint32_t colour,
- uint16_t *dest,
- uint32_t dest_stride, /* bytes, not elements */
- uint32_t count /* 8-pixel groups */)
-{
- /* Inner loop for plain translucent rects
- * (solid colour without alpha mask)
- */
- asm volatile (
- " vld4.8 {d20[], d21[], d22[], d23[]}, [%[colour]] @ solid colour load/splat \n"
- " vmull.u8 q12, d23, d22 @ premultiply alpha red \n"
- " vmull.u8 q13, d23, d21 @ premultiply alpha green \n"
- " vmull.u8 q14, d23, d20 @ premultiply alpha blue \n"
- " vmvn d18, d23 @ inverse alpha for background \n"
- "0: @ loop\n"
- " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n"
- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
- " vshrn.u16 d4, q0, #3 @ unpack green \n"
- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
- " vmov q0, q12 @ retrieve foreground red \n"
- " vmlal.u8 q0, d2, d18 @ blend red - my kingdom for a four-operand MLA \n"
- " vmov q1, q13 @ retrieve foreground green \n"
- " vmlal.u8 q1, d4, d18 @ blend green \n"
- " vmov q2, q14 @ retrieve foreground blue \n"
- " vmlal.u8 q2, d6, d18 @ blend blue \n"
- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
- " vsri.16 q0, q1, #5 @ pack green behind red \n"
- " vsri.16 q0, q2, #11 @ pack blue into pixels \n"
- " vst1.16 {d0, d1}, [%[dest]] @ store composited pixels \n"
- " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n"
- " bne 0b @ next please \n"
-
- /* Clobbered registers marked as input/outputs */
- : [dest] "+r" (dest), [count] "+r" (count)
-
- /* Inputs */
- : [dest_stride] "r" (dest_stride), [colour] "r" (&colour)
-
- /* Clobbers, including the inputs we modify, and
- * potentially lots of memory
- */
- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d18", "d19",
- "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
- "cc", "memory"
- );
-}
-
-static void
-neon_composite_over_n_0565 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint16_t *dst_line, *aligned_line;
- uint32_t dst_stride;
- uint32_t kernel_count, copy_count, copy_tail;
- uint8_t kernel_offset, copy_offset;
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- /* bail out if fully transparent */
- srca = src >> 24;
- if (src == 0)
- return;
-
- if (width == 0 || height == 0)
- return;
-
- if (width > NEON_SCANLINE_BUFFER_PIXELS)
- {
- /* split the blit, so we can use a fixed-size scanline buffer
- * TODO: there must be a more elegant way of doing this.
- */
- int x;
-
- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
- {
- neon_composite_over_n_0565 (
- impl, op,
- src_image, mask_image, dst_image,
- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
- }
- return;
- }
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
- /* keep within minimum number of aligned quadwords on width
- * while also keeping the minimum number of columns to process
- */
- {
- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
-
- /* the fast copy should be quadword aligned */
- copy_offset = dst_line - ((uint16_t*) aligned_left);
- aligned_line = dst_line - copy_offset;
- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
- copy_tail = 0;
-
- if (aligned_right - aligned_left > ceiling_length)
- {
- /* unaligned routine is tightest */
- kernel_count = (uint32_t) (ceiling_length >> 4);
- kernel_offset = copy_offset;
- }
- else
- {
- /* aligned routine is equally tight, so it is safer to align */
- kernel_count = copy_count;
- kernel_offset = 0;
- }
-
- /* We should avoid reading beyond scanline ends for safety */
- if (aligned_line < (dst_line - dest_x) ||
- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
- {
- /* switch to precise read */
- copy_offset = kernel_offset = 0;
- aligned_line = dst_line;
- kernel_count = (uint32_t) (ceiling_length >> 4);
- copy_count = (width * sizeof(*dst_line)) >> 4;
- copy_tail = (width * sizeof(*dst_line)) & 0xF;
- }
- }
-
- {
- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
-
- /* row-major order */
- /* left edge, middle block, right edge */
- for ( ; height--; aligned_line += dst_stride, dst_line += dst_stride)
- {
- /* Uncached framebuffer access is really, really slow if we do it piecemeal.
- * It should be much faster if we grab it all at once.
- * One scanline should easily fit in L1 cache, so this should
- * not waste RAM bandwidth.
- */
- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
-
- /* Apply the actual filter */
- plain_over_565_8_pix_neon (
- src, scan_line + kernel_offset, 8 * sizeof(*dst_line), kernel_count);
-
- /* Copy the modified scanline back */
- neon_quadword_copy (
- dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
- }
- }
-}
-
-static inline void
-ARGB8_over_565_8_pix_neon (uint32_t *src,
- uint16_t *dest,
- uint32_t src_stride, /* bytes, not elements */
- uint32_t count /* 8-pixel groups */)
-{
- asm volatile (
- "0: @ loop\n"
- " pld [%[src], %[src_stride]] @ preload from next scanline \n"
- " vld1.16 {d0, d1}, [%[dest]] @ load pixels from framebuffer \n"
- " vld4.8 {d20, d21, d22, d23},[%[src]]! @ load source image pixels \n"
- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
- " vshrn.u16 d4, q0, #3 @ unpack green \n"
- " vmvn d18, d23 @ we need the inverse alpha for the background \n"
- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
- " vmull.u8 q1, d2, d18 @ apply inverse alpha to background red... \n"
- " vmull.u8 q2, d4, d18 @ ...green... \n"
- " vmull.u8 q3, d6, d18 @ ...blue \n"
- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
- " vmlal.u8 q1, d23, d22 @ add blended foreground red... \n"
- " vmlal.u8 q2, d23, d21 @ ...green... \n"
- " vmlal.u8 q3, d23, d20 @ ...blue \n"
- " vsri.16 q1, q2, #5 @ pack green behind red \n"
- " vsri.16 q1, q3, #11 @ pack blue into pixels \n"
- " vst1.16 {d2, d3}, [%[dest]]! @ store composited pixels \n"
- " bne 0b @ next please \n"
-
- /* Clobbered registers marked as input/outputs */
- : [dest] "+r" (dest), [src] "+r" (src), [count] "+r" (count)
-
- /* Inputs */
- : [src_stride] "r" (src_stride)
-
- /* Clobbers, including the inputs we modify, and potentially lots of memory */
- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d20",
- "d21", "d22", "d23", "cc", "memory"
- );
-}
-
-static void
-neon_composite_over_8888_0565 (pixman_implementation_t * impl,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *src_line;
- uint16_t *dst_line, *aligned_line;
- uint32_t dst_stride, src_stride;
- uint32_t kernel_count, copy_count, copy_tail;
- uint8_t kernel_offset, copy_offset;
-
- /* we assume mask is opaque
- * so the only alpha to deal with is embedded in src
- */
- if (width > NEON_SCANLINE_BUFFER_PIXELS)
- {
- /* split the blit, so we can use a fixed-size scanline buffer */
- int x;
- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
- {
- neon_composite_over_8888_0565 (
- impl, op,
- src_image, mask_image, dst_image,
- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
- }
- return;
- }
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
- /* keep within minimum number of aligned quadwords on width
- * while also keeping the minimum number of columns to process
- */
- {
- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
-
- /* the fast copy should be quadword aligned */
- copy_offset = dst_line - ((uint16_t*) aligned_left);
- aligned_line = dst_line - copy_offset;
- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
- copy_tail = 0;
-
- if (aligned_right - aligned_left > ceiling_length)
- {
- /* unaligned routine is tightest */
- kernel_count = (uint32_t) (ceiling_length >> 4);
- kernel_offset = copy_offset;
- }
- else
- {
- /* aligned routine is equally tight, so it is safer to align */
- kernel_count = copy_count;
- kernel_offset = 0;
- }
-
- /* We should avoid reading beyond scanline ends for safety */
- if (aligned_line < (dst_line - dest_x) ||
- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
- {
- /* switch to precise read */
- copy_offset = kernel_offset = 0;
- aligned_line = dst_line;
- kernel_count = (uint32_t) (ceiling_length >> 4);
- copy_count = (width * sizeof(*dst_line)) >> 4;
- copy_tail = (width * sizeof(*dst_line)) & 0xF;
- }
- }
-
- /* Preload the first input scanline */
- {
- uint8_t *src_ptr = (uint8_t*) src_line;
- uint32_t count = (width + 15) / 16;
-
-#ifdef USE_GCC_INLINE_ASM
- asm volatile (
- "0: @ loop \n"
- " subs %[count], %[count], #1 \n"
- " pld [%[src]] \n"
- " add %[src], %[src], #64 \n"
- " bgt 0b \n"
-
- /* Clobbered input registers marked as input/outputs */
- : [src] "+r" (src_ptr), [count] "+r" (count)
- : /* no unclobbered inputs */
- : "cc"
- );
-#else
- do
- {
- __pld (src_ptr);
- src_ptr += 64;
- }
- while (--count);
-#endif
- }
-
- {
- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
-
- /* row-major order */
- /* left edge, middle block, right edge */
- for ( ; height--; src_line += src_stride, aligned_line += dst_stride)
- {
- /* Uncached framebuffer access is really, really slow if we do
- * it piecemeal. It should be much faster if we grab it all at
- * once. One scanline should easily fit in L1 cache, so this
- * should not waste RAM bandwidth.
- */
- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
-
- /* Apply the actual filter */
- ARGB8_over_565_8_pix_neon (
- src_line, scan_line + kernel_offset,
- src_stride * sizeof(*src_line), kernel_count);
-
- /* Copy the modified scanline back */
- neon_quadword_copy (dst_line,
- scan_line + copy_offset,
- width >> 3, (width & 7) * 2);
- }
- }
-}
-
-#endif /* USE_GCC_INLINE_ASM */
-
static const pixman_fast_path_t arm_neon_fast_path_array[] =
{
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_8888_8_8, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, neon_composite_add_8000_8000, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, neon_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, neon_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_24_16, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_24_16, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_24_16, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_24_16, 0 },
-#ifdef USE_GCC_INLINE_ASM
- { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_16_16, 0 },
- { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_16_16, 0 },
-#if 0 /* this code has some bugs */
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_n_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_n_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 },
-#endif
-#endif
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, neon_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888, 0 },
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_0565_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_0565_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_8888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_8888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_8888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_8888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_src_8888_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_src_8888_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_src_8888_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_src_8888_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_r8g8b8, PIXMAN_null, PIXMAN_r8g8b8, neon_composite_src_0888_0888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, neon_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, neon_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, neon_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, neon_composite_over_8888_8888 },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_n_8_8 },
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_a8, PIXMAN_a8, neon_composite_add_8_8_8 },
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, neon_composite_add_8000_8000 },
{ PIXMAN_OP_NONE },
};
@@ -2674,79 +361,6 @@ arm_neon_composite (pixman_implementation_t *imp,
}
static pixman_bool_t
-pixman_blt_neon (void *src_bits,
- void *dst_bits,
- int src_stride,
- int dst_stride,
- int src_bpp,
- int dst_bpp,
- int src_x,
- int src_y,
- int dst_x,
- int dst_y,
- int width,
- int height)
-{
- if (!width || !height)
- return TRUE;
-
- /* accelerate only straight copies involving complete bytes */
- if (src_bpp != dst_bpp || (src_bpp & 7))
- return FALSE;
-
- {
- uint32_t bytes_per_pixel = src_bpp >> 3;
- uint32_t byte_width = width * bytes_per_pixel;
- /* the stride parameters are in 32-bit words, not bytes */
- int32_t src_stride_bytes = src_stride * 4;
- int32_t dst_stride_bytes = dst_stride * 4;
- uint8_t *src_bytes = ((uint8_t*) src_bits) +
- src_y * src_stride_bytes + src_x * bytes_per_pixel;
- uint8_t *dst_bytes = ((uint8_t*) dst_bits) +
- dst_y * dst_stride_bytes + dst_x * bytes_per_pixel;
- uint32_t quadword_count = byte_width / 16;
- uint32_t offset = byte_width % 16;
-
- while (height--)
- {
- neon_quadword_copy (dst_bytes, src_bytes, quadword_count, offset);
- src_bytes += src_stride_bytes;
- dst_bytes += dst_stride_bytes;
- }
- }
-
- return TRUE;
-}
-
-static pixman_bool_t
-arm_neon_blt (pixman_implementation_t *imp,
- uint32_t * src_bits,
- uint32_t * dst_bits,
- int src_stride,
- int dst_stride,
- int src_bpp,
- int dst_bpp,
- int src_x,
- int src_y,
- int dst_x,
- int dst_y,
- int width,
- int height)
-{
- if (pixman_blt_neon (
- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
- src_x, src_y, dst_x, dst_y, width, height))
- {
- return TRUE;
- }
-
- return _pixman_implementation_blt (
- imp->delegate,
- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
- src_x, src_y, dst_x, dst_y, width, height);
-}
-
-static pixman_bool_t
arm_neon_fill (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
@@ -2771,11 +385,7 @@ _pixman_implementation_create_arm_neon (void)
pixman_implementation_t *imp = _pixman_implementation_create (general);
imp->composite = arm_neon_composite;
-#if 0 /* this code has some bugs */
- imp->blt = arm_neon_blt;
-#endif
imp->fill = arm_neon_fill;
return imp;
}
-
diff --git a/pixman/pixman/pixman-arm-simd.c b/pixman/pixman/pixman-arm-simd.c
index fb7bf3da8..265c820b4 100644
--- a/pixman/pixman/pixman-arm-simd.c
+++ b/pixman/pixman/pixman-arm-simd.c
@@ -221,7 +221,7 @@ arm_composite_over_8888_n_8888 (
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
+ mask = _pixman_image_get_solid (mask_image, PIXMAN_a8r8g8b8);
mask = (mask) >> 24;
while (height--)
@@ -421,19 +421,19 @@ arm_composite_over_n_8_8888 (pixman_implementation_t * impl,
static const pixman_fast_path_t arm_simd_fast_path_array[] =
{
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, arm_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, arm_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, arm_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, arm_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888, NEED_SOLID_MASK },
-
- { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, arm_composite_add_8000_8000, 0 },
-
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, arm_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, arm_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, arm_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, arm_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888 },
+
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, arm_composite_add_8000_8000 },
+
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_NONE },
};
diff --git a/pixman/pixman/pixman-bits-image.c b/pixman/pixman/pixman-bits-image.c
index 4e78ce107..5a5a69057 100644
--- a/pixman/pixman/pixman-bits-image.c
+++ b/pixman/pixman/pixman-bits-image.c
@@ -4,6 +4,7 @@
* 2008 Aaron Plattner, NVIDIA Corporation
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007, 2009 Red Hat, Inc.
+ * Copyright © 2008 André Tupinambá <andrelrt@gmail.com>
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
@@ -28,6 +29,7 @@
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pixman-private.h"
@@ -157,6 +159,9 @@ repeat (pixman_repeat_t repeat, int size, int *coord)
case PIXMAN_REPEAT_NONE:
break;
+
+ default:
+ break;
}
}
@@ -181,6 +186,97 @@ bits_image_fetch_pixel_nearest (bits_image_t *image,
}
}
+#if SIZEOF_LONG > 4
+
+static force_inline uint32_t
+bilinear_interpolation (uint32_t tl, uint32_t tr,
+ uint32_t bl, uint32_t br,
+ int distx, int disty)
+{
+ uint64_t distxy, distxiy, distixy, distixiy;
+ uint64_t tl64, tr64, bl64, br64;
+ uint64_t f, r;
+
+ distxy = distx * disty;
+ distxiy = distx * (256 - disty);
+ distixy = (256 - distx) * disty;
+ distixiy = (256 - distx) * (256 - disty);
+
+ /* Alpha and Blue */
+ tl64 = tl & 0xff0000ff;
+ tr64 = tr & 0xff0000ff;
+ bl64 = bl & 0xff0000ff;
+ br64 = br & 0xff0000ff;
+
+ f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy;
+ r = f & 0x0000ff0000ff0000ull;
+
+ /* Red and Green */
+ tl64 = tl;
+ tl64 = ((tl64 << 16) & 0x000000ff00000000ull) | (tl64 & 0x0000ff00ull);
+
+ tr64 = tr;
+ tr64 = ((tr64 << 16) & 0x000000ff00000000ull) | (tr64 & 0x0000ff00ull);
+
+ bl64 = bl;
+ bl64 = ((bl64 << 16) & 0x000000ff00000000ull) | (bl64 & 0x0000ff00ull);
+
+ br64 = br;
+ br64 = ((br64 << 16) & 0x000000ff00000000ull) | (br64 & 0x0000ff00ull);
+
+ f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy;
+ r |= ((f >> 16) & 0x000000ff00000000ull) | (f & 0xff000000ull);
+
+ return (uint32_t)(r >> 16);
+}
+
+#else
+
+static force_inline uint32_t
+bilinear_interpolation (uint32_t tl, uint32_t tr,
+ uint32_t bl, uint32_t br,
+ int distx, int disty)
+{
+ int distxy, distxiy, distixy, distixiy;
+ uint32_t f, r;
+
+ distxy = distx * disty;
+ distxiy = (distx << 8) - distxy; /* distx * (256 - disty) */
+ distixy = (disty << 8) - distxy; /* disty * (256 - distx) */
+ distixiy =
+ 256 * 256 - (disty << 8) -
+ (distx << 8) + distxy; /* (256 - distx) * (256 - disty) */
+
+ /* Blue */
+ r = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy
+ + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy;
+
+ /* Green */
+ f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy
+ + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy;
+ r |= f & 0xff000000;
+
+ tl >>= 16;
+ tr >>= 16;
+ bl >>= 16;
+ br >>= 16;
+ r >>= 16;
+
+ /* Red */
+ f = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy
+ + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy;
+ r |= f & 0x00ff0000;
+
+ /* Alpha */
+ f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy
+ + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy;
+ r |= f & 0xff000000;
+
+ return r;
+}
+
+#endif
+
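/* Illustrative sketch, not part of the patch: a straightforward per-channel
 * reference for the fixed-point bilinear blend that the two
 * bilinear_interpolation() variants above implement.  distx and disty come
 * from the top 8 fractional bits (0..255), the four weights always sum to
 * 256 * 256, and the per-channel sum is normalised with a shift by 16.
 */
#include <stdint.h>

static uint32_t
bilinear_reference (uint32_t tl, uint32_t tr,
                    uint32_t bl, uint32_t br,
                    int distx, int disty)
{
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
	uint32_t top    = ((tl >> shift) & 0xff) * (256 - distx) +
	                  ((tr >> shift) & 0xff) * distx;
	uint32_t bottom = ((bl >> shift) & 0xff) * (256 - distx) +
	                  ((br >> shift) & 0xff) * distx;

	result |= (((top * (256 - disty) + bottom * disty) >> 16) & 0xff) << shift;
    }

    return result;
}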
static force_inline uint32_t
bits_image_fetch_pixel_bilinear (bits_image_t *image,
pixman_fixed_t x,
@@ -190,9 +286,8 @@ bits_image_fetch_pixel_bilinear (bits_image_t *image,
int width = image->width;
int height = image->height;
int x1, y1, x2, y2;
- uint32_t tl, tr, bl, br, r;
- int32_t distx, disty, idistx, idisty;
- uint32_t ft, fb;
+ uint32_t tl, tr, bl, br;
+ int32_t distx, disty;
x1 = x - pixman_fixed_1 / 2;
y1 = y - pixman_fixed_1 / 2;
@@ -211,7 +306,7 @@ bits_image_fetch_pixel_bilinear (bits_image_t *image,
repeat (repeat_mode, height, &y1);
repeat (repeat_mode, width, &x2);
repeat (repeat_mode, height, &y2);
-
+
tl = get_pixel (image, x1, y1, FALSE);
bl = get_pixel (image, x1, y2, FALSE);
tr = get_pixel (image, x2, y1, FALSE);
@@ -225,24 +320,218 @@ bits_image_fetch_pixel_bilinear (bits_image_t *image,
br = get_pixel (image, x2, y2, TRUE);
}
- idistx = 256 - distx;
- idisty = 256 - disty;
+ return bilinear_interpolation (tl, tr, bl, br, distx, disty);
+}
+
+static void
+bits_image_fetch_bilinear_no_repeat_8888 (pixman_image_t * ima,
+ int offset,
+ int line,
+ int width,
+ uint32_t * buffer,
+ const uint32_t * mask,
+ uint32_t mask_bits)
+{
+ bits_image_t *bits = &ima->bits;
+ pixman_fixed_t x_top, x_bottom, x;
+ pixman_fixed_t ux_top, ux_bottom, ux;
+ pixman_vector_t v;
+ uint32_t top_mask, bottom_mask;
+ uint32_t *top_row;
+ uint32_t *bottom_row;
+ uint32_t *end;
+ uint32_t zero[2] = { 0, 0 };
+ int y, y1, y2;
+ int disty;
+ int mask_inc;
+ int w;
+
+ /* reference point is the center of the pixel */
+ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
+ v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
+ v.vector[2] = pixman_fixed_1;
-#define GET8(v, i) ((uint16_t) (uint8_t) ((v) >> i))
- ft = GET8 (tl, 0) * idistx + GET8 (tr, 0) * distx;
- fb = GET8 (bl, 0) * idistx + GET8 (br, 0) * distx;
- r = (((ft * idisty + fb * disty) >> 16) & 0xff);
- ft = GET8 (tl, 8) * idistx + GET8 (tr, 8) * distx;
- fb = GET8 (bl, 8) * idistx + GET8 (br, 8) * distx;
- r |= (((ft * idisty + fb * disty) >> 8) & 0xff00);
- ft = GET8 (tl, 16) * idistx + GET8 (tr, 16) * distx;
- fb = GET8 (bl, 16) * idistx + GET8 (br, 16) * distx;
- r |= (((ft * idisty + fb * disty)) & 0xff0000);
- ft = GET8 (tl, 24) * idistx + GET8 (tr, 24) * distx;
- fb = GET8 (bl, 24) * idistx + GET8 (br, 24) * distx;
- r |= (((ft * idisty + fb * disty) << 8) & 0xff000000);
+ if (!pixman_transform_point_3d (bits->common.transform, &v))
+ return;
- return r;
+ ux = ux_top = ux_bottom = bits->common.transform->matrix[0][0];
+ x = x_top = x_bottom = v.vector[0] - pixman_fixed_1/2;
+
+ y = v.vector[1] - pixman_fixed_1/2;
+ disty = (y >> 8) & 0xff;
+
+ /* Load the pointers to the first and second lines from the source
+ * image that the bilinear code must read.
+ *
+ * The main trick in this code is checking whether either line lies
+ * outside of the image.
+ *
+ * When a line (either one) is outside, its pointer is redirected to
+ * a dummy area filled with zeros. Once redirected, the pointer must
+ * not move, so the per-line position and increment variables are
+ * zeroed for that line before entering the loop.
+ */
+ y1 = pixman_fixed_to_int (y);
+ y2 = y1 + 1;
+
+ if (y1 < 0 || y1 >= bits->height)
+ {
+ top_row = zero;
+ x_top = 0;
+ ux_top = 0;
+ }
+ else
+ {
+ top_row = bits->bits + y1 * bits->rowstride;
+ x_top = x;
+ ux_top = ux;
+ }
+
+ if (y2 < 0 || y2 >= bits->height)
+ {
+ bottom_row = zero;
+ x_bottom = 0;
+ ux_bottom = 0;
+ }
+ else
+ {
+ bottom_row = bits->bits + y2 * bits->rowstride;
+ x_bottom = x;
+ ux_bottom = ux;
+ }
+
+ /* Instead of checking whether the operation uses the mask in
+ * each loop iteration, verify this only once and prepare the
+ * variables to make the code smaller inside the loop.
+ */
+ if (!mask)
+ {
+ mask_inc = 0;
+ mask_bits = 1;
+ mask = &mask_bits;
+ }
+ else
+ {
+ /* If we have a mask, prepare the variables to check it */
+ mask_inc = 1;
+ }
+
+ /* If both are zero, then the whole thing is zero */
+ if (top_row == zero && bottom_row == zero)
+ {
+ memset (buffer, 0, width * sizeof (uint32_t));
+ return;
+ }
+ else if (bits->format == PIXMAN_x8r8g8b8)
+ {
+ if (top_row == zero)
+ {
+ top_mask = 0;
+ bottom_mask = 0xff000000;
+ }
+ else if (bottom_row == zero)
+ {
+ top_mask = 0xff000000;
+ bottom_mask = 0;
+ }
+ else
+ {
+ top_mask = 0xff000000;
+ bottom_mask = 0xff000000;
+ }
+ }
+ else
+ {
+ top_mask = 0;
+ bottom_mask = 0;
+ }
+
+ end = buffer + width;
+
+ /* Zero fill to the left of the image */
+ while (buffer < end && x < pixman_fixed_minus_1)
+ {
+ *buffer++ = 0;
+ x += ux;
+ x_top += ux_top;
+ x_bottom += ux_bottom;
+ mask += mask_inc;
+ }
+
+ /* Left edge
+ */
+ while (buffer < end && x < 0)
+ {
+ uint32_t tr, br;
+ int32_t distx;
+
+ tr = top_row[pixman_fixed_to_int (x_top) + 1] | top_mask;
+ br = bottom_row[pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;
+
+ distx = (x >> 8) & 0xff;
+
+ *buffer++ = bilinear_interpolation (0, tr, 0, br, distx, disty);
+
+ x += ux;
+ x_top += ux_top;
+ x_bottom += ux_bottom;
+ mask += mask_inc;
+ }
+
+ /* Main part */
+ w = pixman_int_to_fixed (bits->width - 1);
+
+ while (buffer < end && x < w)
+ {
+ if (*mask)
+ {
+ uint32_t tl, tr, bl, br;
+ int32_t distx;
+
+ tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
+ tr = top_row [pixman_fixed_to_int (x_top) + 1] | top_mask;
+ bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
+ br = bottom_row [pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;
+
+ distx = (x >> 8) & 0xff;
+
+ *buffer = bilinear_interpolation (tl, tr, bl, br, distx, disty);
+ }
+
+ buffer++;
+ x += ux;
+ x_top += ux_top;
+ x_bottom += ux_bottom;
+ mask += mask_inc;
+ }
+
+ /* Right Edge */
+ w = pixman_int_to_fixed (bits->width);
+ while (buffer < end && x < w)
+ {
+ if (*mask)
+ {
+ uint32_t tl, bl;
+ int32_t distx;
+
+ tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
+ bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
+
+ distx = (x >> 8) & 0xff;
+
+ *buffer = bilinear_interpolation (tl, 0, bl, 0, distx, disty);
+ }
+
+ buffer++;
+ x += ux;
+ x_top += ux_top;
+ x_bottom += ux_bottom;
+ mask += mask_inc;
+ }
+
+ /* Zero fill to the right of the image */
+ while (buffer < end)
+ *buffer++ = 0;
}
static force_inline uint32_t
@@ -339,6 +628,9 @@ bits_image_fetch_pixel_filtered (bits_image_t *image,
case PIXMAN_FILTER_CONVOLUTION:
return bits_image_fetch_pixel_convolution (image, x, y);
break;
+
+ default:
+ break;
}
return 0;
@@ -664,6 +956,24 @@ bits_image_property_changed (pixman_image_t *image)
image->common.get_scanline_64 = bits_image_fetch_untransformed_64;
image->common.get_scanline_32 = bits_image_fetch_untransformed_32;
}
+ else if (bits->common.transform &&
+ bits->common.transform->matrix[2][0] == 0 &&
+ bits->common.transform->matrix[2][1] == 0 &&
+ bits->common.transform->matrix[2][2] == pixman_fixed_1 &&
+ bits->common.transform->matrix[0][0] > 0 &&
+ bits->common.transform->matrix[1][0] == 0 &&
+ (bits->common.filter == PIXMAN_FILTER_BILINEAR ||
+ bits->common.filter == PIXMAN_FILTER_GOOD ||
+ bits->common.filter == PIXMAN_FILTER_BEST) &&
+ bits->common.repeat == PIXMAN_REPEAT_NONE &&
+ (bits->format == PIXMAN_a8r8g8b8 ||
+ bits->format == PIXMAN_x8r8g8b8))
+ {
+ image->common.get_scanline_64 =
+ _pixman_image_get_scanline_generic_64;
+ image->common.get_scanline_32 =
+ bits_image_fetch_bilinear_no_repeat_8888;
+ }
else
{
image->common.get_scanline_64 =
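/* Illustrative sketch, not part of the patch: one source-image setup that
 * satisfies the new condition above and therefore gets routed to
 * bits_image_fetch_bilinear_no_repeat_8888 -- an axis-aligned scale (no
 * rotation, no perspective, positive x scale) on an a8r8g8b8 or x8r8g8b8
 * image with a bilinear-class filter and PIXMAN_REPEAT_NONE.  'src' is a
 * hypothetical pixman_image_t * created elsewhere.
 */
#include <string.h>
#include <pixman.h>

static void
use_bilinear_no_repeat_path (pixman_image_t *src)
{
    pixman_transform_t t;

    memset (&t, 0, sizeof t);
    t.matrix[0][0] = pixman_fixed_1 / 2;  /* x scale > 0 (2x upscale)  */
    t.matrix[1][1] = pixman_fixed_1 / 2;  /* y scale                   */
    t.matrix[2][2] = pixman_fixed_1;      /* no perspective component  */

    pixman_image_set_transform (src, &t);
    pixman_image_set_filter (src, PIXMAN_FILTER_BILINEAR, NULL, 0);
    pixman_image_set_repeat (src, PIXMAN_REPEAT_NONE);
}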
diff --git a/pixman/pixman/pixman-edge.c b/pixman/pixman/pixman-edge.c
index 81a2e960a..ab9bea0f8 100644
--- a/pixman/pixman/pixman-edge.c
+++ b/pixman/pixman/pixman-edge.c
@@ -358,6 +358,9 @@ PIXMAN_RASTERIZE_EDGES (pixman_image_t *image,
case 8:
rasterize_edges_8 (image, l, r, t, b);
break;
+
+ default:
+ break;
}
}
diff --git a/pixman/pixman/pixman-fast-path.c b/pixman/pixman/pixman-fast-path.c
index 5ab8d8c99..75a0c1e07 100644
--- a/pixman/pixman/pixman-fast-path.c
+++ b/pixman/pixman/pixman-fast-path.c
@@ -723,6 +723,7 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
}
}
+#if 0
static void
fast_composite_over_8888_0888 (pixman_implementation_t *imp,
pixman_op_t op,
@@ -773,6 +774,7 @@ fast_composite_over_8888_0888 (pixman_implementation_t *imp,
}
}
}
+#endif
static void
fast_composite_over_8888_0565 (pixman_implementation_t *imp,
@@ -971,19 +973,19 @@ fast_composite_add_8888_8888 (pixman_implementation_t *imp,
}
static void
-fast_composite_add_8888_8_8 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
+fast_composite_add_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
{
uint8_t *dst_line, *dst;
uint8_t *mask_line, *mask;
@@ -1023,6 +1025,254 @@ fast_composite_add_8888_8_8 (pixman_implementation_t *imp,
}
}
+#ifdef WORDS_BIGENDIAN
+#define CREATE_BITMASK(n) (0x80000000 >> (n))
+#define UPDATE_BITMASK(n) ((n) >> 1)
+#else
+#define CREATE_BITMASK(n) (1 << (n))
+#define UPDATE_BITMASK(n) ((n) << 1)
+#endif
+
+#define TEST_BIT(p, n) \
+ (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
+#define SET_BIT(p, n) \
+ do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0);
+
+static void
+fast_composite_add_1000_1000 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int dst_stride, src_stride;
+ int32_t w;
+
+ PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
+ src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (dst_image, 0, dest_y, uint32_t,
+ dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w--)
+ {
+ /*
+ * TODO: improve performance by processing uint32_t data instead
+ * of individual bits
+ */
+ if (TEST_BIT (src, src_x + w))
+ SET_BIT (dst, dest_x + w);
+ }
+ }
+}
+
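/* Illustrative sketch, not part of the patch: how the TEST_BIT/SET_BIT
 * macros above address an a1 bitmap.  Bit n of a scanline lives in 32-bit
 * word n >> 5, and CREATE_BITMASK selects the bit within that word according
 * to the host byte order, which is the access pattern the new
 * fast_composite_add_1000_1000 relies on.  Reuses the macros defined above;
 * the helper name is hypothetical.
 */
#include <stdint.h>

static int
count_and_copy_set_bits (const uint32_t *src, uint32_t *dst, int n_bits)
{
    int i, copied = 0;

    for (i = 0; i < n_bits; i++)
    {
	if (TEST_BIT (src, i))
	{
	    SET_BIT (dst, i);
	    copied++;
	}
    }

    return copied;
}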
+static void
+fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst, *dst_line;
+ uint32_t *mask, *mask_line;
+ int mask_stride, dst_stride;
+ uint32_t bitcache, bitmask;
+ int32_t w;
+
+ if (width <= 0)
+ return;
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
+ dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
+ mask_stride, mask_line, 1);
+ mask_line += mask_x >> 5;
+
+ if (srca == 0xff)
+ {
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (mask_x & 31);
+
+ while (w--)
+ {
+ if (bitmask == 0)
+ {
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (0);
+ }
+ if (bitcache & bitmask)
+ *dst = src;
+ bitmask = UPDATE_BITMASK (bitmask);
+ dst++;
+ }
+ }
+ }
+ else
+ {
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (mask_x & 31);
+
+ while (w--)
+ {
+ if (bitmask == 0)
+ {
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (0);
+ }
+ if (bitcache & bitmask)
+ *dst = over (src, *dst);
+ bitmask = UPDATE_BITMASK (bitmask);
+ dst++;
+ }
+ }
+ }
+}
+
+static void
+fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint16_t *dst, *dst_line;
+ uint32_t *mask, *mask_line;
+ int mask_stride, dst_stride;
+ uint32_t bitcache, bitmask;
+ int32_t w;
+ uint32_t d;
+ uint16_t src565;
+
+ if (width <= 0)
+ return;
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
+ dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
+ mask_stride, mask_line, 1);
+ mask_line += mask_x >> 5;
+
+ if (srca == 0xff)
+ {
+ src565 = CONVERT_8888_TO_0565 (src);
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (mask_x & 31);
+
+ while (w--)
+ {
+ if (bitmask == 0)
+ {
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (0);
+ }
+ if (bitcache & bitmask)
+ *dst = src565;
+ bitmask = UPDATE_BITMASK (bitmask);
+ dst++;
+ }
+ }
+ }
+ else
+ {
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (mask_x & 31);
+
+ while (w--)
+ {
+ if (bitmask == 0)
+ {
+ bitcache = *mask++;
+ bitmask = CREATE_BITMASK (0);
+ }
+ if (bitcache & bitmask)
+ {
+ d = over (src, CONVERT_0565_TO_0888 (*dst));
+ *dst = CONVERT_8888_TO_0565 (d);
+ }
+ bitmask = UPDATE_BITMASK (bitmask);
+ dst++;
+ }
+ }
+ }
+}
+
/*
* Simple bitblt
*/
@@ -1097,51 +1347,58 @@ fast_composite_src_8888_x888 (pixman_implementation_t *imp,
static const pixman_fast_path_t c_fast_paths[] =
{
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, fast_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, fast_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r8g8b8, fast_composite_over_n_8_0888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b8g8r8, fast_composite_over_n_8_0888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, fast_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5, fast_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_over_8888_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_over_8888_0565, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_add_8888_8888, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_add_8888_8888, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_add_8000_8000, 0 },
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_composite_add_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_add_8888_8_8, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_solid_fill, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_solid_fill, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_solid_fill, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_solid_fill, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8, fast_composite_solid_fill, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_solid_fill, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565, 0 },
- { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_in_8_8, 0 },
- { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_in_n_8_8, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, fast_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, fast_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r8g8b8, fast_composite_over_n_8_0888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b8g8r8, fast_composite_over_n_8_0888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_a8r8g8b8, fast_composite_over_n_1_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_x8r8g8b8, fast_composite_over_n_1_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_a8b8g8r8, fast_composite_over_n_1_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_x8b8g8r8, fast_composite_over_n_1_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_r5g6b5, fast_composite_over_n_1_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_b5g6r5, fast_composite_over_n_1_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, fast_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_x8r8g8b8, fast_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_r5g6b5, fast_composite_over_n_8888_0565_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_a8b8g8r8, fast_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_x8b8g8r8, fast_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_b5g6r5, fast_composite_over_n_8888_0565_ca },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_x888_8_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_x888_8_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_x888_8_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_x888_8_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_over_8888_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_over_8888_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_over_8888_0565, },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_over_8888_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_over_8888_8888, },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_over_8888_0565, },
+ { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_add_8888_8888, },
+ { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_add_8888_8888, },
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_add_8000_8000, },
+ { PIXMAN_OP_ADD, PIXMAN_a1, PIXMAN_null, PIXMAN_a1, fast_composite_add_1000_1000, },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, fast_composite_add_n_8888_8888_ca },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_add_n_8_8, },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_solid_fill },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_solid_fill },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_solid_fill },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_solid_fill },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8, fast_composite_solid_fill },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_solid_fill },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888 },
+ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888 },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888 },
+ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888 },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565 },
+ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565 },
+ { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_in_8_8, },
+ { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_in_n_8_8 },
{ PIXMAN_OP_NONE },
};
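The table added above associates an operator plus source/mask/destination formats with a specialized compositing routine, and it ends with a PIXMAN_OP_NONE sentinel entry. The following is a hypothetical sketch of how such a table is consulted; the type, field and enum names are illustrative stand-ins, not pixman's internal API.

/* Hypothetical sketch of a fast-path table lookup: scan entries until
 * the sentinel and return the first routine whose operator and formats
 * match.  Names and types are illustrative, not pixman's. */
#include <stddef.h>

enum { OP_NONE = 0 };   /* stand-in for the PIXMAN_OP_NONE sentinel */

typedef struct
{
    int op;                /* compositing operator             */
    int src_format;        /* required source format           */
    int mask_format;       /* required mask format (or "null") */
    int dest_format;       /* required destination format      */
    void (*func) (void);   /* specialized compositing routine  */
} fast_path_t;

static const fast_path_t *
lookup_fast_path (const fast_path_t *table,
                  int op, int src, int mask, int dest)
{
    const fast_path_t *e;

    for (e = table; e->op != OP_NONE; ++e)
    {
        if (e->op == op &&
            e->src_format == src &&
            e->mask_format == mask &&
            e->dest_format == dest)
        {
            return e;
        }
    }

    return NULL;
}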
diff --git a/pixman/pixman/pixman-general.c b/pixman/pixman/pixman-general.c
index 3ead3dac7..c96a3f97e 100644
--- a/pixman/pixman/pixman-general.c
+++ b/pixman/pixman/pixman-general.c
@@ -133,15 +133,27 @@ general_composite_rect (pixman_implementation_t *imp,
/* Skip the store step and composite directly into the
* destination if the output format of the compose func matches
* the destination format.
+ *
+ * If the destination format is a8r8g8b8 then we can always do
+ * this. If it is x8r8g8b8, then we can only do it if the
+ * operator doesn't make use of destination alpha.
*/
- if (!wide &&
- !dest->common.alpha_map &&
- !dest->bits.write_func &&
- (op == PIXMAN_OP_ADD || op == PIXMAN_OP_OVER) &&
- (dest->bits.format == PIXMAN_a8r8g8b8 ||
- dest->bits.format == PIXMAN_x8r8g8b8))
+ if ((dest->bits.format == PIXMAN_a8r8g8b8) ||
+ (dest->bits.format == PIXMAN_x8r8g8b8 &&
+ (op == PIXMAN_OP_OVER ||
+ op == PIXMAN_OP_ADD ||
+ op == PIXMAN_OP_SRC ||
+ op == PIXMAN_OP_CLEAR ||
+ op == PIXMAN_OP_IN_REVERSE ||
+ op == PIXMAN_OP_OUT_REVERSE ||
+ op == PIXMAN_OP_DST)))
{
- store = NULL;
+ if (!wide &&
+ !dest->common.alpha_map &&
+ !dest->bits.write_func)
+ {
+ store = NULL;
+ }
}
if (!store)
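The general path composes into a scratch a8r8g8b8 buffer and normally stores the result back through a format-specific store function; the hunk above widens the set of cases in which that store can be skipped so the combiner writes straight into the destination. A minimal sketch of the new condition, with illustrative enum names rather than the exact pixman code:

/* Sketch of the "write directly into the destination" test above;
 * the enum values are illustrative stand-ins for pixman's formats
 * and operators, not its real definitions. */
#include <stdbool.h>

typedef enum { FMT_A8R8G8B8, FMT_X8R8G8B8, FMT_OTHER } fmt_t;
typedef enum { OP_CLEAR, OP_SRC, OP_DST, OP_OVER, OP_IN_REVERSE,
               OP_OUT_REVERSE, OP_ADD, OP_OTHER } op_t;

static bool
can_skip_store (fmt_t dest_format, op_t op,
                bool wide, bool has_alpha_map, bool has_write_func)
{
    bool format_ok = false;

    if (dest_format == FMT_A8R8G8B8)
    {
        /* The combiners produce a8r8g8b8, so it can be stored as-is. */
        format_ok = true;
    }
    else if (dest_format == FMT_X8R8G8B8)
    {
        /* The stored alpha byte is ignored, so direct writes are safe
         * only when the operator never reads destination alpha. */
        format_ok = (op == OP_OVER || op == OP_ADD || op == OP_SRC ||
                     op == OP_CLEAR || op == OP_IN_REVERSE ||
                     op == OP_OUT_REVERSE || op == OP_DST);
    }

    return format_ok && !wide && !has_alpha_map && !has_write_func;
}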
diff --git a/pixman/pixman/pixman-image.c b/pixman/pixman/pixman-image.c
index bba5fd68c..e55ba2c15 100644
--- a/pixman/pixman/pixman-image.c
+++ b/pixman/pixman/pixman-image.c
@@ -48,8 +48,6 @@ _pixman_init_gradient (gradient_t * gradient,
gradient->n_stops = n_stops;
gradient->stop_range = 0xffff;
- gradient->color_table = NULL;
- gradient->color_table_size = 0;
gradient->common.class = SOURCE_IMAGE_CLASS_UNKNOWN;
return TRUE;
@@ -593,6 +591,10 @@ _pixman_image_is_opaque (pixman_image_t *image)
if (ALPHA_8 (image->solid.color) != 0xff)
return FALSE;
break;
+
+ default:
+ return FALSE;
+ break;
}
/* Convolution filters can introduce translucency if the sum of the
diff --git a/pixman/pixman/pixman-implementation.c b/pixman/pixman/pixman-implementation.c
index bcda9fe85..6488332b0 100644
--- a/pixman/pixman/pixman-implementation.c
+++ b/pixman/pixman/pixman-implementation.c
@@ -156,7 +156,7 @@ _pixman_implementation_create (pixman_implementation_t *delegate)
imp->blt = delegate_blt;
imp->fill = delegate_fill;
- for (i = 0; i < PIXMAN_OP_LAST; ++i)
+ for (i = 0; i < PIXMAN_N_OPERATORS; ++i)
{
imp->combine_32[i] = delegate_combine_32;
imp->combine_64[i] = delegate_combine_64;
diff --git a/pixman/pixman/pixman-mmx.c b/pixman/pixman/pixman-mmx.c
index 319c5a79d..afece1178 100644
--- a/pixman/pixman/pixman-mmx.c
+++ b/pixman/pixman/pixman-mmx.c
@@ -1,3418 +1,3414 @@
-/*
- * Copyright © 2004, 2005 Red Hat, Inc.
- * Copyright © 2004 Nicholas Miell
- * Copyright © 2005 Trolltech AS
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that
- * copyright notice and this permission notice appear in supporting
- * documentation, and that the name of Red Hat not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. Red Hat makes no representations about the
- * suitability of this software for any purpose. It is provided "as is"
- * without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
- * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
- * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
- * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
- *
- * Author: Søren Sandmann (sandmann@redhat.com)
- * Minor Improvements: Nicholas Miell (nmiell@gmail.com)
- * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
- *
- * Based on work by Owen Taylor
- */
-
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#ifdef USE_MMX
-
-#include <mmintrin.h>
-#include "pixman-private.h"
-#include "pixman-combine32.h"
-
-#define no_vERBOSE
-
-#ifdef VERBOSE
-#define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__)
-#else
-#define CHECKPOINT()
-#endif
-
-/* Notes about writing mmx code
- *
- * give memory operands as the second operand. If you give it as the
- * first, gcc will first load it into a register, then use that
- * register
- *
- * ie. use
- *
- * _mm_mullo_pi16 (x, mmx_constant);
- *
- * not
- *
- * _mm_mullo_pi16 (mmx_constant, x);
- *
- * Also try to minimize dependencies. i.e. when you need a value, try
- * to calculate it from a value that was calculated as early as
- * possible.
- */
-
-/* --------------- MMX primitives ------------------------------------- */
-
-#ifdef __GNUC__
-typedef uint64_t mmxdatafield;
-#else
-typedef __m64 mmxdatafield;
-/* If __m64 is defined as a struct or union, define M64_MEMBER to be the
- name of the member used to access the data */
-# ifdef _MSC_VER
-# define M64_MEMBER m64_u64
-# elif defined(__SUNPRO_C)
-# define M64_MEMBER l_
-# endif
-#endif
-
-typedef struct
-{
- mmxdatafield mmx_4x00ff;
- mmxdatafield mmx_4x0080;
- mmxdatafield mmx_565_rgb;
- mmxdatafield mmx_565_unpack_multiplier;
- mmxdatafield mmx_565_r;
- mmxdatafield mmx_565_g;
- mmxdatafield mmx_565_b;
- mmxdatafield mmx_mask_0;
- mmxdatafield mmx_mask_1;
- mmxdatafield mmx_mask_2;
- mmxdatafield mmx_mask_3;
- mmxdatafield mmx_full_alpha;
- mmxdatafield mmx_ffff0000ffff0000;
- mmxdatafield mmx_0000ffff00000000;
- mmxdatafield mmx_000000000000ffff;
-} mmx_data_t;
-
-#if defined(_MSC_VER)
-# define MMXDATA_INIT(field, val) { val ## UI64 }
-#elif defined(M64_MEMBER) /* __m64 is a struct, not an integral type */
-# define MMXDATA_INIT(field, val) field = { val ## ULL }
-#else /* __m64 is an integral type */
-# define MMXDATA_INIT(field, val) field = val ## ULL
-#endif
-
-static const mmx_data_t c =
-{
- MMXDATA_INIT (.mmx_4x00ff, 0x00ff00ff00ff00ff),
- MMXDATA_INIT (.mmx_4x0080, 0x0080008000800080),
- MMXDATA_INIT (.mmx_565_rgb, 0x000001f0003f001f),
- MMXDATA_INIT (.mmx_565_unpack_multiplier, 0x0000008404100840),
- MMXDATA_INIT (.mmx_565_r, 0x000000f800000000),
- MMXDATA_INIT (.mmx_565_g, 0x0000000000fc0000),
- MMXDATA_INIT (.mmx_565_b, 0x00000000000000f8),
- MMXDATA_INIT (.mmx_mask_0, 0xffffffffffff0000),
- MMXDATA_INIT (.mmx_mask_1, 0xffffffff0000ffff),
- MMXDATA_INIT (.mmx_mask_2, 0xffff0000ffffffff),
- MMXDATA_INIT (.mmx_mask_3, 0x0000ffffffffffff),
- MMXDATA_INIT (.mmx_full_alpha, 0x00ff000000000000),
- MMXDATA_INIT (.mmx_ffff0000ffff0000, 0xffff0000ffff0000),
- MMXDATA_INIT (.mmx_0000ffff00000000, 0x0000ffff00000000),
- MMXDATA_INIT (.mmx_000000000000ffff, 0x000000000000ffff),
-};
-
-#ifdef __GNUC__
-# ifdef __ICC
-# define MC(x) M64 (c.mmx_ ## x)
-# else
-# define MC(x) ((__m64)c.mmx_ ## x)
-# endif
-#else
-# define MC(x) c.mmx_ ## x
-#endif
-
-static force_inline __m64
-M64 (uint64_t x)
-{
-#ifdef __ICC
- return _mm_cvtsi64_m64 (x);
-#elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
- __m64 res;
-
- res.M64_MEMBER = x;
- return res;
-#else /* __m64 is an integral type */
- return (__m64)x;
-#endif
-}
-
-static force_inline uint64_t
-UINT64 (__m64 x)
-{
-#ifdef __ICC
- return _mm_cvtm64_si64 (x);
-#elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
- uint64_t res = x.M64_MEMBER;
- return res;
-#else /* __m64 is an integral type */
- return (uint64_t)x;
-#endif
-}
-
-static force_inline __m64
-shift (__m64 v,
- int s)
-{
- if (s > 0)
- return _mm_slli_si64 (v, s);
- else if (s < 0)
- return _mm_srli_si64 (v, -s);
- else
- return v;
-}
-
-static force_inline __m64
-negate (__m64 mask)
-{
- return _mm_xor_si64 (mask, MC (4x00ff));
-}
-
-static force_inline __m64
-pix_multiply (__m64 a, __m64 b)
-{
- __m64 res;
-
- res = _mm_mullo_pi16 (a, b);
- res = _mm_adds_pu16 (res, MC (4x0080));
- res = _mm_adds_pu16 (res, _mm_srli_pi16 (res, 8));
- res = _mm_srli_pi16 (res, 8);
-
- return res;
-}
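pix_multiply performs four parallel a*b/255 products per 64-bit register using the usual add-0x80, add-high-byte, shift-by-8 rounding trick. A scalar sketch of the same per-lane arithmetic, with an exhaustive check against the exactly rounded product (the helper name is illustrative, not part of the patch):

/* Scalar sketch of the rounding multiply performed per 16-bit lane
 * above: a*b/255, rounded to nearest, using only adds and shifts. */
#include <assert.h>
#include <stdint.h>

static uint8_t
mul_un8 (uint8_t a, uint8_t b)
{
    uint32_t t = (uint32_t)a * b + 0x80;

    return (uint8_t)((t + (t >> 8)) >> 8);
}

int
main (void)
{
    unsigned a, b;

    /* Verify against the exact rounded product for all 8-bit inputs. */
    for (a = 0; a < 256; a++)
        for (b = 0; b < 256; b++)
            assert (mul_un8 (a, b) == (a * b + 127) / 255);

    return 0;
}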
-
-static force_inline __m64
-pix_add (__m64 a, __m64 b)
-{
- return _mm_adds_pu8 (a, b);
-}
-
-static force_inline __m64
-expand_alpha (__m64 pixel)
-{
- __m64 t1, t2;
-
- t1 = shift (pixel, -48);
- t2 = shift (t1, 16);
- t1 = _mm_or_si64 (t1, t2);
- t2 = shift (t1, 32);
- t1 = _mm_or_si64 (t1, t2);
-
- return t1;
-}
-
-static force_inline __m64
-expand_alpha_rev (__m64 pixel)
-{
- __m64 t1, t2;
-
- /* move alpha to low 16 bits and zero the rest */
- t1 = shift (pixel, 48);
- t1 = shift (t1, -48);
-
- t2 = shift (t1, 16);
- t1 = _mm_or_si64 (t1, t2);
- t2 = shift (t1, 32);
- t1 = _mm_or_si64 (t1, t2);
-
- return t1;
-}
-
-static force_inline __m64
-invert_colors (__m64 pixel)
-{
- __m64 x, y, z;
-
- x = y = z = pixel;
-
- x = _mm_and_si64 (x, MC (ffff0000ffff0000));
- y = _mm_and_si64 (y, MC (000000000000ffff));
- z = _mm_and_si64 (z, MC (0000ffff00000000));
-
- y = shift (y, 32);
- z = shift (z, -32);
-
- x = _mm_or_si64 (x, y);
- x = _mm_or_si64 (x, z);
-
- return x;
-}
-
-static force_inline __m64
-over (__m64 src,
- __m64 srca,
- __m64 dest)
-{
- return _mm_adds_pu8 (src, pix_multiply (dest, negate (srca)));
-}
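over() is the premultiplied Porter-Duff OVER: the destination is scaled by the complement of the source alpha and the source is added on top with saturation, i.e. per channel dst' = src + dst * (255 - alpha_src) / 255. A scalar sketch of that computation (illustrative helpers, not part of the patch):

/* Scalar per-channel sketch of the premultiplied OVER computed by
 * over() above.  mul_un8() is the rounding multiply sketched earlier. */
#include <stdint.h>

static uint8_t
mul_un8 (uint8_t a, uint8_t b)
{
    uint32_t t = (uint32_t)a * b + 0x80;
    return (uint8_t)((t + (t >> 8)) >> 8);
}

static uint8_t
over_un8 (uint8_t src, uint8_t src_alpha, uint8_t dst)
{
    uint32_t r = src + mul_un8 (dst, 255 - src_alpha);

    return r > 255 ? 255 : (uint8_t)r;  /* saturating add, like _mm_adds_pu8 */
}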
-
-static force_inline __m64
-over_rev_non_pre (__m64 src, __m64 dest)
-{
- __m64 srca = expand_alpha (src);
- __m64 srcfaaa = _mm_or_si64 (srca, MC (full_alpha));
-
- return over (pix_multiply (invert_colors (src), srcfaaa), srca, dest);
-}
-
-static force_inline __m64
-in (__m64 src, __m64 mask)
-{
- return pix_multiply (src, mask);
-}
-
-static force_inline __m64
-in_over_full_src_alpha (__m64 src, __m64 mask, __m64 dest)
-{
- src = _mm_or_si64 (src, MC (full_alpha));
-
- return over (in (src, mask), mask, dest);
-}
-
-#ifndef _MSC_VER
-static force_inline __m64
-in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
-{
- return over (in (src, mask), pix_multiply (srca, mask), dest);
-}
-
-#else
-
-#define in_over(src, srca, mask, dest) \
- over (in (src, mask), pix_multiply (srca, mask), dest)
-
-#endif
-
-static force_inline __m64
-load8888 (uint32_t v)
-{
- return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (v), _mm_setzero_si64 ());
-}
-
-static force_inline __m64
-pack8888 (__m64 lo, __m64 hi)
-{
- return _mm_packs_pu16 (lo, hi);
-}
-
-static force_inline uint32_t
-store8888 (__m64 v)
-{
- return _mm_cvtsi64_si32 (pack8888 (v, _mm_setzero_si64 ()));
-}
-
-/* Expand 16 bits positioned at @pos (0-3) of a mmx register into
- *
- * 00RR00GG00BB
- *
- * --- Expanding 565 in the low word ---
- *
- * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
- * m = m & (01f0003f001f);
- * m = m * (008404100840);
- * m = m >> 8;
- *
- * Note the trick here - the top word is shifted by another nibble to
- * avoid it bumping into the middle word
- */
-static force_inline __m64
-expand565 (__m64 pixel, int pos)
-{
- __m64 p = pixel;
- __m64 t1, t2;
-
- /* move pixel to low 16 bit and zero the rest */
- p = shift (shift (p, (3 - pos) * 16), -48);
-
- t1 = shift (p, 36 - 11);
- t2 = shift (p, 16 - 5);
-
- p = _mm_or_si64 (t1, p);
- p = _mm_or_si64 (t2, p);
- p = _mm_and_si64 (p, MC (565_rgb));
-
- pixel = _mm_mullo_pi16 (p, MC (565_unpack_multiplier));
- return _mm_srli_pi16 (pixel, 8);
-}
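The shift/mask/multiply sequence above is equivalent to the usual bit replication used when widening 5- and 6-bit channels to 8 bits: each per-lane multiply followed by the shift right by 8 reproduces (c << 3) | (c >> 2) for the 5-bit channels and (c << 2) | (c >> 4) for green. A scalar sketch of the same conversion (illustrative helper, not part of the patch):

/* Scalar sketch of what expand565() computes for one r5g6b5 pixel:
 * widen each channel to 8 bits by replicating its top bits. */
#include <stdint.h>

static void
expand565_scalar (uint16_t p, uint8_t *r, uint8_t *g, uint8_t *b)
{
    uint8_t r5 = (p >> 11) & 0x1f;
    uint8_t g6 = (p >> 5) & 0x3f;
    uint8_t b5 = p & 0x1f;

    *r = (uint8_t)((r5 << 3) | (r5 >> 2));
    *g = (uint8_t)((g6 << 2) | (g6 >> 4));
    *b = (uint8_t)((b5 << 3) | (b5 >> 2));
}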
-
-static force_inline __m64
-expand8888 (__m64 in, int pos)
-{
- if (pos == 0)
- return _mm_unpacklo_pi8 (in, _mm_setzero_si64 ());
- else
- return _mm_unpackhi_pi8 (in, _mm_setzero_si64 ());
-}
-
-static force_inline __m64
-expandx888 (__m64 in, int pos)
-{
- return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha));
-}
-
-static force_inline __m64
-pack_565 (__m64 pixel, __m64 target, int pos)
-{
- __m64 p = pixel;
- __m64 t = target;
- __m64 r, g, b;
-
- r = _mm_and_si64 (p, MC (565_r));
- g = _mm_and_si64 (p, MC (565_g));
- b = _mm_and_si64 (p, MC (565_b));
-
- r = shift (r, -(32 - 8) + pos * 16);
- g = shift (g, -(16 - 3) + pos * 16);
- b = shift (b, -(0 + 3) + pos * 16);
-
- if (pos == 0)
- t = _mm_and_si64 (t, MC (mask_0));
- else if (pos == 1)
- t = _mm_and_si64 (t, MC (mask_1));
- else if (pos == 2)
- t = _mm_and_si64 (t, MC (mask_2));
- else if (pos == 3)
- t = _mm_and_si64 (t, MC (mask_3));
-
- p = _mm_or_si64 (r, t);
- p = _mm_or_si64 (g, p);
-
- return _mm_or_si64 (b, p);
-}
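pack_565() is the inverse operation: it keeps the top 5/6/5 bits of each 8-bit channel and repositions them into the selected 16-bit lane of the target, leaving the other lanes untouched. A scalar sketch of the per-pixel packing (illustrative helper, not part of the patch):

/* Scalar sketch of the channel packing performed by pack_565() for a
 * single pixel. */
#include <stdint.h>

static uint16_t
pack_565_scalar (uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xf8) << 8) |   /* top 5 bits of red   */
                      ((g & 0xfc) << 3) |   /* top 6 bits of green */
                      (b >> 3));            /* top 5 bits of blue  */
}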
-
-#ifndef _MSC_VER
-
-static force_inline __m64
-pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
-{
- x = pix_multiply (x, a);
- y = pix_multiply (y, b);
-
- return pix_add (x, y);
-}
-
-#else
-
-#define pix_add_mul(x, a, y, b) \
- ( x = pix_multiply (x, a), \
- y = pix_multiply (y, a), \
- pix_add (x, y) )
-
-#endif
-
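Note that, as reproduced above, the MSVC fallback macro multiplies both x and y by a, whereas the inline version multiplies y by b. If that difference is unintentional, a variant matching the inline function would look like the sketch below; it is illustrative only and not part of the lines removed by this patch.

/* Sketch of a pix_add_mul macro matching the inline version, i.e.
 * x*a + y*b rather than x*a + y*a.  Illustrative only. */
#define pix_add_mul(x, a, y, b)     \
    ( x = pix_multiply (x, a),      \
      y = pix_multiply (y, b),      \
      pix_add (x, y) )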
-/* --------------- MMX code patch for fbcompose.c --------------------- */
-
-static force_inline uint32_t
-combine (const uint32_t *src, const uint32_t *mask)
-{
- uint32_t ssrc = *src;
-
- if (mask)
- {
- __m64 m = load8888 (*mask);
- __m64 s = load8888 (ssrc);
-
- m = expand_alpha (m);
- s = pix_multiply (s, m);
-
- ssrc = store8888 (s);
- }
-
- return ssrc;
-}
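combine() implements the unified-alpha handling shared by the mmx_combine_*_u functions that follow: when a mask is present, the source pixel is scaled by the mask's alpha before the operator proper is applied. A scalar sketch of the same step (illustrative helpers, not part of the patch):

/* Scalar sketch of combine(): scale every channel of an a8r8g8b8
 * source pixel by the mask pixel's alpha.  mul_un8() is the rounding
 * multiply sketched earlier. */
#include <stdint.h>

static uint8_t
mul_un8 (uint8_t a, uint8_t b)
{
    uint32_t t = (uint32_t)a * b + 0x80;
    return (uint8_t)((t + (t >> 8)) >> 8);
}

static uint32_t
combine_scalar (uint32_t src, const uint32_t *mask)
{
    uint8_t ma;
    uint32_t a, r, g, b;

    if (!mask)
        return src;

    ma = (uint8_t)(*mask >> 24);

    a = mul_un8 ((uint8_t)(src >> 24), ma);
    r = mul_un8 ((uint8_t)(src >> 16), ma);
    g = mul_un8 ((uint8_t)(src >> 8), ma);
    b = mul_un8 ((uint8_t)src, ma);

    return (a << 24) | (r << 16) | (g << 8) | b;
}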
-
-static void
-mmx_combine_over_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- uint32_t ssrc = combine (src, mask);
- uint32_t a = ssrc >> 24;
-
- if (a == 0xff)
- {
- *dest = ssrc;
- }
- else if (ssrc)
- {
- __m64 s, sa;
- s = load8888 (ssrc);
- sa = expand_alpha (s);
- *dest = store8888 (over (s, sa, load8888 (*dest)));
- }
-
- ++dest;
- ++src;
- if (mask)
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_over_reverse_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 d, da;
- uint32_t s = combine (src, mask);
-
- d = load8888 (*dest);
- da = expand_alpha (d);
- *dest = store8888 (over (d, da, load8888 (s)));
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_in_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 x, a;
-
- x = load8888 (combine (src, mask));
- a = load8888 (*dest);
- a = expand_alpha (a);
- x = pix_multiply (x, a);
-
- *dest = store8888 (x);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_in_reverse_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 x, a;
-
- x = load8888 (*dest);
- a = load8888 (combine (src, mask));
- a = expand_alpha (a);
- x = pix_multiply (x, a);
- *dest = store8888 (x);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_out_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 x, a;
-
- x = load8888 (combine (src, mask));
- a = load8888 (*dest);
- a = expand_alpha (a);
- a = negate (a);
- x = pix_multiply (x, a);
- *dest = store8888 (x);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_out_reverse_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 x, a;
-
- x = load8888 (*dest);
- a = load8888 (combine (src, mask));
- a = expand_alpha (a);
- a = negate (a);
- x = pix_multiply (x, a);
-
- *dest = store8888 (x);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_atop_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 s, da, d, sia;
-
- s = load8888 (combine (src, mask));
- d = load8888 (*dest);
- sia = expand_alpha (s);
- sia = negate (sia);
- da = expand_alpha (d);
- s = pix_add_mul (s, da, d, sia);
- *dest = store8888 (s);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_atop_reverse_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end;
-
- end = dest + width;
-
- while (dest < end)
- {
- __m64 s, dia, d, sa;
-
- s = load8888 (combine (src, mask));
- d = load8888 (*dest);
- sa = expand_alpha (s);
- dia = expand_alpha (d);
- dia = negate (dia);
- s = pix_add_mul (s, dia, d, sa);
- *dest = store8888 (s);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_xor_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 s, dia, d, sia;
-
- s = load8888 (combine (src, mask));
- d = load8888 (*dest);
- sia = expand_alpha (s);
- dia = expand_alpha (d);
- sia = negate (sia);
- dia = negate (dia);
- s = pix_add_mul (s, dia, d, sia);
- *dest = store8888 (s);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_add_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- __m64 s, d;
-
- s = load8888 (combine (src, mask));
- d = load8888 (*dest);
- s = pix_add (s, d);
- *dest = store8888 (s);
-
- ++dest;
- ++src;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_saturate_u (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = dest + width;
-
- while (dest < end)
- {
- uint32_t s = combine (src, mask);
- uint32_t d = *dest;
- __m64 ms = load8888 (s);
- __m64 md = load8888 (d);
- uint32_t sa = s >> 24;
- uint32_t da = ~d >> 24;
-
- if (sa > da)
- {
- __m64 msa = load8888 (DIV_UN8 (da, sa) << 24);
- msa = expand_alpha (msa);
- ms = pix_multiply (ms, msa);
- }
-
- md = pix_add (md, ms);
- *dest = store8888 (md);
-
- ++src;
- ++dest;
- if (mask)
- mask++;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_src_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
-
- s = pix_multiply (s, a);
- *dest = store8888 (s);
-
- ++src;
- ++mask;
- ++dest;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_over_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 sa = expand_alpha (s);
-
- *dest = store8888 (in_over (s, sa, a, d));
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_over_reverse_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 da = expand_alpha (d);
-
- *dest = store8888 (over (d, da, in (s, a)));
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_in_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 da = expand_alpha (d);
-
- s = pix_multiply (s, a);
- s = pix_multiply (s, da);
- *dest = store8888 (s);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_in_reverse_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 sa = expand_alpha (s);
-
- a = pix_multiply (a, sa);
- d = pix_multiply (d, a);
- *dest = store8888 (d);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_out_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 da = expand_alpha (d);
-
- da = negate (da);
- s = pix_multiply (s, a);
- s = pix_multiply (s, da);
- *dest = store8888 (s);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_out_reverse_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 sa = expand_alpha (s);
-
- a = pix_multiply (a, sa);
- a = negate (a);
- d = pix_multiply (d, a);
- *dest = store8888 (d);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_atop_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 da = expand_alpha (d);
- __m64 sa = expand_alpha (s);
-
- s = pix_multiply (s, a);
- a = pix_multiply (a, sa);
- a = negate (a);
- d = pix_add_mul (d, a, s, da);
- *dest = store8888 (d);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 da = expand_alpha (d);
- __m64 sa = expand_alpha (s);
-
- s = pix_multiply (s, a);
- a = pix_multiply (a, sa);
- da = negate (da);
- d = pix_add_mul (d, a, s, da);
- *dest = store8888 (d);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_xor_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
- __m64 da = expand_alpha (d);
- __m64 sa = expand_alpha (s);
-
- s = pix_multiply (s, a);
- a = pix_multiply (a, sa);
- da = negate (da);
- a = negate (a);
- d = pix_add_mul (d, a, s, da);
- *dest = store8888 (d);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-static void
-mmx_combine_add_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- uint32_t * dest,
- const uint32_t * src,
- const uint32_t * mask,
- int width)
-{
- const uint32_t *end = src + width;
-
- while (src < end)
- {
- __m64 a = load8888 (*mask);
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dest);
-
- s = pix_multiply (s, a);
- d = pix_add (s, d);
- *dest = store8888 (d);
-
- ++src;
- ++dest;
- ++mask;
- }
- _mm_empty ();
-}
-
-/* ------------- MMX code paths called from fbpict.c -------------------- */
-
-static void
-mmx_composite_over_n_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src;
- uint32_t *dst_line, *dst;
- uint16_t w;
- int dst_stride;
- __m64 vsrc, vsrca;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- if (src == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));
-
- w--;
- dst++;
- }
-
- while (w >= 2)
- {
- __m64 vdest;
- __m64 dest0, dest1;
-
- vdest = *(__m64 *)dst;
-
- dest0 = over (vsrc, vsrca, expand8888 (vdest, 0));
- dest1 = over (vsrc, vsrca, expand8888 (vdest, 1));
-
- *(__m64 *)dst = pack8888 (dest0, dest1);
-
- dst += 2;
- w -= 2;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));
-
- w--;
- dst++;
- }
- }
-
- _mm_empty ();
-}
-
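This routine shows the loop structure used by most of the MMX fast paths in this file: a scalar head loop runs until the destination pointer is 8-byte aligned, an unrolled body then processes two (or four) pixels per iteration with 64-bit loads and stores, and a scalar tail handles any leftover pixels. A skeleton of that pattern, with placeholder per-pixel helpers that are illustrative rather than pixman functions:

/* Skeleton of the head/body/tail pattern used by the MMX fast paths.
 * process_one() and process_two() stand in for the per-pixel work. */
#include <stdint.h>

static void process_one (uint32_t *dst) { (void)dst; }
static void process_two (uint32_t *dst) { (void)dst; }

static void
composite_row (uint32_t *dst, int w)
{
    /* Head: advance until dst is 8-byte aligned for the 64-bit stores. */
    while (w && ((uintptr_t)dst & 7))
    {
        process_one (dst);
        dst++;
        w--;
    }

    /* Body: two 32-bit pixels per aligned 64-bit access. */
    while (w >= 2)
    {
        process_two (dst);
        dst += 2;
        w -= 2;
    }

    /* Tail: remaining pixel, if any. */
    while (w)
    {
        process_one (dst);
        dst++;
        w--;
    }
}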
-static void
-mmx_composite_over_n_0565 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src;
- uint16_t *dst_line, *dst;
- uint16_t w;
- int dst_stride;
- __m64 vsrc, vsrca;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- if (src == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- uint64_t d = *dst;
- __m64 vdest = expand565 (M64 (d), 0);
-
- vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
- *dst = UINT64 (vdest);
-
- w--;
- dst++;
- }
-
- while (w >= 4)
- {
- __m64 vdest;
-
- vdest = *(__m64 *)dst;
-
- vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 0)), vdest, 0);
- vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 1)), vdest, 1);
- vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 2)), vdest, 2);
- vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 3)), vdest, 3);
-
- *(__m64 *)dst = vdest;
-
- dst += 4;
- w -= 4;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- uint64_t d = *dst;
- __m64 vdest = expand565 (M64 (d), 0);
-
- vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
- *dst = UINT64 (vdest);
-
- w--;
- dst++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint32_t *dst_line;
- uint32_t *mask_line;
- int dst_stride, mask_stride;
- __m64 vsrc, vsrca;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- srca = src >> 24;
- if (src == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- int twidth = width;
- uint32_t *p = (uint32_t *)mask_line;
- uint32_t *q = (uint32_t *)dst_line;
-
- while (twidth && (unsigned long)q & 7)
- {
- uint32_t m = *(uint32_t *)p;
-
- if (m)
- {
- __m64 vdest = load8888 (*q);
- vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
- *q = store8888 (vdest);
- }
-
- twidth--;
- p++;
- q++;
- }
-
- while (twidth >= 2)
- {
- uint32_t m0, m1;
- m0 = *p;
- m1 = *(p + 1);
-
- if (m0 | m1)
- {
- __m64 dest0, dest1;
- __m64 vdest = *(__m64 *)q;
-
- dest0 = in_over (vsrc, vsrca, load8888 (m0),
- expand8888 (vdest, 0));
- dest1 = in_over (vsrc, vsrca, load8888 (m1),
- expand8888 (vdest, 1));
-
- *(__m64 *)q = pack8888 (dest0, dest1);
- }
-
- p += 2;
- q += 2;
- twidth -= 2;
- }
-
- while (twidth)
- {
- uint32_t m = *(uint32_t *)p;
-
- if (m)
- {
- __m64 vdest = load8888 (*q);
- vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
- *q = store8888 (vdest);
- }
-
- twidth--;
- p++;
- q++;
- }
-
- dst_line += dst_stride;
- mask_line += mask_stride;
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- uint32_t mask;
- __m64 vmask;
- int dst_stride, src_stride;
- uint16_t w;
- __m64 srca;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
- mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
- mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
- vmask = load8888 (mask);
- srca = MC (4x00ff);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w && (unsigned long)dst & 7)
- {
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dst);
-
- *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
-
- w--;
- dst++;
- src++;
- }
-
- while (w >= 2)
- {
- __m64 vs = *(__m64 *)src;
- __m64 vd = *(__m64 *)dst;
- __m64 vsrc0 = expand8888 (vs, 0);
- __m64 vsrc1 = expand8888 (vs, 1);
-
- *(__m64 *)dst = pack8888 (
- in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)),
- in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1)));
-
- w -= 2;
- dst += 2;
- src += 2;
- }
-
- while (w)
- {
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dst);
-
- *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
-
- w--;
- dst++;
- src++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- uint32_t mask;
- __m64 vmask;
- int dst_stride, src_stride;
- uint16_t w;
- __m64 srca;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
-
- mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
- vmask = load8888 (mask);
- srca = MC (4x00ff);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w && (unsigned long)dst & 7)
- {
- __m64 s = load8888 (*src | 0xff000000);
- __m64 d = load8888 (*dst);
-
- *dst = store8888 (in_over (s, srca, vmask, d));
-
- w--;
- dst++;
- src++;
- }
-
- while (w >= 16)
- {
- __m64 vd0 = *(__m64 *)(dst + 0);
- __m64 vd1 = *(__m64 *)(dst + 2);
- __m64 vd2 = *(__m64 *)(dst + 4);
- __m64 vd3 = *(__m64 *)(dst + 6);
- __m64 vd4 = *(__m64 *)(dst + 8);
- __m64 vd5 = *(__m64 *)(dst + 10);
- __m64 vd6 = *(__m64 *)(dst + 12);
- __m64 vd7 = *(__m64 *)(dst + 14);
-
- __m64 vs0 = *(__m64 *)(src + 0);
- __m64 vs1 = *(__m64 *)(src + 2);
- __m64 vs2 = *(__m64 *)(src + 4);
- __m64 vs3 = *(__m64 *)(src + 6);
- __m64 vs4 = *(__m64 *)(src + 8);
- __m64 vs5 = *(__m64 *)(src + 10);
- __m64 vs6 = *(__m64 *)(src + 12);
- __m64 vs7 = *(__m64 *)(src + 14);
-
- vd0 = pack8888 (
- in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
- in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1)));
-
- vd1 = pack8888 (
- in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)),
- in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1)));
-
- vd2 = pack8888 (
- in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)),
- in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1)));
-
- vd3 = pack8888 (
- in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)),
- in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1)));
-
- vd4 = pack8888 (
- in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)),
- in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1)));
-
- vd5 = pack8888 (
- in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)),
- in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1)));
-
- vd6 = pack8888 (
- in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)),
- in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1)));
-
- vd7 = pack8888 (
- in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)),
- in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1)));
-
- *(__m64 *)(dst + 0) = vd0;
- *(__m64 *)(dst + 2) = vd1;
- *(__m64 *)(dst + 4) = vd2;
- *(__m64 *)(dst + 6) = vd3;
- *(__m64 *)(dst + 8) = vd4;
- *(__m64 *)(dst + 10) = vd5;
- *(__m64 *)(dst + 12) = vd6;
- *(__m64 *)(dst + 14) = vd7;
-
- w -= 16;
- dst += 16;
- src += 16;
- }
-
- while (w)
- {
- __m64 s = load8888 (*src | 0xff000000);
- __m64 d = load8888 (*dst);
-
- *dst = store8888 (in_over (s, srca, vmask, d));
-
- w--;
- dst++;
- src++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- uint32_t s;
- int dst_stride, src_stride;
- uint8_t a;
- uint16_t w;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w--)
- {
- s = *src++;
- a = s >> 24;
-
- if (a == 0xff)
- {
- *dst = s;
- }
- else if (s)
- {
- __m64 ms, sa;
- ms = load8888 (s);
- sa = expand_alpha (ms);
- *dst = store8888 (over (ms, sa, load8888 (*dst)));
- }
-
- dst++;
- }
- }
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint16_t *dst_line, *dst;
- uint32_t *src_line, *src;
- int dst_stride, src_stride;
- uint16_t w;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
-#if 0
- /* FIXME */
- assert (src_image->drawable == mask_image->drawable);
-#endif
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- __m64 vsrc = load8888 (*src);
- uint64_t d = *dst;
- __m64 vdest = expand565 (M64 (d), 0);
-
- vdest = pack_565 (
- over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);
-
- *dst = UINT64 (vdest) & 0xffff;
-
- w--;
- dst++;
- src++;
- }
-
- CHECKPOINT ();
-
- while (w >= 4)
- {
- __m64 vsrc0, vsrc1, vsrc2, vsrc3;
- __m64 vdest;
-
- vsrc0 = load8888 (*(src + 0));
- vsrc1 = load8888 (*(src + 1));
- vsrc2 = load8888 (*(src + 2));
- vsrc3 = load8888 (*(src + 3));
-
- vdest = *(__m64 *)dst;
-
- vdest = pack_565 (over (vsrc0, expand_alpha (vsrc0), expand565 (vdest, 0)), vdest, 0);
- vdest = pack_565 (over (vsrc1, expand_alpha (vsrc1), expand565 (vdest, 1)), vdest, 1);
- vdest = pack_565 (over (vsrc2, expand_alpha (vsrc2), expand565 (vdest, 2)), vdest, 2);
- vdest = pack_565 (over (vsrc3, expand_alpha (vsrc3), expand565 (vdest, 3)), vdest, 3);
-
- *(__m64 *)dst = vdest;
-
- w -= 4;
- dst += 4;
- src += 4;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- __m64 vsrc = load8888 (*src);
- uint64_t d = *dst;
- __m64 vdest = expand565 (M64 (d), 0);
-
- vdest = pack_565 (over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);
-
- *dst = UINT64 (vdest) & 0xffff;
-
- w--;
- dst++;
- src++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint32_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint16_t w;
- __m64 vsrc, vsrca;
- uint64_t srcsrc;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- srca = src >> 24;
- if (src == 0)
- return;
-
- srcsrc = (uint64_t)src << 32 | src;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- __m64 vdest = in_over (vsrc, vsrca,
- expand_alpha_rev (M64 (m)),
- load8888 (*dst));
-
- *dst = store8888 (vdest);
- }
-
- w--;
- mask++;
- dst++;
- }
-
- CHECKPOINT ();
-
- while (w >= 2)
- {
- uint64_t m0, m1;
-
- m0 = *mask;
- m1 = *(mask + 1);
-
- if (srca == 0xff && (m0 & m1) == 0xff)
- {
- *(uint64_t *)dst = srcsrc;
- }
- else if (m0 | m1)
- {
- __m64 vdest;
- __m64 dest0, dest1;
-
- vdest = *(__m64 *)dst;
-
- dest0 = in_over (vsrc, vsrca, expand_alpha_rev (M64 (m0)),
- expand8888 (vdest, 0));
- dest1 = in_over (vsrc, vsrca, expand_alpha_rev (M64 (m1)),
- expand8888 (vdest, 1));
-
- *(__m64 *)dst = pack8888 (dest0, dest1);
- }
-
- mask += 2;
- dst += 2;
- w -= 2;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- __m64 vdest = load8888 (*dst);
-
- vdest = in_over (
- vsrc, vsrca, expand_alpha_rev (M64 (m)), vdest);
- *dst = store8888 (vdest);
- }
-
- w--;
- mask++;
- dst++;
- }
- }
-
- _mm_empty ();
-}
-
-pixman_bool_t
-pixman_fill_mmx (uint32_t *bits,
- int stride,
- int bpp,
- int x,
- int y,
- int width,
- int height,
- uint32_t xor)
-{
- uint64_t fill;
- __m64 vfill;
- uint32_t byte_width;
- uint8_t *byte_line;
-
-#ifdef __GNUC__
- __m64 v1, v2, v3, v4, v5, v6, v7;
-#endif
-
- if (bpp != 16 && bpp != 32 && bpp != 8)
- return FALSE;
-
- if (bpp == 16 && (xor >> 16 != (xor & 0xffff)))
- return FALSE;
-
- if (bpp == 8 &&
- ((xor >> 16 != (xor & 0xffff)) ||
- (xor >> 24 != (xor & 0x00ff) >> 16)))
- {
- return FALSE;
- }
-
- if (bpp == 8)
- {
- stride = stride * (int) sizeof (uint32_t) / 1;
- byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
- byte_width = width;
- stride *= 1;
- }
- else if (bpp == 16)
- {
- stride = stride * (int) sizeof (uint32_t) / 2;
- byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
- byte_width = 2 * width;
- stride *= 2;
- }
- else
- {
- stride = stride * (int) sizeof (uint32_t) / 4;
- byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
- byte_width = 4 * width;
- stride *= 4;
- }
-
- fill = ((uint64_t)xor << 32) | xor;
- vfill = M64 (fill);
-
-#ifdef __GNUC__
- __asm__ (
- "movq %7, %0\n"
- "movq %7, %1\n"
- "movq %7, %2\n"
- "movq %7, %3\n"
- "movq %7, %4\n"
- "movq %7, %5\n"
- "movq %7, %6\n"
- : "=y" (v1), "=y" (v2), "=y" (v3),
- "=y" (v4), "=y" (v5), "=y" (v6), "=y" (v7)
- : "y" (vfill));
-#endif
-
- while (height--)
- {
- int w;
- uint8_t *d = byte_line;
-
- byte_line += stride;
- w = byte_width;
-
- while (w >= 1 && ((unsigned long)d & 1))
- {
- *(uint8_t *)d = (xor & 0xff);
- w--;
- d++;
- }
-
- while (w >= 2 && ((unsigned long)d & 3))
- {
- *(uint16_t *)d = (xor & 0xffff);
- w -= 2;
- d += 2;
- }
-
- while (w >= 4 && ((unsigned long)d & 7))
- {
- *(uint32_t *)d = xor;
-
- w -= 4;
- d += 4;
- }
-
- while (w >= 64)
- {
-#ifdef __GNUC__
- __asm__ (
- "movq %1, (%0)\n"
- "movq %2, 8(%0)\n"
- "movq %3, 16(%0)\n"
- "movq %4, 24(%0)\n"
- "movq %5, 32(%0)\n"
- "movq %6, 40(%0)\n"
- "movq %7, 48(%0)\n"
- "movq %8, 56(%0)\n"
- :
- : "r" (d),
- "y" (vfill), "y" (v1), "y" (v2), "y" (v3),
- "y" (v4), "y" (v5), "y" (v6), "y" (v7)
- : "memory");
-#else
- *(__m64*) (d + 0) = vfill;
- *(__m64*) (d + 8) = vfill;
- *(__m64*) (d + 16) = vfill;
- *(__m64*) (d + 24) = vfill;
- *(__m64*) (d + 32) = vfill;
- *(__m64*) (d + 40) = vfill;
- *(__m64*) (d + 48) = vfill;
- *(__m64*) (d + 56) = vfill;
-#endif
- w -= 64;
- d += 64;
- }
-
- while (w >= 4)
- {
- *(uint32_t *)d = xor;
-
- w -= 4;
- d += 4;
- }
- while (w >= 2)
- {
- *(uint16_t *)d = (xor & 0xffff);
- w -= 2;
- d += 2;
- }
- while (w >= 1)
- {
- *(uint8_t *)d = (xor & 0xff);
- w--;
- d++;
- }
-
- }
-
- _mm_empty ();
- return TRUE;
-}
-
-static void
-mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint32_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint16_t w;
- __m64 vsrc, vsrca;
- uint64_t srcsrc;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- srca = src >> 24;
- if (src == 0)
- {
- pixman_fill_mmx (dst_image->bits.bits, dst_image->bits.rowstride,
- PIXMAN_FORMAT_BPP (dst_image->bits.format),
- dest_x, dest_y, width, height, 0);
- return;
- }
-
- srcsrc = (uint64_t)src << 32 | src;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- __m64 vdest = in (vsrc, expand_alpha_rev (M64 (m)));
-
- *dst = store8888 (vdest);
- }
- else
- {
- *dst = 0;
- }
-
- w--;
- mask++;
- dst++;
- }
-
- CHECKPOINT ();
-
- while (w >= 2)
- {
- uint64_t m0, m1;
- m0 = *mask;
- m1 = *(mask + 1);
-
- if (srca == 0xff && (m0 & m1) == 0xff)
- {
- *(uint64_t *)dst = srcsrc;
- }
- else if (m0 | m1)
- {
- __m64 vdest;
- __m64 dest0, dest1;
-
- vdest = *(__m64 *)dst;
-
- dest0 = in (vsrc, expand_alpha_rev (M64 (m0)));
- dest1 = in (vsrc, expand_alpha_rev (M64 (m1)));
-
- *(__m64 *)dst = pack8888 (dest0, dest1);
- }
- else
- {
- *(uint64_t *)dst = 0;
- }
-
- mask += 2;
- dst += 2;
- w -= 2;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- __m64 vdest = load8888 (*dst);
-
- vdest = in (vsrc, expand_alpha_rev (M64 (m)));
- *dst = store8888 (vdest);
- }
- else
- {
- *dst = 0;
- }
-
- w--;
- mask++;
- dst++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint16_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint16_t w;
- __m64 vsrc, vsrca, tmp;
- uint64_t srcsrcsrcsrc, src16;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- srca = src >> 24;
- if (src == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0);
- src16 = UINT64 (tmp);
-
- srcsrcsrcsrc =
- (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
- (uint64_t)src16 << 16 | (uint64_t)src16;
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- uint64_t d = *dst;
- __m64 vd = M64 (d);
- __m64 vdest = in_over (
- vsrc, vsrca, expand_alpha_rev (M64 (m)), expand565 (vd, 0));
-
- vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
- *dst = UINT64 (vd);
- }
-
- w--;
- mask++;
- dst++;
- }
-
- CHECKPOINT ();
-
- while (w >= 4)
- {
- uint64_t m0, m1, m2, m3;
- m0 = *mask;
- m1 = *(mask + 1);
- m2 = *(mask + 2);
- m3 = *(mask + 3);
-
- if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
- {
- *(uint64_t *)dst = srcsrcsrcsrc;
- }
- else if (m0 | m1 | m2 | m3)
- {
- __m64 vdest;
- __m64 vm0, vm1, vm2, vm3;
-
- vdest = *(__m64 *)dst;
-
- vm0 = M64 (m0);
- vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm0),
- expand565 (vdest, 0)), vdest, 0);
- vm1 = M64 (m1);
- vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm1),
- expand565 (vdest, 1)), vdest, 1);
- vm2 = M64 (m2);
- vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm2),
- expand565 (vdest, 2)), vdest, 2);
- vm3 = M64 (m3);
- vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm3),
- expand565 (vdest, 3)), vdest, 3);
-
- *(__m64 *)dst = vdest;
- }
-
- w -= 4;
- mask += 4;
- dst += 4;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- uint64_t d = *dst;
- __m64 vd = M64 (d);
- __m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (M64 (m)),
- expand565 (vd, 0));
- vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
- *dst = UINT64 (vd);
- }
-
- w--;
- mask++;
- dst++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint16_t *dst_line, *dst;
- uint32_t *src_line, *src;
- int dst_stride, src_stride;
- uint16_t w;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
-#if 0
- /* FIXME */
- assert (src_image->drawable == mask_image->drawable);
-#endif
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- CHECKPOINT ();
-
- while (w && (unsigned long)dst & 7)
- {
- __m64 vsrc = load8888 (*src);
- uint64_t d = *dst;
- __m64 vdest = expand565 (M64 (d), 0);
-
- vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);
-
- *dst = UINT64 (vdest);
-
- w--;
- dst++;
- src++;
- }
-
- CHECKPOINT ();
-
- while (w >= 4)
- {
- uint32_t s0, s1, s2, s3;
- unsigned char a0, a1, a2, a3;
-
- s0 = *src;
- s1 = *(src + 1);
- s2 = *(src + 2);
- s3 = *(src + 3);
-
- a0 = (s0 >> 24);
- a1 = (s1 >> 24);
- a2 = (s2 >> 24);
- a3 = (s3 >> 24);
-
- if ((a0 & a1 & a2 & a3) == 0xFF)
- {
- __m64 vdest;
- vdest = pack_565 (invert_colors (load8888 (s0)), _mm_setzero_si64 (), 0);
- vdest = pack_565 (invert_colors (load8888 (s1)), vdest, 1);
- vdest = pack_565 (invert_colors (load8888 (s2)), vdest, 2);
- vdest = pack_565 (invert_colors (load8888 (s3)), vdest, 3);
-
- *(__m64 *)dst = vdest;
- }
- else if (s0 | s1 | s2 | s3)
- {
- __m64 vdest = *(__m64 *)dst;
-
- vdest = pack_565 (over_rev_non_pre (load8888 (s0), expand565 (vdest, 0)), vdest, 0);
- vdest = pack_565 (over_rev_non_pre (load8888 (s1), expand565 (vdest, 1)), vdest, 1);
- vdest = pack_565 (over_rev_non_pre (load8888 (s2), expand565 (vdest, 2)), vdest, 2);
- vdest = pack_565 (over_rev_non_pre (load8888 (s3), expand565 (vdest, 3)), vdest, 3);
-
- *(__m64 *)dst = vdest;
- }
-
- w -= 4;
- dst += 4;
- src += 4;
- }
-
- CHECKPOINT ();
-
- while (w)
- {
- __m64 vsrc = load8888 (*src);
- uint64_t d = *dst;
- __m64 vdest = expand565 (M64 (d), 0);
-
- vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);
-
- *dst = UINT64 (vdest);
-
- w--;
- dst++;
- src++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- int dst_stride, src_stride;
- uint16_t w;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
-#if 0
- /* FIXME */
- assert (src_image->drawable == mask_image->drawable);
-#endif
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w && (unsigned long)dst & 7)
- {
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dst);
-
- *dst = store8888 (over_rev_non_pre (s, d));
-
- w--;
- dst++;
- src++;
- }
-
- while (w >= 2)
- {
- uint64_t s0, s1;
- unsigned char a0, a1;
- __m64 d0, d1;
-
- s0 = *src;
- s1 = *(src + 1);
-
- a0 = (s0 >> 24);
- a1 = (s1 >> 24);
-
- if ((a0 & a1) == 0xFF)
- {
- d0 = invert_colors (load8888 (s0));
- d1 = invert_colors (load8888 (s1));
-
- *(__m64 *)dst = pack8888 (d0, d1);
- }
- else if (s0 | s1)
- {
- __m64 vdest = *(__m64 *)dst;
-
- d0 = over_rev_non_pre (load8888 (s0), expand8888 (vdest, 0));
- d1 = over_rev_non_pre (load8888 (s1), expand8888 (vdest, 1));
-
- *(__m64 *)dst = pack8888 (d0, d1);
- }
-
- w -= 2;
- dst += 2;
- src += 2;
- }
-
- while (w)
- {
- __m64 s = load8888 (*src);
- __m64 d = load8888 (*dst);
-
- *dst = store8888 (over_rev_non_pre (s, d));
-
- w--;
- dst++;
- src++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t src, srca;
- uint16_t *dst_line;
- uint32_t *mask_line;
- int dst_stride, mask_stride;
- __m64 vsrc, vsrca;
-
- CHECKPOINT ();
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- srca = src >> 24;
- if (src == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- int twidth = width;
- uint32_t *p = (uint32_t *)mask_line;
- uint16_t *q = (uint16_t *)dst_line;
-
- while (twidth && ((unsigned long)q & 7))
- {
- uint32_t m = *(uint32_t *)p;
-
- if (m)
- {
- uint64_t d = *q;
- __m64 vdest = expand565 (M64 (d), 0);
- vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
- *q = UINT64 (vdest);
- }
-
- twidth--;
- p++;
- q++;
- }
-
- while (twidth >= 4)
- {
- uint32_t m0, m1, m2, m3;
-
- m0 = *p;
- m1 = *(p + 1);
- m2 = *(p + 2);
- m3 = *(p + 3);
-
- if ((m0 | m1 | m2 | m3))
- {
- __m64 vdest = *(__m64 *)q;
-
- vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m0), expand565 (vdest, 0)), vdest, 0);
- vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m1), expand565 (vdest, 1)), vdest, 1);
- vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m2), expand565 (vdest, 2)), vdest, 2);
- vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m3), expand565 (vdest, 3)), vdest, 3);
-
- *(__m64 *)q = vdest;
- }
- twidth -= 4;
- p += 4;
- q += 4;
- }
-
- while (twidth)
- {
- uint32_t m;
-
- m = *(uint32_t *)p;
- if (m)
- {
- uint64_t d = *q;
- __m64 vdest = expand565 (M64 (d), 0);
- vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
- *q = UINT64 (vdest);
- }
-
- twidth--;
- p++;
- q++;
- }
-
- mask_line += mask_stride;
- dst_line += dst_stride;
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint8_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint16_t w;
- uint32_t src;
- uint8_t sa;
- __m64 vsrc, vsrca;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- sa = src >> 24;
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- if ((((unsigned long)dst_image & 3) == 0) &&
- (((unsigned long)src_image & 3) == 0))
- {
- while (w >= 4)
- {
- uint32_t m;
- __m64 vmask;
- __m64 vdest;
-
- m = 0;
-
- vmask = load8888 (*(uint32_t *)mask);
- vdest = load8888 (*(uint32_t *)dst);
-
- *(uint32_t *)dst = store8888 (in (in (vsrca, vmask), vdest));
-
- dst += 4;
- mask += 4;
- w -= 4;
- }
- }
-
- while (w--)
- {
- uint16_t tmp;
- uint8_t a;
- uint32_t m, d;
-
- a = *mask++;
- d = *dst;
-
- m = MUL_UN8 (sa, a, tmp);
- d = MUL_UN8 (m, d, tmp);
-
- *dst++ = d;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_in_8_8 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint8_t *dst_line, *dst;
- uint8_t *src_line, *src;
- int src_stride, dst_stride;
- uint16_t w;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- if ((((unsigned long)dst_image & 3) == 0) &&
- (((unsigned long)src_image & 3) == 0))
- {
- while (w >= 4)
- {
- uint32_t *s = (uint32_t *)src;
- uint32_t *d = (uint32_t *)dst;
-
- *d = store8888 (in (load8888 (*s), load8888 (*d)));
-
- w -= 4;
- dst += 4;
- src += 4;
- }
- }
-
- while (w--)
- {
- uint8_t s, d;
- uint16_t tmp;
-
- s = *src;
- d = *dst;
-
- *dst = MUL_UN8 (s, d, tmp);
-
- src++;
- dst++;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_add_n_8_8 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint8_t *dst_line, *dst;
- uint8_t *mask_line, *mask;
- int dst_stride, mask_stride;
- uint16_t w;
- uint32_t src;
- uint8_t sa;
- __m64 vsrc, vsrca;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
- src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
- sa = src >> 24;
-
- if (src == 0)
- return;
-
- vsrc = load8888 (src);
- vsrca = expand_alpha (vsrc);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
- w = width;
-
- if ((((unsigned long)mask_image & 3) == 0) &&
- (((unsigned long)dst_image & 3) == 0))
- {
- while (w >= 4)
- {
- __m64 vmask = load8888 (*(uint32_t *)mask);
- __m64 vdest = load8888 (*(uint32_t *)dst);
-
- *(uint32_t *)dst = store8888 (_mm_adds_pu8 (in (vsrca, vmask), vdest));
-
- w -= 4;
- dst += 4;
- mask += 4;
- }
- }
-
- while (w--)
- {
- uint16_t tmp;
- uint16_t a;
- uint32_t m, d;
- uint32_t r;
-
- a = *mask++;
- d = *dst;
-
- m = MUL_UN8 (sa, a, tmp);
- r = ADD_UN8 (m, d, tmp);
-
- *dst++ = r;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_add_8000_8000 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint8_t *dst_line, *dst;
- uint8_t *src_line, *src;
- int dst_stride, src_stride;
- uint16_t w;
- uint8_t s, d;
- uint16_t t;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w && (unsigned long)dst & 7)
- {
- s = *src;
- d = *dst;
- t = d + s;
- s = t | (0 - (t >> 8));
- *dst = s;
-
- dst++;
- src++;
- w--;
- }
-
- while (w >= 8)
- {
- *(__m64*)dst = _mm_adds_pu8 (*(__m64*)src, *(__m64*)dst);
- dst += 8;
- src += 8;
- w -= 8;
- }
-
- while (w)
- {
- s = *src;
- d = *dst;
- t = d + s;
- s = t | (0 - (t >> 8));
- *dst = s;
-
- dst++;
- src++;
- w--;
- }
- }
-
- _mm_empty ();
-}
-
-static void
-mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- __m64 dst64;
- uint32_t *dst_line, *dst;
- uint32_t *src_line, *src;
- int dst_stride, src_stride;
- uint16_t w;
-
- CHECKPOINT ();
-
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- src = src_line;
- src_line += src_stride;
- w = width;
-
- while (w && (unsigned long)dst & 7)
- {
- *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
- _mm_cvtsi32_si64 (*dst)));
- dst++;
- src++;
- w--;
- }
-
- while (w >= 2)
- {
- dst64 = _mm_adds_pu8 (*(__m64*)src, *(__m64*)dst);
- *(uint64_t*)dst = UINT64 (dst64);
- dst += 2;
- src += 2;
- w -= 2;
- }
-
- if (w)
- {
- *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
- _mm_cvtsi32_si64 (*dst)));
-
- }
- }
-
- _mm_empty ();
-}
-
-static pixman_bool_t
-pixman_blt_mmx (uint32_t *src_bits,
- uint32_t *dst_bits,
- int src_stride,
- int dst_stride,
- int src_bpp,
- int dst_bpp,
- int src_x,
- int src_y,
- int dst_x,
- int dst_y,
- int width,
- int height)
-{
- uint8_t * src_bytes;
- uint8_t * dst_bytes;
- int byte_width;
-
- if (src_bpp != dst_bpp)
- return FALSE;
-
- if (src_bpp == 16)
- {
- src_stride = src_stride * (int) sizeof (uint32_t) / 2;
- dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
- src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
- dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
- byte_width = 2 * width;
- src_stride *= 2;
- dst_stride *= 2;
- }
- else if (src_bpp == 32)
- {
- src_stride = src_stride * (int) sizeof (uint32_t) / 4;
- dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
- src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
- dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
- byte_width = 4 * width;
- src_stride *= 4;
- dst_stride *= 4;
- }
- else
- {
- return FALSE;
- }
-
- while (height--)
- {
- int w;
- uint8_t *s = src_bytes;
- uint8_t *d = dst_bytes;
- src_bytes += src_stride;
- dst_bytes += dst_stride;
- w = byte_width;
-
- while (w >= 2 && ((unsigned long)d & 3))
- {
- *(uint16_t *)d = *(uint16_t *)s;
- w -= 2;
- s += 2;
- d += 2;
- }
-
- while (w >= 4 && ((unsigned long)d & 7))
- {
- *(uint32_t *)d = *(uint32_t *)s;
-
- w -= 4;
- s += 4;
- d += 4;
- }
-
- while (w >= 64)
- {
-#if defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
- __asm__ (
- "movq (%1), %%mm0\n"
- "movq 8(%1), %%mm1\n"
- "movq 16(%1), %%mm2\n"
- "movq 24(%1), %%mm3\n"
- "movq 32(%1), %%mm4\n"
- "movq 40(%1), %%mm5\n"
- "movq 48(%1), %%mm6\n"
- "movq 56(%1), %%mm7\n"
-
- "movq %%mm0, (%0)\n"
- "movq %%mm1, 8(%0)\n"
- "movq %%mm2, 16(%0)\n"
- "movq %%mm3, 24(%0)\n"
- "movq %%mm4, 32(%0)\n"
- "movq %%mm5, 40(%0)\n"
- "movq %%mm6, 48(%0)\n"
- "movq %%mm7, 56(%0)\n"
- :
- : "r" (d), "r" (s)
- : "memory",
- "%mm0", "%mm1", "%mm2", "%mm3",
- "%mm4", "%mm5", "%mm6", "%mm7");
-#else
- __m64 v0 = *(__m64 *)(s + 0);
- __m64 v1 = *(__m64 *)(s + 8);
- __m64 v2 = *(__m64 *)(s + 16);
- __m64 v3 = *(__m64 *)(s + 24);
- __m64 v4 = *(__m64 *)(s + 32);
- __m64 v5 = *(__m64 *)(s + 40);
- __m64 v6 = *(__m64 *)(s + 48);
- __m64 v7 = *(__m64 *)(s + 56);
- *(__m64 *)(d + 0) = v0;
- *(__m64 *)(d + 8) = v1;
- *(__m64 *)(d + 16) = v2;
- *(__m64 *)(d + 24) = v3;
- *(__m64 *)(d + 32) = v4;
- *(__m64 *)(d + 40) = v5;
- *(__m64 *)(d + 48) = v6;
- *(__m64 *)(d + 56) = v7;
-#endif
-
- w -= 64;
- s += 64;
- d += 64;
- }
- while (w >= 4)
- {
- *(uint32_t *)d = *(uint32_t *)s;
-
- w -= 4;
- s += 4;
- d += 4;
- }
- if (w >= 2)
- {
- *(uint16_t *)d = *(uint16_t *)s;
- w -= 2;
- s += 2;
- d += 2;
- }
- }
-
- _mm_empty ();
-
- return TRUE;
-}
-
-static void
-mmx_composite_copy_area (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- pixman_blt_mmx (src_image->bits.bits,
- dst_image->bits.bits,
- src_image->bits.rowstride,
- dst_image->bits.rowstride,
- PIXMAN_FORMAT_BPP (src_image->bits.format),
- PIXMAN_FORMAT_BPP (dst_image->bits.format),
- src_x, src_y, dest_x, dest_y, width, height);
-}
-
-static void
-mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- uint32_t *src, *src_line;
- uint32_t *dst, *dst_line;
- uint8_t *mask, *mask_line;
- int src_stride, mask_stride, dst_stride;
- uint16_t w;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
- while (height--)
- {
- src = src_line;
- src_line += src_stride;
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
-
- w = width;
-
- while (w--)
- {
- uint64_t m = *mask;
-
- if (m)
- {
- __m64 s = load8888 (*src | 0xff000000);
-
- if (m == 0xff)
- {
- *dst = store8888 (s);
- }
- else
- {
- __m64 sa = expand_alpha (s);
- __m64 vm = expand_alpha_rev (M64 (m));
- __m64 vdest = in_over (s, sa, vm, load8888 (*dst));
-
- *dst = store8888 (vdest);
- }
- }
-
- mask++;
- dst++;
- src++;
- }
- }
-
- _mm_empty ();
-}
-
-static const pixman_fast_path_t mmx_fast_paths[] =
-{
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, mmx_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, mmx_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, mmx_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5, mmx_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5, mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5, mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5, mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_8888_n_8888, NEED_SOLID_MASK },
-#if 0
- /* FIXME: This code is commented out since it's apparently not actually faster than the generic code. */
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888, 0 },
-#endif
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_over_n_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area, 0 },
-
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_over_8888_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, mmx_composite_over_8888_0565, 0 },
-
- { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_add_8888_8888, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_add_8888_8888, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_add_8000_8000, 0 },
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_add_n_8_8, 0 },
-
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, mmx_composite_copy_area, 0 },
-
- { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_in_8_8, 0 },
- { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_in_n_8_8, 0 },
-
- { PIXMAN_OP_NONE },
-};
-
-static void
-mmx_composite (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src,
- pixman_image_t * mask,
- pixman_image_t * dest,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
-{
- if (_pixman_run_fast_path (mmx_fast_paths, imp,
- op, src, mask, dest,
- src_x, src_y,
- mask_x, mask_y,
- dest_x, dest_y,
- width, height))
- {
- return;
- }
-
- _pixman_implementation_composite (imp->delegate,
- op, src, mask, dest, src_x, src_y,
- mask_x, mask_y, dest_x, dest_y,
- width, height);
-}
-
-static pixman_bool_t
-mmx_blt (pixman_implementation_t *imp,
- uint32_t * src_bits,
- uint32_t * dst_bits,
- int src_stride,
- int dst_stride,
- int src_bpp,
- int dst_bpp,
- int src_x,
- int src_y,
- int dst_x,
- int dst_y,
- int width,
- int height)
-{
- if (!pixman_blt_mmx (
- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
- src_x, src_y, dst_x, dst_y, width, height))
-
- {
- return _pixman_implementation_blt (
- imp->delegate,
- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
- src_x, src_y, dst_x, dst_y, width, height);
- }
-
- return TRUE;
-}
-
-static pixman_bool_t
-mmx_fill (pixman_implementation_t *imp,
- uint32_t * bits,
- int stride,
- int bpp,
- int x,
- int y,
- int width,
- int height,
- uint32_t xor)
-{
- if (!pixman_fill_mmx (bits, stride, bpp, x, y, width, height, xor))
- {
- return _pixman_implementation_fill (
- imp->delegate, bits, stride, bpp, x, y, width, height, xor);
- }
-
- return TRUE;
-}
-
-pixman_implementation_t *
-_pixman_implementation_create_mmx (void)
-{
- pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
- pixman_implementation_t *imp = _pixman_implementation_create (general);
-
- imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u;
- imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u;
- imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u;
- imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u;
- imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u;
- imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u;
- imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u;
- imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u;
- imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u;
- imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
- imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;
-
- imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca;
- imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca;
- imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca;
- imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca;
- imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca;
- imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca;
- imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca;
- imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca;
- imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca;
- imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca;
- imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca;
-
- imp->composite = mmx_composite;
- imp->blt = mmx_blt;
- imp->fill = mmx_fill;
-
- return imp;
-}
-
-#endif /* USE_MMX */
+/*
+ * Copyright © 2004, 2005 Red Hat, Inc.
+ * Copyright © 2004 Nicholas Miell
+ * Copyright © 2005 Trolltech AS
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Red Hat not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. Red Hat makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author: Søren Sandmann (sandmann@redhat.com)
+ * Minor Improvements: Nicholas Miell (nmiell@gmail.com)
+ * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
+ *
+ * Based on work by Owen Taylor
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#ifdef USE_MMX
+
+#include <mmintrin.h>
+#include "pixman-private.h"
+#include "pixman-combine32.h"
+
+#define no_vERBOSE
+
+#ifdef VERBOSE
+#define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__)
+#else
+#define CHECKPOINT()
+#endif
+
+/* Notes about writing mmx code
+ *
+ * Give memory operands as the second operand. If you give a memory
+ * operand as the first, gcc will first load it into a register and
+ * then use that register.
+ *
+ * i.e. use
+ *
+ * _mm_mullo_pi16 (x, mmx_constant);
+ *
+ * not
+ *
+ * _mm_mullo_pi16 (mmx_constant, x);
+ *
+ * Also try to minimize dependencies. i.e. when you need a value, try
+ * to calculate it from a value that was calculated as early as
+ * possible.
+ */
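As a compact illustration of the operand-order advice above, here is a standalone sketch that mirrors the uint64_t-constant-plus-cast pattern this file itself uses under GCC (the function and constant names are purely illustrative, not part of pixman):

    #include <stdint.h>
    #include <mmintrin.h>

    static const uint64_t bias_4x0080 = 0x0080008000800080ULL;

    static __m64
    mul_each_lane_by_0x80 (__m64 x)
    {
        /* the constant is passed as the second operand, so gcc can use it
         * directly as a memory operand of pmullw instead of loading it
         * into a register first */
        return _mm_mullo_pi16 (x, (__m64) bias_4x0080);
    }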
+
+/* --------------- MMX primitives ------------------------------------- */
+
+#ifdef __GNUC__
+typedef uint64_t mmxdatafield;
+#else
+typedef __m64 mmxdatafield;
+/* If __m64 is defined as a struct or union, define M64_MEMBER to be the
+ name of the member used to access the data */
+# ifdef _MSC_VER
+# define M64_MEMBER m64_u64
+# elif defined(__SUNPRO_C)
+# define M64_MEMBER l_
+# endif
+#endif
+
+typedef struct
+{
+ mmxdatafield mmx_4x00ff;
+ mmxdatafield mmx_4x0080;
+ mmxdatafield mmx_565_rgb;
+ mmxdatafield mmx_565_unpack_multiplier;
+ mmxdatafield mmx_565_r;
+ mmxdatafield mmx_565_g;
+ mmxdatafield mmx_565_b;
+ mmxdatafield mmx_mask_0;
+ mmxdatafield mmx_mask_1;
+ mmxdatafield mmx_mask_2;
+ mmxdatafield mmx_mask_3;
+ mmxdatafield mmx_full_alpha;
+ mmxdatafield mmx_ffff0000ffff0000;
+ mmxdatafield mmx_0000ffff00000000;
+ mmxdatafield mmx_000000000000ffff;
+} mmx_data_t;
+
+#if defined(_MSC_VER)
+# define MMXDATA_INIT(field, val) { val ## UI64 }
+#elif defined(M64_MEMBER) /* __m64 is a struct, not an integral type */
+# define MMXDATA_INIT(field, val) field = { val ## ULL }
+#else /* __m64 is an integral type */
+# define MMXDATA_INIT(field, val) field = val ## ULL
+#endif
+
+static const mmx_data_t c =
+{
+ MMXDATA_INIT (.mmx_4x00ff, 0x00ff00ff00ff00ff),
+ MMXDATA_INIT (.mmx_4x0080, 0x0080008000800080),
+ MMXDATA_INIT (.mmx_565_rgb, 0x000001f0003f001f),
+ MMXDATA_INIT (.mmx_565_unpack_multiplier, 0x0000008404100840),
+ MMXDATA_INIT (.mmx_565_r, 0x000000f800000000),
+ MMXDATA_INIT (.mmx_565_g, 0x0000000000fc0000),
+ MMXDATA_INIT (.mmx_565_b, 0x00000000000000f8),
+ MMXDATA_INIT (.mmx_mask_0, 0xffffffffffff0000),
+ MMXDATA_INIT (.mmx_mask_1, 0xffffffff0000ffff),
+ MMXDATA_INIT (.mmx_mask_2, 0xffff0000ffffffff),
+ MMXDATA_INIT (.mmx_mask_3, 0x0000ffffffffffff),
+ MMXDATA_INIT (.mmx_full_alpha, 0x00ff000000000000),
+ MMXDATA_INIT (.mmx_ffff0000ffff0000, 0xffff0000ffff0000),
+ MMXDATA_INIT (.mmx_0000ffff00000000, 0x0000ffff00000000),
+ MMXDATA_INIT (.mmx_000000000000ffff, 0x000000000000ffff),
+};
+
+#ifdef __GNUC__
+# ifdef __ICC
+# define MC(x) M64 (c.mmx_ ## x)
+# else
+# define MC(x) ((__m64)c.mmx_ ## x)
+# endif
+#else
+# define MC(x) c.mmx_ ## x
+#endif
+
+static force_inline __m64
+M64 (uint64_t x)
+{
+#ifdef __ICC
+ return _mm_cvtsi64_m64 (x);
+#elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
+ __m64 res;
+
+ res.M64_MEMBER = x;
+ return res;
+#else /* __m64 is an integral type */
+ return (__m64)x;
+#endif
+}
+
+static force_inline uint64_t
+UINT64 (__m64 x)
+{
+#ifdef __ICC
+ return _mm_cvtm64_si64 (x);
+#elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
+ uint64_t res = x.M64_MEMBER;
+ return res;
+#else /* __m64 is an integral type */
+ return (uint64_t)x;
+#endif
+}
+
+static force_inline __m64
+shift (__m64 v,
+ int s)
+{
+ if (s > 0)
+ return _mm_slli_si64 (v, s);
+ else if (s < 0)
+ return _mm_srli_si64 (v, -s);
+ else
+ return v;
+}
+
+static force_inline __m64
+negate (__m64 mask)
+{
+ return _mm_xor_si64 (mask, MC (4x00ff));
+}
+
+static force_inline __m64
+pix_multiply (__m64 a, __m64 b)
+{
+ __m64 res;
+
+ res = _mm_mullo_pi16 (a, b);
+ res = _mm_adds_pu16 (res, MC (4x0080));
+ res = _mm_adds_pu16 (res, _mm_srli_pi16 (res, 8));
+ res = _mm_srli_pi16 (res, 8);
+
+ return res;
+}
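Per 16-bit lane, pix_multiply above computes the usual divide-by-255-with-rounding product, the same arithmetic as the MUL_UN8 macro from pixman-combine32.h; a scalar sketch of one 8-bit channel (helper name is illustrative):

    #include <stdint.h>

    static uint8_t
    mul_un8_sketch (uint8_t a, uint8_t b)
    {
        unsigned t = a * b + 0x80;               /* add 0.5 in 8.8 fixed point */
        return (uint8_t) ((t + (t >> 8)) >> 8);  /* (a * b) / 255, rounded     */
    }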
+
+static force_inline __m64
+pix_add (__m64 a, __m64 b)
+{
+ return _mm_adds_pu8 (a, b);
+}
+
+static force_inline __m64
+expand_alpha (__m64 pixel)
+{
+ __m64 t1, t2;
+
+ t1 = shift (pixel, -48);
+ t2 = shift (t1, 16);
+ t1 = _mm_or_si64 (t1, t2);
+ t2 = shift (t1, 32);
+ t1 = _mm_or_si64 (t1, t2);
+
+ return t1;
+}
+
+static force_inline __m64
+expand_alpha_rev (__m64 pixel)
+{
+ __m64 t1, t2;
+
+ /* move alpha to low 16 bits and zero the rest */
+ t1 = shift (pixel, 48);
+ t1 = shift (t1, -48);
+
+ t2 = shift (t1, 16);
+ t1 = _mm_or_si64 (t1, t2);
+ t2 = shift (t1, 32);
+ t1 = _mm_or_si64 (t1, t2);
+
+ return t1;
+}
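Treating the 64-bit register as four 16-bit lanes (0x00AA00RR00GG00BB), expand_alpha broadcasts the alpha lane into all four lanes, and expand_alpha_rev does the same starting from the low lane; a scalar sketch of the former:

    #include <stdint.h>

    /* 0x00AA00RR00GG00BB  ->  0x00AA00AA00AA00AA */
    static uint64_t
    expand_alpha_sketch (uint64_t pixel)
    {
        uint64_t a = pixel >> 48;            /* isolate the alpha lane       */
        return a * 0x0001000100010001ULL;    /* replicate it into every lane */
    }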
+
+static force_inline __m64
+invert_colors (__m64 pixel)
+{
+ __m64 x, y, z;
+
+ x = y = z = pixel;
+
+ x = _mm_and_si64 (x, MC (ffff0000ffff0000));
+ y = _mm_and_si64 (y, MC (000000000000ffff));
+ z = _mm_and_si64 (z, MC (0000ffff00000000));
+
+ y = shift (y, 32);
+ z = shift (z, -32);
+
+ x = _mm_or_si64 (x, y);
+ x = _mm_or_si64 (x, z);
+
+ return x;
+}
+
+static force_inline __m64
+over (__m64 src,
+ __m64 srca,
+ __m64 dest)
+{
+ return _mm_adds_pu8 (src, pix_multiply (dest, negate (srca)));
+}
+
+static force_inline __m64
+over_rev_non_pre (__m64 src, __m64 dest)
+{
+ __m64 srca = expand_alpha (src);
+ __m64 srcfaaa = _mm_or_si64 (srca, MC (full_alpha));
+
+ return over (pix_multiply (invert_colors (src), srcfaaa), srca, dest);
+}
+
+static force_inline __m64
+in (__m64 src, __m64 mask)
+{
+ return pix_multiply (src, mask);
+}
+
+static force_inline __m64
+in_over_full_src_alpha (__m64 src, __m64 mask, __m64 dest)
+{
+ src = _mm_or_si64 (src, MC (full_alpha));
+
+ return over (in (src, mask), mask, dest);
+}
+
+#ifndef _MSC_VER
+static force_inline __m64
+in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
+{
+ return over (in (src, mask), pix_multiply (srca, mask), dest);
+}
+
+#else
+
+#define in_over(src, srca, mask, dest) \
+ over (in (src, mask), pix_multiply (srca, mask), dest)
+
+#endif
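Per 8-bit channel, in_over computes "src IN mask OVER dest", i.e. src*mask + dest*(255 - srca*mask), with every product rounded as in pix_multiply and the final add saturating like _mm_adds_pu8; a scalar sketch (helper names are illustrative):

    #include <stdint.h>

    static uint8_t
    mul_un8 (uint8_t a, uint8_t b)           /* divide by 255 with rounding */
    {
        unsigned t = a * b + 0x80;
        return (uint8_t) ((t + (t >> 8)) >> 8);
    }

    static uint8_t
    in_over_channel (uint8_t s, uint8_t sa, uint8_t m, uint8_t d)
    {
        unsigned v = mul_un8 (s, m) + mul_un8 (d, 255 - mul_un8 (sa, m));
        return (uint8_t) (v > 255 ? 255 : v); /* saturate, like _mm_adds_pu8 */
    }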
+
+static force_inline __m64
+load8888 (uint32_t v)
+{
+ return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (v), _mm_setzero_si64 ());
+}
+
+static force_inline __m64
+pack8888 (__m64 lo, __m64 hi)
+{
+ return _mm_packs_pu16 (lo, hi);
+}
+
+static force_inline uint32_t
+store8888 (__m64 v)
+{
+ return _mm_cvtsi64_si32 (pack8888 (v, _mm_setzero_si64 ()));
+}
+
+/* Expand 16 bits positioned at @pos (0-3) of an MMX register into
+ *
+ * 00RR00GG00BB
+ *
+ * --- Expanding 565 in the low word ---
+ *
+ * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
+ * m = m & (01f0003f001f);
+ * m = m * (008404100840);
+ * m = m >> 8;
+ *
+ * Note the trick here - the top word is shifted by another nibble to
+ * avoid it bumping into the middle word
+ */
+static force_inline __m64
+expand565 (__m64 pixel, int pos)
+{
+ __m64 p = pixel;
+ __m64 t1, t2;
+
+    /* move pixel to low 16 bits and zero the rest */
+ p = shift (shift (p, (3 - pos) * 16), -48);
+
+ t1 = shift (p, 36 - 11);
+ t2 = shift (p, 16 - 5);
+
+ p = _mm_or_si64 (t1, p);
+ p = _mm_or_si64 (t2, p);
+ p = _mm_and_si64 (p, MC (565_rgb));
+
+ pixel = _mm_mullo_pi16 (p, MC (565_unpack_multiplier));
+ return _mm_srli_pi16 (pixel, 8);
+}
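The mask-multiply-shift trick above amounts to exact top-bit replication of the 5- and 6-bit channels; a scalar sketch of the per-pixel result expand565 produces in its three colour lanes:

    #include <stdint.h>

    static void
    expand565_sketch (uint16_t p, uint8_t *r, uint8_t *g, uint8_t *b)
    {
        uint8_t r5 = (p >> 11) & 0x1f;
        uint8_t g6 = (p >> 5)  & 0x3f;
        uint8_t b5 = p         & 0x1f;

        *r = (uint8_t) ((r5 << 3) | (r5 >> 2));   /* 5 -> 8 bits */
        *g = (uint8_t) ((g6 << 2) | (g6 >> 4));   /* 6 -> 8 bits */
        *b = (uint8_t) ((b5 << 3) | (b5 >> 2));   /* 5 -> 8 bits */
    }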
+
+static force_inline __m64
+expand8888 (__m64 in, int pos)
+{
+ if (pos == 0)
+ return _mm_unpacklo_pi8 (in, _mm_setzero_si64 ());
+ else
+ return _mm_unpackhi_pi8 (in, _mm_setzero_si64 ());
+}
+
+static force_inline __m64
+expandx888 (__m64 in, int pos)
+{
+ return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha));
+}
+
+static force_inline __m64
+pack_565 (__m64 pixel, __m64 target, int pos)
+{
+ __m64 p = pixel;
+ __m64 t = target;
+ __m64 r, g, b;
+
+ r = _mm_and_si64 (p, MC (565_r));
+ g = _mm_and_si64 (p, MC (565_g));
+ b = _mm_and_si64 (p, MC (565_b));
+
+ r = shift (r, -(32 - 8) + pos * 16);
+ g = shift (g, -(16 - 3) + pos * 16);
+ b = shift (b, -(0 + 3) + pos * 16);
+
+ if (pos == 0)
+ t = _mm_and_si64 (t, MC (mask_0));
+ else if (pos == 1)
+ t = _mm_and_si64 (t, MC (mask_1));
+ else if (pos == 2)
+ t = _mm_and_si64 (t, MC (mask_2));
+ else if (pos == 3)
+ t = _mm_and_si64 (t, MC (mask_3));
+
+ p = _mm_or_si64 (r, t);
+ p = _mm_or_si64 (g, p);
+
+ return _mm_or_si64 (b, p);
+}
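pack_565 is the reverse direction: it keeps only the top 5/6/5 bits of the expanded channels and merges the result into the 16-bit field of the target selected by pos; a scalar sketch of the packing for a single pixel:

    #include <stdint.h>

    static uint16_t
    pack_565_sketch (uint8_t r, uint8_t g, uint8_t b)
    {
        return (uint16_t) (((r & 0xf8) << 8) |   /* top 5 bits of red   */
                           ((g & 0xfc) << 3) |   /* top 6 bits of green */
                           ( b         >> 3));   /* top 5 bits of blue  */
    }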
+
+#ifndef _MSC_VER
+
+static force_inline __m64
+pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
+{
+ x = pix_multiply (x, a);
+ y = pix_multiply (y, b);
+
+ return pix_add (x, y);
+}
+
+#else
+
+#define pix_add_mul(x, a, y, b)	 \
+    ( x = pix_multiply (x, a),	 \
+      y = pix_multiply (y, b),	 \
+      pix_add (x, y) )
+
+#endif
+
+/* --------------- MMX code patch for fbcompose.c --------------------- */
+
+static force_inline uint32_t
+combine (const uint32_t *src, const uint32_t *mask)
+{
+ uint32_t ssrc = *src;
+
+ if (mask)
+ {
+ __m64 m = load8888 (*mask);
+ __m64 s = load8888 (ssrc);
+
+ m = expand_alpha (m);
+ s = pix_multiply (s, m);
+
+ ssrc = store8888 (s);
+ }
+
+ return ssrc;
+}
+
+static void
+mmx_combine_over_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ uint32_t ssrc = combine (src, mask);
+ uint32_t a = ssrc >> 24;
+
+ if (a == 0xff)
+ {
+ *dest = ssrc;
+ }
+ else if (ssrc)
+ {
+ __m64 s, sa;
+ s = load8888 (ssrc);
+ sa = expand_alpha (s);
+ *dest = store8888 (over (s, sa, load8888 (*dest)));
+ }
+
+ ++dest;
+ ++src;
+ if (mask)
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_over_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 d, da;
+ uint32_t s = combine (src, mask);
+
+ d = load8888 (*dest);
+ da = expand_alpha (d);
+ *dest = store8888 (over (d, da, load8888 (s)));
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_in_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 x, a;
+
+ x = load8888 (combine (src, mask));
+ a = load8888 (*dest);
+ a = expand_alpha (a);
+ x = pix_multiply (x, a);
+
+ *dest = store8888 (x);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_in_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 x, a;
+
+ x = load8888 (*dest);
+ a = load8888 (combine (src, mask));
+ a = expand_alpha (a);
+ x = pix_multiply (x, a);
+ *dest = store8888 (x);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_out_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 x, a;
+
+ x = load8888 (combine (src, mask));
+ a = load8888 (*dest);
+ a = expand_alpha (a);
+ a = negate (a);
+ x = pix_multiply (x, a);
+ *dest = store8888 (x);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_out_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 x, a;
+
+ x = load8888 (*dest);
+ a = load8888 (combine (src, mask));
+ a = expand_alpha (a);
+ a = negate (a);
+ x = pix_multiply (x, a);
+
+ *dest = store8888 (x);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_atop_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 s, da, d, sia;
+
+ s = load8888 (combine (src, mask));
+ d = load8888 (*dest);
+ sia = expand_alpha (s);
+ sia = negate (sia);
+ da = expand_alpha (d);
+ s = pix_add_mul (s, da, d, sia);
+ *dest = store8888 (s);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_atop_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end;
+
+ end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 s, dia, d, sa;
+
+ s = load8888 (combine (src, mask));
+ d = load8888 (*dest);
+ sa = expand_alpha (s);
+ dia = expand_alpha (d);
+ dia = negate (dia);
+ s = pix_add_mul (s, dia, d, sa);
+ *dest = store8888 (s);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_xor_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 s, dia, d, sia;
+
+ s = load8888 (combine (src, mask));
+ d = load8888 (*dest);
+ sia = expand_alpha (s);
+ dia = expand_alpha (d);
+ sia = negate (sia);
+ dia = negate (dia);
+ s = pix_add_mul (s, dia, d, sia);
+ *dest = store8888 (s);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_add_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ __m64 s, d;
+
+ s = load8888 (combine (src, mask));
+ d = load8888 (*dest);
+ s = pix_add (s, d);
+ *dest = store8888 (s);
+
+ ++dest;
+ ++src;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_saturate_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = dest + width;
+
+ while (dest < end)
+ {
+ uint32_t s = combine (src, mask);
+ uint32_t d = *dest;
+ __m64 ms = load8888 (s);
+ __m64 md = load8888 (d);
+ uint32_t sa = s >> 24;
+ uint32_t da = ~d >> 24;
+
+ if (sa > da)
+ {
+ __m64 msa = load8888 (DIV_UN8 (da, sa) << 24);
+ msa = expand_alpha (msa);
+ ms = pix_multiply (ms, msa);
+ }
+
+ md = pix_add (md, ms);
+ *dest = store8888 (md);
+
+ ++src;
+ ++dest;
+ if (mask)
+ mask++;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_src_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+
+ s = pix_multiply (s, a);
+ *dest = store8888 (s);
+
+ ++src;
+ ++mask;
+ ++dest;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_over_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 sa = expand_alpha (s);
+
+ *dest = store8888 (in_over (s, sa, a, d));
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_over_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 da = expand_alpha (d);
+
+ *dest = store8888 (over (d, da, in (s, a)));
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_in_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 da = expand_alpha (d);
+
+ s = pix_multiply (s, a);
+ s = pix_multiply (s, da);
+ *dest = store8888 (s);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_in_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 sa = expand_alpha (s);
+
+ a = pix_multiply (a, sa);
+ d = pix_multiply (d, a);
+ *dest = store8888 (d);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_out_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 da = expand_alpha (d);
+
+ da = negate (da);
+ s = pix_multiply (s, a);
+ s = pix_multiply (s, da);
+ *dest = store8888 (s);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_out_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 sa = expand_alpha (s);
+
+ a = pix_multiply (a, sa);
+ a = negate (a);
+ d = pix_multiply (d, a);
+ *dest = store8888 (d);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_atop_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 da = expand_alpha (d);
+ __m64 sa = expand_alpha (s);
+
+ s = pix_multiply (s, a);
+ a = pix_multiply (a, sa);
+ a = negate (a);
+ d = pix_add_mul (d, a, s, da);
+ *dest = store8888 (d);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 da = expand_alpha (d);
+ __m64 sa = expand_alpha (s);
+
+ s = pix_multiply (s, a);
+ a = pix_multiply (a, sa);
+ da = negate (da);
+ d = pix_add_mul (d, a, s, da);
+ *dest = store8888 (d);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_xor_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+ __m64 da = expand_alpha (d);
+ __m64 sa = expand_alpha (s);
+
+ s = pix_multiply (s, a);
+ a = pix_multiply (a, sa);
+ da = negate (da);
+ a = negate (a);
+ d = pix_add_mul (d, a, s, da);
+ *dest = store8888 (d);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_combine_add_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dest,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ const uint32_t *end = src + width;
+
+ while (src < end)
+ {
+ __m64 a = load8888 (*mask);
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dest);
+
+ s = pix_multiply (s, a);
+ d = pix_add (s, d);
+ *dest = store8888 (d);
+
+ ++src;
+ ++dest;
+ ++mask;
+ }
+ _mm_empty ();
+}
+
+/* ------------- MMX code paths called from fbpict.c -------------------- */
+
+static void
+mmx_composite_over_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint32_t *dst_line, *dst;
+ uint16_t w;
+ int dst_stride;
+ __m64 vsrc, vsrca;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 2)
+ {
+ __m64 vdest;
+ __m64 dest0, dest1;
+
+ vdest = *(__m64 *)dst;
+
+ dest0 = over (vsrc, vsrca, expand8888 (vdest, 0));
+ dest1 = over (vsrc, vsrca, expand8888 (vdest, 1));
+
+ *(__m64 *)dst = pack8888 (dest0, dest1);
+
+ dst += 2;
+ w -= 2;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));
+
+ w--;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_n_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint16_t *dst_line, *dst;
+ uint16_t w;
+ int dst_stride;
+ __m64 vsrc, vsrca;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ uint64_t d = *dst;
+ __m64 vdest = expand565 (M64 (d), 0);
+
+ vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
+ *dst = UINT64 (vdest);
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 4)
+ {
+ __m64 vdest;
+
+ vdest = *(__m64 *)dst;
+
+ vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 0)), vdest, 0);
+ vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 1)), vdest, 1);
+ vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 2)), vdest, 2);
+ vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 3)), vdest, 3);
+
+ *(__m64 *)dst = vdest;
+
+ dst += 4;
+ w -= 4;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ uint64_t d = *dst;
+ __m64 vdest = expand565 (M64 (d), 0);
+
+ vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
+ *dst = UINT64 (vdest);
+
+ w--;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line;
+ uint32_t *mask_line;
+ int dst_stride, mask_stride;
+ __m64 vsrc, vsrca;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ int twidth = width;
+ uint32_t *p = (uint32_t *)mask_line;
+ uint32_t *q = (uint32_t *)dst_line;
+
+ while (twidth && (unsigned long)q & 7)
+ {
+ uint32_t m = *(uint32_t *)p;
+
+ if (m)
+ {
+ __m64 vdest = load8888 (*q);
+ vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
+ *q = store8888 (vdest);
+ }
+
+ twidth--;
+ p++;
+ q++;
+ }
+
+ while (twidth >= 2)
+ {
+ uint32_t m0, m1;
+ m0 = *p;
+ m1 = *(p + 1);
+
+ if (m0 | m1)
+ {
+ __m64 dest0, dest1;
+ __m64 vdest = *(__m64 *)q;
+
+ dest0 = in_over (vsrc, vsrca, load8888 (m0),
+ expand8888 (vdest, 0));
+ dest1 = in_over (vsrc, vsrca, load8888 (m1),
+ expand8888 (vdest, 1));
+
+ *(__m64 *)q = pack8888 (dest0, dest1);
+ }
+
+ p += 2;
+ q += 2;
+ twidth -= 2;
+ }
+
+ while (twidth)
+ {
+ uint32_t m = *(uint32_t *)p;
+
+ if (m)
+ {
+ __m64 vdest = load8888 (*q);
+ vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
+ *q = store8888 (vdest);
+ }
+
+ twidth--;
+ p++;
+ q++;
+ }
+
+ dst_line += dst_stride;
+ mask_line += mask_stride;
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ uint32_t mask;
+ __m64 vmask;
+ int dst_stride, src_stride;
+ uint16_t w;
+ __m64 srca;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
+ mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
+ vmask = load8888 (mask);
+ srca = MC (4x00ff);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 7)
+ {
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dst);
+
+ *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
+
+ w--;
+ dst++;
+ src++;
+ }
+
+ while (w >= 2)
+ {
+ __m64 vs = *(__m64 *)src;
+ __m64 vd = *(__m64 *)dst;
+ __m64 vsrc0 = expand8888 (vs, 0);
+ __m64 vsrc1 = expand8888 (vs, 1);
+
+ *(__m64 *)dst = pack8888 (
+ in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)),
+ in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1)));
+
+ w -= 2;
+ dst += 2;
+ src += 2;
+ }
+
+ while (w)
+ {
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dst);
+
+ *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
+
+ w--;
+ dst++;
+ src++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ uint32_t mask;
+ __m64 vmask;
+ int dst_stride, src_stride;
+ uint16_t w;
+ __m64 srca;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+ mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
+
+ mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
+ vmask = load8888 (mask);
+ srca = MC (4x00ff);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 7)
+ {
+ __m64 s = load8888 (*src | 0xff000000);
+ __m64 d = load8888 (*dst);
+
+ *dst = store8888 (in_over (s, srca, vmask, d));
+
+ w--;
+ dst++;
+ src++;
+ }
+
+ while (w >= 16)
+ {
+ __m64 vd0 = *(__m64 *)(dst + 0);
+ __m64 vd1 = *(__m64 *)(dst + 2);
+ __m64 vd2 = *(__m64 *)(dst + 4);
+ __m64 vd3 = *(__m64 *)(dst + 6);
+ __m64 vd4 = *(__m64 *)(dst + 8);
+ __m64 vd5 = *(__m64 *)(dst + 10);
+ __m64 vd6 = *(__m64 *)(dst + 12);
+ __m64 vd7 = *(__m64 *)(dst + 14);
+
+ __m64 vs0 = *(__m64 *)(src + 0);
+ __m64 vs1 = *(__m64 *)(src + 2);
+ __m64 vs2 = *(__m64 *)(src + 4);
+ __m64 vs3 = *(__m64 *)(src + 6);
+ __m64 vs4 = *(__m64 *)(src + 8);
+ __m64 vs5 = *(__m64 *)(src + 10);
+ __m64 vs6 = *(__m64 *)(src + 12);
+ __m64 vs7 = *(__m64 *)(src + 14);
+
+ vd0 = pack8888 (
+ in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
+ in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1)));
+
+ vd1 = pack8888 (
+ in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)),
+ in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1)));
+
+ vd2 = pack8888 (
+ in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)),
+ in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1)));
+
+ vd3 = pack8888 (
+ in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)),
+ in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1)));
+
+ vd4 = pack8888 (
+ in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)),
+ in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1)));
+
+ vd5 = pack8888 (
+ in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)),
+ in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1)));
+
+ vd6 = pack8888 (
+ in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)),
+ in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1)));
+
+ vd7 = pack8888 (
+ in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)),
+ in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1)));
+
+ *(__m64 *)(dst + 0) = vd0;
+ *(__m64 *)(dst + 2) = vd1;
+ *(__m64 *)(dst + 4) = vd2;
+ *(__m64 *)(dst + 6) = vd3;
+ *(__m64 *)(dst + 8) = vd4;
+ *(__m64 *)(dst + 10) = vd5;
+ *(__m64 *)(dst + 12) = vd6;
+ *(__m64 *)(dst + 14) = vd7;
+
+ w -= 16;
+ dst += 16;
+ src += 16;
+ }
+
+ while (w)
+ {
+ __m64 s = load8888 (*src | 0xff000000);
+ __m64 d = load8888 (*dst);
+
+ *dst = store8888 (in_over (s, srca, vmask, d));
+
+ w--;
+ dst++;
+ src++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ uint32_t s;
+ int dst_stride, src_stride;
+ uint8_t a;
+ uint16_t w;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w--)
+ {
+ s = *src++;
+ a = s >> 24;
+
+ if (a == 0xff)
+ {
+ *dst = s;
+ }
+ else if (s)
+ {
+ __m64 ms, sa;
+ ms = load8888 (s);
+ sa = expand_alpha (ms);
+ *dst = store8888 (over (ms, sa, load8888 (*dst)));
+ }
+
+ dst++;
+ }
+ }
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint16_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int dst_stride, src_stride;
+ uint16_t w;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+#if 0
+ /* FIXME */
+ assert (src_image->drawable == mask_image->drawable);
+#endif
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ __m64 vsrc = load8888 (*src);
+ uint64_t d = *dst;
+ __m64 vdest = expand565 (M64 (d), 0);
+
+ vdest = pack_565 (
+ over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);
+
+ *dst = UINT64 (vdest) & 0xffff;
+
+ w--;
+ dst++;
+ src++;
+ }
+
+ CHECKPOINT ();
+
+ while (w >= 4)
+ {
+ __m64 vsrc0, vsrc1, vsrc2, vsrc3;
+ __m64 vdest;
+
+ vsrc0 = load8888 (*(src + 0));
+ vsrc1 = load8888 (*(src + 1));
+ vsrc2 = load8888 (*(src + 2));
+ vsrc3 = load8888 (*(src + 3));
+
+ vdest = *(__m64 *)dst;
+
+ vdest = pack_565 (over (vsrc0, expand_alpha (vsrc0), expand565 (vdest, 0)), vdest, 0);
+ vdest = pack_565 (over (vsrc1, expand_alpha (vsrc1), expand565 (vdest, 1)), vdest, 1);
+ vdest = pack_565 (over (vsrc2, expand_alpha (vsrc2), expand565 (vdest, 2)), vdest, 2);
+ vdest = pack_565 (over (vsrc3, expand_alpha (vsrc3), expand565 (vdest, 3)), vdest, 3);
+
+ *(__m64 *)dst = vdest;
+
+ w -= 4;
+ dst += 4;
+ src += 4;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ __m64 vsrc = load8888 (*src);
+ uint64_t d = *dst;
+ __m64 vdest = expand565 (M64 (d), 0);
+
+ vdest = pack_565 (over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);
+
+ *dst = UINT64 (vdest) & 0xffff;
+
+ w--;
+ dst++;
+ src++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ uint16_t w;
+ __m64 vsrc, vsrca;
+ uint64_t srcsrc;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ srcsrc = (uint64_t)src << 32 | src;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ __m64 vdest = in_over (vsrc, vsrca,
+ expand_alpha_rev (M64 (m)),
+ load8888 (*dst));
+
+ *dst = store8888 (vdest);
+ }
+
+ w--;
+ mask++;
+ dst++;
+ }
+
+ CHECKPOINT ();
+
+ while (w >= 2)
+ {
+ uint64_t m0, m1;
+
+ m0 = *mask;
+ m1 = *(mask + 1);
+
+ if (srca == 0xff && (m0 & m1) == 0xff)
+ {
+ *(uint64_t *)dst = srcsrc;
+ }
+ else if (m0 | m1)
+ {
+ __m64 vdest;
+ __m64 dest0, dest1;
+
+ vdest = *(__m64 *)dst;
+
+ dest0 = in_over (vsrc, vsrca, expand_alpha_rev (M64 (m0)),
+ expand8888 (vdest, 0));
+ dest1 = in_over (vsrc, vsrca, expand_alpha_rev (M64 (m1)),
+ expand8888 (vdest, 1));
+
+ *(__m64 *)dst = pack8888 (dest0, dest1);
+ }
+
+ mask += 2;
+ dst += 2;
+ w -= 2;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ __m64 vdest = load8888 (*dst);
+
+ vdest = in_over (
+ vsrc, vsrca, expand_alpha_rev (M64 (m)), vdest);
+ *dst = store8888 (vdest);
+ }
+
+ w--;
+ mask++;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+pixman_bool_t
+pixman_fill_mmx (uint32_t *bits,
+ int stride,
+ int bpp,
+ int x,
+ int y,
+ int width,
+ int height,
+ uint32_t xor)
+{
+ uint64_t fill;
+ __m64 vfill;
+ uint32_t byte_width;
+ uint8_t *byte_line;
+
+#ifdef __GNUC__
+ __m64 v1, v2, v3, v4, v5, v6, v7;
+#endif
+
+ if (bpp != 16 && bpp != 32 && bpp != 8)
+ return FALSE;
+
+ if (bpp == 16 && (xor >> 16 != (xor & 0xffff)))
+ return FALSE;
+
+ if (bpp == 8 &&
+ ((xor >> 16 != (xor & 0xffff)) ||
+         (xor >> 24 != (xor & 0x00ff))))
+ {
+ return FALSE;
+ }
+
+ if (bpp == 8)
+ {
+ stride = stride * (int) sizeof (uint32_t) / 1;
+ byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
+ byte_width = width;
+ stride *= 1;
+ }
+ else if (bpp == 16)
+ {
+ stride = stride * (int) sizeof (uint32_t) / 2;
+ byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
+ byte_width = 2 * width;
+ stride *= 2;
+ }
+ else
+ {
+ stride = stride * (int) sizeof (uint32_t) / 4;
+ byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
+ byte_width = 4 * width;
+ stride *= 4;
+ }
+
+ fill = ((uint64_t)xor << 32) | xor;
+ vfill = M64 (fill);
+
+#ifdef __GNUC__
+ __asm__ (
+ "movq %7, %0\n"
+ "movq %7, %1\n"
+ "movq %7, %2\n"
+ "movq %7, %3\n"
+ "movq %7, %4\n"
+ "movq %7, %5\n"
+ "movq %7, %6\n"
+ : "=y" (v1), "=y" (v2), "=y" (v3),
+ "=y" (v4), "=y" (v5), "=y" (v6), "=y" (v7)
+ : "y" (vfill));
+#endif
+
+ while (height--)
+ {
+ int w;
+ uint8_t *d = byte_line;
+
+ byte_line += stride;
+ w = byte_width;
+
+ while (w >= 1 && ((unsigned long)d & 1))
+ {
+ *(uint8_t *)d = (xor & 0xff);
+ w--;
+ d++;
+ }
+
+ while (w >= 2 && ((unsigned long)d & 3))
+ {
+ *(uint16_t *)d = (xor & 0xffff);
+ w -= 2;
+ d += 2;
+ }
+
+ while (w >= 4 && ((unsigned long)d & 7))
+ {
+ *(uint32_t *)d = xor;
+
+ w -= 4;
+ d += 4;
+ }
+
+ while (w >= 64)
+ {
+#ifdef __GNUC__
+ __asm__ (
+ "movq %1, (%0)\n"
+ "movq %2, 8(%0)\n"
+ "movq %3, 16(%0)\n"
+ "movq %4, 24(%0)\n"
+ "movq %5, 32(%0)\n"
+ "movq %6, 40(%0)\n"
+ "movq %7, 48(%0)\n"
+ "movq %8, 56(%0)\n"
+ :
+ : "r" (d),
+ "y" (vfill), "y" (v1), "y" (v2), "y" (v3),
+ "y" (v4), "y" (v5), "y" (v6), "y" (v7)
+ : "memory");
+#else
+ *(__m64*) (d + 0) = vfill;
+ *(__m64*) (d + 8) = vfill;
+ *(__m64*) (d + 16) = vfill;
+ *(__m64*) (d + 24) = vfill;
+ *(__m64*) (d + 32) = vfill;
+ *(__m64*) (d + 40) = vfill;
+ *(__m64*) (d + 48) = vfill;
+ *(__m64*) (d + 56) = vfill;
+#endif
+ w -= 64;
+ d += 64;
+ }
+
+ while (w >= 4)
+ {
+ *(uint32_t *)d = xor;
+
+ w -= 4;
+ d += 4;
+ }
+ while (w >= 2)
+ {
+ *(uint16_t *)d = (xor & 0xffff);
+ w -= 2;
+ d += 2;
+ }
+ while (w >= 1)
+ {
+ *(uint8_t *)d = (xor & 0xff);
+ w--;
+ d++;
+ }
+
+ }
+
+ _mm_empty ();
+ return TRUE;
+}
+
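pixman_fill_mmx () only takes the fast path when the 32-bit xor value already holds the destination pixel replicated across the full word, so that the byte, word, dword and 64-byte stores above all write the same pattern; anything else makes it return FALSE and the caller falls back to the general fill. A hypothetical helper a caller could use to prepare that argument (illustrative, not part of this patch):

#include <stdint.h>

/* Hypothetical: replicate an 8- or 16-bit pixel value across 32 bits,
 * which is the form pixman_fill_mmx expects for bpp 8 and 16. */
static uint32_t
replicate_pixel_32 (uint32_t pixel, int bpp)
{
    if (bpp == 8)
        return (pixel & 0xff) * 0x01010101u;
    if (bpp == 16)
        return (pixel & 0xffff) * 0x00010001u;
    return pixel;   /* bpp == 32: already full width */
}
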
+static void
+mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ uint16_t w;
+ __m64 vsrc, vsrca;
+ uint64_t srcsrc;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ {
+ pixman_fill_mmx (dst_image->bits.bits, dst_image->bits.rowstride,
+ PIXMAN_FORMAT_BPP (dst_image->bits.format),
+ dest_x, dest_y, width, height, 0);
+ return;
+ }
+
+ srcsrc = (uint64_t)src << 32 | src;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ __m64 vdest = in (vsrc, expand_alpha_rev (M64 (m)));
+
+ *dst = store8888 (vdest);
+ }
+ else
+ {
+ *dst = 0;
+ }
+
+ w--;
+ mask++;
+ dst++;
+ }
+
+ CHECKPOINT ();
+
+ while (w >= 2)
+ {
+ uint64_t m0, m1;
+ m0 = *mask;
+ m1 = *(mask + 1);
+
+ if (srca == 0xff && (m0 & m1) == 0xff)
+ {
+ *(uint64_t *)dst = srcsrc;
+ }
+ else if (m0 | m1)
+ {
+ __m64 vdest;
+ __m64 dest0, dest1;
+
+ vdest = *(__m64 *)dst;
+
+ dest0 = in (vsrc, expand_alpha_rev (M64 (m0)));
+ dest1 = in (vsrc, expand_alpha_rev (M64 (m1)));
+
+ *(__m64 *)dst = pack8888 (dest0, dest1);
+ }
+ else
+ {
+ *(uint64_t *)dst = 0;
+ }
+
+ mask += 2;
+ dst += 2;
+ w -= 2;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ __m64 vdest = load8888 (*dst);
+
+ vdest = in (vsrc, expand_alpha_rev (M64 (m)));
+ *dst = store8888 (vdest);
+ }
+ else
+ {
+ *dst = 0;
+ }
+
+ w--;
+ mask++;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint16_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ uint16_t w;
+ __m64 vsrc, vsrca, tmp;
+ uint64_t srcsrcsrcsrc, src16;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0);
+ src16 = UINT64 (tmp);
+
+ srcsrcsrcsrc =
+ (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
+ (uint64_t)src16 << 16 | (uint64_t)src16;
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ uint64_t d = *dst;
+ __m64 vd = M64 (d);
+ __m64 vdest = in_over (
+ vsrc, vsrca, expand_alpha_rev (M64 (m)), expand565 (vd, 0));
+
+ vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
+ *dst = UINT64 (vd);
+ }
+
+ w--;
+ mask++;
+ dst++;
+ }
+
+ CHECKPOINT ();
+
+ while (w >= 4)
+ {
+ uint64_t m0, m1, m2, m3;
+ m0 = *mask;
+ m1 = *(mask + 1);
+ m2 = *(mask + 2);
+ m3 = *(mask + 3);
+
+ if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
+ {
+ *(uint64_t *)dst = srcsrcsrcsrc;
+ }
+ else if (m0 | m1 | m2 | m3)
+ {
+ __m64 vdest;
+ __m64 vm0, vm1, vm2, vm3;
+
+ vdest = *(__m64 *)dst;
+
+ vm0 = M64 (m0);
+ vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm0),
+ expand565 (vdest, 0)), vdest, 0);
+ vm1 = M64 (m1);
+ vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm1),
+ expand565 (vdest, 1)), vdest, 1);
+ vm2 = M64 (m2);
+ vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm2),
+ expand565 (vdest, 2)), vdest, 2);
+ vm3 = M64 (m3);
+ vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm3),
+ expand565 (vdest, 3)), vdest, 3);
+
+ *(__m64 *)dst = vdest;
+ }
+
+ w -= 4;
+ mask += 4;
+ dst += 4;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ uint64_t d = *dst;
+ __m64 vd = M64 (d);
+ __m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (M64 (m)),
+ expand565 (vd, 0));
+ vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
+ *dst = UINT64 (vd);
+ }
+
+ w--;
+ mask++;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
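The 0565 paths above move each destination pixel from r5g6b5 to roughly 8 bits per channel with expand565 (), blend there, and narrow back with pack_565 (). For reference, the conventional scalar conversion looks like this (illustrative only; the MMX helpers keep the channels in 16-bit lanes and use cheaper shift/mask approximations instead of replicating the low bits):

#include <stdint.h>

/* Conventional r5g6b5 -> 8-bit-per-channel expansion with bit replication. */
static void
unpack_0565 (uint16_t p, uint8_t *r, uint8_t *g, uint8_t *b)
{
    uint8_t r5 = (p >> 11) & 0x1f;
    uint8_t g6 = (p >> 5) & 0x3f;
    uint8_t b5 = p & 0x1f;

    *r = (r5 << 3) | (r5 >> 2);
    *g = (g6 << 2) | (g6 >> 4);
    *b = (b5 << 3) | (b5 >> 2);
}

/* And the corresponding narrowing back to r5g6b5. */
static uint16_t
pack_0565 (uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t) (((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}
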
+static void
+mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint16_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int dst_stride, src_stride;
+ uint16_t w;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+#if 0
+ /* FIXME */
+ assert (src_image->drawable == mask_image->drawable);
+#endif
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ CHECKPOINT ();
+
+ while (w && (unsigned long)dst & 7)
+ {
+ __m64 vsrc = load8888 (*src);
+ uint64_t d = *dst;
+ __m64 vdest = expand565 (M64 (d), 0);
+
+ vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);
+
+ *dst = UINT64 (vdest);
+
+ w--;
+ dst++;
+ src++;
+ }
+
+ CHECKPOINT ();
+
+ while (w >= 4)
+ {
+ uint32_t s0, s1, s2, s3;
+ unsigned char a0, a1, a2, a3;
+
+ s0 = *src;
+ s1 = *(src + 1);
+ s2 = *(src + 2);
+ s3 = *(src + 3);
+
+ a0 = (s0 >> 24);
+ a1 = (s1 >> 24);
+ a2 = (s2 >> 24);
+ a3 = (s3 >> 24);
+
+ if ((a0 & a1 & a2 & a3) == 0xFF)
+ {
+ __m64 vdest;
+ vdest = pack_565 (invert_colors (load8888 (s0)), _mm_setzero_si64 (), 0);
+ vdest = pack_565 (invert_colors (load8888 (s1)), vdest, 1);
+ vdest = pack_565 (invert_colors (load8888 (s2)), vdest, 2);
+ vdest = pack_565 (invert_colors (load8888 (s3)), vdest, 3);
+
+ *(__m64 *)dst = vdest;
+ }
+ else if (s0 | s1 | s2 | s3)
+ {
+ __m64 vdest = *(__m64 *)dst;
+
+ vdest = pack_565 (over_rev_non_pre (load8888 (s0), expand565 (vdest, 0)), vdest, 0);
+ vdest = pack_565 (over_rev_non_pre (load8888 (s1), expand565 (vdest, 1)), vdest, 1);
+ vdest = pack_565 (over_rev_non_pre (load8888 (s2), expand565 (vdest, 2)), vdest, 2);
+ vdest = pack_565 (over_rev_non_pre (load8888 (s3), expand565 (vdest, 3)), vdest, 3);
+
+ *(__m64 *)dst = vdest;
+ }
+
+ w -= 4;
+ dst += 4;
+ src += 4;
+ }
+
+ CHECKPOINT ();
+
+ while (w)
+ {
+ __m64 vsrc = load8888 (*src);
+ uint64_t d = *dst;
+ __m64 vdest = expand565 (M64 (d), 0);
+
+ vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);
+
+ *dst = UINT64 (vdest);
+
+ w--;
+ dst++;
+ src++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int dst_stride, src_stride;
+ uint16_t w;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+#if 0
+ /* FIXME */
+ assert (src_image->drawable == mask_image->drawable);
+#endif
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 7)
+ {
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dst);
+
+ *dst = store8888 (over_rev_non_pre (s, d));
+
+ w--;
+ dst++;
+ src++;
+ }
+
+ while (w >= 2)
+ {
+ uint64_t s0, s1;
+ unsigned char a0, a1;
+ __m64 d0, d1;
+
+ s0 = *src;
+ s1 = *(src + 1);
+
+ a0 = (s0 >> 24);
+ a1 = (s1 >> 24);
+
+ if ((a0 & a1) == 0xFF)
+ {
+ d0 = invert_colors (load8888 (s0));
+ d1 = invert_colors (load8888 (s1));
+
+ *(__m64 *)dst = pack8888 (d0, d1);
+ }
+ else if (s0 | s1)
+ {
+ __m64 vdest = *(__m64 *)dst;
+
+ d0 = over_rev_non_pre (load8888 (s0), expand8888 (vdest, 0));
+ d1 = over_rev_non_pre (load8888 (s1), expand8888 (vdest, 1));
+
+ *(__m64 *)dst = pack8888 (d0, d1);
+ }
+
+ w -= 2;
+ dst += 2;
+ src += 2;
+ }
+
+ while (w)
+ {
+ __m64 s = load8888 (*src);
+ __m64 d = load8888 (*dst);
+
+ *dst = store8888 (over_rev_non_pre (s, d));
+
+ w--;
+ dst++;
+ src++;
+ }
+ }
+
+ _mm_empty ();
+}
+
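The two pixbuf paths handle the case where source and mask refer to the same non-premultiplied image (the PIXMAN_pixbuf / PIXMAN_rpixbuf entries in the fast-path table further down), so over_rev_non_pre () premultiplies each source pixel by its own alpha, invert_colors () reorders the channels, and only then does the usual OVER. Per channel that reduces to the following scalar sketch (illustrative only; channel reordering omitted, mul_un8 as in the earlier sketch):

/* OVER with a non-premultiplied source channel: premultiply by the
 * pixel's alpha first, then blend against the destination. */
static uint8_t
over_non_premultiplied_un8 (uint8_t src, uint8_t alpha, uint8_t dest)
{
    return mul_un8 (src, alpha) + mul_un8 (dest, 255 - alpha);
}
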
+static void
+mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint16_t *dst_line;
+ uint32_t *mask_line;
+ int dst_stride, mask_stride;
+ __m64 vsrc, vsrca;
+
+ CHECKPOINT ();
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ int twidth = width;
+ uint32_t *p = (uint32_t *)mask_line;
+ uint16_t *q = (uint16_t *)dst_line;
+
+ while (twidth && ((unsigned long)q & 7))
+ {
+ uint32_t m = *(uint32_t *)p;
+
+ if (m)
+ {
+ uint64_t d = *q;
+ __m64 vdest = expand565 (M64 (d), 0);
+ vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
+ *q = UINT64 (vdest);
+ }
+
+ twidth--;
+ p++;
+ q++;
+ }
+
+ while (twidth >= 4)
+ {
+ uint32_t m0, m1, m2, m3;
+
+ m0 = *p;
+ m1 = *(p + 1);
+ m2 = *(p + 2);
+ m3 = *(p + 3);
+
+ if ((m0 | m1 | m2 | m3))
+ {
+ __m64 vdest = *(__m64 *)q;
+
+ vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m0), expand565 (vdest, 0)), vdest, 0);
+ vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m1), expand565 (vdest, 1)), vdest, 1);
+ vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m2), expand565 (vdest, 2)), vdest, 2);
+ vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m3), expand565 (vdest, 3)), vdest, 3);
+
+ *(__m64 *)q = vdest;
+ }
+ twidth -= 4;
+ p += 4;
+ q += 4;
+ }
+
+ while (twidth)
+ {
+ uint32_t m;
+
+ m = *(uint32_t *)p;
+ if (m)
+ {
+ uint64_t d = *q;
+ __m64 vdest = expand565 (M64 (d), 0);
+ vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
+ *q = UINT64 (vdest);
+ }
+
+ twidth--;
+ p++;
+ q++;
+ }
+
+ mask_line += mask_stride;
+ dst_line += dst_stride;
+ }
+
+ _mm_empty ();
+}
+
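In the *_ca paths such as mmx_composite_over_n_8888_0565_ca () above, the mask is a full a8r8g8b8 pixel rather than a single coverage byte, and each channel of the solid source is masked by the matching channel of the mask ("component alpha"). In scalar terms, reusing in_over_un8 from the earlier sketch (illustrative only):

/* Component-alpha IN_OVER on one pixel: every destination channel uses
 * its own mask channel instead of one shared coverage value. */
static void
in_over_ca_pixel (const uint8_t src[4], uint8_t srca,
                  const uint8_t mask[4], uint8_t dest[4])
{
    int i;

    for (i = 0; i < 4; i++)
        dest[i] = in_over_un8 (src[i], srca, mask[i], dest[i]);
}
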
+static void
+mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ uint16_t w;
+ uint32_t src;
+ uint8_t sa;
+ __m64 vsrc, vsrca;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ sa = src >> 24;
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ if ((((unsigned long)dst_image & 3) == 0) &&
+ (((unsigned long)src_image & 3) == 0))
+ {
+ while (w >= 4)
+ {
+ uint32_t m;
+ __m64 vmask;
+ __m64 vdest;
+
+ m = 0;
+
+ vmask = load8888 (*(uint32_t *)mask);
+ vdest = load8888 (*(uint32_t *)dst);
+
+ *(uint32_t *)dst = store8888 (in (in (vsrca, vmask), vdest));
+
+ dst += 4;
+ mask += 4;
+ w -= 4;
+ }
+ }
+
+ while (w--)
+ {
+ uint16_t tmp;
+ uint8_t a;
+ uint32_t m, d;
+
+ a = *mask++;
+ d = *dst;
+
+ m = MUL_UN8 (sa, a, tmp);
+ d = MUL_UN8 (m, d, tmp);
+
+ *dst++ = d;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_in_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *src_line, *src;
+ int src_stride, dst_stride;
+ uint16_t w;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ if ((((unsigned long)dst_image & 3) == 0) &&
+ (((unsigned long)src_image & 3) == 0))
+ {
+ while (w >= 4)
+ {
+ uint32_t *s = (uint32_t *)src;
+ uint32_t *d = (uint32_t *)dst;
+
+ *d = store8888 (in (load8888 (*s), load8888 (*d)));
+
+ w -= 4;
+ dst += 4;
+ src += 4;
+ }
+ }
+
+ while (w--)
+ {
+ uint8_t s, d;
+ uint16_t tmp;
+
+ s = *src;
+ d = *dst;
+
+ *dst = MUL_UN8 (s, d, tmp);
+
+ src++;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_add_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ uint16_t w;
+ uint32_t src;
+ uint8_t sa;
+ __m64 vsrc, vsrca;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ sa = src >> 24;
+
+ if (src == 0)
+ return;
+
+ vsrc = load8888 (src);
+ vsrca = expand_alpha (vsrc);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ if ((((unsigned long)mask_image & 3) == 0) &&
+ (((unsigned long)dst_image & 3) == 0))
+ {
+ while (w >= 4)
+ {
+ __m64 vmask = load8888 (*(uint32_t *)mask);
+ __m64 vdest = load8888 (*(uint32_t *)dst);
+
+ *(uint32_t *)dst = store8888 (_mm_adds_pu8 (in (vsrca, vmask), vdest));
+
+ w -= 4;
+ dst += 4;
+ mask += 4;
+ }
+ }
+
+ while (w--)
+ {
+ uint16_t tmp;
+ uint16_t a;
+ uint32_t m, d;
+ uint32_t r;
+
+ a = *mask++;
+ d = *dst;
+
+ m = MUL_UN8 (sa, a, tmp);
+ r = ADD_UN8 (m, d, tmp);
+
+ *dst++ = r;
+ }
+ }
+
+ _mm_empty ();
+}
+
+static void
+mmx_composite_add_8000_8000 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *src_line, *src;
+ int dst_stride, src_stride;
+ uint16_t w;
+ uint8_t s, d;
+ uint16_t t;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 7)
+ {
+ s = *src;
+ d = *dst;
+ t = d + s;
+ s = t | (0 - (t >> 8));
+ *dst = s;
+
+ dst++;
+ src++;
+ w--;
+ }
+
+ while (w >= 8)
+ {
+ *(__m64*)dst = _mm_adds_pu8 (*(__m64*)src, *(__m64*)dst);
+ dst += 8;
+ src += 8;
+ w -= 8;
+ }
+
+ while (w)
+ {
+ s = *src;
+ d = *dst;
+ t = d + s;
+ s = t | (0 - (t >> 8));
+ *dst = s;
+
+ dst++;
+ src++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
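The unaligned head and tail of mmx_composite_add_8000_8000 () use a branch-free byte saturation: t holds the 9-bit sum in a uint16_t, t >> 8 is 1 exactly when the sum overflowed 255, so 0 - (t >> 8) is either all zeros or all ones, and OR-ing it in clamps the result to 0xff. The 8-pixel loop gets the same effect from _mm_adds_pu8. As a standalone sketch (illustrative only):

#include <stdint.h>

/* Branch-free saturating byte add, equivalent to the scalar tails above. */
static uint8_t
saturating_add_u8 (uint8_t d, uint8_t s)
{
    uint16_t t = (uint16_t) d + s;

    return (uint8_t) (t | (0 - (t >> 8)));
    /* e.g. 200 + 100 -> 255, 10 + 20 -> 30 */
}
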
+static void
+mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ __m64 dst64;
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int dst_stride, src_stride;
+ uint16_t w;
+
+ CHECKPOINT ();
+
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 7)
+ {
+ *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
+ _mm_cvtsi32_si64 (*dst)));
+ dst++;
+ src++;
+ w--;
+ }
+
+ while (w >= 2)
+ {
+ dst64 = _mm_adds_pu8 (*(__m64*)src, *(__m64*)dst);
+ *(uint64_t*)dst = UINT64 (dst64);
+ dst += 2;
+ src += 2;
+ w -= 2;
+ }
+
+ if (w)
+ {
+ *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
+ _mm_cvtsi32_si64 (*dst)));
+
+ }
+ }
+
+ _mm_empty ();
+}
+
+static pixman_bool_t
+pixman_blt_mmx (uint32_t *src_bits,
+ uint32_t *dst_bits,
+ int src_stride,
+ int dst_stride,
+ int src_bpp,
+ int dst_bpp,
+ int src_x,
+ int src_y,
+ int dst_x,
+ int dst_y,
+ int width,
+ int height)
+{
+ uint8_t * src_bytes;
+ uint8_t * dst_bytes;
+ int byte_width;
+
+ if (src_bpp != dst_bpp)
+ return FALSE;
+
+ if (src_bpp == 16)
+ {
+ src_stride = src_stride * (int) sizeof (uint32_t) / 2;
+ dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
+ src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
+ dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
+ byte_width = 2 * width;
+ src_stride *= 2;
+ dst_stride *= 2;
+ }
+ else if (src_bpp == 32)
+ {
+ src_stride = src_stride * (int) sizeof (uint32_t) / 4;
+ dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
+ src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
+ dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
+ byte_width = 4 * width;
+ src_stride *= 4;
+ dst_stride *= 4;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ while (height--)
+ {
+ int w;
+ uint8_t *s = src_bytes;
+ uint8_t *d = dst_bytes;
+ src_bytes += src_stride;
+ dst_bytes += dst_stride;
+ w = byte_width;
+
+ while (w >= 2 && ((unsigned long)d & 3))
+ {
+ *(uint16_t *)d = *(uint16_t *)s;
+ w -= 2;
+ s += 2;
+ d += 2;
+ }
+
+ while (w >= 4 && ((unsigned long)d & 7))
+ {
+ *(uint32_t *)d = *(uint32_t *)s;
+
+ w -= 4;
+ s += 4;
+ d += 4;
+ }
+
+ while (w >= 64)
+ {
+#if defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
+ __asm__ (
+ "movq (%1), %%mm0\n"
+ "movq 8(%1), %%mm1\n"
+ "movq 16(%1), %%mm2\n"
+ "movq 24(%1), %%mm3\n"
+ "movq 32(%1), %%mm4\n"
+ "movq 40(%1), %%mm5\n"
+ "movq 48(%1), %%mm6\n"
+ "movq 56(%1), %%mm7\n"
+
+ "movq %%mm0, (%0)\n"
+ "movq %%mm1, 8(%0)\n"
+ "movq %%mm2, 16(%0)\n"
+ "movq %%mm3, 24(%0)\n"
+ "movq %%mm4, 32(%0)\n"
+ "movq %%mm5, 40(%0)\n"
+ "movq %%mm6, 48(%0)\n"
+ "movq %%mm7, 56(%0)\n"
+ :
+ : "r" (d), "r" (s)
+ : "memory",
+ "%mm0", "%mm1", "%mm2", "%mm3",
+ "%mm4", "%mm5", "%mm6", "%mm7");
+#else
+ __m64 v0 = *(__m64 *)(s + 0);
+ __m64 v1 = *(__m64 *)(s + 8);
+ __m64 v2 = *(__m64 *)(s + 16);
+ __m64 v3 = *(__m64 *)(s + 24);
+ __m64 v4 = *(__m64 *)(s + 32);
+ __m64 v5 = *(__m64 *)(s + 40);
+ __m64 v6 = *(__m64 *)(s + 48);
+ __m64 v7 = *(__m64 *)(s + 56);
+ *(__m64 *)(d + 0) = v0;
+ *(__m64 *)(d + 8) = v1;
+ *(__m64 *)(d + 16) = v2;
+ *(__m64 *)(d + 24) = v3;
+ *(__m64 *)(d + 32) = v4;
+ *(__m64 *)(d + 40) = v5;
+ *(__m64 *)(d + 48) = v6;
+ *(__m64 *)(d + 56) = v7;
+#endif
+
+ w -= 64;
+ s += 64;
+ d += 64;
+ }
+ while (w >= 4)
+ {
+ *(uint32_t *)d = *(uint32_t *)s;
+
+ w -= 4;
+ s += 4;
+ d += 4;
+ }
+ if (w >= 2)
+ {
+ *(uint16_t *)d = *(uint16_t *)s;
+ w -= 2;
+ s += 2;
+ d += 2;
+ }
+ }
+
+ _mm_empty ();
+
+ return TRUE;
+}
+
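Both pixman_fill_mmx () and pixman_blt_mmx () do the same stride bookkeeping: the rowstride of a bits image is stored in uint32_t units, so it is first rescaled to units of the pixel type (for the x/y pointer arithmetic) and then to bytes (for stepping from one row to the next). The 16-bpp case, pulled out into a small sketch (illustrative only):

#include <stdint.h>

/* Locate the first pixel of a rectangle in a 16-bpp image whose rowstride
 * is given in uint32_t units, and return the byte stride for row stepping. */
static uint8_t *
first_pixel_16bpp (uint32_t *bits, int stride_in_uint32, int x, int y,
                   int *byte_stride)
{
    int stride_in_uint16 = stride_in_uint32 * (int) sizeof (uint32_t) / 2;

    *byte_stride = stride_in_uint16 * 2;
    return (uint8_t *) ((uint16_t *) bits + stride_in_uint16 * y + x);
}
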
+static void
+mmx_composite_copy_area (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ pixman_blt_mmx (src_image->bits.bits,
+ dst_image->bits.bits,
+ src_image->bits.rowstride,
+ dst_image->bits.rowstride,
+ PIXMAN_FORMAT_BPP (src_image->bits.format),
+ PIXMAN_FORMAT_BPP (dst_image->bits.format),
+ src_x, src_y, dest_x, dest_y, width, height);
+}
+
+#if 0
+static void
+mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *src, *src_line;
+ uint32_t *dst, *dst_line;
+ uint8_t *mask, *mask_line;
+ int src_stride, mask_stride, dst_stride;
+ uint16_t w;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ src = src_line;
+ src_line += src_stride;
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+
+ w = width;
+
+ while (w--)
+ {
+ uint64_t m = *mask;
+
+ if (m)
+ {
+ __m64 s = load8888 (*src | 0xff000000);
+
+ if (m == 0xff)
+ {
+ *dst = store8888 (s);
+ }
+ else
+ {
+ __m64 sa = expand_alpha (s);
+ __m64 vm = expand_alpha_rev (M64 (m));
+ __m64 vdest = in_over (s, sa, vm, load8888 (*dst));
+
+ *dst = store8888 (vdest);
+ }
+ }
+
+ mask++;
+ dst++;
+ src++;
+ }
+ }
+
+ _mm_empty ();
+}
+#endif
+
+static const pixman_fast_path_t mmx_fast_paths[] =
+{
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, mmx_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, mmx_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_r5g6b5, mmx_composite_over_n_8888_0565_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_a8b8g8r8, mmx_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_x8b8g8r8, mmx_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_b5g6r5, mmx_composite_over_n_8888_0565_ca },
+ { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_a8r8g8b8, mmx_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_x8r8g8b8, mmx_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_r5g6b5, mmx_composite_over_pixbuf_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_a8b8g8r8, mmx_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_x8b8g8r8, mmx_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_b5g6r5, mmx_composite_over_pixbuf_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, mmx_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, mmx_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, mmx_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, mmx_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, mmx_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, mmx_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, mmx_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, mmx_composite_over_8888_n_8888 },
+#if 0
+ /* FIXME: This code is commented out since it's apparently not actually faster than the generic code. */
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888 },
+#endif
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_over_n_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area },
+
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_over_8888_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, mmx_composite_over_8888_0565 },
+
+ { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_add_8888_8888 },
+ { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_add_8888_8888 },
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_add_8000_8000 },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_add_n_8_8 },
+
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, mmx_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, mmx_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, mmx_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, mmx_composite_copy_area },
+
+ { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_in_8_8 },
+ { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_in_n_8_8 },
+
+ { PIXMAN_OP_NONE },
+};
+
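mmx_fast_paths is a plain linear table: each row names an operator, a source, mask and destination format (including the depth-0 pseudo-formats PIXMAN_solid, PIXMAN_null and the new *_ca / pixbuf codes introduced in pixman-private.h below) and the routine to run, with PIXMAN_OP_NONE terminating the list. mmx_composite () hands it to _pixman_run_fast_path (), whose lookup is essentially the following sketch (illustrative only; the real helper also normalises solid sources and component-alpha masks before matching):

/* Sketch of a fast-path table walk over entries like the ones above,
 * assuming the pixman_fast_path_t layout from pixman-private.h. */
static pixman_composite_func_t
lookup_fast_path (const pixman_fast_path_t *paths,
                  pixman_op_t               op,
                  pixman_format_code_t      src_format,
                  pixman_format_code_t      mask_format,
                  pixman_format_code_t      dest_format)
{
    const pixman_fast_path_t *p;

    for (p = paths; p->op != PIXMAN_OP_NONE; p++)
    {
        if (p->op == op &&
            p->src_format == src_format &&
            p->mask_format == mask_format &&
            p->dest_format == dest_format)
        {
            return p->func;
        }
    }

    return NULL;
}
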
+static void
+mmx_composite (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src,
+ pixman_image_t * mask,
+ pixman_image_t * dest,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ if (_pixman_run_fast_path (mmx_fast_paths, imp,
+ op, src, mask, dest,
+ src_x, src_y,
+ mask_x, mask_y,
+ dest_x, dest_y,
+ width, height))
+ {
+ return;
+ }
+
+ _pixman_implementation_composite (imp->delegate,
+ op, src, mask, dest, src_x, src_y,
+ mask_x, mask_y, dest_x, dest_y,
+ width, height);
+}
+
+static pixman_bool_t
+mmx_blt (pixman_implementation_t *imp,
+ uint32_t * src_bits,
+ uint32_t * dst_bits,
+ int src_stride,
+ int dst_stride,
+ int src_bpp,
+ int dst_bpp,
+ int src_x,
+ int src_y,
+ int dst_x,
+ int dst_y,
+ int width,
+ int height)
+{
+ if (!pixman_blt_mmx (
+ src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+ src_x, src_y, dst_x, dst_y, width, height))
+
+ {
+ return _pixman_implementation_blt (
+ imp->delegate,
+ src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+ src_x, src_y, dst_x, dst_y, width, height);
+ }
+
+ return TRUE;
+}
+
+static pixman_bool_t
+mmx_fill (pixman_implementation_t *imp,
+ uint32_t * bits,
+ int stride,
+ int bpp,
+ int x,
+ int y,
+ int width,
+ int height,
+ uint32_t xor)
+{
+ if (!pixman_fill_mmx (bits, stride, bpp, x, y, width, height, xor))
+ {
+ return _pixman_implementation_fill (
+ imp->delegate, bits, stride, bpp, x, y, width, height, xor);
+ }
+
+ return TRUE;
+}
+
+pixman_implementation_t *
+_pixman_implementation_create_mmx (void)
+{
+ pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
+ pixman_implementation_t *imp = _pixman_implementation_create (general);
+
+ imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u;
+ imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u;
+ imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u;
+ imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u;
+ imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u;
+ imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u;
+ imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u;
+ imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u;
+ imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u;
+ imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
+ imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;
+
+ imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca;
+ imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca;
+ imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca;
+ imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca;
+ imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca;
+ imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca;
+ imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca;
+
+ imp->composite = mmx_composite;
+ imp->blt = mmx_blt;
+ imp->fill = mmx_fill;
+
+ return imp;
+}
+
+#endif /* USE_MMX */
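_pixman_implementation_create_mmx () stacks the MMX code on top of the fast-path implementation: the combine_32 / combine_32_ca slots it fills override those operators, everything it leaves alone falls through to the delegate, and composite, blt and fill each try the MMX routine first and defer otherwise (the SSE2 code in pixman-sse2.c below builds a further layer in the same fashion). A hypothetical illustration of a fill travelling down that chain, using the internal API purely for the sake of the example:

#include "pixman-private.h"

/* Hypothetical: the MMX layer tries pixman_fill_mmx () and, when that
 * declines (unsupported bpp, un-replicated xor, ...), mmx_fill forwards
 * the request to imp->delegate, i.e. the fast-path implementation.
 * (Normally the implementation is created once at startup by
 * _pixman_choose_implementation, not per call as done here.) */
static void
fill_through_chain (uint32_t *bits, int stride, int bpp,
                    int x, int y, int width, int height, uint32_t xor)
{
    pixman_implementation_t *imp = _pixman_implementation_create_mmx ();

    _pixman_implementation_fill (imp, bits, stride, bpp,
                                 x, y, width, height, xor);
}
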
diff --git a/pixman/pixman/pixman-private.h b/pixman/pixman/pixman-private.h
index ff7a65f88..5000f910d 100644
--- a/pixman/pixman/pixman-private.h
+++ b/pixman/pixman/pixman-private.h
@@ -5,6 +5,9 @@
#ifndef PIXMAN_PRIVATE_H
#define PIXMAN_PRIVATE_H
+#define PIXMAN_DISABLE_DEPRECATED
+#define PIXMAN_USE_INTERNAL_API
+
#include "pixman.h"
#include <time.h>
#include <assert.h>
@@ -120,8 +123,6 @@ struct gradient
int n_stops;
pixman_gradient_stop_t *stops;
int stop_range;
- uint32_t * color_table;
- int color_table_size;
};
struct linear_gradient
@@ -437,10 +438,10 @@ struct pixman_implementation_t
pixman_blt_func_t blt;
pixman_fill_func_t fill;
- pixman_combine_32_func_t combine_32[PIXMAN_OP_LAST];
- pixman_combine_32_func_t combine_32_ca[PIXMAN_OP_LAST];
- pixman_combine_64_func_t combine_64[PIXMAN_OP_LAST];
- pixman_combine_64_func_t combine_64_ca[PIXMAN_OP_LAST];
+ pixman_combine_32_func_t combine_32[PIXMAN_N_OPERATORS];
+ pixman_combine_32_func_t combine_32_ca[PIXMAN_N_OPERATORS];
+ pixman_combine_64_func_t combine_64[PIXMAN_N_OPERATORS];
+ pixman_combine_64_func_t combine_64_ca[PIXMAN_N_OPERATORS];
};
pixman_implementation_t *
@@ -556,15 +557,15 @@ _pixman_choose_implementation (void);
* Utilities
*/
-/* These "formats" both have depth 0, so they
+/* These "formats" all have depth 0, so they
* will never clash with any real ones
*/
#define PIXMAN_null PIXMAN_FORMAT (0, 0, 0, 0, 0, 0)
#define PIXMAN_solid PIXMAN_FORMAT (0, 1, 0, 0, 0, 0)
-
-#define NEED_COMPONENT_ALPHA (1 << 0)
-#define NEED_PIXBUF (1 << 1)
-#define NEED_SOLID_MASK (1 << 2)
+#define PIXMAN_a8r8g8b8_ca PIXMAN_FORMAT (0, 2, 0, 0, 0, 0)
+#define PIXMAN_a8b8g8r8_ca PIXMAN_FORMAT (0, 3, 0, 0, 0, 0)
+#define PIXMAN_pixbuf PIXMAN_FORMAT (0, 4, 0, 0, 0, 0)
+#define PIXMAN_rpixbuf PIXMAN_FORMAT (0, 5, 0, 0, 0, 0)
typedef struct
{
@@ -573,7 +574,6 @@ typedef struct
pixman_format_code_t mask_format;
pixman_format_code_t dest_format;
pixman_composite_func_t func;
- uint32_t flags;
} pixman_fast_path_t;
/* Memory allocation helpers */
@@ -612,14 +612,14 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
- int16_t src_x,
- int16_t src_y,
- int16_t mask_x,
- int16_t mask_y,
- int16_t dest_x,
- int16_t dest_y,
- uint16_t width,
- uint16_t height,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height,
pixman_composite_func_t composite_rect);
void
diff --git a/pixman/pixman/pixman-sse2.c b/pixman/pixman/pixman-sse2.c
index bb74882b2..78b0ad185 100644
--- a/pixman/pixman/pixman-sse2.c
+++ b/pixman/pixman/pixman-sse2.c
@@ -368,6 +368,22 @@ cache_prefetch_next (__m128i* addr)
_mm_prefetch ((void const *)(addr + 4), _MM_HINT_T0); /* 64 bytes ahead */
}
+/* Prefetching NULL is very slow on some systems, so don't do that. */
+
+static force_inline void
+maybe_prefetch (__m128i* addr)
+{
+ if (addr)
+ cache_prefetch (addr);
+}
+
+static force_inline void
+maybe_prefetch_next (__m128i* addr)
+{
+ if (addr)
+ cache_prefetch_next (addr);
+}
+
/* load 4 pixels from a 16-byte boundary aligned address */
static force_inline __m128i
load_128_aligned (__m128i* src)
@@ -413,9 +429,15 @@ save_128_unaligned (__m128i* dst,
*/
static force_inline __m64
+load_32_1x64 (uint32_t data)
+{
+ return _mm_cvtsi32_si64 (data);
+}
+
+static force_inline __m64
unpack_32_1x64 (uint32_t data)
{
- return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (data), _mm_setzero_si64 ());
+ return _mm_unpacklo_pi8 (load_32_1x64 (data), _mm_setzero_si64 ());
}
static force_inline __m64
@@ -629,7 +651,7 @@ core_combine_over_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
/* Align dst on a 16-byte boundary */
while (w && ((unsigned long)pd & 15))
@@ -647,14 +669,14 @@ core_combine_over_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
/* I'm loading unaligned because I'm not sure about
* the address alignment.
@@ -720,7 +742,7 @@ core_combine_over_reverse_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
/* Align dst on a 16-byte boundary */
while (w &&
@@ -739,14 +761,14 @@ core_combine_over_reverse_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
/* I'm loading unaligned because I'm not sure
* about the address alignment.
@@ -822,7 +844,7 @@ core_combine_in_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -839,14 +861,14 @@ core_combine_in_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_dst_hi = load_128_aligned ((__m128i*) pd);
xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
@@ -896,7 +918,7 @@ core_combine_reverse_in_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -913,14 +935,14 @@ core_combine_reverse_in_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_dst_hi = load_128_aligned ((__m128i*) pd);
xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
@@ -965,7 +987,7 @@ core_combine_reverse_out_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -986,7 +1008,7 @@ core_combine_reverse_out_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
@@ -996,7 +1018,7 @@ core_combine_reverse_out_u_sse2 (uint32_t* pd,
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
xmm_dst_hi = load_128_aligned ((__m128i*) pd);
@@ -1047,7 +1069,7 @@ core_combine_out_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -1067,7 +1089,7 @@ core_combine_out_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
@@ -1077,7 +1099,7 @@ core_combine_out_u_sse2 (uint32_t* pd,
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
xmm_dst_hi = load_128_aligned ((__m128i*) pd);
@@ -1147,7 +1169,7 @@ core_combine_atop_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -1164,14 +1186,14 @@ core_combine_atop_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
xmm_dst_hi = load_128_aligned ((__m128i*) pd);
@@ -1244,7 +1266,7 @@ core_combine_reverse_atop_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -1261,14 +1283,14 @@ core_combine_reverse_atop_u_sse2 (uint32_t* pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
xmm_dst_hi = load_128_aligned ((__m128i*) pd);
@@ -1345,7 +1367,7 @@ core_combine_xor_u_sse2 (uint32_t* dst,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && ((unsigned long) pd & 15))
{
@@ -1362,14 +1384,14 @@ core_combine_xor_u_sse2 (uint32_t* dst,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
xmm_dst = load_128_aligned ((__m128i*) pd);
@@ -1430,7 +1452,7 @@ core_combine_add_u_sse2 (uint32_t* dst,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && (unsigned long)pd & 15)
{
@@ -1448,7 +1470,7 @@ core_combine_add_u_sse2 (uint32_t* dst,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
@@ -1457,7 +1479,7 @@ core_combine_add_u_sse2 (uint32_t* dst,
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
s = combine4 ((__m128i*)ps, (__m128i*)pm);
@@ -1516,7 +1538,7 @@ core_combine_saturate_u_sse2 (uint32_t * pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w && (unsigned long)pd & 15)
{
@@ -1533,14 +1555,14 @@ core_combine_saturate_u_sse2 (uint32_t * pd,
/* call prefetch hint to optimize cache load*/
cache_prefetch ((__m128i*)ps);
cache_prefetch ((__m128i*)pd);
- cache_prefetch ((__m128i*)pm);
+ maybe_prefetch ((__m128i*)pm);
while (w >= 4)
{
/* fill cache line with next memory */
cache_prefetch_next ((__m128i*)ps);
cache_prefetch_next ((__m128i*)pd);
- cache_prefetch_next ((__m128i*)pm);
+ maybe_prefetch_next ((__m128i*)pm);
xmm_dst = load_128_aligned ((__m128i*)pd);
xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
@@ -3221,7 +3243,7 @@ sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
if (m)
{
d = *pd;
-
+
mmx_mask = unpack_32_1x64 (m);
mmx_dest = unpack_32_1x64 (d);
@@ -3412,7 +3434,7 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
PIXMAN_IMAGE_GET_LINE (
src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
+ mask = _pixman_image_get_solid (mask_image, PIXMAN_a8r8g8b8);
xmm_mask = create_mask_16_128 (mask >> 24);
@@ -3528,7 +3550,7 @@ sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
PIXMAN_IMAGE_GET_LINE (
src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
+ mask = _pixman_image_get_solid (mask_image, PIXMAN_a8r8g8b8);
xmm_mask = create_mask_16_128 (mask >> 24);
xmm_alpha = mask_00ff;
@@ -5033,23 +5055,23 @@ sse2_composite_in_8_8 (pixman_implementation_t *imp,
}
/* -------------------------------------------------------------------------
- * composite_add_8888_8_8
+ * composite_add_n_8_8
*/
static void
-sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
+sse2_composite_add_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
{
uint8_t *dst_line, *dst;
uint8_t *mask_line, *mask;
@@ -5428,9 +5450,7 @@ sse2_composite_copy_area (pixman_implementation_t *imp,
src_x, src_y, dest_x, dest_y, width, height);
}
-#if 0
-/* This code are buggy in MMX version, now the bug was translated to SSE2 version */
-void
+static void
sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src_image,
@@ -5451,6 +5471,7 @@ sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
uint32_t m;
int src_stride, mask_stride, dst_stride;
uint16_t w;
+ __m64 ms;
__m128i xmm_src, xmm_src_lo, xmm_src_hi;
__m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
@@ -5465,199 +5486,355 @@ sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
while (height--)
{
- src = src_line;
- src_line += src_stride;
- dst = dst_line;
- dst_line += dst_stride;
- mask = mask_line;
- mask_line += mask_stride;
+ src = src_line;
+ src_line += src_stride;
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+
+ w = width;
+
+ /* call prefetch hint to optimize cache load*/
+ cache_prefetch ((__m128i*)src);
+ cache_prefetch ((__m128i*)dst);
+ cache_prefetch ((__m128i*)mask);
+
+ while (w && (unsigned long)dst & 15)
+ {
+ s = 0xff000000 | *src++;
+ m = (uint32_t) *mask++;
+ d = *dst;
+ ms = unpack_32_1x64 (s);
+
+ if (m != 0xff)
+ {
+ __m64 ma = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+ __m64 md = unpack_32_1x64 (d);
+
+ ms = in_over_1x64 (&ms, &mask_x00ff, &ma, &md);
+ }
+
+ *dst++ = pack_1x64_32 (ms);
+ w--;
+ }
+
+ /* call prefetch hint to optimize cache load*/
+ cache_prefetch ((__m128i*)src);
+ cache_prefetch ((__m128i*)dst);
+ cache_prefetch ((__m128i*)mask);
+
+ while (w >= 4)
+ {
+ /* fill cache line with next memory */
+ cache_prefetch_next ((__m128i*)src);
+ cache_prefetch_next ((__m128i*)dst);
+ cache_prefetch_next ((__m128i*)mask);
+
+ m = *(uint32_t*) mask;
+ xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
+
+ if (m == 0xffffffff)
+ {
+ save_128_aligned ((__m128i*)dst, xmm_src);
+ }
+ else
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &mask_00ff, &mask_00ff, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ src += 4;
+ dst += 4;
+ mask += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ m = (uint32_t) *mask++;
+
+ if (m)
+ {
+ s = 0xff000000 | *src;
+
+ if (m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ma, md, ms;
+
+ d = *dst;
+
+ ma = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+ md = unpack_32_1x64 (d);
+ ms = unpack_32_1x64 (s);
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &mask_x00ff, &ma, &md));
+ }
+
+ }
+
+ src++;
+ dst++;
+ w--;
+ }
+ }
- w = width;
+ _mm_empty ();
+}
- /* call prefetch hint to optimize cache load*/
- cache_prefetch ((__m128i*)src);
- cache_prefetch ((__m128i*)dst);
- cache_prefetch ((__m128i*)mask);
+static void
+sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *src, *src_line, s;
+ uint32_t *dst, *dst_line, d;
+ uint8_t *mask, *mask_line;
+ uint32_t m;
+ int src_stride, mask_stride, dst_stride;
+ uint16_t w;
- while (w && (unsigned long)dst & 15)
- {
- s = 0xff000000 | *src++;
- m = (uint32_t) *mask++;
- d = *dst;
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
- __m64 ms = unpack_32_1x64 (s);
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
- if (m != 0xff)
- {
- ms = in_over_1x64 (ms,
- mask_x00ff,
- expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
- unpack_32_1x64 (d));
- }
+ while (height--)
+ {
+ src = src_line;
+ src_line += src_stride;
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
- *dst++ = pack_1x64_32 (ms);
- w--;
- }
+ w = width;
- /* call prefetch hint to optimize cache load*/
- cache_prefetch ((__m128i*)src);
- cache_prefetch ((__m128i*)dst);
- cache_prefetch ((__m128i*)mask);
+ /* call prefetch hint to optimize cache load*/
+ cache_prefetch ((__m128i *)src);
+ cache_prefetch ((__m128i *)dst);
+ cache_prefetch ((__m128i *)mask);
- while (w >= 4)
- {
- /* fill cache line with next memory */
- cache_prefetch_next ((__m128i*)src);
- cache_prefetch_next ((__m128i*)dst);
- cache_prefetch_next ((__m128i*)mask);
+ while (w && (unsigned long)dst & 15)
+ {
+ uint32_t sa;
- m = *(uint32_t*) mask;
- xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
+ s = *src++;
+ m = (uint32_t) *mask++;
+ d = *dst;
- if (m == 0xffffffff)
+ sa = s >> 24;
+
+ if (m)
{
- save_128_aligned ((__m128i*)dst, xmm_src);
+ if (sa == 0xff && m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ms, md, ma, msa;
+
+ ma = expand_alpha_rev_1x64 (load_32_1x64 (m));
+ ms = unpack_32_1x64 (s);
+ md = unpack_32_1x64 (d);
+
+ msa = expand_alpha_rev_1x64 (load_32_1x64 (sa));
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md));
+ }
}
- else
+
+ dst++;
+ w--;
+ }
+
+ /* call prefetch hint to optimize cache load*/
+ cache_prefetch ((__m128i *)src);
+ cache_prefetch ((__m128i *)dst);
+ cache_prefetch ((__m128i *)mask);
+
+ while (w >= 4)
+ {
+ /* fill cache line with next memory */
+ cache_prefetch_next ((__m128i *)src);
+ cache_prefetch_next ((__m128i *)dst);
+ cache_prefetch_next ((__m128i *)mask);
+
+ m = *(uint32_t *) mask;
+
+ if (m)
{
- xmm_dst = load_128_aligned ((__m128i*)dst);
+ xmm_src = load_128_unaligned ((__m128i*)src);
- xmm_mask = _mm_unpacklo_epi16 (
- unpack_32_1x128 (m), _mm_setzero_si128 ());
+ if (m == 0xffffffff && is_opaque (xmm_src))
+ {
+ save_128_aligned ((__m128i *)dst, xmm_src);
+ }
+ else
+ {
+ xmm_dst = load_128_aligned ((__m128i *)dst);
- unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
- unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
- unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+ xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
- expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
- &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
- in_over_2x128 (xmm_src_lo, xmm_src_hi,
- mask_00ff, mask_00ff,
- xmm_mask_lo, xmm_mask_hi,
- &xmm_dst_lo, &xmm_dst_hi);
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi);
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi,
+ &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
- save_128_aligned (
- (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
}
- src += 4;
- dst += 4;
- mask += 4;
- w -= 4;
- }
+ src += 4;
+ dst += 4;
+ mask += 4;
+ w -= 4;
+ }
- while (w)
- {
- m = (uint32_t) *mask++;
+ while (w)
+ {
+ uint32_t sa;
+
+ s = *src++;
+ m = (uint32_t) *mask++;
+ d = *dst;
+
+ sa = s >> 24;
if (m)
{
- s = 0xff000000 | *src;
-
- if (m == 0xff)
+ if (sa == 0xff && m == 0xff)
{
*dst = s;
}
else
{
- d = *dst;
+ __m64 ms, md, ma, msa;
- *dst = pack_1x64_32 (
- in_over_1x64 (
- unpack_32_1x64 (s),
- mask_x00ff,
- expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
- unpack_32_1x64 (d)));
- }
+ ma = expand_alpha_rev_1x64 (load_32_1x64 (m));
+ ms = unpack_32_1x64 (s);
+ md = unpack_32_1x64 (d);
+ msa = expand_alpha_rev_1x64 (load_32_1x64 (sa));
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md));
+ }
}
- src++;
dst++;
- w--;
- }
+ w--;
+ }
}
_mm_empty ();
}
-#endif
-
static const pixman_fast_path_t sse2_fast_paths[] =
{
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, sse2_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, sse2_composite_over_n_8_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_over_n_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_over_8888_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, sse2_composite_over_8888_0565, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888, 0 },
-#if 0
- /* FIXME: This code are buggy in MMX version, now the bug was translated to SSE2 version */
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_x888_8_8888, 0 },
-#endif
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888, NEED_SOLID_MASK },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5, sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, sse2_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5, sse2_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5, sse2_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5, sse2_composite_over_pixbuf_0565, NEED_PIXBUF },
- { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area, 0 },
-
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_add_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
- { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_add_8000_8000, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888, 0 },
- { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888, 0 },
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, sse2_composite_add_8888_8_8, 0 },
-
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_copy_area, 0 },
- { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, sse2_composite_copy_area, 0 },
-
- { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_in_8_8, 0 },
- { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, sse2_composite_in_n_8_8, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, sse2_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, sse2_composite_over_n_8_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_over_n_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_over_8888_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, sse2_composite_over_8888_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_8888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_8888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_8888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_8888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_over_x888_8_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_solid, PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_r5g6b5, sse2_composite_over_n_8888_0565_ca },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8_ca, PIXMAN_b5g6r5, sse2_composite_over_n_8888_0565_ca },
+ { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888 },
+ { PIXMAN_OP_OVER, PIXMAN_pixbuf, PIXMAN_pixbuf, PIXMAN_r5g6b5, sse2_composite_over_pixbuf_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_rpixbuf, PIXMAN_rpixbuf, PIXMAN_b5g6r5, sse2_composite_over_pixbuf_0565 },
+ { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area },
+ { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area },
+
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8_ca, PIXMAN_a8r8g8b8, sse2_composite_add_n_8888_8888_ca },
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_add_8000_8000 },
+ { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888 },
+ { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888 },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, sse2_composite_add_n_8_8 },
+
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888 },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, sse2_composite_copy_area },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, sse2_composite_copy_area },
+
+ { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_in_8_8 },
+ { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, sse2_composite_in_n_8_8 },
{ PIXMAN_OP_NONE },
};
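Editor's note: the rewritten sse2_fast_paths table above drops the trailing flags column; component-alpha and pixbuf cases are now expressed through dedicated format codes (PIXMAN_a8r8g8b8_ca, PIXMAN_a8b8g8r8_ca, PIXMAN_pixbuf, PIXMAN_rpixbuf) rather than NEED_COMPONENT_ALPHA / NEED_PIXBUF flags. The sketch below shows how such a table can be scanned once the images have been reduced to format codes; it mirrors the reworked get_fast_path loop in the pixman-utils.c hunk further down, but the type and constant names here are illustrative stand-ins, not the pixman definitions.

/* Minimal sketch of a fast-path table scan, assuming the images have
 * already been reduced to plain format codes.  Names are hypothetical;
 * only the matching logic reflects the loop in the patch below. */
#include <stddef.h>

typedef unsigned int format_code_t;

typedef struct
{
    int           op;
    format_code_t src_format;
    format_code_t mask_format;
    format_code_t dest_format;
    void        (*func) (void);
} fast_path_t;

#define OP_NONE 0   /* sentinel, like { PIXMAN_OP_NONE } above */

static const fast_path_t *
lookup_fast_path (const fast_path_t *table,
                  int            op,
                  format_code_t  src,
                  format_code_t  mask,
                  format_code_t  dest)
{
    const fast_path_t *info;

    for (info = table; info->op != OP_NONE; info++)
    {
        /* the first entry whose four keys all match wins */
        if (info->op == op            &&
            info->src_format == src   &&
            info->mask_format == mask &&
            info->dest_format == dest)
        {
            return info;
        }
    }

    return NULL;
}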
diff --git a/pixman/pixman/pixman-utils.c b/pixman/pixman/pixman-utils.c
index 71282062c..6d03f3c07 100644
--- a/pixman/pixman/pixman-utils.c
+++ b/pixman/pixman/pixman-utils.c
@@ -33,8 +33,6 @@
/*
* Computing composite region
*/
-#define BOUND(v) (int16_t) ((v) < INT16_MIN ? INT16_MIN : (v) > INT16_MAX ? INT16_MAX : (v))
-
static inline pixman_bool_t
clip_general_image (pixman_region32_t * region,
pixman_region32_t * clip,
@@ -49,17 +47,17 @@ clip_general_image (pixman_region32_t * region,
int v;
if (rbox->x1 < (v = cbox->x1 + dx))
- rbox->x1 = BOUND (v);
+ rbox->x1 = v;
if (rbox->x2 > (v = cbox->x2 + dx))
- rbox->x2 = BOUND (v);
+ rbox->x2 = v;
if (rbox->y1 < (v = cbox->y1 + dy))
- rbox->y1 = BOUND (v);
+ rbox->y1 = v;
if (rbox->y2 > (v = cbox->y2 + dy))
- rbox->y2 = BOUND (v);
- if (rbox->x1 >= rbox->x2 ||
- rbox->y1 >= rbox->y2)
+ rbox->y2 = v;
+ if (rbox->x1 >= rbox->x2 || rbox->y1 >= rbox->y2)
{
pixman_region32_init (region);
+ return FALSE;
}
}
else if (!pixman_region32_not_empty (clip))
@@ -70,11 +68,14 @@ clip_general_image (pixman_region32_t * region,
{
if (dx || dy)
pixman_region32_translate (region, -dx, -dy);
+
if (!pixman_region32_intersect (region, region, clip))
return FALSE;
+
if (dx || dy)
pixman_region32_translate (region, dx, dy);
}
+
return pixman_region32_not_empty (region);
}
@@ -106,23 +107,19 @@ pixman_compute_composite_region32 (pixman_region32_t * region,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
- int16_t src_x,
- int16_t src_y,
- int16_t mask_x,
- int16_t mask_y,
- int16_t dest_x,
- int16_t dest_y,
- uint16_t width,
- uint16_t height)
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
{
- int v;
-
region->extents.x1 = dest_x;
- v = dest_x + width;
- region->extents.x2 = BOUND (v);
+ region->extents.x2 = dest_x + width;
region->extents.y1 = dest_y;
- v = dest_y + height;
- region->extents.y2 = BOUND (v);
+ region->extents.y2 = dest_y + height;
region->extents.x1 = MAX (region->extents.x1, 0);
region->extents.y1 = MAX (region->extents.y1, 0);
@@ -378,14 +375,14 @@ walk_region_internal (pixman_implementation_t *imp,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
- int16_t src_x,
- int16_t src_y,
- int16_t mask_x,
- int16_t mask_y,
- int16_t dest_x,
- int16_t dest_y,
- uint16_t width,
- uint16_t height,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height,
pixman_bool_t src_repeat,
pixman_bool_t mask_repeat,
pixman_region32_t * region,
@@ -471,14 +468,14 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
- int16_t src_x,
- int16_t src_y,
- int16_t mask_x,
- int16_t mask_y,
- int16_t dest_x,
- int16_t dest_y,
- uint16_t width,
- uint16_t height,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height,
pixman_composite_func_t composite_rect)
{
pixman_region32_t region;
@@ -501,78 +498,97 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
}
}
-static pixman_bool_t
-mask_is_solid (pixman_image_t *mask)
-{
- if (mask->type == SOLID)
- return TRUE;
-
- if (mask->type == BITS &&
- mask->common.repeat == PIXMAN_REPEAT_NORMAL &&
- mask->bits.width == 1 &&
- mask->bits.height == 1)
- {
- return TRUE;
- }
-
- return FALSE;
-}
-
static const pixman_fast_path_t *
get_fast_path (const pixman_fast_path_t *fast_paths,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
- pixman_bool_t is_pixbuf)
+ int src_x,
+ int src_y,
+ int mask_x,
+ int mask_y)
{
+ pixman_format_code_t src_format, mask_format;
const pixman_fast_path_t *info;
- for (info = fast_paths; info->op != PIXMAN_OP_NONE; info++)
+ /* Check for pixbufs */
+ if (mask_image && mask_image->type == BITS &&
+ (mask_image->bits.format == PIXMAN_a8r8g8b8 || mask_image->bits.format == PIXMAN_a8b8g8r8) &&
+ (src_image->type == BITS && src_image->bits.bits == mask_image->bits.bits) &&
+ (src_image->common.repeat == mask_image->common.repeat) &&
+ (src_x == mask_x && src_y == mask_y))
{
- pixman_bool_t valid_src = FALSE;
- pixman_bool_t valid_mask = FALSE;
-
- if (info->op != op)
- continue;
-
- if ((info->src_format == PIXMAN_solid &&
- _pixman_image_is_solid (src_image)) ||
- (src_image->type == BITS &&
- info->src_format == src_image->bits.format))
+ if (src_image->bits.format == PIXMAN_x8b8g8r8)
+ src_format = mask_format = PIXMAN_pixbuf;
+ else if (src_image->bits.format == PIXMAN_x8r8g8b8)
+ src_format = mask_format = PIXMAN_rpixbuf;
+ else
+ return NULL;
+ }
+ else
+ {
+ if (_pixman_image_is_solid (src_image))
{
- valid_src = TRUE;
+ src_format = PIXMAN_solid;
}
-
- if (!valid_src)
- continue;
-
- if ((info->mask_format == PIXMAN_null && !mask_image) ||
- (mask_image && mask_image->type == BITS &&
- info->mask_format == mask_image->bits.format))
+ else if (src_image->type == BITS)
{
- valid_mask = TRUE;
-
- if (info->flags & NEED_SOLID_MASK)
+ src_format = src_image->bits.format;
+ }
+ else
+ {
+ return NULL;
+ }
+
+ if (!mask_image)
+ {
+ mask_format = PIXMAN_null;
+ }
+ else if (mask_image->common.component_alpha)
+ {
+ if (mask_image->type == BITS)
{
- if (!mask_image || !mask_is_solid (mask_image))
- valid_mask = FALSE;
+ /* These are the *only* component_alpha formats
+ * we support for fast paths
+ */
+ if (mask_image->bits.format == PIXMAN_a8r8g8b8)
+ mask_format = PIXMAN_a8r8g8b8_ca;
+ else if (mask_image->bits.format == PIXMAN_a8b8g8r8)
+ mask_format = PIXMAN_a8b8g8r8_ca;
+ else
+ return NULL;
}
-
- if (info->flags & NEED_COMPONENT_ALPHA)
+ else
{
- if (!mask_image || !mask_image->common.component_alpha)
- valid_mask = FALSE;
+ return NULL;
}
}
-
- if (!valid_mask)
+ else if (_pixman_image_is_solid (mask_image))
+ {
+ mask_format = PIXMAN_solid;
+ }
+ else if (mask_image->common.type == BITS)
+ {
+ mask_format = mask_image->bits.format;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ for (info = fast_paths; info->op != PIXMAN_OP_NONE; info++)
+ {
+ if (info->op != op)
continue;
- if (info->dest_format != dst_image->bits.format)
+ if (info->src_format != src_format)
+ continue;
+ if (info->mask_format != mask_format)
continue;
- if ((info->flags & NEED_PIXBUF) && !is_pixbuf)
+ if (info->dest_format != dst_image->bits.format)
continue;
return info;
@@ -617,7 +633,7 @@ sources_cover (pixman_image_t *src,
if (!mask)
return TRUE;
-
+
if (!image_covers (mask, extents, dest_x - mask_x, dest_y - mask_y))
return FALSE;
@@ -654,8 +670,7 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths,
if (has_fast_path)
{
- has_fast_path = (src->type == BITS || _pixman_image_is_solid (src)) &&
- !src->common.transform &&
+ has_fast_path = !src->common.transform &&
!src->common.alpha_map &&
src->common.filter != PIXMAN_FILTER_CONVOLUTION &&
src->common.repeat != PIXMAN_REPEAT_PAD &&
@@ -671,7 +686,6 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths,
if (mask && has_fast_path)
{
has_fast_path =
- mask->type == BITS &&
!mask->common.transform &&
!mask->common.alpha_map &&
!mask->bits.read_func &&
@@ -685,28 +699,15 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths,
if (has_fast_path)
{
const pixman_fast_path_t *info;
- pixman_bool_t pixbuf;
-
- pixbuf =
- src && src->type == BITS &&
- mask && mask->type == BITS &&
- src->bits.bits == mask->bits.bits &&
- src_x == mask_x &&
- src_y == mask_y &&
- !mask->common.component_alpha &&
- !mask_repeat;
-
- info = get_fast_path (paths, op, src, mask, dest, pixbuf);
- if (info)
+ if ((info = get_fast_path (paths, op, src, mask, dest, src_x, src_y, mask_x, mask_y)))
{
func = info->func;
if (info->src_format == PIXMAN_solid)
src_repeat = FALSE;
- if (info->mask_format == PIXMAN_solid ||
- info->flags & NEED_SOLID_MASK)
+ if (info->mask_format == PIXMAN_solid)
{
mask_repeat = FALSE;
}
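Editor's note: one detail worth spelling out from the get_fast_path rework above is the pixbuf detection. Instead of the old NEED_PIXBUF flag, the source/mask pair is classified up front as PIXMAN_pixbuf or PIXMAN_rpixbuf when both images alias the same pixel bits with the same repeat mode and sample offsets. A hedged sketch of that test follows, using simplified stand-in types rather than the pixman image structs.

/* Sketch of the pixbuf classification.  In the patch, an x8b8g8r8 source
 * maps to PIXMAN_pixbuf and x8r8g8b8 to PIXMAN_rpixbuf; everything else
 * here (types, constants) is an illustrative assumption. */
#include <stdint.h>

typedef struct
{
    uint32_t *bits;      /* pointer to the pixel data */
    int       format;    /* e.g. FORMAT_X8B8G8R8 */
    int       repeat;
} image_t;

enum { FORMAT_X8R8G8B8, FORMAT_X8B8G8R8, FORMAT_A8R8G8B8, FORMAT_A8B8G8R8 };
enum { FMT_NONE, FMT_PIXBUF, FMT_RPIXBUF };

static int
classify_pixbuf (const image_t *src, const image_t *mask,
                 int src_x, int src_y, int mask_x, int mask_y)
{
    /* Source and mask must alias the same buffer, repeat the same way,
     * be sampled at the same offset, and the mask must carry the alpha. */
    if (mask->format != FORMAT_A8R8G8B8 && mask->format != FORMAT_A8B8G8R8)
        return FMT_NONE;
    if (src->bits != mask->bits || src->repeat != mask->repeat)
        return FMT_NONE;
    if (src_x != mask_x || src_y != mask_y)
        return FMT_NONE;

    if (src->format == FORMAT_X8B8G8R8)
        return FMT_PIXBUF;
    if (src->format == FORMAT_X8R8G8B8)
        return FMT_RPIXBUF;

    return FMT_NONE;
}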
diff --git a/pixman/pixman/pixman-version.h b/pixman/pixman/pixman-version.h
index 79d74fed4..6b93790e1 100644
--- a/pixman/pixman/pixman-version.h
+++ b/pixman/pixman/pixman-version.h
@@ -32,10 +32,10 @@
#endif
#define PIXMAN_VERSION_MAJOR 0
-#define PIXMAN_VERSION_MINOR 16
+#define PIXMAN_VERSION_MINOR 17
#define PIXMAN_VERSION_MICRO 2
-#define PIXMAN_VERSION_STRING "0.16.2"
+#define PIXMAN_VERSION_STRING "0.17.2"
#define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \
((major) * 10000) \
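Editor's note: the PIXMAN_VERSION_ENCODE macro is cut off at the end of the hunk above. The example below assumes the conventional major*10000 + minor*100 + micro continuation, under which the 0.16.2 to 0.17.2 bump raises the encoded value by exactly one hundred; treat the layout as an assumption, not a quote of the header.

/* Sketch only: the real macro lives in pixman-version.h; its tail is not
 * shown in the hunk above, so the layout below is an assumption. */
#include <stdio.h>

#define VERSION_ENCODE(major, minor, micro) \
    (((major) * 10000) + ((minor) * 100) + ((micro)))

int
main (void)
{
    printf ("0.16.2 -> %d\n", VERSION_ENCODE (0, 16, 2));  /* 1602 */
    printf ("0.17.2 -> %d\n", VERSION_ENCODE (0, 17, 2));  /* 1702 */
    return 0;
}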
diff --git a/pixman/pixman/pixman.h b/pixman/pixman/pixman.h
index 5b90a0c8d..ba2b242ad 100644
--- a/pixman/pixman/pixman.h
+++ b/pixman/pixman/pixman.h
@@ -74,7 +74,7 @@ SOFTWARE.
/*
* Standard integers
*/
-#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__)
+#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__) || defined (__HP_cc)
# include <inttypes.h>
#elif defined (_MSC_VER)
typedef __int8 int8_t;
@@ -109,6 +109,7 @@ typedef pixman_fixed_16_16_t pixman_fixed_t;
#define pixman_fixed_e ((pixman_fixed_t) 1)
#define pixman_fixed_1 (pixman_int_to_fixed(1))
#define pixman_fixed_1_minus_e (pixman_fixed_1 - pixman_fixed_e)
+#define pixman_fixed_minus_1 (pixman_int_to_fixed(-1))
#define pixman_fixed_to_int(f) ((int) ((f) >> 16))
#define pixman_int_to_fixed(i) ((pixman_fixed_t) ((i) << 16))
#define pixman_fixed_to_double(f) (double) ((f) / (double) pixman_fixed_1)
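Editor's note: the hunk above adds pixman_fixed_minus_1 alongside the existing 16.16 fixed-point helpers. The small program below restates those macros with local names to show the bit patterns involved; pixman_fixed_16_16_t is a signed 32-bit integer.

/* 16.16 fixed point, mirroring the macros in the hunk above with local
 * names so the example is self-contained. */
#include <stdint.h>
#include <stdio.h>

typedef int32_t fixed_t;

#define fixed_e           ((fixed_t) 1)
#define int_to_fixed(i)   ((fixed_t) ((i) << 16))
#define fixed_to_int(f)   ((int) ((f) >> 16))
#define fixed_1           (int_to_fixed (1))
#define fixed_1_minus_e   (fixed_1 - fixed_e)
#define fixed_minus_1     (int_to_fixed (-1))   /* the newly added macro */

int
main (void)
{
    printf ("1.0     = 0x%08x\n", (unsigned) fixed_1);         /* 0x00010000 */
    printf ("1.0 - e = 0x%08x\n", (unsigned) fixed_1_minus_e); /* 0x0000ffff */
    printf ("-1.0    = 0x%08x\n", (unsigned) fixed_minus_1);   /* 0xffff0000 */
    printf ("round trip: %d\n", fixed_to_int (fixed_minus_1)); /* -1 */
    return 0;
}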
@@ -333,8 +334,10 @@ typedef enum
PIXMAN_OP_HSL_COLOR = 0x3d,
PIXMAN_OP_HSL_LUMINOSITY = 0x3e,
- PIXMAN_OP_NONE,
- PIXMAN_OP_LAST = PIXMAN_OP_NONE
+#ifdef PIXMAN_USE_INTERNAL_API
+ PIXMAN_N_OPERATORS,
+ PIXMAN_OP_NONE = PIXMAN_N_OPERATORS
+#endif
} pixman_op_t;
/*
@@ -637,11 +640,11 @@ typedef enum {
/* 24bpp formats */
PIXMAN_r8g8b8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_b8g8r8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ABGR,0,8,8,8),
-
+
/* 16bpp formats */
PIXMAN_r5g6b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,6,5),
PIXMAN_b5g6r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,6,5),
-
+
PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,1,5,5,5),
PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,5,5),
PIXMAN_a1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,1,5,5,5),
@@ -650,35 +653,35 @@ typedef enum {
PIXMAN_x4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,4,4,4),
PIXMAN_a4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,4,4,4,4),
PIXMAN_x4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,4,4,4),
-
+
/* 8bpp formats */
PIXMAN_a8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,8,0,0,0),
PIXMAN_r3g3b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,0,3,3,2),
PIXMAN_b2g3r3 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,0,3,3,2),
PIXMAN_a2r2g2b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,2,2,2,2),
PIXMAN_a2b2g2r2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,2,2,2,2),
-
+
PIXMAN_c8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
-
+
PIXMAN_x4a4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,4,0,0,0),
-
+
PIXMAN_x4c4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_x4g4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
-
+
/* 4bpp formats */
PIXMAN_a4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_A,4,0,0,0),
PIXMAN_r1g2b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,0,1,2,1),
PIXMAN_b1g2r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,0,1,2,1),
PIXMAN_a1r1g1b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,1,1,1,1),
PIXMAN_a1b1g1r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,1,1,1,1),
-
+
PIXMAN_c4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_GRAY,0,0,0,0),
-
+
/* 1bpp formats */
PIXMAN_a1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_A,1,0,0,0),
-
+
PIXMAN_g1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_GRAY,0,0,0,0),
/* YUV formats */