aboutsummaryrefslogtreecommitdiff
path: root/pixman/pixman
diff options
context:
space:
mode:
authormarha <marha@users.sourceforge.net>2009-09-02 19:08:07 +0000
committermarha <marha@users.sourceforge.net>2009-09-02 19:08:07 +0000
commit199039111284ed3a10508e2dc0db49ca4192389c (patch)
tree1aa6deae00e08c734015dd8a494ed0f2a3e8d3dd /pixman/pixman
parent4a3dbb926ae3f5410198d7cc4f4ebe4f62eebf05 (diff)
downloadvcxsrv-199039111284ed3a10508e2dc0db49ca4192389c.tar.gz
vcxsrv-199039111284ed3a10508e2dc0db49ca4192389c.tar.bz2
vcxsrv-199039111284ed3a10508e2dc0db49ca4192389c.zip
Switched to pixman-0.16.0.tar.gz
Diffstat (limited to 'pixman/pixman')
-rw-r--r--pixman/pixman/Makefile.in27
-rw-r--r--pixman/pixman/pixman-access.c1745
-rw-r--r--pixman/pixman/pixman-arm-neon.c3
-rw-r--r--pixman/pixman/pixman-bits-image.c610
-rw-r--r--pixman/pixman/pixman-combine.c.template22
-rw-r--r--pixman/pixman/pixman-combine.h.template257
-rw-r--r--pixman/pixman/pixman-combine32.c22
-rw-r--r--pixman/pixman/pixman-combine32.h257
-rw-r--r--pixman/pixman/pixman-combine64.c22
-rw-r--r--pixman/pixman/pixman-combine64.h257
-rw-r--r--pixman/pixman/pixman-conical-gradient.c2
-rw-r--r--pixman/pixman/pixman-fast-path.c85
-rw-r--r--pixman/pixman/pixman-general.c2
-rw-r--r--pixman/pixman/pixman-image.c19
-rw-r--r--pixman/pixman/pixman-linear-gradient.c4
-rw-r--r--pixman/pixman/pixman-mmx.c66
-rw-r--r--pixman/pixman/pixman-private.h42
-rw-r--r--pixman/pixman/pixman-radial-gradient.c75
-rw-r--r--pixman/pixman/pixman-region.c31
-rw-r--r--pixman/pixman/pixman-solid-fill.c4
-rw-r--r--pixman/pixman/pixman-sse2.c172
-rw-r--r--pixman/pixman/pixman-trap.c4
-rw-r--r--pixman/pixman/pixman-utils.c27
-rw-r--r--pixman/pixman/pixman-version.h6
-rw-r--r--pixman/pixman/pixman-vmx.c707
-rw-r--r--pixman/pixman/pixman-x64-mmx-emulation.h8
-rw-r--r--pixman/pixman/pixman.c7
27 files changed, 1909 insertions, 2574 deletions
diff --git a/pixman/pixman/Makefile.in b/pixman/pixman/Makefile.in
index a3518b057..1ca63f731 100644
--- a/pixman/pixman/Makefile.in
+++ b/pixman/pixman/Makefile.in
@@ -1,4 +1,4 @@
-# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# Makefile.in generated by automake 1.10.2 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
@@ -193,24 +193,19 @@ CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DEP_CFLAGS = @DEP_CFLAGS@
DEP_LIBS = @DEP_LIBS@
DSYMUTIL = @DSYMUTIL@
-ECHO = @ECHO@
+DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
-F77 = @F77@
-FFLAGS = @FFLAGS@
+FGREP = @FGREP@
GREP = @GREP@
GTK_CFLAGS = @GTK_CFLAGS@
GTK_LIBS = @GTK_LIBS@
@@ -219,10 +214,12 @@ INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
LT_VERSION_INFO = @LT_VERSION_INFO@
@@ -230,8 +227,12 @@ MAKEINFO = @MAKEINFO@
MKDIR_P = @MKDIR_P@
MMX_CFLAGS = @MMX_CFLAGS@
MMX_LDFLAGS = @MMX_LDFLAGS@
+NM = @NM@
NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
@@ -259,8 +260,7 @@ abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
@@ -291,6 +291,7 @@ libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
@@ -389,8 +390,8 @@ $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
- && exit 0; \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
@@ -753,7 +754,7 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
- $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
mkid -fID $$unique
tags: TAGS
diff --git a/pixman/pixman/pixman-access.c b/pixman/pixman/pixman-access.c
index a5e1c0fa1..532aa2ef7 100644
--- a/pixman/pixman/pixman-access.c
+++ b/pixman/pixman/pixman-access.c
@@ -63,14 +63,14 @@
uint32_t *bits = __bits_image->bits; \
int stride = __bits_image->rowstride; \
int offset0 = stride < 0 ? \
- ((-stride) >> 1) * ((__bits_image->height - 1) >> 1) - stride : \
- stride * __bits_image->height; \
+ ((-stride) >> 1) * ((__bits_image->height - 1) >> 1) - stride : \
+ stride * __bits_image->height; \
int offset1 = stride < 0 ? \
- offset0 + ((-stride) >> 1) * ((__bits_image->height) >> 1) : \
+ offset0 + ((-stride) >> 1) * ((__bits_image->height) >> 1) : \
offset0 + (offset0 >> 2)
/* Note no trailing semicolon on the above macro; if it's there, then
- * the typical usage of YV12_SETUP(pict); will have an extra trailing ;
+ * the typical usage of YV12_SETUP(image); will have an extra trailing ;
* that some compilers will interpret as a statement -- and then any further
* variable declarations will cause an error.
*/
@@ -364,7 +364,7 @@ fetch_scanline_r8g8b8 (pixman_image_t *image,
while (pixel < end)
{
uint32_t b = 0xff000000;
-
+
#ifdef WORDS_BIGENDIAN
b |= (READ (image, pixel++) << 16);
b |= (READ (image, pixel++) << 8);
@@ -374,7 +374,7 @@ fetch_scanline_r8g8b8 (pixman_image_t *image,
b |= (READ (image, pixel++) << 8);
b |= (READ (image, pixel++) << 16);
#endif
-
+
*buffer++ = b;
}
}
@@ -430,7 +430,7 @@ fetch_scanline_r5g6b5 (pixman_image_t *image,
r |= (r >> 5) & 0x70007;
r |= (r >> 6) & 0x300;
-
+
*buffer++ = 0xff000000 | r;
}
}
@@ -456,7 +456,7 @@ fetch_scanline_b5g6r5 (pixman_image_t *image,
b = ((p & 0xf800) | ((p & 0xe000) >> 5)) >> 8;
g = ((p & 0x07e0) | ((p & 0x0600) >> 6)) << 5;
r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -483,7 +483,7 @@ fetch_scanline_a1r5g5b5 (pixman_image_t *image,
r = ((p & 0x7c00) | ((p & 0x7000) >> 5)) << 9;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
b = ((p & 0x001c) | ((p & 0x001f) << 5)) >> 2;
-
+
*buffer++ = a | r | g | b;
}
}
@@ -509,7 +509,7 @@ fetch_scanline_x1r5g5b5 (pixman_image_t *image,
r = ((p & 0x7c00) | ((p & 0x7000) >> 5)) << 9;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
b = ((p & 0x001c) | ((p & 0x001f) << 5)) >> 2;
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -536,7 +536,7 @@ fetch_scanline_a1b5g5r5 (pixman_image_t *image,
b = ((p & 0x7c00) | ((p & 0x7000) >> 5)) >> 7;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
-
+
*buffer++ = a | r | g | b;
}
}
@@ -562,7 +562,7 @@ fetch_scanline_x1b5g5r5 (pixman_image_t *image,
b = ((p & 0x7c00) | ((p & 0x7000) >> 5)) >> 7;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -589,7 +589,7 @@ fetch_scanline_a4r4g4b4 (pixman_image_t *image,
r = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) << 12;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
b = ((p & 0x000f) | ((p & 0x000f) << 4));
-
+
*buffer++ = a | r | g | b;
}
}
@@ -615,7 +615,7 @@ fetch_scanline_x4r4g4b4 (pixman_image_t *image,
r = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) << 12;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
b = ((p & 0x000f) | ((p & 0x000f) << 4));
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -642,7 +642,7 @@ fetch_scanline_a4b4g4r4 (pixman_image_t *image,
b = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) >> 4;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
r = ((p & 0x000f) | ((p & 0x000f) << 4)) << 16;
-
+
*buffer++ = a | r | g | b;
}
}
@@ -668,7 +668,7 @@ fetch_scanline_x4b4g4r4 (pixman_image_t *image,
b = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) >> 4;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
r = ((p & 0x000f) | ((p & 0x000f) << 4)) << 16;
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -714,7 +714,7 @@ fetch_scanline_r3g3b2 (pixman_image_t *image,
((p & 0x03) << 2) |
((p & 0x03) << 4) |
((p & 0x03) << 6));
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -741,13 +741,13 @@ fetch_scanline_b2g3r3 (pixman_image_t *image,
((p & 0xc0) >> 2) |
((p & 0xc0) >> 4) |
((p & 0xc0) >> 6));
-
+
g = ((p & 0x38) | ((p & 0x38) >> 3) | ((p & 0x30) << 2)) << 8;
-
+
r = (((p & 0x07) ) |
((p & 0x07) << 3) |
((p & 0x06) << 6)) << 16;
-
+
*buffer++ = 0xff000000 | r | g | b;
}
}
@@ -774,7 +774,7 @@ fetch_scanline_a2r2g2b2 (pixman_image_t *image,
r = ((p & 0x30) * 0x55) << 12;
g = ((p & 0x0c) * 0x55) << 6;
b = ((p & 0x03) * 0x55);
-
+
*buffer++ = a | r | g | b;
}
}
@@ -801,7 +801,7 @@ fetch_scanline_a2b2g2r2 (pixman_image_t *image,
b = ((p & 0x30) * 0x55) >> 6;
g = ((p & 0x0c) * 0x55) << 6;
r = ((p & 0x03) * 0x55) << 16;
-
+
*buffer++ = a | r | g | b;
}
}
@@ -823,7 +823,7 @@ fetch_scanline_c8 (pixman_image_t *image,
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
-
+
*buffer++ = indexed->rgba[p];
}
}
@@ -844,7 +844,7 @@ fetch_scanline_x4a4 (pixman_image_t *image,
while (pixel < end)
{
uint8_t p = READ (image, pixel++) & 0xf;
-
+
*buffer++ = (p | (p << 4)) << 24;
}
}
@@ -873,7 +873,7 @@ fetch_scanline_a4 (pixman_image_t *image,
uint32_t p = FETCH_4 (image, bits, i + x);
p |= p << 4;
-
+
*buffer++ = p << 24;
}
}
@@ -1137,1264 +1137,676 @@ fetch_scanline_yv12 (pixman_image_t *image,
/**************************** Pixel wise fetching *****************************/
/* Despite the type, expects a uint64_t buffer */
-static void
-fetch_pixels_a2r10g10b10_64 (bits_image_t *pict,
- uint32_t * b,
- int n_pixels)
+static uint64_t
+fetch_pixel_a2r10g10b10 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
- uint64_t *buffer = (uint64_t *)b;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t p = READ (image, bits + offset);
+ uint64_t a = p >> 30;
+ uint64_t r = (p >> 20) & 0x3ff;
+ uint64_t g = (p >> 10) & 0x3ff;
+ uint64_t b = p & 0x3ff;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = ((uint32_t *)buffer)[2 * i];
- int line = ((uint32_t *)buffer)[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t p = READ (pict, bits + offset);
- uint64_t a = p >> 30;
- uint64_t r = (p >> 20) & 0x3ff;
- uint64_t g = (p >> 10) & 0x3ff;
- uint64_t b = p & 0x3ff;
-
- r = r << 6 | r >> 4;
- g = g << 6 | g >> 4;
- b = b << 6 | b >> 4;
-
- a <<= 62;
- a |= a >> 2;
- a |= a >> 4;
- a |= a >> 8;
-
- buffer[i] = a << 48 | r << 32 | g << 16 | b;
- }
- }
+ r = r << 6 | r >> 4;
+ g = g << 6 | g >> 4;
+ b = b << 6 | b >> 4;
+
+ a <<= 62;
+ a |= a >> 2;
+ a |= a >> 4;
+ a |= a >> 8;
+
+ return a << 48 | r << 32 | g << 16 | b;
}
/* Despite the type, this function expects a uint64_t buffer */
-static void
-fetch_pixels_x2r10g10b10_64 (bits_image_t *pict,
- uint32_t * b,
- int n_pixels)
+static uint64_t
+fetch_pixel_x2r10g10b10 (bits_image_t *image,
+ int offset,
+ int line)
{
- uint64_t *buffer = (uint64_t *)b;
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t p = READ (image, bits + offset);
+ uint64_t r = (p >> 20) & 0x3ff;
+ uint64_t g = (p >> 10) & 0x3ff;
+ uint64_t b = p & 0x3ff;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = ((uint32_t *)buffer)[2 * i];
- int line = ((uint32_t *)buffer)[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t p = READ (pict, bits + offset);
- uint64_t r = (p >> 20) & 0x3ff;
- uint64_t g = (p >> 10) & 0x3ff;
- uint64_t b = p & 0x3ff;
-
- r = r << 6 | r >> 4;
- g = g << 6 | g >> 4;
- b = b << 6 | b >> 4;
-
- buffer[i] = 0xffffULL << 48 | r << 32 | g << 16 | b;
- }
- }
+ r = r << 6 | r >> 4;
+ g = g << 6 | g >> 4;
+ b = b << 6 | b >> 4;
+
+ return 0xffffULL << 48 | r << 32 | g << 16 | b;
}
/* Despite the type, expects a uint64_t buffer */
-static void
-fetch_pixels_a2b10g10r10_64 (bits_image_t *pict,
- uint32_t * b,
- int n_pixels)
+static uint64_t
+fetch_pixel_a2b10g10r10 (bits_image_t *image,
+ int offset,
+ int line)
{
- uint64_t *buffer = (uint64_t *)b;
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t p = READ (image, bits + offset);
+ uint64_t a = p >> 30;
+ uint64_t b = (p >> 20) & 0x3ff;
+ uint64_t g = (p >> 10) & 0x3ff;
+ uint64_t r = p & 0x3ff;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = ((uint32_t *)buffer)[2 * i];
- int line = ((uint32_t *)buffer)[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t p = READ (pict, bits + offset);
- uint64_t a = p >> 30;
- uint64_t b = (p >> 20) & 0x3ff;
- uint64_t g = (p >> 10) & 0x3ff;
- uint64_t r = p & 0x3ff;
-
- r = r << 6 | r >> 4;
- g = g << 6 | g >> 4;
- b = b << 6 | b >> 4;
-
- a <<= 62;
- a |= a >> 2;
- a |= a >> 4;
- a |= a >> 8;
-
- buffer[i] = a << 48 | r << 32 | g << 16 | b;
- }
- }
+ r = r << 6 | r >> 4;
+ g = g << 6 | g >> 4;
+ b = b << 6 | b >> 4;
+
+ a <<= 62;
+ a |= a >> 2;
+ a |= a >> 4;
+ a |= a >> 8;
+
+ return a << 48 | r << 32 | g << 16 | b;
}
/* Despite the type, this function expects a uint64_t buffer */
-static void
-fetch_pixels_x2b10g10r10_64 (bits_image_t *pict,
- uint32_t * b,
- int n_pixels)
+static uint64_t
+fetch_pixel_x2b10g10r10 (bits_image_t *image,
+ int offset,
+ int line)
{
- uint64_t *buffer = (uint64_t *)b;
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t p = READ (image, bits + offset);
+ uint64_t b = (p >> 20) & 0x3ff;
+ uint64_t g = (p >> 10) & 0x3ff;
+ uint64_t r = p & 0x3ff;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = ((uint32_t *)buffer)[2 * i];
- int line = ((uint32_t *)buffer)[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t p = READ (pict, bits + offset);
- uint64_t b = (p >> 20) & 0x3ff;
- uint64_t g = (p >> 10) & 0x3ff;
- uint64_t r = p & 0x3ff;
-
- r = r << 6 | r >> 4;
- g = g << 6 | g >> 4;
- b = b << 6 | b >> 4;
-
- buffer[i] = 0xffffULL << 48 | r << 32 | g << 16 | b;
- }
- }
+ r = r << 6 | r >> 4;
+ g = g << 6 | g >> 4;
+ b = b << 6 | b >> 4;
+
+ return 0xffffULL << 48 | r << 32 | g << 16 | b;
}
-static void
-fetch_pixels_a8r8g8b8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a8r8g8b8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
-
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- buffer[i] = READ (pict, (uint32_t *)bits + offset);
- }
- }
+ uint32_t *bits = image->bits + line * image->rowstride;
+ return READ (image, (uint32_t *)bits + offset);
}
-static void
-fetch_pixels_x8r8g8b8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x8r8g8b8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
-
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- buffer[i] = READ (pict, (uint32_t *)bits + offset) | 0xff000000;
- }
- }
+ uint32_t *bits = image->bits + line * image->rowstride;
+
+ return READ (image, (uint32_t *)bits + offset) | 0xff000000;
}
-static void
-fetch_pixels_a8b8g8r8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a8b8g8r8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint32_t *)bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint32_t *)bits + offset);
-
- buffer[i] = ((pixel & 0xff000000) |
- ((pixel >> 16) & 0xff) |
- (pixel & 0x0000ff00) |
- ((pixel & 0xff) << 16));
- }
- }
+ return ((pixel & 0xff000000) |
+ ((pixel >> 16) & 0xff) |
+ (pixel & 0x0000ff00) |
+ ((pixel & 0xff) << 16));
}
-static void
-fetch_pixels_x8b8g8r8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x8b8g8r8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint32_t *)bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint32_t *)bits + offset);
-
- buffer[i] = (0xff000000) |
- ((pixel >> 16) & 0xff) |
- (pixel & 0x0000ff00) |
- ((pixel & 0xff) << 16);
- }
- }
+ return ((0xff000000) |
+ ((pixel >> 16) & 0xff) |
+ (pixel & 0x0000ff00) |
+ ((pixel & 0xff) << 16));
}
-static void
-fetch_pixels_b8g8r8a8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_b8g8r8a8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint32_t *)bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint32_t *)bits + offset);
-
- buffer[i] = ((pixel & 0xff000000) >> 24 |
- (pixel & 0x00ff0000) >> 8 |
- (pixel & 0x0000ff00) << 8 |
- (pixel & 0x000000ff) << 24);
- }
- }
+ return ((pixel & 0xff000000) >> 24 |
+ (pixel & 0x00ff0000) >> 8 |
+ (pixel & 0x0000ff00) << 8 |
+ (pixel & 0x000000ff) << 24);
}
-static void
-fetch_pixels_b8g8r8x8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_b8g8r8x8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint32_t *)bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint32_t *)bits + offset);
-
- buffer[i] = ((0xff000000) |
- (pixel & 0xff000000) >> 24 |
- (pixel & 0x00ff0000) >> 8 |
- (pixel & 0x0000ff00) << 8);
- }
- }
+ return ((0xff000000) |
+ (pixel & 0xff000000) >> 24 |
+ (pixel & 0x00ff0000) >> 8 |
+ (pixel & 0x0000ff00) << 8);
}
-static void
-fetch_pixels_r8g8b8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_r8g8b8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint8_t *pixel = ((uint8_t *) bits) + (offset * 3);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint8_t *pixel = ((uint8_t *) bits) + (offset * 3);
-
#ifdef WORDS_BIGENDIAN
- buffer[i] = (0xff000000 |
- (READ (pict, pixel + 0) << 16) |
- (READ (pict, pixel + 1) << 8) |
- (READ (pict, pixel + 2)));
+ return (0xff000000 |
+ (READ (image, pixel + 0) << 16) |
+ (READ (image, pixel + 1) << 8) |
+ (READ (image, pixel + 2)));
#else
- buffer[i] = (0xff000000 |
- (READ (pict, pixel + 2) << 16) |
- (READ (pict, pixel + 1) << 8) |
- (READ (pict, pixel + 0)));
+ return (0xff000000 |
+ (READ (image, pixel + 2) << 16) |
+ (READ (image, pixel + 1) << 8) |
+ (READ (image, pixel + 0)));
#endif
- }
- }
}
-static void
-fetch_pixels_b8g8r8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_b8g8r8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
-
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint8_t *pixel = ((uint8_t *) bits) + (offset * 3);
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint8_t *pixel = ((uint8_t *) bits) + (offset * 3);
#ifdef WORDS_BIGENDIAN
- buffer[i] = (0xff000000 |
- (READ (pict, pixel + 2) << 16) |
- (READ (pict, pixel + 1) << 8) |
- (READ (pict, pixel + 0)));
+ return (0xff000000 |
+ (READ (image, pixel + 2) << 16) |
+ (READ (image, pixel + 1) << 8) |
+ (READ (image, pixel + 0)));
#else
- buffer[i] = (0xff000000 |
- (READ (pict, pixel + 0) << 16) |
- (READ (pict, pixel + 1) << 8) |
- (READ (pict, pixel + 2)));
+ return (0xff000000 |
+ (READ (image, pixel + 0) << 16) |
+ (READ (image, pixel + 1) << 8) |
+ (READ (image, pixel + 2)));
#endif
- }
- }
}
-static void
-fetch_pixels_r5g6b5 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_r5g6b5 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t r, g, b;
-
- r = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) << 8;
- g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
- b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ r = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) << 8;
+ g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
+ b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_b5g6r5 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_b5g6r5 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t r, g, b;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t r, g, b;
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
-
- b = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) >> 8;
- g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
- r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ b = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) >> 8;
+ g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
+ r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_a1r5g5b5 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a1r5g5b5 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t a, r, g, b;
-
- a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
- r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
- g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
- b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
-
- buffer[i] = (a | r | g | b);
- }
- }
+ a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
+ r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
+ g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
+ b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
+
+ return (a | r | g | b);
}
-static void
-fetch_pixels_x1r5g5b5 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x1r5g5b5 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t r, g, b;
-
- r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
- g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
- b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
+ g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
+ b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_a1b5g5r5 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a1b5g5r5 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t a, r, g, b;
-
- a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
- b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
- g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
- r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
-
- buffer[i] = (a | r | g | b);
- }
- }
+ a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
+ b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
+ g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
+ r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
+
+ return (a | r | g | b);
}
-static void
-fetch_pixels_x1b5g5r5 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x1b5g5r5 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t r, g, b;
-
- b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
- g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
- r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
+ g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
+ r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_a4r4g4b4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a4r4g4b4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t a, r, g, b;
-
- a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
- r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
- g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
- b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
-
- buffer[i] = (a | r | g | b);
- }
- }
+ a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
+ r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
+ g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
+ b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
+
+ return (a | r | g | b);
}
-static void
-fetch_pixels_x4r4g4b4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x4r4g4b4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t r, g, b;
-
- r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
- g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
- b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
+ g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
+ b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_a4b4g4r4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a4b4g4r4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t a, r, g, b;
-
- a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
- b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
- g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
- r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
-
- buffer[i] = (a | r | g | b);
- }
- }
+ a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
+ b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
+ g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
+ r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
+
+ return (a | r | g | b);
}
-static void
-fetch_pixels_x4b4g4r4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x4b4g4r4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint16_t *) bits + offset);
- uint32_t r, g, b;
-
- b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
- g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
- r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
+ g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
+ r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_a8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
-
- buffer[i] = pixel << 24;
- }
- }
+ return pixel << 24;
}
-static void
-fetch_pixels_r3g3b2 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_r3g3b2 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
- uint32_t r, g, b;
-
- r = ((pixel & 0xe0) |
- ((pixel & 0xe0) >> 3) |
- ((pixel & 0xc0) >> 6)) << 16;
-
- g = ((pixel & 0x1c) |
- ((pixel & 0x18) >> 3) |
- ((pixel & 0x1c) << 3)) << 8;
-
- b = (((pixel & 0x03) ) |
- ((pixel & 0x03) << 2) |
- ((pixel & 0x03) << 4) |
- ((pixel & 0x03) << 6));
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ r = ((pixel & 0xe0) |
+ ((pixel & 0xe0) >> 3) |
+ ((pixel & 0xc0) >> 6)) << 16;
+
+ g = ((pixel & 0x1c) |
+ ((pixel & 0x18) >> 3) |
+ ((pixel & 0x1c) << 3)) << 8;
+
+ b = (((pixel & 0x03) ) |
+ ((pixel & 0x03) << 2) |
+ ((pixel & 0x03) << 4) |
+ ((pixel & 0x03) << 6));
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_b2g3r3 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_b2g3r3 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
- uint32_t r, g, b;
-
- b = ((pixel & 0xc0) |
- ((pixel & 0xc0) >> 2) |
- ((pixel & 0xc0) >> 4) |
- ((pixel & 0xc0) >> 6));
-
- g = ((pixel & 0x38) |
- ((pixel & 0x38) >> 3) |
- ((pixel & 0x30) << 2)) << 8;
-
- r = ((pixel & 0x07) |
- ((pixel & 0x07) << 3) |
- ((pixel & 0x06) << 6)) << 16;
-
- buffer[i] = (0xff000000 | r | g | b);
- }
- }
+ b = ((pixel & 0xc0) |
+ ((pixel & 0xc0) >> 2) |
+ ((pixel & 0xc0) >> 4) |
+ ((pixel & 0xc0) >> 6));
+
+ g = ((pixel & 0x38) |
+ ((pixel & 0x38) >> 3) |
+ ((pixel & 0x30) << 2)) << 8;
+
+ r = ((pixel & 0x07) |
+ ((pixel & 0x07) << 3) |
+ ((pixel & 0x06) << 6)) << 16;
+
+ return (0xff000000 | r | g | b);
}
-static void
-fetch_pixels_a2r2g2b2 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a2r2g2b2 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
- uint32_t a, r, g, b;
-
- a = ((pixel & 0xc0) * 0x55) << 18;
- r = ((pixel & 0x30) * 0x55) << 12;
- g = ((pixel & 0x0c) * 0x55) << 6;
- b = ((pixel & 0x03) * 0x55);
-
- buffer[i] = a | r | g | b;
- }
- }
+ a = ((pixel & 0xc0) * 0x55) << 18;
+ r = ((pixel & 0x30) * 0x55) << 12;
+ g = ((pixel & 0x0c) * 0x55) << 6;
+ b = ((pixel & 0x03) * 0x55);
+
+ return a | r | g | b;
}
-static void
-fetch_pixels_a2b2g2r2 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a2b2g2r2 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
- uint32_t a, r, g, b;
-
- a = ((pixel & 0xc0) * 0x55) << 18;
- b = ((pixel & 0x30) * 0x55) >> 6;
- g = ((pixel & 0x0c) * 0x55) << 6;
- r = ((pixel & 0x03) * 0x55) << 16;
-
- buffer[i] = a | r | g | b;
- }
- }
+ a = ((pixel & 0xc0) * 0x55) << 18;
+ b = ((pixel & 0x30) * 0x55) >> 6;
+ g = ((pixel & 0x0c) * 0x55) << 6;
+ r = ((pixel & 0x03) * 0x55) << 16;
+
+ return a | r | g | b;
}
-static void
-fetch_pixels_c8 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_c8 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+ const pixman_indexed_t * indexed = image->indexed;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
- const pixman_indexed_t * indexed = pict->indexed;
-
- buffer[i] = indexed->rgba[pixel];
- }
- }
+ return indexed->rgba[pixel];
}
-static void
-fetch_pixels_x4a4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_x4a4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, (uint8_t *) bits + offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, (uint8_t *) bits + offset);
-
- buffer[i] = ((pixel & 0xf) | ((pixel & 0xf) << 4)) << 24;
- }
- }
+ return ((pixel & 0xf) | ((pixel & 0xf) << 4)) << 24;
}
-static void
-fetch_pixels_a4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = FETCH_4 (image, bits, offset);
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = FETCH_4 (pict, bits, offset);
-
- pixel |= pixel << 4;
- buffer[i] = pixel << 24;
- }
- }
+ pixel |= pixel << 4;
+ return pixel << 24;
}
-static void
-fetch_pixels_r1g2b1 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_r1g2b1 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = FETCH_4 (image, bits, offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = FETCH_4 (pict, bits, offset);
- uint32_t r, g, b;
-
- r = ((pixel & 0x8) * 0xff) << 13;
- g = ((pixel & 0x6) * 0x55) << 7;
- b = ((pixel & 0x1) * 0xff);
-
- buffer[i] = 0xff000000 | r | g | b;
- }
- }
+ r = ((pixel & 0x8) * 0xff) << 13;
+ g = ((pixel & 0x6) * 0x55) << 7;
+ b = ((pixel & 0x1) * 0xff);
+
+ return 0xff000000 | r | g | b;
}
-static void
-fetch_pixels_b1g2r1 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_b1g2r1 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = FETCH_4 (image, bits, offset);
+ uint32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = FETCH_4 (pict, bits, offset);
- uint32_t r, g, b;
-
- b = ((pixel & 0x8) * 0xff) >> 3;
- g = ((pixel & 0x6) * 0x55) << 7;
- r = ((pixel & 0x1) * 0xff) << 16;
-
- buffer[i] = 0xff000000 | r | g | b;
- }
- }
+ b = ((pixel & 0x8) * 0xff) >> 3;
+ g = ((pixel & 0x6) * 0x55) << 7;
+ r = ((pixel & 0x1) * 0xff) << 16;
+
+ return 0xff000000 | r | g | b;
}
-static void
-fetch_pixels_a1r1g1b1 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a1r1g1b1 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = FETCH_4 (image, bits, offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = FETCH_4 (pict, bits, offset);
- uint32_t a, r, g, b;
-
- a = ((pixel & 0x8) * 0xff) << 21;
- r = ((pixel & 0x4) * 0xff) << 14;
- g = ((pixel & 0x2) * 0xff) << 7;
- b = ((pixel & 0x1) * 0xff);
-
- buffer[i] = a | r | g | b;
- }
- }
+ a = ((pixel & 0x8) * 0xff) << 21;
+ r = ((pixel & 0x4) * 0xff) << 14;
+ g = ((pixel & 0x2) * 0xff) << 7;
+ b = ((pixel & 0x1) * 0xff);
+
+ return a | r | g | b;
}
-static void
-fetch_pixels_a1b1g1r1 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a1b1g1r1 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = FETCH_4 (image, bits, offset);
+ uint32_t a, r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = FETCH_4 (pict, bits, offset);
- uint32_t a, r, g, b;
-
- a = ((pixel & 0x8) * 0xff) << 21;
- r = ((pixel & 0x4) * 0xff) >> 3;
- g = ((pixel & 0x2) * 0xff) << 7;
- b = ((pixel & 0x1) * 0xff) << 16;
-
- buffer[i] = a | r | g | b;
- }
- }
+ a = ((pixel & 0x8) * 0xff) << 21;
+ r = ((pixel & 0x4) * 0xff) >> 3;
+ g = ((pixel & 0x2) * 0xff) << 7;
+ b = ((pixel & 0x1) * 0xff) << 16;
+
+ return a | r | g | b;
}
-static void
-fetch_pixels_c4 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_c4 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = FETCH_4 (image, bits, offset);
+ const pixman_indexed_t * indexed = image->indexed;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = FETCH_4 (pict, bits, offset);
- const pixman_indexed_t * indexed = pict->indexed;
-
- buffer[i] = indexed->rgba[pixel];
- }
- }
+ return indexed->rgba[pixel];
}
-static void
-fetch_pixels_a1 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_a1 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, bits + (offset >> 5));
+ uint32_t a;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, bits + (offset >> 5));
- uint32_t a;
-
#ifdef WORDS_BIGENDIAN
- a = pixel >> (0x1f - (offset & 0x1f));
+ a = pixel >> (0x1f - (offset & 0x1f));
#else
- a = pixel >> (offset & 0x1f);
+ a = pixel >> (offset & 0x1f);
#endif
- a = a & 1;
- a |= a << 1;
- a |= a << 2;
- a |= a << 4;
-
- buffer[i] = a << 24;
- }
- }
+ a = a & 1;
+ a |= a << 1;
+ a |= a << 2;
+ a |= a << 4;
+
+ return a << 24;
}
-static void
-fetch_pixels_g1 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_g1 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ uint32_t *bits = image->bits + line * image->rowstride;
+ uint32_t pixel = READ (image, bits + (offset >> 5));
+ const pixman_indexed_t * indexed = image->indexed;
+ uint32_t a;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- uint32_t *bits = pict->bits + line * pict->rowstride;
- uint32_t pixel = READ (pict, bits + (offset >> 5));
- const pixman_indexed_t * indexed = pict->indexed;
- uint32_t a;
-
#ifdef WORDS_BIGENDIAN
- a = pixel >> (0x1f - (offset & 0x1f));
+ a = pixel >> (0x1f - (offset & 0x1f));
#else
- a = pixel >> (offset & 0x1f);
+ a = pixel >> (offset & 0x1f);
#endif
- a = a & 1;
-
- buffer[i] = indexed->rgba[a];
- }
- }
+ a = a & 1;
+
+ return indexed->rgba[a];
}
-static void
-fetch_pixels_yuy2 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_yuy2 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ const uint32_t *bits = image->bits + image->rowstride * line;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- const uint32_t *bits = pict->bits + pict->rowstride * line;
-
- int16_t y, u, v;
- int32_t r, g, b;
-
- y = ((uint8_t *) bits)[offset << 1] - 16;
- u = ((uint8_t *) bits)[((offset << 1) & - 4) + 1] - 128;
- v = ((uint8_t *) bits)[((offset << 1) & - 4) + 3] - 128;
-
- /* R = 1.164(Y - 16) + 1.596(V - 128) */
- r = 0x012b27 * y + 0x019a2e * v;
-
- /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
- g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
-
- /* B = 1.164(Y - 16) + 2.018(U - 128) */
- b = 0x012b27 * y + 0x0206a2 * u;
-
- buffer[i] = 0xff000000 |
- (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) |
- (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) |
- (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
- }
- }
+ int16_t y, u, v;
+ int32_t r, g, b;
+
+ y = ((uint8_t *) bits)[offset << 1] - 16;
+ u = ((uint8_t *) bits)[((offset << 1) & - 4) + 1] - 128;
+ v = ((uint8_t *) bits)[((offset << 1) & - 4) + 3] - 128;
+
+ /* R = 1.164(Y - 16) + 1.596(V - 128) */
+ r = 0x012b27 * y + 0x019a2e * v;
+
+ /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
+ g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
+
+ /* B = 1.164(Y - 16) + 2.018(U - 128) */
+ b = 0x012b27 * y + 0x0206a2 * u;
+
+ return 0xff000000 |
+ (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) |
+ (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) |
+ (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
}
-static void
-fetch_pixels_yv12 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_yv12 (bits_image_t *image,
+ int offset,
+ int line)
{
- int i;
+ YV12_SETUP (image);
+ int16_t y = YV12_Y (line)[offset] - 16;
+ int16_t u = YV12_U (line)[offset >> 1] - 128;
+ int16_t v = YV12_V (line)[offset >> 1] - 128;
+ int32_t r, g, b;
- for (i = 0; i < n_pixels; ++i)
- {
- int offset = buffer[2 * i];
- int line = buffer[2 * i + 1];
-
- if (offset == 0xffffffff || line == 0xffffffff)
- {
- buffer[i] = 0;
- }
- else
- {
- YV12_SETUP (pict);
- int16_t y = YV12_Y (line)[offset] - 16;
- int16_t u = YV12_U (line)[offset >> 1] - 128;
- int16_t v = YV12_V (line)[offset >> 1] - 128;
- int32_t r, g, b;
-
- /* R = 1.164(Y - 16) + 1.596(V - 128) */
- r = 0x012b27 * y + 0x019a2e * v;
-
- /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
- g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
-
- /* B = 1.164(Y - 16) + 2.018(U - 128) */
- b = 0x012b27 * y + 0x0206a2 * u;
-
- buffer[i] = 0xff000000 |
- (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) |
- (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) |
- (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
- }
- }
+ /* R = 1.164(Y - 16) + 1.596(V - 128) */
+ r = 0x012b27 * y + 0x019a2e * v;
+
+ /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
+ g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
+
+ /* B = 1.164(Y - 16) + 2.018(U - 128) */
+ b = 0x012b27 * y + 0x0206a2 * u;
+
+ return 0xff000000 |
+ (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) |
+ (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) |
+ (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
}
/*********************************** Store ************************************/
@@ -2762,7 +2174,7 @@ store_scanline_a1b5g5r5 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT_A (values[i]);
-
+
WRITE (image, pixel++,
((a << 8) & 0x8000) |
((b << 7) & 0x7c00) |
@@ -2785,7 +2197,7 @@ store_scanline_x1b5g5r5 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT (values[i]);
-
+
WRITE (image, pixel++, ((b << 7) & 0x7c00) |
((g << 2) & 0x03e0) |
((r >> 3) ));
@@ -2806,7 +2218,7 @@ store_scanline_a4r4g4b4 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT_A (values[i]);
-
+
WRITE (image, pixel++,
((a << 8) & 0xf000) |
((r << 4) & 0x0f00) |
@@ -2829,7 +2241,7 @@ store_scanline_x4r4g4b4 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT (values[i]);
-
+
WRITE (image, pixel++,
((r << 4) & 0x0f00) |
((g ) & 0x00f0) |
@@ -2872,7 +2284,7 @@ store_scanline_x4b4g4r4 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT (values[i]);
-
+
WRITE (image, pixel++,
((b << 4) & 0x0f00) |
((g ) & 0x00f0) |
@@ -2933,7 +2345,7 @@ store_scanline_b2g3r3 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT (values[i]);
-
+
WRITE (image, pixel++,
((b ) & 0xc0) |
((g >> 2) & 0x38) |
@@ -2955,7 +2367,7 @@ store_scanline_a2r2g2b2 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT_A (values[i]);
-
+
WRITE (image, pixel++,
((a ) & 0xc0) |
((r >> 2) & 0x30) |
@@ -2978,7 +2390,7 @@ store_scanline_a2b2g2r2 (bits_image_t * image,
for (i = 0; i < width; ++i)
{
SPLIT_A (values[i]);
-
+
*(pixel++) =
((a ) & 0xc0) |
((b >> 2) & 0x30) |
@@ -3020,12 +2432,12 @@ store_scanline_x4a4 (bits_image_t * image,
#define STORE_8(img,l,o,v) (WRITE (img, (uint8_t *)(l) + ((o) >> 3), (v)))
#ifdef WORDS_BIGENDIAN
-#define STORE_4(img,l,o,v) \
+#define STORE_4(img,l,o,v) \
STORE_8 (img,l,o,((o) & 4 ? \
(FETCH_8 (img,l,o) & 0xf0) | (v) : \
(FETCH_8 (img,l,o) & 0x0f) | ((v) << 4)))
#else
-#define STORE_4(img,l,o,v) \
+#define STORE_4(img,l,o,v) \
STORE_8 (img,l,o,((o) & 4 ? \
(FETCH_8 (img,l,o) & 0x0f) | ((v) << 4) : \
(FETCH_8 (img,l,o) & 0xf0) | (v)))
@@ -3169,7 +2581,7 @@ store_scanline_a1 (bits_image_t * image,
{
uint32_t *pixel = ((uint32_t *) bits) + ((i + x) >> 5);
uint32_t mask, v;
-
+
#ifdef WORDS_BIGENDIAN
mask = 1 << (0x1f - ((i + x) & 0x1f));
#else
@@ -3196,14 +2608,14 @@ store_scanline_g1 (bits_image_t * image,
{
uint32_t *pixel = ((uint32_t *) bits) + ((i + x) >> 5);
uint32_t mask, v;
-
+
#ifdef WORDS_BIGENDIAN
mask = 1 << (0x1f - ((i + x) & 0x1f));
#else
mask = 1 << ((i + x) & 0x1f);
#endif
v = RGB24_TO_ENTRY_Y (indexed, values[i]) ? mask : 0;
-
+
WRITE (image, pixel, (READ (image, pixel) & ~mask) | v);
}
}
@@ -3258,14 +2670,17 @@ fetch_scanline_generic_64 (pixman_image_t *image,
}
/* Despite the type, this function expects a uint64_t *buffer */
-static void
-fetch_pixels_generic_64 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint64_t
+fetch_pixel_generic_64 (bits_image_t *image,
+ int offset,
+ int line)
{
- pict->fetch_pixels_raw_32 (pict, buffer, n_pixels);
+ uint32_t pixel32 = image->fetch_pixel_raw_32 (image, offset, line);
+ uint64_t result;
- pixman_expand ((uint64_t *)buffer, buffer, pict->format, n_pixels);
+ pixman_expand ((uint64_t *)&result, &pixel32, image->format, 1);
+
+ return result;
}
/*
@@ -3274,17 +2689,17 @@ fetch_pixels_generic_64 (bits_image_t *pict,
*
* WARNING: This function loses precision!
*/
-static void
-fetch_pixels_generic_lossy_32 (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+fetch_pixel_generic_lossy_32 (bits_image_t *image,
+ int offset,
+ int line)
{
- /* Since buffer contains n_pixels coordinate pairs, it also has enough
- * room for n_pixels 64 bit pixels.
- */
- pict->fetch_pixels_raw_64 (pict, buffer, n_pixels);
+ uint64_t pixel64 = image->fetch_pixel_raw_64 (image, offset, line);
+ uint32_t result;
- pixman_contract (buffer, (uint64_t *)buffer, n_pixels);
+ pixman_contract (&result, &pixel64, 1);
+
+ return result;
}
typedef struct
@@ -3292,8 +2707,8 @@ typedef struct
pixman_format_code_t format;
fetch_scanline_t fetch_scanline_raw_32;
fetch_scanline_t fetch_scanline_raw_64;
- fetch_pixels_t fetch_pixels_raw_32;
- fetch_pixels_t fetch_pixels_raw_64;
+ fetch_pixel_32_t fetch_pixel_raw_32;
+ fetch_pixel_64_t fetch_pixel_raw_64;
store_scanline_t store_scanline_raw_32;
store_scanline_t store_scanline_raw_64;
} format_info_t;
@@ -3303,9 +2718,9 @@ typedef struct
PIXMAN_ ## format, \
fetch_scanline_ ## format, \
fetch_scanline_generic_64, \
- fetch_pixels_ ## format, fetch_pixels_generic_64, \
+ fetch_pixel_ ## format, fetch_pixel_generic_64, \
store_scanline_ ## format, store_scanline_generic_64 \
- }
+ }
static const format_info_t accessors[] =
{
@@ -3344,17 +2759,17 @@ static const format_info_t accessors[] =
FORMAT_INFO (c8),
#define fetch_scanline_g8 fetch_scanline_c8
-#define fetch_pixels_g8 fetch_pixels_c8
+#define fetch_pixel_g8 fetch_pixel_c8
#define store_scanline_g8 store_scanline_c8
FORMAT_INFO (g8),
-
+
#define fetch_scanline_x4c4 fetch_scanline_c8
-#define fetch_pixels_x4c4 fetch_pixels_c8
+#define fetch_pixel_x4c4 fetch_pixel_c8
#define store_scanline_x4c4 store_scanline_c8
FORMAT_INFO (x4c4),
-
+
#define fetch_scanline_x4g4 fetch_scanline_c8
-#define fetch_pixels_x4g4 fetch_pixels_c8
+#define fetch_pixel_x4g4 fetch_pixel_c8
#define store_scanline_x4g4 store_scanline_c8
FORMAT_INFO (x4g4),
@@ -3368,9 +2783,9 @@ static const format_info_t accessors[] =
FORMAT_INFO (a1b1g1r1),
FORMAT_INFO (c4),
-
+
#define fetch_scanline_g4 fetch_scanline_c4
-#define fetch_pixels_g4 fetch_pixels_c4
+#define fetch_pixel_g4 fetch_pixel_c4
#define store_scanline_g4 store_scanline_c4
FORMAT_INFO (g4),
@@ -3382,33 +2797,33 @@ static const format_info_t accessors[] =
{ PIXMAN_a2r10g10b10,
NULL, fetch_scanline_a2r10g10b10,
- fetch_pixels_generic_lossy_32, fetch_pixels_a2r10g10b10_64,
+ fetch_pixel_generic_lossy_32, fetch_pixel_a2r10g10b10,
NULL, store_scanline_a2r10g10b10 },
{ PIXMAN_x2r10g10b10,
NULL, fetch_scanline_x2r10g10b10,
- fetch_pixels_generic_lossy_32, fetch_pixels_x2r10g10b10_64,
+ fetch_pixel_generic_lossy_32, fetch_pixel_x2r10g10b10,
NULL, store_scanline_x2r10g10b10 },
{ PIXMAN_a2b10g10r10,
NULL, fetch_scanline_a2b10g10r10,
- fetch_pixels_generic_lossy_32, fetch_pixels_a2b10g10r10_64,
+ fetch_pixel_generic_lossy_32, fetch_pixel_a2b10g10r10,
NULL, store_scanline_a2b10g10r10 },
{ PIXMAN_x2b10g10r10,
NULL, fetch_scanline_x2b10g10r10,
- fetch_pixels_generic_lossy_32, fetch_pixels_x2b10g10r10_64,
+ fetch_pixel_generic_lossy_32, fetch_pixel_x2b10g10r10,
NULL, store_scanline_x2b10g10r10 },
/* YUV formats */
{ PIXMAN_yuy2,
fetch_scanline_yuy2, fetch_scanline_generic_64,
- fetch_pixels_yuy2, fetch_pixels_generic_64,
+ fetch_pixel_yuy2, fetch_pixel_generic_64,
NULL, NULL },
-
+
{ PIXMAN_yv12,
fetch_scanline_yv12, fetch_scanline_generic_64,
- fetch_pixels_yv12, fetch_pixels_generic_64,
+ fetch_pixel_yv12, fetch_pixel_generic_64,
NULL, NULL },
{ PIXMAN_null },
@@ -3425,8 +2840,8 @@ setup_accessors (bits_image_t *image)
{
image->fetch_scanline_raw_32 = info->fetch_scanline_raw_32;
image->fetch_scanline_raw_64 = info->fetch_scanline_raw_64;
- image->fetch_pixels_raw_32 = info->fetch_pixels_raw_32;
- image->fetch_pixels_raw_64 = info->fetch_pixels_raw_64;
+ image->fetch_pixel_raw_32 = info->fetch_pixel_raw_32;
+ image->fetch_pixel_raw_64 = info->fetch_pixel_raw_64;
image->store_scanline_raw_32 = info->store_scanline_raw_32;
image->store_scanline_raw_64 = info->store_scanline_raw_64;
diff --git a/pixman/pixman/pixman-arm-neon.c b/pixman/pixman/pixman-arm-neon.c
index 14f74822a..4125d1b0c 100644
--- a/pixman/pixman/pixman-arm-neon.c
+++ b/pixman/pixman/pixman-arm-neon.c
@@ -835,7 +835,7 @@ neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
w = width;
-#ifndef USE_GCC_INLINE_ASM
+#if 1 /* #ifndef USE_GCC_INLINE_ASM */
uint8x8_t alpha;
uint16x8_t dval, temp;
uint8x8x4_t sval8temp;
@@ -872,6 +872,7 @@ neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
if (w&4)
vst1q_lane_u64 ((void *)dst4, vreinterpretq_u64_u16 (temp),1);
#else
+ /* this code has some bug (does not pass blitters-test) */
asm volatile (
"vdup.32 d0, %[src]\n\t"
"vdup.8 d1, d0[1]\n\t"
diff --git a/pixman/pixman/pixman-bits-image.c b/pixman/pixman/pixman-bits-image.c
index ff296208b..6b8018985 100644
--- a/pixman/pixman/pixman-bits-image.c
+++ b/pixman/pixman/pixman-bits-image.c
@@ -93,523 +93,331 @@ _pixman_image_store_scanline_64 (bits_image_t * image,
/* Fetch functions */
-/* On entry, @buffer should contain @n_pixels (x, y) coordinate pairs, where
- * x and y are both uint32_ts. On exit, buffer will contain the corresponding
- * pixels.
- *
- * The coordinates must be within the sample grid. If either x or y is 0xffffffff,
- * the pixel returned will be 0.
- */
-static void
-bits_image_fetch_raw_pixels (bits_image_t *image,
- uint32_t * buffer,
- int n_pixels)
+static uint32_t
+bits_image_fetch_pixel_alpha (bits_image_t *image, int x, int y)
{
- image->fetch_pixels_raw_32 (image, buffer, n_pixels);
-}
+ uint32_t pixel;
+ uint32_t pixel_a;
-static void
-bits_image_fetch_alpha_pixels (bits_image_t *image,
- uint32_t * buffer,
- int n_pixels)
-{
-#define N_ALPHA_PIXELS 256
+ pixel = image->fetch_pixel_raw_32 (image, x, y);
- uint32_t alpha_pixels[N_ALPHA_PIXELS * 2];
- int i;
+ assert (image->common.alpha_map);
- if (!image->common.alpha_map)
+ x -= image->common.alpha_origin_x;
+ y -= image->common.alpha_origin_y;
+
+ if (x < 0 || x >= image->common.alpha_map->width ||
+ y < 0 || y >= image->common.alpha_map->height)
{
- bits_image_fetch_raw_pixels (image, buffer, n_pixels);
- return;
+ pixel_a = 0;
}
-
- /* Alpha map */
- i = 0;
- while (i < n_pixels)
+ else
{
- int tmp_n_pixels = MIN (N_ALPHA_PIXELS, n_pixels - i);
- int j;
- int32_t *coords;
-
- memcpy (alpha_pixels, buffer + 2 * i, tmp_n_pixels * 2 * sizeof (int32_t));
- coords = (int32_t *)alpha_pixels;
- for (j = 0; j < tmp_n_pixels; ++j)
- {
- int32_t x = coords[0];
- int32_t y = coords[1];
-
- if (x != 0xffffffff)
- {
- x -= image->common.alpha_origin_x;
-
- if (x < 0 || x >= image->common.alpha_map->width)
- x = 0xffffffff;
- }
-
- if (y != 0xffffffff)
- {
- y -= image->common.alpha_origin_y;
-
- if (y < 0 || y >= image->common.alpha_map->height)
- y = 0xffffffff;
- }
-
- coords[0] = x;
- coords[1] = y;
-
- coords += 2;
- }
-
- bits_image_fetch_raw_pixels (image->common.alpha_map, alpha_pixels,
- tmp_n_pixels);
- bits_image_fetch_raw_pixels (image, buffer + 2 * i, tmp_n_pixels);
-
- for (j = 0; j < tmp_n_pixels; ++j)
- {
- int a = alpha_pixels[j] >> 24;
- uint32_t p = buffer[2 * i - j] | 0xff000000;
+ pixel_a = image->fetch_pixel_raw_32 (
+ image->common.alpha_map, x, y);
+ pixel_a = ALPHA_8 (pixel_a);
+ }
- UN8x4_MUL_UN8 (p, a);
+ UN8x4_MUL_UN8 (pixel, pixel_a);
- buffer[i++] = p;
- }
- }
+ return pixel;
}
-static void
-bits_image_fetch_pixels_src_clip (bits_image_t *image,
- uint32_t * buffer,
- int n_pixels)
+static force_inline uint32_t
+get_pixel (bits_image_t *image, int x, int y, pixman_bool_t check_bounds)
{
- bits_image_fetch_alpha_pixels (image, buffer, n_pixels);
+ if (check_bounds &&
+ (x < 0 || x >= image->width || y < 0 || y >= image->height))
+ {
+ return 0;
+ }
+
+ return image->fetch_pixel_32 (image, x, y);
}
static force_inline void
-repeat (pixman_repeat_t repeat,
- int width,
- int height,
- int * x,
- int * y)
+repeat (pixman_repeat_t repeat, int size, int *coord)
{
switch (repeat)
{
case PIXMAN_REPEAT_NORMAL:
- *x = MOD (*x, width);
- *y = MOD (*y, height);
+ *coord = MOD (*coord, size);
break;
case PIXMAN_REPEAT_PAD:
- *x = CLIP (*x, 0, width - 1);
- *y = CLIP (*y, 0, height - 1);
+ *coord = CLIP (*coord, 0, size - 1);
break;
case PIXMAN_REPEAT_REFLECT:
- *x = MOD (*x, width * 2);
- *y = MOD (*y, height * 2);
+ *coord = MOD (*coord, size * 2);
- if (*x >= width)
- *x = width * 2 - *x - 1;
-
- if (*y >= height)
- *y = height * 2 - *y - 1;
+ if (*coord >= size)
+ *coord = size * 2 - *coord - 1;
break;
case PIXMAN_REPEAT_NONE:
- if (*x < 0 || *x >= width)
- *x = 0xffffffff;
-
- if (*y < 0 || *y >= height)
- *y = 0xffffffff;
break;
}
}
-/* Buffer contains list of fixed-point coordinates on input,
- * a list of pixels on output
- */
-static void
-bits_image_fetch_nearest_pixels (bits_image_t *image,
- uint32_t * buffer,
- int n_pixels)
+static force_inline uint32_t
+bits_image_fetch_pixel_nearest (bits_image_t *image,
+ pixman_fixed_t x,
+ pixman_fixed_t y)
{
- pixman_repeat_t repeat_mode = image->common.repeat;
- int width = image->width;
- int height = image->height;
- int i;
+ int x0 = pixman_fixed_to_int (x - pixman_fixed_e);
+ int y0 = pixman_fixed_to_int (y - pixman_fixed_e);
- for (i = 0; i < 2 * n_pixels; i += 2)
+ if (image->common.repeat != PIXMAN_REPEAT_NONE)
{
- int32_t *coords = (int32_t *)buffer;
- int32_t x, y;
-
- /* Subtract pixman_fixed_e to ensure that 0.5 rounds to 0, not 1 */
- x = pixman_fixed_to_int (coords[i] - pixman_fixed_e);
- y = pixman_fixed_to_int (coords[i + 1] - pixman_fixed_e);
+ repeat (image->common.repeat, image->width, &x0);
+ repeat (image->common.repeat, image->height, &y0);
- repeat (repeat_mode, width, height, &x, &y);
-
- coords[i] = x;
- coords[i + 1] = y;
+ return get_pixel (image, x0, y0, FALSE);
+ }
+ else
+ {
+ return get_pixel (image, x0, y0, TRUE);
}
-
- bits_image_fetch_pixels_src_clip (image, buffer, n_pixels);
}
-#define N_TMP_PIXELS (256)
-
-/* Buffer contains list of fixed-point coordinates on input,
- * a list of pixels on output
- */
-static void
-bits_image_fetch_bilinear_pixels (bits_image_t *image,
- uint32_t * buffer,
- int n_pixels)
+static force_inline uint32_t
+bits_image_fetch_pixel_bilinear (bits_image_t *image,
+ pixman_fixed_t x,
+ pixman_fixed_t y)
{
-/* (Four pixels * two coordinates) per pixel */
-#define N_TEMPS (N_TMP_PIXELS * 8)
-#define N_DISTS (N_TMP_PIXELS * 2)
-
- uint32_t temps[N_TEMPS];
- int32_t dists[N_DISTS];
pixman_repeat_t repeat_mode = image->common.repeat;
int width = image->width;
int height = image->height;
- int32_t *coords;
- int i;
-
- i = 0;
- coords = (int32_t *)buffer;
- while (i < n_pixels)
- {
- int tmp_n_pixels = MIN (N_TMP_PIXELS, n_pixels - i);
- int32_t distx, disty;
- uint32_t *u;
- int32_t *t, *d;
- int j;
-
- t = (int32_t *)temps;
- d = dists;
- for (j = 0; j < tmp_n_pixels; ++j)
- {
- int32_t x1, y1, x2, y2;
+ int x1, y1, x2, y2;
+ uint32_t tl, tr, bl, br, r;
+ int32_t distx, disty, idistx, idisty;
+ uint32_t ft, fb;
- x1 = coords[0] - pixman_fixed_1 / 2;
- y1 = coords[1] - pixman_fixed_1 / 2;
+ x1 = x - pixman_fixed_1 / 2;
+ y1 = y - pixman_fixed_1 / 2;
- distx = (x1 >> 8) & 0xff;
- disty = (y1 >> 8) & 0xff;
+ distx = (x1 >> 8) & 0xff;
+ disty = (y1 >> 8) & 0xff;
- x1 >>= 16;
- y1 >>= 16;
- x2 = x1 + 1;
- y2 = y1 + 1;
+ x1 = pixman_fixed_to_int (x1);
+ y1 = pixman_fixed_to_int (y1);
+ x2 = x1 + 1;
+ y2 = y1 + 1;
- repeat (repeat_mode, width, height, &x1, &y1);
- repeat (repeat_mode, width, height, &x2, &y2);
-
- *t++ = x1;
- *t++ = y1;
- *t++ = x2;
- *t++ = y1;
- *t++ = x1;
- *t++ = y2;
- *t++ = x2;
- *t++ = y2;
-
- *d++ = distx;
- *d++ = disty;
-
- coords += 2;
- }
-
- bits_image_fetch_pixels_src_clip (image, temps, tmp_n_pixels * 4);
-
- u = (uint32_t *)temps;
- d = dists;
- for (j = 0; j < tmp_n_pixels; ++j)
- {
- uint32_t tl, tr, bl, br, r;
- int32_t idistx, idisty;
- uint32_t ft, fb;
-
- tl = *u++;
- tr = *u++;
- bl = *u++;
- br = *u++;
-
- distx = *d++;
- disty = *d++;
+ if (repeat_mode != PIXMAN_REPEAT_NONE)
+ {
+ repeat (repeat_mode, width, &x1);
+ repeat (repeat_mode, height, &y1);
+ repeat (repeat_mode, width, &x2);
+ repeat (repeat_mode, height, &y2);
+
+ tl = get_pixel (image, x1, y1, FALSE);
+ bl = get_pixel (image, x1, y2, FALSE);
+ tr = get_pixel (image, x2, y1, FALSE);
+ br = get_pixel (image, x2, y2, FALSE);
+ }
+ else
+ {
+ tl = get_pixel (image, x1, y1, TRUE);
+ tr = get_pixel (image, x2, y1, TRUE);
+ bl = get_pixel (image, x1, y2, TRUE);
+ br = get_pixel (image, x2, y2, TRUE);
+ }
- idistx = 256 - distx;
- idisty = 256 - disty;
+ idistx = 256 - distx;
+ idisty = 256 - disty;
#define GET8(v, i) ((uint16_t) (uint8_t) ((v) >> i))
-
- ft = GET8 (tl, 0) * idistx + GET8 (tr, 0) * distx;
- fb = GET8 (bl, 0) * idistx + GET8 (br, 0) * distx;
- r = (((ft * idisty + fb * disty) >> 16) & 0xff);
- ft = GET8 (tl, 8) * idistx + GET8 (tr, 8) * distx;
- fb = GET8 (bl, 8) * idistx + GET8 (br, 8) * distx;
- r |= (((ft * idisty + fb * disty) >> 8) & 0xff00);
- ft = GET8 (tl, 16) * idistx + GET8 (tr, 16) * distx;
- fb = GET8 (bl, 16) * idistx + GET8 (br, 16) * distx;
- r |= (((ft * idisty + fb * disty)) & 0xff0000);
- ft = GET8 (tl, 24) * idistx + GET8 (tr, 24) * distx;
- fb = GET8 (bl, 24) * idistx + GET8 (br, 24) * distx;
- r |= (((ft * idisty + fb * disty) << 8) & 0xff000000);
-
- buffer[i++] = r;
- }
- }
+ ft = GET8 (tl, 0) * idistx + GET8 (tr, 0) * distx;
+ fb = GET8 (bl, 0) * idistx + GET8 (br, 0) * distx;
+ r = (((ft * idisty + fb * disty) >> 16) & 0xff);
+ ft = GET8 (tl, 8) * idistx + GET8 (tr, 8) * distx;
+ fb = GET8 (bl, 8) * idistx + GET8 (br, 8) * distx;
+ r |= (((ft * idisty + fb * disty) >> 8) & 0xff00);
+ ft = GET8 (tl, 16) * idistx + GET8 (tr, 16) * distx;
+ fb = GET8 (bl, 16) * idistx + GET8 (br, 16) * distx;
+ r |= (((ft * idisty + fb * disty)) & 0xff0000);
+ ft = GET8 (tl, 24) * idistx + GET8 (tr, 24) * distx;
+ fb = GET8 (bl, 24) * idistx + GET8 (br, 24) * distx;
+ r |= (((ft * idisty + fb * disty) << 8) & 0xff000000);
+
+ return r;
}
-/* Buffer contains list of fixed-point coordinates on input,
- * a list of pixels on output
- */
-static void
-bits_image_fetch_convolution_pixels (bits_image_t *image,
- uint32_t * buffer,
- int n_pixels)
+static force_inline uint32_t
+bits_image_fetch_pixel_convolution (bits_image_t *image,
+ pixman_fixed_t x,
+ pixman_fixed_t y)
{
- uint32_t tmp_pixels_stack[N_TMP_PIXELS * 2]; /* Two coordinates per pixel */
- uint32_t *tmp_pixels = tmp_pixels_stack;
pixman_fixed_t *params = image->common.filter_params;
int x_off = (params[0] - pixman_fixed_1) >> 1;
int y_off = (params[1] - pixman_fixed_1) >> 1;
- int n_tmp_pixels;
- int32_t *coords;
- int32_t *t;
- uint32_t *u;
- int i;
- int max_n_kernels;
-
int32_t cwidth = pixman_fixed_to_int (params[0]);
int32_t cheight = pixman_fixed_to_int (params[1]);
- int kernel_size = cwidth * cheight;
+ int32_t srtot, sgtot, sbtot, satot;
+ int32_t i, j, x1, x2, y1, y2;
+ pixman_repeat_t repeat_mode = image->common.repeat;
+ int width = image->width;
+ int height = image->height;
params += 2;
- n_tmp_pixels = N_TMP_PIXELS;
- if (kernel_size > n_tmp_pixels)
- {
- /* Two coordinates per pixel */
- tmp_pixels = malloc (kernel_size * 2 * sizeof (uint32_t));
- n_tmp_pixels = kernel_size;
-
- if (!tmp_pixels)
- {
- /* We ignore out-of-memory during rendering */
- return;
- }
- }
+ x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off);
+ y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off);
+ x2 = x1 + cwidth;
+ y2 = y1 + cheight;
- max_n_kernels = n_tmp_pixels / kernel_size;
+ srtot = sgtot = sbtot = satot = 0;
- i = 0;
- coords = (int32_t *)buffer;
- while (i < n_pixels)
+ for (i = y1; i < y2; ++i)
{
- int n_kernels = MIN (max_n_kernels, (n_pixels - i));
- pixman_repeat_t repeat_mode = image->common.repeat;
- int width = image->width;
- int height = image->height;
- int j;
-
- t = (int32_t *)tmp_pixels;
- for (j = 0; j < n_kernels; ++j)
+ for (j = x1; j < x2; ++j)
{
- int32_t x, y, x1, x2, y1, y2;
+ int rx = j;
+ int ry = i;
- /* Subtract pixman_fixed_e to ensure that 0.5 rounds to 0, not 1 */
- x1 = pixman_fixed_to_int (coords[0] - pixman_fixed_e - x_off);
- y1 = pixman_fixed_to_int (coords[1] - pixman_fixed_e - y_off);
- x2 = x1 + cwidth;
- y2 = y1 + cheight;
+ pixman_fixed_t f = *params;
- for (y = y1; y < y2; ++y)
+ if (f)
{
- for (x = x1; x < x2; ++x)
- {
- int rx = x;
- int ry = y;
+ uint32_t pixel;
- repeat (repeat_mode, width, height, &rx, &ry);
+ if (repeat_mode != PIXMAN_REPEAT_NONE)
+ {
+ repeat (repeat_mode, width, &rx);
+ repeat (repeat_mode, height, &ry);
- *t++ = rx;
- *t++ = ry;
+ pixel = get_pixel (image, rx, ry, FALSE);
}
- }
-
- coords += 2;
- }
-
- bits_image_fetch_pixels_src_clip (image, tmp_pixels, n_kernels * kernel_size);
-
- u = tmp_pixels;
- for (j = 0; j < n_kernels; ++j)
- {
- int32_t srtot, sgtot, sbtot, satot;
- pixman_fixed_t *p = params;
- int k;
-
- srtot = sgtot = sbtot = satot = 0;
-
- for (k = 0; k < kernel_size; ++k)
- {
- pixman_fixed_t f = *p++;
- if (f)
+ else
{
- uint32_t c = *u++;
-
- srtot += RED_8 (c) * f;
- sgtot += GREEN_8 (c) * f;
- sbtot += BLUE_8 (c) * f;
- satot += ALPHA_8 (c) * f;
+ pixel = get_pixel (image, rx, ry, TRUE);
}
- }
-
- satot >>= 16;
- srtot >>= 16;
- sgtot >>= 16;
- sbtot >>= 16;
- satot = CLIP (satot, 0, 0xff);
- srtot = CLIP (srtot, 0, 0xff);
- sgtot = CLIP (sgtot, 0, 0xff);
- sbtot = CLIP (sbtot, 0, 0xff);
+ srtot += RED_8 (pixel) * f;
+ sgtot += GREEN_8 (pixel) * f;
+ sbtot += BLUE_8 (pixel) * f;
+ satot += ALPHA_8 (pixel) * f;
+ }
- buffer[i++] = ((satot << 24) |
- (srtot << 16) |
- (sgtot << 8) |
- (sbtot ));
+ params++;
}
}
- if (tmp_pixels != tmp_pixels_stack)
- free (tmp_pixels);
+ satot >>= 16;
+ srtot >>= 16;
+ sgtot >>= 16;
+ sbtot >>= 16;
+
+ satot = CLIP (satot, 0, 0xff);
+ srtot = CLIP (srtot, 0, 0xff);
+ sgtot = CLIP (sgtot, 0, 0xff);
+ sbtot = CLIP (sbtot, 0, 0xff);
+
+ return ((satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot));
}
-static void
-bits_image_fetch_filtered (bits_image_t *pict,
- uint32_t * buffer,
- int n_pixels)
+static force_inline uint32_t
+bits_image_fetch_pixel_filtered (bits_image_t *image,
+ pixman_fixed_t x,
+ pixman_fixed_t y)
{
- switch (pict->common.filter)
+ switch (image->common.filter)
{
case PIXMAN_FILTER_NEAREST:
case PIXMAN_FILTER_FAST:
- bits_image_fetch_nearest_pixels (pict, buffer, n_pixels);
+ return bits_image_fetch_pixel_nearest (image, x, y);
break;
case PIXMAN_FILTER_BILINEAR:
case PIXMAN_FILTER_GOOD:
case PIXMAN_FILTER_BEST:
- bits_image_fetch_bilinear_pixels (pict, buffer, n_pixels);
+ return bits_image_fetch_pixel_bilinear (image, x, y);
break;
case PIXMAN_FILTER_CONVOLUTION:
- bits_image_fetch_convolution_pixels (pict, buffer, n_pixels);
+ return bits_image_fetch_pixel_convolution (image, x, y);
break;
}
+
+ return 0;
}
static void
-bits_image_fetch_transformed (pixman_image_t * pict,
- int x,
- int y,
+bits_image_fetch_transformed (pixman_image_t * image,
+ int offset,
+ int line,
int width,
uint32_t * buffer,
const uint32_t * mask,
uint32_t mask_bits)
{
- pixman_bool_t affine = TRUE;
- uint32_t tmp_buffer[2 * N_TMP_PIXELS];
- pixman_vector_t unit;
+ pixman_fixed_t x, y, w;
+ pixman_fixed_t ux, uy, uw;
pixman_vector_t v;
- uint32_t *bits;
- int32_t stride;
- int32_t *coords;
int i;
- bits = pict->bits.bits;
- stride = pict->bits.rowstride;
-
/* reference point is the center of the pixel */
- v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
- v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
+ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
+ v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
v.vector[2] = pixman_fixed_1;
/* when using convolution filters or PIXMAN_REPEAT_PAD one
* might get here without a transform */
- if (pict->common.transform)
+ if (image->common.transform)
{
- if (!pixman_transform_point_3d (pict->common.transform, &v))
+ if (!pixman_transform_point_3d (image->common.transform, &v))
return;
- unit.vector[0] = pict->common.transform->matrix[0][0];
- unit.vector[1] = pict->common.transform->matrix[1][0];
- unit.vector[2] = pict->common.transform->matrix[2][0];
-
- affine = (v.vector[2] == pixman_fixed_1 && unit.vector[2] == 0);
+ ux = image->common.transform->matrix[0][0];
+ uy = image->common.transform->matrix[1][0];
+ uw = image->common.transform->matrix[2][0];
}
else
{
- unit.vector[0] = pixman_fixed_1;
- unit.vector[1] = 0;
- unit.vector[2] = 0;
+ ux = pixman_fixed_1;
+ uy = 0;
+ uw = 0;
}
- i = 0;
- while (i < width)
- {
- int n_pixels = MIN (N_TMP_PIXELS, width - i);
- int j;
-
- coords = (int32_t *)tmp_buffer;
+ x = v.vector[0];
+ y = v.vector[1];
+ w = v.vector[2];
- for (j = 0; j < n_pixels; ++j)
+ if (w == pixman_fixed_1 && uw == 0) /* Affine */
+ {
+ for (i = 0; i < width; ++i)
{
- if (affine)
+ if (!mask || (mask[i] & mask_bits))
{
- coords[0] = v.vector[0];
- coords[1] = v.vector[1];
+ buffer[i] =
+ bits_image_fetch_pixel_filtered (&image->bits, x, y);
}
- else
- {
- pixman_fixed_48_16_t div;
-
- div = ((pixman_fixed_48_16_t)v.vector[0] << 16) / v.vector[2];
-
- if ((div >> 16) > 0x7fff)
- coords[0] = 0x7fffffff;
- else if ((div >> 16) < 0x8000)
- coords[0] = 0x80000000;
- else
- coords[0] = div;
- div = ((pixman_fixed_48_16_t)v.vector[1] << 16) / v.vector[2];
+ x += ux;
+ y += uy;
+ }
+ }
+ else
+ {
+ for (i = 0; i < width; ++i)
+ {
+ pixman_fixed_t x0, y0;
- if ((div >> 16) > 0x7fff)
- coords[1] = 0x7fffffff;
- else if ((div >> 16) < 0x8000)
- coords[1] = 0x8000000;
- else
- coords[1] = div;
+ if (!mask || (mask[i] & mask_bits))
+ {
+ x0 = ((pixman_fixed_48_16_t)x << 16) / w;
+ y0 = ((pixman_fixed_48_16_t)y << 16) / w;
- v.vector[2] += unit.vector[2];
+ buffer[i] =
+ bits_image_fetch_pixel_filtered (&image->bits, x0, y0);
}
- coords += 2;
-
- v.vector[0] += unit.vector[0];
- v.vector[1] += unit.vector[1];
+ x += ux;
+ y += uy;
+ w += uw;
}
-
- bits_image_fetch_filtered (&pict->bits, tmp_buffer, n_pixels);
-
- for (j = 0; j < n_pixels; ++j)
- buffer[i++] = tmp_buffer[j];
}
}
@@ -622,17 +430,14 @@ bits_image_fetch_solid_32 (pixman_image_t * image,
const uint32_t * mask,
uint32_t mask_bits)
{
- uint32_t color[2];
+ uint32_t color;
uint32_t *end;
- color[0] = 0;
- color[1] = 0;
-
- image->bits.fetch_pixels_raw_32 (&image->bits, color, 1);
+ color = image->bits.fetch_pixel_raw_32 (&image->bits, 0, 0);
end = buffer + width;
while (buffer < end)
- *(buffer++) = color[0];
+ *(buffer++) = color;
}
static void
@@ -645,15 +450,11 @@ bits_image_fetch_solid_64 (pixman_image_t * image,
uint32_t unused2)
{
uint64_t color;
- uint32_t *coords = (uint32_t *)&color;
uint64_t *buffer = (uint64_t *)b;
uint64_t *end;
- coords[0] = 0;
- coords[1] = 0;
-
- image->bits.fetch_pixels_raw_64 (&image->bits, (uint32_t *)&color, 1);
-
+ color = image->bits.fetch_pixel_raw_64 (&image->bits, 0, 0);
+
end = buffer + width;
while (buffer < end)
*(buffer++) = color;
@@ -732,7 +533,7 @@ bits_image_fetch_untransformed_repeat_normal (bits_image_t *image,
image->fetch_scanline_raw_64 ((pixman_image_t *)image, x, y, w, buffer, NULL, 0);
else
image->fetch_scanline_raw_32 ((pixman_image_t *)image, x, y, w, buffer, NULL, 0);
-
+
buffer += w * (wide? 2 : 1);
x += w;
width -= w;
@@ -837,12 +638,16 @@ bits_image_property_changed (pixman_image_t *image)
_pixman_bits_image_setup_raw_accessors (bits);
+ image->bits.fetch_pixel_32 = image->bits.fetch_pixel_raw_32;
+
if (bits->common.alpha_map)
{
image->common.get_scanline_64 =
_pixman_image_get_scanline_generic_64;
image->common.get_scanline_32 =
bits_image_fetch_transformed;
+
+ image->bits.fetch_pixel_32 = bits_image_fetch_pixel_alpha;
}
else if ((bits->common.repeat != PIXMAN_REPEAT_NONE) &&
bits->width == 1 &&
@@ -941,6 +746,7 @@ pixman_image_create_bits (pixman_format_code_t format,
{
if (free_me)
free (free_me);
+
return NULL;
}
@@ -960,8 +766,6 @@ pixman_image_create_bits (pixman_format_code_t format,
image->common.property_changed = bits_image_property_changed;
- bits_image_property_changed (image);
-
_pixman_image_reset_clip_region (image);
return image;
diff --git a/pixman/pixman/pixman-combine.c.template b/pixman/pixman/pixman-combine.c.template
index 4a0e2bd06..c129980a8 100644
--- a/pixman/pixman/pixman-combine.c.template
+++ b/pixman/pixman/pixman-combine.c.template
@@ -78,7 +78,6 @@ combine_mask_alpha_ca (const comp4_t *src, comp4_t *mask)
if (a == ~0)
{
- x = x >> A_SHIFT;
x |= x << G_SHIFT;
x |= x << R_SHIFT;
*(mask) = x;
@@ -1610,17 +1609,14 @@ combine_over_ca (pixman_implementation_t *imp,
combine_mask_ca (&s, &m);
a = ~m;
- if (a != ~0)
+ if (a)
{
- if (a)
- {
- comp4_t d = *(dest + i);
- UNcx4_MUL_UNcx4_ADD_UNcx4 (d, a, s);
- s = d;
- }
-
- *(dest + i) = s;
+ comp4_t d = *(dest + i);
+ UNcx4_MUL_UNcx4_ADD_UNcx4 (d, a, s);
+ s = d;
}
+
+ *(dest + i) = s;
}
}
@@ -1644,10 +1640,8 @@ combine_over_reverse_ca (pixman_implementation_t *imp,
comp4_t s = *(src + i);
comp4_t m = *(mask + i);
- combine_mask_value_ca (&s, &m);
-
- if (a != MASK)
- UNcx4_MUL_UNc_ADD_UNcx4 (s, a, d);
+ UNcx4_MUL_UNcx4 (s, m);
+ UNcx4_MUL_UNc_ADD_UNcx4 (s, a, d);
*(dest + i) = s;
}
diff --git a/pixman/pixman/pixman-combine.h.template b/pixman/pixman/pixman-combine.h.template
index 2b4bb6045..2f6392f96 100644
--- a/pixman/pixman/pixman-combine.h.template
+++ b/pixman/pixman/pixman-combine.h.template
@@ -43,19 +43,56 @@
*/
/*
+ * x_rb = (x_rb * a) / 255
+ */
+#define UNc_rb_MUL_UNc(x, a, t) \
+ do \
+ { \
+ t = ((x) & RB_MASK) * (a); \
+ t += RB_ONE_HALF; \
+ x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
+ x &= RB_MASK; \
+ } while (0)
+
+/*
+ * x_rb = min (x_rb + y_rb, 255)
+ */
+#define UNc_rb_ADD_UNc_rb(x, y, t) \
+ do \
+ { \
+ t = ((x) + (y)); \
+ t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
+ x = (t & RB_MASK); \
+ } while (0)
+
+/*
+ * x_rb = (x_rb * a_rb) / 255
+ */
+#define UNc_rb_MUL_UNc_rb(x, a, t) \
+ do \
+ { \
+ t = (x & MASK) * (a & MASK); \
+ t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
+ t += RB_ONE_HALF; \
+ t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
+ x = t & RB_MASK; \
+ } while (0)
+
+/*
* x_c = (x_c * a) / 255
*/
#define UNcx4_MUL_UNc(x, a) \
do \
{ \
- comp4_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
- t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- t &= RB_MASK; \
- \
- x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
- x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
- x &= RB_MASK << COMPONENT_SIZE; \
- x += t; \
+ comp4_t r1, r2, t; \
+ \
+ r1 = (x); \
+ UNc_rb_MUL_UNc (r1, a, t); \
+ \
+ r2 = (x) >> G_SHIFT; \
+ UNc_rb_MUL_UNc (r2, a, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -64,33 +101,19 @@
#define UNcx4_MUL_UNc_ADD_UNcx4(x, a, y) \
do \
{ \
- /* multiply and divide: trunc((i + 128)*257/65536) */ \
- comp4_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
- t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- t &= RB_MASK; \
- \
- /* add */ \
- t += y & RB_MASK; \
- \
- /* saturate */ \
- t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
- t &= RB_MASK; \
- \
- /* multiply and divide */ \
- x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
- x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- x &= RB_MASK; \
- \
- /* add */ \
- x += (y >> COMPONENT_SIZE) & RB_MASK; \
- \
- /* saturate */ \
- x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
- x &= RB_MASK; \
- \
- /* recombine */ \
- x <<= COMPONENT_SIZE; \
- x += t; \
+ comp4_t r1, r2, r3, t; \
+ \
+ r1 = (x); \
+ r2 = (y) & RB_MASK; \
+ UNc_rb_MUL_UNc (r1, a, t); \
+ UNc_rb_ADD_UNc_rb (r1, r2, t); \
+ \
+ r2 = (x) >> G_SHIFT; \
+ r3 = ((y) >> G_SHIFT) & RB_MASK; \
+ UNc_rb_MUL_UNc (r2, a, t); \
+ UNc_rb_ADD_UNc_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -99,32 +122,21 @@
#define UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(x, a, y, b) \
do \
{ \
- comp4_t t; \
- comp4_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- t = (x & G_MASK) * a + (y & G_MASK) * b; \
- t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
- t >>= R_SHIFT; \
- \
- t |= r << R_SHIFT; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- t <<= G_SHIFT; \
- \
- r = ((x >> R_SHIFT) & MASK) * a + \
- ((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
- x += (x >> G_SHIFT); \
- x >>= G_SHIFT; \
- x |= r << R_SHIFT; \
- x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
- x &= RB_MASK; \
- x |= t; \
+ comp4_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = y; \
+ UNc_rb_MUL_UNc (r1, a, t); \
+ UNc_rb_MUL_UNc (r2, b, t); \
+ UNc_rb_ADD_UNc_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT); \
+ r3 = (y >> G_SHIFT); \
+ UNc_rb_MUL_UNc (r2, a, t); \
+ UNc_rb_MUL_UNc (r3, b, t); \
+ UNc_rb_ADD_UNc_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -133,19 +145,17 @@
#define UNcx4_MUL_UNcx4(x, a) \
do \
{ \
- comp4_t t; \
- comp4_t r = (x & MASK) * (a & MASK); \
- r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
- r += RB_ONE_HALF; \
- r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- r &= RB_MASK; \
- \
- x >>= G_SHIFT; \
- t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
- t |= (x & R_MASK) * (a >> A_SHIFT); \
- t += RB_ONE_HALF; \
- t = t + ((t >> G_SHIFT) & RB_MASK); \
- x = r | (t & AG_MASK); \
+ comp4_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UNc_rb_MUL_UNc_rb (r1, r2, t); \
+ \
+ r2 = x >> G_SHIFT; \
+ r3 = a >> G_SHIFT; \
+ UNc_rb_MUL_UNc_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -154,26 +164,21 @@
#define UNcx4_MUL_UNcx4_ADD_UNcx4(x, a, y) \
do \
{ \
- comp4_t t; \
- comp4_t r = (x & MASK) * (a & MASK); \
- r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
- r += RB_ONE_HALF; \
- r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- r &= RB_MASK; \
- r += y & RB_MASK; \
- r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
- r &= RB_MASK; \
- \
- x >>= G_SHIFT; \
- t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
- t |= (x & R_MASK) * (a >> A_SHIFT); \
- t += RB_ONE_HALF; \
- t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- t &= RB_MASK; \
- t += (y >> G_SHIFT) & RB_MASK; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- x = r | (t << G_SHIFT); \
+ comp4_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UNc_rb_MUL_UNc_rb (r1, r2, t); \
+ r2 = y & RB_MASK; \
+ UNc_rb_ADD_UNc_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT); \
+ r3 = (a >> G_SHIFT); \
+ UNc_rb_MUL_UNc_rb (r2, r3, t); \
+ r3 = (y >> G_SHIFT) & RB_MASK; \
+ UNc_rb_ADD_UNc_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -182,33 +187,23 @@
#define UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc(x, a, y, b) \
do \
{ \
- comp4_t t; \
- comp4_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
- (y >> A_SHIFT) * b; \
- r += (r >> G_SHIFT) + ONE_HALF; \
- r >>= G_SHIFT; \
- \
- t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
- t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
- t >>= R_SHIFT; \
- \
- t |= r << R_SHIFT; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- t <<= G_SHIFT; \
- \
- r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
- ((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
- x += (x >> G_SHIFT); \
- x >>= G_SHIFT; \
- x |= r << R_SHIFT; \
- x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
- x &= RB_MASK; \
- x |= t; \
+ comp4_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UNc_rb_MUL_UNc_rb (r1, r2, t); \
+ r2 = y; \
+ UNc_rb_MUL_UNc (r2, b, t); \
+ UNc_rb_ADD_UNc_rb (r1, r2, t); \
+ \
+ r2 = x >> G_SHIFT; \
+ r3 = a >> G_SHIFT; \
+ UNc_rb_MUL_UNc_rb (r2, r3, t); \
+ r3 = y >> G_SHIFT; \
+ UNc_rb_MUL_UNc (r3, b, t); \
+ UNc_rb_ADD_UNc_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -217,13 +212,15 @@
#define UNcx4_ADD_UNcx4(x, y) \
do \
{ \
- comp4_t t; \
- comp4_t r = (x & RB_MASK) + (y & RB_MASK); \
- r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
- r &= RB_MASK; \
- \
- t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- r |= (t & RB_MASK) << G_SHIFT; \
- x = r; \
+ comp4_t r1, r2, r3, t; \
+ \
+ r1 = x & RB_MASK; \
+ r2 = y & RB_MASK; \
+ UNc_rb_ADD_UNc_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT) & RB_MASK; \
+ r3 = (y >> G_SHIFT) & RB_MASK; \
+ UNc_rb_ADD_UNc_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
diff --git a/pixman/pixman/pixman-combine32.c b/pixman/pixman/pixman-combine32.c
index b0a2a8011..ab145e405 100644
--- a/pixman/pixman/pixman-combine32.c
+++ b/pixman/pixman/pixman-combine32.c
@@ -82,7 +82,6 @@ combine_mask_alpha_ca (const uint32_t *src, uint32_t *mask)
if (a == ~0)
{
- x = x >> A_SHIFT;
x |= x << G_SHIFT;
x |= x << R_SHIFT;
*(mask) = x;
@@ -1614,17 +1613,14 @@ combine_over_ca (pixman_implementation_t *imp,
combine_mask_ca (&s, &m);
a = ~m;
- if (a != ~0)
+ if (a)
{
- if (a)
- {
- uint32_t d = *(dest + i);
- UN8x4_MUL_UN8x4_ADD_UN8x4 (d, a, s);
- s = d;
- }
-
- *(dest + i) = s;
+ uint32_t d = *(dest + i);
+ UN8x4_MUL_UN8x4_ADD_UN8x4 (d, a, s);
+ s = d;
}
+
+ *(dest + i) = s;
}
}
@@ -1648,10 +1644,8 @@ combine_over_reverse_ca (pixman_implementation_t *imp,
uint32_t s = *(src + i);
uint32_t m = *(mask + i);
- combine_mask_value_ca (&s, &m);
-
- if (a != MASK)
- UN8x4_MUL_UN8_ADD_UN8x4 (s, a, d);
+ UN8x4_MUL_UN8x4 (s, m);
+ UN8x4_MUL_UN8_ADD_UN8x4 (s, a, d);
*(dest + i) = s;
}
diff --git a/pixman/pixman/pixman-combine32.h b/pixman/pixman/pixman-combine32.h
index 9a2875e12..4c2f4cdf0 100644
--- a/pixman/pixman/pixman-combine32.h
+++ b/pixman/pixman/pixman-combine32.h
@@ -47,19 +47,56 @@
*/
/*
+ * x_rb = (x_rb * a) / 255
+ */
+#define UN8_rb_MUL_UN8(x, a, t) \
+ do \
+ { \
+ t = ((x) & RB_MASK) * (a); \
+ t += RB_ONE_HALF; \
+ x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
+ x &= RB_MASK; \
+ } while (0)
+
+/*
+ * x_rb = min (x_rb + y_rb, 255)
+ */
+#define UN8_rb_ADD_UN8_rb(x, y, t) \
+ do \
+ { \
+ t = ((x) + (y)); \
+ t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
+ x = (t & RB_MASK); \
+ } while (0)
+
+/*
+ * x_rb = (x_rb * a_rb) / 255
+ */
+#define UN8_rb_MUL_UN8_rb(x, a, t) \
+ do \
+ { \
+ t = (x & MASK) * (a & MASK); \
+ t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
+ t += RB_ONE_HALF; \
+ t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
+ x = t & RB_MASK; \
+ } while (0)
+
+/*
* x_c = (x_c * a) / 255
*/
#define UN8x4_MUL_UN8(x, a) \
do \
{ \
- uint32_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
- t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- t &= RB_MASK; \
- \
- x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
- x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
- x &= RB_MASK << COMPONENT_SIZE; \
- x += t; \
+ uint32_t r1, r2, t; \
+ \
+ r1 = (x); \
+ UN8_rb_MUL_UN8 (r1, a, t); \
+ \
+ r2 = (x) >> G_SHIFT; \
+ UN8_rb_MUL_UN8 (r2, a, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -68,33 +105,19 @@
#define UN8x4_MUL_UN8_ADD_UN8x4(x, a, y) \
do \
{ \
- /* multiply and divide: trunc((i + 128)*257/65536) */ \
- uint32_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
- t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- t &= RB_MASK; \
- \
- /* add */ \
- t += y & RB_MASK; \
- \
- /* saturate */ \
- t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
- t &= RB_MASK; \
- \
- /* multiply and divide */ \
- x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
- x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- x &= RB_MASK; \
- \
- /* add */ \
- x += (y >> COMPONENT_SIZE) & RB_MASK; \
- \
- /* saturate */ \
- x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
- x &= RB_MASK; \
- \
- /* recombine */ \
- x <<= COMPONENT_SIZE; \
- x += t; \
+ uint32_t r1, r2, r3, t; \
+ \
+ r1 = (x); \
+ r2 = (y) & RB_MASK; \
+ UN8_rb_MUL_UN8 (r1, a, t); \
+ UN8_rb_ADD_UN8_rb (r1, r2, t); \
+ \
+ r2 = (x) >> G_SHIFT; \
+ r3 = ((y) >> G_SHIFT) & RB_MASK; \
+ UN8_rb_MUL_UN8 (r2, a, t); \
+ UN8_rb_ADD_UN8_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -103,32 +126,21 @@
#define UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8(x, a, y, b) \
do \
{ \
- uint32_t t; \
- uint32_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- t = (x & G_MASK) * a + (y & G_MASK) * b; \
- t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
- t >>= R_SHIFT; \
- \
- t |= r << R_SHIFT; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- t <<= G_SHIFT; \
- \
- r = ((x >> R_SHIFT) & MASK) * a + \
- ((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
- x += (x >> G_SHIFT); \
- x >>= G_SHIFT; \
- x |= r << R_SHIFT; \
- x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
- x &= RB_MASK; \
- x |= t; \
+ uint32_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = y; \
+ UN8_rb_MUL_UN8 (r1, a, t); \
+ UN8_rb_MUL_UN8 (r2, b, t); \
+ UN8_rb_ADD_UN8_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT); \
+ r3 = (y >> G_SHIFT); \
+ UN8_rb_MUL_UN8 (r2, a, t); \
+ UN8_rb_MUL_UN8 (r3, b, t); \
+ UN8_rb_ADD_UN8_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -137,19 +149,17 @@
#define UN8x4_MUL_UN8x4(x, a) \
do \
{ \
- uint32_t t; \
- uint32_t r = (x & MASK) * (a & MASK); \
- r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
- r += RB_ONE_HALF; \
- r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- r &= RB_MASK; \
- \
- x >>= G_SHIFT; \
- t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
- t |= (x & R_MASK) * (a >> A_SHIFT); \
- t += RB_ONE_HALF; \
- t = t + ((t >> G_SHIFT) & RB_MASK); \
- x = r | (t & AG_MASK); \
+ uint32_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UN8_rb_MUL_UN8_rb (r1, r2, t); \
+ \
+ r2 = x >> G_SHIFT; \
+ r3 = a >> G_SHIFT; \
+ UN8_rb_MUL_UN8_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -158,26 +168,21 @@
#define UN8x4_MUL_UN8x4_ADD_UN8x4(x, a, y) \
do \
{ \
- uint32_t t; \
- uint32_t r = (x & MASK) * (a & MASK); \
- r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
- r += RB_ONE_HALF; \
- r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- r &= RB_MASK; \
- r += y & RB_MASK; \
- r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
- r &= RB_MASK; \
- \
- x >>= G_SHIFT; \
- t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
- t |= (x & R_MASK) * (a >> A_SHIFT); \
- t += RB_ONE_HALF; \
- t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- t &= RB_MASK; \
- t += (y >> G_SHIFT) & RB_MASK; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- x = r | (t << G_SHIFT); \
+ uint32_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UN8_rb_MUL_UN8_rb (r1, r2, t); \
+ r2 = y & RB_MASK; \
+ UN8_rb_ADD_UN8_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT); \
+ r3 = (a >> G_SHIFT); \
+ UN8_rb_MUL_UN8_rb (r2, r3, t); \
+ r3 = (y >> G_SHIFT) & RB_MASK; \
+ UN8_rb_ADD_UN8_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -186,33 +191,23 @@
#define UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8(x, a, y, b) \
do \
{ \
- uint32_t t; \
- uint32_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
- (y >> A_SHIFT) * b; \
- r += (r >> G_SHIFT) + ONE_HALF; \
- r >>= G_SHIFT; \
- \
- t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
- t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
- t >>= R_SHIFT; \
- \
- t |= r << R_SHIFT; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- t <<= G_SHIFT; \
- \
- r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
- ((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
- x += (x >> G_SHIFT); \
- x >>= G_SHIFT; \
- x |= r << R_SHIFT; \
- x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
- x &= RB_MASK; \
- x |= t; \
+ uint32_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UN8_rb_MUL_UN8_rb (r1, r2, t); \
+ r2 = y; \
+ UN8_rb_MUL_UN8 (r2, b, t); \
+ UN8_rb_ADD_UN8_rb (r1, r2, t); \
+ \
+ r2 = x >> G_SHIFT; \
+ r3 = a >> G_SHIFT; \
+ UN8_rb_MUL_UN8_rb (r2, r3, t); \
+ r3 = y >> G_SHIFT; \
+ UN8_rb_MUL_UN8 (r3, b, t); \
+ UN8_rb_ADD_UN8_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -221,13 +216,15 @@
#define UN8x4_ADD_UN8x4(x, y) \
do \
{ \
- uint32_t t; \
- uint32_t r = (x & RB_MASK) + (y & RB_MASK); \
- r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
- r &= RB_MASK; \
- \
- t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- r |= (t & RB_MASK) << G_SHIFT; \
- x = r; \
+ uint32_t r1, r2, r3, t; \
+ \
+ r1 = x & RB_MASK; \
+ r2 = y & RB_MASK; \
+ UN8_rb_ADD_UN8_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT) & RB_MASK; \
+ r3 = (y >> G_SHIFT) & RB_MASK; \
+ UN8_rb_ADD_UN8_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
diff --git a/pixman/pixman/pixman-combine64.c b/pixman/pixman/pixman-combine64.c
index 5c9803319..03e271b95 100644
--- a/pixman/pixman/pixman-combine64.c
+++ b/pixman/pixman/pixman-combine64.c
@@ -82,7 +82,6 @@ combine_mask_alpha_ca (const uint64_t *src, uint64_t *mask)
if (a == ~0)
{
- x = x >> A_SHIFT;
x |= x << G_SHIFT;
x |= x << R_SHIFT;
*(mask) = x;
@@ -1614,17 +1613,14 @@ combine_over_ca (pixman_implementation_t *imp,
combine_mask_ca (&s, &m);
a = ~m;
- if (a != ~0)
+ if (a)
{
- if (a)
- {
- uint64_t d = *(dest + i);
- UN16x4_MUL_UN16x4_ADD_UN16x4 (d, a, s);
- s = d;
- }
-
- *(dest + i) = s;
+ uint64_t d = *(dest + i);
+ UN16x4_MUL_UN16x4_ADD_UN16x4 (d, a, s);
+ s = d;
}
+
+ *(dest + i) = s;
}
}
@@ -1648,10 +1644,8 @@ combine_over_reverse_ca (pixman_implementation_t *imp,
uint64_t s = *(src + i);
uint64_t m = *(mask + i);
- combine_mask_value_ca (&s, &m);
-
- if (a != MASK)
- UN16x4_MUL_UN16_ADD_UN16x4 (s, a, d);
+ UN16x4_MUL_UN16x4 (s, m);
+ UN16x4_MUL_UN16_ADD_UN16x4 (s, a, d);
*(dest + i) = s;
}
diff --git a/pixman/pixman/pixman-combine64.h b/pixman/pixman/pixman-combine64.h
index e5ecef3a1..bb222a4d7 100644
--- a/pixman/pixman/pixman-combine64.h
+++ b/pixman/pixman/pixman-combine64.h
@@ -47,19 +47,56 @@
*/
/*
+ * x_rb = (x_rb * a) / 255
+ */
+#define UN16_rb_MUL_UN16(x, a, t) \
+ do \
+ { \
+ t = ((x) & RB_MASK) * (a); \
+ t += RB_ONE_HALF; \
+ x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
+ x &= RB_MASK; \
+ } while (0)
+
+/*
+ * x_rb = min (x_rb + y_rb, 255)
+ */
+#define UN16_rb_ADD_UN16_rb(x, y, t) \
+ do \
+ { \
+ t = ((x) + (y)); \
+ t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
+ x = (t & RB_MASK); \
+ } while (0)
+
+/*
+ * x_rb = (x_rb * a_rb) / 255
+ */
+#define UN16_rb_MUL_UN16_rb(x, a, t) \
+ do \
+ { \
+ t = (x & MASK) * (a & MASK); \
+ t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
+ t += RB_ONE_HALF; \
+ t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
+ x = t & RB_MASK; \
+ } while (0)
+
+/*
* x_c = (x_c * a) / 255
*/
#define UN16x4_MUL_UN16(x, a) \
do \
{ \
- uint64_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
- t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- t &= RB_MASK; \
- \
- x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
- x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
- x &= RB_MASK << COMPONENT_SIZE; \
- x += t; \
+ uint64_t r1, r2, t; \
+ \
+ r1 = (x); \
+ UN16_rb_MUL_UN16 (r1, a, t); \
+ \
+ r2 = (x) >> G_SHIFT; \
+ UN16_rb_MUL_UN16 (r2, a, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -68,33 +105,19 @@
#define UN16x4_MUL_UN16_ADD_UN16x4(x, a, y) \
do \
{ \
- /* multiply and divide: trunc((i + 128)*257/65536) */ \
- uint64_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
- t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- t &= RB_MASK; \
- \
- /* add */ \
- t += y & RB_MASK; \
- \
- /* saturate */ \
- t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
- t &= RB_MASK; \
- \
- /* multiply and divide */ \
- x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
- x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
- x &= RB_MASK; \
- \
- /* add */ \
- x += (y >> COMPONENT_SIZE) & RB_MASK; \
- \
- /* saturate */ \
- x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
- x &= RB_MASK; \
- \
- /* recombine */ \
- x <<= COMPONENT_SIZE; \
- x += t; \
+ uint64_t r1, r2, r3, t; \
+ \
+ r1 = (x); \
+ r2 = (y) & RB_MASK; \
+ UN16_rb_MUL_UN16 (r1, a, t); \
+ UN16_rb_ADD_UN16_rb (r1, r2, t); \
+ \
+ r2 = (x) >> G_SHIFT; \
+ r3 = ((y) >> G_SHIFT) & RB_MASK; \
+ UN16_rb_MUL_UN16 (r2, a, t); \
+ UN16_rb_ADD_UN16_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -103,32 +126,21 @@
#define UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16(x, a, y, b) \
do \
{ \
- uint64_t t; \
- uint64_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- t = (x & G_MASK) * a + (y & G_MASK) * b; \
- t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
- t >>= R_SHIFT; \
- \
- t |= r << R_SHIFT; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- t <<= G_SHIFT; \
- \
- r = ((x >> R_SHIFT) & MASK) * a + \
- ((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
- x += (x >> G_SHIFT); \
- x >>= G_SHIFT; \
- x |= r << R_SHIFT; \
- x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
- x &= RB_MASK; \
- x |= t; \
+ uint64_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = y; \
+ UN16_rb_MUL_UN16 (r1, a, t); \
+ UN16_rb_MUL_UN16 (r2, b, t); \
+ UN16_rb_ADD_UN16_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT); \
+ r3 = (y >> G_SHIFT); \
+ UN16_rb_MUL_UN16 (r2, a, t); \
+ UN16_rb_MUL_UN16 (r3, b, t); \
+ UN16_rb_ADD_UN16_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -137,19 +149,17 @@
#define UN16x4_MUL_UN16x4(x, a) \
do \
{ \
- uint64_t t; \
- uint64_t r = (x & MASK) * (a & MASK); \
- r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
- r += RB_ONE_HALF; \
- r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- r &= RB_MASK; \
- \
- x >>= G_SHIFT; \
- t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
- t |= (x & R_MASK) * (a >> A_SHIFT); \
- t += RB_ONE_HALF; \
- t = t + ((t >> G_SHIFT) & RB_MASK); \
- x = r | (t & AG_MASK); \
+ uint64_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UN16_rb_MUL_UN16_rb (r1, r2, t); \
+ \
+ r2 = x >> G_SHIFT; \
+ r3 = a >> G_SHIFT; \
+ UN16_rb_MUL_UN16_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -158,26 +168,21 @@
#define UN16x4_MUL_UN16x4_ADD_UN16x4(x, a, y) \
do \
{ \
- uint64_t t; \
- uint64_t r = (x & MASK) * (a & MASK); \
- r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
- r += RB_ONE_HALF; \
- r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- r &= RB_MASK; \
- r += y & RB_MASK; \
- r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
- r &= RB_MASK; \
- \
- x >>= G_SHIFT; \
- t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
- t |= (x & R_MASK) * (a >> A_SHIFT); \
- t += RB_ONE_HALF; \
- t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
- t &= RB_MASK; \
- t += (y >> G_SHIFT) & RB_MASK; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- x = r | (t << G_SHIFT); \
+ uint64_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UN16_rb_MUL_UN16_rb (r1, r2, t); \
+ r2 = y & RB_MASK; \
+ UN16_rb_ADD_UN16_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT); \
+ r3 = (a >> G_SHIFT); \
+ UN16_rb_MUL_UN16_rb (r2, r3, t); \
+ r3 = (y >> G_SHIFT) & RB_MASK; \
+ UN16_rb_ADD_UN16_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -186,33 +191,23 @@
#define UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16(x, a, y, b) \
do \
{ \
- uint64_t t; \
- uint64_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
- (y >> A_SHIFT) * b; \
- r += (r >> G_SHIFT) + ONE_HALF; \
- r >>= G_SHIFT; \
- \
- t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
- t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
- t >>= R_SHIFT; \
- \
- t |= r << R_SHIFT; \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- t &= RB_MASK; \
- t <<= G_SHIFT; \
- \
- r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
- ((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
- r += (r >> G_SHIFT); \
- r >>= G_SHIFT; \
- \
- x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
- x += (x >> G_SHIFT); \
- x >>= G_SHIFT; \
- x |= r << R_SHIFT; \
- x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
- x &= RB_MASK; \
- x |= t; \
+ uint64_t r1, r2, r3, t; \
+ \
+ r1 = x; \
+ r2 = a; \
+ UN16_rb_MUL_UN16_rb (r1, r2, t); \
+ r2 = y; \
+ UN16_rb_MUL_UN16 (r2, b, t); \
+ UN16_rb_ADD_UN16_rb (r1, r2, t); \
+ \
+ r2 = x >> G_SHIFT; \
+ r3 = a >> G_SHIFT; \
+ UN16_rb_MUL_UN16_rb (r2, r3, t); \
+ r3 = y >> G_SHIFT; \
+ UN16_rb_MUL_UN16 (r3, b, t); \
+ UN16_rb_ADD_UN16_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
/*
@@ -221,13 +216,15 @@
#define UN16x4_ADD_UN16x4(x, y) \
do \
{ \
- uint64_t t; \
- uint64_t r = (x & RB_MASK) + (y & RB_MASK); \
- r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
- r &= RB_MASK; \
- \
- t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
- t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
- r |= (t & RB_MASK) << G_SHIFT; \
- x = r; \
+ uint64_t r1, r2, r3, t; \
+ \
+ r1 = x & RB_MASK; \
+ r2 = y & RB_MASK; \
+ UN16_rb_ADD_UN16_rb (r1, r2, t); \
+ \
+ r2 = (x >> G_SHIFT) & RB_MASK; \
+ r3 = (y >> G_SHIFT) & RB_MASK; \
+ UN16_rb_ADD_UN16_rb (r2, r3, t); \
+ \
+ x = r1 | (r2 << G_SHIFT); \
} while (0)
diff --git a/pixman/pixman/pixman-conical-gradient.c b/pixman/pixman/pixman-conical-gradient.c
index 6a4e31ebf..d720db3d4 100644
--- a/pixman/pixman/pixman-conical-gradient.c
+++ b/pixman/pixman/pixman-conical-gradient.c
@@ -175,8 +175,6 @@ pixman_image_create_conical_gradient (pixman_point_fixed_t * center,
image->common.property_changed = conical_gradient_property_changed;
- conical_gradient_property_changed (image);
-
return image;
}
diff --git a/pixman/pixman/pixman-fast-path.c b/pixman/pixman/pixman-fast-path.c
index d2ce26fe3..7f805780f 100644
--- a/pixman/pixman/pixman-fast-path.c
+++ b/pixman/pixman/pixman-fast-path.c
@@ -348,6 +348,67 @@ fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
}
static void
+fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca, s;
+ uint32_t *dst_line, *dst, d;
+ uint32_t *mask_line, *mask, ma;
+ int dst_stride, mask_stride;
+ uint16_t w;
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ while (w--)
+ {
+ ma = *mask++;
+
+ if (ma == 0xffffffff && srca == 0xff)
+ {
+ *dst = src;
+ }
+ else if (ma)
+ {
+ d = *dst;
+ s = src;
+
+ UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);
+
+ *dst = s;
+ }
+
+ dst++;
+ }
+ }
+}
+
+static void
fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src_image,
@@ -362,7 +423,7 @@ fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
int32_t width,
int32_t height)
{
- uint32_t src, srca;
+ uint32_t src, srca, s;
uint32_t *dst_line, *dst, d;
uint32_t *mask_line, *mask, ma;
int dst_stride, mask_stride;
@@ -398,11 +459,12 @@ fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
else if (ma)
{
d = *dst;
+ s = src;
- UN8x4_MUL_UN8x4 (src, ma);
+ UN8x4_MUL_UN8x4 (s, ma);
UN8x4_MUL_UN8 (ma, srca);
ma = ~ma;
- UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, src);
+ UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
*dst = d;
}
@@ -558,11 +620,11 @@ fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
int32_t width,
int32_t height)
{
- uint32_t src, srca;
- uint16_t src16;
- uint16_t *dst_line, *dst;
- uint32_t d;
- uint32_t *mask_line, *mask, ma;
+ uint32_t src, srca, s;
+ uint16_t src16;
+ uint16_t *dst_line, *dst;
+ uint32_t d;
+ uint32_t *mask_line, *mask, ma;
int dst_stride, mask_stride;
uint16_t w;
@@ -606,10 +668,12 @@ fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
d = *dst;
d = CONVERT_0565_TO_0888 (d);
- UN8x4_MUL_UN8x4 (src, ma);
+ s = src;
+
+ UN8x4_MUL_UN8x4 (s, ma);
UN8x4_MUL_UN8 (ma, srca);
ma = ~ma;
- UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, src);
+ UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
*dst = CONVERT_8888_TO_0565 (d);
}
@@ -1064,6 +1128,7 @@ static const pixman_fast_path_t c_fast_paths[] =
{ PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_add_8888_8888, 0 },
{ PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_add_8888_8888, 0 },
{ PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_add_8000_8000, 0 },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_composite_add_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
{ PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_add_8888_8_8, 0 },
{ PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_solid_fill, 0 },
{ PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_solid_fill, 0 },
diff --git a/pixman/pixman/pixman-general.c b/pixman/pixman/pixman-general.c
index 6414b01a5..3ead3dac7 100644
--- a/pixman/pixman/pixman-general.c
+++ b/pixman/pixman/pixman-general.c
@@ -73,7 +73,7 @@ general_composite_rect (pixman_implementation_t *imp,
fetch_scanline_t fetch_src = NULL, fetch_mask = NULL, fetch_dest = NULL;
pixman_combine_32_func_t compose;
store_scanline_t store;
- source_pict_class_t src_class, mask_class;
+ source_image_class_t src_class, mask_class;
pixman_bool_t component_alpha;
uint32_t *bits;
int32_t stride;
diff --git a/pixman/pixman/pixman-image.c b/pixman/pixman/pixman-image.c
index 8b4fb6488..583195377 100644
--- a/pixman/pixman/pixman-image.c
+++ b/pixman/pixman/pixman-image.c
@@ -62,7 +62,7 @@ _pixman_init_gradient (gradient_t * gradient,
* depth, but that's a project for the future.
*/
void
-_pixman_image_get_scanline_generic_64 (pixman_image_t * pict,
+_pixman_image_get_scanline_generic_64 (pixman_image_t * image,
int x,
int y,
int width,
@@ -85,7 +85,7 @@ _pixman_image_get_scanline_generic_64 (pixman_image_t * pict,
}
/* Fetch the source image into the first half of buffer. */
- _pixman_image_get_scanline_32 (pict, x, y, width, (uint32_t*)buffer, mask8,
+ _pixman_image_get_scanline_32 (image, x, y, width, (uint32_t*)buffer, mask8,
mask_bits);
/* Expand from 32bpp to 64bpp in place. */
@@ -120,12 +120,13 @@ _pixman_image_allocate (void)
common->destroy_func = NULL;
common->destroy_data = NULL;
common->need_workaround = FALSE;
+ common->dirty = TRUE;
}
return image;
}
-source_pict_class_t
+source_image_class_t
_pixman_image_classify (pixman_image_t *image,
int x,
int y,
@@ -168,7 +169,7 @@ _pixman_image_get_scanline_64 (pixman_image_t *image,
static void
image_property_changed (pixman_image_t *image)
{
- image->common.property_changed (image);
+ image->common.dirty = TRUE;
}
/* Ref Counting */
@@ -238,6 +239,16 @@ _pixman_image_reset_clip_region (pixman_image_t *image)
image->common.have_clip_region = FALSE;
}
+void
+_pixman_image_validate (pixman_image_t *image)
+{
+ if (image->common.dirty)
+ {
+ image->common.property_changed (image);
+ image->common.dirty = FALSE;
+ }
+}
+
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_clip_region32 (pixman_image_t * image,
pixman_region32_t *region)
diff --git a/pixman/pixman/pixman-linear-gradient.c b/pixman/pixman/pixman-linear-gradient.c
index b940d76ec..d9409fe50 100644
--- a/pixman/pixman/pixman-linear-gradient.c
+++ b/pixman/pixman/pixman-linear-gradient.c
@@ -30,7 +30,7 @@
#include <stdlib.h>
#include "pixman-private.h"
-static source_pict_class_t
+static source_image_class_t
linear_gradient_classify (pixman_image_t *image,
int x,
int y,
@@ -289,8 +289,6 @@ pixman_image_create_linear_gradient (pixman_point_fixed_t * p1,
image->common.classify = linear_gradient_classify;
image->common.property_changed = linear_gradient_property_changed;
- linear_gradient_property_changed (image);
-
return image;
}
diff --git a/pixman/pixman/pixman-mmx.c b/pixman/pixman/pixman-mmx.c
index 7d4ec4f2e..7dcc1dc96 100644
--- a/pixman/pixman/pixman-mmx.c
+++ b/pixman/pixman/pixman-mmx.c
@@ -400,25 +400,18 @@ pack_565 (__m64 pixel, __m64 target, int pos)
static force_inline __m64
pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
{
- x = _mm_mullo_pi16 (x, a);
- y = _mm_mullo_pi16 (y, b);
- x = _mm_adds_pu16 (x, MC (4x0080));
- x = _mm_adds_pu16 (x, y);
- x = _mm_adds_pu16 (x, _mm_srli_pi16 (x, 8));
- x = _mm_srli_pi16 (x, 8);
+ x = pix_multiply (x, a);
+ y = pix_multiply (y, b);
- return x;
+ return pix_add (x, y);
}
#else
#define pix_add_mul(x, a, y, b) \
- ( x = _mm_mullo_pi16 (x, a), \
- y = _mm_mullo_pi16 (y, b), \
- x = _mm_adds_pu16 (x, MC (4x0080)), \
- x = _mm_adds_pu16 (x, y), \
- x = _mm_adds_pu16 (x, _mm_srli_pi16 (x, 8)), \
- _mm_srli_pi16 (x, 8) )
+ ( x = pix_multiply (x, a), \
+    y = pix_multiply (y, b), \
+ pix_add (x, y) )
#endif
@@ -1117,7 +1110,7 @@ mmx_composite_over_n_8888 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
- if (src >> 24 == 0)
+ if (src == 0)
return;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
@@ -1196,7 +1189,7 @@ mmx_composite_over_n_0565 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
- if (src >> 24 == 0)
+ if (src == 0)
return;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
@@ -1285,7 +1278,7 @@ mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
srca = src >> 24;
- if (srca == 0)
+ if (src == 0)
return;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
@@ -1772,7 +1765,7 @@ mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
srca = src >> 24;
- if (srca == 0)
+ if (src == 0)
return;
srcsrc = (uint64_t)src << 32 | src;
@@ -2054,7 +2047,7 @@ mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
srca = src >> 24;
- if (srca == 0)
+ if (src == 0)
{
pixman_fill_mmx (dst_image->bits.bits, dst_image->bits.rowstride,
PIXMAN_FORMAT_BPP (dst_image->bits.format),
@@ -2189,7 +2182,7 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
srca = src >> 24;
- if (srca == 0)
+ if (src == 0)
return;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
@@ -2548,7 +2541,7 @@ mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
srca = src >> 24;
- if (srca == 0)
+ if (src == 0)
return;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
@@ -2659,8 +2652,6 @@ mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
sa = src >> 24;
- if (sa == 0)
- return;
vsrc = load8888 (src);
vsrca = expand_alpha (vsrc);
@@ -2780,19 +2771,19 @@ mmx_composite_in_8_8 (pixman_implementation_t *imp,
}
static void
-mmx_composite_add_8888_8_8 (pixman_implementation_t *imp,
- pixman_op_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int32_t src_x,
- int32_t src_y,
- int32_t mask_x,
- int32_t mask_y,
- int32_t dest_x,
- int32_t dest_y,
- int32_t width,
- int32_t height)
+mmx_composite_add_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
{
uint8_t *dst_line, *dst;
uint8_t *mask_line, *mask;
@@ -2808,7 +2799,8 @@ mmx_composite_add_8888_8_8 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
sa = src >> 24;
- if (sa == 0)
+
+ if (src == 0)
return;
vsrc = load8888 (src);
@@ -3285,7 +3277,7 @@ static const pixman_fast_path_t mmx_fast_paths[] =
{ PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, mmx_composite_add_8888_8888, 0 },
{ PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, mmx_composite_add_8888_8888, 0 },
{ PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, mmx_composite_add_8000_8000, 0 },
- { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_add_8888_8_8, 0 },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, mmx_composite_add_n_8_8, 0 },
{ PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, mmx_composite_src_n_8_8888, 0 },
{ PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, mmx_composite_src_n_8_8888, 0 },
diff --git a/pixman/pixman/pixman-private.h b/pixman/pixman/pixman-private.h
index 052f3a98d..ff7a65f88 100644
--- a/pixman/pixman/pixman-private.h
+++ b/pixman/pixman/pixman-private.h
@@ -26,7 +26,7 @@ typedef struct radial_gradient radial_gradient_t;
typedef struct bits_image bits_image_t;
typedef struct circle circle_t;
-typedef void (*fetch_scanline_t) (pixman_image_t *pict,
+typedef void (*fetch_scanline_t) (pixman_image_t *image,
int x,
int y,
int width,
@@ -34,9 +34,13 @@ typedef void (*fetch_scanline_t) (pixman_image_t *pict,
const uint32_t *mask,
uint32_t mask_bits);
-typedef void (*fetch_pixels_t) (bits_image_t * image,
- uint32_t * buffer,
- int n_pixels);
+typedef uint32_t (*fetch_pixel_32_t) (bits_image_t *image,
+ int x,
+ int y);
+
+typedef uint64_t (*fetch_pixel_64_t) (bits_image_t *image,
+ int x,
+ int y);
typedef void (*store_scanline_t) (bits_image_t * image,
int x,
@@ -58,9 +62,9 @@ typedef enum
SOURCE_IMAGE_CLASS_UNKNOWN,
SOURCE_IMAGE_CLASS_HORIZONTAL,
SOURCE_IMAGE_CLASS_VERTICAL,
-} source_pict_class_t;
+} source_image_class_t;
-typedef source_pict_class_t (*classify_func_t) (pixman_image_t *image,
+typedef source_image_class_t (*classify_func_t) (pixman_image_t *image,
int x,
int y,
int width,
@@ -78,6 +82,7 @@ struct image_common
pixman_bool_t clip_sources; /* Whether the clip applies when
* the image is used as a source
*/
+ pixman_bool_t dirty;
pixman_bool_t need_workaround;
pixman_transform_t * transform;
pixman_repeat_t repeat;
@@ -100,7 +105,7 @@ struct image_common
struct source_image
{
image_common_t common;
- source_pict_class_t class;
+ source_image_class_t class;
};
struct solid_fill
@@ -163,9 +168,13 @@ struct bits_image
uint32_t * free_me;
int rowstride; /* in number of uint32_t's */
- /* Fetch raw pixels, with no regard for transformations, alpha map etc. */
- fetch_pixels_t fetch_pixels_raw_32;
- fetch_pixels_t fetch_pixels_raw_64;
+ /* Fetch a pixel, disregarding alpha maps, transformations etc. */
+ fetch_pixel_32_t fetch_pixel_raw_32;
+ fetch_pixel_64_t fetch_pixel_raw_64;
+
+ /* Fetch a pixel, taking alpha maps into account */
+ fetch_pixel_32_t fetch_pixel_32;
+ fetch_pixel_64_t fetch_pixel_64;
/* Fetch raw scanlines, with no regard for transformations, alpha maps etc. */
fetch_scanline_t fetch_scanline_raw_32;
@@ -202,7 +211,7 @@ void
_pixman_bits_image_setup_raw_accessors (bits_image_t *image);
void
-_pixman_image_get_scanline_generic_64 (pixman_image_t *pict,
+_pixman_image_get_scanline_generic_64 (pixman_image_t *image,
int x,
int y,
int width,
@@ -210,7 +219,7 @@ _pixman_image_get_scanline_generic_64 (pixman_image_t *pict,
const uint32_t *mask,
uint32_t mask_bits);
-source_pict_class_t
+source_image_class_t
_pixman_image_classify (pixman_image_t *image,
int x,
int y,
@@ -269,6 +278,9 @@ _pixman_init_gradient (gradient_t * gradient,
void
_pixman_image_reset_clip_region (pixman_image_t *image);
+void
+_pixman_image_validate (pixman_image_t *image);
+
pixman_bool_t
_pixman_image_is_opaque (pixman_image_t *image);
@@ -279,14 +291,14 @@ uint32_t
_pixman_image_get_solid (pixman_image_t * image,
pixman_format_code_t format);
-#define PIXMAN_IMAGE_GET_LINE(pict, x, y, type, out_stride, line, mul) \
+#define PIXMAN_IMAGE_GET_LINE(image, x, y, type, out_stride, line, mul) \
do \
{ \
uint32_t *__bits__; \
int __stride__; \
\
- __bits__ = pict->bits.bits; \
- __stride__ = pict->bits.rowstride; \
+ __bits__ = image->bits.bits; \
+ __stride__ = image->bits.rowstride; \
(out_stride) = \
__stride__ * (int) sizeof (uint32_t) / (int) sizeof (type); \
(line) = \
diff --git a/pixman/pixman/pixman-radial-gradient.c b/pixman/pixman/pixman-radial-gradient.c
index a3b591673..022157b9b 100644
--- a/pixman/pixman/pixman-radial-gradient.c
+++ b/pixman/pixman/pixman-radial-gradient.c
@@ -198,41 +198,64 @@ radial_gradient_get_scanline_32 (pixman_image_t *image,
if (affine)
{
+ /* When computing t over a scanline, we notice that some expressions
+ * are constant so we can compute them just once. Given:
+ *
+ * t = (-2·B ± ⎷(B² - 4·A·C)) / 2·A
+ *
+ * where
+ *
+ * A = cdx² + cdy² - dr² [precomputed as radial->A]
+ * B = -2·(pdx·cdx + pdy·cdy + r₁·dr)
+ * C = pdx² + pdy² - r₁²
+ *
+ * Since we have an affine transformation, we know that (pdx, pdy)
+ * increase linearly with each pixel,
+ *
+ * pdx = pdx₀ + n·cx,
+ * pdy = pdy₀ + n·cy,
+ *
+ * we can then express B in terms of a linear increment along
+ * the scanline:
+ *
+ * B = B₀ + n·cB, with
+ * B₀ = -2·(pdx₀·cdx + pdy₀·cdy + r₁·dr) and
+ * cB = -2·(cx·cdx + cy·cdy)
+ *
+ * Thus we can replace the full evaluation of B per-pixel (4 multiplies,
+ * 2 additions) with a single addition.
+ */
+ double r1 = radial->c1.radius / 65536.;
+ double r1sq = r1 * r1;
+ double pdx = rx - radial->c1.x / 65536.;
+ double pdy = ry - radial->c1.y / 65536.;
+ double A = radial->A;
+ double invA = -65536. / (2. * A);
+ double A4 = -4. * A;
+ double B = -2. * (pdx*radial->cdx + pdy*radial->cdy + r1*radial->dr);
+ double cB = -2. * (cx*radial->cdx + cy*radial->cdy);
+ pixman_bool_t invert = A * radial->dr < 0;
+
while (buffer < end)
{
if (!mask || *mask++ & mask_bits)
{
- double pdx, pdy;
- double B, C;
- double det;
- double c1x = radial->c1.x / 65536.0;
- double c1y = radial->c1.y / 65536.0;
- double r1 = radial->c1.radius / 65536.0;
pixman_fixed_48_16_t t;
-
- pdx = rx - c1x;
- pdy = ry - c1y;
-
- B = -2 * (pdx * radial->cdx +
- pdy * radial->cdy +
- r1 * radial->dr);
- C = pdx * pdx + pdy * pdy - r1 * r1;
-
- det = (B * B) - (4 * radial->A * C);
- if (det < 0.0)
- det = 0.0;
-
- if (radial->A < 0)
- t = (pixman_fixed_48_16_t) ((-B - sqrt (det)) / (2.0 * radial->A) * 65536);
+ double det = B * B + A4 * (pdx * pdx + pdy * pdy - r1sq);
+ if (det <= 0.)
+ t = (pixman_fixed_48_16_t) (B * invA);
+ else if (invert)
+ t = (pixman_fixed_48_16_t) ((B + sqrt (det)) * invA);
else
- t = (pixman_fixed_48_16_t) ((-B + sqrt (det)) / (2.0 * radial->A) * 65536);
+ t = (pixman_fixed_48_16_t) ((B - sqrt (det)) * invA);
*buffer = _pixman_gradient_walker_pixel (&walker, t);
}
++buffer;
- rx += cx;
- ry += cy;
+ pdx += cx;
+ pdy += cy;
+ B += cB;
}
}
else
@@ -273,7 +296,7 @@ radial_gradient_get_scanline_32 (pixman_image_t *image,
if (det < 0.0)
det = 0.0;
- if (radial->A < 0)
+ if (radial->A * radial->dr < 0)
t = (pixman_fixed_48_16_t) ((-B - sqrt (det)) / (2.0 * radial->A) * 65536);
else
t = (pixman_fixed_48_16_t) ((-B + sqrt (det)) / (2.0 * radial->A) * 65536);
@@ -340,8 +363,6 @@ pixman_image_create_radial_gradient (pixman_point_fixed_t * inner,
image->common.property_changed = radial_gradient_property_changed;
- radial_gradient_property_changed (image);
-
return image;
}
diff --git a/pixman/pixman/pixman-region.c b/pixman/pixman/pixman-region.c
index 387dbba7f..f88955f3b 100644
--- a/pixman/pixman/pixman-region.c
+++ b/pixman/pixman/pixman-region.c
@@ -64,17 +64,14 @@
#define PIXREGION_END(reg) PIXREGION_BOX (reg, (reg)->data->numRects - 1)
#define GOOD_RECT(rect) ((rect)->x1 < (rect)->x2 && (rect)->y1 < (rect)->y2)
+#define BAD_RECT(rect) ((rect)->x1 > (rect)->x2 || (rect)->y1 > (rect)->y2)
-#define PIXMAN_REGION_LOG_FAILURES
+#define noPIXMAN_REGION_LOG_FAILURES
-#if defined PIXMAN_REGION_DEBUG
-
-# define GOOD(reg) assert (PREFIX (_selfcheck) (reg))
-
-#elif defined PIXMAN_REGION_LOG_FAILURES
+#if defined PIXMAN_REGION_LOG_FAILURES || defined PIXMAN_REGION_DEBUG
static void
-log_region_error (void)
+log_region_error (const char *function, const char *message)
{
static int n_messages = 0;
@@ -82,8 +79,13 @@ log_region_error (void)
{
fprintf (stderr,
"*** BUG ***\n"
- "Malformed region detected\n"
- "Set a breakpoint on 'log_region_error' to debug\n\n");
+ "%s: %s\n"
+ "Set a breakpoint on 'log_region_error' to debug\n\n",
+ function, message);
+
+#if defined PIXMAN_REGION_DEBUG
+ abort ();
+#endif
n_messages++;
}
@@ -93,11 +95,12 @@ log_region_error (void)
do \
{ \
if (!PREFIX (_selfcheck (reg))) \
- log_region_error (); \
+ log_region_error (FUNC, "Malformed region " # reg); \
} while (0)
#else
+#define log_region_error(function, name)
#define GOOD(reg)
#endif
@@ -383,6 +386,8 @@ PREFIX (_init_rect) (region_type_t * region,
if (!GOOD_RECT (&region->extents))
{
+ if (BAD_RECT (&region->extents))
+ log_region_error (FUNC, "Invalid rectangle passed");
PREFIX (_init) (region);
return;
}
@@ -395,6 +400,8 @@ PREFIX (_init_with_extents) (region_type_t *region, box_type_t *extents)
{
if (!GOOD_RECT (extents))
{
+ if (BAD_RECT (extents))
+ log_region_error (FUNC, "Invalid rectangle passed");
PREFIX (_init) (region);
return;
}
@@ -1346,7 +1353,11 @@ PREFIX (_union_rect) (region_type_t *dest,
region.extents.y2 = y + height;
if (!GOOD_RECT (&region.extents))
+ {
+ if (BAD_RECT (&region.extents))
+ log_region_error (FUNC, "Invalid rectangle passed");
return PREFIX (_copy) (dest, source);
+ }
region.data = NULL;
diff --git a/pixman/pixman/pixman-solid-fill.c b/pixman/pixman/pixman-solid-fill.c
index 6a612272a..38675dca8 100644
--- a/pixman/pixman/pixman-solid-fill.c
+++ b/pixman/pixman/pixman-solid-fill.c
@@ -44,7 +44,7 @@ solid_fill_get_scanline_32 (pixman_image_t *image,
return;
}
-static source_pict_class_t
+static source_image_class_t
solid_fill_classify (pixman_image_t *image,
int x,
int y,
@@ -86,8 +86,6 @@ pixman_image_create_solid_fill (pixman_color_t *color)
img->common.classify = solid_fill_classify;
img->common.property_changed = solid_fill_property_changed;
- solid_fill_property_changed (img);
-
return img;
}
diff --git a/pixman/pixman/pixman-sse2.c b/pixman/pixman/pixman-sse2.c
index 2fa956e21..727ad42ec 100644
--- a/pixman/pixman/pixman-sse2.c
+++ b/pixman/pixman/pixman-sse2.c
@@ -264,19 +264,14 @@ pix_add_multiply_2x128 (__m128i* src_lo,
__m128i* ret_lo,
__m128i* ret_hi)
{
- __m128i lo, hi;
- __m128i mul_lo, mul_hi;
+ __m128i t1_lo, t1_hi;
+ __m128i t2_lo, t2_hi;
- lo = _mm_mullo_epi16 (*src_lo, *alpha_dst_lo);
- hi = _mm_mullo_epi16 (*src_hi, *alpha_dst_hi);
- mul_lo = _mm_mullo_epi16 (*dst_lo, *alpha_src_lo);
- mul_hi = _mm_mullo_epi16 (*dst_hi, *alpha_src_hi);
- lo = _mm_adds_epu16 (lo, mask_0080);
- hi = _mm_adds_epu16 (hi, mask_0080);
- lo = _mm_adds_epu16 (lo, mul_lo);
- hi = _mm_adds_epu16 (hi, mul_hi);
- *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
- *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
+ pix_multiply_2x128 (src_lo, src_hi, alpha_dst_lo, alpha_dst_hi, &t1_lo, &t1_hi);
+ pix_multiply_2x128 (dst_lo, dst_hi, alpha_src_lo, alpha_src_hi, &t2_lo, &t2_hi);
+
+ *ret_lo = _mm_adds_epu8 (t1_lo, t2_lo);
+ *ret_hi = _mm_adds_epu8 (t1_hi, t2_hi);
}
static force_inline void
@@ -457,11 +452,10 @@ pix_add_multiply_1x64 (__m64* src,
__m64* dst,
__m64* alpha_src)
{
- return _mm_mulhi_pu16 (
- _mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alpha_dst),
- mask_x0080),
- _mm_mullo_pi16 (*dst, *alpha_src)),
- mask_x0101);
+ __m64 t1 = pix_multiply_1x64 (*src, *alpha_dst);
+ __m64 t2 = pix_multiply_1x64 (*dst, *alpha_src);
+
+ return _mm_adds_pu8 (t1, t2);
}
static force_inline __m64
@@ -3098,6 +3092,145 @@ sse2_composite_over_n_0565 (pixman_implementation_t *imp,
_mm_empty ();
}
+/* ------------------------------
+ * composite_add_n_8888_8888_ca
+ */
+static void
+sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line, d;
+ uint32_t *mask_line, m;
+ uint32_t pack_cmp;
+ int dst_stride, mask_stride;
+
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_dst;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
+
+ src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+ srca = src >> 24;
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ xmm_src = _mm_unpacklo_epi8 (
+ create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+ mmx_src = _mm_movepi64_pi64 (xmm_src);
+ mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
+
+ while (height--)
+ {
+ int w = width;
+ const uint32_t *pm = (uint32_t *)mask_line;
+ uint32_t *pd = (uint32_t *)dst_line;
+
+ dst_line += dst_stride;
+ mask_line += mask_stride;
+
+ /* call prefetch hint to optimize cache load*/
+ cache_prefetch ((__m128i*)pd);
+ cache_prefetch ((__m128i*)pm);
+
+ while (w && (unsigned long)pd & 15)
+ {
+ m = *pm++;
+
+ if (m)
+ {
+ d = *pd;
+
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *pd = pack_1x64_32 (
+ _mm_adds_pu8 (pix_multiply_1x64 (mmx_mask, mmx_src), mmx_dest));
+ }
+
+ pd++;
+ w--;
+ }
+
+ /* call prefetch hint to optimize cache load*/
+ cache_prefetch ((__m128i*)pd);
+ cache_prefetch ((__m128i*)pm);
+
+ while (w >= 4)
+ {
+ /* fill cache line with next memory */
+ cache_prefetch_next ((__m128i*)pd);
+ cache_prefetch_next ((__m128i*)pm);
+
+ xmm_mask = load_128_unaligned ((__m128i*)pm);
+
+ pack_cmp =
+ _mm_movemask_epi8 (
+ _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
+
+ /* if all bits in mask are zero, pack_cmp is equal to 0xffff */
+ if (pack_cmp != 0xffff)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)pd);
+
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_multiply_2x128 (&xmm_src, &xmm_src,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+ xmm_mask_hi = pack_2x128_128 (xmm_mask_lo, xmm_mask_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, _mm_adds_epu8 (xmm_mask_hi, xmm_dst));
+ }
+
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ m = *pm++;
+
+ if (m)
+ {
+ d = *pd;
+
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *pd = pack_1x64_32 (
+ _mm_adds_pu8 (pix_multiply_1x64 (mmx_mask, mmx_src), mmx_dest));
+ }
+
+ pd++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
/* ---------------------------------------------------------------------------
* composite_over_n_8888_8888_ca
*/
@@ -4718,8 +4851,6 @@ sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
sa = src >> 24;
- if (sa == 0)
- return;
xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
@@ -4934,8 +5065,6 @@ sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
sa = src >> 24;
- if (sa == 0)
- return;
xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
@@ -5502,6 +5631,7 @@ static const pixman_fast_path_t sse2_fast_paths[] =
{ PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, sse2_composite_copy_area, 0 },
{ PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, sse2_composite_copy_area, 0 },
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_add_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
{ PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, sse2_composite_add_8000_8000, 0 },
{ PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888, 0 },
{ PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888, 0 },
diff --git a/pixman/pixman/pixman-trap.c b/pixman/pixman/pixman-trap.c
index 4d7a90a36..962cbb39e 100644
--- a/pixman/pixman/pixman-trap.c
+++ b/pixman/pixman/pixman-trap.c
@@ -243,6 +243,8 @@ pixman_add_traps (pixman_image_t * image,
pixman_edge_t l, r;
pixman_fixed_t t, b;
+ _pixman_image_validate (image);
+
width = image->bits.width;
height = image->bits.height;
bpp = PIXMAN_FORMAT_BPP (image->bits.format);
@@ -357,6 +359,8 @@ pixman_rasterize_trapezoid (pixman_image_t * image,
return_if_fail (image->type == BITS);
+ _pixman_image_validate (image);
+
if (!pixman_trapezoid_valid (trap))
return;
diff --git a/pixman/pixman/pixman-utils.c b/pixman/pixman/pixman-utils.c
index a98141885..71282062c 100644
--- a/pixman/pixman/pixman-utils.c
+++ b/pixman/pixman/pixman-utils.c
@@ -80,7 +80,7 @@ clip_general_image (pixman_region32_t * region,
static inline pixman_bool_t
clip_source_image (pixman_region32_t * region,
- pixman_image_t * picture,
+ pixman_image_t * image,
int dx,
int dy)
{
@@ -89,11 +89,11 @@ clip_source_image (pixman_region32_t * region,
* the clip was not set by a client, then it is a hierarchy
* clip and those should always be ignored for sources).
*/
- if (!picture->common.clip_sources || !picture->common.client_clip)
+ if (!image->common.clip_sources || !image->common.client_clip)
return TRUE;
return clip_general_image (region,
- &picture->common.clip_region,
+ &image->common.clip_region,
dx, dy);
}
@@ -663,20 +663,23 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths,
if (has_fast_path && src->type == BITS)
{
has_fast_path = !src->bits.read_func &&
- !src->bits.write_func;
+ !src->bits.write_func &&
+ !PIXMAN_FORMAT_IS_WIDE (src->bits.format);
}
}
if (mask && has_fast_path)
{
- has_fast_path = mask->type == BITS &&
- !mask->common.transform &&
- !mask->common.alpha_map &&
- !mask->bits.read_func &&
- !mask->bits.write_func &&
- mask->common.filter != PIXMAN_FILTER_CONVOLUTION &&
- mask->common.repeat != PIXMAN_REPEAT_PAD &&
- mask->common.repeat != PIXMAN_REPEAT_REFLECT;
+ has_fast_path =
+ mask->type == BITS &&
+ !mask->common.transform &&
+ !mask->common.alpha_map &&
+ !mask->bits.read_func &&
+ !mask->bits.write_func &&
+ mask->common.filter != PIXMAN_FILTER_CONVOLUTION &&
+ mask->common.repeat != PIXMAN_REPEAT_PAD &&
+ mask->common.repeat != PIXMAN_REPEAT_REFLECT &&
+ !PIXMAN_FORMAT_IS_WIDE (mask->bits.format);
}
if (has_fast_path)
diff --git a/pixman/pixman/pixman-version.h b/pixman/pixman/pixman-version.h
index c8d69eaa7..65df97052 100644
--- a/pixman/pixman/pixman-version.h
+++ b/pixman/pixman/pixman-version.h
@@ -32,10 +32,10 @@
#endif
#define PIXMAN_VERSION_MAJOR 0
-#define PIXMAN_VERSION_MINOR 15
-#define PIXMAN_VERSION_MICRO 18
+#define PIXMAN_VERSION_MINOR 16
+#define PIXMAN_VERSION_MICRO 0
-#define PIXMAN_VERSION_STRING "0.15.18"
+#define PIXMAN_VERSION_STRING "0.16.0"
#define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \
((major) * 10000) \
diff --git a/pixman/pixman/pixman-vmx.c b/pixman/pixman/pixman-vmx.c
index 6fc3cdea5..06325a7c0 100644
--- a/pixman/pixman/pixman-vmx.c
+++ b/pixman/pixman/pixman-vmx.c
@@ -45,24 +45,24 @@ static force_inline vector unsigned int
pix_multiply (vector unsigned int p, vector unsigned int a)
{
vector unsigned short hi, lo, mod;
-
+
/* unpack to short */
hi = (vector unsigned short)
vec_mergeh ((vector unsigned char)AVV (0),
(vector unsigned char)p);
-
+
mod = (vector unsigned short)
vec_mergeh ((vector unsigned char)AVV (0),
(vector unsigned char)a);
-
+
hi = vec_mladd (hi, mod, (vector unsigned short)
AVV (0x0080, 0x0080, 0x0080, 0x0080,
0x0080, 0x0080, 0x0080, 0x0080));
-
+
hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8)));
-
+
hi = vec_sr (hi, vec_splat_u16 (8));
-
+
/* unpack to short */
lo = (vector unsigned short)
vec_mergel ((vector unsigned char)AVV (0),
@@ -70,15 +70,15 @@ pix_multiply (vector unsigned int p, vector unsigned int a)
mod = (vector unsigned short)
vec_mergel ((vector unsigned char)AVV (0),
(vector unsigned char)a);
-
+
lo = vec_mladd (lo, mod, (vector unsigned short)
AVV (0x0080, 0x0080, 0x0080, 0x0080,
0x0080, 0x0080, 0x0080, 0x0080));
-
+
lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8)));
-
+
lo = vec_sr (lo, vec_splat_u16 (8));
-
+
return (vector unsigned int)vec_packsu (hi, lo);
}
@@ -95,56 +95,12 @@ pix_add_mul (vector unsigned int x,
vector unsigned int y,
vector unsigned int b)
{
- vector unsigned short hi, lo, mod, hiy, loy, mody;
-
- hi = (vector unsigned short)
- vec_mergeh ((vector unsigned char)AVV (0),
- (vector unsigned char)x);
- mod = (vector unsigned short)
- vec_mergeh ((vector unsigned char)AVV (0),
- (vector unsigned char)a);
- hiy = (vector unsigned short)
- vec_mergeh ((vector unsigned char)AVV (0),
- (vector unsigned char)y);
- mody = (vector unsigned short)
- vec_mergeh ((vector unsigned char)AVV (0),
- (vector unsigned char)b);
-
- hi = vec_mladd (hi, mod, (vector unsigned short)
- AVV (0x0080, 0x0080, 0x0080, 0x0080,
- 0x0080, 0x0080, 0x0080, 0x0080));
-
- hi = vec_mladd (hiy, mody, hi);
-
- hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8)));
-
- hi = vec_sr (hi, vec_splat_u16 (8));
-
- lo = (vector unsigned short)
- vec_mergel ((vector unsigned char)AVV (0),
- (vector unsigned char)x);
- mod = (vector unsigned short)
- vec_mergel ((vector unsigned char)AVV (0),
- (vector unsigned char)a);
-
- loy = (vector unsigned short)
- vec_mergel ((vector unsigned char)AVV (0),
- (vector unsigned char)y);
- mody = (vector unsigned short)
- vec_mergel ((vector unsigned char)AVV (0),
- (vector unsigned char)b);
-
- lo = vec_mladd (lo, mod, (vector unsigned short)
- AVV (0x0080, 0x0080, 0x0080, 0x0080,
- 0x0080, 0x0080, 0x0080, 0x0080));
-
- lo = vec_mladd (loy, mody, lo);
-
- lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8)));
-
- lo = vec_sr (lo, vec_splat_u16 (8));
-
- return (vector unsigned int)vec_packsu (hi, lo);
+ vector unsigned int t1, t2;
+
+ t1 = pix_multiply (x, a);
+ t2 = pix_multiply (y, b);
+
+ return pix_add (t1, t2);
}
static force_inline vector unsigned int
@@ -161,7 +117,7 @@ over (vector unsigned int src,
{
vector unsigned char tmp = (vector unsigned char)
pix_multiply (dest, negate (srca));
-
+
tmp = vec_adds ((vector unsigned char)src, tmp);
return (vector unsigned int)tmp;
}
@@ -235,31 +191,31 @@ vmx_combine_over_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
+
LOAD_VECTORS (dest, src);
-
+
vdest = over (vsrc, splat_alpha (vsrc), vdest);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t ia = ALPHA_8 (~s);
-
+
UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
-
+
dest[i] = d;
}
}
@@ -274,35 +230,34 @@ vmx_combine_over_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = over (vsrc, splat_alpha (vsrc), vdest);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t ia;
-
+
UN8x4_MUL_UN8 (s, m);
-
+
ia = ALPHA_8 (~s);
-
+
UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
dest[i] = d;
}
@@ -331,29 +286,29 @@ vmx_combine_over_reverse_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
+
LOAD_VECTORS (dest, src);
-
+
vdest = over (vdest, splat_alpha (vdest), vsrc);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t ia = ALPHA_8 (~dest[i]);
-
+
UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
dest[i] = s;
}
@@ -369,33 +324,33 @@ vmx_combine_over_reverse_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
+
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = over (vdest, splat_alpha (vdest), vsrc);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t ia = ALPHA_8 (~dest[i]);
-
+
UN8x4_MUL_UN8 (s, m);
-
+
UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
dest[i] = s;
}
@@ -424,28 +379,27 @@ vmx_combine_in_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_multiply (vsrc, splat_alpha (vdest));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
-
uint32_t s = src[i];
uint32_t a = ALPHA_8 (dest[i]);
+
UN8x4_MUL_UN8 (s, a);
dest[i] = s;
}
@@ -461,33 +415,32 @@ vmx_combine_in_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_multiply (vsrc, splat_alpha (vdest));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t s = src[i];
uint32_t a = ALPHA_8 (dest[i]);
-
+
UN8x4_MUL_UN8 (s, m);
-
UN8x4_MUL_UN8 (s, a);
+
dest[i] = s;
}
}
@@ -515,28 +468,29 @@ vmx_combine_in_reverse_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_multiply (vdest, splat_alpha (vsrc));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t d = dest[i];
uint32_t a = ALPHA_8 (src[i]);
+
UN8x4_MUL_UN8 (d, a);
+
dest[i] = d;
}
}
@@ -551,34 +505,33 @@ vmx_combine_in_reverse_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_multiply (vdest, splat_alpha (vsrc));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t d = dest[i];
uint32_t a = src[i];
-
+
UN8x4_MUL_UN8 (a, m);
-
a = ALPHA_8 (a);
UN8x4_MUL_UN8 (d, a);
+
dest[i] = d;
}
}
@@ -606,28 +559,29 @@ vmx_combine_out_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_multiply (vsrc, splat_alpha (negate (vdest)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t a = ALPHA_8 (~dest[i]);
+
UN8x4_MUL_UN8 (s, a);
+
dest[i] = s;
}
}
@@ -642,33 +596,32 @@ vmx_combine_out_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_multiply (vsrc, splat_alpha (negate (vdest)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t s = src[i];
uint32_t a = ALPHA_8 (~dest[i]);
-
+
UN8x4_MUL_UN8 (s, m);
-
UN8x4_MUL_UN8 (s, a);
+
dest[i] = s;
}
}
@@ -696,28 +649,30 @@ vmx_combine_out_reverse_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
+
LOAD_VECTORS (dest, src);
-
+
vdest = pix_multiply (vdest, splat_alpha (negate (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t d = dest[i];
uint32_t a = ALPHA_8 (~src[i]);
+
UN8x4_MUL_UN8 (d, a);
+
dest[i] = d;
}
}
@@ -732,34 +687,33 @@ vmx_combine_out_reverse_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_multiply (vdest, splat_alpha (negate (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t d = dest[i];
uint32_t a = src[i];
-
+
UN8x4_MUL_UN8 (a, m);
-
a = ALPHA_8 (~a);
UN8x4_MUL_UN8 (d, a);
+
dest[i] = d;
}
}
@@ -787,32 +741,32 @@ vmx_combine_atop_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_add_mul (vsrc, splat_alpha (vdest),
vdest, splat_alpha (negate (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t dest_a = ALPHA_8 (d);
uint32_t src_ia = ALPHA_8 (~s);
-
+
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia);
+
dest[i] = s;
}
}
@@ -827,25 +781,24 @@ vmx_combine_atop_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_add_mul (vsrc, splat_alpha (vdest),
vdest, splat_alpha (negate (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
@@ -853,12 +806,13 @@ vmx_combine_atop_u_mask (uint32_t * dest,
uint32_t d = dest[i];
uint32_t dest_a = ALPHA_8 (d);
uint32_t src_ia;
-
+
UN8x4_MUL_UN8 (s, m);
-
+
src_ia = ALPHA_8 (~s);
-
+
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia);
+
dest[i] = s;
}
}
@@ -886,32 +840,32 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_add_mul (vdest, splat_alpha (vsrc),
vsrc, splat_alpha (negate (vdest)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t src_a = ALPHA_8 (s);
uint32_t dest_ia = ALPHA_8 (~d);
-
+
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);
+
dest[i] = s;
}
}
@@ -926,25 +880,24 @@ vmx_combine_atop_reverse_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_add_mul (vdest, splat_alpha (vsrc),
vsrc, splat_alpha (negate (vdest)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
@@ -952,12 +905,13 @@ vmx_combine_atop_reverse_u_mask (uint32_t * dest,
uint32_t d = dest[i];
uint32_t src_a;
uint32_t dest_ia = ALPHA_8 (~d);
-
+
UN8x4_MUL_UN8 (s, m);
-
+
src_a = ALPHA_8 (s);
-
+
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);
+
dest[i] = s;
}
}
@@ -985,32 +939,32 @@ vmx_combine_xor_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)),
vdest, splat_alpha (negate (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t src_ia = ALPHA_8 (~s);
uint32_t dest_ia = ALPHA_8 (~d);
-
+
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);
+
dest[i] = s;
}
}
@@ -1025,25 +979,24 @@ vmx_combine_xor_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)),
vdest, splat_alpha (negate (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
@@ -1051,12 +1004,13 @@ vmx_combine_xor_u_mask (uint32_t * dest,
uint32_t d = dest[i];
uint32_t src_ia;
uint32_t dest_ia = ALPHA_8 (~d);
-
+
UN8x4_MUL_UN8 (s, m);
-
+
src_ia = ALPHA_8 (~s);
-
+
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);
+
dest[i] = s;
}
}
@@ -1084,27 +1038,28 @@ vmx_combine_add_u_no_mask (uint32_t * dest,
vector unsigned int vdest, vsrc;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKS (dest, src);
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORS (dest, src);
-
+
vdest = pix_add (vsrc, vdest);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t s = src[i];
uint32_t d = dest[i];
+
UN8x4_ADD_UN8x4 (d, s);
+
dest[i] = d;
}
}
@@ -1119,33 +1074,32 @@ vmx_combine_add_u_mask (uint32_t * dest,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, src_mask, mask_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSM (dest, src, mask);
-
+
vdest = pix_add (vsrc, vdest);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t m = ALPHA_8 (mask[i]);
uint32_t s = src[i];
uint32_t d = dest[i];
-
+
UN8x4_MUL_UN8 (s, m);
-
UN8x4_ADD_UN8x4 (d, s);
+
dest[i] = d;
}
}
@@ -1176,28 +1130,30 @@ vmx_combine_src_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_multiply (vsrc, vmask);
-
+
STORE_VECTOR (dest);
-
+
mask += 4;
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
+
UN8x4_MUL_UN8x4 (s, a);
+
dest[i] = s;
}
}
@@ -1214,30 +1170,34 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = in_over (vsrc, splat_alpha (vsrc), vmask, vdest);
-
+
STORE_VECTOR (dest);
-
+
mask += 4;
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
+ uint32_t sa = ALPHA_8 (s);
+
UN8x4_MUL_UN8x4 (s, a);
+ UN8x4_MUL_UN8 (a, sa);
UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ~a, s);
+
dest[i] = d;
}
}
@@ -1254,32 +1214,33 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
/* printf("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = over (vdest, splat_alpha (vdest), pix_multiply (vsrc, vmask));
-
+
STORE_VECTOR (dest);
-
+
mask += 4;
src += 4;
dest += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
- uint32_t da = ALPHA_8 (d);
+ uint32_t ida = ALPHA_8 (~d);
+
UN8x4_MUL_UN8x4 (s, a);
- UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ~da, d);
+ UN8x4_MUL_UN8_ADD_UN8x4 (s, ida, d);
+
dest[i] = s;
}
}
@@ -1296,31 +1257,32 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_multiply (pix_multiply (vsrc, vmask), splat_alpha (vdest));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t da = ALPHA_8 (dest[i]);
- UN8x4_MUL_UN8 (s, a);
+
+ UN8x4_MUL_UN8x4 (s, a);
UN8x4_MUL_UN8 (s, da);
+
dest[i] = s;
}
}
@@ -1337,31 +1299,33 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
+
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_multiply (vdest, pix_multiply (vmask, splat_alpha (vsrc)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t d = dest[i];
uint32_t sa = ALPHA_8 (src[i]);
+
UN8x4_MUL_UN8 (a, sa);
UN8x4_MUL_UN8x4 (d, a);
+
dest[i] = d;
}
}
@@ -1378,32 +1342,34 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
- vdest = pix_multiply (pix_multiply (vsrc, vmask), splat_alpha (vdest));
-
+
+ vdest = pix_multiply (
+ pix_multiply (vsrc, vmask), splat_alpha (negate (vdest)));
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t da = ALPHA_8 (~d);
+
UN8x4_MUL_UN8x4 (s, a);
- UN8x4_MUL_UN8x4 (s, da);
+ UN8x4_MUL_UN8 (s, da);
+
dest[i] = s;
}
}
@@ -1420,33 +1386,34 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_multiply (
vdest, negate (pix_multiply (vmask, splat_alpha (vsrc))));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t sa = ALPHA_8 (s);
- UN8x4_MUL_UN8x4 (a, sa);
+
+ UN8x4_MUL_UN8 (a, sa);
UN8x4_MUL_UN8x4 (d, ~a);
+
dest[i] = d;
}
}
@@ -1460,30 +1427,32 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
int width)
{
int i;
- vector unsigned int vdest, vsrc, vmask;
+ vector unsigned int vdest, vsrc, vmask, vsrca;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
- vdest = pix_add_mul (pix_multiply (vsrc, vmask), splat_alpha (vdest),
- vdest,
- negate (pix_multiply (vmask,
- splat_alpha (vmask))));
-
+
+ vsrca = splat_alpha (vsrc);
+
+ vsrc = pix_multiply (vsrc, vmask);
+ vmask = pix_multiply (vmask, vsrca);
+
+ vdest = pix_add_mul (vsrc, splat_alpha (vdest),
+ negate (vmask), vdest);
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
@@ -1491,10 +1460,11 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
uint32_t d = dest[i];
uint32_t sa = ALPHA_8 (s);
uint32_t da = ALPHA_8 (d);
-
+
UN8x4_MUL_UN8x4 (s, a);
UN8x4_MUL_UN8 (a, sa);
UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da);
+
dest[i] = d;
}
}
@@ -1511,38 +1481,38 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_add_mul (vdest,
pix_multiply (vmask, splat_alpha (vsrc)),
pix_multiply (vsrc, vmask),
negate (splat_alpha (vdest)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t sa = ALPHA_8 (s);
- uint32_t da = ALPHA_8 (d);
-
+ uint32_t da = ALPHA_8 (~d);
+
UN8x4_MUL_UN8x4 (s, a);
UN8x4_MUL_UN8 (a, sa);
- UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, a, s, ~da);
+ UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, a, s, da);
+
dest[i] = d;
}
}
@@ -1559,38 +1529,38 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_add_mul (vdest,
negate (pix_multiply (vmask, splat_alpha (vsrc))),
pix_multiply (vsrc, vmask),
negate (splat_alpha (vdest)));
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
uint32_t sa = ALPHA_8 (s);
- uint32_t da = ALPHA_8 (d);
-
+ uint32_t da = ALPHA_8 (~d);
+
UN8x4_MUL_UN8x4 (s, a);
UN8x4_MUL_UN8 (a, sa);
- UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, ~da);
+ UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da);
+
dest[i] = d;
}
}
@@ -1607,122 +1577,44 @@ vmx_combine_add_ca (pixman_implementation_t *imp,
vector unsigned int vdest, vsrc, vmask;
vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
dest_mask, mask_mask, src_mask, store_mask;
-
+
COMPUTE_SHIFT_MASKC (dest, src, mask);
-
+
/* printf ("%s\n",__PRETTY_FUNCTION__); */
for (i = width / 4; i > 0; i--)
{
-
LOAD_VECTORSC (dest, src, mask);
-
+
vdest = pix_add (pix_multiply (vsrc, vmask), vdest);
-
+
STORE_VECTOR (dest);
-
+
src += 4;
dest += 4;
mask += 4;
}
-
+
for (i = width % 4; --i >= 0;)
{
uint32_t a = mask[i];
uint32_t s = src[i];
uint32_t d = dest[i];
-
+
UN8x4_MUL_UN8x4 (s, a);
UN8x4_ADD_UN8x4 (s, d);
- dest[i] = s;
- }
-}
-
-#if 0
-void
-vmx_composite_over_n_8888 (pixman_operator_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int16_t src_x,
- int16_t src_y,
- int16_t mask_x,
- int16_t mask_y,
- int16_t dest_x,
- int16_t dest_y,
- uint16_t width,
- uint16_t height)
-{
- uint32_t src;
- uint32_t *dst_line, *dst;
- int dst_stride;
-
- _pixman_image_get_solid (src_image, dst_image, src);
-
- if (src >> 24 == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- /* XXX vmx_combine_over_u (dst, src, width); */
- }
-}
-void
-vmx_composite_over_n_0565 (pixman_operator_t op,
- pixman_image_t * src_image,
- pixman_image_t * mask_image,
- pixman_image_t * dst_image,
- int16_t src_x,
- int16_t src_y,
- int16_t mask_x,
- int16_t mask_y,
- int16_t dest_x,
- int16_t dest_y,
- uint16_t width,
- uint16_t height)
-{
- uint32_t src;
- uint16_t *dst_line, *dst;
- uint16_t w;
- int dst_stride;
-
- _pixman_image_get_solid (src_image, dst_image, src);
-
- if (src >> 24 == 0)
- return;
-
- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
- while (height--)
- {
- dst = dst_line;
- dst_line += dst_stride;
- vmx_combine_over_u565 (dst, src, width);
+ dest[i] = s;
}
}
-static const pixman_fast_path_t vmx_fast_path_array[] =
-{
- { PIXMAN_OP_NONE },
-};
-
-const pixman_fast_path_t *const vmx_fast_paths = vmx_fast_path_array;
-
-#endif
-
pixman_implementation_t *
_pixman_implementation_create_vmx (void)
{
pixman_implementation_t *fast = _pixman_implementation_create_fast_path ();
pixman_implementation_t *imp = _pixman_implementation_create (fast);
-
+
/* Set up function pointers */
-
- /* SSE code patch for fbcompose.c */
+
imp->combine_32[PIXMAN_OP_OVER] = vmx_combine_over_u;
imp->combine_32[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_u;
imp->combine_32[PIXMAN_OP_IN] = vmx_combine_in_u;
@@ -1732,9 +1624,9 @@ _pixman_implementation_create_vmx (void)
imp->combine_32[PIXMAN_OP_ATOP] = vmx_combine_atop_u;
imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_u;
imp->combine_32[PIXMAN_OP_XOR] = vmx_combine_xor_u;
-
+
imp->combine_32[PIXMAN_OP_ADD] = vmx_combine_add_u;
-
+
imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_ca;
imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_ca;
imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_ca;
@@ -1746,7 +1638,6 @@ _pixman_implementation_create_vmx (void)
imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_ca;
imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_ca;
imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_ca;
-
+
return imp;
}
-
diff --git a/pixman/pixman/pixman-x64-mmx-emulation.h b/pixman/pixman/pixman-x64-mmx-emulation.h
index 231030fba..378019cf2 100644
--- a/pixman/pixman/pixman-x64-mmx-emulation.h
+++ b/pixman/pixman/pixman-x64-mmx-emulation.h
@@ -135,8 +135,8 @@ _m_paddusb (__m64 a, __m64 b) /* unoptimized */
unsigned long long y = ((M64U (a) >> 8) & 0x00FF00FF00FF00FFULL) +
((M64U (b) >> 8) & 0x00FF00FF00FF00FFULL);
- x | = ((x & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
- y | = ((y & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
+ x |= ((x & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
+ y |= ((y & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
x = (x & 0x00FF00FF00FF00FFULL) | ((y & 0x00FF00FF00FF00FFULL) << 8);
@@ -152,8 +152,8 @@ _m_paddusw (__m64 a, __m64 b) /* unoptimized */
unsigned long long y = ((M64U (a) >> 16) & 0x0000FFFF0000FFFFULL) +
((M64U (b) >> 16) & 0x0000FFFF0000FFFFULL);
- x | = ((x & 0xFFFF0000FFFF0000) >> 16) * 0xFFFF;
- y | = ((y & 0xFFFF0000FFFF0000) >> 16) * 0xFFFF;
+ x |= ((x & 0xFFFF0000FFFF0000) >> 16) * 0xFFFF;
+ y |= ((y & 0xFFFF0000FFFF0000) >> 16) * 0xFFFF;
x = (x & 0x0000FFFF0000FFFFULL) | ((y & 0x0000FFFF0000FFFFULL) << 16);
diff --git a/pixman/pixman/pixman.c b/pixman/pixman/pixman.c
index 12b4c5ac1..0edd967cf 100644
--- a/pixman/pixman/pixman.c
+++ b/pixman/pixman/pixman.c
@@ -173,6 +173,11 @@ pixman_image_composite (pixman_op_t op,
uint32_t *dest_bits;
int dest_dx, dest_dy;
+ _pixman_image_validate (src);
+ if (mask)
+ _pixman_image_validate (mask);
+ _pixman_image_validate (dest);
+
/*
* Check if we can replace our operator by a simpler one
* if the src or dest are opaque. The output operator should be
@@ -322,6 +327,8 @@ pixman_image_fill_rectangles (pixman_op_t op,
pixman_color_t c;
int i;
+ _pixman_image_validate (dest);
+
if (color->alpha == 0xffff)
{
if (op == PIXMAN_OP_OVER)