author     marha <marha@users.sourceforge.net>  2011-09-28 16:10:37 +0200
committer  marha <marha@users.sourceforge.net>  2011-09-28 16:10:37 +0200
commit     0e68f72b7b0ab957a0d3c9415f68e3c4f484b0b3 (patch)
tree       55d4695431eceb8977f70ea5cdf2b5aa5326b21c
parent     c958cf29a8265091c4f68bb842a1d07c47e181fa (diff)
Solved pixman compilation problem
-rw-r--r--  pixman/pixman/config.h     |  6
-rw-r--r--  pixman/pixman/makefile     | 32
-rw-r--r--  pixman/pixman/pixman-mmx.c | 34
3 files changed, 25 insertions(+), 47 deletions(-)
diff --git a/pixman/pixman/config.h b/pixman/pixman/config.h
index 50adacc11..b16bc8da4 100644
--- a/pixman/pixman/config.h
+++ b/pixman/pixman/config.h
@@ -122,7 +122,7 @@
/* #undef TRANS_REOPEN */
/* use MMX compiler intrinsics */
-#define USE_MMX 1
+#define USE_X86_MMX 1
/* Support UNIX socket connections */
#define UNIXCONN 1
@@ -134,7 +134,7 @@
#define USE_DYNAMIC_XCURSOR 1
/* use SSE2 compiler intrinsics */
-#undef USE_SSE2
+#define USE_SSE2 1
/* use VMX compiler intrinsics */
#undef USE_VMX
@@ -225,3 +225,5 @@
/* Support gzip for bitmap fonts */
#define X_GZIP_FONT_COMPRESSION 1
+
+#define __inline__ __inline
\ No newline at end of file
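
The config.h half of the fix renames USE_MMX to USE_X86_MMX (the switch the newer pixman sources test), turns on USE_SSE2, and maps GCC's __inline__ keyword to MSVC's __inline. A minimal sketch of that last mapping, assuming nothing beyond what the hunk above adds (the clamp_u8 helper and main are made up for illustration, not taken from pixman):

/* Illustration only: the same __inline__ -> __inline mapping the patch adds
 * to config.h lets GCC-style helpers build under MSVC unchanged. */
#ifdef _MSC_VER
#define __inline__ __inline
#endif

#include <stdint.h>
#include <stdio.h>

static __inline__ uint32_t
clamp_u8 (uint32_t v)            /* hypothetical helper */
{
    return v > 0xff ? 0xff : v;
}

int
main (void)
{
    printf ("%u\n", clamp_u8 (300));   /* prints 255 */
    return 0;
}
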
diff --git a/pixman/pixman/makefile b/pixman/pixman/makefile
index 869040492..d85478e98 100644
--- a/pixman/pixman/makefile
+++ b/pixman/pixman/makefile
@@ -42,36 +42,8 @@ $(OBJDIR)\pixman-combine64.h : pixman-combine.h.template make-combine.pl
perl make-combine.pl 16 < pixman-combine.h.template > $@
## mmx code
-#if USE_MMX
-#noinst_LTLIBRARIES += libpixman-mmx.la
-CSRCS += \
- pixman-mmx.c
-# pixman-mmx.h
-#libpixman_mmx_la_CFLAGS = $(DEP_CFLAGS) $(MMX_CFLAGS)
-#libpixman_mmx_la_LIBADD = $(DEP_LIBS)
-#libpixman_1_la_LIBADD += libpixman-mmx.la
-#endif
-
-## vmx code
-#if USE_VMX
-#noinst_LTLIBRARIES += libpixman-vmx.la
-#libpixman_vmx_la_SOURCES = \
-# pixman-vmx.c \
-# pixman-vmx.h \
-# pixman-combine32.h
-#libpixman_vmx_la_CFLAGS = $(DEP_CFLAGS) $(VMX_CFLAGS)
-#libpixman_vmx_la_LIBADD = $(DEP_LIBS)
-#libpixman_1_la_LIBADD += libpixman-vmx.la
-#endif
+CSRCS += pixman-mmx.c
# sse2 code
-#if USE_SSE2
-#noinst_LTLIBRARIES += libpixman-sse2.la
-#libpixman_sse2_la_SOURCES = \
-# pixman-sse2.c \
-# pixman-sse2.h
-#libpixman_sse2_la_CFLAGS = $(DEP_CFLAGS) $(SSE2_CFLAGS)
-#libpixman_sse2_la_LIBADD = $(DEP_LIBS)
-#libpixman_1_la_LIBADD += libpixman-sse2.la
-#endif
+CSRCS += pixman-sse2.c
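
The makefile half drops the commented-out autotools/libtool fragments and adds pixman-mmx.c and pixman-sse2.c directly to CSRCS, so both fast-path files are always part of the MSVC build. A hedged sketch of a guard that could catch a mismatch between the makefile and config.h (this check is not in the tree; it only illustrates how the two halves of the patch depend on each other):

/* Hypothetical guard, not part of the patch: the ldq_u hunk below tests
 * USE_X86_MMX and the config.h hunk above turns on USE_SSE2, so compiling
 * pixman-mmx.c / pixman-sse2.c unconditionally only pays off if config.h
 * defines the matching switches. */
#include "config.h"

#ifndef USE_X86_MMX
#error "pixman-mmx.c is compiled unconditionally but USE_X86_MMX is not defined"
#endif

#ifndef USE_SSE2
#error "pixman-sse2.c is compiled unconditionally but USE_SSE2 is not defined"
#endif
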
diff --git a/pixman/pixman/pixman-mmx.c b/pixman/pixman/pixman-mmx.c
index 9c428a3de..ea244f67e 100644
--- a/pixman/pixman/pixman-mmx.c
+++ b/pixman/pixman/pixman-mmx.c
@@ -313,24 +313,28 @@ in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
/* Elemental unaligned loads */
-static __inline__ uint64_t ldq_u(uint64_t *p)
+#ifdef _MSC_VER
+#define ldq_u(p) *((__m64*)(p))
+#else
+static __inline__ __m64 ldq_u(uint64_t *p)
{
#ifdef USE_X86_MMX
/* x86's alignment restrictions are very relaxed. */
- return *p;
+ return (__m64)*p;
#elif defined USE_ARM_IWMMXT
int align = (uintptr_t)p & 7;
__m64 *aligned_p;
if (align == 0)
- return *p;
+ return (__m64)*p;
aligned_p = (__m64 *)((uintptr_t)p & ~7);
return _mm_align_si64 (aligned_p[0], aligned_p[1], align);
#else
struct __una_u64 { uint64_t x __attribute__((packed)); };
const struct __una_u64 *ptr = (const struct __una_u64 *) p;
- return ptr->x;
+ return (__m64)ptr->x;
#endif
}
+#endif
static __inline__ uint32_t ldl_u(uint32_t *p)
{
@@ -1424,7 +1428,7 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
while (w >= 2)
{
- __m64 vs = (__m64)ldq_u((uint64_t *)src);
+ __m64 vs = ldq_u((uint64_t *)src);
__m64 vd = *(__m64 *)dst;
__m64 vsrc0 = expand8888 (vs, 0);
__m64 vsrc1 = expand8888 (vs, 1);
@@ -1505,14 +1509,14 @@ mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
__m64 vd6 = *(__m64 *)(dst + 12);
__m64 vd7 = *(__m64 *)(dst + 14);
- __m64 vs0 = (__m64)ldq_u((uint64_t *)(src + 0));
- __m64 vs1 = (__m64)ldq_u((uint64_t *)(src + 2));
- __m64 vs2 = (__m64)ldq_u((uint64_t *)(src + 4));
- __m64 vs3 = (__m64)ldq_u((uint64_t *)(src + 6));
- __m64 vs4 = (__m64)ldq_u((uint64_t *)(src + 8));
- __m64 vs5 = (__m64)ldq_u((uint64_t *)(src + 10));
- __m64 vs6 = (__m64)ldq_u((uint64_t *)(src + 12));
- __m64 vs7 = (__m64)ldq_u((uint64_t *)(src + 14));
+ __m64 vs0 = ldq_u((uint64_t *)(src + 0));
+ __m64 vs1 = ldq_u((uint64_t *)(src + 2));
+ __m64 vs2 = ldq_u((uint64_t *)(src + 4));
+ __m64 vs3 = ldq_u((uint64_t *)(src + 6));
+ __m64 vs4 = ldq_u((uint64_t *)(src + 8));
+ __m64 vs5 = ldq_u((uint64_t *)(src + 10));
+ __m64 vs6 = ldq_u((uint64_t *)(src + 12));
+ __m64 vs7 = ldq_u((uint64_t *)(src + 14));
vd0 = pack8888 (
in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
@@ -2791,7 +2795,7 @@ mmx_composite_add_8_8 (pixman_implementation_t *imp,
while (w >= 8)
{
- *(__m64*)dst = _mm_adds_pu8 ((__m64)ldq_u((uint64_t *)src), *(__m64*)dst);
+ *(__m64*)dst = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
dst += 8;
src += 8;
w -= 8;
@@ -2849,7 +2853,7 @@ mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
while (w >= 2)
{
- dst64 = _mm_adds_pu8 ((__m64)ldq_u((uint64_t *)src), *(__m64*)dst);
+ dst64 = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
*(uint64_t*)dst = to_uint64 (dst64);
dst += 2;
src += 2;
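
Reconstructed from the hunks above (the ARM iwMMXt branch is elided for brevity; this is a sketch, not a verbatim copy of the tree), ldq_u ends up reading roughly like this: MSVC gets a plain macro, since x86 tolerates unaligned 64-bit loads, while other compilers keep the inline function, which now returns __m64 directly so callers can drop the (__m64) casts.

/* Sketch assembled from the diff above. */
#include <stdint.h>
#include <mmintrin.h>

#ifdef _MSC_VER
/* MSVC: a direct __m64 load is fine on x86, which allows unaligned access. */
#define ldq_u(p) (*((__m64 *)(p)))
#else
static __inline__ __m64
ldq_u (uint64_t *p)
{
#ifdef USE_X86_MMX
    /* x86's alignment restrictions are very relaxed. */
    return (__m64)*p;
#else
    /* Generic fallback: go through a packed struct to permit misalignment. */
    struct __una_u64 { uint64_t x __attribute__ ((packed)); };
    const struct __una_u64 *ptr = (const struct __una_u64 *) p;
    return (__m64)ptr->x;
#endif
}
#endif

/* Callers now use the result directly, e.g.
 *     __m64 vs = ldq_u ((uint64_t *) src);
 * instead of casting the old uint64_t return value. */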