path: root/xorg-server/hw/xfree86/common/compiler.h
author     marha <marha@users.sourceforge.net>  2009-09-06 18:48:27 +0000
committer  marha <marha@users.sourceforge.net>  2009-09-06 18:48:27 +0000
commit     a915739887477b28d924ecc8417ee107d125bd6c (patch)
tree       c02f315476b61892d1fd89182e18943dce8d6277 /xorg-server/hw/xfree86/common/compiler.h
parent     6f25a23db1df27e992c34f6fd4c82e83c44fc2e2 (diff)
download   vcxsrv-a915739887477b28d924ecc8417ee107d125bd6c.tar.gz
           vcxsrv-a915739887477b28d924ecc8417ee107d125bd6c.tar.bz2
           vcxsrv-a915739887477b28d924ecc8417ee107d125bd6c.zip
Switched to xorg-server-1.6.99.900.tar.gz
Diffstat (limited to 'xorg-server/hw/xfree86/common/compiler.h')
-rw-r--r--  xorg-server/hw/xfree86/common/compiler.h  813
1 file changed, 227 insertions(+), 586 deletions(-)
diff --git a/xorg-server/hw/xfree86/common/compiler.h b/xorg-server/hw/xfree86/common/compiler.h
index 285d9a3f6..95ef72c39 100644
--- a/xorg-server/hw/xfree86/common/compiler.h
+++ b/xorg-server/hw/xfree86/common/compiler.h
@@ -55,6 +55,12 @@
# define DO_PROTOTYPES
#endif
+#ifndef _X_EXPORT
+# include <X11/Xfuncproto.h>
+#endif
+
+# include <pixman.h> /* for uint*_t types */
+
/* Allow drivers to use the GCC-supported __inline__ and/or __inline. */
# ifndef __inline__
# if defined(__GNUC__)
@@ -87,25 +93,26 @@
&& !(defined(__alpha__) && defined(linux)) \
&& !(defined(__ia64__) && defined(linux)) \
-extern void outb(unsigned short, unsigned char);
-extern void outw(unsigned short, unsigned short);
-extern void outl(unsigned short, unsigned int);
-extern unsigned int inb(unsigned short);
-extern unsigned int inw(unsigned short);
-extern unsigned int inl(unsigned short);
+extern _X_EXPORT void outb(unsigned short, unsigned char);
+extern _X_EXPORT void outw(unsigned short, unsigned short);
+extern _X_EXPORT void outl(unsigned short, unsigned int);
+extern _X_EXPORT unsigned int inb(unsigned short);
+extern _X_EXPORT unsigned int inw(unsigned short);
+extern _X_EXPORT unsigned int inl(unsigned short);
# else /* __sparc__, __arm32__, __alpha__*/
-extern void outb(unsigned long, unsigned char);
-extern void outw(unsigned long, unsigned short);
-extern void outl(unsigned long, unsigned int);
-extern unsigned int inb(unsigned long);
-extern unsigned int inw(unsigned long);
-extern unsigned int inl(unsigned long);
+extern _X_EXPORT void outb(unsigned long, unsigned char);
+extern _X_EXPORT void outw(unsigned long, unsigned short);
+extern _X_EXPORT void outl(unsigned long, unsigned int);
+extern _X_EXPORT unsigned int inb(unsigned long);
+extern _X_EXPORT unsigned int inw(unsigned long);
+extern _X_EXPORT unsigned int inl(unsigned long);
# endif /* __sparc__, __arm32__, __alpha__ */
# endif /* __arm__ */
+# if defined(__powerpc__) && !defined(__OpenBSD__)
extern unsigned long ldq_u(unsigned long *);
extern unsigned long ldl_u(unsigned int *);
extern unsigned long ldw_u(unsigned short *);
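
Most of this patch simply tags the driver-visible entry points with _X_EXPORT, supplied by the <X11/Xfuncproto.h> include added above. As an illustrative sketch only (the authoritative definition lives in Xfuncproto.h and varies by compiler), the annotation amounts to roughly:

    /* Illustrative only -- not the literal Xfuncproto.h text. */
    #if defined(__GNUC__) && (__GNUC__ >= 4)
    # define _X_EXPORT __attribute__((visibility("default")))
    #else
    # define _X_EXPORT
    #endif

When the server is built with -fvisibility=hidden, only symbols carrying this attribute remain visible to loadable driver modules, which is why the port I/O prototypes above gain the marker.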
@@ -118,9 +125,186 @@ extern void stl_brx(unsigned long, volatile unsigned char *, int);
extern void stw_brx(unsigned short, volatile unsigned char *, int);
extern unsigned long ldl_brx(volatile unsigned char *, int);
extern unsigned short ldw_brx(volatile unsigned char *, int);
+# endif /* __powerpc__ && !__OpenBSD */
+
+# endif /* NO_INLINE || DO_PROTOTYPES */
+
+# ifndef NO_INLINE
+# ifdef __GNUC__
+# ifdef __i386__
+
+# ifdef __SSE__
+# define write_mem_barrier() __asm__ __volatile__ ("sfence" : : : "memory")
+# else
+# define write_mem_barrier() __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
+# endif
+
+# ifdef __SSE2__
+# define mem_barrier() __asm__ __volatile__ ("mfence" : : : "memory")
+# else
+# define mem_barrier() __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
+# endif
+
+# elif defined __alpha__
+
+# define mem_barrier() __asm__ __volatile__ ("mb" : : : "memory")
+# define write_mem_barrier() __asm__ __volatile__ ("wmb" : : : "memory")
+
+# elif defined __amd64__
+
+# define mem_barrier() __asm__ __volatile__ ("mfence" : : : "memory")
+# define write_mem_barrier() __asm__ __volatile__ ("sfence" : : : "memory")
+
+# elif defined __ia64__
+
+# ifndef __INTEL_COMPILER
+# define mem_barrier() __asm__ __volatile__ ("mf" : : : "memory")
+# define write_mem_barrier() __asm__ __volatile__ ("mf" : : : "memory")
+# else
+# include "ia64intrin.h"
+# define mem_barrier() __mf()
+# define write_mem_barrier() __mf()
+# endif
+
+# elif defined __mips__
+ /* Note: sync instruction requires MIPS II instruction set */
+# define mem_barrier() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ ".set mips2\n\t" \
+ "sync\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : /* no input */ \
+ : "memory")
+# define write_mem_barrier() mem_barrier()
+
+# elif defined __powerpc__
+
+# if defined(linux) && defined(__powerpc64__)
+# include <linux/version.h>
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# include <asm/memory.h>
+# endif
+# endif /* defined(linux) && defined(__powerpc64__) */
+
+# ifndef eieio /* We deal with arch-specific eieio() routines above... */
+# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
+# endif /* eieio */
+# define mem_barrier() eieio()
+# define write_mem_barrier() eieio()
+# elif defined __sparc__
+
+# define barrier() __asm__ __volatile__ (".word 0x8143e00a" : : : "memory")
+# define mem_barrier() /* XXX: nop for now */
+# define write_mem_barrier() /* XXX: nop for now */
+# endif
+# endif /* __GNUC__ */
+# endif /* NO_INLINE */
+
+# ifndef mem_barrier
+# define mem_barrier() /* NOP */
+# endif
+
+# ifndef write_mem_barrier
+# define write_mem_barrier() /* NOP */
# endif
+
+# ifndef NO_INLINE
+# ifdef __GNUC__
+
+/* Define some packed structures to use with unaligned accesses */
+
+struct __una_u64 { uint64_t x __attribute__((packed)); };
+struct __una_u32 { uint32_t x __attribute__((packed)); };
+struct __una_u16 { uint16_t x __attribute__((packed)); };
+
+/* Elemental unaligned loads */
+
+static __inline__ uint64_t ldq_u(uint64_t *p)
+{
+ const struct __una_u64 *ptr = (const struct __una_u64 *) p;
+ return ptr->x;
+}
+
+static __inline__ uint32_t ldl_u(uint32_t *p)
+{
+ const struct __una_u32 *ptr = (const struct __una_u32 *) p;
+ return ptr->x;
+}
+
+static __inline__ uint16_t ldw_u(uint16_t *p)
+{
+ const struct __una_u16 *ptr = (const struct __una_u16 *) p;
+ return ptr->x;
+}
+
+/* Elemental unaligned stores */
+
+static __inline__ void stq_u(uint64_t val, uint64_t *p)
+{
+ struct __una_u64 *ptr = (struct __una_u64 *) p;
+ ptr->x = val;
+}
+
+static __inline__ void stl_u(uint32_t val, uint32_t *p)
+{
+ struct __una_u32 *ptr = (struct __una_u32 *) p;
+ ptr->x = val;
+}
+
+static __inline__ void stw_u(uint16_t val, uint16_t *p)
+{
+ struct __una_u16 *ptr = (struct __una_u16 *) p;
+ ptr->x = val;
+}
+# else /* !__GNUC__ */
+
+static __inline__ uint64_t ldq_u(uint64_t *p)
+{
+ uint64_t ret;
+ memmove(&ret, p, sizeof(*p));
+ return ret;
+}
+
+static __inline__ uint32_t ldl_u(uint32_t *p)
+{
+ uint32_t ret;
+ memmove(&ret, p, sizeof(*p));
+ return ret;
+}
+
+static __inline__ uint16_t ldw_u(uint16_t *p)
+{
+ uint16_t ret;
+ memmove(&ret, p, sizeof(*p));
+ return ret;
+}
+
+static __inline__ void stq_u(uint64_t val, uint64_t *p)
+{
+ uint64_t tmp = val;
+ memmove(p, &tmp, sizeof(*p));
+}
+
+static __inline__ void stl_u(uint32_t val, uint32_t *p)
+{
+ uint32_t tmp = val;
+ memmove(p, &tmp, sizeof(*p));
+}
+
+static __inline__ void stw_u(uint16_t val, uint16_t *p)
+{
+ uint16_t tmp = val;
+ memmove(p, &tmp, sizeof(*p));
+}
+
+# endif /* __GNUC__ */
+# endif /* NO_INLINE */
+
# ifndef NO_INLINE
# ifdef __GNUC__
# if (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && (defined(__alpha__))
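
The block added above collapses the per-architecture unaligned load/store helpers and memory-barrier macros into one generic implementation (GCC packed structs, with a memmove() fallback for other compilers); later hunks in this diff delete the old per-architecture copies. A minimal sketch of how a driver might use them -- the function, the fb pointer, and the loop are invented for illustration, not part of the patch:

    static void
    copy_unaligned_words(uint32_t *fb, uint8_t *src, int count)
    {
        int i;
        for (i = 0; i < count; i++)
            /* src may not be 4-byte aligned, so read through ldl_u() */
            stl_u(ldl_u((uint32_t *)(src + 4 * i)), fb + i);
        /* make the stores visible to the device before kicking it */
        write_mem_barrier();
    }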
@@ -130,42 +314,42 @@ extern unsigned short ldw_brx(volatile unsigned char *, int);
/* note that the appropriate setup via "ioperm" needs to be done */
/* *before* any inx/outx is done. */
-extern void (*_alpha_outb)(char val, unsigned long port);
+extern _X_EXPORT void (*_alpha_outb)(char val, unsigned long port);
static __inline__ void
outb(unsigned long port, unsigned char val)
{
_alpha_outb(val, port);
}
-extern void (*_alpha_outw)(short val, unsigned long port);
+extern _X_EXPORT void (*_alpha_outw)(short val, unsigned long port);
static __inline__ void
outw(unsigned long port, unsigned short val)
{
_alpha_outw(val, port);
}
-extern void (*_alpha_outl)(int val, unsigned long port);
+extern _X_EXPORT void (*_alpha_outl)(int val, unsigned long port);
static __inline__ void
outl(unsigned long port, unsigned int val)
{
_alpha_outl(val, port);
}
-extern unsigned int (*_alpha_inb)(unsigned long port);
+extern _X_EXPORT unsigned int (*_alpha_inb)(unsigned long port);
static __inline__ unsigned int
inb(unsigned long port)
{
return _alpha_inb(port);
}
-extern unsigned int (*_alpha_inw)(unsigned long port);
+extern _X_EXPORT unsigned int (*_alpha_inw)(unsigned long port);
static __inline__ unsigned int
inw(unsigned long port)
{
return _alpha_inw(port);
}
-extern unsigned int (*_alpha_inl)(unsigned long port);
+extern _X_EXPORT unsigned int (*_alpha_inl)(unsigned long port);
static __inline__ unsigned int
inl(unsigned long port)
{
@@ -182,12 +366,12 @@ inl(unsigned long port)
/* note that the appropriate setup via "ioperm" needs to be done */
/* *before* any inx/outx is done. */
-extern void outb(unsigned int port, unsigned char val);
-extern void outw(unsigned int port, unsigned short val);
-extern void outl(unsigned int port, unsigned int val);
-extern unsigned char inb(unsigned int port);
-extern unsigned short inw(unsigned int port);
-extern unsigned int inl(unsigned int port);
+extern _X_EXPORT void outb(unsigned int port, unsigned char val);
+extern _X_EXPORT void outw(unsigned int port, unsigned short val);
+extern _X_EXPORT void outl(unsigned int port, unsigned int val);
+extern _X_EXPORT unsigned char inb(unsigned int port);
+extern _X_EXPORT unsigned short inw(unsigned int port);
+extern _X_EXPORT unsigned int inl(unsigned int port);
# endif /* (__FreeBSD__ || __OpenBSD__ ) && !DO_PROTOTYPES */
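
As the comment in this hunk says, the libc in*/out* wrappers only work after port access has been granted; a hedged sketch of that setup on Linux (the port range and the helper function are invented for illustration, and real drivers normally go through the os-support layer instead):

    /* ioperm() is the call the comment refers to; <sys/io.h> declares it. */
    extern int ioperm(unsigned long from, unsigned long num, int turn_on);

    static int
    read_vga_misc(void)
    {
        if (ioperm(0x3C0, 0x20, 1) < 0)     /* enable ports 0x3C0..0x3DF */
            return -1;
        return inb(0x3CC);                  /* VGA Misc Output read port */
    }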
@@ -196,292 +380,29 @@ extern unsigned int inl(unsigned int port);
#include <machine/pio.h>
#endif /* __NetBSD__ */
-/*
- * inline functions to do unaligned accesses
- * from linux/include/asm-alpha/unaligned.h
- */
-
-/*
- * EGCS 1.1 knows about arbitrary unaligned loads. Define some
- * packed structures to talk about such things with.
- */
-
-struct __una_u64 { unsigned long x __attribute__((packed)); };
-struct __una_u32 { unsigned int x __attribute__((packed)); };
-struct __una_u16 { unsigned short x __attribute__((packed)); };
-
-/*
- * Elemental unaligned loads
- */
-/* let's try making these things static */
-
-static __inline__ unsigned long ldq_u(unsigned long * r11)
-{
-# if defined(__GNUC__)
- const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
- return ptr->x;
-# else
- unsigned long r1,r2;
- __asm__("ldq_u %0,%3\n\t"
- "ldq_u %1,%4\n\t"
- "extql %0,%2,%0\n\t"
- "extqh %1,%2,%1"
- :"=&r" (r1), "=&r" (r2)
- :"r" (r11),
- "m" (*r11),
- "m" (*(const unsigned long *)(7+(char *) r11)));
- return r1 | r2;
-# endif
-}
-
-static __inline__ unsigned long ldl_u(unsigned int * r11)
-{
-# if defined(__GNUC__)
- const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
- return ptr->x;
-# else
- unsigned long r1,r2;
- __asm__("ldq_u %0,%3\n\t"
- "ldq_u %1,%4\n\t"
- "extll %0,%2,%0\n\t"
- "extlh %1,%2,%1"
- :"=&r" (r1), "=&r" (r2)
- :"r" (r11),
- "m" (*r11),
- "m" (*(const unsigned long *)(3+(char *) r11)));
- return r1 | r2;
-# endif
-}
-
-static __inline__ unsigned long ldw_u(unsigned short * r11)
-{
-# if defined(__GNUC__)
- const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
- return ptr->x;
-# else
- unsigned long r1,r2;
- __asm__("ldq_u %0,%3\n\t"
- "ldq_u %1,%4\n\t"
- "extwl %0,%2,%0\n\t"
- "extwh %1,%2,%1"
- :"=&r" (r1), "=&r" (r2)
- :"r" (r11),
- "m" (*r11),
- "m" (*(const unsigned long *)(1+(char *) r11)));
- return r1 | r2;
-# endif
-}
-
-/*
- * Elemental unaligned stores
- */
-
-static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
-{
-# if defined(__GNUC__)
- struct __una_u64 *ptr = (struct __una_u64 *) r11;
- ptr->x = r5;
-# else
- unsigned long r1,r2,r3,r4;
-
- __asm__("ldq_u %3,%1\n\t"
- "ldq_u %2,%0\n\t"
- "insqh %6,%7,%5\n\t"
- "insql %6,%7,%4\n\t"
- "mskqh %3,%7,%3\n\t"
- "mskql %2,%7,%2\n\t"
- "bis %3,%5,%3\n\t"
- "bis %2,%4,%2\n\t"
- "stq_u %3,%1\n\t"
- "stq_u %2,%0"
- :"=m" (*r11),
- "=m" (*(unsigned long *)(7+(char *) r11)),
- "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
- :"r" (r5), "r" (r11));
-# endif
-}
-
-static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
-{
-# if defined(__GNUC__)
- struct __una_u32 *ptr = (struct __una_u32 *) r11;
- ptr->x = r5;
-# else
- unsigned long r1,r2,r3,r4;
-
- __asm__("ldq_u %3,%1\n\t"
- "ldq_u %2,%0\n\t"
- "inslh %6,%7,%5\n\t"
- "insll %6,%7,%4\n\t"
- "msklh %3,%7,%3\n\t"
- "mskll %2,%7,%2\n\t"
- "bis %3,%5,%3\n\t"
- "bis %2,%4,%2\n\t"
- "stq_u %3,%1\n\t"
- "stq_u %2,%0"
- :"=m" (*r11),
- "=m" (*(unsigned long *)(3+(char *) r11)),
- "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
- :"r" (r5), "r" (r11));
-# endif
-}
-
-static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
-{
-# if defined(__GNUC__)
- struct __una_u16 *ptr = (struct __una_u16 *) r11;
- ptr->x = r5;
-# else
- unsigned long r1,r2,r3,r4;
-
- __asm__("ldq_u %3,%1\n\t"
- "ldq_u %2,%0\n\t"
- "inswh %6,%7,%5\n\t"
- "inswl %6,%7,%4\n\t"
- "mskwh %3,%7,%3\n\t"
- "mskwl %2,%7,%2\n\t"
- "bis %3,%5,%3\n\t"
- "bis %2,%4,%2\n\t"
- "stq_u %3,%1\n\t"
- "stq_u %2,%0"
- :"=m" (*r11),
- "=m" (*(unsigned long *)(1+(char *) r11)),
- "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
- :"r" (r5), "r" (r11));
-# endif
-}
-
-/* to flush the I-cache before jumping to code which just got loaded */
-# define PAL_imb 134
-# define istream_mem_barrier() \
- __asm__ __volatile__("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
-# define mem_barrier() __asm__ __volatile__("mb" : : : "memory")
-# ifdef __ELF__
-# define write_mem_barrier() __asm__ __volatile__("wmb" : : : "memory")
-# else /* ECOFF gas 2.6 doesn't know "wmb" :-( */
-# define write_mem_barrier() mem_barrier()
-# endif
-
-
# elif defined(linux) && defined(__ia64__)
# include <inttypes.h>
# include <sys/io.h>
-struct __una_u64 { uint64_t x __attribute__((packed)); };
-struct __una_u32 { uint32_t x __attribute__((packed)); };
-struct __una_u16 { uint16_t x __attribute__((packed)); };
-
-static __inline__ unsigned long
-__uldq (const unsigned long * r11)
-{
- const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
- return ptr->x;
-}
-
-static __inline__ unsigned long
-__uldl (const unsigned int * r11)
-{
- const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
- return ptr->x;
-}
-
-static __inline__ unsigned long
-__uldw (const unsigned short * r11)
-{
- const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
- return ptr->x;
-}
-
-static __inline__ void
-__ustq (unsigned long r5, unsigned long * r11)
-{
- struct __una_u64 *ptr = (struct __una_u64 *) r11;
- ptr->x = r5;
-}
-
-static __inline__ void
-__ustl (unsigned long r5, unsigned int * r11)
-{
- struct __una_u32 *ptr = (struct __una_u32 *) r11;
- ptr->x = r5;
-}
-
-static __inline__ void
-__ustw (unsigned long r5, unsigned short * r11)
-{
- struct __una_u16 *ptr = (struct __una_u16 *) r11;
- ptr->x = r5;
-}
-
-# define ldq_u(p) __uldq(p)
-# define ldl_u(p) __uldl(p)
-# define ldw_u(p) __uldw(p)
-# define stq_u(v,p) __ustq(v,p)
-# define stl_u(v,p) __ustl(v,p)
-# define stw_u(v,p) __ustw(v,p)
-
-# ifndef __INTEL_COMPILER
-# define mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
-# define write_mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
-# else
-# include "ia64intrin.h"
-# define mem_barrier() __mf()
-# define write_mem_barrier() __mf()
-# endif
-
-/*
- * This is overkill, but for different reasons depending on where it is used.
- * This is thus general enough to be used everywhere cache flushes are needed.
- * It doesn't handle memory access serialisation by other processors, though.
- */
-# ifndef __INTEL_COMPILER
-# define ia64_flush_cache(Addr) \
- __asm__ __volatile__ ( \
- "fc.i %0;;;" \
- "sync.i;;;" \
- "mf;;;" \
- "srlz.i;;;" \
- :: "r"(Addr) : "memory")
-# else
-# define ia64_flush_cache(Addr) { \
- __fc(Addr);\
- __synci();\
- __mf();\
- __isrlz();\
- }
-# endif
# undef outb
# undef outw
# undef outl
# undef inb
# undef inw
# undef inl
-extern void outb(unsigned long port, unsigned char val);
-extern void outw(unsigned long port, unsigned short val);
-extern void outl(unsigned long port, unsigned int val);
-extern unsigned int inb(unsigned long port);
-extern unsigned int inw(unsigned long port);
-extern unsigned int inl(unsigned long port);
+extern _X_EXPORT void outb(unsigned long port, unsigned char val);
+extern _X_EXPORT void outw(unsigned long port, unsigned short val);
+extern _X_EXPORT void outl(unsigned long port, unsigned int val);
+extern _X_EXPORT unsigned int inb(unsigned long port);
+extern _X_EXPORT unsigned int inw(unsigned long port);
+extern _X_EXPORT unsigned int inl(unsigned long port);
-# elif defined(linux) && defined(__amd64__)
+# elif (defined(linux) || defined(__FreeBSD__)) && defined(__amd64__)
# include <inttypes.h>
-# define ldq_u(p) (*((unsigned long *)(p)))
-# define ldl_u(p) (*((unsigned int *)(p)))
-# define ldw_u(p) (*((unsigned short *)(p)))
-# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
-# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
-# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
-
-# define mem_barrier() \
- __asm__ __volatile__ ("lock; addl $0,0(%%rsp)": : :"memory")
-# define write_mem_barrier() \
- __asm__ __volatile__ ("": : :"memory")
-
-
static __inline__ void
outb(unsigned short port, unsigned char val)
{
@@ -537,8 +458,6 @@ inl(unsigned short port)
# define ASI_PL 0x88
# endif
-# define barrier() __asm__ __volatile__(".word 0x8143e00a": : :"memory")
-
static __inline__ void
outb(unsigned long port, unsigned char val)
{
@@ -771,98 +690,6 @@ xf86WriteMmio32LeNB(__volatile__ void *base, const unsigned long offset,
: "r" (val), "r" (addr), "i" (ASI_PL));
}
-
-/*
- * EGCS 1.1 knows about arbitrary unaligned loads. Define some
- * packed structures to talk about such things with.
- */
-
-# if defined(__arch64__) || defined(__sparcv9)
-struct __una_u64 { unsigned long x __attribute__((packed)); };
-# endif
-struct __una_u32 { unsigned int x __attribute__((packed)); };
-struct __una_u16 { unsigned short x __attribute__((packed)); };
-
-static __inline__ unsigned long ldq_u(unsigned long *p)
-{
-# if defined(__GNUC__)
-# if defined(__arch64__) || defined(__sparcv9)
- const struct __una_u64 *ptr = (const struct __una_u64 *) p;
-# else
- const struct __una_u32 *ptr = (const struct __una_u32 *) p;
-# endif
- return ptr->x;
-# else
- unsigned long ret;
- memmove(&ret, p, sizeof(*p));
- return ret;
-# endif
-}
-
-static __inline__ unsigned long ldl_u(unsigned int *p)
-{
-# if defined(__GNUC__)
- const struct __una_u32 *ptr = (const struct __una_u32 *) p;
- return ptr->x;
-# else
- unsigned int ret;
- memmove(&ret, p, sizeof(*p));
- return ret;
-# endif
-}
-
-static __inline__ unsigned long ldw_u(unsigned short *p)
-{
-# if defined(__GNUC__)
- const struct __una_u16 *ptr = (const struct __una_u16 *) p;
- return ptr->x;
-# else
- unsigned short ret;
- memmove(&ret, p, sizeof(*p));
- return ret;
-# endif
-}
-
-static __inline__ void stq_u(unsigned long val, unsigned long *p)
-{
-# if defined(__GNUC__)
-# if defined(__arch64__) || defined(__sparcv9)
- struct __una_u64 *ptr = (struct __una_u64 *) p;
-# else
- struct __una_u32 *ptr = (struct __una_u32 *) p;
-# endif
- ptr->x = val;
-# else
- unsigned long tmp = val;
- memmove(p, &tmp, sizeof(*p));
-# endif
-}
-
-static __inline__ void stl_u(unsigned long val, unsigned int *p)
-{
-# if defined(__GNUC__)
- struct __una_u32 *ptr = (struct __una_u32 *) p;
- ptr->x = val;
-# else
- unsigned int tmp = val;
- memmove(p, &tmp, sizeof(*p));
-# endif
-}
-
-static __inline__ void stw_u(unsigned long val, unsigned short *p)
-{
-# if defined(__GNUC__)
- struct __una_u16 *ptr = (struct __una_u16 *) p;
- ptr->x = val;
-# else
- unsigned short tmp = val;
- memmove(p, &tmp, sizeof(*p));
-# endif
-}
-
-# define mem_barrier() /* XXX: nop for now */
-# define write_mem_barrier() /* XXX: nop for now */
-
# elif defined(__mips__) || (defined(__arm32__) && !defined(__linux__))
# ifdef __arm32__
# define PORT_SIZE long
@@ -870,7 +697,7 @@ static __inline__ void stw_u(unsigned long val, unsigned short *p)
# define PORT_SIZE short
# endif
-unsigned int IOPortBase; /* Memory mapped I/O port area */
+_X_EXPORT unsigned int IOPortBase; /* Memory mapped I/O port area */
static __inline__ void
outb(unsigned PORT_SIZE port, unsigned char val)
@@ -910,65 +737,7 @@ inl(unsigned PORT_SIZE port)
# if defined(__mips__)
-static __inline__ unsigned long ldq_u(unsigned long * r11)
-{
- unsigned long r1;
- __asm__("lwr %0,%2\n\t"
- "lwl %0,%3\n\t"
- :"=&r" (r1)
- :"r" (r11),
- "m" (*r11),
- "m" (*(unsigned long *)(3+(char *) r11)));
- return r1;
-}
-
-static __inline__ unsigned long ldl_u(unsigned int * r11)
-{
- unsigned long r1;
- __asm__("lwr %0,%2\n\t"
- "lwl %0,%3\n\t"
- :"=&r" (r1)
- :"r" (r11),
- "m" (*r11),
- "m" (*(unsigned long *)(3+(char *) r11)));
- return r1;
-}
-
-static __inline__ unsigned long ldw_u(unsigned short * r11)
-{
- unsigned long r1;
- __asm__("lwr %0,%2\n\t"
- "lwl %0,%3\n\t"
- :"=&r" (r1)
- :"r" (r11),
- "m" (*r11),
- "m" (*(unsigned long *)(1+(char *) r11)));
- return r1;
-}
-
# ifdef linux /* don't mess with other OSs */
-
-/*
- * EGCS 1.1 knows about arbitrary unaligned loads (and we don't support older
- * versions anyway. Define some packed structures to talk about such things
- * with.
- */
-
-struct __una_u32 { unsigned int x __attribute__((packed)); };
-struct __una_u16 { unsigned short x __attribute__((packed)); };
-
-static __inline__ void stw_u(unsigned long val, unsigned short *p)
-{
- struct __una_u16 *ptr = (struct __una_u16 *) p;
- ptr->x = val;
-}
-
-static __inline__ void stl_u(unsigned long val, unsigned int *p)
-{
- struct __una_u32 *ptr = (struct __una_u32 *) p;
- ptr->x = val;
-}
-
# if X_BYTE_ORDER == X_BIG_ENDIAN
static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
@@ -993,62 +762,16 @@ xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
: "r" (val), "r" (addr));
}
# endif
-
-# define mem_barrier() \
- __asm__ __volatile__( \
- "# prevent instructions being moved around\n\t" \
- ".set\tnoreorder\n\t" \
- "# 8 nops to fool the R4400 pipeline\n\t" \
- "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
- ".set\treorder" \
- : /* no output */ \
- : /* no input */ \
- : "memory")
-# define write_mem_barrier() mem_barrier()
-
-# else /* !linux */
-
-# define stq_u(v,p) stl_u(v,p)
-# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
- (*(unsigned char *)(p)+1) = ((v) >> 8); \
- (*(unsigned char *)(p)+2) = ((v) >> 16); \
- (*(unsigned char *)(p)+3) = ((v) >> 24)
-
-# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
- (*(unsigned char *)(p)+1) = ((v) >> 8)
-
-# define mem_barrier() /* NOP */
# endif /* !linux */
# endif /* __mips__ */
-# if defined(__arm32__)
-# define ldq_u(p) (*((unsigned long *)(p)))
-# define ldl_u(p) (*((unsigned int *)(p)))
-# define ldw_u(p) (*((unsigned short *)(p)))
-# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
-# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
-# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
-# define mem_barrier() /* NOP */
-# define write_mem_barrier() /* NOP */
-# endif /* __arm32__ */
-
# elif (defined(linux) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__)) && defined(__powerpc__)
# ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
# endif
-extern volatile unsigned char *ioBase;
-
-#if defined(linux) && defined(__powerpc64__)
-# include <linux/version.h>
-# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-# include <asm/memory.h>
-# endif
-#endif /* defined(linux) && defined(__powerpc64__) */
-#ifndef eieio /* We deal with arch-specific eieio() routines above... */
-# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
-#endif /* eieio */
+extern _X_EXPORT volatile unsigned char *ioBase;
static __inline__ unsigned char
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
@@ -1248,36 +971,8 @@ inl(unsigned short port)
return xf86ReadMmio32Le((void *)ioBase, port);
}
-# define ldq_u(p) ldl_u(p)
-# define ldl_u(p) ((*(unsigned char *)(p)) | \
- (*((unsigned char *)(p)+1)<<8) | \
- (*((unsigned char *)(p)+2)<<16) | \
- (*((unsigned char *)(p)+3)<<24))
-# define ldw_u(p) ((*(unsigned char *)(p)) | \
- (*((unsigned char *)(p)+1)<<8))
-
-# define stq_u(v,p) stl_u(v,p)
-# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
- (*((unsigned char *)(p)+1)) = ((v) >> 8); \
- (*((unsigned char *)(p)+2)) = ((v) >> 16); \
- (*((unsigned char *)(p)+3)) = ((v) >> 24)
-# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
- (*((unsigned char *)(p)+1)) = ((v) >> 8)
-
-# define mem_barrier() eieio()
-# define write_mem_barrier() eieio()
-
#elif defined(__arm__) && defined(__linux__)
-#define ldq_u(p) (*((unsigned long *)(p)))
-#define ldl_u(p) (*((unsigned int *)(p)))
-#define ldw_u(p) (*((unsigned short *)(p)))
-#define stq_u(v,p) (*(unsigned long *)(p)) = (v)
-#define stl_u(v,p) (*(unsigned int *)(p)) = (v)
-#define stw_u(v,p) (*(unsigned short *)(p)) = (v)
-#define mem_barrier() /* NOP */
-#define write_mem_barrier() /* NOP */
-
/* for Linux on ARM, we use the LIBC inx/outx routines */
/* note that the appropriate setup via "ioperm" needs to be done */
/* *before* any inx/outx is done. */
@@ -1306,27 +1001,8 @@ xf_outl(unsigned short port, unsigned int val)
#define outw xf_outw
#define outl xf_outl
-#define arm_flush_cache(addr) \
-do { \
- register unsigned long _beg __asm ("a1") = (unsigned long) (addr); \
- register unsigned long _end __asm ("a2") = (unsigned long) (addr) + 4;\
- register unsigned long _flg __asm ("a3") = 0; \
- __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
- : "=r" (_beg) \
- : "0" (_beg), "r" (_end), "r" (_flg)); \
-} while (0)
-
# else /* ix86 */
-# define ldq_u(p) (*((unsigned long *)(p)))
-# define ldl_u(p) (*((unsigned int *)(p)))
-# define ldw_u(p) (*((unsigned short *)(p)))
-# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
-# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
-# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
-# define mem_barrier() /* NOP */
-# define write_mem_barrier() /* NOP */
-
# if !defined(__SUNPRO_C)
# if !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__s390__) && !defined(__m32r__)
# ifdef GCCUSESGAS
@@ -1508,41 +1184,31 @@ inl(unsigned short port)
# pragma asm partial_optimization inw
# pragma asm partial_optimization inb
# endif
-# define ldq_u(p) (*((unsigned long *)(p)))
-# define ldl_u(p) (*((unsigned int *)(p)))
-# define ldw_u(p) (*((unsigned short *)(p)))
-# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
-# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
-# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
-# define mem_barrier() /* NOP */
-# define write_mem_barrier() /* NOP */
# endif /* __GNUC__ */
# endif /* NO_INLINE */
# ifdef __alpha__
/* entry points for Mmio memory access routines */
-extern int (*xf86ReadMmio8)(void *, unsigned long);
-extern int (*xf86ReadMmio16)(void *, unsigned long);
+extern _X_EXPORT int (*xf86ReadMmio8)(void *, unsigned long);
+extern _X_EXPORT int (*xf86ReadMmio16)(void *, unsigned long);
# ifndef STANDALONE_MMIO
-extern int (*xf86ReadMmio32)(void *, unsigned long);
+extern _X_EXPORT int (*xf86ReadMmio32)(void *, unsigned long);
# else
/* Some DRI 3D drivers need MMIO_IN32. */
static __inline__ int
xf86ReadMmio32(void *Base, unsigned long Offset)
{
- __asm__ __volatile__("mb" : : : "memory");
+ mem_barrier();
return *(volatile unsigned int*)((unsigned long)Base+(Offset));
}
# endif
-extern void (*xf86WriteMmio8)(int, void *, unsigned long);
-extern void (*xf86WriteMmio16)(int, void *, unsigned long);
-extern void (*xf86WriteMmio32)(int, void *, unsigned long);
-extern void (*xf86WriteMmioNB8)(int, void *, unsigned long);
-extern void (*xf86WriteMmioNB16)(int, void *, unsigned long);
-extern void (*xf86WriteMmioNB32)(int, void *, unsigned long);
-extern void xf86SlowBCopyFromBus(unsigned char *, unsigned char *, int);
-extern void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);
+extern _X_EXPORT void (*xf86WriteMmio8)(int, void *, unsigned long);
+extern _X_EXPORT void (*xf86WriteMmio16)(int, void *, unsigned long);
+extern _X_EXPORT void (*xf86WriteMmio32)(int, void *, unsigned long);
+extern _X_EXPORT void (*xf86WriteMmioNB8)(int, void *, unsigned long);
+extern _X_EXPORT void (*xf86WriteMmioNB16)(int, void *, unsigned long);
+extern _X_EXPORT void (*xf86WriteMmioNB32)(int, void *, unsigned long);
/* Some macros to hide the system dependencies for MMIO accesses */
/* Changed to kill noise generated by gcc's -Wcast-align */
@@ -1612,17 +1278,6 @@ extern void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);
# define MMIO_MOVE32(base, offset, val) \
xf86WriteMmio32Be(base, offset, (CARD32)(val))
-static __inline__ void ppc_flush_icache(char *addr)
-{
- __asm__ volatile (
- "dcbf 0,%0;"
- "sync;"
- "icbi 0,%0;"
- "sync;"
- "isync;"
- : : "r"(addr) : "memory");
-}
-
# elif defined(__sparc__) || defined(sparc) || defined(__sparc)
/*
* Like powerpc, we provide byteswapping and no byteswapping functions
@@ -1685,18 +1340,4 @@ static __inline__ void ppc_flush_icache(char *addr)
# define MMIO_MOVE32(base, offset, val) MMIO_OUT32(base, offset, val)
# endif /* __alpha__ */
-
-/*
- * With Intel, the version in os-support/misc/SlowBcopy.s is used.
- * This avoids port I/O during the copy (which causes problems with
- * some hardware).
- */
-# ifdef __alpha__
-# define slowbcopy_tobus(src,dst,count) xf86SlowBCopyToBus(src,dst,count)
-# define slowbcopy_frombus(src,dst,count) xf86SlowBCopyFromBus(src,dst,count)
-# else /* __alpha__ */
-# define slowbcopy_tobus(src,dst,count) xf86SlowBcopy(src,dst,count)
-# define slowbcopy_frombus(src,dst,count) xf86SlowBcopy(src,dst,count)
-# endif /* __alpha__ */
-
#endif /* _COMPILER_H */
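
Taken together, the macros this header ends up providing (MMIO_IN32/MMIO_OUT32 and the mem_barrier()/write_mem_barrier() pair, which now default to no-ops where an architecture defines nothing stronger) are what drivers use for ordered register access. A short sketch, with the mmio mapping and register offsets invented for illustration:

    static void
    ring_doorbell(void *mmio)
    {
        MMIO_OUT32(mmio, 0x0040, 0x1);      /* post a command word */
        write_mem_barrier();                /* order it before the doorbell */
        MMIO_OUT32(mmio, 0x0044, 0x1);      /* ring the doorbell */
        (void) MMIO_IN32(mmio, 0x0044);     /* read back to flush posted writes */
    }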