--- db-3.1.17/dist/aclocal/mutex.m4.jbj	Sat Apr 29 15:03:49 2000
+++ db-3.1.17/dist/aclocal/mutex.m4	Sat Sep 30 01:03:03 2000
@@ -307,6 +307,42 @@
 }], [db_cv_mutex="ia64/gcc-assembly"])
 fi
 
+dnl alphalinux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__alpha__)
+#if defined(__linux__)
+	exit(0);
+#endif
+#endif
+	exit(1);
+}], [db_cv_mutex="alphalinux/gcc-assembly"])
+fi
+
+dnl sparc32linux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__sparc__) && !defined(__arch64__)
+#if defined(__linux__)
+	exit(0);
+#endif
+#endif
+	exit(1);
+}], [db_cv_mutex="sparc32linux/gcc-assembly"])
+fi
+
+dnl sparc64linux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__sparc__) && defined(__arch64__)
+#if defined(__linux__)
+	exit(0);
+#endif
+#endif
+	exit(1);
+}], [db_cv_mutex="sparc64linux/gcc-assembly"])
+fi
+
 dnl: uts/cc: UTS
 if test "$db_cv_mutex" = no; then
 AC_TRY_RUN([main(){
@@ -370,5 +406,11 @@
 	AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY);;
 x86/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
 	AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY);;
+alphalinux/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+	AC_DEFINE(HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY);;
+sparc32linux/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+	AC_DEFINE(HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY);;
+sparc64linux/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+	AC_DEFINE(HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY);;
 esac
 ])dnl
--- db-3.1.17/dist/acconfig.h.jbj	Tue May 16 11:40:44 2000
+++ db-3.1.17/dist/acconfig.h	Sat Sep 30 01:03:03 2000
@@ -48,6 +48,9 @@
 #undef HAVE_MUTEX_WIN16
 #undef HAVE_MUTEX_WIN32
 #undef HAVE_MUTEX_X86_GCC_ASSEMBLY
+#undef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
+#undef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
+#undef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
 
 /* Define if building RPC client/server. */
 #undef HAVE_RPC
--- db-3.1.17/include/mutex.h.jbj	Sat Sep 30 01:10:35 2000
+++ db-3.1.17/include/mutex.h	Sat Sep 30 01:15:44 2000
@@ -341,6 +341,29 @@
 typedef unsigned char tsl_t;
 #endif
 
+/*********************************************************************
+ * alphalinux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
+typedef unsigned long int tsl_t;
+
+#define	MUTEX_ALIGN	8
+#endif
+
+/*********************************************************************
+ * sparc32linux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
+typedef unsigned char tsl_t;
+#endif
+
+/*********************************************************************
+ * sparc64linux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
+typedef unsigned char tsl_t;
+#endif
+
 /*
  * Mutex alignment defaults to one byte.
  *
--- db-3.1.17/mutex/sparc32linux.gcc.jbj	Sat Sep 30 01:03:03 2000
+++ db-3.1.17/mutex/sparc32linux.gcc	Sat Sep 30 01:03:03 2000
@@ -0,0 +1,30 @@
+
+/*
+ * The ldstub instruction takes the location specified by its first argument
+ * (a register containing a memory address), loads its contents into its
+ * second argument (a register), and atomically sets the contents of the
+ * location specified by its first argument to a byte of 1s.  (The value in
+ * the second argument is never read, but only overwritten.)
+ *
+ * The stbar is needed for v8, and is implemented as membar #sync on v9,
+ * so is functional there as well.  For v7, stbar may generate an illegal
+ * instruction and we have no way to tell what we're running on.  Some
+ * operating systems notice and skip this instruction in the fault handler.
+ *
+ * For gcc/sparc, 0 is clear, 1 is set.
+ *
+ * Adapted from glibc-2.1.94 pthreads/sysdeps/sparc/sparc32/pspinlock.c.
+ */
+#define	MUTEX_SET(tsl) ({					\
+	register tsl_t *__l = (tsl);				\
+	register tsl_t __r;					\
+	__asm__ volatile					\
+	    ("ldstub [%1], %0"					\
+	    : "=r" (__r)					\
+	    : "r" (__l)						\
+	    : "memory");					\
+	!__r;							\
+})
+
+#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
+#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
--- db-3.1.17/mutex/mut_tas.c.jbj	Wed May 31 17:38:14 2000
+++ db-3.1.17/mutex/mut_tas.c	Sat Sep 30 01:03:03 2000
@@ -43,6 +43,15 @@
 #ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
 #include "x86.gcc"
 #endif
+#ifdef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
+#include "alphalinux.gcc"
+#endif
+#ifdef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
+#include "sparc32linux.gcc"
+#endif
+#ifdef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
+#include "sparc64linux.gcc"
+#endif
 
 #ifdef DIAGNOSTIC
 #undef	MSG1
--- db-3.1.17/mutex/sparc64linux.gcc.jbj	Sat Sep 30 01:03:03 2000
+++ db-3.1.17/mutex/sparc64linux.gcc	Sat Sep 30 01:03:03 2000
@@ -0,0 +1,33 @@
+
+/*
+ * $Id$
+ *
+ * The ldstub instruction takes the location specified by its first argument
+ * (a register containing a memory address), loads its contents into its
+ * second argument (a register), and atomically sets the contents of the
+ * location specified by its first argument to a byte of 1s.  (The value in
+ * the second argument is never read, but only overwritten.)
+ *
+ * The stbar is needed for v8, and is implemented as membar #sync on v9,
+ * so is functional there as well.  For v7, stbar may generate an illegal
+ * instruction and we have no way to tell what we're running on.  Some
+ * operating systems notice and skip this instruction in the fault handler.
+ *
+ * For gcc/sparc, 0 is clear, 1 is set.
+ *
+ * Adapted from glibc-2.1.94 pthreads/sysdeps/sparc/sparc64/pspinlock.c.
+ */
+#define	MUTEX_SET(tsl) ({					\
+	register tsl_t *__l = (tsl);				\
+	register tsl_t __r;					\
+	__asm__ volatile					\
+	    ("ldstub [%1], %0\n"				\
+	     "membar #StoreLoad | #StoreStore"			\
+	    : "=r" (__r)					\
+	    : "r" (__l)						\
+	    : "memory");					\
+	!__r;							\
+})
+
+#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
+#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
--- db-3.1.17/mutex/alphalinux.gcc.jbj	Sat Sep 30 01:03:03 2000
+++ db-3.1.17/mutex/alphalinux.gcc	Sat Sep 30 01:03:03 2000
@@ -0,0 +1,46 @@
+/*
+ * For gcc/alpha.
+ * Should return 0 if the lock could not be acquired, 1 if it was acquired
+ * properly.
+ */
+#ifdef __GNUC__
+static inline int
+MUTEX_SET(tsl_t *tsl) {
+	register tsl_t *__l = tsl;
+	unsigned long int __r;
+	unsigned long int temp;
+	__asm__ volatile(
+		"1:	ldl_l	%0,%1\n"
+		"	and	%0,%3,%2\n"
+		"	bne	%2,2f\n"
+		"	xor	%0,%3,%0\n"
+		"	stl_c	%0,%1\n"
+		"	beq	%0,3f\n"
+		"	mb\n"
+		"2:\n"
+		".subsection 2\n"
+		"3:	br	1b\n"
+		".previous"
+		: "=&r" (temp), "=m" (*__l), "=&r" (__r)
+		: "Ir" (1UL), "m" (*__l) : "memory");
+	return __r == 0;
+}
+
+/*
+ * Unset mutex.  Judging by the Alpha Architecture Handbook, the mb
+ * instruction might be necessary before unlocking.
+ */
+static inline int
+MUTEX_UNSET(tsl_t *tsl) {
+	asm volatile(" mb\n" : : : "memory");
+	return *tsl = 0;
+}
+#endif
+
+#ifdef __DECC
+#include <alpha/builtins.h>
+#define	MUTEX_SET(tsl)		(__LOCK_LONG_RETRY((tsl), 1) != 0)
+#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
+#endif
+
+#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
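
The sketch below is not part of the patch.  It is a minimal, self-contained illustration of the contract the new per-architecture files implement for mut_tas.c: MUTEX_SET(tsl) atomically tries to acquire the lock and returns nonzero on success, MUTEX_UNSET(tsl) releases it, and MUTEX_INIT(tsl) leaves it unlocked.  So that it compiles on any gcc target, the stand-in macro bodies here use the __sync builtins instead of the patch's SPARC/Alpha inline assembly, and the spin_lock/spin_unlock helpers are hypothetical; Berkeley DB's real acquire loop in mut_tas.c also backs off and yields rather than spinning flat out.

/*
 * Illustration only: same macro contract as sparc32linux.gcc,
 * sparc64linux.gcc and alphalinux.gcc, with portable stand-ins.
 */
#include <stdio.h>

typedef unsigned char tsl_t;		/* matches the sparc32/sparc64 choice */

/* Stand-ins for the per-architecture macros (NOT the patch's assembly). */
#define	MUTEX_SET(tsl)		(__sync_lock_test_and_set((tsl), 1) == 0)
#define	MUTEX_UNSET(tsl)	__sync_lock_release(tsl)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)

/* Hypothetical caller, showing how MUTEX_SET is spun on. */
static void
spin_lock(tsl_t *tsl)
{
	while (!MUTEX_SET(tsl))		/* nonzero return == lock acquired */
		continue;		/* real code would back off/yield */
}

static void
spin_unlock(tsl_t *tsl)
{
	MUTEX_UNSET(tsl);
}

int
main(void)
{
	tsl_t lock;

	MUTEX_INIT(&lock);		/* start out unlocked */
	spin_lock(&lock);		/* first attempt succeeds */
	printf("MUTEX_SET while held: %d\n", MUTEX_SET(&lock));	/* 0 */
	spin_unlock(&lock);
	printf("MUTEX_SET after unlock: %d\n", MUTEX_SET(&lock));	/* 1 */
	return 0;
}

In the patch itself the per-architecture file supplies these macros directly: configure defines the matching HAVE_MUTEX_*_LINUX_ASSEMBLY symbol, and mut_tas.c includes the corresponding .gcc file.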