1 diff -ruN a/innobase/btr/btr0sea.c b/innobase/btr/btr0sea.c
2 --- a/innobase/btr/btr0sea.c 2009-05-20 14:21:44.000000000 +0900
3 +++ b/innobase/btr/btr0sea.c 2009-05-20 14:39:34.000000000 +0900
5 rw_lock_s_lock(&btr_search_latch);
8 - ut_ad(btr_search_latch.writer != RW_LOCK_EX);
9 + ut_ad(btr_search_latch.writer_count == 0);
10 ut_ad(btr_search_latch.reader_count > 0);
12 rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);
13 diff -ruN a/innobase/include/sync0rw.h b/innobase/include/sync0rw.h
14 --- a/innobase/include/sync0rw.h 2009-01-30 06:42:20.000000000 +0900
15 +++ b/innobase/include/sync0rw.h 2009-04-16 16:15:28.000000000 +0900
17 Accessor functions for rw lock. */
21 +rw_lock_get_s_waiters(
22 +/*==================*/
26 +rw_lock_get_x_waiters(
27 +/*==================*/
31 +rw_lock_get_wx_waiters(
36 rw_lock_debug_t* info); /* in: debug struct */
37 #endif /* UNIV_SYNC_DEBUG */
39 +#ifdef HAVE_ATOMIC_BUILTINS
40 +/* This value means NOT_LOCKED */
41 +#define RW_LOCK_BIAS 0x00100000
43 +#error HAVE_ATOMIC_BUILTINS is not defined. Are you using a sufficiently new GCC or a compatible compiler?
44 +#error Did you pass the correct options in CFLAGS?
45 +#error e.g. (for x86_32): "-m32 -march=i586 -mtune=i686"
46 +#error e.g. (for Sparc_64): "-m64 -mcpu=v9"
47 +#error Otherwise, this build may be slower than the normal version.
50 /* NOTE! The structure appears here only for the compiler to know its size.
51 Do not use its fields directly! The structure used in the spin lock
52 implementation of a read-write lock. Several threads may have a shared lock
54 field. Then no new readers are allowed in. */
56 struct rw_lock_struct {
57 - os_event_t event; /* Used by sync0arr.c for thread queueing */
60 + /* Used by sync0arr.c for thread queueing */
61 + os_event_t s_event; /* Used for s_lock */
62 + os_event_t x_event; /* Used for x_lock */
63 os_event_t wait_ex_event; /* This windows specific event is
64 used by the thread which has set the
65 lock state to RW_LOCK_WAIT_EX. The
67 thread will be the next one to proceed
68 once the current the event gets
69 signalled. See LEMMA 2 in sync0sync.c */
71 +#ifdef HAVE_ATOMIC_BUILTINS
72 + volatile lint lock_word; /* Manipulated with atomic builtins */
75 - ulint reader_count; /* Number of readers who have locked this
76 + volatile ulint reader_count; /* Number of readers who have locked this
77 lock in the shared mode */
78 - ulint writer; /* This field is set to RW_LOCK_EX if there
79 + volatile ulint writer; /* This field is set to RW_LOCK_EX if there
80 is a writer owning the lock (in exclusive
81 mode), RW_LOCK_WAIT_EX if a writer is
82 queueing for the lock, and
83 RW_LOCK_NOT_LOCKED, otherwise. */
84 - os_thread_id_t writer_thread;
85 + volatile os_thread_id_t writer_thread;
86 /* Thread id of a possible writer thread */
87 - ulint writer_count; /* Number of times the same thread has
88 + volatile ulint writer_count; /* Number of times the same thread has
89 recursively locked the lock in the exclusive
91 +#ifndef HAVE_ATOMIC_BUILTINS
92 mutex_t mutex; /* The mutex protecting rw_lock_struct */
94 ulint pass; /* Default value 0. This is set to some
95 value != 0 given by the caller of an x-lock
96 operation, if the x-lock is to be passed to
97 another thread to unlock (which happens in
99 - ulint waiters; /* This ulint is set to 1 if there are
100 - waiters (readers or writers) in the global
101 - wait array, waiting for this rw_lock.
102 - Otherwise, == 0. */
103 - ibool writer_is_wait_ex;
104 + volatile ulint s_waiters; /* 1: there are waiters (s_lock) */
105 + volatile ulint x_waiters; /* 1: there are waiters (x_lock) */
106 + volatile ulint wait_ex_waiters; /* 1: there are waiters (wait_ex) */
107 + volatile ibool writer_is_wait_ex;
108 /* This is TRUE if the writer field is
109 RW_LOCK_WAIT_EX; this field is located far
110 from the memory update hotspot fields which
111 diff -ruN a/innobase/include/sync0rw.ic b/innobase/include/sync0rw.ic
112 --- a/innobase/include/sync0rw.ic 2009-01-30 06:42:20.000000000 +0900
113 +++ b/innobase/include/sync0rw.ic 2009-04-16 17:06:53.000000000 +0900
115 Accessor functions for rw lock. */
118 -rw_lock_get_waiters(
119 +rw_lock_get_s_waiters(
123 - return(lock->waiters);
124 + return(lock->s_waiters);
128 -rw_lock_set_waiters(
130 +rw_lock_get_x_waiters(
134 + return(lock->x_waiters);
138 +rw_lock_get_wx_waiters(
139 +/*================*/
142 + return(lock->wait_ex_waiters);
146 +rw_lock_set_s_waiters(
150 - lock->waiters = flag;
151 +#ifdef HAVE_ATOMIC_BUILTINS
152 + __sync_lock_test_and_set(&lock->s_waiters, flag);
154 + lock->s_waiters = flag;
159 +rw_lock_set_x_waiters(
163 +#ifdef HAVE_ATOMIC_BUILTINS
164 + __sync_lock_test_and_set(&lock->x_waiters, flag);
166 + lock->x_waiters = flag;
171 +rw_lock_set_wx_waiters(
172 +/*================*/
176 +#ifdef HAVE_ATOMIC_BUILTINS
177 + __sync_lock_test_and_set(&lock->wait_ex_waiters, flag);
179 + lock->wait_ex_waiters = flag;
188 +#ifdef HAVE_ATOMIC_BUILTINS
189 + if (lock->writer == RW_LOCK_NOT_LOCKED) {
190 + return(RW_LOCK_NOT_LOCKED);
193 + if (lock->writer_is_wait_ex) {
194 + return(RW_LOCK_WAIT_EX);
196 + return(RW_LOCK_EX);
199 return(lock->writer);
206 lock->reader_count = count;
208 +#ifndef HAVE_ATOMIC_BUILTINS
214 return(&(lock->mutex));
218 /**********************************************************************
219 Returns the value of writer_count for the lock. Does not reserve the lock
220 @@ -133,14 +191,26 @@
221 const char* file_name, /* in: file name where lock requested */
222 ulint line) /* in: line where requested */
224 -#ifdef UNIV_SYNC_DEBUG
225 +#if defined(UNIV_SYNC_DEBUG) && !defined(HAVE_ATOMIC_BUILTINS)
226 ut_ad(mutex_own(rw_lock_get_mutex(lock)));
227 #endif /* UNIV_SYNC_DEBUG */
228 /* Check if the writer field is free */
230 +#ifdef HAVE_ATOMIC_BUILTINS
231 + if (UNIV_LIKELY(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED)) {
233 + if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
235 + __sync_fetch_and_add(&(lock->lock_word),1);
236 + return(FALSE); /* locking did not succeed */
239 + __sync_fetch_and_add(&(lock->reader_count),1);
241 if (UNIV_LIKELY(lock->writer == RW_LOCK_NOT_LOCKED)) {
242 /* Set the shared lock by incrementing the reader count */
243 lock->reader_count++;
246 #ifdef UNIV_SYNC_DEBUG
247 rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name,
248 @@ -167,11 +237,15 @@
249 const char* file_name, /* in: file name where requested */
250 ulint line) /* in: line where lock requested */
252 - ut_ad(lock->writer == RW_LOCK_NOT_LOCKED);
253 + ut_ad(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
254 ut_ad(rw_lock_get_reader_count(lock) == 0);
256 /* Set the shared lock by incrementing the reader count */
257 +#ifdef HAVE_ATOMIC_BUILTINS
258 + __sync_fetch_and_add(&(lock->reader_count),1);
260 lock->reader_count++;
263 lock->last_s_file_name = file_name;
264 lock->last_s_line = line;
267 rw_lock_set_writer(lock, RW_LOCK_EX);
268 lock->writer_thread = os_thread_get_curr_id();
269 +#ifdef HAVE_ATOMIC_BUILTINS
270 + __sync_fetch_and_add(&(lock->writer_count),1);
272 lock->writer_count++;
276 lock->last_x_file_name = file_name;
277 @@ -241,15 +319,21 @@
278 ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); /* see NOTE above */
279 #endif /* UNIV_SYNC_DEBUG */
281 +#ifndef HAVE_ATOMIC_BUILTINS
282 mutex_enter(rw_lock_get_mutex(lock));
285 if (UNIV_LIKELY(rw_lock_s_lock_low(lock, pass, file_name, line))) {
286 +#ifndef HAVE_ATOMIC_BUILTINS
287 mutex_exit(rw_lock_get_mutex(lock));
290 return; /* Success */
292 /* Did not succeed, try spin wait */
293 +#ifndef HAVE_ATOMIC_BUILTINS
294 mutex_exit(rw_lock_get_mutex(lock));
297 rw_lock_s_lock_spin(lock, pass, file_name, line);
299 @@ -272,11 +356,23 @@
301 ibool success = FALSE;
303 +#ifdef HAVE_ATOMIC_BUILTINS
304 + if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
306 + if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
308 + __sync_fetch_and_add(&(lock->lock_word),1);
309 + return(FALSE); /* locking did not succeed */
312 + __sync_fetch_and_add(&(lock->reader_count),1);
314 mutex_enter(rw_lock_get_mutex(lock));
316 if (lock->writer == RW_LOCK_NOT_LOCKED) {
317 /* Set the shared lock by incrementing the reader count */
318 lock->reader_count++;
321 #ifdef UNIV_SYNC_DEBUG
322 rw_lock_add_debug_info(lock, 0, RW_LOCK_SHARED, file_name,
327 +#ifndef HAVE_ATOMIC_BUILTINS
328 mutex_exit(rw_lock_get_mutex(lock));
335 ibool success = FALSE;
336 os_thread_id_t curr_thread = os_thread_get_curr_id();
337 +#ifdef HAVE_ATOMIC_BUILTINS
338 + if (lock->reader_count == 0) {
339 + /* try to lock writer */
340 + if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
341 + == RW_LOCK_NOT_LOCKED) {
345 + if(__sync_sub_and_fetch(&(lock->lock_word),
346 + RW_LOCK_BIAS) == 0) {
348 + lock->writer_thread = curr_thread;
350 + lock->writer_is_wait_ex = FALSE;
351 + /* the following call also acts as a memory barrier */
353 + __sync_fetch_and_add(&(lock->writer_count),1);
355 +#ifdef UNIV_SYNC_DEBUG
356 + rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
359 + lock->last_x_file_name = file_name;
360 + lock->last_x_line = line;
362 + ut_ad(rw_lock_validate(lock));
366 + /* fail (x-lock) */
367 + if (__sync_fetch_and_add(&(lock->lock_word),RW_LOCK_BIAS)
372 + __sync_lock_test_and_set(&(lock->writer),RW_LOCK_NOT_LOCKED);
376 + if (lock->pass == 0
377 + && os_thread_eq(lock->writer_thread, curr_thread)) {
381 + //ut_ad(rw_lock_validate(lock));
385 mutex_enter(rw_lock_get_mutex(lock));
387 if (UNIV_UNLIKELY(rw_lock_get_reader_count(lock) != 0)) {
389 ut_ad(rw_lock_validate(lock));
395 /**********************************************************************
396 @@ -354,16 +501,33 @@
400 +#ifndef HAVE_ATOMIC_BUILTINS
401 mutex_t* mutex = &(lock->mutex);
404 + ibool x_sg = FALSE;
405 + ibool wx_sg = FALSE;
406 +#ifdef HAVE_ATOMIC_BUILTINS
407 + ibool last = FALSE;
410 +#ifndef HAVE_ATOMIC_BUILTINS
411 /* Acquire the mutex protecting the rw-lock fields */
415 /* Reset the shared lock by decrementing the reader count */
417 ut_a(lock->reader_count > 0);
418 +#ifdef HAVE_ATOMIC_BUILTINS
419 + /* unlock lock_word */
420 + __sync_fetch_and_add(&(lock->lock_word),1);
422 + if(__sync_sub_and_fetch(&(lock->reader_count),1) == 0) {
426 lock->reader_count--;
429 #ifdef UNIV_SYNC_DEBUG
430 rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);
431 @@ -372,22 +536,39 @@
432 /* If there may be waiters and this was the last s-lock,
435 - if (UNIV_UNLIKELY(lock->waiters)
436 +#ifdef HAVE_ATOMIC_BUILTINS
437 + if (UNIV_UNLIKELY(last && __sync_lock_test_and_set(&lock->wait_ex_waiters, 0))) {
438 + os_event_set(lock->wait_ex_event);
439 + sync_array_object_signalled(sync_primary_wait_array);
441 + else if (UNIV_UNLIKELY(last && __sync_lock_test_and_set(&lock->x_waiters, 0))) {
442 + os_event_set(lock->x_event);
443 + sync_array_object_signalled(sync_primary_wait_array);
446 + if (UNIV_UNLIKELY(lock->wait_ex_waiters)
447 && lock->reader_count == 0) {
451 - rw_lock_set_waiters(lock, 0);
452 + rw_lock_set_wx_waiters(lock, 0);
454 + else if (UNIV_UNLIKELY(lock->x_waiters)
455 + && lock->reader_count == 0) {
458 + rw_lock_set_x_waiters(lock, 0);
463 - if (UNIV_UNLIKELY(sg)) {
465 + if (UNIV_UNLIKELY(wx_sg)) {
466 os_event_set(lock->wait_ex_event);
468 - os_event_set(lock->event);
469 + sync_array_object_signalled(sync_primary_wait_array);
470 + } else if (UNIV_UNLIKELY(x_sg)) {
471 + os_event_set(lock->x_event);
472 sync_array_object_signalled(sync_primary_wait_array);
476 ut_ad(rw_lock_validate(lock));
478 @@ -409,13 +590,22 @@
480 ut_ad(lock->reader_count > 0);
482 +#ifdef HAVE_ATOMIC_BUILTINS
483 + __sync_sub_and_fetch(&(lock->reader_count),1);
485 lock->reader_count--;
488 #ifdef UNIV_SYNC_DEBUG
489 rw_lock_remove_debug_info(lock, 0, RW_LOCK_SHARED);
492 +#ifdef HAVE_ATOMIC_BUILTINS
493 + ut_ad(!lock->s_waiters);
494 + ut_ad(!lock->x_waiters);
496 ut_ad(!lock->waiters);
498 ut_ad(rw_lock_validate(lock));
499 #ifdef UNIV_SYNC_PERF_STAT
501 @@ -435,41 +625,83 @@
506 +#ifdef HAVE_ATOMIC_BUILTINS
507 + ibool last = FALSE;
509 + ibool s_sg = FALSE;
510 + ibool x_sg = FALSE;
512 +#ifndef HAVE_ATOMIC_BUILTINS
513 /* Acquire the mutex protecting the rw-lock fields */
514 mutex_enter(&(lock->mutex));
517 /* Reset the exclusive lock if this thread no longer has an x-mode
520 ut_ad(lock->writer_count > 0);
522 +#ifdef HAVE_ATOMIC_BUILTINS
523 + if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
528 + /* unlock lock_word */
529 + __sync_fetch_and_add(&(lock->lock_word),RW_LOCK_BIAS);
531 + /* FIXME: -1 is not a strictly valid pthread thread ID,
532 + but we should not retain the ID of a thread that no longer owns the lock. */
533 + lock->writer_thread = -1;
534 + __sync_lock_test_and_set(&(lock->writer),RW_LOCK_NOT_LOCKED);
537 lock->writer_count--;
539 if (lock->writer_count == 0) {
540 rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
544 #ifdef UNIV_SYNC_DEBUG
545 rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
548 /* If there may be waiters, signal the lock */
549 - if (UNIV_UNLIKELY(lock->waiters)
550 - && lock->writer_count == 0) {
553 - rw_lock_set_waiters(lock, 0);
554 +#ifdef HAVE_ATOMIC_BUILTINS
556 + if(__sync_lock_test_and_set(&lock->s_waiters, 0)){
559 + if(__sync_lock_test_and_set(&lock->x_waiters, 0)){
564 + if (lock->writer_count == 0) {
565 + if(lock->s_waiters){
567 + rw_lock_set_s_waiters(lock, 0);
569 + if(lock->x_waiters){
571 + rw_lock_set_x_waiters(lock, 0);
575 mutex_exit(&(lock->mutex));
578 - if (UNIV_UNLIKELY(sg)) {
579 + if (UNIV_UNLIKELY(s_sg)) {
580 + os_event_set(lock->s_event);
581 + sync_array_object_signalled(sync_primary_wait_array);
583 + if (UNIV_UNLIKELY(x_sg)) {
585 + /* NOTE(review): this wait_ex_event signal may be unnecessary here — confirm */
586 os_event_set(lock->wait_ex_event);
588 - os_event_set(lock->event);
589 + os_event_set(lock->x_event);
590 sync_array_object_signalled(sync_primary_wait_array);
595 ut_ad(lock->writer_count > 0);
597 +#ifdef HAVE_ATOMIC_BUILTINS
598 + if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
600 lock->writer_count--;
602 if (lock->writer_count == 0) {
604 rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
608 rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX);
611 +#ifdef HAVE_ATOMIC_BUILTINS
612 + ut_ad(!lock->s_waiters);
613 + ut_ad(!lock->x_waiters);
615 ut_ad(!lock->waiters);
617 ut_ad(rw_lock_validate(lock));
619 #ifdef UNIV_SYNC_PERF_STAT
620 diff -ruN a/innobase/sync/sync0arr.c b/innobase/sync/sync0arr.c
621 --- a/innobase/sync/sync0arr.c 2009-01-30 06:42:24.000000000 +0900
622 +++ b/innobase/sync/sync0arr.c 2009-04-16 16:15:28.000000000 +0900
623 @@ -309,13 +309,13 @@
625 if (type == SYNC_MUTEX) {
626 return(os_event_reset(((mutex_t *) object)->event));
628 } else if (type == RW_LOCK_WAIT_EX) {
629 return(os_event_reset(
630 ((rw_lock_t *) object)->wait_ex_event));
633 - return(os_event_reset(((rw_lock_t *) object)->event));
634 + } else if (type == RW_LOCK_SHARED) {
635 + return(os_event_reset(((rw_lock_t *) object)->s_event));
636 + } else { /* RW_LOCK_EX */
637 + return(os_event_reset(((rw_lock_t *) object)->x_event));
641 @@ -415,15 +415,12 @@
643 if (cell->request_type == SYNC_MUTEX) {
644 event = ((mutex_t*) cell->wait_object)->event;
646 - /* On windows if the thread about to wait is the one which
647 - has set the state of the rw_lock to RW_LOCK_WAIT_EX, then
648 - it waits on a special event i.e.: wait_ex_event. */
649 } else if (cell->request_type == RW_LOCK_WAIT_EX) {
650 event = ((rw_lock_t*) cell->wait_object)->wait_ex_event;
653 - event = ((rw_lock_t*) cell->wait_object)->event;
654 + } else if (cell->request_type == RW_LOCK_SHARED) {
655 + event = ((rw_lock_t*) cell->wait_object)->s_event;
657 + event = ((rw_lock_t*) cell->wait_object)->x_event;
660 cell->waiting = TRUE;
667 type = cell->request_type;
669 @@ -492,12 +490,10 @@
670 (ulong) mutex->waiters);
672 } else if (type == RW_LOCK_EX
674 || type == RW_LOCK_WAIT_EX
676 || type == RW_LOCK_SHARED) {
678 - fputs(type == RW_LOCK_EX ? "X-lock on" : "S-lock on", file);
679 + fputs(type == RW_LOCK_SHARED ? "S-lock on" : "X-lock on", file);
681 rwlock = cell->old_wait_rw_lock;
683 @@ -505,21 +501,23 @@
684 " RW-latch at %p created in file %s line %lu\n",
685 rwlock, rwlock->cfile_name,
686 (ulong) rwlock->cline);
687 - if (rwlock->writer != RW_LOCK_NOT_LOCKED) {
688 + writer = rw_lock_get_writer(rwlock);
689 + if (writer != RW_LOCK_NOT_LOCKED) {
691 "a writer (thread id %lu) has reserved it in mode %s",
692 (ulong) os_thread_pf(rwlock->writer_thread),
693 - rwlock->writer == RW_LOCK_EX
694 + writer == RW_LOCK_EX
696 : " wait exclusive\n");
700 - "number of readers %lu, waiters flag %lu\n"
701 + "number of readers %lu, s_waiters flag %lu, x_waiters flag %lu\n"
702 "Last time read locked in file %s line %lu\n"
703 "Last time write locked in file %s line %lu\n",
704 (ulong) rwlock->reader_count,
705 - (ulong) rwlock->waiters,
706 + (ulong) rwlock->s_waiters,
707 + (ulong) (rwlock->x_waiters || rwlock->wait_ex_waiters),
708 rwlock->last_s_file_name,
709 (ulong) rwlock->last_s_line,
710 rwlock->last_x_file_name,
711 @@ -839,11 +837,15 @@
712 /*========================*/
713 sync_array_t* arr) /* in: wait array */
715 +#ifdef HAVE_ATOMIC_BUILTINS
716 + __sync_fetch_and_add(&(arr->sg_count),1);
718 sync_array_enter(arr);
722 sync_array_exit(arr);
726 /**************************************************************************
727 @@ -880,19 +882,23 @@
729 mutex = cell->wait_object;
730 os_event_set(mutex->event);
732 } else if (cell->request_type
733 == RW_LOCK_WAIT_EX) {
736 lock = cell->wait_object;
737 os_event_set(lock->wait_ex_event);
740 + } else if (cell->request_type
741 + == RW_LOCK_SHARED) {
744 lock = cell->wait_object;
745 - os_event_set(lock->event);
746 + os_event_set(lock->s_event);
750 + lock = cell->wait_object;
751 + os_event_set(lock->x_event);
755 diff -ruN a/innobase/sync/sync0rw.c b/innobase/sync/sync0rw.c
756 --- a/innobase/sync/sync0rw.c 2009-01-30 06:42:24.000000000 +0900
757 +++ b/innobase/sync/sync0rw.c 2009-04-16 17:33:59.000000000 +0900
759 object is created, then the following call initializes
762 +#ifndef HAVE_ATOMIC_BUILTINS
763 mutex_create(rw_lock_get_mutex(lock));
764 mutex_set_level(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);
767 lock->mutex.cmutex_name = cmutex_name;
768 lock->mutex.mutex_type = 1;
769 #endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
770 +#endif /* !HAVE_ATOMIC_BUILTINS */
772 - rw_lock_set_waiters(lock, 0);
773 +#ifdef HAVE_ATOMIC_BUILTINS
774 + lock->lock_word = RW_LOCK_BIAS;
776 + rw_lock_set_s_waiters(lock, 0);
777 + rw_lock_set_x_waiters(lock, 0);
778 + rw_lock_set_wx_waiters(lock, 0);
779 rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
780 lock->writer_count = 0;
781 rw_lock_set_reader_count(lock, 0);
783 lock->last_x_file_name = "not yet reserved";
784 lock->last_s_line = 0;
785 lock->last_x_line = 0;
786 - lock->event = os_event_create(NULL);
789 + lock->s_event = os_event_create(NULL);
790 + lock->x_event = os_event_create(NULL);
791 lock->wait_ex_event = os_event_create(NULL);
794 mutex_enter(&rw_lock_list_mutex);
796 @@ -162,19 +167,21 @@
797 ut_a(rw_lock_validate(lock));
798 #endif /* UNIV_DEBUG */
799 ut_a(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
800 - ut_a(rw_lock_get_waiters(lock) == 0);
801 + ut_a(rw_lock_get_s_waiters(lock) == 0);
802 + ut_a(rw_lock_get_x_waiters(lock) == 0);
803 + ut_a(rw_lock_get_wx_waiters(lock) == 0);
804 ut_a(rw_lock_get_reader_count(lock) == 0);
808 +#ifndef HAVE_ATOMIC_BUILTINS
809 mutex_free(rw_lock_get_mutex(lock));
812 mutex_enter(&rw_lock_list_mutex);
813 - os_event_free(lock->event);
816 + os_event_free(lock->s_event);
817 + os_event_free(lock->x_event);
818 os_event_free(lock->wait_ex_event);
821 if (UT_LIST_GET_PREV(list, lock)) {
822 ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
823 @@ -192,26 +199,43 @@
824 Checks that the rw-lock has been initialized and that there are no
825 simultaneous shared and exclusive locks. */
827 +/* MEMO: If HAVE_ATOMIC_BUILTINS is defined, this function should be used statically. */
837 +#ifndef HAVE_ATOMIC_BUILTINS
838 mutex_enter(rw_lock_get_mutex(lock));
841 ut_a(lock->magic_n == RW_LOCK_MAGIC_N);
842 +#ifndef HAVE_ATOMIC_BUILTINS
843 ut_a((rw_lock_get_reader_count(lock) == 0)
844 || (rw_lock_get_writer(lock) != RW_LOCK_EX));
845 - ut_a((rw_lock_get_writer(lock) == RW_LOCK_EX)
846 - || (rw_lock_get_writer(lock) == RW_LOCK_WAIT_EX)
847 - || (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED));
848 - ut_a((rw_lock_get_waiters(lock) == 0)
849 - || (rw_lock_get_waiters(lock) == 1));
851 + test = rw_lock_get_writer(lock);
852 + ut_a((test == RW_LOCK_EX)
853 + || (test == RW_LOCK_WAIT_EX)
854 + || (test == RW_LOCK_NOT_LOCKED));
855 + test = rw_lock_get_s_waiters(lock);
858 + test = rw_lock_get_x_waiters(lock);
861 + test = rw_lock_get_wx_waiters(lock);
864 +#ifndef HAVE_ATOMIC_BUILTINS
865 ut_a((lock->writer != RW_LOCK_EX) || (lock->writer_count > 0));
867 mutex_exit(rw_lock_get_mutex(lock));
872 @@ -237,13 +261,14 @@
873 ut_ad(rw_lock_validate(lock));
878 rw_s_spin_wait_count++;
880 /* Spin waiting for the writer field to become free */
883 - while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED
884 - && i < SYNC_SPIN_ROUNDS) {
885 + while (i < SYNC_SPIN_ROUNDS
886 + && rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
887 if (srv_spin_wait_delay) {
888 ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
890 @@ -262,15 +287,27 @@
891 lock->cfile_name, (ulong) lock->cline, (ulong) i);
894 +#ifndef HAVE_ATOMIC_BUILTINS
895 mutex_enter(rw_lock_get_mutex(lock));
898 /* We try once again to obtain the lock */
900 if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
901 +#ifndef HAVE_ATOMIC_BUILTINS
902 mutex_exit(rw_lock_get_mutex(lock));
905 return; /* Success */
907 +#ifdef HAVE_ATOMIC_BUILTINS
908 + /* as done in sync0sync.c */
911 + if (i < SYNC_SPIN_ROUNDS) {
915 /* If we get here, locking did not succeed, we may
916 suspend the thread to wait in the wait array */
922 - rw_lock_set_waiters(lock, 1);
923 + rw_lock_set_s_waiters(lock, 1);
925 +#ifdef HAVE_ATOMIC_BUILTINS
926 + /* as done in sync0sync.c */
927 + for (i = 0; i < 4; i++) {
928 + if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
929 + sync_array_free_cell(sync_primary_wait_array, index);
930 + return; /* Success */
934 + /* If a wait_ex waiter has stalled, wake it. */
935 + if (lock->reader_count == 0
936 + && __sync_lock_test_and_set(&lock->wait_ex_waiters, 0)) {
937 + os_event_set(lock->wait_ex_event);
938 + sync_array_object_signalled(sync_primary_wait_array);
941 mutex_exit(rw_lock_get_mutex(lock));
944 if (srv_print_latch_waits) {
946 @@ -318,13 +372,19 @@
948 ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
950 +#ifndef HAVE_ATOMIC_BUILTINS
951 mutex_enter(&(lock->mutex));
954 lock->writer_thread = os_thread_get_curr_id();
958 +#ifndef HAVE_ATOMIC_BUILTINS
959 mutex_exit(&(lock->mutex));
961 + __sync_synchronize();
965 /**********************************************************************
967 const char* file_name,/* in: file name where lock requested */
968 ulint line) /* in: line where requested */
970 +#ifdef HAVE_ATOMIC_BUILTINS
971 + os_thread_id_t curr_thread = os_thread_get_curr_id();
973 + /* try to lock writer */
974 + if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
975 + == RW_LOCK_NOT_LOCKED) {
977 + /* obtain RW_LOCK_WAIT_EX right */
978 + lock->writer_thread = curr_thread;
980 + lock->writer_is_wait_ex = TRUE;
981 + /* an atomic operation may provide stronger memory-ordering guarantees. */
982 + __sync_synchronize();
983 +#ifdef UNIV_SYNC_DEBUG
984 + rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
989 + if (!os_thread_eq(lock->writer_thread, curr_thread)) {
990 + return(RW_LOCK_NOT_LOCKED);
993 + switch(rw_lock_get_writer(lock)) {
994 + case RW_LOCK_WAIT_EX:
995 + /* have right to try x-lock */
998 + if(__sync_sub_and_fetch(&(lock->lock_word),
999 + RW_LOCK_BIAS) == 0) {
1001 + lock->pass = pass;
1002 + lock->writer_is_wait_ex = FALSE;
1003 + __sync_fetch_and_add(&(lock->writer_count),1);
1005 +#ifdef UNIV_SYNC_DEBUG
1006 + rw_lock_remove_debug_info(lock, pass, RW_LOCK_WAIT_EX);
1007 + rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
1011 + lock->last_x_file_name = file_name;
1012 + lock->last_x_line = line;
1014 + /* Locking succeeded, we may return */
1015 + return(RW_LOCK_EX);
1016 + } else if(__sync_fetch_and_add(&(lock->lock_word),
1017 + RW_LOCK_BIAS) == 0) {
1018 + /* retry x-lock */
1019 + goto retry_x_lock;
1022 + /* There are readers, we have to wait */
1023 + return(RW_LOCK_WAIT_EX);
1028 + /* already have x-lock */
1029 + if ((lock->pass == 0)&&(pass == 0)) {
1030 + __sync_fetch_and_add(&(lock->writer_count),1);
1032 +#ifdef UNIV_SYNC_DEBUG
1033 + rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, file_name,
1037 + lock->last_x_file_name = file_name;
1038 + lock->last_x_line = line;
1040 + /* Locking succeeded, we may return */
1041 + return(RW_LOCK_EX);
1044 + return(RW_LOCK_NOT_LOCKED);
1048 + default: /* RW_LOCK_NOT_LOCKED? maybe impossible */
1049 + goto retry_writer;
1051 +#else /* HAVE_ATOMIC_BUILTINS */
1053 #ifdef UNIV_SYNC_DEBUG
1054 ut_ad(mutex_own(rw_lock_get_mutex(lock)));
1055 #endif /* UNIV_SYNC_DEBUG */
1057 /* Locking succeeded, we may return */
1060 +#endif /* HAVE_ATOMIC_BUILTINS */
1062 /* Locking did not succeed */
1063 return(RW_LOCK_NOT_LOCKED);
1064 @@ -448,19 +592,33 @@
1065 ulint line) /* in: line where requested */
1067 ulint index; /* index of the reserved wait cell */
1068 - ulint state; /* lock state acquired */
1069 + ulint state = RW_LOCK_NOT_LOCKED; /* lock state acquired */
1070 +#ifdef HAVE_ATOMIC_BUILTINS
1071 + ulint prev_state = RW_LOCK_NOT_LOCKED;
1073 ulint i; /* spin round count */
1075 ut_ad(rw_lock_validate(lock));
1080 +#ifdef HAVE_ATOMIC_BUILTINS
1081 + prev_state = state;
1083 /* Acquire the mutex protecting the rw-lock fields */
1084 mutex_enter_fast(&(lock->mutex));
1087 state = rw_lock_x_lock_low(lock, pass, file_name, line);
1089 +#ifdef HAVE_ATOMIC_BUILTINS
1090 + if (state != prev_state) i=0; /* if progress, reset counter. */
1092 mutex_exit(&(lock->mutex));
1096 if (state == RW_LOCK_EX) {
1098 return; /* Locking succeeded */
1099 @@ -468,10 +626,9 @@
1100 } else if (state == RW_LOCK_NOT_LOCKED) {
1102 /* Spin waiting for the writer field to become free */
1105 - while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED
1106 - && i < SYNC_SPIN_ROUNDS) {
1107 + while (i < SYNC_SPIN_ROUNDS
1108 + && lock->lock_word != RW_LOCK_BIAS) {
1109 if (srv_spin_wait_delay) {
1110 ut_delay(ut_rnd_interval(0,
1111 srv_spin_wait_delay));
1112 @@ -485,9 +642,12 @@
1113 } else if (state == RW_LOCK_WAIT_EX) {
1115 /* Spin waiting for the reader count field to become zero */
1118 +#ifdef HAVE_ATOMIC_BUILTINS
1119 + while (lock->lock_word != RW_LOCK_BIAS
1121 while (rw_lock_get_reader_count(lock) != 0
1123 && i < SYNC_SPIN_ROUNDS) {
1124 if (srv_spin_wait_delay) {
1125 ut_delay(ut_rnd_interval(0,
1130 - i = 0; /* Eliminate a compiler warning */
1134 @@ -516,34 +675,69 @@
1135 /* We try once again to obtain the lock. Acquire the mutex protecting
1136 the rw-lock fields */
1138 +#ifdef HAVE_ATOMIC_BUILTINS
1139 + prev_state = state;
1141 mutex_enter(rw_lock_get_mutex(lock));
1144 state = rw_lock_x_lock_low(lock, pass, file_name, line);
1146 +#ifdef HAVE_ATOMIC_BUILTINS
1147 + if (state != prev_state) i=0; /* if progress, reset counter. */
1150 if (state == RW_LOCK_EX) {
1151 +#ifndef HAVE_ATOMIC_BUILTINS
1152 mutex_exit(rw_lock_get_mutex(lock));
1155 return; /* Locking succeeded */
1158 +#ifdef HAVE_ATOMIC_BUILTINS
1159 + /* as done in sync0sync.c */
1162 + if (i < SYNC_SPIN_ROUNDS) {
1167 rw_x_system_call_count++;
1169 sync_array_reserve_cell(sync_primary_wait_array,
1172 - /* On windows RW_LOCK_WAIT_EX signifies
1173 - that this thread should wait on the
1174 - special wait_ex_event. */
1175 (state == RW_LOCK_WAIT_EX)
1182 - rw_lock_set_waiters(lock, 1);
1183 + if (state == RW_LOCK_WAIT_EX) {
1184 + rw_lock_set_wx_waiters(lock, 1);
1186 + rw_lock_set_x_waiters(lock, 1);
1189 +#ifdef HAVE_ATOMIC_BUILTINS
1190 + /* as done in sync0sync.c */
1191 + for (i = 0; i < 4; i++) {
1192 + prev_state = state;
1193 + state = rw_lock_x_lock_low(lock, pass, file_name, line);
1194 + if (state == RW_LOCK_EX) {
1195 + sync_array_free_cell(sync_primary_wait_array, index);
1196 + return; /* Locking succeeded */
1198 + if (state != prev_state) {
1200 + sync_array_free_cell(sync_primary_wait_array, index);
1205 mutex_exit(rw_lock_get_mutex(lock));
1208 if (srv_print_latch_waits) {
1212 ut_ad(rw_lock_validate(lock));
1214 +#ifndef HAVE_ATOMIC_BUILTINS
1215 mutex_enter(&(lock->mutex));
1218 info = UT_LIST_GET_FIRST(lock->debug_list);
1221 && (info->pass == 0)
1222 && (info->lock_type == lock_type)) {
1224 +#ifndef HAVE_ATOMIC_BUILTINS
1225 mutex_exit(&(lock->mutex));
1232 info = UT_LIST_GET_NEXT(list, info);
1234 +#ifndef HAVE_ATOMIC_BUILTINS
1235 mutex_exit(&(lock->mutex));
1240 @@ -758,21 +958,25 @@
1242 ut_ad(rw_lock_validate(lock));
1244 +#ifndef HAVE_ATOMIC_BUILTINS
1245 mutex_enter(&(lock->mutex));
1248 if (lock_type == RW_LOCK_SHARED) {
1249 if (lock->reader_count > 0) {
1252 } else if (lock_type == RW_LOCK_EX) {
1253 - if (lock->writer == RW_LOCK_EX) {
1254 + if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
1261 +#ifndef HAVE_ATOMIC_BUILTINS
1262 mutex_exit(&(lock->mutex));
1267 @@ -801,16 +1005,26 @@
1271 +#ifndef HAVE_ATOMIC_BUILTINS
1272 mutex_enter(&(lock->mutex));
1275 if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
1276 || (rw_lock_get_reader_count(lock) != 0)
1277 - || (rw_lock_get_waiters(lock) != 0)) {
1278 + || (rw_lock_get_s_waiters(lock) != 0)
1279 + || (rw_lock_get_x_waiters(lock) != 0)
1280 + || (rw_lock_get_wx_waiters(lock) != 0)) {
1282 fprintf(stderr, "RW-LOCK: %p ", lock);
1284 - if (rw_lock_get_waiters(lock)) {
1285 - fputs(" Waiters for the lock exist\n", stderr);
1286 + if (rw_lock_get_s_waiters(lock)) {
1287 + fputs(" s_waiters for the lock exist,", stderr);
1289 + if (rw_lock_get_x_waiters(lock)) {
1290 + fputs(" x_waiters for the lock exist\n", stderr);
1292 + if (rw_lock_get_wx_waiters(lock)) {
1293 + fputs(" wait_ex_waiters for the lock exist\n", stderr);
1297 @@ -822,7 +1036,9 @@
1301 +#ifndef HAVE_ATOMIC_BUILTINS
1302 mutex_exit(&(lock->mutex));
1304 lock = UT_LIST_GET_NEXT(list, lock);
1307 @@ -847,10 +1063,18 @@
1309 if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
1310 || (rw_lock_get_reader_count(lock) != 0)
1311 - || (rw_lock_get_waiters(lock) != 0)) {
1312 + || (rw_lock_get_s_waiters(lock) != 0)
1313 + || (rw_lock_get_x_waiters(lock) != 0)
1314 + || (rw_lock_get_wx_waiters(lock) != 0)) {
1316 - if (rw_lock_get_waiters(lock)) {
1317 - fputs(" Waiters for the lock exist\n", stderr);
1318 + if (rw_lock_get_s_waiters(lock)) {
1319 + fputs(" s_waiters for the lock exist,", stderr);
1321 + if (rw_lock_get_x_waiters(lock)) {
1322 + fputs(" x_waiters for the lock exist\n", stderr);
1324 + if (rw_lock_get_wx_waiters(lock)) {
1325 + fputs(" wait_ex_waiters for the lock exist\n", stderr);
1329 @@ -909,14 +1133,18 @@
1330 lock = UT_LIST_GET_FIRST(rw_lock_list);
1332 while (lock != NULL) {
1333 +#ifndef HAVE_ATOMIC_BUILTINS
1334 mutex_enter(rw_lock_get_mutex(lock));
1337 if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
1338 || (rw_lock_get_reader_count(lock) != 0)) {
1342 +#ifndef HAVE_ATOMIC_BUILTINS
1343 mutex_exit(rw_lock_get_mutex(lock));
1345 lock = UT_LIST_GET_NEXT(list, lock);
1348 diff -ruN a/patch_info/innodb_rw_lock.info b/patch_info/innodb_rw_lock.info
1349 --- /dev/null 1970-01-01 09:00:00.000000000 +0900
1350 +++ b/patch_info/innodb_rw_lock.info 2009-04-16 16:15:28.000000000 +0900
1352 +File=innodb_rw_lock.patch
1353 +Name=Fix of InnoDB rw_locks
1355 +Author=Yasufumi Kinoshita