1 # name : innodb_split_buf_pool_mutex.patch
2 # introduced : 11 or before
3 # maintainer : Yasufumi
6 # Any small change to this file in the main branch
7 # should be done or reviewed by the maintainer!
8 --- a/storage/innobase/btr/btr0cur.c
9 +++ b/storage/innobase/btr/btr0cur.c
14 - buf_pool_mutex_enter(buf_pool);
15 + //buf_pool_mutex_enter(buf_pool);
16 + mutex_enter(&buf_pool->LRU_list_mutex);
17 mutex_enter(&block->mutex);
19 /* Only free the block if it is still allocated to
20 @@ -4096,16 +4097,21 @@
21 && buf_block_get_space(block) == space
22 && buf_block_get_page_no(block) == page_no) {
24 - if (!buf_LRU_free_block(&block->page, all)
25 - && all && block->page.zip.data) {
26 + if (!buf_LRU_free_block(&block->page, all, TRUE)
27 + && all && block->page.zip.data
28 + /* Now, buf_LRU_free_block() may release mutex temporarily */
29 + && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
30 + && buf_block_get_space(block) == space
31 + && buf_block_get_page_no(block) == page_no) {
32 /* Attempt to deallocate the uncompressed page
33 if the whole block cannot be deallocted. */
35 - buf_LRU_free_block(&block->page, FALSE);
36 + buf_LRU_free_block(&block->page, FALSE, TRUE);
40 - buf_pool_mutex_exit(buf_pool);
41 + //buf_pool_mutex_exit(buf_pool);
42 + mutex_exit(&buf_pool->LRU_list_mutex);
43 mutex_exit(&block->mutex);
46 --- a/storage/innobase/btr/btr0sea.c
47 +++ b/storage/innobase/btr/btr0sea.c
49 rec_offs_init(offsets_);
51 rw_lock_x_lock(&btr_search_latch);
52 - buf_pool_mutex_enter_all();
53 + buf_pool_page_hash_x_lock_all();
55 cell_count = hash_get_n_cells(btr_search_sys->hash_index);
57 @@ -1980,11 +1980,11 @@
58 /* We release btr_search_latch every once in a while to
59 give other queries a chance to run. */
60 if ((i != 0) && ((i % chunk_size) == 0)) {
61 - buf_pool_mutex_exit_all();
62 + buf_pool_page_hash_x_unlock_all();
63 rw_lock_x_unlock(&btr_search_latch);
65 rw_lock_x_lock(&btr_search_latch);
66 - buf_pool_mutex_enter_all();
67 + buf_pool_page_hash_x_lock_all();
70 node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
71 @@ -2093,11 +2093,11 @@
72 /* We release btr_search_latch every once in a while to
73 give other queries a chance to run. */
75 - buf_pool_mutex_exit_all();
76 + buf_pool_page_hash_x_unlock_all();
77 rw_lock_x_unlock(&btr_search_latch);
79 rw_lock_x_lock(&btr_search_latch);
80 - buf_pool_mutex_enter_all();
81 + buf_pool_page_hash_x_lock_all();
84 if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
89 - buf_pool_mutex_exit_all();
90 + buf_pool_page_hash_x_unlock_all();
91 rw_lock_x_unlock(&btr_search_latch);
92 if (UNIV_LIKELY_NULL(heap)) {
94 --- a/storage/innobase/buf/buf0buddy.c
95 +++ b/storage/innobase/buf/buf0buddy.c
98 /** Validate a given zip_free list. */
99 #define BUF_BUDDY_LIST_VALIDATE(b, i) \
100 - UT_LIST_VALIDATE(list, buf_page_t, \
101 + UT_LIST_VALIDATE(zip_list, buf_page_t, \
103 ut_ad(buf_page_get_state( \
106 ulint i) /*!< in: index of
107 buf_pool->zip_free[] */
109 - ut_ad(buf_pool_mutex_own(buf_pool));
110 + //ut_ad(buf_pool_mutex_own(buf_pool));
111 + ut_ad(mutex_own(&buf_pool->zip_free_mutex));
112 ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
113 ut_ad(buf_pool->zip_free[i].start != bpage);
114 - UT_LIST_ADD_FIRST(list, buf_pool->zip_free[i], bpage);
115 + UT_LIST_ADD_FIRST(zip_list, buf_pool->zip_free[i], bpage);
118 /**********************************************************************//**
120 buf_pool->zip_free[] */
123 - buf_page_t* prev = UT_LIST_GET_PREV(list, bpage);
124 - buf_page_t* next = UT_LIST_GET_NEXT(list, bpage);
125 + buf_page_t* prev = UT_LIST_GET_PREV(zip_list, bpage);
126 + buf_page_t* next = UT_LIST_GET_NEXT(zip_list, bpage);
128 ut_ad(!prev || buf_page_get_state(prev) == BUF_BLOCK_ZIP_FREE);
129 ut_ad(!next || buf_page_get_state(next) == BUF_BLOCK_ZIP_FREE);
130 #endif /* UNIV_DEBUG */
132 - ut_ad(buf_pool_mutex_own(buf_pool));
133 + //ut_ad(buf_pool_mutex_own(buf_pool));
134 + ut_ad(mutex_own(&buf_pool->zip_free_mutex));
135 ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
136 - UT_LIST_REMOVE(list, buf_pool->zip_free[i], bpage);
137 + UT_LIST_REMOVE(zip_list, buf_pool->zip_free[i], bpage);
140 /**********************************************************************//**
145 - ut_ad(buf_pool_mutex_own(buf_pool));
146 + //ut_ad(buf_pool_mutex_own(buf_pool));
147 + ut_ad(mutex_own(&buf_pool->zip_free_mutex));
148 ut_a(i < BUF_BUDDY_SIZES);
149 ut_a(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));
151 @@ -159,16 +162,19 @@
152 buf_buddy_block_free(
153 /*=================*/
154 buf_pool_t* buf_pool, /*!< in: buffer pool instance */
155 - void* buf) /*!< in: buffer frame to deallocate */
156 + void* buf, /*!< in: buffer frame to deallocate */
157 + ibool have_page_hash_mutex)
159 const ulint fold = BUF_POOL_ZIP_FOLD_PTR(buf);
163 - ut_ad(buf_pool_mutex_own(buf_pool));
164 + //ut_ad(buf_pool_mutex_own(buf_pool));
165 ut_ad(!mutex_own(&buf_pool->zip_mutex));
166 ut_a(!ut_align_offset(buf, UNIV_PAGE_SIZE));
168 + mutex_enter(&buf_pool->zip_hash_mutex);
170 HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
171 ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY
172 && bpage->in_zip_hash && !bpage->in_page_hash),
173 @@ -180,12 +186,14 @@
174 ut_d(bpage->in_zip_hash = FALSE);
175 HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);
177 + mutex_exit(&buf_pool->zip_hash_mutex);
179 ut_d(memset(buf, 0, UNIV_PAGE_SIZE));
180 UNIV_MEM_INVALID(buf, UNIV_PAGE_SIZE);
182 block = (buf_block_t*) bpage;
183 mutex_enter(&block->mutex);
184 - buf_LRU_block_free_non_file_page(block);
185 + buf_LRU_block_free_non_file_page(block, have_page_hash_mutex);
186 mutex_exit(&block->mutex);
188 ut_ad(buf_pool->buddy_n_frames > 0);
191 buf_pool_t* buf_pool = buf_pool_from_block(block);
192 const ulint fold = BUF_POOL_ZIP_FOLD(block);
193 - ut_ad(buf_pool_mutex_own(buf_pool));
194 + //ut_ad(buf_pool_mutex_own(buf_pool));
195 ut_ad(!mutex_own(&buf_pool->zip_mutex));
196 ut_ad(buf_block_get_state(block) == BUF_BLOCK_READY_FOR_USE);
199 ut_ad(!block->page.in_page_hash);
200 ut_ad(!block->page.in_zip_hash);
201 ut_d(block->page.in_zip_hash = TRUE);
203 + mutex_enter(&buf_pool->zip_hash_mutex);
204 HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);
205 + mutex_exit(&buf_pool->zip_hash_mutex);
207 ut_d(buf_pool->buddy_n_frames++);
209 @@ -268,26 +279,30 @@
210 buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
211 ulint i, /*!< in: index of buf_pool->zip_free[],
212 or BUF_BUDDY_SIZES */
213 - ibool* lru) /*!< in: pointer to a variable that
214 + ibool* lru, /*!< in: pointer to a variable that
215 will be assigned TRUE if storage was
216 allocated from the LRU list and
217 buf_pool->mutex was temporarily
219 + ibool have_page_hash_mutex)
224 - ut_ad(buf_pool_mutex_own(buf_pool));
225 + //ut_ad(buf_pool_mutex_own(buf_pool));
226 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
227 ut_ad(!mutex_own(&buf_pool->zip_mutex));
228 ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));
230 if (i < BUF_BUDDY_SIZES) {
231 /* Try to allocate from the buddy system. */
232 + mutex_enter(&buf_pool->zip_free_mutex);
233 block = buf_buddy_alloc_zip(buf_pool, i);
238 + mutex_exit(&buf_pool->zip_free_mutex);
241 /* Try allocating from the buf_pool->free list. */
242 @@ -299,19 +314,30 @@
245 /* Try replacing an uncompressed page in the buffer pool. */
246 - buf_pool_mutex_exit(buf_pool);
247 + //buf_pool_mutex_exit(buf_pool);
248 + mutex_exit(&buf_pool->LRU_list_mutex);
249 + if (have_page_hash_mutex) {
250 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
252 block = buf_LRU_get_free_block(buf_pool);
254 - buf_pool_mutex_enter(buf_pool);
255 + //buf_pool_mutex_enter(buf_pool);
256 + mutex_enter(&buf_pool->LRU_list_mutex);
257 + if (have_page_hash_mutex) {
258 + rw_lock_x_lock(&buf_pool->page_hash_latch);
262 buf_buddy_block_register(block);
264 + mutex_enter(&buf_pool->zip_free_mutex);
265 block = buf_buddy_alloc_from(
266 buf_pool, block->frame, i, BUF_BUDDY_SIZES);
269 buf_pool->buddy_stat[i].used++;
270 + mutex_exit(&buf_pool->zip_free_mutex);
276 buf_pool_t* buf_pool, /*!< in: buffer pool instance */
277 void* src, /*!< in: block to relocate */
278 void* dst, /*!< in: free block to relocate to */
279 - ulint i) /*!< in: index of
280 + ulint i, /*!< in: index of
281 buf_pool->zip_free[] */
282 + ibool have_page_hash_mutex)
285 const ulint size = BUF_BUDDY_LOW << i;
286 @@ -334,13 +361,20 @@
290 - ut_ad(buf_pool_mutex_own(buf_pool));
291 + //ut_ad(buf_pool_mutex_own(buf_pool));
292 + ut_ad(mutex_own(&buf_pool->zip_free_mutex));
293 ut_ad(!mutex_own(&buf_pool->zip_mutex));
294 ut_ad(!ut_align_offset(src, size));
295 ut_ad(!ut_align_offset(dst, size));
296 ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));
297 UNIV_MEM_ASSERT_W(dst, size);
299 + if (!have_page_hash_mutex) {
300 + mutex_exit(&buf_pool->zip_free_mutex);
301 + mutex_enter(&buf_pool->LRU_list_mutex);
302 + rw_lock_x_lock(&buf_pool->page_hash_latch);
305 /* We assume that all memory from buf_buddy_alloc()
306 is used for compressed page frames. */
309 added to buf_pool->page_hash yet. Obviously,
310 it cannot be relocated. */
312 + if (!have_page_hash_mutex) {
313 + mutex_enter(&buf_pool->zip_free_mutex);
314 + mutex_exit(&buf_pool->LRU_list_mutex);
315 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
320 @@ -383,18 +422,27 @@
321 For the sake of simplicity, give up. */
322 ut_ad(page_zip_get_size(&bpage->zip) < size);
324 + if (!have_page_hash_mutex) {
325 + mutex_enter(&buf_pool->zip_free_mutex);
326 + mutex_exit(&buf_pool->LRU_list_mutex);
327 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
332 + /* To keep latch order */
333 + if (have_page_hash_mutex)
334 + mutex_exit(&buf_pool->zip_free_mutex);
336 /* The block must have been allocated, but it may
337 contain uninitialized data. */
338 UNIV_MEM_ASSERT_W(src, size);
340 - mutex = buf_page_get_mutex(bpage);
341 + mutex = buf_page_get_mutex_enter(bpage);
343 - mutex_enter(mutex);
344 + mutex_enter(&buf_pool->zip_free_mutex);
346 - if (buf_page_can_relocate(bpage)) {
347 + if (mutex && buf_page_can_relocate(bpage)) {
348 /* Relocate the compressed page. */
349 ullint usec = ut_time_us(NULL);
350 ut_a(bpage->zip.data == src);
351 @@ -409,10 +457,22 @@
352 buddy_stat->relocated_usec
353 += ut_time_us(NULL) - usec;
356 + if (!have_page_hash_mutex) {
357 + mutex_exit(&buf_pool->LRU_list_mutex);
358 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
364 + if (!have_page_hash_mutex) {
365 + mutex_exit(&buf_pool->LRU_list_mutex);
366 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
375 @@ -425,13 +485,15 @@
376 buf_pool_t* buf_pool, /*!< in: buffer pool instance */
377 void* buf, /*!< in: block to be freed, must not be
378 pointed to by the buffer pool */
379 - ulint i) /*!< in: index of buf_pool->zip_free[],
380 + ulint i, /*!< in: index of buf_pool->zip_free[],
381 or BUF_BUDDY_SIZES */
382 + ibool have_page_hash_mutex)
387 - ut_ad(buf_pool_mutex_own(buf_pool));
388 + //ut_ad(buf_pool_mutex_own(buf_pool));
389 + ut_ad(mutex_own(&buf_pool->zip_free_mutex));
390 ut_ad(!mutex_own(&buf_pool->zip_mutex));
391 ut_ad(i <= BUF_BUDDY_SIZES);
392 ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));
394 ((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE;
396 if (i == BUF_BUDDY_SIZES) {
397 - buf_buddy_block_free(buf_pool, buf);
398 + mutex_exit(&buf_pool->zip_free_mutex);
399 + buf_buddy_block_free(buf_pool, buf, have_page_hash_mutex);
400 + mutex_enter(&buf_pool->zip_free_mutex);
407 UNIV_MEM_ASSERT_W(bpage, BUF_BUDDY_LOW << i);
408 - bpage = UT_LIST_GET_NEXT(list, bpage);
409 + bpage = UT_LIST_GET_NEXT(zip_list, bpage);
412 #ifndef UNIV_DEBUG_VALGRIND
414 ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
416 /* The buddy is not free. Is there a free block of this size? */
417 - bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
418 + bpage = UT_LIST_GET_LAST(buf_pool->zip_free[i]);
423 buf_buddy_remove_from_free(buf_pool, bpage, i);
425 /* Try to relocate the buddy of buf to the free block. */
426 - if (buf_buddy_relocate(buf_pool, buddy, bpage, i)) {
427 + if (buf_buddy_relocate(buf_pool, buddy, bpage, i, have_page_hash_mutex)) {
429 buddy->state = BUF_BLOCK_ZIP_FREE;
431 --- a/storage/innobase/buf/buf0buf.c
432 +++ b/storage/innobase/buf/buf0buf.c
434 #ifdef UNIV_PFS_RWLOCK
435 /* Keys to register buffer block related rwlocks and mutexes with
436 performance schema */
437 +UNIV_INTERN mysql_pfs_key_t buf_pool_page_hash_key;
438 UNIV_INTERN mysql_pfs_key_t buf_block_lock_key;
439 # ifdef UNIV_SYNC_DEBUG
440 UNIV_INTERN mysql_pfs_key_t buf_block_debug_latch_key;
442 UNIV_INTERN mysql_pfs_key_t buffer_block_mutex_key;
443 UNIV_INTERN mysql_pfs_key_t buf_pool_mutex_key;
444 UNIV_INTERN mysql_pfs_key_t buf_pool_zip_mutex_key;
445 +UNIV_INTERN mysql_pfs_key_t buf_pool_LRU_list_mutex_key;
446 +UNIV_INTERN mysql_pfs_key_t buf_pool_free_list_mutex_key;
447 +UNIV_INTERN mysql_pfs_key_t buf_pool_zip_free_mutex_key;
448 +UNIV_INTERN mysql_pfs_key_t buf_pool_zip_hash_mutex_key;
449 UNIV_INTERN mysql_pfs_key_t flush_list_mutex_key;
450 #endif /* UNIV_PFS_MUTEX */
453 block->page.in_zip_hash = FALSE;
454 block->page.in_flush_list = FALSE;
455 block->page.in_free_list = FALSE;
456 - block->in_unzip_LRU_list = FALSE;
457 #endif /* UNIV_DEBUG */
458 + block->page.flush_list.prev = NULL;
459 + block->page.flush_list.next = NULL;
460 + block->page.zip_list.prev = NULL;
461 + block->page.zip_list.next = NULL;
462 block->page.in_LRU_list = FALSE;
463 + block->in_unzip_LRU_list = FALSE;
464 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
465 block->n_pointers = 0;
466 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
468 memset(block->frame, '\0', UNIV_PAGE_SIZE);
470 /* Add the block to the free list */
471 - UT_LIST_ADD_LAST(list, buf_pool->free, (&block->page));
472 + mutex_enter(&buf_pool->free_list_mutex);
473 + UT_LIST_ADD_LAST(free, buf_pool->free, (&block->page));
475 ut_d(block->page.in_free_list = TRUE);
476 + mutex_exit(&buf_pool->free_list_mutex);
477 ut_ad(buf_pool_from_block(block) == buf_pool);
480 @@ -1037,7 +1048,8 @@
481 buf_chunk_t* chunk = buf_pool->chunks;
484 - ut_ad(buf_pool_mutex_own(buf_pool));
485 + //ut_ad(buf_pool_mutex_own(buf_pool));
486 + ut_ad(mutex_own(&buf_pool->zip_free_mutex));
487 for (n = buf_pool->n_chunks; n--; chunk++) {
489 buf_block_t* block = buf_chunk_contains_zip(chunk, data);
490 @@ -1143,9 +1155,21 @@
491 ------------------------------- */
492 mutex_create(buf_pool_mutex_key,
493 &buf_pool->mutex, SYNC_BUF_POOL);
494 + mutex_create(buf_pool_LRU_list_mutex_key,
495 + &buf_pool->LRU_list_mutex, SYNC_BUF_LRU_LIST);
496 + rw_lock_create(buf_pool_page_hash_key,
497 + &buf_pool->page_hash_latch, SYNC_BUF_PAGE_HASH);
498 + mutex_create(buf_pool_free_list_mutex_key,
499 + &buf_pool->free_list_mutex, SYNC_BUF_FREE_LIST);
500 + mutex_create(buf_pool_zip_free_mutex_key,
501 + &buf_pool->zip_free_mutex, SYNC_BUF_ZIP_FREE);
502 + mutex_create(buf_pool_zip_hash_mutex_key,
503 + &buf_pool->zip_hash_mutex, SYNC_BUF_ZIP_HASH);
504 mutex_create(buf_pool_zip_mutex_key,
505 &buf_pool->zip_mutex, SYNC_BUF_BLOCK);
507 + mutex_enter(&buf_pool->LRU_list_mutex);
508 + rw_lock_x_lock(&buf_pool->page_hash_latch);
509 buf_pool_mutex_enter(buf_pool);
511 if (buf_pool_size > 0) {
512 @@ -1158,6 +1182,8 @@
516 + mutex_exit(&buf_pool->LRU_list_mutex);
517 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
518 buf_pool_mutex_exit(buf_pool);
521 @@ -1188,6 +1214,8 @@
523 /* All fields are initialized by mem_zalloc(). */
525 + mutex_exit(&buf_pool->LRU_list_mutex);
526 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
527 buf_pool_mutex_exit(buf_pool);
530 @@ -1339,7 +1367,11 @@
532 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
534 - ut_ad(buf_pool_mutex_own(buf_pool));
535 + //ut_ad(buf_pool_mutex_own(buf_pool));
536 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
537 +#ifdef UNIV_SYNC_DEBUG
538 + ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
540 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
541 ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
542 ut_a(bpage->buf_fix_count == 0);
543 @@ -1450,21 +1482,32 @@
546 buf_pool_t* buf_pool = buf_pool_get(space, offset);
547 + mutex_t* block_mutex;
549 - ut_ad(buf_pool_mutex_own(buf_pool));
550 + //ut_ad(buf_pool_mutex_own(buf_pool));
552 + rw_lock_x_lock(&buf_pool->page_hash_latch);
553 bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
555 + block_mutex = buf_page_get_mutex_enter(bpage);
559 if (UNIV_LIKELY_NULL(bpage)) {
560 if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) {
561 /* The page was loaded meanwhile. */
562 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
565 /* Add to an existing watch. */
566 bpage->buf_fix_count++;
567 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
568 + mutex_exit(block_mutex);
572 + /* buf_pool->watch is protected by zip_mutex for now */
573 + mutex_enter(&buf_pool->zip_mutex);
574 for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
575 bpage = &buf_pool->watch[i];
577 @@ -1488,10 +1531,12 @@
578 bpage->space = space;
579 bpage->offset = offset;
580 bpage->buf_fix_count = 1;
582 + bpage->buf_pool_index = buf_pool_index(buf_pool);
583 ut_d(bpage->in_page_hash = TRUE);
584 HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
586 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
587 + mutex_exit(&buf_pool->zip_mutex);
589 case BUF_BLOCK_ZIP_PAGE:
590 ut_ad(bpage->in_page_hash);
591 @@ -1509,6 +1554,8 @@
594 /* Fix compiler warning */
595 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
596 + mutex_exit(&buf_pool->zip_mutex);
600 @@ -1526,7 +1573,11 @@
602 buf_page_t* watch) /*!< in/out: sentinel for watch */
604 - ut_ad(buf_pool_mutex_own(buf_pool));
605 + //ut_ad(buf_pool_mutex_own(buf_pool));
606 +#ifdef UNIV_SYNC_DEBUG
607 + ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
609 + ut_ad(mutex_own(&buf_pool->zip_mutex)); /* for now */
611 HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch);
612 ut_d(watch->in_page_hash = FALSE);
613 @@ -1548,28 +1599,31 @@
614 buf_pool_t* buf_pool = buf_pool_get(space, offset);
615 ulint fold = buf_page_address_fold(space, offset);
617 - buf_pool_mutex_enter(buf_pool);
618 + //buf_pool_mutex_enter(buf_pool);
619 + rw_lock_x_lock(&buf_pool->page_hash_latch);
620 bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
621 /* The page must exist because buf_pool_watch_set()
622 increments buf_fix_count. */
625 if (UNIV_UNLIKELY(!buf_pool_watch_is_sentinel(buf_pool, bpage))) {
626 - mutex_t* mutex = buf_page_get_mutex(bpage);
627 + mutex_t* mutex = buf_page_get_mutex_enter(bpage);
629 - mutex_enter(mutex);
630 ut_a(bpage->buf_fix_count > 0);
631 bpage->buf_fix_count--;
634 + mutex_enter(&buf_pool->zip_mutex);
635 ut_a(bpage->buf_fix_count > 0);
637 if (UNIV_LIKELY(!--bpage->buf_fix_count)) {
638 buf_pool_watch_remove(buf_pool, fold, bpage);
640 + mutex_exit(&buf_pool->zip_mutex);
643 - buf_pool_mutex_exit(buf_pool);
644 + //buf_pool_mutex_exit(buf_pool);
645 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
648 /****************************************************************//**
649 @@ -1589,14 +1643,16 @@
650 buf_pool_t* buf_pool = buf_pool_get(space, offset);
651 ulint fold = buf_page_address_fold(space, offset);
653 - buf_pool_mutex_enter(buf_pool);
654 + //buf_pool_mutex_enter(buf_pool);
655 + rw_lock_s_lock(&buf_pool->page_hash_latch);
657 bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
658 /* The page must exist because buf_pool_watch_set()
659 increments buf_fix_count. */
661 ret = !buf_pool_watch_is_sentinel(buf_pool, bpage);
662 - buf_pool_mutex_exit(buf_pool);
663 + //buf_pool_mutex_exit(buf_pool);
664 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
668 @@ -1613,13 +1669,15 @@
670 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
672 - buf_pool_mutex_enter(buf_pool);
673 + //buf_pool_mutex_enter(buf_pool);
674 + mutex_enter(&buf_pool->LRU_list_mutex);
676 ut_a(buf_page_in_file(bpage));
678 buf_LRU_make_block_young(bpage);
680 - buf_pool_mutex_exit(buf_pool);
681 + //buf_pool_mutex_exit(buf_pool);
682 + mutex_exit(&buf_pool->LRU_list_mutex);
685 /********************************************************************//**
686 @@ -1643,14 +1701,20 @@
687 ut_a(buf_page_in_file(bpage));
689 if (buf_page_peek_if_too_old(bpage)) {
690 - buf_pool_mutex_enter(buf_pool);
691 + //buf_pool_mutex_enter(buf_pool);
692 + mutex_enter(&buf_pool->LRU_list_mutex);
693 buf_LRU_make_block_young(bpage);
694 - buf_pool_mutex_exit(buf_pool);
695 + //buf_pool_mutex_exit(buf_pool);
696 + mutex_exit(&buf_pool->LRU_list_mutex);
697 } else if (!access_time) {
698 ulint time_ms = ut_time_ms();
699 - buf_pool_mutex_enter(buf_pool);
700 + mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
701 + //buf_pool_mutex_enter(buf_pool);
703 buf_page_set_accessed(bpage, time_ms);
704 - buf_pool_mutex_exit(buf_pool);
705 + mutex_exit(block_mutex);
707 + //buf_pool_mutex_exit(buf_pool);
711 @@ -1667,7 +1731,8 @@
713 buf_pool_t* buf_pool = buf_pool_get(space, offset);
715 - buf_pool_mutex_enter(buf_pool);
716 + //buf_pool_mutex_enter(buf_pool);
717 + rw_lock_s_lock(&buf_pool->page_hash_latch);
719 block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);
721 @@ -1676,7 +1741,8 @@
722 block->check_index_page_at_flush = FALSE;
725 - buf_pool_mutex_exit(buf_pool);
726 + //buf_pool_mutex_exit(buf_pool);
727 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
730 #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
731 @@ -1696,7 +1762,8 @@
733 buf_pool_t* buf_pool = buf_pool_get(space, offset);
735 - buf_pool_mutex_enter(buf_pool);
736 + //buf_pool_mutex_enter(buf_pool);
737 + rw_lock_s_lock(&buf_pool->page_hash_latch);
739 bpage = buf_page_hash_get(buf_pool, space, offset);
741 @@ -1707,7 +1774,8 @@
742 bpage->file_page_was_freed = TRUE;
745 - buf_pool_mutex_exit(buf_pool);
746 + //buf_pool_mutex_exit(buf_pool);
747 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
751 @@ -1728,7 +1796,8 @@
753 buf_pool_t* buf_pool = buf_pool_get(space, offset);
755 - buf_pool_mutex_enter(buf_pool);
756 + //buf_pool_mutex_enter(buf_pool);
757 + rw_lock_s_lock(&buf_pool->page_hash_latch);
759 bpage = buf_page_hash_get(buf_pool, space, offset);
761 @@ -1737,7 +1806,8 @@
762 bpage->file_page_was_freed = FALSE;
765 - buf_pool_mutex_exit(buf_pool);
766 + //buf_pool_mutex_exit(buf_pool);
767 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
771 @@ -1769,8 +1839,9 @@
772 buf_pool->stat.n_page_gets++;
775 - buf_pool_mutex_enter(buf_pool);
776 + //buf_pool_mutex_enter(buf_pool);
778 + rw_lock_s_lock(&buf_pool->page_hash_latch);
779 bpage = buf_page_hash_get(buf_pool, space, offset);
781 ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
782 @@ -1779,7 +1850,8 @@
784 /* Page not in buf_pool: needs to be read from file */
786 - buf_pool_mutex_exit(buf_pool);
787 + //buf_pool_mutex_exit(buf_pool);
788 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
790 buf_read_page(space, zip_size, offset);
792 @@ -1791,10 +1863,15 @@
793 if (UNIV_UNLIKELY(!bpage->zip.data)) {
794 /* There is no compressed page. */
796 - buf_pool_mutex_exit(buf_pool);
797 + //buf_pool_mutex_exit(buf_pool);
798 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
802 + block_mutex = buf_page_get_mutex_enter(bpage);
804 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
806 ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
808 switch (buf_page_get_state(bpage)) {
809 @@ -1803,24 +1880,43 @@
810 case BUF_BLOCK_MEMORY:
811 case BUF_BLOCK_REMOVE_HASH:
812 case BUF_BLOCK_ZIP_FREE:
814 + mutex_exit(block_mutex);
816 case BUF_BLOCK_ZIP_PAGE:
817 case BUF_BLOCK_ZIP_DIRTY:
818 - block_mutex = &buf_pool->zip_mutex;
819 - mutex_enter(block_mutex);
820 + ut_a(block_mutex == &buf_pool->zip_mutex);
821 bpage->buf_fix_count++;
823 case BUF_BLOCK_FILE_PAGE:
824 - block_mutex = &((buf_block_t*) bpage)->mutex;
825 + ut_a(block_mutex == &((buf_block_t*) bpage)->mutex);
827 + /* release mutex to obey to latch-order */
828 + mutex_exit(block_mutex);
830 + /* get LRU_list_mutex for buf_LRU_free_block() */
831 + mutex_enter(&buf_pool->LRU_list_mutex);
832 mutex_enter(block_mutex);
834 - /* Discard the uncompressed page frame if possible. */
835 - if (buf_LRU_free_block(bpage, FALSE)) {
836 + if (UNIV_UNLIKELY(bpage->space != space
837 + || bpage->offset != offset
838 + || !bpage->in_LRU_list
839 + || !bpage->zip.data)) {
840 + /* someone should interrupt, retry */
841 + mutex_exit(&buf_pool->LRU_list_mutex);
842 + mutex_exit(block_mutex);
846 + /* Discard the uncompressed page frame if possible. */
847 + if (buf_LRU_free_block(bpage, FALSE, TRUE)) {
848 + mutex_exit(&buf_pool->LRU_list_mutex);
849 mutex_exit(block_mutex);
853 + mutex_exit(&buf_pool->LRU_list_mutex);
855 buf_block_buf_fix_inc((buf_block_t*) bpage,
858 @@ -1833,7 +1929,7 @@
859 must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
860 access_time = buf_page_is_accessed(bpage);
862 - buf_pool_mutex_exit(buf_pool);
863 + //buf_pool_mutex_exit(buf_pool);
865 mutex_exit(block_mutex);
867 @@ -2144,7 +2240,7 @@
868 const buf_block_t* block) /*!< in: pointer to block,
871 - ut_ad(buf_pool_mutex_own(buf_pool));
872 + //ut_ad(buf_pool_mutex_own(buf_pool));
874 if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
875 /* The pointer should be aligned. */
876 @@ -2180,6 +2276,7 @@
880 + mutex_t* block_mutex = NULL;
881 buf_pool_t* buf_pool = buf_pool_get(space, offset);
884 @@ -2213,18 +2310,24 @@
885 fold = buf_page_address_fold(space, offset);
888 - buf_pool_mutex_enter(buf_pool);
889 + //buf_pool_mutex_enter(buf_pool);
892 + block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);
894 /* If the guess is a compressed page descriptor that
895 has been allocated by buf_page_alloc_descriptor(),
896 it may have been freed by buf_relocate(). */
898 - if (!buf_block_is_uncompressed(buf_pool, block)
899 + if (!block_mutex) {
900 + block = guess = NULL;
901 + } else if (!buf_block_is_uncompressed(buf_pool, block)
902 || offset != block->page.offset
903 || space != block->page.space
904 || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
906 + mutex_exit(block_mutex);
908 block = guess = NULL;
910 ut_ad(!block->page.in_zip_hash);
911 @@ -2233,12 +2336,19 @@
915 + rw_lock_s_lock(&buf_pool->page_hash_latch);
916 block = (buf_block_t*) buf_page_hash_get_low(
917 buf_pool, space, offset, fold);
919 + block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);
922 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
926 if (block && buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
927 + mutex_exit(block_mutex);
931 @@ -2250,12 +2360,14 @@
932 space, offset, fold);
934 if (UNIV_LIKELY_NULL(block)) {
936 + block_mutex = buf_page_get_mutex((buf_page_t*)block);
938 + ut_ad(mutex_own(block_mutex));
943 - buf_pool_mutex_exit(buf_pool);
944 + //buf_pool_mutex_exit(buf_pool);
946 if (mode == BUF_GET_IF_IN_POOL
947 || mode == BUF_PEEK_IF_IN_POOL
948 @@ -2308,7 +2420,8 @@
949 /* The page is being read to buffer pool,
950 but we cannot wait around for the read to
952 - buf_pool_mutex_exit(buf_pool);
953 + //buf_pool_mutex_exit(buf_pool);
954 + mutex_exit(block_mutex);
958 @@ -2318,38 +2431,49 @@
961 case BUF_BLOCK_FILE_PAGE:
962 + if (block_mutex == &buf_pool->zip_mutex) {
963 + /* it is wrong mutex... */
964 + mutex_exit(block_mutex);
969 case BUF_BLOCK_ZIP_PAGE:
970 case BUF_BLOCK_ZIP_DIRTY:
971 + ut_ad(block_mutex == &buf_pool->zip_mutex);
972 bpage = &block->page;
973 /* Protect bpage->buf_fix_count. */
974 - mutex_enter(&buf_pool->zip_mutex);
975 + //mutex_enter(&buf_pool->zip_mutex);
977 if (bpage->buf_fix_count
978 || buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
979 /* This condition often occurs when the buffer
980 is not buffer-fixed, but I/O-fixed by
981 buf_page_init_for_read(). */
982 - mutex_exit(&buf_pool->zip_mutex);
983 + //mutex_exit(&buf_pool->zip_mutex);
985 /* The block is buffer-fixed or I/O-fixed.
987 - buf_pool_mutex_exit(buf_pool);
988 + //buf_pool_mutex_exit(buf_pool);
989 + mutex_exit(block_mutex);
990 os_thread_sleep(WAIT_FOR_READ);
995 /* Allocate an uncompressed page. */
996 - buf_pool_mutex_exit(buf_pool);
997 - mutex_exit(&buf_pool->zip_mutex);
998 + //buf_pool_mutex_exit(buf_pool);
999 + //mutex_exit(&buf_pool->zip_mutex);
1000 + mutex_exit(block_mutex);
1002 block = buf_LRU_get_free_block(buf_pool);
1004 + block_mutex = &block->mutex;
1006 - buf_pool_mutex_enter(buf_pool);
1007 - mutex_enter(&block->mutex);
1008 + //buf_pool_mutex_enter(buf_pool);
1009 + mutex_enter(&buf_pool->LRU_list_mutex);
1010 + rw_lock_x_lock(&buf_pool->page_hash_latch);
1011 + mutex_enter(block_mutex);
1014 buf_page_t* hash_bpage;
1015 @@ -2362,35 +2486,47 @@
1016 while buf_pool->mutex was released.
1017 Free the block that was allocated. */
1019 - buf_LRU_block_free_non_file_page(block);
1020 - mutex_exit(&block->mutex);
1021 + buf_LRU_block_free_non_file_page(block, TRUE);
1022 + mutex_exit(block_mutex);
1024 block = (buf_block_t*) hash_bpage;
1026 + block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);
1027 + ut_a(block_mutex);
1029 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1030 + mutex_exit(&buf_pool->LRU_list_mutex);
1035 + mutex_enter(&buf_pool->zip_mutex);
1038 (bpage->buf_fix_count
1039 || buf_page_get_io_fix(bpage) != BUF_IO_NONE)) {
1041 + mutex_exit(&buf_pool->zip_mutex);
1042 /* The block was buffer-fixed or I/O-fixed
1043 while buf_pool->mutex was not held by this thread.
1044 Free the block that was allocated and try again.
1045 This should be extremely unlikely. */
1047 - buf_LRU_block_free_non_file_page(block);
1048 - mutex_exit(&block->mutex);
1049 + buf_LRU_block_free_non_file_page(block, TRUE);
1050 + //mutex_exit(&block->mutex);
1052 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1053 + mutex_exit(&buf_pool->LRU_list_mutex);
1054 goto wait_until_unfixed;
1057 /* Move the compressed page from bpage to block,
1058 and uncompress it. */
1060 - mutex_enter(&buf_pool->zip_mutex);
1062 buf_relocate(bpage, &block->page);
1064 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1066 buf_block_init_low(block);
1067 block->lock_hash_val = lock_rec_hash(space, offset);
1069 @@ -2400,7 +2536,7 @@
1070 if (buf_page_get_state(&block->page)
1071 == BUF_BLOCK_ZIP_PAGE) {
1072 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1073 - UT_LIST_REMOVE(list, buf_pool->zip_clean,
1074 + UT_LIST_REMOVE(zip_list, buf_pool->zip_clean,
1076 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
1077 ut_ad(!block->page.in_flush_list);
1078 @@ -2418,18 +2554,23 @@
1079 /* Insert at the front of unzip_LRU list */
1080 buf_unzip_LRU_add_block(block, FALSE);
1082 + mutex_exit(&buf_pool->LRU_list_mutex);
1084 block->page.buf_fix_count = 1;
1085 buf_block_set_io_fix(block, BUF_IO_READ);
1086 rw_lock_x_lock_func(&block->lock, 0, file, line);
1088 UNIV_MEM_INVALID(bpage, sizeof *bpage);
1090 - mutex_exit(&block->mutex);
1091 + mutex_exit(block_mutex);
1092 mutex_exit(&buf_pool->zip_mutex);
1093 - buf_pool->n_pend_unzip++;
1095 + buf_pool_mutex_enter(buf_pool);
1096 + buf_pool->n_pend_unzip++;
1097 buf_pool_mutex_exit(buf_pool);
1099 + //buf_pool_mutex_exit(buf_pool);
1101 buf_page_free_descriptor(bpage);
1103 /* Decompress the page and apply buffered operations
1104 @@ -2443,12 +2584,15 @@
1107 /* Unfix and unlatch the block. */
1108 - buf_pool_mutex_enter(buf_pool);
1109 - mutex_enter(&block->mutex);
1110 + //buf_pool_mutex_enter(buf_pool);
1111 + block_mutex = &block->mutex;
1112 + mutex_enter(block_mutex);
1113 block->page.buf_fix_count--;
1114 buf_block_set_io_fix(block, BUF_IO_NONE);
1115 - mutex_exit(&block->mutex);
1117 + buf_pool_mutex_enter(buf_pool);
1118 buf_pool->n_pend_unzip--;
1119 + buf_pool_mutex_exit(buf_pool);
1120 rw_lock_x_unlock(&block->lock);
1123 @@ -2464,7 +2608,7 @@
1125 ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
1127 - mutex_enter(&block->mutex);
1128 + //mutex_enter(&block->mutex);
1129 #if UNIV_WORD_SIZE == 4
1130 /* On 32-bit systems, there is no padding in buf_page_t. On
1131 other systems, Valgrind could complain about uninitialized pad
1132 @@ -2477,8 +2621,8 @@
1133 /* Try to evict the block from the buffer pool, to use the
1134 insert buffer (change buffer) as much as possible. */
1136 - if (buf_LRU_free_block(&block->page, TRUE)) {
1137 - mutex_exit(&block->mutex);
1138 + if (buf_LRU_free_block(&block->page, TRUE, FALSE)) {
1139 + mutex_exit(block_mutex);
1140 if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
1141 /* Set the watch, as it would have
1142 been set if the page were not in the
1143 @@ -2487,6 +2631,9 @@
1144 space, offset, fold);
1146 if (UNIV_LIKELY_NULL(block)) {
1147 + block_mutex = buf_page_get_mutex((buf_page_t*)block);
1148 + ut_a(block_mutex);
1149 + ut_ad(mutex_own(block_mutex));
1151 /* The page entered the buffer
1152 pool for some reason. Try to
1153 @@ -2494,7 +2641,7 @@
1157 - buf_pool_mutex_exit(buf_pool);
1158 + //buf_pool_mutex_exit(buf_pool);
1160 "innodb_change_buffering_debug evict %u %u\n",
1161 (unsigned) space, (unsigned) offset);
1162 @@ -2516,13 +2663,14 @@
1163 ut_a(mode == BUF_GET_POSSIBLY_FREED
1164 || !block->page.file_page_was_freed);
1166 - mutex_exit(&block->mutex);
1167 + //mutex_exit(&block->mutex);
1169 /* Check if this is the first access to the page */
1171 access_time = buf_page_is_accessed(&block->page);
1173 - buf_pool_mutex_exit(buf_pool);
1174 + //buf_pool_mutex_exit(buf_pool);
1175 + mutex_exit(block_mutex);
1177 if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) {
1178 buf_page_set_accessed_make_young(&block->page, access_time);
1179 @@ -2755,9 +2903,11 @@
1180 buf_pool = buf_pool_from_block(block);
1182 if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
1183 - buf_pool_mutex_enter(buf_pool);
1184 + //buf_pool_mutex_enter(buf_pool);
1185 + mutex_enter(&buf_pool->LRU_list_mutex);
1186 buf_LRU_make_block_young(&block->page);
1187 - buf_pool_mutex_exit(buf_pool);
1188 + //buf_pool_mutex_exit(buf_pool);
1189 + mutex_exit(&buf_pool->LRU_list_mutex);
1190 } else if (!buf_page_is_accessed(&block->page)) {
1191 /* Above, we do a dirty read on purpose, to avoid
1192 mutex contention. The field buf_page_t::access_time
1193 @@ -2765,9 +2915,11 @@
1194 field must be protected by mutex, however. */
1195 ulint time_ms = ut_time_ms();
1197 - buf_pool_mutex_enter(buf_pool);
1198 + //buf_pool_mutex_enter(buf_pool);
1199 + mutex_enter(&block->mutex);
1200 buf_page_set_accessed(&block->page, time_ms);
1201 - buf_pool_mutex_exit(buf_pool);
1202 + //buf_pool_mutex_exit(buf_pool);
1203 + mutex_exit(&block->mutex);
1206 ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);
1207 @@ -2834,18 +2986,21 @@
1209 ut_ad(mtr->state == MTR_ACTIVE);
1211 - buf_pool_mutex_enter(buf_pool);
1212 + //buf_pool_mutex_enter(buf_pool);
1213 + rw_lock_s_lock(&buf_pool->page_hash_latch);
1214 block = buf_block_hash_get(buf_pool, space_id, page_no);
1216 if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
1217 - buf_pool_mutex_exit(buf_pool);
1218 + //buf_pool_mutex_exit(buf_pool);
1219 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
1223 ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
1225 mutex_enter(&block->mutex);
1226 - buf_pool_mutex_exit(buf_pool);
1227 + //buf_pool_mutex_exit(buf_pool);
1228 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
1230 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1231 ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
1232 @@ -2935,7 +3090,10 @@
1233 buf_page_t* hash_page;
1235 ut_ad(buf_pool == buf_pool_get(space, offset));
1236 - ut_ad(buf_pool_mutex_own(buf_pool));
1237 + //ut_ad(buf_pool_mutex_own(buf_pool));
1238 +#ifdef UNIV_SYNC_DEBUG
1239 + ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
1241 ut_ad(mutex_own(&(block->mutex)));
1242 ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
1244 @@ -2964,11 +3122,14 @@
1245 if (UNIV_LIKELY(!hash_page)) {
1246 } else if (buf_pool_watch_is_sentinel(buf_pool, hash_page)) {
1247 /* Preserve the reference count. */
1248 - ulint buf_fix_count = hash_page->buf_fix_count;
1249 + ulint buf_fix_count;
1251 + mutex_enter(&buf_pool->zip_mutex);
1252 + buf_fix_count = hash_page->buf_fix_count;
1253 ut_a(buf_fix_count > 0);
1254 block->page.buf_fix_count += buf_fix_count;
1255 buf_pool_watch_remove(buf_pool, fold, hash_page);
1256 + mutex_exit(&buf_pool->zip_mutex);
1259 "InnoDB: Error: page %lu %lu already found"
1260 @@ -2978,7 +3139,8 @@
1261 (const void*) hash_page, (const void*) block);
1262 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1263 mutex_exit(&block->mutex);
1264 - buf_pool_mutex_exit(buf_pool);
1265 + //buf_pool_mutex_exit(buf_pool);
1266 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1270 @@ -3061,7 +3223,9 @@
1272 fold = buf_page_address_fold(space, offset);
1274 - buf_pool_mutex_enter(buf_pool);
1275 + //buf_pool_mutex_enter(buf_pool);
1276 + mutex_enter(&buf_pool->LRU_list_mutex);
1277 + rw_lock_x_lock(&buf_pool->page_hash_latch);
1279 watch_page = buf_page_hash_get_low(buf_pool, space, offset, fold);
1280 if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) {
1281 @@ -3070,9 +3234,15 @@
1284 mutex_enter(&block->mutex);
1285 - buf_LRU_block_free_non_file_page(block);
1286 + mutex_exit(&buf_pool->LRU_list_mutex);
1287 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1288 + buf_LRU_block_free_non_file_page(block, FALSE);
1289 mutex_exit(&block->mutex);
1292 + mutex_exit(&buf_pool->LRU_list_mutex);
1293 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1298 @@ -3095,6 +3265,8 @@
1300 buf_page_init(buf_pool, space, offset, fold, block);
1302 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1304 /* The block must be put to the LRU list, to the old blocks */
1305 buf_LRU_add_block(bpage, TRUE/* to old blocks */);
1307 @@ -3122,7 +3294,7 @@
1308 been added to buf_pool->LRU and
1309 buf_pool->page_hash. */
1310 mutex_exit(&block->mutex);
1311 - data = buf_buddy_alloc(buf_pool, zip_size, &lru);
1312 + data = buf_buddy_alloc(buf_pool, zip_size, &lru, FALSE);
1313 mutex_enter(&block->mutex);
1314 block->page.zip.data = data;
1316 @@ -3135,13 +3307,14 @@
1317 buf_unzip_LRU_add_block(block, TRUE);
1320 + mutex_exit(&buf_pool->LRU_list_mutex);
1321 mutex_exit(&block->mutex);
1323 /* The compressed page must be allocated before the
1324 control block (bpage), in order to avoid the
1325 invocation of buf_buddy_relocate_block() on
1326 uninitialized data. */
1327 - data = buf_buddy_alloc(buf_pool, zip_size, &lru);
1328 + data = buf_buddy_alloc(buf_pool, zip_size, &lru, TRUE);
1330 /* If buf_buddy_alloc() allocated storage from the LRU list,
1331 it released and reacquired buf_pool->mutex. Thus, we must
1332 @@ -3157,7 +3330,10 @@
1334 /* The block was added by some other thread. */
1336 - buf_buddy_free(buf_pool, data, zip_size);
1337 + buf_buddy_free(buf_pool, data, zip_size, TRUE);
1339 + mutex_exit(&buf_pool->LRU_list_mutex);
1340 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1344 @@ -3205,20 +3381,26 @@
1345 HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold,
1348 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1350 /* The block must be put to the LRU list, to the old blocks */
1351 buf_LRU_add_block(bpage, TRUE/* to old blocks */);
1352 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1353 buf_LRU_insert_zip_clean(bpage);
1354 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
1356 + mutex_exit(&buf_pool->LRU_list_mutex);
1358 buf_page_set_io_fix(bpage, BUF_IO_READ);
1360 mutex_exit(&buf_pool->zip_mutex);
1363 + buf_pool_mutex_enter(buf_pool);
1364 buf_pool->n_pend_reads++;
1366 buf_pool_mutex_exit(buf_pool);
1368 + //buf_pool_mutex_exit(buf_pool);
1370 if (mode == BUF_READ_IBUF_PAGES_ONLY) {
1372 @@ -3260,7 +3442,9 @@
1374 fold = buf_page_address_fold(space, offset);
1376 - buf_pool_mutex_enter(buf_pool);
1377 + //buf_pool_mutex_enter(buf_pool);
1378 + mutex_enter(&buf_pool->LRU_list_mutex);
1379 + rw_lock_x_lock(&buf_pool->page_hash_latch);
1381 block = (buf_block_t*) buf_page_hash_get_low(
1382 buf_pool, space, offset, fold);
1383 @@ -3276,7 +3460,9 @@
1384 #endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
1386 /* Page can be found in buf_pool */
1387 - buf_pool_mutex_exit(buf_pool);
1388 + //buf_pool_mutex_exit(buf_pool);
1389 + mutex_exit(&buf_pool->LRU_list_mutex);
1390 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1392 buf_block_free(free_block);
1394 @@ -3298,6 +3484,7 @@
1395 mutex_enter(&block->mutex);
1397 buf_page_init(buf_pool, space, offset, fold, block);
1398 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1400 /* The block must be put to the LRU list */
1401 buf_LRU_add_block(&block->page, FALSE);
1402 @@ -3324,7 +3511,7 @@
1403 the reacquisition of buf_pool->mutex. We also must
1404 defer this operation until after the block descriptor
1405 has been added to buf_pool->LRU and buf_pool->page_hash. */
1406 - data = buf_buddy_alloc(buf_pool, zip_size, &lru);
1407 + data = buf_buddy_alloc(buf_pool, zip_size, &lru, FALSE);
1408 mutex_enter(&block->mutex);
1409 block->page.zip.data = data;
1411 @@ -3342,7 +3529,8 @@
1413 buf_page_set_accessed(&block->page, time_ms);
1415 - buf_pool_mutex_exit(buf_pool);
1416 + //buf_pool_mutex_exit(buf_pool);
1417 + mutex_exit(&buf_pool->LRU_list_mutex);
1419 mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
1421 @@ -3397,7 +3585,9 @@
1424 /* First unfix and release lock on the bpage */
1425 - buf_pool_mutex_enter(buf_pool);
1426 + //buf_pool_mutex_enter(buf_pool);
1427 + mutex_enter(&buf_pool->LRU_list_mutex);
1428 + rw_lock_x_lock(&buf_pool->page_hash_latch);
1429 mutex_enter(buf_page_get_mutex(bpage));
1430 ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
1431 ut_ad(bpage->buf_fix_count == 0);
1432 @@ -3418,11 +3608,15 @@
1436 + buf_pool_mutex_enter(buf_pool);
1437 ut_ad(buf_pool->n_pend_reads > 0);
1438 buf_pool->n_pend_reads--;
1439 + buf_pool_mutex_exit(buf_pool);
1441 mutex_exit(buf_page_get_mutex(bpage));
1442 - buf_pool_mutex_exit(buf_pool);
1443 + //buf_pool_mutex_exit(buf_pool);
1444 + mutex_exit(&buf_pool->LRU_list_mutex);
1445 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1449 @@ -3440,6 +3634,8 @@
1450 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1451 const ibool uncompressed = (buf_page_get_state(bpage)
1452 == BUF_BLOCK_FILE_PAGE);
1453 + ibool have_LRU_mutex = FALSE;
1454 + mutex_t* block_mutex;
1456 ut_a(buf_page_in_file(bpage));
1458 @@ -3582,8 +3778,26 @@
1462 + if (io_type == BUF_IO_WRITE
1463 + && (buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY
1464 + || buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)) {
1465 + /* to keep consistency at buf_LRU_insert_zip_clean() */
1466 + have_LRU_mutex = TRUE; /* optimistic */
1469 + if (have_LRU_mutex)
1470 + mutex_enter(&buf_pool->LRU_list_mutex);
1471 + block_mutex = buf_page_get_mutex_enter(bpage);
1472 + ut_a(block_mutex);
1473 + if (io_type == BUF_IO_WRITE
1474 + && (buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY
1475 + || buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)
1476 + && !have_LRU_mutex) {
1477 + mutex_exit(block_mutex);
1478 + have_LRU_mutex = TRUE;
1481 buf_pool_mutex_enter(buf_pool);
1482 - mutex_enter(buf_page_get_mutex(bpage));
1484 #ifdef UNIV_IBUF_COUNT_DEBUG
1485 if (io_type == BUF_IO_WRITE || uncompressed) {
1486 @@ -3606,6 +3820,7 @@
1487 the x-latch to this OS thread: do not let this confuse you in
1490 + ut_a(!have_LRU_mutex);
1491 ut_ad(buf_pool->n_pend_reads > 0);
1492 buf_pool->n_pend_reads--;
1493 buf_pool->stat.n_pages_read++;
1494 @@ -3623,6 +3838,9 @@
1496 buf_flush_write_complete(bpage);
1498 + if (have_LRU_mutex)
1499 + mutex_exit(&buf_pool->LRU_list_mutex);
1502 rw_lock_s_unlock_gen(&((buf_block_t*) bpage)->lock,
1504 @@ -3645,8 +3863,8 @@
1506 #endif /* UNIV_DEBUG */
1508 - mutex_exit(buf_page_get_mutex(bpage));
1509 buf_pool_mutex_exit(buf_pool);
1510 + mutex_exit(block_mutex);
1513 /*********************************************************************//**
1514 @@ -3663,7 +3881,9 @@
1518 - buf_pool_mutex_enter(buf_pool);
1519 + //buf_pool_mutex_enter(buf_pool);
1520 + mutex_enter(&buf_pool->LRU_list_mutex);
1521 + rw_lock_x_lock(&buf_pool->page_hash_latch);
1523 chunk = buf_pool->chunks;
1525 @@ -3680,7 +3900,9 @@
1529 - buf_pool_mutex_exit(buf_pool);
1530 + //buf_pool_mutex_exit(buf_pool);
1531 + mutex_exit(&buf_pool->LRU_list_mutex);
1532 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1536 @@ -3728,7 +3950,8 @@
1537 freed = buf_LRU_search_and_free_block(buf_pool, 100);
1540 - buf_pool_mutex_enter(buf_pool);
1541 + //buf_pool_mutex_enter(buf_pool);
1542 + mutex_enter(&buf_pool->LRU_list_mutex);
1544 ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
1545 ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
1546 @@ -3741,7 +3964,8 @@
1547 memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
1548 buf_refresh_io_stats(buf_pool);
1550 - buf_pool_mutex_exit(buf_pool);
1551 + //buf_pool_mutex_exit(buf_pool);
1552 + mutex_exit(&buf_pool->LRU_list_mutex);
1555 /*********************************************************************//**
1556 @@ -3783,7 +4007,10 @@
1560 - buf_pool_mutex_enter(buf_pool);
1561 + //buf_pool_mutex_enter(buf_pool);
1562 + mutex_enter(&buf_pool->LRU_list_mutex);
1563 + rw_lock_x_lock(&buf_pool->page_hash_latch);
1564 + /* because of the new latch order, this cannot be validated correctly... */
1566 chunk = buf_pool->chunks;
1568 @@ -3878,7 +4105,7 @@
1569 /* Check clean compressed-only blocks. */
1571 for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
1572 - b = UT_LIST_GET_NEXT(list, b)) {
1573 + b = UT_LIST_GET_NEXT(zip_list, b)) {
1574 ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
1575 switch (buf_page_get_io_fix(b)) {
1577 @@ -3909,7 +4136,7 @@
1579 buf_flush_list_mutex_enter(buf_pool);
1580 for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
1581 - b = UT_LIST_GET_NEXT(list, b)) {
1582 + b = UT_LIST_GET_NEXT(flush_list, b)) {
1583 ut_ad(b->in_flush_list);
1584 ut_a(b->oldest_modification);
1586 @@ -3968,6 +4195,8 @@
1589 ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
1589 + /* because of the latching order with block->mutex, we cannot acquire the needed mutexes before this point */
1592 if (UT_LIST_GET_LEN(buf_pool->free) != n_free) {
1593 fprintf(stderr, "Free list len %lu, free blocks %lu\n",
1594 (ulong) UT_LIST_GET_LEN(buf_pool->free),
1595 @@ -3978,8 +4207,11 @@
1596 ut_a(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_single_flush);
1597 ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
1598 ut_a(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
1601 - buf_pool_mutex_exit(buf_pool);
1602 + //buf_pool_mutex_exit(buf_pool);
1603 + mutex_exit(&buf_pool->LRU_list_mutex);
1604 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
1606 ut_a(buf_LRU_validate());
1607 ut_a(buf_flush_validate(buf_pool));
1608 @@ -4035,7 +4267,9 @@
1609 index_ids = mem_alloc(size * sizeof *index_ids);
1610 counts = mem_alloc(sizeof(ulint) * size);
1612 - buf_pool_mutex_enter(buf_pool);
1613 + //buf_pool_mutex_enter(buf_pool);
1614 + mutex_enter(&buf_pool->LRU_list_mutex);
1615 + mutex_enter(&buf_pool->free_list_mutex);
1616 buf_flush_list_mutex_enter(buf_pool);
1619 @@ -4104,7 +4338,9 @@
1623 - buf_pool_mutex_exit(buf_pool);
1624 + //buf_pool_mutex_exit(buf_pool);
1625 + mutex_exit(&buf_pool->LRU_list_mutex);
1626 + mutex_exit(&buf_pool->free_list_mutex);
1628 for (i = 0; i < n_found; i++) {
1629 index = dict_index_get_if_in_cache(index_ids[i]);
1630 @@ -4161,7 +4397,7 @@
1632 ulint fixed_pages_number = 0;
1634 - buf_pool_mutex_enter(buf_pool);
1635 + //buf_pool_mutex_enter(buf_pool);
1637 chunk = buf_pool->chunks;
1639 @@ -4195,7 +4431,7 @@
1640 /* Traverse the lists of clean and dirty compressed-only blocks. */
1642 for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
1643 - b = UT_LIST_GET_NEXT(list, b)) {
1644 + b = UT_LIST_GET_NEXT(zip_list, b)) {
1645 ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
1646 ut_a(buf_page_get_io_fix(b) != BUF_IO_WRITE);
1648 @@ -4207,7 +4443,7 @@
1650 buf_flush_list_mutex_enter(buf_pool);
1651 for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
1652 - b = UT_LIST_GET_NEXT(list, b)) {
1653 + b = UT_LIST_GET_NEXT(flush_list, b)) {
1654 ut_ad(b->in_flush_list);
1656 switch (buf_page_get_state(b)) {
1657 @@ -4233,7 +4469,7 @@
1659 buf_flush_list_mutex_exit(buf_pool);
1660 mutex_exit(&buf_pool->zip_mutex);
1661 - buf_pool_mutex_exit(buf_pool);
1662 + //buf_pool_mutex_exit(buf_pool);
1664 return(fixed_pages_number);
1666 @@ -4391,6 +4627,8 @@
1667 /* Find appropriate pool_info to store stats for this buffer pool */
1668 pool_info = &all_pool_info[pool_id];
1670 + mutex_enter(&buf_pool->LRU_list_mutex);
1671 + mutex_enter(&buf_pool->free_list_mutex);
1672 buf_pool_mutex_enter(buf_pool);
1673 buf_flush_list_mutex_enter(buf_pool);
1675 @@ -4506,6 +4744,8 @@
1676 pool_info->unzip_cur = buf_LRU_stat_cur.unzip;
1678 buf_refresh_io_stats(buf_pool);
1679 + mutex_exit(&buf_pool->LRU_list_mutex);
1680 + mutex_exit(&buf_pool->free_list_mutex);
1681 buf_pool_mutex_exit(buf_pool);
1684 @@ -4750,11 +4990,13 @@
1688 - buf_pool_mutex_enter(buf_pool);
1689 + //buf_pool_mutex_enter(buf_pool);
1690 + mutex_enter(&buf_pool->free_list_mutex);
1692 len = UT_LIST_GET_LEN(buf_pool->free);
1694 - buf_pool_mutex_exit(buf_pool);
1695 + //buf_pool_mutex_exit(buf_pool);
1696 + mutex_exit(&buf_pool->free_list_mutex);
1700 --- a/storage/innobase/buf/buf0flu.c
1701 +++ b/storage/innobase/buf/buf0flu.c
1704 ut_d(block->page.in_flush_list = TRUE);
1705 block->page.oldest_modification = lsn;
1706 - UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);
1707 + UT_LIST_ADD_FIRST(flush_list, buf_pool->flush_list, &block->page);
1709 #ifdef UNIV_DEBUG_VALGRIND
1711 @@ -401,14 +401,14 @@
1712 > block->page.oldest_modification) {
1713 ut_ad(b->in_flush_list);
1715 - b = UT_LIST_GET_NEXT(list, b);
1716 + b = UT_LIST_GET_NEXT(flush_list, b);
1720 if (prev_b == NULL) {
1721 - UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);
1722 + UT_LIST_ADD_FIRST(flush_list, buf_pool->flush_list, &block->page);
1724 - UT_LIST_INSERT_AFTER(list, buf_pool->flush_list,
1725 + UT_LIST_INSERT_AFTER(flush_list, buf_pool->flush_list,
1726 prev_b, &block->page);
1730 //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1731 //ut_ad(buf_pool_mutex_own(buf_pool));
1733 - //ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1734 + ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1735 //ut_ad(bpage->in_LRU_list);
1737 if (UNIV_LIKELY(bpage->in_LRU_list && buf_page_in_file(bpage))) {
1738 @@ -470,14 +470,14 @@
1739 enum buf_flush flush_type)/*!< in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
1742 - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1743 - ut_ad(buf_pool_mutex_own(buf_pool));
1744 + //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1745 + //ut_ad(buf_pool_mutex_own(buf_pool));
1747 - ut_a(buf_page_in_file(bpage));
1748 + //ut_a(buf_page_in_file(bpage));
1749 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1750 ut_ad(flush_type == BUF_FLUSH_LRU || BUF_FLUSH_LIST);
1752 - if (bpage->oldest_modification != 0
1753 + if (buf_page_in_file(bpage) && bpage->oldest_modification != 0
1754 && buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
1755 ut_ad(bpage->in_flush_list);
1759 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1761 - ut_ad(buf_pool_mutex_own(buf_pool));
1762 + //ut_ad(buf_pool_mutex_own(buf_pool));
1763 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1764 ut_ad(bpage->in_flush_list);
1766 @@ -526,13 +526,13 @@
1768 case BUF_BLOCK_ZIP_DIRTY:
1769 buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
1770 - UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
1771 + UT_LIST_REMOVE(flush_list, buf_pool->flush_list, bpage);
1772 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1773 buf_LRU_insert_zip_clean(bpage);
1774 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
1776 case BUF_BLOCK_FILE_PAGE:
1777 - UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
1778 + UT_LIST_REMOVE(flush_list, buf_pool->flush_list, bpage);
1783 buf_page_t* prev_b = NULL;
1784 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1786 - ut_ad(buf_pool_mutex_own(buf_pool));
1787 + //ut_ad(buf_pool_mutex_own(buf_pool));
1788 /* Must reside in the same buffer pool. */
1789 ut_ad(buf_pool == buf_pool_from_bpage(dpage));
1791 @@ -605,18 +605,18 @@
1792 because we assert on in_flush_list in comparison function. */
1793 ut_d(bpage->in_flush_list = FALSE);
1795 - prev = UT_LIST_GET_PREV(list, bpage);
1796 - UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
1797 + prev = UT_LIST_GET_PREV(flush_list, bpage);
1798 + UT_LIST_REMOVE(flush_list, buf_pool->flush_list, bpage);
1801 ut_ad(prev->in_flush_list);
1802 UT_LIST_INSERT_AFTER(
1805 buf_pool->flush_list,
1811 buf_pool->flush_list,
1814 @@ -1085,7 +1085,7 @@
1817 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1818 - ut_ad(!buf_pool_mutex_own(buf_pool));
1819 + //ut_ad(!buf_pool_mutex_own(buf_pool));
1822 #ifdef UNIV_LOG_DEBUG
1823 @@ -1099,7 +1099,8 @@
1824 io_fixed and oldest_modification != 0. Thus, it cannot be
1825 relocated in the buffer pool or removed from flush_list or
1827 - ut_ad(!buf_pool_mutex_own(buf_pool));
1828 + //ut_ad(!buf_pool_mutex_own(buf_pool));
1829 + ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
1830 ut_ad(!buf_flush_list_mutex_own(buf_pool));
1831 ut_ad(!mutex_own(buf_page_get_mutex(bpage)));
1832 ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
1833 @@ -1179,7 +1180,7 @@
1834 buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
1835 buf_block_t* block) /*!< in/out: buffer control block */
1837 - ut_ad(buf_pool_mutex_own(buf_pool));
1838 + //ut_ad(buf_pool_mutex_own(buf_pool));
1839 ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
1840 ut_ad(mutex_own(&block->mutex));
1842 @@ -1187,8 +1188,11 @@
1846 + buf_pool_mutex_enter(buf_pool);
1848 if (buf_pool->n_flush[BUF_FLUSH_LRU] > 0
1849 || buf_pool->init_flush[BUF_FLUSH_LRU]) {
1850 + buf_pool_mutex_exit(buf_pool);
1851 /* There is already a flush batch of the same type running */
1854 @@ -1262,12 +1266,18 @@
1855 ibool is_uncompressed;
1857 ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
1858 - ut_ad(buf_pool_mutex_own(buf_pool));
1859 + //ut_ad(buf_pool_mutex_own(buf_pool));
1860 +#ifdef UNIV_SYNC_DEBUG
1861 + ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_SHARED));
1863 ut_ad(buf_page_in_file(bpage));
1865 block_mutex = buf_page_get_mutex(bpage);
1866 ut_ad(mutex_own(block_mutex));
1868 + buf_pool_mutex_enter(buf_pool);
1869 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
1871 ut_ad(buf_flush_ready_for_flush(bpage, flush_type));
1873 buf_page_set_io_fix(bpage, BUF_IO_WRITE);
1874 @@ -1429,14 +1439,16 @@
1876 buf_pool = buf_pool_get(space, i);
1878 - buf_pool_mutex_enter(buf_pool);
1879 + //buf_pool_mutex_enter(buf_pool);
1880 + rw_lock_s_lock(&buf_pool->page_hash_latch);
1882 /* We only want to flush pages from this buffer pool. */
1883 bpage = buf_page_hash_get(buf_pool, space, i);
1887 - buf_pool_mutex_exit(buf_pool);
1888 + //buf_pool_mutex_exit(buf_pool);
1889 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
1893 @@ -1448,11 +1460,9 @@
1894 if (flush_type != BUF_FLUSH_LRU
1896 || buf_page_is_old(bpage)) {
1897 - mutex_t* block_mutex = buf_page_get_mutex(bpage);
1898 + mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
1900 - mutex_enter(block_mutex);
1902 - if (buf_flush_ready_for_flush(bpage, flush_type)
1903 + if (block_mutex && buf_flush_ready_for_flush(bpage, flush_type)
1904 && (i == offset || !bpage->buf_fix_count)) {
1905 /* We only try to flush those
1906 neighbors != offset where the buf fix
1907 @@ -1468,11 +1478,12 @@
1908 ut_ad(!buf_pool_mutex_own(buf_pool));
1912 + } else if (block_mutex) {
1913 mutex_exit(block_mutex);
1916 - buf_pool_mutex_exit(buf_pool);
1917 + //buf_pool_mutex_exit(buf_pool);
1918 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
1922 @@ -1505,21 +1516,25 @@
1923 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1924 #endif /* UNIV_DEBUG */
1926 - ut_ad(buf_pool_mutex_own(buf_pool));
1927 + //ut_ad(buf_pool_mutex_own(buf_pool));
1928 + ut_ad(flush_type != BUF_FLUSH_LRU
1929 + || mutex_own(&buf_pool->LRU_list_mutex));
1931 - block_mutex = buf_page_get_mutex(bpage);
1932 - mutex_enter(block_mutex);
1933 + block_mutex = buf_page_get_mutex_enter(bpage);
1935 - ut_a(buf_page_in_file(bpage));
1936 + //ut_a(buf_page_in_file(bpage));
1938 - if (buf_flush_ready_for_flush(bpage, flush_type)) {
1939 + if (block_mutex && buf_flush_ready_for_flush(bpage, flush_type)) {
1942 buf_pool_t* buf_pool;
1944 buf_pool = buf_pool_from_bpage(bpage);
1946 - buf_pool_mutex_exit(buf_pool);
1947 + //buf_pool_mutex_exit(buf_pool);
1948 + if (flush_type == BUF_FLUSH_LRU) {
1949 + mutex_exit(&buf_pool->LRU_list_mutex);
1952 /* These fields are protected by both the
1953 buffer pool mutex and block mutex. */
1954 @@ -1535,13 +1550,18 @@
1958 - buf_pool_mutex_enter(buf_pool);
1959 + //buf_pool_mutex_enter(buf_pool);
1960 + if (flush_type == BUF_FLUSH_LRU) {
1961 + mutex_enter(&buf_pool->LRU_list_mutex);
1965 + } else if (block_mutex) {
1966 mutex_exit(block_mutex);
1969 - ut_ad(buf_pool_mutex_own(buf_pool));
1970 + //ut_ad(buf_pool_mutex_own(buf_pool));
1971 + ut_ad(flush_type != BUF_FLUSH_LRU
1972 + || mutex_own(&buf_pool->LRU_list_mutex));
1976 @@ -1562,7 +1582,8 @@
1980 - ut_ad(buf_pool_mutex_own(buf_pool));
1981 + //ut_ad(buf_pool_mutex_own(buf_pool));
1982 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
1985 /* Start from the end of the list looking for a
1986 @@ -1584,7 +1605,8 @@
1987 should be flushed, we factor in this value. */
1988 buf_lru_flush_page_count += count;
1990 - ut_ad(buf_pool_mutex_own(buf_pool));
1991 + //ut_ad(buf_pool_mutex_own(buf_pool));
1992 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
1996 @@ -1612,9 +1634,10 @@
2000 + buf_page_t* prev_bpage = NULL;
2003 - ut_ad(buf_pool_mutex_own(buf_pool));
2004 + //ut_ad(buf_pool_mutex_own(buf_pool));
2006 /* If we have flushed enough, leave the loop */
2008 @@ -1633,6 +1656,7 @@
2011 ut_a(bpage->oldest_modification > 0);
2012 + prev_bpage = UT_LIST_GET_PREV(flush_list, bpage);
2015 if (!bpage || bpage->oldest_modification >= lsn_limit) {
2016 @@ -1674,9 +1698,17 @@
2020 - bpage = UT_LIST_GET_PREV(list, bpage);
2021 + bpage = UT_LIST_GET_PREV(flush_list, bpage);
2023 - ut_ad(!bpage || bpage->in_flush_list);
2024 + //ut_ad(!bpage || bpage->in_flush_list);
2025 + if (bpage != prev_bpage) {
2026 + /* the flush list may have changed under us; retry the scan */
2027 + buf_flush_list_mutex_exit(buf_pool);
2031 + prev_bpage = UT_LIST_GET_PREV(flush_list, bpage);
2034 buf_flush_list_mutex_exit(buf_pool);
2036 @@ -1685,7 +1717,7 @@
2038 } while (count < min_n && bpage != NULL && len > 0);
2040 - ut_ad(buf_pool_mutex_own(buf_pool));
2041 + //ut_ad(buf_pool_mutex_own(buf_pool));
2045 @@ -1724,13 +1756,15 @@
2046 || sync_thread_levels_empty_except_dict());
2047 #endif /* UNIV_SYNC_DEBUG */
2049 - buf_pool_mutex_enter(buf_pool);
2050 + //buf_pool_mutex_enter(buf_pool);
2052 /* Note: The buffer pool mutex is released and reacquired within
2053 the flush functions. */
2054 switch(flush_type) {
2056 + mutex_enter(&buf_pool->LRU_list_mutex);
2057 count = buf_flush_LRU_list_batch(buf_pool, min_n);
2058 + mutex_exit(&buf_pool->LRU_list_mutex);
2060 case BUF_FLUSH_LIST:
2061 count = buf_flush_flush_list_batch(buf_pool, min_n, lsn_limit);
2062 @@ -1739,7 +1773,7 @@
2066 - buf_pool_mutex_exit(buf_pool);
2067 + //buf_pool_mutex_exit(buf_pool);
2069 buf_flush_buffered_writes();
2071 @@ -1995,7 +2029,7 @@
2073 //buf_pool_mutex_enter(buf_pool);
2075 - buf_pool_mutex_enter(buf_pool);
2076 + mutex_enter(&buf_pool->LRU_list_mutex);
2078 n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
2080 @@ -2012,15 +2046,15 @@
2081 bpage = UT_LIST_GET_LAST(buf_pool->LRU);
2084 - block_mutex = buf_page_get_mutex(bpage);
2086 - mutex_enter(block_mutex);
2087 + block_mutex = buf_page_get_mutex_enter(bpage);
2089 - if (buf_flush_ready_for_replace(bpage)) {
2090 + if (block_mutex && buf_flush_ready_for_replace(bpage)) {
2094 - mutex_exit(block_mutex);
2095 + if (block_mutex) {
2096 + mutex_exit(block_mutex);
2101 @@ -2029,7 +2063,7 @@
2103 //buf_pool_mutex_exit(buf_pool);
2105 - buf_pool_mutex_exit(buf_pool);
2106 + mutex_exit(&buf_pool->LRU_list_mutex);
2108 if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN(buf_pool)) {
2110 @@ -2228,7 +2262,7 @@
2112 ut_ad(buf_flush_list_mutex_own(buf_pool));
2114 - UT_LIST_VALIDATE(list, buf_page_t, buf_pool->flush_list,
2115 + UT_LIST_VALIDATE(flush_list, buf_page_t, buf_pool->flush_list,
2116 ut_ad(ut_list_node_313->in_flush_list));
2118 bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
2119 @@ -2268,7 +2302,7 @@
2120 rnode = rbt_next(buf_pool->flush_rbt, rnode);
2123 - bpage = UT_LIST_GET_NEXT(list, bpage);
2124 + bpage = UT_LIST_GET_NEXT(flush_list, bpage);
2126 ut_a(!bpage || om >= bpage->oldest_modification);
2128 --- a/storage/innobase/buf/buf0lru.c
2129 +++ b/storage/innobase/buf/buf0lru.c
2132 buf_LRU_block_free_hashed_page(
2133 /*===========================*/
2134 - buf_block_t* block); /*!< in: block, must contain a file page and
2135 + buf_block_t* block, /*!< in: block, must contain a file page and
2136 be in a state where it can be freed */
2137 + ibool have_page_hash_mutex);
2139 /******************************************************************//**
2140 Determines if the unzip_LRU list should be used for evicting a victim
2141 @@ -154,15 +155,20 @@
2143 buf_LRU_evict_from_unzip_LRU(
2144 /*=========================*/
2145 - buf_pool_t* buf_pool)
2146 + buf_pool_t* buf_pool,
2147 + ibool have_LRU_mutex)
2152 - ut_ad(buf_pool_mutex_own(buf_pool));
2153 + //ut_ad(buf_pool_mutex_own(buf_pool));
2155 + if (!have_LRU_mutex)
2156 + mutex_enter(&buf_pool->LRU_list_mutex);
2157 /* If the unzip_LRU list is empty, we can only use the LRU. */
2158 if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
2159 + if (!have_LRU_mutex)
2160 + mutex_exit(&buf_pool->LRU_list_mutex);
2164 @@ -171,14 +177,20 @@
2165 decompressed pages in the buffer pool. */
2166 if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
2167 <= UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
2168 + if (!have_LRU_mutex)
2169 + mutex_exit(&buf_pool->LRU_list_mutex);
2173 /* If eviction hasn't started yet, we assume by default
2174 that a workload is disk bound. */
2175 if (buf_pool->freed_page_clock == 0) {
2176 + if (!have_LRU_mutex)
2177 + mutex_exit(&buf_pool->LRU_list_mutex);
2180 + if (!have_LRU_mutex)
2181 + mutex_exit(&buf_pool->LRU_list_mutex);
2183 /* Calculate the average over past intervals, and add the values
2184 of the current interval. */
2185 @@ -246,18 +258,25 @@
2186 page_arr = ut_malloc(
2187 sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE);
2189 - buf_pool_mutex_enter(buf_pool);
2190 + //buf_pool_mutex_enter(buf_pool);
2191 + mutex_enter(&buf_pool->LRU_list_mutex);
2195 bpage = UT_LIST_GET_LAST(buf_pool->LRU);
2197 while (bpage != NULL) {
2198 + /* bpage->state, space, io_fix and buf_fix_count are protected by block_mutex in XtraDB */
2199 + mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
2200 buf_page_t* prev_bpage;
2203 prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
2205 + if (UNIV_UNLIKELY(!block_mutex)) {
2209 ut_a(buf_page_in_file(bpage));
2211 if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE
2212 @@ -266,23 +285,27 @@
2213 /* Compressed pages are never hashed.
2214 Skip blocks of other tablespaces.
2215 Skip I/O-fixed blocks (to be dealt with later). */
2216 + mutex_exit(block_mutex);
2222 - mutex_enter(&((buf_block_t*) bpage)->mutex);
2223 + //mutex_enter(&((buf_block_t*) bpage)->mutex);
2224 is_fixed = bpage->buf_fix_count > 0
2225 || !((buf_block_t*) bpage)->index;
2226 - mutex_exit(&((buf_block_t*) bpage)->mutex);
2227 + //mutex_exit(&((buf_block_t*) bpage)->mutex);
2230 + mutex_exit(block_mutex);
2234 /* Store the page number so that we can drop the hash
2235 index in a batch later. */
2236 page_arr[num_entries] = bpage->offset;
2237 + mutex_exit(block_mutex);
2239 ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE);
2242 @@ -292,14 +315,16 @@
2244 /* Array full. We release the buf_pool->mutex to obey
2245 the latching order. */
2246 - buf_pool_mutex_exit(buf_pool);
2247 + //buf_pool_mutex_exit(buf_pool);
2248 + mutex_exit(&buf_pool->LRU_list_mutex);
2250 buf_LRU_drop_page_hash_batch(
2251 id, zip_size, page_arr, num_entries);
2255 - buf_pool_mutex_enter(buf_pool);
2256 + //buf_pool_mutex_enter(buf_pool);
2257 + mutex_enter(&buf_pool->LRU_list_mutex);
2259 /* Note that we released the buf_pool mutex above
2260 after reading the prev_bpage during processing of a
2261 @@ -317,13 +342,23 @@
2262 /* If, however, bpage has been removed from LRU list
2263 to the free list then we should restart the scan.
2264 bpage->state is protected by buf_pool mutex. */
2266 + /* obtain block_mutex again to avoid a race condition on bpage->state */
2267 + block_mutex = buf_page_get_mutex_enter(bpage);
2268 + if (!block_mutex) {
2273 && buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
2274 + mutex_exit(block_mutex);
2277 + mutex_exit(block_mutex);
2280 - buf_pool_mutex_exit(buf_pool);
2281 + //buf_pool_mutex_exit(buf_pool);
2282 + mutex_exit(&buf_pool->LRU_list_mutex);
2284 /* Drop any remaining batch of search hashed pages. */
2285 buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
2290 - buf_pool_mutex_enter(buf_pool);
2291 + //buf_pool_mutex_enter(buf_pool);
2292 + mutex_enter(&buf_pool->LRU_list_mutex);
2293 + rw_lock_x_lock(&buf_pool->page_hash_latch);
2297 @@ -375,8 +412,15 @@
2301 - block_mutex = buf_page_get_mutex(bpage);
2302 - mutex_enter(block_mutex);
2303 + block_mutex = buf_page_get_mutex_enter(bpage);
2305 + if (!block_mutex) {
2306 + /* This case should be impossible, but if
2307 + something is wrong we will scan again */
2309 + all_freed = FALSE;
2313 if (bpage->buf_fix_count > 0) {
2319 - buf_pool_mutex_exit(buf_pool);
2320 + //buf_pool_mutex_exit(buf_pool);
2321 + mutex_exit(&buf_pool->LRU_list_mutex);
2322 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
2324 zip_size = buf_page_get_zip_size(bpage);
2325 page_no = buf_page_get_page_no(bpage);
2328 if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
2329 != BUF_BLOCK_ZIP_FREE) {
2330 - buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
2331 + buf_LRU_block_free_hashed_page((buf_block_t*) bpage, TRUE);
2332 mutex_exit(block_mutex);
2334 /* The block_mutex should have been released
2339 - buf_pool_mutex_exit(buf_pool);
2340 + //buf_pool_mutex_exit(buf_pool);
2341 + mutex_exit(&buf_pool->LRU_list_mutex);
2342 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
2345 os_thread_sleep(20000);
2348 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
2350 - ut_ad(buf_pool_mutex_own(buf_pool));
2351 + //ut_ad(buf_pool_mutex_own(buf_pool));
2352 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2353 + ut_ad(mutex_own(&buf_pool->zip_mutex));
2354 ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
2356 /* Find the first successor of bpage in the LRU list
2357 @@ -501,17 +551,17 @@
2360 b = UT_LIST_GET_NEXT(LRU, b);
2361 - } while (b && buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE);
2362 + } while (b && (buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE || !b->in_LRU_list));
2364 /* Insert bpage before b, i.e., after the predecessor of b. */
2366 - b = UT_LIST_GET_PREV(list, b);
2367 + b = UT_LIST_GET_PREV(zip_list, b);
2371 - UT_LIST_INSERT_AFTER(list, buf_pool->zip_clean, b, bpage);
2372 + UT_LIST_INSERT_AFTER(zip_list, buf_pool->zip_clean, b, bpage);
2374 - UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage);
2375 + UT_LIST_ADD_FIRST(zip_list, buf_pool->zip_clean, bpage);
2378 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
2379 @@ -525,18 +575,19 @@
2380 buf_LRU_free_from_unzip_LRU_list(
2381 /*=============================*/
2382 buf_pool_t* buf_pool, /*!< in: buffer pool instance */
2383 - ulint n_iterations) /*!< in: how many times this has
2384 + ulint n_iterations, /*!< in: how many times this has
2385 been called repeatedly without
2386 result: a high value means that
2387 we should search farther; we will
2388 search n_iterations / 5 of the
2389 unzip_LRU list, or nothing if
2390 n_iterations >= 5 */
2391 + ibool have_LRU_mutex)
2396 - ut_ad(buf_pool_mutex_own(buf_pool));
2397 + //ut_ad(buf_pool_mutex_own(buf_pool));
2399 /* Theoratically it should be much easier to find a victim
2400 from unzip_LRU as we can choose even a dirty block (as we'll
2402 if we have done five iterations so far. */
2404 if (UNIV_UNLIKELY(n_iterations >= 5)
2405 - || !buf_LRU_evict_from_unzip_LRU(buf_pool)) {
2406 + || !buf_LRU_evict_from_unzip_LRU(buf_pool, have_LRU_mutex)) {
2410 @@ -554,18 +605,25 @@
2411 distance = 100 + (n_iterations
2412 * UT_LIST_GET_LEN(buf_pool->unzip_LRU)) / 5;
2415 for (block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
2416 UNIV_LIKELY(block != NULL) && UNIV_LIKELY(distance > 0);
2417 block = UT_LIST_GET_PREV(unzip_LRU, block), distance--) {
2421 + mutex_enter(&block->mutex);
2422 + if (!block->in_unzip_LRU_list || !block->page.in_LRU_list
2423 + || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
2424 + mutex_exit(&block->mutex);
2428 ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
2429 ut_ad(block->in_unzip_LRU_list);
2430 ut_ad(block->page.in_LRU_list);
2432 - mutex_enter(&block->mutex);
2433 - freed = buf_LRU_free_block(&block->page, FALSE);
2434 + freed = buf_LRU_free_block(&block->page, FALSE, have_LRU_mutex);
2435 mutex_exit(&block->mutex);
2438 @@ -584,35 +642,46 @@
2439 buf_LRU_free_from_common_LRU_list(
2440 /*==============================*/
2441 buf_pool_t* buf_pool,
2442 - ulint n_iterations)
2443 + ulint n_iterations,
2444 /*!< in: how many times this has been called
2445 repeatedly without result: a high value means
2446 that we should search farther; if
2447 n_iterations < 10, then we search
2448 n_iterations / 10 * buf_pool->curr_size
2449 pages from the end of the LRU list */
2450 + ibool have_LRU_mutex)
2455 - ut_ad(buf_pool_mutex_own(buf_pool));
2456 + //ut_ad(buf_pool_mutex_own(buf_pool));
2458 distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
2461 for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
2462 UNIV_LIKELY(bpage != NULL) && UNIV_LIKELY(distance > 0);
2463 bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) {
2467 - mutex_t* block_mutex = buf_page_get_mutex(bpage);
2468 + mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
2470 + if (!block_mutex) {
2474 + if (!bpage->in_LRU_list
2475 + || !buf_page_in_file(bpage)) {
2476 + mutex_exit(block_mutex);
2480 ut_ad(buf_page_in_file(bpage));
2481 ut_ad(bpage->in_LRU_list);
2483 - mutex_enter(block_mutex);
2484 accessed = buf_page_is_accessed(bpage);
2485 - freed = buf_LRU_free_block(bpage, TRUE);
2486 + freed = buf_LRU_free_block(bpage, TRUE, have_LRU_mutex);
2487 mutex_exit(block_mutex);
2490 @@ -649,16 +718,23 @@
2491 n_iterations / 5 of the unzip_LRU list. */
2493 ibool freed = FALSE;
2494 + ibool have_LRU_mutex = FALSE;
2496 - buf_pool_mutex_enter(buf_pool);
2497 + if (UT_LIST_GET_LEN(buf_pool->unzip_LRU))
2498 + have_LRU_mutex = TRUE;
2500 - freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);
2501 + //buf_pool_mutex_enter(buf_pool);
2502 + if (have_LRU_mutex)
2503 + mutex_enter(&buf_pool->LRU_list_mutex);
2505 + freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations, have_LRU_mutex);
2508 freed = buf_LRU_free_from_common_LRU_list(
2509 - buf_pool, n_iterations);
2510 + buf_pool, n_iterations, have_LRU_mutex);
2513 + buf_pool_mutex_enter(buf_pool);
2515 buf_pool->LRU_flush_ended = 0;
2516 } else if (buf_pool->LRU_flush_ended > 0) {
2520 buf_pool_mutex_exit(buf_pool);
2521 + if (have_LRU_mutex)
2522 + mutex_exit(&buf_pool->LRU_list_mutex);
2528 buf_pool = buf_pool_from_array(i);
2530 - buf_pool_mutex_enter(buf_pool);
2531 + //buf_pool_mutex_enter(buf_pool);
2532 + mutex_enter(&buf_pool->LRU_list_mutex);
2533 + mutex_enter(&buf_pool->free_list_mutex);
2535 if (!recv_recovery_on
2536 && UT_LIST_GET_LEN(buf_pool->free)
2541 - buf_pool_mutex_exit(buf_pool);
2542 + //buf_pool_mutex_exit(buf_pool);
2543 + mutex_exit(&buf_pool->LRU_list_mutex);
2544 + mutex_exit(&buf_pool->free_list_mutex);
2548 @@ -754,9 +836,10 @@
2552 - ut_ad(buf_pool_mutex_own(buf_pool));
2553 + //ut_ad(buf_pool_mutex_own(buf_pool));
2555 - block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
2556 + mutex_enter(&buf_pool->free_list_mutex);
2557 + block = (buf_block_t*) UT_LIST_GET_LAST(buf_pool->free);
2562 ut_ad(!block->page.in_flush_list);
2563 ut_ad(!block->page.in_LRU_list);
2564 ut_a(!buf_page_in_file(&block->page));
2565 - UT_LIST_REMOVE(list, buf_pool->free, (&block->page));
2566 + UT_LIST_REMOVE(free, buf_pool->free, (&block->page));
2568 + mutex_exit(&buf_pool->free_list_mutex);
2570 mutex_enter(&block->mutex);
2573 ut_ad(buf_pool_from_block(block) == buf_pool);
2575 mutex_exit(&block->mutex);
2577 + mutex_exit(&buf_pool->free_list_mutex);
2582 ibool mon_value_was = FALSE;
2583 ibool started_monitor = FALSE;
2585 - buf_pool_mutex_enter(buf_pool);
2586 + //buf_pool_mutex_enter(buf_pool);
2588 if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
2589 + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
2592 /* If there is a block in the free list, take it */
2593 block = buf_LRU_get_free_only(buf_pool);
2594 - buf_pool_mutex_exit(buf_pool);
2595 + //buf_pool_mutex_exit(buf_pool);
2598 ut_ad(buf_pool_from_block(block) == buf_pool);
2599 @@ -965,7 +1052,8 @@
2602 ut_a(buf_pool->LRU_old);
2603 - ut_ad(buf_pool_mutex_own(buf_pool));
2604 + //ut_ad(buf_pool_mutex_own(buf_pool));
2605 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2606 ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
2607 ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
2608 #if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
2609 @@ -1031,7 +1119,8 @@
2613 - ut_ad(buf_pool_mutex_own(buf_pool));
2614 + //ut_ad(buf_pool_mutex_own(buf_pool));
2615 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2616 ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
2618 /* We first initialize all blocks in the LRU list as old and then use
2619 @@ -1066,13 +1155,14 @@
2622 ut_ad(buf_page_in_file(bpage));
2623 - ut_ad(buf_pool_mutex_own(buf_pool));
2624 + //ut_ad(buf_pool_mutex_own(buf_pool));
2625 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2627 if (buf_page_belongs_to_unzip_LRU(bpage)) {
2628 buf_block_t* block = (buf_block_t*) bpage;
2630 ut_ad(block->in_unzip_LRU_list);
2631 - ut_d(block->in_unzip_LRU_list = FALSE);
2632 + block->in_unzip_LRU_list = FALSE;
2634 UT_LIST_REMOVE(unzip_LRU, buf_pool->unzip_LRU, block);
2636 @@ -1090,7 +1180,8 @@
2640 - ut_ad(buf_pool_mutex_own(buf_pool));
2641 + //ut_ad(buf_pool_mutex_own(buf_pool));
2642 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2644 ut_a(buf_page_in_file(bpage));
2646 @@ -1167,12 +1258,13 @@
2650 - ut_ad(buf_pool_mutex_own(buf_pool));
2651 + //ut_ad(buf_pool_mutex_own(buf_pool));
2652 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2654 ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
2656 ut_ad(!block->in_unzip_LRU_list);
2657 - ut_d(block->in_unzip_LRU_list = TRUE);
2658 + block->in_unzip_LRU_list = TRUE;
2661 UT_LIST_ADD_LAST(unzip_LRU, buf_pool->unzip_LRU, block);
2662 @@ -1193,7 +1285,8 @@
2666 - ut_ad(buf_pool_mutex_own(buf_pool));
2667 + //ut_ad(buf_pool_mutex_own(buf_pool));
2668 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2670 ut_a(buf_page_in_file(bpage));
2672 @@ -1244,7 +1337,8 @@
2676 - ut_ad(buf_pool_mutex_own(buf_pool));
2677 + //ut_ad(buf_pool_mutex_own(buf_pool));
2678 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2680 ut_a(buf_page_in_file(bpage));
2681 ut_ad(!bpage->in_LRU_list);
2682 @@ -1323,7 +1417,8 @@
2684 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
2686 - ut_ad(buf_pool_mutex_own(buf_pool));
2687 + //ut_ad(buf_pool_mutex_own(buf_pool));
2688 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2691 buf_pool->stat.n_pages_made_young++;
2692 @@ -1362,17 +1457,18 @@
2695 buf_page_t* bpage, /*!< in: block to be freed */
2696 - ibool zip) /*!< in: TRUE if should remove also the
2697 + ibool zip, /*!< in: TRUE if should remove also the
2698 compressed page of an uncompressed page */
2699 + ibool have_LRU_mutex)
2701 buf_page_t* b = NULL;
2702 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
2703 mutex_t* block_mutex = buf_page_get_mutex(bpage);
2705 - ut_ad(buf_pool_mutex_own(buf_pool));
2706 + //ut_ad(buf_pool_mutex_own(buf_pool));
2707 ut_ad(mutex_own(block_mutex));
2708 ut_ad(buf_page_in_file(bpage));
2709 - ut_ad(bpage->in_LRU_list);
2710 + //ut_ad(bpage->in_LRU_list);
2711 ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
2712 #if UNIV_WORD_SIZE == 4
2713 /* On 32-bit systems, there is no padding in buf_page_t. On
2714 @@ -1381,7 +1477,7 @@
2715 UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
2718 - if (!buf_page_can_relocate(bpage)) {
2719 + if (!bpage->in_LRU_list || !block_mutex || !buf_page_can_relocate(bpage)) {
2721 /* Do not free buffer-fixed or I/O-fixed blocks. */
2723 @@ -1415,7 +1511,7 @@
2725 b = buf_page_alloc_descriptor();
2727 - memcpy(b, bpage, sizeof *b);
2728 + //memcpy(b, bpage, sizeof *b);
2732 @@ -1426,6 +1522,39 @@
2734 #endif /* UNIV_DEBUG */
2736 + /* To keep the latch order, we must release and re-acquire block_mutex */
2737 + mutex_exit(block_mutex);
2739 + if (!have_LRU_mutex)
2740 + mutex_enter(&buf_pool->LRU_list_mutex); /* optimistic */
2741 + rw_lock_x_lock(&buf_pool->page_hash_latch);
2742 + mutex_enter(block_mutex);
2744 + /* recheck states of block */
2745 + if (!bpage->in_LRU_list || block_mutex != buf_page_get_mutex(bpage)
2746 + || !buf_page_can_relocate(bpage)) {
2749 + buf_buddy_free(buf_pool, b, sizeof *b, TRUE);
2751 + if (!have_LRU_mutex)
2752 + mutex_exit(&buf_pool->LRU_list_mutex);
2753 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
2755 + } else if (zip || !bpage->zip.data) {
2756 + if (bpage->oldest_modification)
2758 + } else if (bpage->oldest_modification) {
2759 + if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
2760 + ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY);
2766 + memcpy(b, bpage, sizeof *b);
2769 if (buf_LRU_block_remove_hashed_page(bpage, zip)
2770 != BUF_BLOCK_ZIP_FREE) {
2771 ut_a(bpage->buf_fix_count == 0);
2772 @@ -1442,6 +1571,10 @@
2776 + while (prev_b && !prev_b->in_LRU_list) {
2777 + prev_b = UT_LIST_GET_PREV(LRU, prev_b);
2780 b->state = b->oldest_modification
2781 ? BUF_BLOCK_ZIP_DIRTY
2782 : BUF_BLOCK_ZIP_PAGE;
2783 @@ -1517,6 +1650,7 @@
2784 buf_LRU_add_block_low(b, buf_page_is_old(b));
2787 + mutex_enter(&buf_pool->zip_mutex);
2788 if (b->state == BUF_BLOCK_ZIP_PAGE) {
2789 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2790 buf_LRU_insert_zip_clean(b);
2791 @@ -1534,9 +1668,12 @@
2792 buf_pool->mutex and block_mutex. */
2794 b->io_fix = BUF_IO_READ;
2795 + mutex_exit(&buf_pool->zip_mutex);
2798 - buf_pool_mutex_exit(buf_pool);
2799 + //buf_pool_mutex_exit(buf_pool);
2800 + mutex_exit(&buf_pool->LRU_list_mutex);
2801 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
2802 mutex_exit(block_mutex);
2804 /* Remove possible adaptive hash index on the page.
2805 @@ -1568,7 +1705,9 @@
2806 : BUF_NO_CHECKSUM_MAGIC);
2809 - buf_pool_mutex_enter(buf_pool);
2810 + //buf_pool_mutex_enter(buf_pool);
2811 + if (have_LRU_mutex)
2812 + mutex_enter(&buf_pool->LRU_list_mutex);
2813 mutex_enter(block_mutex);
2816 @@ -1578,13 +1717,17 @@
2817 mutex_exit(&buf_pool->zip_mutex);
2820 - buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
2821 + buf_LRU_block_free_hashed_page((buf_block_t*) bpage, FALSE);
2823 /* The block_mutex should have been released by
2824 buf_LRU_block_remove_hashed_page() when it returns
2825 BUF_BLOCK_ZIP_FREE. */
2826 ut_ad(block_mutex == &buf_pool->zip_mutex);
2827 mutex_enter(block_mutex);
2829 + if (!have_LRU_mutex)
2830 + mutex_exit(&buf_pool->LRU_list_mutex);
2831 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
2835 @@ -1596,13 +1739,14 @@
2837 buf_LRU_block_free_non_file_page(
2838 /*=============================*/
2839 - buf_block_t* block) /*!< in: block, must not contain a file page */
2840 + buf_block_t* block, /*!< in: block, must not contain a file page */
2841 + ibool have_page_hash_mutex)
2844 buf_pool_t* buf_pool = buf_pool_from_block(block);
2847 - ut_ad(buf_pool_mutex_own(buf_pool));
2848 + //ut_ad(buf_pool_mutex_own(buf_pool));
2849 ut_ad(mutex_own(&block->mutex));
2851 switch (buf_block_get_state(block)) {
2852 @@ -1636,18 +1780,21 @@
2854 block->page.zip.data = NULL;
2855 mutex_exit(&block->mutex);
2856 - buf_pool_mutex_exit_forbid(buf_pool);
2857 + //buf_pool_mutex_exit_forbid(buf_pool);
2860 - buf_pool, data, page_zip_get_size(&block->page.zip));
2861 + buf_pool, data, page_zip_get_size(&block->page.zip),
2862 + have_page_hash_mutex);
2864 - buf_pool_mutex_exit_allow(buf_pool);
2865 + //buf_pool_mutex_exit_allow(buf_pool);
2866 mutex_enter(&block->mutex);
2867 page_zip_set_size(&block->page.zip, 0);
2870 - UT_LIST_ADD_FIRST(list, buf_pool->free, (&block->page));
2871 + mutex_enter(&buf_pool->free_list_mutex);
2872 + UT_LIST_ADD_FIRST(free, buf_pool->free, (&block->page));
2873 ut_d(block->page.in_free_list = TRUE);
2874 + mutex_exit(&buf_pool->free_list_mutex);
2876 UNIV_MEM_ASSERT_AND_FREE(block->frame, UNIV_PAGE_SIZE);
2878 @@ -1677,7 +1824,11 @@
2879 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
2882 - ut_ad(buf_pool_mutex_own(buf_pool));
2883 + //ut_ad(buf_pool_mutex_own(buf_pool));
2884 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
2885 +#ifdef UNIV_SYNC_DEBUG
2886 + ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
2888 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
2890 ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
2891 @@ -1785,7 +1936,9 @@
2893 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2894 mutex_exit(buf_page_get_mutex(bpage));
2895 - buf_pool_mutex_exit(buf_pool);
2896 + //buf_pool_mutex_exit(buf_pool);
2897 + mutex_exit(&buf_pool->LRU_list_mutex);
2898 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
2902 @@ -1807,17 +1960,17 @@
2903 ut_a(buf_page_get_zip_size(bpage));
2905 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2906 - UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
2907 + UT_LIST_REMOVE(zip_list, buf_pool->zip_clean, bpage);
2908 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
2910 mutex_exit(&buf_pool->zip_mutex);
2911 - buf_pool_mutex_exit_forbid(buf_pool);
2912 + //buf_pool_mutex_exit_forbid(buf_pool);
2915 buf_pool, bpage->zip.data,
2916 - page_zip_get_size(&bpage->zip));
2917 + page_zip_get_size(&bpage->zip), TRUE);
2919 - buf_pool_mutex_exit_allow(buf_pool);
2920 + //buf_pool_mutex_exit_allow(buf_pool);
2921 buf_page_free_descriptor(bpage);
2922 return(BUF_BLOCK_ZIP_FREE);
2924 @@ -1839,13 +1992,13 @@
2925 ut_ad(!bpage->in_flush_list);
2926 ut_ad(!bpage->in_LRU_list);
2927 mutex_exit(&((buf_block_t*) bpage)->mutex);
2928 - buf_pool_mutex_exit_forbid(buf_pool);
2929 + //buf_pool_mutex_exit_forbid(buf_pool);
2933 - page_zip_get_size(&bpage->zip));
2934 + page_zip_get_size(&bpage->zip), TRUE);
2936 - buf_pool_mutex_exit_allow(buf_pool);
2937 + //buf_pool_mutex_exit_allow(buf_pool);
2938 mutex_enter(&((buf_block_t*) bpage)->mutex);
2939 page_zip_set_size(&bpage->zip, 0);
2941 @@ -1871,18 +2024,19 @@
2943 buf_LRU_block_free_hashed_page(
2944 /*===========================*/
2945 - buf_block_t* block) /*!< in: block, must contain a file page and
2946 + buf_block_t* block, /*!< in: block, must contain a file page and
2947 be in a state where it can be freed */
2948 + ibool have_page_hash_mutex)
2951 - buf_pool_t* buf_pool = buf_pool_from_block(block);
2952 - ut_ad(buf_pool_mutex_own(buf_pool));
2953 + //buf_pool_t* buf_pool = buf_pool_from_block(block);
2954 + //ut_ad(buf_pool_mutex_own(buf_pool));
2956 ut_ad(mutex_own(&block->mutex));
2958 buf_block_set_state(block, BUF_BLOCK_MEMORY);
2960 - buf_LRU_block_free_non_file_page(block);
2961 + buf_LRU_block_free_non_file_page(block, have_page_hash_mutex);
2964 /******************************************************************//**
2965 @@ -1897,7 +2051,7 @@
2967 if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
2968 != BUF_BLOCK_ZIP_FREE) {
2969 - buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
2970 + buf_LRU_block_free_hashed_page((buf_block_t*) bpage, TRUE);
2974 @@ -1925,7 +2079,8 @@
2978 - buf_pool_mutex_enter(buf_pool);
2979 + //buf_pool_mutex_enter(buf_pool);
2980 + mutex_enter(&buf_pool->LRU_list_mutex);
2982 if (ratio != buf_pool->LRU_old_ratio) {
2983 buf_pool->LRU_old_ratio = ratio;
2984 @@ -1937,7 +2092,8 @@
2988 - buf_pool_mutex_exit(buf_pool);
2989 + //buf_pool_mutex_exit(buf_pool);
2990 + mutex_exit(&buf_pool->LRU_list_mutex);
2992 buf_pool->LRU_old_ratio = ratio;
2994 @@ -2042,7 +2198,8 @@
2998 - buf_pool_mutex_enter(buf_pool);
2999 + //buf_pool_mutex_enter(buf_pool);
3000 + mutex_enter(&buf_pool->LRU_list_mutex);
3002 if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
3004 @@ -2103,16 +2260,22 @@
3006 ut_a(buf_pool->LRU_old_len == old_len);
3008 - UT_LIST_VALIDATE(list, buf_page_t, buf_pool->free,
3009 + mutex_exit(&buf_pool->LRU_list_mutex);
3010 + mutex_enter(&buf_pool->free_list_mutex);
3012 + UT_LIST_VALIDATE(free, buf_page_t, buf_pool->free,
3013 ut_ad(ut_list_node_313->in_free_list));
3015 for (bpage = UT_LIST_GET_FIRST(buf_pool->free);
3017 - bpage = UT_LIST_GET_NEXT(list, bpage)) {
3018 + bpage = UT_LIST_GET_NEXT(free, bpage)) {
3020 ut_a(buf_page_get_state(bpage) == BUF_BLOCK_NOT_USED);
3023 + mutex_exit(&buf_pool->free_list_mutex);
3024 + mutex_enter(&buf_pool->LRU_list_mutex);
3026 UT_LIST_VALIDATE(unzip_LRU, buf_block_t, buf_pool->unzip_LRU,
3027 ut_ad(ut_list_node_313->in_unzip_LRU_list
3028 && ut_list_node_313->page.in_LRU_list));
3029 @@ -2126,7 +2289,8 @@
3030 ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
3033 - buf_pool_mutex_exit(buf_pool);
3034 + //buf_pool_mutex_exit(buf_pool);
3035 + mutex_exit(&buf_pool->LRU_list_mutex);
3038 /**********************************************************************//**
3039 @@ -2162,7 +2326,8 @@
3040 const buf_page_t* bpage;
3043 - buf_pool_mutex_enter(buf_pool);
3044 + //buf_pool_mutex_enter(buf_pool);
3045 + mutex_enter(&buf_pool->LRU_list_mutex);
3047 bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
3049 @@ -2219,7 +2384,8 @@
3050 bpage = UT_LIST_GET_NEXT(LRU, bpage);
3053 - buf_pool_mutex_exit(buf_pool);
3054 + //buf_pool_mutex_exit(buf_pool);
3055 + mutex_exit(&buf_pool->LRU_list_mutex);
3058 /**********************************************************************//**
3059 --- a/storage/innobase/buf/buf0rea.c
3060 +++ b/storage/innobase/buf/buf0rea.c
3065 + buf_pool_mutex_exit(buf_pool);
3067 /* Check that almost all pages in the area have been accessed; if
3068 offset == low, the accesses must be in a descending order, otherwise,
3073 + rw_lock_s_lock(&buf_pool->page_hash_latch);
3074 for (i = low; i < high; i++) {
3075 bpage = buf_page_hash_get(buf_pool, space, i);
3079 if (fail_count > threshold) {
3080 /* Too many failures: return */
3081 - buf_pool_mutex_exit(buf_pool);
3082 + //buf_pool_mutex_exit(buf_pool);
3083 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
3088 bpage = buf_page_hash_get(buf_pool, space, offset);
3090 if (bpage == NULL) {
3091 - buf_pool_mutex_exit(buf_pool);
3092 + //buf_pool_mutex_exit(buf_pool);
3093 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
3098 pred_offset = fil_page_get_prev(frame);
3099 succ_offset = fil_page_get_next(frame);
3101 - buf_pool_mutex_exit(buf_pool);
3102 + //buf_pool_mutex_exit(buf_pool);
3103 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
3105 if ((offset == low) && (succ_offset == offset + 1)) {
3107 --- a/storage/innobase/handler/ha_innodb.cc
3108 +++ b/storage/innobase/handler/ha_innodb.cc
3109 @@ -265,6 +265,10 @@
3110 # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
3111 {&buf_pool_mutex_key, "buf_pool_mutex", 0},
3112 {&buf_pool_zip_mutex_key, "buf_pool_zip_mutex", 0},
3113 + {&buf_pool_LRU_list_mutex_key, "buf_pool_LRU_list_mutex", 0},
3114 + {&buf_pool_free_list_mutex_key, "buf_pool_free_list_mutex", 0},
3115 + {&buf_pool_zip_free_mutex_key, "buf_pool_zip_free_mutex", 0},
3116 + {&buf_pool_zip_hash_mutex_key, "buf_pool_zip_hash_mutex", 0},
3117 {&cache_last_read_mutex_key, "cache_last_read_mutex", 0},
3118 {&dict_foreign_err_mutex_key, "dict_foreign_err_mutex", 0},
3119 {&dict_sys_mutex_key, "dict_sys_mutex", 0},
3121 {&archive_lock_key, "archive_lock", 0},
3122 # endif /* UNIV_LOG_ARCHIVE */
3123 {&btr_search_latch_key, "btr_search_latch", 0},
3124 + {&buf_pool_page_hash_key, "buf_pool_page_hash_latch", 0},
3125 # ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
3126 {&buf_block_lock_key, "buf_block_lock", 0},
3127 # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
3128 --- a/storage/innobase/handler/i_s.cc
3129 +++ b/storage/innobase/handler/i_s.cc
3130 @@ -1583,7 +1583,8 @@
3132 buf_pool = buf_pool_from_array(i);
3134 - buf_pool_mutex_enter(buf_pool);
3135 + //buf_pool_mutex_enter(buf_pool);
3136 + mutex_enter(&buf_pool->zip_free_mutex);
3138 for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
3139 buf_buddy_stat_t* buddy_stat;
3140 @@ -1613,7 +1614,8 @@
3144 - buf_pool_mutex_exit(buf_pool);
3145 + //buf_pool_mutex_exit(buf_pool);
3146 + mutex_exit(&buf_pool->zip_free_mutex);
3150 --- a/storage/innobase/ibuf/ibuf0ibuf.c
3151 +++ b/storage/innobase/ibuf/ibuf0ibuf.c
3152 @@ -3821,9 +3821,11 @@
3153 ulint fold = buf_page_address_fold(space, page_no);
3154 buf_pool_t* buf_pool = buf_pool_get(space, page_no);
3156 - buf_pool_mutex_enter(buf_pool);
3157 + //buf_pool_mutex_enter(buf_pool);
3158 + rw_lock_s_lock(&buf_pool->page_hash_latch);
3159 bpage = buf_page_hash_get_low(buf_pool, space, page_no, fold);
3160 - buf_pool_mutex_exit(buf_pool);
3161 + //buf_pool_mutex_exit(buf_pool);
3162 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
3164 if (UNIV_LIKELY_NULL(bpage)) {
3165 /* A buffer pool watch has been set or the
3166 --- a/storage/innobase/include/buf0buddy.h
3167 +++ b/storage/innobase/include/buf0buddy.h
3169 ulint size, /*!< in: compressed page size
3170 (between PAGE_ZIP_MIN_SIZE and
3172 - ibool* lru) /*!< in: pointer to a variable
3173 + ibool* lru, /*!< in: pointer to a variable
3174 that will be assigned TRUE if
3175 storage was allocated from the
3176 LRU list and buf_pool->mutex was
3177 temporarily released */
3178 + ibool have_page_hash_mutex)
3179 __attribute__((malloc, nonnull));
3181 /**********************************************************************//**
3183 the block resides */
3184 void* buf, /*!< in: block to be freed, must not
3185 be pointed to by the buffer pool */
3186 - ulint size) /*!< in: block size,
3187 + ulint size, /*!< in: block size,
3188 up to UNIV_PAGE_SIZE */
3189 + ibool have_page_hash_mutex)
3190 __attribute__((nonnull));
3193 --- a/storage/innobase/include/buf0buddy.ic
3194 +++ b/storage/innobase/include/buf0buddy.ic
3196 buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
3197 ulint i, /*!< in: index of buf_pool->zip_free[],
3198 or BUF_BUDDY_SIZES */
3199 - ibool* lru) /*!< in: pointer to a variable that
3200 + ibool* lru, /*!< in: pointer to a variable that
3201 will be assigned TRUE if storage was
3202 allocated from the LRU list and
3203 buf_pool->mutex was temporarily
3205 + ibool have_page_hash_mutex)
3206 __attribute__((malloc, nonnull));
3208 /**********************************************************************//**
3210 buf_pool_t* buf_pool, /*!< in: buffer pool instance */
3211 void* buf, /*!< in: block to be freed, must not be
3212 pointed to by the buffer pool */
3213 - ulint i) /*!< in: index of buf_pool->zip_free[],
3214 + ulint i, /*!< in: index of buf_pool->zip_free[],
3215 or BUF_BUDDY_SIZES */
3216 + ibool have_page_hash_mutex)
3217 __attribute__((nonnull));
3219 /**********************************************************************//**
3220 @@ -101,19 +103,20 @@
3221 ulint size, /*!< in: compressed page size
3222 (between PAGE_ZIP_MIN_SIZE and
3224 - ibool* lru) /*!< in: pointer to a variable
3225 + ibool* lru, /*!< in: pointer to a variable
3226 that will be assigned TRUE if
3227 storage was allocated from the
3228 LRU list and buf_pool->mutex was
3229 temporarily released */
3230 + ibool have_page_hash_mutex)
3232 - ut_ad(buf_pool_mutex_own(buf_pool));
3233 + //ut_ad(buf_pool_mutex_own(buf_pool));
3234 ut_ad(ut_is_2pow(size));
3235 ut_ad(size >= PAGE_ZIP_MIN_SIZE);
3236 ut_ad(size <= UNIV_PAGE_SIZE);
3238 return((byte*) buf_buddy_alloc_low(buf_pool, buf_buddy_get_slot(size),
3240 + lru, have_page_hash_mutex));
3243 /**********************************************************************//**
3244 @@ -126,15 +129,28 @@
3245 the block resides */
3246 void* buf, /*!< in: block to be freed, must not
3247 be pointed to by the buffer pool */
3248 - ulint size) /*!< in: block size,
3249 + ulint size, /*!< in: block size,
3250 up to UNIV_PAGE_SIZE */
3251 + ibool have_page_hash_mutex)
3253 - ut_ad(buf_pool_mutex_own(buf_pool));
3254 + //ut_ad(buf_pool_mutex_own(buf_pool));
3255 ut_ad(ut_is_2pow(size));
3256 ut_ad(size >= PAGE_ZIP_MIN_SIZE);
3257 ut_ad(size <= UNIV_PAGE_SIZE);
3259 - buf_buddy_free_low(buf_pool, buf, buf_buddy_get_slot(size));
3260 + if (!have_page_hash_mutex) {
3261 + mutex_enter(&buf_pool->LRU_list_mutex);
3262 + rw_lock_x_lock(&buf_pool->page_hash_latch);
3265 + mutex_enter(&buf_pool->zip_free_mutex);
3266 + buf_buddy_free_low(buf_pool, buf, buf_buddy_get_slot(size), TRUE);
3267 + mutex_exit(&buf_pool->zip_free_mutex);
3269 + if (!have_page_hash_mutex) {
3270 + mutex_exit(&buf_pool->LRU_list_mutex);
3271 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
3275 #ifdef UNIV_MATERIALIZE
3276 --- a/storage/innobase/include/buf0buf.h
3277 +++ b/storage/innobase/include/buf0buf.h
3278 @@ -212,6 +212,20 @@
3279 /*==========================*/
3281 /********************************************************************//**
3285 +buf_pool_page_hash_x_lock_all(void);
3286 +/*================================*/
3288 +/********************************************************************//**
3292 +buf_pool_page_hash_x_unlock_all(void);
3293 +/*==================================*/
3295 +/********************************************************************//**
3296 Creates the buffer pool.
3297 @return own: buf_pool object, NULL if not enough memory or error */
3299 @@ -851,6 +865,15 @@
3300 const buf_page_t* bpage) /*!< in: pointer to control block */
3301 __attribute__((pure));
3303 +/*************************************************************************
3304 +Gets the mutex of a block and enter the mutex with consistency. */
3307 +buf_page_get_mutex_enter(
3308 +/*=========================*/
3309 + const buf_page_t* bpage) /*!< in: pointer to control block */
3310 + __attribute__((pure));
3312 /*********************************************************************//**
3313 Get the flush type of a page.
3314 @return flush type */
3315 @@ -1332,7 +1355,7 @@
3316 All these are protected by buf_pool->mutex. */
3319 - UT_LIST_NODE_T(buf_page_t) list;
3320 + /* UT_LIST_NODE_T(buf_page_t) list; */
3321 /*!< based on state, this is a
3322 list node, protected either by
3323 buf_pool->mutex or by
3324 @@ -1360,6 +1383,10 @@
3325 BUF_BLOCK_REMOVE_HASH or
3326 BUF_BLOCK_READY_IN_USE. */
3328 + /* resplit for optimistic use */
3329 + UT_LIST_NODE_T(buf_page_t) free;
3330 + UT_LIST_NODE_T(buf_page_t) flush_list;
3331 + UT_LIST_NODE_T(buf_page_t) zip_list; /* zip_clean or zip_free[] */
3333 ibool in_flush_list; /*!< TRUE if in buf_pool->flush_list;
3334 when buf_pool->flush_list_mutex is
3335 @@ -1452,11 +1479,11 @@
3336 a block is in the unzip_LRU list
3337 if page.state == BUF_BLOCK_FILE_PAGE
3338 and page.zip.data != NULL */
3340 +//#ifdef UNIV_DEBUG
3341 ibool in_unzip_LRU_list;/*!< TRUE if the page is in the
3342 decompressed LRU list;
3343 used in debugging */
3344 -#endif /* UNIV_DEBUG */
3345 +//#endif /* UNIV_DEBUG */
3346 mutex_t mutex; /*!< mutex protecting this block:
3347 state (also protected by the buffer
3348 pool mutex), io_fix, buf_fix_count,
3349 @@ -1636,6 +1663,11 @@
3350 pool instance, protects compressed
3351 only pages (of type buf_page_t, not
3353 + mutex_t LRU_list_mutex;
3354 + rw_lock_t page_hash_latch;
3355 + mutex_t free_list_mutex;
3356 + mutex_t zip_free_mutex;
3357 + mutex_t zip_hash_mutex;
3358 ulint instance_no; /*!< Array index of this buffer
3360 ulint old_pool_size; /*!< Old pool size in bytes */
3361 @@ -1789,8 +1821,8 @@
3362 /** Test if a buffer pool mutex is owned. */
3363 #define buf_pool_mutex_own(b) mutex_own(&b->mutex)
3364 /** Acquire a buffer pool mutex. */
3365 +/* the buf_pool_mutex is changed the latch order */
3366 #define buf_pool_mutex_enter(b) do { \
3367 - ut_ad(!mutex_own(&b->zip_mutex)); \
3368 mutex_enter(&b->mutex); \
3371 --- a/storage/innobase/include/buf0buf.ic
3372 +++ b/storage/innobase/include/buf0buf.ic
3374 case BUF_BLOCK_ZIP_FREE:
3375 /* This is a free page in buf_pool->zip_free[].
3376 Such pages should only be accessed by the buddy allocator. */
3378 + /* ut_error; */ /* optimistic */
3380 case BUF_BLOCK_ZIP_PAGE:
3381 case BUF_BLOCK_ZIP_DIRTY:
3382 @@ -335,9 +335,16 @@
3384 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3386 + if (/*equivalent to buf_pool_watch_is_sentinel(buf_pool, bpage)*/
3387 + bpage >= &buf_pool->watch[0]
3388 + && bpage < &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {
3389 + /* TODO: this code is the interim. should be confirmed later. */
3390 + return(&buf_pool->zip_mutex);
3393 switch (buf_page_get_state(bpage)) {
3394 case BUF_BLOCK_ZIP_FREE:
3396 + /* ut_error; */ /* optimistic */
3398 case BUF_BLOCK_ZIP_PAGE:
3399 case BUF_BLOCK_ZIP_DIRTY:
3400 @@ -347,6 +354,28 @@
3404 +/*************************************************************************
3405 +Gets the mutex of a block and enter the mutex with consistency. */
3408 +buf_page_get_mutex_enter(
3409 +/*=========================*/
3410 + const buf_page_t* bpage) /*!< in: pointer to control block */
3412 + mutex_t* block_mutex;
3415 + block_mutex = buf_page_get_mutex(bpage);
3417 + return block_mutex;
3419 + mutex_enter(block_mutex);
3420 + if (block_mutex == buf_page_get_mutex(bpage))
3421 + return block_mutex;
3422 + mutex_exit(block_mutex);
3426 /*********************************************************************//**
3427 Get the flush type of a page.
3428 @return flush type */
3430 enum buf_io_fix io_fix) /*!< in: io_fix state */
3433 - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3434 - ut_ad(buf_pool_mutex_own(buf_pool));
3435 + //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3436 + //ut_ad(buf_pool_mutex_own(buf_pool));
3438 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
3440 @@ -474,14 +503,14 @@
3441 const buf_page_t* bpage) /*!< control block being relocated */
3444 - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3445 - ut_ad(buf_pool_mutex_own(buf_pool));
3446 + //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3447 + //ut_ad(buf_pool_mutex_own(buf_pool));
3449 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
3450 ut_ad(buf_page_in_file(bpage));
3451 - ut_ad(bpage->in_LRU_list);
3452 + //ut_ad(bpage->in_LRU_list);
3454 - return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
3455 + return(bpage->in_LRU_list && bpage->io_fix == BUF_IO_NONE
3456 && bpage->buf_fix_count == 0);
3460 const buf_page_t* bpage) /*!< in: control block */
3463 - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3464 - ut_ad(buf_pool_mutex_own(buf_pool));
3465 + //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3466 + //ut_ad(buf_pool_mutex_own(buf_pool));
3468 ut_ad(buf_page_in_file(bpage));
3471 buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3472 #endif /* UNIV_DEBUG */
3473 ut_a(buf_page_in_file(bpage));
3474 - ut_ad(buf_pool_mutex_own(buf_pool));
3475 + //ut_ad(buf_pool_mutex_own(buf_pool));
3476 + ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
3477 ut_ad(bpage->in_LRU_list);
3479 #ifdef UNIV_LRU_DEBUG
3480 @@ -563,9 +593,10 @@
3481 ulint time_ms) /*!< in: ut_time_ms() */
3484 - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3485 - ut_ad(buf_pool_mutex_own(buf_pool));
3486 + //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
3487 + //ut_ad(buf_pool_mutex_own(buf_pool));
3489 + ut_ad(mutex_own(buf_page_get_mutex(bpage)));
3490 ut_a(buf_page_in_file(bpage));
3492 if (!bpage->access_time) {
3493 @@ -808,19 +839,19 @@
3495 buf_block_t* block) /*!< in, own: block to be freed */
3497 - buf_pool_t* buf_pool = buf_pool_from_bpage((buf_page_t*)block);
3498 + //buf_pool_t* buf_pool = buf_pool_from_bpage((buf_page_t*)block);
3500 - buf_pool_mutex_enter(buf_pool);
3501 + //buf_pool_mutex_enter(buf_pool);
3503 mutex_enter(&block->mutex);
3505 ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
3507 - buf_LRU_block_free_non_file_page(block);
3508 + buf_LRU_block_free_non_file_page(block, FALSE);
3510 mutex_exit(&block->mutex);
3512 - buf_pool_mutex_exit(buf_pool);
3513 + //buf_pool_mutex_exit(buf_pool);
3515 #endif /* !UNIV_HOTBACKUP */
3517 @@ -868,17 +899,17 @@
3521 - mutex_t* block_mutex = buf_page_get_mutex(bpage);
3523 - mutex_enter(block_mutex);
3524 + mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
3526 - if (buf_page_in_file(bpage)) {
3527 + if (block_mutex && buf_page_in_file(bpage)) {
3528 lsn = bpage->newest_modification;
3533 - mutex_exit(block_mutex);
3534 + if (block_mutex) {
3535 + mutex_exit(block_mutex);
3541 #ifdef UNIV_SYNC_DEBUG
3542 buf_pool_t* buf_pool = buf_pool_from_bpage((buf_page_t*)block);
3544 - ut_ad((buf_pool_mutex_own(buf_pool)
3545 + ut_ad((mutex_own(&buf_pool->LRU_list_mutex)
3546 && (block->page.buf_fix_count == 0))
3547 || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
3548 #endif /* UNIV_SYNC_DEBUG */
3549 @@ -1026,7 +1057,11 @@
3553 - ut_ad(buf_pool_mutex_own(buf_pool));
3554 + //ut_ad(buf_pool_mutex_own(buf_pool));
3555 +#ifdef UNIV_SYNC_DEBUG
3556 + ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX)
3557 + || rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_SHARED));
3559 ut_ad(fold == buf_page_address_fold(space, offset));
3561 /* Look for the page in the hash table */
3562 @@ -1111,11 +1146,13 @@
3563 const buf_page_t* bpage;
3564 buf_pool_t* buf_pool = buf_pool_get(space, offset);
3566 - buf_pool_mutex_enter(buf_pool);
3567 + //buf_pool_mutex_enter(buf_pool);
3568 + rw_lock_s_lock(&buf_pool->page_hash_latch);
3570 bpage = buf_page_hash_get(buf_pool, space, offset);
3572 - buf_pool_mutex_exit(buf_pool);
3573 + //buf_pool_mutex_exit(buf_pool);
3574 + rw_lock_s_unlock(&buf_pool->page_hash_latch);
3576 return(bpage != NULL);
3578 @@ -1243,4 +1280,38 @@
3579 buf_pool_mutex_exit(buf_pool);
3583 +/********************************************************************//**
3587 +buf_pool_page_hash_x_lock_all(void)
3588 +/*===============================*/
3592 + for (i = 0; i < srv_buf_pool_instances; i++) {
3593 + buf_pool_t* buf_pool;
3595 + buf_pool = buf_pool_from_array(i);
3596 + rw_lock_x_lock(&buf_pool->page_hash_latch);
3600 +/********************************************************************//**
3604 +buf_pool_page_hash_x_unlock_all(void)
3605 +/*=================================*/
3609 + for (i = 0; i < srv_buf_pool_instances; i++) {
3610 + buf_pool_t* buf_pool;
3612 + buf_pool = buf_pool_from_array(i);
3613 + rw_lock_x_unlock(&buf_pool->page_hash_latch);
3616 #endif /* !UNIV_HOTBACKUP */
3617 --- a/storage/innobase/include/buf0lru.h
3618 +++ b/storage/innobase/include/buf0lru.h
3622 buf_page_t* bpage, /*!< in: block to be freed */
3623 - ibool zip) /*!< in: TRUE if should remove also the
3624 + ibool zip, /*!< in: TRUE if should remove also the
3625 compressed page of an uncompressed page */
3626 + ibool have_LRU_mutex)
3627 __attribute__((nonnull));
3628 /******************************************************************//**
3629 Try to free a replaceable block.
3632 buf_LRU_block_free_non_file_page(
3633 /*=============================*/
3634 - buf_block_t* block); /*!< in: block, must not contain a file page */
3635 + buf_block_t* block, /*!< in: block, must not contain a file page */
3636 + ibool have_page_hash_mutex);
3637 /******************************************************************//**
3638 Adds a block to the LRU list. */
3640 --- a/storage/innobase/include/sync0rw.h
3641 +++ b/storage/innobase/include/sync0rw.h
3643 extern mysql_pfs_key_t archive_lock_key;
3644 # endif /* UNIV_LOG_ARCHIVE */
3645 extern mysql_pfs_key_t btr_search_latch_key;
3646 +extern mysql_pfs_key_t buf_pool_page_hash_key;
3647 extern mysql_pfs_key_t buf_block_lock_key;
3648 # ifdef UNIV_SYNC_DEBUG
3649 extern mysql_pfs_key_t buf_block_debug_latch_key;
3650 --- a/storage/innobase/include/sync0sync.h
3651 +++ b/storage/innobase/include/sync0sync.h
3653 extern mysql_pfs_key_t buffer_block_mutex_key;
3654 extern mysql_pfs_key_t buf_pool_mutex_key;
3655 extern mysql_pfs_key_t buf_pool_zip_mutex_key;
3656 +extern mysql_pfs_key_t buf_pool_LRU_list_mutex_key;
3657 +extern mysql_pfs_key_t buf_pool_free_list_mutex_key;
3658 +extern mysql_pfs_key_t buf_pool_zip_free_mutex_key;
3659 +extern mysql_pfs_key_t buf_pool_zip_hash_mutex_key;
3660 extern mysql_pfs_key_t cache_last_read_mutex_key;
3661 extern mysql_pfs_key_t dict_foreign_err_mutex_key;
3662 extern mysql_pfs_key_t dict_sys_mutex_key;
3664 #define SYNC_TRX_SYS_HEADER 290
3665 #define SYNC_PURGE_QUEUE 200
3666 #define SYNC_LOG 170
3667 -#define SYNC_LOG_FLUSH_ORDER 147
3668 +#define SYNC_LOG_FLUSH_ORDER 156
3669 #define SYNC_RECV 168
3670 #define SYNC_WORK_QUEUE 162
3671 #define SYNC_SEARCH_SYS 160 /* NOTE that if we have a memory
3672 @@ -676,8 +680,13 @@
3673 SYNC_SEARCH_SYS, as memory allocation
3674 can call routines there! Otherwise
3675 the level is SYNC_MEM_HASH. */
3676 +#define SYNC_BUF_LRU_LIST 158
3677 +#define SYNC_BUF_PAGE_HASH 157
3678 +#define SYNC_BUF_BLOCK 155 /* Block mutex */
3679 +#define SYNC_BUF_FREE_LIST 153
3680 +#define SYNC_BUF_ZIP_FREE 152
3681 +#define SYNC_BUF_ZIP_HASH 151
3682 #define SYNC_BUF_POOL 150 /* Buffer pool mutex */
3683 -#define SYNC_BUF_BLOCK 146 /* Block mutex */
3684 #define SYNC_BUF_FLUSH_LIST 145 /* Buffer flush list mutex */
3685 #define SYNC_DOUBLEWRITE 140
3686 #define SYNC_ANY_LATCH 135
3688 os_fast_mutex; /*!< We use this OS mutex in place of lock_word
3689 when atomic operations are not enabled */
3691 - ulint waiters; /*!< This ulint is set to 1 if there are (or
3692 + volatile ulint waiters; /*!< This ulint is set to 1 if there are (or
3693 may be) threads waiting in the global wait
3694 array for this mutex to be released.
3695 Otherwise, this is 0. */
3696 --- a/storage/innobase/srv/srv0srv.c
3697 +++ b/storage/innobase/srv/srv0srv.c
3698 @@ -3102,7 +3102,7 @@
3699 level += log_sys->max_checkpoint_age
3700 - (lsn - oldest_modification);
3702 - bpage = UT_LIST_GET_NEXT(list, bpage);
3703 + bpage = UT_LIST_GET_NEXT(flush_list, bpage);
3707 @@ -3188,7 +3188,7 @@
3711 - bpage = UT_LIST_GET_NEXT(list, bpage);
3712 + bpage = UT_LIST_GET_NEXT(flush_list, bpage);
3716 --- a/storage/innobase/sync/sync0sync.c
3717 +++ b/storage/innobase/sync/sync0sync.c
3719 mutex->lock_word = 0;
3721 mutex->event = os_event_create(NULL);
3722 - mutex_set_waiters(mutex, 0);
3723 + mutex->waiters = 0;
3725 mutex->magic_n = MUTEX_MAGIC_N;
3726 #endif /* UNIV_DEBUG */
3727 @@ -464,6 +464,15 @@
3728 mutex_t* mutex, /*!< in: mutex */
3729 ulint n) /*!< in: value to set */
3731 +#ifdef INNODB_RW_LOCKS_USE_ATOMICS
3735 + os_compare_and_swap_ulint(&mutex->waiters, 0, 1);
3737 + os_compare_and_swap_ulint(&mutex->waiters, 1, 0);
3740 volatile ulint* ptr; /* declared volatile to ensure that
3741 the value is stored to memory */
3745 *ptr = n; /* Here we assume that the write of a single
3746 word in memory is atomic */
3750 /******************************************************************//**
3751 @@ -1233,7 +1243,12 @@
3755 + case SYNC_BUF_LRU_LIST:
3756 case SYNC_BUF_FLUSH_LIST:
3757 + case SYNC_BUF_PAGE_HASH:
3758 + case SYNC_BUF_FREE_LIST:
3759 + case SYNC_BUF_ZIP_FREE:
3760 + case SYNC_BUF_ZIP_HASH:
3762 /* We can have multiple mutexes of this type therefore we
3763 can only check whether the greater than condition holds. */
3764 @@ -1251,7 +1266,8 @@
3765 buffer block (block->mutex or buf_pool->zip_mutex). */
3766 if (!sync_thread_levels_g(array, level, FALSE)) {
3767 ut_a(sync_thread_levels_g(array, level - 1, TRUE));
3768 - ut_a(sync_thread_levels_contain(array, SYNC_BUF_POOL));
3769 + /* the exact rule is not fixed yet, for now */
3770 + //ut_a(sync_thread_levels_contain(array, SYNC_BUF_LRU_LIST));