#ifdef UNIV_SEARCH_PERF_STAT
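/* Performance counters kept only when UNIV_SEARCH_PERF_STAT is defined:
btr_search_n_succ counts successful adaptive hash index lookups, and
btr_search_n_hash_fail counts lookups where the hash guess had to be
discarded. */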
UNIV_INTERN ulint btr_search_n_succ = 0;
UNIV_INTERN ulint btr_search_n_hash_fail = 0;
#ifdef UNIV_PFS_RWLOCK
UNIV_INTERN mysql_pfs_key_t btr_search_latch_key;
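/* Build heuristics for the adaptive hash index: a page gets its own hash
entries only after roughly n_recs / BTR_SEARCH_PAGE_BUILD_LIMIT accesses on
the page could have used the hash index, and only once the index as a whole
has seen BTR_SEARCH_BUILD_LIMIT potentially successful searches. */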
#define BTR_SEARCH_PAGE_BUILD_LIMIT 16
#define BTR_SEARCH_BUILD_LIMIT 100
btr_search_build_page_hash_index(
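/* Assumed purpose, based on the free_block handling below: make sure the
adaptive hash index memory heap has a spare buffer block, allocating it here
so that later hash table inserts do not have to wait for buffer allocation. */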
btr_search_check_free_space_in_heap(void)
#ifdef UNIV_SYNC_DEBUG
if (heap->free_block == NULL) {
if (heap->free_block == NULL) {
heap->free_block = block;
        MEM_HEAP_FOR_BTR_SEARCH, 0);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
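/* Clears the adaptive-hash reference count in the search info of every index
of the table; invoked from the calls below that disable the adaptive hash
index for the table. */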
btr_search_disable_ref_count(
#ifdef UNIV_SYNC_DEBUG
for (index = dict_table_get_first_index(table); index;
     index = dict_table_get_next_index(index)) {
btr_search_disable_ref_count(table);
btr_search_disable_ref_count(table);
info->magic_n = BTR_SEARCH_MAGIC_N;
#ifdef UNIV_SEARCH_PERF_STAT
info->n_hash_succ = 0;
info->n_hash_fail = 0;
info->n_patt_succ = 0;
info->n_searches = 0;
#ifdef UNIV_SYNC_DEBUG
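/* Updates the hash search info of the index (the recommended n_fields,
n_bytes and left_side values, plus the potential-success counter) according
to how the latest non-hash search ended relative to the current
recommendation. */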
btr_search_info_update_hash(
#ifdef UNIV_SYNC_DEBUG
index = cursor->index;
if (info->left_side ? cmp <= 0 : cmp > 0) {
if (info->left_side ? cmp <= 0 : cmp > 0) {
goto increment_potential;
} else if (cmp > 0) {
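/* Per-block bookkeeping: counts hash-assisted accesses on the block and
returns TRUE when the heuristics say a (new) hash index should be built for
it, which the caller then does. */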
btr_search_update_block_hash_info(
btr_cur_t* cursor __attribute__((unused)))
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
      || rw_lock_own(&block->lock, RW_LOCK_EX));
ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);
if (cursor->index->table->does_not_fit_in_memory) {
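/* Updates the hash index reference for the record under the cursor, provided
the block already carries a hash index built with the current n_fields,
n_bytes and left_side parameters. */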
btr_search_update_hash_ref(
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
      || rw_lock_own(&(block->lock), RW_LOCK_EX));
      == buf_block_get_frame(block));
index = block->index;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
rec_get_offsets(rec, index, offsets_,
                ULINT_UNDEFINED, &heap),
if (UNIV_LIKELY_NULL(heap)) {
#ifdef UNIV_SYNC_DEBUG
#ifdef UNIV_SYNC_DEBUG
block = btr_cur_get_block(cursor);
btr_search_info_update_hash(info, cursor);
build_index = btr_search_update_block_hash_info(info, block, cursor);
btr_search_check_free_space_in_heap();
#ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_hash_fail++;
btr_search_update_hash_ref(info, block, cursor);
params = (ulint*) mem_alloc(3 * sizeof(ulint));
btr_search_build_page_hash_index(cursor->index,
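/* Validates a position obtained from the hash index: the cursor record, and
where allowed its neighbours, must compare against the search tuple in a way
consistent with the search mode (PAGE_CUR_G, GE, L or LE). */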
btr_search_check_guess(
ibool can_only_compare_to_cursor_rec,
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ibool success = FALSE;
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
offsets = rec_get_offsets(rec, cursor->index, offsets,
offsets, &match, &bytes);
if (mode == PAGE_CUR_GE) {
if (match >= n_unique) {
} else if (mode == PAGE_CUR_LE) {
} else if (mode == PAGE_CUR_G) {
} else if (mode == PAGE_CUR_L) {
if (can_only_compare_to_cursor_rec) {
if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
offsets, &match, &bytes);
if (mode == PAGE_CUR_GE) {
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
offsets, &match, &bytes);
if (mode == PAGE_CUR_LE) {
if (UNIV_LIKELY_NULL(heap)) {
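/* Hash-guided search: the tuple fold is looked up in the adaptive hash index,
the cursor is positioned on the candidate record, and the guess is then
verified with btr_search_check_guess(); on failure the caller falls back to a
normal B-tree descent. */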
ulint has_search_latch,
ut_ad(index && info && tuple && cursor && mtr);
index_id = index->id;
#ifdef UNIV_SEARCH_PERF_STAT
if (UNIV_LIKELY(!has_search_latch)) {
if (UNIV_UNLIKELY(!rec)) {
if (UNIV_LIKELY(!has_search_latch)) {
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
if (UNIV_LIKELY(!has_search_latch)) {
|| !btr_search_check_guess(cursor,
if (UNIV_LIKELY(!has_search_latch)) {
ut_ad(!has_search_latch);
if (mode == PAGE_CUR_GE
btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
#ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_succ++;
if (UNIV_LIKELY(!has_search_latch)
if (UNIV_LIKELY(!has_search_latch)) {
#ifdef UNIV_SEARCH_PERF_STAT
info->n_hash_fail++;
if (info->n_hash_succ > 0) {
info->n_hash_succ--;
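/* Dropping the hash entries of one page: the folds of its user records are
collected into a heap-allocated array, the matching nodes are removed from
the hash table, and block->index is finally reset to NULL. */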
index_id_t index_id;
#ifdef UNIV_SYNC_DEBUG
if (!block->index) {
index = block->index;
if (UNIV_LIKELY(!index)) {
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
      || rw_lock_own(&(block->lock), RW_LOCK_EX)
ut_a(n_fields + n_bytes > 0);
page = block->frame;
folds = (ulint*) mem_alloc(n_recs * sizeof(ulint));
rec = page_get_infimum_rec(page);
ut_a(index_id == index->id);
offsets = rec_get_offsets(rec, index, offsets,
                          n_fields + (n_bytes > 0), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
if (fold == prev_fold && prev_fold != 0) {
folds[n_cached] = fold;
if (UNIV_LIKELY_NULL(heap)) {
if (UNIV_UNLIKELY(!block->index)) {
for (i = 0; i < n_cached; i++) {
block->index = NULL;
MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_REMOVED, n_cached);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (UNIV_UNLIKELY(block->n_pointers)) {
" InnoDB: Corruption of adaptive hash index."
"InnoDB: the hash index to a page of %s,"
" still %lu hash nodes remain.\n",
index->name, (ulong) block->n_pointers);
ut_ad(btr_search_validate());
if (block && block->index) {
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
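/* Builds a hash index over one page: scans the user records, computes the
fold of the chosen n_fields/n_bytes prefix for each, and caches one entry per
distinct fold, keeping the record on the side indicated by left_side. */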
btr_search_build_page_hash_index(
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
      || rw_lock_own(&(block->lock), RW_LOCK_EX));
page = buf_block_get_frame(block);
if (n_fields + n_bytes == 0) {
folds = (ulint*) mem_alloc(n_recs * sizeof(ulint));
recs = (rec_t**) mem_alloc(n_recs * sizeof(rec_t*));
offsets = rec_get_offsets(rec, index, offsets,
                          n_fields + (n_bytes > 0), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
folds[n_cached] = fold;
recs[n_cached] = rec;
folds[n_cached] = fold;
recs[n_cached] = rec;
offsets = rec_get_offsets(next_rec, index, offsets,
                          n_fields + (n_bytes > 0), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
                     n_bytes, index->id);
if (fold != next_fold) {
folds[n_cached] = next_fold;
recs[n_cached] = next_rec;
folds[n_cached] = fold;
recs[n_cached] = rec;
btr_search_check_free_space_in_heap();
if (!block->index) {
for (i = 0; i < n_cached; i++) {
MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached);
if (UNIV_LIKELY_NULL(heap)) {
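/* When page contents have been copied to new_block, the hash index is rebuilt
on the new block with the parameters remembered from the old one; both blocks
must be X-latched, as the assertions below verify. */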
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
if (new_block->index) {
ut_a(n_fields + n_bytes > 0);
btr_search_build_page_hash_index(index, new_block, n_fields,
                                 n_bytes, left_side);
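/* Delete path: the fold of the record being removed is recomputed and the
matching hash entry deleted; a miss is accounted to
MONITOR_ADAPTIVE_HASH_ROW_REMOVE_NOT_FOUND. */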
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
index = block->index;
rec = btr_cur_get_rec(cursor);
fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
                                     ULINT_UNDEFINED, &heap),
if (UNIV_LIKELY_NULL(heap)) {
MONITOR_ADAPTIVE_HASH_ROW_REMOVE_NOT_FOUND);
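/* In-place update on insert (inferred from the update-if-found call below):
when possible, the existing hash entry for the cursor record is re-pointed at
the newly inserted record instead of being deleted and re-inserted. */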
rec = btr_cur_get_rec(cursor);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
index = block->index;
if (!block->index) {
table, cursor->fold, rec, block,
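/* General insert path: folds are computed for the inserted record, the record
before it and the record after it, and hash entries are added or moved only
where neighbouring folds differ, honouring the left_side setting. */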
const rec_t* ins_rec;
const rec_t* next_rec;
ulint next_fold = 0;
ibool locked = FALSE;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
index = block->index;
btr_search_check_free_space_in_heap();
rec = btr_cur_get_rec(cursor);
offsets = rec_get_offsets(ins_rec, index, offsets,
                          ULINT_UNDEFINED, &heap);
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
offsets = rec_get_offsets(next_rec, index, offsets,
                          n_fields + (n_bytes > 0), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
                     n_bytes, index->id);
offsets = rec_get_offsets(rec, index, offsets,
                          n_fields + (n_bytes > 0), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
goto check_next_rec;
if (fold != ins_fold) {
if (ins_fold != next_fold) {
if (UNIV_LIKELY_NULL(heap)) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
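/* Debug-only consistency check: walks every hash table cell in chunks of
chunk_size, recomputes the fold of each record a node points to, and prints a
diagnostic (with at most 20 page dumps) when a node and its record disagree. */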
btr_search_validate(void)
ulint n_page_dumps = 0;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
ulint chunk_size = 10000;
rec_offs_init(offsets_);
for (i = 0; i < cell_count; i++) {
if ((i != 0) && ((i % chunk_size) == 0)) {
for (; node != NULL; node = node->next) {
index_id_t page_index_id;
hash_block = buf_block_hash_get(
ut_a(hash_block == block);
offsets = rec_get_offsets(node->data,
                          block->index, offsets,
" InnoDB: Error in an adaptive hash"
" index pointer to page %lu\n"
"InnoDB: ptr mem address %p"
" node fold %lu, rec fold %lu\n",
(ullint) page_index_id,
fputs("InnoDB: Record ", stderr);
fprintf(stderr, "\nInnoDB: on that page."
        " Page mem address %p, is hashed %p,"
        " n fields %lu, n bytes %lu\n"
        "InnoDB: side %lu\n",
        (void*) page, (void*) block->index,
if (n_page_dumps < 20) {
for (i = 0; i < cell_count; i += chunk_size) {
ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);
if (UNIV_LIKELY_NULL(heap)) {