39 #include <mysql/plugin.h>
/* Sizing constants for the I_S transaction/locks cache.
NOTE(review): this listing is a fragmentary excerpt; the leading numbers
embedded in each line appear to be original source line numbers from
extraction. */
63 #define TABLE_CACHE_INITIAL_ROWSNUM 1024
/* Fixed number of memory chunks a table cache may grow through. */
73 #define MEM_CHUNKS_IN_TABLE_CACHE 39
/* NOTE(review): the TEST_* macros below look like debug/stress knobs
(defeating lock folding, row equality, duplicate checking and hash
insertion).  In the upstream source they are presumably disabled
(commented out) -- verify, because leaving them defined changes the
cache's deduplication behavior. */
84 #define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
92 #define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
99 #define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
105 #define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
112 #define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
/* Bytes the cache's string storage may still consume (a limit minus
(cache)->mem_allocd; the macro's middle line is elided in this excerpt). */
119 #define MAX_ALLOWED_FOR_STORAGE(cache) \
121 - (cache)->mem_allocd)
/* Bytes still available for new chunk allocations: the limit minus
already-allocated memory minus the string storage's current size. */
126 #define MAX_ALLOWED_FOR_ALLOC(cache) \
128 - (cache)->mem_allocd \
129 - ha_storage_get_size((cache)->storage))
/* Cell count for the hash table that deduplicates lock rows. */
166 #define LOCKS_HASH_CELLS_NUM 10000
/* Initial size and hash cell count of the cache's string storage. */
171 #define CACHE_STORAGE_INITIAL_SIZE 1024
173 #define CACHE_STORAGE_HASH_CELLS 2048
/* Performance-schema instrumentation keys (fragment; the matching
#endif lines are outside this excerpt). */
195 #ifdef UNIV_PFS_RWLOCK
/* PSI key for the cache's reader/writer latch. */
196 UNIV_INTERN mysql_pfs_key_t trx_i_s_cache_lock_key;
199 #ifdef UNIV_PFS_MUTEX
/* PSI key for the "last read" mutex. */
200 UNIV_INTERN mysql_pfs_key_t cache_last_read_mutex_key;
/* wait_lock_get_heap_no() fragments: presumably returns the heap
number of the record being waited on, or ULINT_UNDEFINED for a
table-level lock -- body largely elided, verify against full source. */
209 wait_lock_get_heap_no(
218 ut_a(ret != ULINT_UNDEFINED);
221 ret = ULINT_UNDEFINED;
/* table_cache_create_empty_row() fragments: returns a slot for a new
row, allocating an additional memory chunk when the existing ones are
full.  Most of the body is elided in this excerpt. */
283 table_cache_create_empty_row(
/* chunk index must stay inside the fixed chunks[] array */
319 ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
341 req_bytes = req_rows * table_cache->
row_size;
348 chunk = &table_cache->
chunks[
i];
/* mem_alloc2() reports via got_bytes how much was actually granted */
350 chunk->
base = mem_alloc2(req_bytes, &got_bytes);
352 got_rows = got_bytes / table_cache->
row_size;
/* debug trace of requested vs. granted chunk sizes */
357 printf(
"allocating chunk %d req bytes=%lu, got bytes=%lu, "
359 "req rows=%lu, got rows=%lu\n",
360 i, req_bytes, got_bytes,
/* all chunks except the last one record a boundary for the next */
370 if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
404 ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
406 chunk_start = (
char*) table_cache->
chunks[i].
base;
/* row address = chunk base + (offset within chunk) * row size */
410 row = chunk_start + offset * table_cache->
row_size;
/* i_s_locks_row_validate(): debug validation of an i_s_locks_row_t;
only the signature line is visible in this excerpt. */
424 i_s_locks_row_validate(
/* fill_trx_row() fragments: copy one transaction's attributes into an
I_S row.  A waiting trx must come with a requested-lock row; a
non-waiting trx must not. */
485 ut_ad(requested_lock_row == NULL
486 || i_s_locks_row_validate(requested_lock_row));
490 ut_a(requested_lock_row != NULL);
493 ut_a(requested_lock_row == NULL);
/* copy the current statement text and NUL-terminate it */
519 memcpy(query, stmt, stmt_len);
520 query[stmt_len] =
'\0';
/* the query string (stmt_len + 1 bytes incl. NUL) is interned into
the cache's string storage */
522 row->
trx_query =
static_cast<const char*
>(
524 cache->
storage, query, stmt_len + 1,
/* empty strings are skipped */
541 if (s != NULL && s[0] !=
'\0') {
/* map the trx isolation level to its textual name (fragment) */
574 case TRX_ISO_READ_UNCOMMITTED:
577 case TRX_ISO_READ_COMMITTED:
580 case TRX_ISO_REPEATABLE_READ:
583 case TRX_ISO_SERIALIZABLE:
597 if (s != NULL && s[0] !=
'\0') {
/* put_nth_field() fragments: append ", <value>" for the n-th column of
a record to a text buffer. */
644 ut_ad(rec_offs_validate(rec, NULL, offsets));
/* 3 bytes copied: ',' + ' ' + terminating NUL */
662 memcpy(buf,
", ", 3);
671 data = rec_get_nth_field(rec, offsets, n, &data_len);
673 dict_field = dict_index_get_nth_field(index, n);
676 dict_field, buf, buf_size);
/* fill_lock_data() fragments: build the human-readable lock_data
string for a record lock from the locked page's record. */
689 const char** lock_data,
718 page = (
const page_t*) buf_block_get_frame(block);
/* infimum/supremum heap numbers map to fixed pseudo-record strings */
725 cache->
storage,
"infimum pseudo-record",
730 cache->
storage,
"supremum pseudo-record",
/* offsets kept in a stack array; rec_get_offsets() may switch to a
heap if the record is too wide (see the heap != NULL branch below) */
737 ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
743 rec_offs_init(offsets_onstack);
744 offsets = offsets_onstack;
753 offsets = rec_get_offsets(rec, index, offsets, n_fields,
/* concatenate the first n_fields values; "- 1" overlaps each call's
terminating NUL with the next write */
759 for (i = 0; i < n_fields; i++) {
761 buf_used += put_nth_field(
762 buf + buf_used,
sizeof(buf) - buf_used,
763 i, index, rec, offsets) - 1;
/* intern the assembled string; buf_used + 1 includes the NUL */
767 cache->
storage, buf, buf_used + 1,
/* heap was allocated only if the stack offsets array was too small */
770 if (UNIV_UNLIKELY(heap != NULL)) {
775 ut_a(offsets != offsets_onstack);
/* NULL from the storage presumably means the cache memory limit was
hit -- verify against full source */
782 if (*lock_data == NULL) {
/* fill_locks_row() fragments: lock_data production can fail (cache
out of memory), which aborts filling this row. */
836 if (!fill_lock_data(&row->
lock_data, lock, heap_no, cache)) {
860 ut_ad(i_s_locks_row_validate(row));
/* fill_lock_waits_row() fragments: both endpoints of the wait edge
are validated in debug builds. */
881 ut_ad(i_s_locks_row_validate(requested_lock_row));
882 ut_ad(i_s_locks_row_validate(blocking_lock_row));
/* fold_lock() fragments: hash a (lock, heap_no) pair.  Under
TEST_LOCK_FOLD_ALWAYS_DIFFERENT a static counter makes every fold
unique, defeating deduplication -- test behavior only. */
905 #ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
906 static ulint
fold = 0;
/* record locks require a valid heap number; table locks forbid one */
914 ut_a(heap_no != ULINT_UNDEFINED);
929 ut_a(heap_no == ULINT_UNDEFINED);
/* locks_row_eq_lock() fragments: compare a cached row against a
lock_t; TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T forces inequality. */
955 ut_ad(i_s_locks_row_validate(row));
956 #ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
961 ut_a(heap_no != ULINT_UNDEFINED);
972 ut_a(heap_no == ULINT_UNDEFINED);
/* search_innodb_locks() fragments: hash lookup of an existing lock
row; returns the cached row or NULL when not present. */
1007 fold_lock(lock, heap_no),
1013 ut_ad(i_s_locks_row_validate(hash_chain->
value)),
1015 locks_row_eq_lock(hash_chain->
value, lock, heap_no));
/* not found in the hash */
1017 if (hash_chain == NULL) {
1023 return(hash_chain->
value);
/* add_lock_to_cache() fragments: add one lock row (repeated 10000
times under TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES); an identical existing
row is returned instead of inserting a duplicate, unless
TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS is defined. */
1044 #ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
1046 for (i = 0; i < 10000; i++) {
1048 #ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
1050 dst_row = search_innodb_locks(cache, lock, heap_no);
/* duplicate found: reuse the cached row */
1051 if (dst_row != NULL) {
1053 ut_ad(i_s_locks_row_validate(dst_row));
1059 table_cache_create_empty_row(&cache->
innodb_locks, cache);
/* NULL row presumably means the cache memory limit was reached */
1062 if (dst_row == NULL) {
1067 if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
/* insert into the dedup hash unless disabled for testing */
1074 #ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
1083 fold_lock(lock, heap_no),
1087 #ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
1091 ut_ad(i_s_locks_row_validate(dst_row));
/* add_lock_wait_to_cache() fragments: append one (requested,
blocking) wait-edge row. */
1101 add_lock_wait_to_cache(
1118 if (dst_row == NULL) {
1123 fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
/* add_trx_relevant_locks_to_cache() fragments: for a waiting trx, add
its requested lock, each blocking lock, and a wait edge per blocking
lock; *requested_lock_row is cleared for non-waiting transactions. */
1138 add_trx_relevant_locks_to_cache(
1153 ulint wait_lock_heap_no;
/* a NULL row from add_lock_to_cache presumably signals cache OOM and
aborts the fetch -- verify against full source */
1168 if (*requested_lock_row == NULL) {
1189 = add_lock_to_cache(
1197 if (blocking_lock_row == NULL) {
1204 if (!add_lock_wait_to_cache(
1205 cache, *requested_lock_row,
1206 blocking_lock_row)) {
/* trx is not waiting: no requested-lock row to report */
1215 *requested_lock_row = NULL;
/* Minimum idle time (microseconds) between cache refreshes; readers
inside this window are served the previously fetched snapshot. */
1225 #define CACHE_MIN_IDLE_TIME_US 100000
/* can_cache_be_updated(): presumably checks that
CACHE_MIN_IDLE_TIME_US has elapsed since the last read -- body elided
in this excerpt, verify. */
1232 can_cache_be_updated(
1245 #ifdef UNIV_SYNC_DEBUG
/* trx_i_s_cache_clear(): empties the cache (fragment; body elided). */
1263 trx_i_s_cache_clear(
/* fetch_data_into_cache_low() fragments: walk one transaction list
and copy each started trx (plus its relevant locks) into the cache. */
1281 fetch_data_into_cache_low(
1286 trx_list_t* trx_list)
/* transactions that have not started are skipped */
1311 if (trx->
state == TRX_STATE_NOT_STARTED
/* debug: the trx must belong to the list being walked */
1319 ut_ad(trx->in_ro_trx_list
1322 ut_ad(trx->in_rw_trx_list
/* locks are added first so the requested-lock row can be handed to
fill_trx_row() below */
1325 if (!add_trx_relevant_locks_to_cache(cache, trx,
1326 &requested_lock_row)) {
1333 table_cache_create_empty_row(&cache->
innodb_trx,
/* NULL presumably means the cache memory limit was reached */
1337 if (trx_row == NULL) {
1343 if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
/* fetch_data_into_cache() fragment: clear, then re-fetch from the
transaction lists. */
1358 fetch_data_into_cache(
1365 trx_i_s_cache_clear(cache);
/* trx_i_s_possibly_fetch_data_into_cache() fragments: refresh the
cache only when the minimum idle time has passed. */
1387 if (!can_cache_be_updated(cache)) {
1398 fetch_data_into_cache(cache);
/* cache construction fragments: create the rw-lock (with its sync
level) and the last-read mutex (with its PFS key), then zero the
cache structure. */
1439 SYNC_TRX_I_S_RWLOCK);
1443 mutex_create(cache_last_read_mutex_key,
1474 memset(cache, 0,
sizeof *cache);
/* debug: caller must hold the cache rw-lock in shared mode */
1498 #ifdef UNIV_SYNC_DEBUG
1499 ut_a(rw_lock_own(&cache->
rw_lock, RW_LOCK_SHARED));
/* latch wrappers around the cache's rw_lock (s-unlock, x-lock,
x-unlock fragments) */
1508 rw_lock_s_unlock(&cache->
rw_lock);
1519 rw_lock_x_lock(&cache->
rw_lock);
1530 #ifdef UNIV_SYNC_DEBUG
1534 rw_lock_x_unlock(&cache->
rw_lock);
/* debug: caller must hold the latch in either mode */
1549 #ifdef UNIV_SYNC_DEBUG
1551 || rw_lock_own(&cache->
rw_lock, RW_LOCK_EX));
/* cache_select_table() fragment plus row-count/row-access fragments:
map an I_S table id to its table cache; requested row index must be
below rows_used. */
1568 return(table_cache);
1584 table_cache = cache_select_table(cache, table);
1605 table_cache = cache_select_table(cache, table);
1607 ut_a(n < table_cache->rows_used);
/* trx_i_s_create_lock_id() fragment: formatted id must fit the
caller-supplied buffer. */
1664 ut_a((ulint) res_len < lock_id_size);