19 #define DBTUP_DISK_ALLOC_CPP
24 operator<<(NdbOut& out, const Ptr<Dbtup::Page> & ptr)
26 out <<
"[ Page: ptr.i: " << ptr.i
27 <<
" [ m_file_no: " << ptr.p->m_file_no
28 <<
" m_page_no: " << ptr.p->m_page_no <<
"]"
29 <<
" list_index: " << ptr.p->list_index
30 <<
" free_space: " << ptr.p->free_space
31 <<
" uncommitted_used_space: " << ptr.p->uncommitted_used_space
38 operator<<(NdbOut& out, const Ptr<Dbtup::Page_request> & ptr)
40 out <<
"[ Page_request: ptr.i: " << ptr.i
41 <<
" " << ptr.p->m_key
42 <<
" m_original_estimated_free_space: " << ptr.p->m_original_estimated_free_space
43 <<
" m_list_index: " << ptr.p->m_list_index
44 <<
" m_frag_ptr_i: " << ptr.p->m_frag_ptr_i
45 <<
" m_extent_info_ptr: " << ptr.p->m_extent_info_ptr
46 <<
" m_ref_count: " << ptr.p->m_ref_count
47 <<
" m_uncommitted_used_space: " << ptr.p->m_uncommitted_used_space
55 operator<<(NdbOut& out, const Ptr<Dbtup::Extent_info> & ptr)
57 out <<
"[ Extent_info: ptr.i " << ptr.i
58 <<
" " << ptr.p->m_key
59 <<
" m_first_page_no: " << ptr.p->m_first_page_no
60 <<
" m_free_space: " << ptr.p->m_free_space
61 <<
" m_free_matrix_pos: " << ptr.p->m_free_matrix_pos
62 <<
" m_free_page_count: [";
64 for(Uint32
i = 0;
i<Dbtup::EXTENT_SEARCH_MATRIX_COLS;
i++)
65 out <<
" " << ptr.p->m_free_page_count[
i];
71 #if NOT_YET_FREE_EXTENT
78 for (Uint32
i = 1;
i<MAX_FREE_LIST;
i++)
79 res += extP->m_free_page_count[
i];
82 #error "Code for deallocting extents when they get empty"
83 #error "This code is not yet complete"
86 #if NOT_YET_UNDO_ALLOC_EXTENT
87 #error "This is needed for deallocting extents when they get empty"
88 #error "This code is not complete yet"
94 const Uint32
limit = 512;
95 ndbout_c(
"dirty pages");
96 for(Uint32
i = 0;
i<MAX_FREE_LIST;
i++)
103 for (list.first(ptr); c < limit && !ptr.isNull(); c++, list.next(ptr))
105 ndbout << ptr <<
" ";
109 ndbout <<
"MAXLIMIT ";
113 ndbout_c(
"page requests");
114 for(Uint32
i = 0;
i<MAX_FREE_LIST;
i++)
118 Local_page_request_list list(c_page_request_pool,
121 for (list.first(ptr); c < limit && !ptr.isNull(); c++, list.next(ptr))
123 ndbout << ptr <<
" ";
127 ndbout <<
"MAXLIMIT ";
132 ndbout_c(
"Extent matrix");
133 for(Uint32
i = 0;
i<alloc.SZ;
i++)
137 Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[
i]);
139 for (list.first(ptr); c < limit && !ptr.isNull(); c++, list.next(ptr))
141 ndbout << ptr <<
" ";
145 ndbout <<
"MAXLIMIT ";
154 ndbout <<
"current extent: " << ptr << endl;
// Heavyweight debug assertion for the disk allocator: on failure it dumps
// the complete allocator state (dump_disk_alloc) and then halts the node via
// ndbrequire(false).  The macro expands dump_disk_alloc(alloc), so it can
// only be used where a local variable named `alloc` is in scope.
// NOTE(review): the "|| 1" below forces ddassert on in ALL builds, not only
// VM_TRACE ones -- presumably a debugging leftover; confirm before release.
158 #if defined VM_TRACE || 1
159 #define ddassert(x) do { if(unlikely(!(x))) { dump_disk_alloc(alloc); ndbrequire(false); } } while(0)
164 Dbtup::Disk_alloc_info::Disk_alloc_info(
const Tablerec* tabPtrP,
167 m_extent_size = extent_size;
169 if (tabPtrP->m_no_of_disk_attributes == 0)
172 Uint32 min_size= 4*tabPtrP->m_offsets[DD].m_fix_header_size;
174 if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
176 Uint32 recs_per_page= (4*Tup_fixsize_page::DATA_WORDS)/min_size;
177 m_page_free_bits_map[0] = recs_per_page;
178 m_page_free_bits_map[1] = 1;
179 m_page_free_bits_map[2] = 0;
180 m_page_free_bits_map[3] = 0;
182 Uint32 max= recs_per_page * extent_size;
183 for(Uint32
i = 0;
i<EXTENT_SEARCH_MATRIX_ROWS;
i++)
185 m_total_extent_free_space_thresholds[
i] =
186 (EXTENT_SEARCH_MATRIX_ROWS -
i - 1)*max/EXTENT_SEARCH_MATRIX_ROWS;
203 Uint32 col = calc_page_free_bits(sz);
204 Uint32 mask= EXTENT_SEARCH_MATRIX_COLS - 1;
205 for(Uint32
i= 0;
i<EXTENT_SEARCH_MATRIX_SIZE;
i++)
208 if (!m_free_extents[
i].isEmpty())
213 if ((
i & mask) >= col)
215 i = (
i & ~mask) + mask;
225 Uint32 free= extP->m_free_space;
226 Uint32 mask= EXTENT_SEARCH_MATRIX_COLS - 1;
228 Uint32 col= 0, row=0;
236 const Uint32 *arr= m_total_extent_free_space_thresholds;
237 for(; free < * arr++; row++)
238 assert(row < EXTENT_SEARCH_MATRIX_ROWS);
245 const Uint16 *arr= extP->m_free_page_count;
246 for(; col < EXTENT_SEARCH_MATRIX_COLS && * arr++ == 0; col++);
258 Uint32 pos= (row * (mask + 1)) + (col & mask);
260 assert(pos < EXTENT_SEARCH_MATRIX_SIZE);
// --- fragment of Dbtup::update_extent_pos (function header not visible in
// this extraction; several interior lines are missing) ---
// Adjusts the extent's accounted free space by `delta`, then (in debug
// builds) cross-checks the per-free-bits page counts against m_free_space,
// and finally moves the extent between free-matrix lists if its matrix
// position changed.
// Negative delta: subtract with an underflow assertion.
272 Uint32 sub = Uint32(- delta);
273 ddassert(extentPtr.p->m_free_space >= sub);
274 extentPtr.p->m_free_space -= sub;
// Non-negative delta: plain addition (branch structure lost in extraction).
279 extentPtr.p->m_free_space += delta;
// Debug consistency check: sum the per-bucket free-page counts ...
286 for(Uint32
i = 0;
i<MAX_FREE_LIST;
i++)
288 cnt += extentPtr.p->m_free_page_count[
i];
// ... if every free page is fully free (bucket 0), free space must equal
// cnt * map[0] exactly; otherwise it must be strictly below that bound.
291 if (extentPtr.p->m_free_page_count[0] == cnt)
293 ddassert(extentPtr.p->m_free_space == cnt*alloc.m_page_free_bits_map[0]);
297 ddassert(extentPtr.p->m_free_space < cnt*alloc.m_page_free_bits_map[0]);
// NOTE(review): `sum` is computed on lines missing from this extraction.
299 ddassert(extentPtr.p->m_free_space >= sum);
300 ddassert(extentPtr.p->m_free_space <= cnt*alloc.m_page_free_bits_map[0]);
// Re-bucket the extent in the free matrix when its position changed
// (the comparison against the new `pos` is on a missing line).
303 Uint32 old = extentPtr.p->m_free_matrix_pos;
310 Local_extent_info_list old_list(c_extent_pool, alloc.m_free_extents[old]);
311 Local_extent_info_list new_list(c_extent_pool, alloc.m_free_extents[pos]);
312 old_list.remove(extentPtr);
313 new_list.add(extentPtr);
314 extentPtr.p->m_free_matrix_pos= pos;
324 Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr,
331 pagePtr.p->uncommitted_used_space = 0;
332 pagePtr.p->m_restart_seq = globalData.m_restart_seq;
335 key.m_key.m_file_no = pagePtr.p->m_file_no;
336 key.m_key.m_page_idx = pagePtr.p->m_extent_no;
338 ndbrequire(c_extent_hash.
find(extentPtr, key));
339 pagePtr.p->m_extent_info_ptr = extentPtr.i;
341 Uint32 real_free = pagePtr.p->free_space;
342 const bool prealloc = estimate >= 0;
350 estimated = (Uint32)estimate;
358 estimated =alloc.calc_page_free_space(alloc.calc_page_free_bits(real_free));
364 page.m_file_no = pagePtr.p->m_file_no;
365 page.m_page_no = pagePtr.p->m_page_no;
367 D(
"Tablespace_client - restart_setup_page");
370 unsigned uncommitted, committed;
371 uncommitted = committed = ~(unsigned)0;
372 (void) tsman.get_page_free_bits(&page, &uncommitted, &committed);
375 ddassert(alloc.calc_page_free_bits(real_free) == committed);
382 ddassert(uncommitted == MAX_FREE_LIST - 1);
386 ddassert(committed == uncommitted);
391 ddassert(real_free >= estimated);
393 if (real_free != estimated)
396 Uint32 delta = (real_free-estimated);
397 update_extent_pos(alloc, extentPtr, delta);
413 Dbtup::disk_page_prealloc(
Signal* signal,
420 Fragrecord* fragPtrP = fragPtr.p;
421 Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
422 Uint32 idx= alloc.calc_page_free_bits(sz);
423 D(
"Tablespace_client - disk_page_prealloc");
425 fragPtrP->fragTableId,
426 fragPtrP->fragmentId,
427 fragPtrP->m_tablespace_id);
430 ndbout <<
"disk_page_prealloc";
435 for(i= 0; i <= idx; i++)
437 if (!alloc.m_dirty_pages[i].isEmpty())
439 ptrI= alloc.m_dirty_pages[
i].firstItem;
441 m_global_page_pool.
getPtr(gpage, ptrI);
445 tmp.p =
reinterpret_cast<Page*
>(gpage.p);
446 disk_page_prealloc_dirty_page(alloc, tmp, i, sz);
447 key->m_page_no= tmp.p->m_page_no;
448 key->m_file_no= tmp.p->m_file_no;
450 ndbout <<
" found dirty page " << *key << endl;
461 for(i= 0; i <= idx; i++)
463 if (!alloc.m_page_requests[i].isEmpty())
465 ptrI= alloc.m_page_requests[
i].firstItem;
467 c_page_request_pool.
getPtr(req, ptrI);
469 disk_page_prealloc_transit_page(alloc, req, i, sz);
470 * key = req.p->m_key;
472 ndbout <<
" found transit page " << *key << endl;
481 if (!c_page_request_pool.
seize(req))
486 ndbout_c(
"no free request");
490 req.p->m_ref_count= 1;
491 req.p->m_frag_ptr_i= fragPtr.i;
492 req.p->m_uncommitted_used_space= sz;
496 const Uint32 bits= alloc.calc_page_free_bits(sz);
502 if ((ext.i= alloc.m_curr_extent_info_ptr_i) != RNIL)
505 c_extent_pool.
getPtr(ext);
506 if ((pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits)) >= 0)
519 alloc.m_curr_extent_info_ptr_i = RNIL;
520 Uint32 pos= alloc.calc_extent_pos(ext.p);
521 ext.p->m_free_matrix_pos = pos;
522 Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[pos]);
530 if ((pos= alloc.find_extent(sz)) != RNIL)
533 Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[pos]);
543 #if NOT_YET_UNDO_ALLOC_EXTENT
544 Uint32 logfile_group_id = fragPtr.p->m_logfile_group_id;
546 err = c_lgman->alloc_log_space(logfile_group_id,
547 sizeof(Disk_undo::AllocExtent)>>2);
555 if (!c_extent_pool.
seize(ext))
560 #if NOT_YET_UNDO_ALLOC_EXTENT
561 c_lgman->free_log_space(logfile_group_id,
562 sizeof(Disk_undo::AllocExtent)>>2);
564 c_page_request_pool.
release(req);
565 ndbout_c(
"no free extent info");
569 if ((err= tsman.alloc_extent(&ext.p->m_key)) < 0)
572 #if NOT_YET_UNDO_ALLOC_EXTENT
573 c_lgman->free_log_space(logfile_group_id,
574 sizeof(Disk_undo::AllocExtent)>>2);
577 c_page_request_pool.
release(req);
582 #if NOT_YET_UNDO_ALLOC_EXTENT
589 cb.m_callbackData= ext.i;
590 cb.m_callbackFunction =
591 safe_cast(&Dbtup::disk_page_alloc_extent_log_buffer_callback);
592 Uint32 sz=
sizeof(Disk_undo::AllocExtent)>>2;
595 int res= lgman.get_log_buffer(signal, sz, &cb);
601 ndbrequire(
"NOT YET IMPLEMENTED" == 0);
604 execute(signal, cb, res);
611 ndbout <<
"allocated " << pages <<
" pages: " << ext.p->m_key
612 <<
" table: " << fragPtr.p->fragTableId
613 <<
" fragment: " << fragPtr.p->fragmentId << endl;
615 ext.p->m_first_page_no = ext.p->m_key.m_page_no;
616 memset(ext.p->m_free_page_count, 0,
sizeof(ext.p->m_free_page_count));
617 ext.p->m_free_space= alloc.m_page_free_bits_map[0] * pages;
618 ext.p->m_free_page_count[0]= pages;
619 ext.p->m_empty_page_no = 0;
620 c_extent_hash.
add(ext);
622 Local_fragment_extent_list list1(c_extent_pool, alloc.m_extent_list);
626 alloc.m_curr_extent_info_ptr_i= ext.i;
627 ext.p->m_free_matrix_pos= RNIL;
628 pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits);
630 ddassert(pageBits >= 0);
636 *key= req.p->m_key= ext.p->m_key;
639 ndbout <<
" allocated page " << *key << endl;
646 Uint32
size= alloc.calc_page_free_space((Uint32)pageBits);
648 ddassert(size >= sz);
649 req.p->m_original_estimated_free_space =
size;
651 Uint32 new_size = size - sz;
652 Uint32 newPageBits= alloc.calc_page_free_bits(new_size);
653 if (newPageBits != (Uint32)pageBits)
656 ddassert(ext.p->m_free_page_count[pageBits] > 0);
657 ext.p->m_free_page_count[pageBits]--;
658 ext.p->m_free_page_count[newPageBits]++;
660 update_extent_pos(alloc, ext, -Int32(sz));
663 idx= alloc.calc_page_free_bits(new_size);
665 Local_page_request_list list(c_page_request_pool,
666 alloc.m_page_requests[idx]);
670 req.p->m_list_index= idx;
671 req.p->m_extent_info_ptr= ext.i;
675 preq.m_callback.m_callbackData= req.i;
676 preq.m_callback.m_callbackFunction =
677 safe_cast(&Dbtup::disk_page_prealloc_callback);
679 int flags= Page_cache_client::ALLOC_REQ;
684 if (ext.p->m_first_page_no + ext.p->m_empty_page_no == key->m_page_no)
687 flags |= Page_cache_client::EMPTY_PAGE;
689 ext.p->m_empty_page_no++;
692 preq.m_callback.m_callbackFunction =
693 safe_cast(&Dbtup::disk_page_prealloc_initial_callback);
697 int res= pgman.get_page(signal, preq, flags);
698 m_pgman_ptr = pgman.m_ptr;
710 execute(signal, preq.m_callback, res);
// Reserve `sz` words on a page that is already dirty in the page cache.
// Charges the reservation to uncommitted_used_space, moves the page to a
// different dirty-page bucket if its free-bits class changed, and debits the
// extent's free space.  (Extraction note: braces and some parameter lines
// are missing from this view; code left byte-identical.)
717 Dbtup::disk_page_prealloc_dirty_page(Disk_alloc_info & alloc,
719 Uint32 old_idx, Uint32 sz)
// Page must currently be filed under the bucket the caller found it in.
722 ddassert(pagePtr.p->list_index == old_idx);
724 Uint32 free= pagePtr.p->free_space;
725 Uint32 used= pagePtr.p->uncommitted_used_space + sz;
726 Uint32 ext= pagePtr.p->m_extent_info_ptr;
728 ddassert(free >= used);
730 c_extent_pool.
getPtr(extentPtr, ext);
// Recompute the free-bits bucket from the space left after this reservation.
732 Uint32 new_idx= alloc.calc_page_free_bits(free - used);
734 if (old_idx != new_idx)
737 disk_page_move_dirty_page(alloc, extentPtr, pagePtr, old_idx, new_idx);
740 pagePtr.p->uncommitted_used_space = used;
// Negative delta: the reservation consumes extent free space.
741 update_extent_pos(alloc, extentPtr, -Int32(sz));
// Reserve `sz` words on a page that is still "in transit" (an outstanding
// Page_request, the page itself is not yet in the cache).  Mirrors
// disk_page_prealloc_dirty_page but books the reservation on the request
// record, using its original estimated free space.  (Extraction note:
// braces and some parameter lines are missing; code left byte-identical.)
746 Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
748 Uint32 old_idx, Uint32 sz)
751 ddassert(req.p->m_list_index == old_idx);
753 Uint32 free= req.p->m_original_estimated_free_space;
754 Uint32 used= req.p->m_uncommitted_used_space + sz;
755 Uint32 ext= req.p->m_extent_info_ptr;
758 c_extent_pool.
getPtr(extentPtr, ext);
760 ddassert(free >= used);
// Re-bucket the request if the remaining-space class changed.
761 Uint32 new_idx= alloc.calc_page_free_bits(free - used);
763 if (old_idx != new_idx)
766 disk_page_move_page_request(alloc, extentPtr, req, old_idx, new_idx);
769 req.p->m_uncommitted_used_space = used;
// Negative delta: the reservation consumes extent free space.
770 update_extent_pos(alloc, extentPtr, -Int32(sz));
774 Dbtup::disk_page_prealloc_callback(
Signal* signal,
775 Uint32 page_request, Uint32 page_id)
781 c_page_request_pool.
getPtr(req, page_request);
784 m_global_page_pool.
getPtr(gpage, page_id);
787 fragPtr.i= req.p->m_frag_ptr_i;
788 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
792 pagePtr.p =
reinterpret_cast<Page*
>(gpage.p);
794 Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
795 if (unlikely(pagePtr.p->m_restart_seq != globalData.m_restart_seq))
798 D(V(pagePtr.p->m_restart_seq) << V(globalData.m_restart_seq));
799 restart_setup_page(alloc, pagePtr, req.p->m_original_estimated_free_space);
803 c_extent_pool.
getPtr(extentPtr, req.p->m_extent_info_ptr);
805 pagePtr.p->uncommitted_used_space += req.p->m_uncommitted_used_space;
806 ddassert(pagePtr.p->free_space >= pagePtr.p->uncommitted_used_space);
808 Uint32 free = pagePtr.p->free_space - pagePtr.p->uncommitted_used_space;
809 Uint32 idx = req.p->m_list_index;
810 Uint32 real_idx = alloc.calc_page_free_bits(free);
815 ddassert(extentPtr.p->m_free_page_count[idx]);
816 extentPtr.p->m_free_page_count[idx]--;
817 extentPtr.p->m_free_page_count[real_idx]++;
818 update_extent_pos(alloc, extentPtr, 0);
825 pagePtr.p->list_index = real_idx;
835 Local_page_request_list list(c_page_request_pool,
836 alloc.m_page_requests[idx]);
// Move a dirty page from free-bits bucket old_idx to new_idx: fix up the
// extent's per-bucket page counters and relink the page between the two
// dirty-page lists.  (Extraction note: parameter lines and the list
// declarations are missing from this view; code left byte-identical.)
842 Dbtup::disk_page_move_dirty_page(Disk_alloc_info& alloc,
// The source bucket must account for at least this one page.
848 ddassert(extentPtr.p->m_free_page_count[old_idx]);
849 extentPtr.p->m_free_page_count[old_idx]--;
850 extentPtr.p->m_free_page_count[new_idx]++;
855 old_list.remove(pagePtr);
856 new_list.add(pagePtr);
857 pagePtr.p->list_index = new_idx;
// Same re-bucketing as disk_page_move_dirty_page, but for an outstanding
// Page_request: relink it between the per-bucket request lists and adjust
// the extent's per-bucket page counters.  (Extraction note: some parameter
// lines and the new_list.add() call are missing; code left byte-identical.)
861 Dbtup::disk_page_move_page_request(Disk_alloc_info& alloc,
864 Uint32 old_idx, Uint32 new_idx)
866 Page_request_list::Head *lists = alloc.m_page_requests;
867 Local_page_request_list old_list(c_page_request_pool, lists[old_idx]);
868 Local_page_request_list new_list(c_page_request_pool, lists[new_idx]);
869 old_list.remove(req);
872 ddassert(extentPtr.p->m_free_page_count[old_idx]);
873 extentPtr.p->m_free_page_count[old_idx]--;
874 extentPtr.p->m_free_page_count[new_idx]++;
875 req.p->m_list_index= new_idx;
879 Dbtup::disk_page_prealloc_initial_callback(
Signal*signal,
893 c_page_request_pool.
getPtr(req, page_request);
896 m_global_page_pool.
getPtr(gpage, page_id);
899 pagePtr.p =
reinterpret_cast<Page*
>(gpage.p);
902 fragPtr.i= req.p->m_frag_ptr_i;
903 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
906 tabPtr.i = fragPtr.p->fragTableId;
907 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
910 c_extent_pool.
getPtr(extentPtr, req.p->m_extent_info_ptr);
912 if (tabPtr.p->m_attributes[DD].m_no_of_varsize == 0)
914 convertThPage((Fix_page*)pagePtr.p, tabPtr.p, DD);
921 pagePtr.p->m_page_no= req.p->m_key.m_page_no;
922 pagePtr.p->m_file_no= req.p->m_key.m_file_no;
923 pagePtr.p->m_table_id= fragPtr.p->fragTableId;
924 pagePtr.p->m_fragment_id = fragPtr.p->fragmentId;
925 pagePtr.p->m_extent_no = extentPtr.p->m_key.m_page_idx;
926 pagePtr.p->m_extent_info_ptr= req.p->m_extent_info_ptr;
927 pagePtr.p->m_restart_seq = globalData.m_restart_seq;
928 pagePtr.p->nextList = pagePtr.p->prevList = RNIL;
929 pagePtr.p->list_index = req.p->m_list_index;
930 pagePtr.p->uncommitted_used_space = req.p->m_uncommitted_used_space;
932 Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
933 Uint32 idx = req.p->m_list_index;
936 Uint32 free = pagePtr.p->free_space - pagePtr.p->uncommitted_used_space;
937 ddassert(idx == alloc.calc_page_free_bits(free));
938 ddassert(pagePtr.p->free_space == req.p->m_original_estimated_free_space);
954 Local_page_request_list list(c_page_request_pool,
955 alloc.m_page_requests[idx]);
961 Dbtup::disk_page_set_dirty(PagePtr pagePtr)
964 Uint32 idx = pagePtr.p->list_index;
965 if ((pagePtr.p->m_restart_seq == globalData.m_restart_seq) &&
966 ((idx & 0x8000) == 0))
976 key.m_page_no = pagePtr.p->m_page_no;
977 key.m_file_no = pagePtr.p->m_file_no;
979 pagePtr.p->nextList = pagePtr.p->prevList = RNIL;
982 ndbout <<
" disk_page_set_dirty " << key << endl;
985 tabPtr.i= pagePtr.p->m_table_id;
986 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
989 getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p);
991 Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
993 Uint32 free = pagePtr.p->free_space;
994 Uint32 used = pagePtr.p->uncommitted_used_space;
995 if (unlikely(pagePtr.p->m_restart_seq != globalData.m_restart_seq))
998 D(V(pagePtr.p->m_restart_seq) << V(globalData.m_restart_seq));
999 restart_setup_page(alloc, pagePtr, -1);
1000 ndbassert(free == pagePtr.p->free_space);
1001 idx = alloc.calc_page_free_bits(free);
1008 ddassert(idx == alloc.calc_page_free_bits(free - used));
1011 ddassert(free >= used);
1013 D(
"Tablespace_client - disk_page_set_dirty");
1015 fragPtr.p->fragTableId,
1016 fragPtr.p->fragmentId,
1017 fragPtr.p->m_tablespace_id);
1019 pagePtr.p->list_index = idx;
1025 tsman.unmap_page(&key, MAX_FREE_LIST - 1);
1031 Uint32 page_id, Uint32 dirty_count)
1035 m_global_page_pool.
getPtr(gpage, page_id);
1037 pagePtr.i = gpage.i;
1038 pagePtr.p =
reinterpret_cast<Page*
>(gpage.p);
1040 Uint32
type = pagePtr.p->m_page_header.m_page_type;
1041 if (unlikely((type != File_formats::PT_Tup_fixsize_page &&
1042 type != File_formats::PT_Tup_varsize_page) ||
1043 f_undo_done ==
false))
1046 D(
"disk_page_unmap_callback" << V(type) << V(f_undo_done));
1050 Uint32 idx = pagePtr.p->list_index;
1053 tabPtr.i= pagePtr.p->m_table_id;
1054 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1057 getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p);
1071 key.m_page_no = pagePtr.p->m_page_no;
1072 key.m_file_no = pagePtr.p->m_file_no;
1073 ndbout <<
"disk_page_unmap_callback(before) " << key
1074 <<
" cnt: " << dirty_count <<
" " << (idx & ~0x8000) << endl;
1077 ndbassert((idx & 0x8000) == 0);
1085 if (dirty_count == 0)
1088 pagePtr.p->list_index = idx | 0x8000;
1091 key.m_page_no = pagePtr.p->m_page_no;
1092 key.m_file_no = pagePtr.p->m_file_no;
1094 Uint32 free = pagePtr.p->free_space;
1095 Uint32 used = pagePtr.p->uncommitted_used_space;
1096 ddassert(free >= used);
1099 D(
"Tablespace_client - disk_page_unmap_callback");
1101 fragPtr.p->fragTableId,
1102 fragPtr.p->fragmentId,
1103 fragPtr.p->m_tablespace_id);
1117 key.m_page_no = pagePtr.p->m_page_no;
1118 key.m_file_no = pagePtr.p->m_file_no;
1119 Uint32 real_free = pagePtr.p->free_space;
1123 ndbout <<
"disk_page_unmap_callback(after) " << key
1124 <<
" cnt: " << dirty_count <<
" " << (idx & ~0x8000) << endl;
1131 D(
"Tablespace_client - disk_page_unmap_callback");
1133 fragPtr.p->fragTableId,
1134 fragPtr.p->fragmentId,
1135 fragPtr.p->m_tablespace_id);
1141 <<
" idx: " << (idx & ~0x8000)
1150 Dbtup::disk_page_alloc(
Signal* signal,
1151 Tablerec* tabPtrP, Fragrecord* fragPtrP,
1152 Local_key* key, PagePtr pagePtr, Uint32 gci)
1155 Uint32 logfile_group_id= fragPtrP->m_logfile_group_id;
1156 Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
1159 if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
1161 ddassert(pagePtr.p->uncommitted_used_space > 0);
1162 pagePtr.p->uncommitted_used_space--;
1163 key->m_page_idx= ((Fix_page*)pagePtr.p)->alloc_record();
1164 lsn= disk_page_undo_alloc(pagePtr.p, key, 1, gci, logfile_group_id);
1168 Uint32 sz= key->m_page_idx;
1169 ddassert(pagePtr.p->uncommitted_used_space >= sz);
1170 pagePtr.p->uncommitted_used_space -= sz;
1171 key->m_page_idx= ((Var_page*)pagePtr.p)->
1172 alloc_record(sz, (Var_page*)ctemp_page, 0);
1174 lsn= disk_page_undo_alloc(pagePtr.p, key, sz, gci, logfile_group_id);
1179 Dbtup::disk_page_free(
Signal *signal,
1180 Tablerec *tabPtrP, Fragrecord * fragPtrP,
1181 Local_key* key, PagePtr pagePtr, Uint32 gci)
1185 ndbout <<
" disk_page_free " << *key << endl;
1187 Uint32 page_idx= key->m_page_idx;
1188 Uint32 logfile_group_id= fragPtrP->m_logfile_group_id;
1189 Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
1190 Uint32 old_free= pagePtr.p->free_space;
1194 if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
1197 const Uint32 *src= ((Fix_page*)pagePtr.p)->get_ptr(page_idx, 0);
1198 ndbassert(* (src + 1) != Tup_fixsize_page::FREE_RECORD);
1199 lsn= disk_page_undo_free(pagePtr.p, key,
1200 src, tabPtrP->m_offsets[DD].m_fix_header_size,
1201 gci, logfile_group_id);
1203 ((Fix_page*)pagePtr.p)->free_record(page_idx);
1207 const Uint32 *src= ((Var_page*)pagePtr.p)->get_ptr(page_idx);
1208 sz= ((Var_page*)pagePtr.p)->get_entry_len(page_idx);
1209 lsn= disk_page_undo_free(pagePtr.p, key,
1211 gci, logfile_group_id);
1213 ((Var_page*)pagePtr.p)->free_record(page_idx, 0);
1216 Uint32 new_free = pagePtr.p->free_space;
1218 Uint32 ext = pagePtr.p->m_extent_info_ptr;
1219 Uint32 used = pagePtr.p->uncommitted_used_space;
1220 Uint32 old_idx = pagePtr.p->list_index;
1221 ddassert(old_free >= used);
1222 ddassert(new_free >= used);
1223 ddassert(new_free >= old_free);
1224 ddassert((old_idx & 0x8000) == 0);
1226 Uint32 new_idx = alloc.calc_page_free_bits(new_free - used);
1227 ddassert(alloc.calc_page_free_bits(old_free - used) == old_idx);
1230 c_extent_pool.
getPtr(extentPtr, ext);
1232 if (old_idx != new_idx)
1235 disk_page_move_dirty_page(alloc, extentPtr, pagePtr, old_idx, new_idx);
1238 update_extent_pos(alloc, extentPtr, sz);
1239 #if NOT_YET_FREE_EXTENT
1240 if (check_free(extentPtr.p) == 0)
1242 ndbout_c(
"free: extent is free");
1248 Dbtup::disk_page_abort_prealloc(
Signal *signal, Fragrecord* fragPtrP,
1254 req.m_callback.m_callbackData= sz;
1255 req.m_callback.m_callbackFunction =
1256 safe_cast(&Dbtup::disk_page_abort_prealloc_callback);
1258 int flags= Page_cache_client::DIRTY_REQ;
1259 memcpy(&req.m_page, key,
sizeof(
Local_key));
1262 int res= pgman.get_page(signal, req, flags);
1263 m_pgman_ptr = pgman.m_ptr;
1276 m_global_page_pool.
getPtr(gpage, (Uint32)res);
1278 pagePtr.i = gpage.i;
1279 pagePtr.p =
reinterpret_cast<Page*
>(gpage.p);
1281 disk_page_abort_prealloc_callback_1(signal, fragPtrP, pagePtr, sz);
1286 Dbtup::disk_page_abort_prealloc_callback(
Signal* signal,
1287 Uint32 sz, Uint32 page_id)
1292 m_global_page_pool.
getPtr(gpage, page_id);
1295 pagePtr.i = gpage.i;
1296 pagePtr.p =
reinterpret_cast<Page*
>(gpage.p);
1299 tabPtr.i= pagePtr.p->m_table_id;
1300 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1303 getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p);
1305 disk_page_abort_prealloc_callback_1(signal, fragPtr.p, pagePtr, sz);
1309 Dbtup::disk_page_abort_prealloc_callback_1(
Signal* signal,
1310 Fragrecord* fragPtrP,
1315 disk_page_set_dirty(pagePtr);
1317 Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
1320 c_extent_pool.
getPtr(extentPtr, pagePtr.p->m_extent_info_ptr);
1322 Uint32 idx = pagePtr.p->list_index & 0x7FFF;
1323 Uint32 used = pagePtr.p->uncommitted_used_space;
1324 Uint32 free = pagePtr.p->free_space;
1326 ddassert(free >= used);
1327 ddassert(used >= sz);
1328 ddassert(alloc.calc_page_free_bits(free - used) == idx);
1330 pagePtr.p->uncommitted_used_space = used - sz;
1332 Uint32 new_idx = alloc.calc_page_free_bits(free - used + sz);
1337 disk_page_move_dirty_page(alloc, extentPtr, pagePtr, idx, new_idx);
1340 update_extent_pos(alloc, extentPtr, sz);
1341 #if NOT_YET_FREE_EXTENT
1342 if (check_free(extentPtr.p) == 0)
1344 ndbout_c(
"abort: extent is free");
1349 #if NOT_YET_UNDO_ALLOC_EXTENT
1351 Dbtup::disk_page_alloc_extent_log_buffer_callback(
Signal* signal,
1356 c_extent_pool.
getPtr(extentPtr, extentPtrI);
1359 Tablespace_client2 tsman(signal, c_tsman, &key);
1362 tabPtr.i= tsman.m_table_id;
1363 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1366 getFragmentrec(fragPtr, tsman.m_fragment_id, tabPtr.p);
1368 Logfile_client lgman(
this, c_lgman, fragPtr.p->m_logfile_group_id);
1370 Disk_undo::AllocExtent alloc;
1371 alloc.m_table = tabPtr.i;
1372 alloc.m_fragment = tsman.m_fragment_id;
1373 alloc.m_page_no = key.m_page_no;
1374 alloc.m_file_no = key.m_file_no;
1375 alloc.m_type_length = (Disk_undo::UNDO_ALLOC_EXTENT<<16)|(sizeof(alloc)>> 2);
1379 Uint64 lsn= lgman.add_entry(c, 1);
1381 tsman.update_lsn(&key, lsn);
// Write an UNDO_ALLOC record to the logfile group for a record just
// allocated on a disk page, then stamp the returned LSN onto the page via
// PGMAN so recovery knows how far this page's undo has been logged.
// (Extraction note: the Logfile_client construction and the add_entry
// argument setup are on lines missing from this view.)
1387 Dbtup::disk_page_undo_alloc(Page* page,
const Local_key* key,
1388 Uint32 sz, Uint32 gci, Uint32 logfile_group_id)
1391 D(
"Logfile_client - disk_page_undo_alloc");
1394 Disk_undo::Alloc alloc;
// type in the high 16 bits, length-in-words in the low 16 bits.
1395 alloc.m_type_length= (Disk_undo::UNDO_ALLOC << 16) | (sizeof(alloc) >> 2);
1396 alloc.m_page_no = key->m_page_no;
// file number packed in the high half-word, page index in the low.
1397 alloc.m_file_no_page_idx= key->m_file_no << 16 | key->m_page_idx;
1401 Uint64 lsn= lgman.add_entry(c, 1);
1404 pgman.update_lsn(* key, lsn);
// Write an UNDO_UPDATE record containing the before-image (`src`, `sz`
// words) of a record being updated, then stamp the new LSN onto the page.
// The entry is logged in 3 parts (header fields + data + trailer word --
// the other two Change entries are on lines missing from this extraction).
1411 Dbtup::disk_page_undo_update(Page* page,
const Local_key* key,
1412 const Uint32* src, Uint32 sz,
1413 Uint32 gci, Uint32 logfile_group_id)
1416 D(
"Logfile_client - disk_page_undo_update");
1419 Disk_undo::Update update;
1420 update.m_page_no = key->m_page_no;
1421 update.m_file_no_page_idx= key->m_file_no << 16 | key->m_page_idx;
// total length in words: payload sz plus the fixed header, minus the
// m_data[1] placeholder already counted inside sizeof(update).
1424 update.m_type_length=
1425 (Disk_undo::UNDO_UPDATE << 16) | (sz + (sizeof(update) >> 2) - 1);
1430 { &update.m_type_length, 1 }
// Sanity: 3 header words + sz data words + 1 trailer word must match the
// struct-based length computation above.
1433 ndbassert(4*(3 + sz + 1) == (
sizeof(update) + 4*sz - 4));
1435 Uint64 lsn= lgman.add_entry(c, 3);
1438 pgman.update_lsn(* key, lsn);
// Write an UNDO_FREE record containing the full image (`src`, `sz` words)
// of a record being freed -- recovery needs the data to re-insert it --
// then stamp the new LSN onto the page.  Structure parallels
// disk_page_undo_update; the assignment to free.m_type_length starts on a
// line missing from this extraction.
1445 Dbtup::disk_page_undo_free(Page* page,
const Local_key* key,
1446 const Uint32* src, Uint32 sz,
1447 Uint32 gci, Uint32 logfile_group_id)
1450 D(
"Logfile_client - disk_page_undo_free");
1453 Disk_undo::Free free;
1454 free.m_page_no = key->m_page_no;
1455 free.m_file_no_page_idx= key->m_file_no << 16 | key->m_page_idx;
1459 (Disk_undo::UNDO_FREE << 16) | (sz + (sizeof(free) >> 2) - 1);
1464 { &free.m_type_length, 1 }
// Same length cross-check as in disk_page_undo_update.
1467 ndbassert(4*(3 + sz + 1) == (
sizeof(free) + 4*sz - 4));
1469 Uint64 lsn= lgman.add_entry(c, 3);
1472 pgman.update_lsn(* key, lsn);
1478 #include <signaldata/LgmanContinueB.hpp>
1484 Uint32
type,
const Uint32 * ptr, Uint32 len)
1486 f_undo_done =
false;
1490 f_undo.m_type =
type;
1493 switch(f_undo.m_type){
1494 case File_formats::Undofile::UNDO_LCP_FIRST:
1495 case File_formats::Undofile::UNDO_LCP:
1498 ndbrequire(len == 3);
1499 Uint32 lcp = ptr[0];
1500 Uint32 tableId = ptr[1] >> 16;
1501 Uint32 fragId = ptr[1] & 0xFFFF;
1502 disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_LCP, lcp);
1504 disk_restart_undo_next(signal);
1508 ndbout_c(
"UNDO LCP %u (%u, %u)", lcp, tableId, fragId);
1512 case File_formats::Undofile::UNDO_TUP_ALLOC:
1516 preq.m_page.m_page_no = rec->m_page_no;
1517 preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
1518 preq.m_page.m_page_idx = rec->m_file_no_page_idx & 0xFFFF;
1521 case File_formats::Undofile::UNDO_TUP_UPDATE:
1525 preq.m_page.m_page_no = rec->m_page_no;
1526 preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
1527 preq.m_page.m_page_idx = rec->m_file_no_page_idx & 0xFFFF;
1530 case File_formats::Undofile::UNDO_TUP_FREE:
1534 preq.m_page.m_page_no = rec->m_page_no;
1535 preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
1536 preq.m_page.m_page_idx = rec->m_file_no_page_idx & 0xFFFF;
1539 case File_formats::Undofile::UNDO_TUP_CREATE:
1547 tabPtr.i= rec->m_table;
1548 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1549 for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
1550 if (tabPtr.p->fragrec[i] != RNIL)
1551 disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i],
1552 Fragrecord::UC_CREATE, 0);
1554 disk_restart_undo_next(signal);
1558 ndbout_c(
"UNDO CREATE (%u)", tabPtr.i);
1562 case File_formats::Undofile::UNDO_TUP_DROP:
1567 tabPtr.i= rec->m_table;
1568 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1569 for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
1570 if (tabPtr.p->fragrec[i] != RNIL)
1571 disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i],
1572 Fragrecord::UC_CREATE, 0);
1574 disk_restart_undo_next(signal);
1578 ndbout_c(
"UNDO DROP (%u)", tabPtr.i);
1582 case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT:
1584 case File_formats::Undofile::UNDO_TUP_FREE_EXTENT:
1586 disk_restart_undo_next(signal);
1589 case File_formats::Undofile::UNDO_END:
1597 f_undo.m_key = preq.m_page;
1598 preq.m_callback.m_callbackFunction =
1599 safe_cast(&Dbtup::disk_restart_undo_callback);
1603 int res= pgman.
get_page(signal, preq, flags);
1604 m_pgman_ptr = pgman.m_ptr;
1614 execute(signal, preq.m_callback, res);
// Continue UNDO execution during restart: send EXECUTE_UNDO_RECORD back to
// LGMAN as a CONTINUEB so it delivers the next undo record to DBTUP.
// (Extraction note: the function's braces are on lines missing from this
// view; code left byte-identical.)
1619 Dbtup::disk_restart_undo_next(
Signal* signal)
1621 signal->theData[0] = LgmanContinueB::EXECUTE_UNDO_RECORD;
1622 sendSignal(LGMAN_REF, GSN_CONTINUEB, signal, 1, JBB);
1626 Dbtup::disk_restart_lcp_id(Uint32 tableId, Uint32 fragId, Uint32 lcpId)
1632 disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE, 0);
1635 ndbout_c(
"mark_no_lcp (%u, %u)", tableId, fragId);
1640 disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_SET_LCP, lcpId);
1643 ndbout_c(
"mark_no_lcp (%u, %u)", tableId, fragId);
1650 Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag,
1655 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1657 if (tabPtr.p->tableStatus == DEFINED && tabPtr.p->m_no_of_disk_attributes)
1660 FragrecordPtr fragPtr;
1661 getFragmentrec(fragPtr, fragId, tabPtr.p);
1662 if (!fragPtr.isNull())
1666 case Fragrecord::UC_CREATE:
1668 fragPtr.p->m_undo_complete |= flag;
1670 case Fragrecord::UC_LCP:
1672 if (fragPtr.p->m_undo_complete == 0 &&
1673 fragPtr.p->m_restore_lcp_id == lcpId)
1676 fragPtr.p->m_undo_complete |= flag;
1678 ndbout_c(
"table: %u fragment: %u lcp: %u -> done",
1679 tableId, fragId, lcpId);
1682 case Fragrecord::UC_SET_LCP:
1686 ndbout_c(
"table: %u fragment: %u restore to lcp: %u",
1687 tableId, fragId, lcpId);
1688 ndbrequire(fragPtr.p->m_undo_complete == 0);
1689 ndbrequire(fragPtr.p->m_restore_lcp_id == RNIL);
1690 fragPtr.p->m_restore_lcp_id = lcpId;
1701 Dbtup::disk_restart_undo_callback(
Signal* signal,
1707 m_global_page_pool.
getPtr(gpage, page_id);
1709 pagePtr.i = gpage.i;
1710 pagePtr.p =
reinterpret_cast<Page*
>(gpage.p);
1712 Apply_undo* undo = &f_undo;
1714 bool update =
false;
1715 if (! (pagePtr.p->list_index & 0x8000) ||
1716 pagePtr.p->nextList != RNIL ||
1717 pagePtr.p->prevList != RNIL)
1721 pagePtr.p->list_index |= 0x8000;
1722 pagePtr.p->nextList = pagePtr.p->prevList = RNIL;
1725 Uint32 tableId= pagePtr.p->m_table_id;
1726 Uint32 fragId = pagePtr.p->m_fragment_id;
1728 if (tableId >= cnoOfTablerec)
1732 ndbout_c(
"UNDO table> %u", tableId);
1733 disk_restart_undo_next(signal);
1736 undo->m_table_ptr.i = tableId;
1737 ptrCheckGuard(undo->m_table_ptr, cnoOfTablerec, tablerec);
1739 if (! (undo->m_table_ptr.p->tableStatus == DEFINED &&
1740 undo->m_table_ptr.p->m_no_of_disk_attributes))
1744 ndbout_c(
"UNDO !defined (%u) ", tableId);
1745 disk_restart_undo_next(signal);
1749 getFragmentrec(undo->m_fragment_ptr, fragId, undo->m_table_ptr.p);
1750 if(undo->m_fragment_ptr.isNull())
1754 ndbout_c(
"UNDO fragment null %u/%u", tableId, fragId);
1755 disk_restart_undo_next(signal);
1759 if (undo->m_fragment_ptr.p->m_undo_complete)
1763 ndbout_c(
"UNDO undo complete %u/%u", tableId, fragId);
1764 disk_restart_undo_next(signal);
1773 lsn += pagePtr.p->m_page_header.m_page_lsn_hi; lsn <<= 32;
1774 lsn += pagePtr.p->m_page_header.m_page_lsn_lo;
1776 undo->m_page_ptr = pagePtr;
1778 if (undo->m_lsn <= lsn)
1783 ndbout <<
"apply: " << undo->m_lsn <<
"(" << lsn <<
" )"
1784 << key <<
" type: " << undo->m_type << endl;
1789 ndbout_c(
"applying %lld", undo->m_lsn);
1793 switch(undo->m_type){
1794 case File_formats::Undofile::UNDO_TUP_ALLOC:
1796 disk_restart_undo_alloc(undo);
1798 case File_formats::Undofile::UNDO_TUP_UPDATE:
1800 disk_restart_undo_update(undo);
1802 case File_formats::Undofile::UNDO_TUP_FREE:
1804 disk_restart_undo_free(undo);
1811 ndbout <<
"disk_restart_undo: " << undo->m_type <<
" "
1812 << undo->m_key << endl;
1814 lsn = undo->m_lsn - 1;
1817 pgman.update_lsn(undo->m_key, lsn);
1820 disk_restart_undo_page_bits(signal, undo);
1825 ndbout <<
"ignore: " << undo->m_lsn <<
"(" << lsn <<
" )"
1826 << key <<
" type: " << undo->m_type
1827 <<
" tab: " << tableId << endl;
1830 disk_restart_undo_next(signal);
// Apply an UNDO_TUP_ALLOC record: the allocation is rolled back by freeing
// the record slot again, using the fixed-size or var-size page layout
// depending on the table's disk attributes.  (Extraction note: braces and
// the else keyword are on missing lines; code left byte-identical.)
1834 Dbtup::disk_restart_undo_alloc(Apply_undo* undo)
// The cached page must be the one the undo record refers to.
1836 ndbassert(undo->m_page_ptr.p->m_file_no == undo->m_key.m_file_no);
1837 ndbassert(undo->m_page_ptr.p->m_page_no == undo->m_key.m_page_no);
1838 if (undo->m_table_ptr.p->m_attributes[DD].m_no_of_varsize == 0)
1840 ((Fix_page*)undo->m_page_ptr.p)->free_record(undo->m_key.m_page_idx);
1843 ((Var_page*)undo->m_page_ptr.p)->free_record(undo->m_key.m_page_idx, 0);
// Apply an UNDO_TUP_UPDATE record: copy the logged before-image back over
// the record in place.  `len` is the payload length in words; the "- 4"
// presumably strips the undo-record header words -- TODO confirm against
// Disk_undo::Update layout.  (Extraction note: braces/else are on missing
// lines; code left byte-identical.)
1847 Dbtup::disk_restart_undo_update(Apply_undo* undo)
1850 Uint32 len= undo->m_len - 4;
1851 if (undo->m_table_ptr.p->m_attributes[DD].m_no_of_varsize == 0)
1853 ptr= ((Fix_page*)undo->m_page_ptr.p)->get_ptr(undo->m_key.m_page_idx, len);
// Fixed-size rows: logged length must equal the table's fixed header size.
1854 ndbrequire(len == undo->m_table_ptr.p->m_offsets[DD].m_fix_header_size);
1858 ptr= ((Var_page*)undo->m_page_ptr.p)->get_ptr(undo->m_key.m_page_idx);
1862 const Disk_undo::Update *update = (
const Disk_undo::Update*)undo->m_ptr;
1863 const Uint32* src= update->m_data;
1864 memcpy(ptr, src, 4 * len);
1868 Dbtup::disk_restart_undo_free(Apply_undo* undo)
1870 Uint32* ptr, idx = undo->m_key.m_page_idx;
1871 Uint32 len= undo->m_len - 4;
1872 if (undo->m_table_ptr.p->m_attributes[DD].m_no_of_varsize == 0)
1874 ndbrequire(len == undo->m_table_ptr.p->m_offsets[DD].m_fix_header_size);
1875 idx= ((Fix_page*)undo->m_page_ptr.p)->alloc_record(idx);
1876 ptr= ((Fix_page*)undo->m_page_ptr.p)->get_ptr(idx, len);
1883 ndbrequire(idx == undo->m_key.m_page_idx);
1884 const Disk_undo::Free *free = (
const Disk_undo::Free*)undo->m_ptr;
1885 const Uint32* src= free->m_data;
1886 memcpy(ptr, src, 4 * len);
1890 Dbtup::disk_restart_undo_page_bits(
Signal* signal, Apply_undo* undo)
1892 Fragrecord* fragPtrP = undo->m_fragment_ptr.p;
1893 Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
1899 Page* pageP = undo->m_page_ptr.p;
1900 Uint32 free = pageP->free_space;
1901 Uint32 new_bits = alloc.calc_page_free_bits(free);
1902 pageP->list_index = 0x8000 | new_bits;
1904 D(
"Tablespace_client - disk_restart_undo_page_bits");
1906 fragPtrP->fragTableId,
1907 fragPtrP->fragmentId,
1908 fragPtrP->m_tablespace_id);
1910 tsman.restart_undo_page_free_bits(&undo->m_key, new_bits);
1915 Dbtup::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
1919 FragrecordPtr fragPtr;
1921 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1922 if (tabPtr.p->tableStatus == DEFINED && tabPtr.p->m_no_of_disk_attributes)
1924 getFragmentrec(fragPtr, fragId, tabPtr.p);
1926 if (!fragPtr.isNull())
1930 if (fragPtr.p->m_undo_complete & Fragrecord::UC_CREATE)
1936 Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
1939 ndbrequire(c_extent_pool.
seize(ext));
1941 ndbout <<
"allocated " << pages <<
" pages: " << *key
1942 <<
" table: " << tabPtr.i <<
" fragment: " << fragId << endl;
1944 ext.p->m_key = *key;
1945 ext.p->m_first_page_no = ext.p->m_key.m_page_no;
1946 ext.p->m_free_space= 0;
1947 ext.p->m_empty_page_no = (1 << 16);
1948 memset(ext.p->m_free_page_count, 0,
sizeof(ext.p->m_free_page_count));
1950 if (alloc.m_curr_extent_info_ptr_i != RNIL)
1954 c_extent_pool.
getPtr(old, alloc.m_curr_extent_info_ptr_i);
1955 ndbassert(old.p->m_free_matrix_pos == RNIL);
1956 Uint32 pos= alloc.calc_extent_pos(old.p);
1957 Local_extent_info_list new_list(c_extent_pool, alloc.m_free_extents[pos]);
1959 old.p->m_free_matrix_pos= pos;
1962 alloc.m_curr_extent_info_ptr_i = ext.i;
1963 ext.p->m_free_matrix_pos = RNIL;
1964 c_extent_hash.
add(ext);
1966 Local_fragment_extent_list list1(c_extent_pool, alloc.m_extent_list);
1976 Dbtup::disk_restart_page_bits(Uint32 tableId, Uint32 fragId,
1981 FragrecordPtr fragPtr;
1983 ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
1984 if (tabPtr.p->tableStatus == DEFINED && tabPtr.p->m_no_of_disk_attributes)
1987 getFragmentrec(fragPtr, fragId, tabPtr.p);
1988 Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
1991 c_extent_pool.
getPtr(ext, alloc.m_curr_extent_info_ptr_i);
1993 Uint32 size= alloc.calc_page_free_space(bits);
1995 ext.p->m_free_page_count[bits]++;
1996 update_extent_pos(alloc, ext, size);
1997 ndbassert(ext.p->m_free_matrix_pos == RNIL);
2002 Dbtup::disk_page_get_allocated(
const Tablerec* tabPtrP,
2003 const Fragrecord * fragPtrP,
2006 res[0] = res[1] = 0;
2007 if (tabPtrP->m_no_of_disk_attributes)
2010 const Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
2015 Disk_alloc_info& tmp =
const_cast<Disk_alloc_info&
>(alloc);
2016 Local_fragment_extent_list list(c_extent_pool, tmp.m_extent_list);
2018 for (list.first(extentPtr); !extentPtr.isNull(); list.next(extentPtr))
2021 free += extentPtr.p->m_free_space;
2024 res[0] = cnt * alloc.m_extent_size * File_formats::NDB_PAGE_SIZE;
2025 res[1] = free * 4 * tabPtrP->m_offsets[DD].m_fix_header_size;