#define DBTUP_C
#define DBTUP_VAR_ALLOC_CPP
#include "Dbtup.hpp"
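/*
 * Variable-sized pages are kept on per-fragment free lists, bucketed by how
 * many words of free space each page has left. init_list_sizes() defines the
 * [min, max] free-space range (in words) of each bucket; the last range
 * ([0, 199]) describes pages with too little free space to be worth
 * searching for new allocations.
 */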
void Dbtup::init_list_sizes(void)
{
  c_min_list_size[0]= 200;
  c_max_list_size[0]= 499;

  c_min_list_size[1]= 500;
  c_max_list_size[1]= 999;

  c_min_list_size[2]= 1000;
  c_max_list_size[2]= 4079;

  c_min_list_size[3]= 4080;
  c_max_list_size[3]= 8159;

  c_min_list_size[4]= 0;
  c_max_list_size[4]= 199;
}
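/*
 * Allocate a var-sized tuple: first the fixed-size part, then (if needed) a
 * variable-sized part whose location is stored in the fixed part's
 * Var_part_ref. If the var part cannot be allocated, the fixed part is freed
 * again and the allocation fails.
 */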
Uint32* Dbtup::alloc_var_rec(Uint32 * err,
                             Fragrecord* fragPtr,
                             Tablerec* tabPtr,
                             Uint32 alloc_size,
                             Local_key* key,
                             Uint32 * out_frag_page_id)
{
  /* Allocate the fixed-size part first; it holds the var part reference. */
  Uint32 *ptr = alloc_fix_rec(err, fragPtr, tabPtr, key, out_frag_page_id);
  if (unlikely(ptr == 0))
    return 0;

  Local_key varref;
  Tuple_header* tuple = (Tuple_header*)ptr;
  Var_part_ref* dst = tuple->get_var_part_ref_ptr(tabPtr);
  if (alloc_size)
  {
    if (likely(alloc_var_part(err, fragPtr, tabPtr, alloc_size, &varref) != 0))
    {
      dst->assign(&varref);
      return ptr;
    }
  }
  else
  {
    /* No var-sized data: store RNIL as the var part reference. */
    varref.m_page_no = RNIL;
    dst->assign(&varref);
    return ptr;
  }
  /* Var part allocation failed: undo the fixed part allocation. */
  PagePtr pagePtr;
  c_page_pool.getPtr(pagePtr, key->m_page_no);
  free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p);
  return 0;
}
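/*
 * Allocate alloc_size words on one of the fragment's var-sized pages. The
 * free lists are searched for alloc_size + 1 words, presumably to cover the
 * page's per-entry overhead. If no listed page fits, a fresh page is taken
 * from the common area and put on a free list first.
 */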
Uint32*
Dbtup::alloc_var_part(Uint32 * err,
                      Fragrecord* fragPtr,
                      Tablerec* tabPtr,
                      Uint32 alloc_size,
                      Local_key* key)
{
  PagePtr pagePtr;
  pagePtr.i= get_alloc_page(fragPtr, (alloc_size + 1));
  if (pagePtr.i == RNIL) {
    /* No existing var page has room: grab a fresh page. */
    if ((pagePtr.i= get_empty_var_page(fragPtr)) == RNIL) {
      * err = ZMEM_NOMEM_ERROR;
      return 0;
    }
    c_page_pool.getPtr(pagePtr);
    ((Var_page*)pagePtr.p)->init();
    pagePtr.p->list_index = MAX_FREE_LIST - 1;
    LocalDLList<Page> list(c_page_pool,
                           fragPtr->free_var_page_array[MAX_FREE_LIST-1]);
    list.add(pagePtr);
  } else {
    c_page_pool.getPtr(pagePtr);
  }
  Uint32 idx= ((Var_page*)pagePtr.p)
    ->alloc_record(alloc_size, (Var_page*)ctemp_page, Var_page::CHAIN);

  key->m_page_no = pagePtr.i;
  key->m_page_idx = idx;

  update_free_page_list(fragPtr, pagePtr);
  return ((Var_page*)pagePtr.p)->get_ptr(idx);
}
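/*
 * Free a var part addressed by a Local_key. If this empties the page
 * completely, the page itself is returned to the common area; otherwise the
 * page is moved to the free list matching its new free space.
 */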
void Dbtup::free_var_part(Fragrecord* fragPtr,
                          Tablerec* tabPtr,
                          Local_key* key)
{
  Ptr<Page> pagePtr;
  if (key->m_page_no != RNIL)
  {
    c_page_pool.getPtr(pagePtr, key->m_page_no);
    ((Var_page*)pagePtr.p)->free_record(key->m_page_idx, Var_page::CHAIN);

    ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS);
    if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1)
    {
      /* Page is now empty: unlink it from its free list and release it. */
      Uint32 idx = pagePtr.p->list_index;
      LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[idx]);
      list.remove(pagePtr);
      returnCommonArea(pagePtr.i, 1);
      fragPtr->noOfVarPages --;
    }
    else
    {
      update_free_page_list(fragPtr, pagePtr);
    }
  }
}
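/*
 * Free both parts of a tuple: the fixed part on the given page plus the var
 * part it references (if any).
 */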
void Dbtup::free_var_rec(Fragrecord* fragPtr,
                         Tablerec* tabPtr,
                         Local_key* key,
                         Ptr<Page> pagePtr)
{
  /* Read the var part reference out of the fixed part before freeing it. */
  Uint32 *ptr = ((Fix_page*)pagePtr.p)->get_ptr(key->m_page_idx, 0);
  Tuple_header* tuple = (Tuple_header*)ptr;

  Local_key ref;
  Var_part_ref * varref = tuple->get_var_part_ref_ptr(tabPtr);
  varref->copyout(&ref);

  free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p);

  if (ref.m_page_no != RNIL)
  {
    c_page_pool.getPtr(pagePtr, ref.m_page_no);
    free_var_part(fragPtr, pagePtr, ref.m_page_idx);
  }
}
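/*
 * Same as above, but with the var page already looked up and the entry
 * addressed by its index on that page.
 */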
void
Dbtup::free_var_part(Fragrecord* fragPtr, PagePtr pagePtr, Uint32 page_idx)
{
  ((Var_page*)pagePtr.p)->free_record(page_idx, Var_page::CHAIN);

  ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS);
  if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1)
  {
    Uint32 idx = pagePtr.p->list_index;
    LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[idx]);
    list.remove(pagePtr);
    returnCommonArea(pagePtr.i, 1);
    fragPtr->noOfVarPages --;
  }
  else
  {
    update_free_page_list(fragPtr, pagePtr);
  }
}
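/*
 * Grow a var part from oldsz to newsz words. If the current page has room,
 * the entry is grown in place (reorganising the page first when the free
 * space is not directly behind the entry); otherwise a new var part is
 * allocated, the data copied over, and the old part freed.
 */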
Uint32*
Dbtup::realloc_var_part(Uint32 * err,
                        Fragrecord* fragPtr, Tablerec* tabPtr, PagePtr pagePtr,
                        Var_part_ref* refptr, Uint32 oldsz, Uint32 newsz)
{
  Uint32 add = newsz - oldsz;
  Uint32 *new_var_ptr;
  Var_page* pageP = (Var_page*)pagePtr.p;
  Local_key oldref;
  refptr->copyout(&oldref);

  if (oldsz && pageP->free_space >= add)
  {
    /* The current page can hold the grown entry. */
    new_var_ptr= pageP->get_ptr(oldref.m_page_idx);
    if(!pageP->is_space_behind_entry(oldref.m_page_idx, add))
    {
      if(0) printf("extra reorg");
      /*
       * The free space is not directly behind the entry, so the page must be
       * reorganised. Copy the entry aside, shrink it to length 0 so reorg()
       * does not move it, reorganise, then copy it back to the start of the
       * free area and grow it to the full new length.
       */
      Uint32* copyBuffer= cinBuffer;
      memcpy(copyBuffer, new_var_ptr, 4*oldsz);
      pageP->set_entry_len(oldref.m_page_idx, 0);
      pageP->free_space += oldsz;
      pageP->reorg((Var_page*)ctemp_page);
      new_var_ptr= pageP->get_free_space_ptr();
      memcpy(new_var_ptr, copyBuffer, 4*oldsz);
      pageP->set_entry_offset(oldref.m_page_idx, pageP->insert_pos);
      add += oldsz;
    }
    pageP->grow_entry(oldref.m_page_idx, add);
    update_free_page_list(fragPtr, pagePtr);
  }
  else
  {
    /* Allocate a new var part elsewhere and move the data. */
    Local_key newref;
    new_var_ptr = alloc_var_part(err, fragPtr, tabPtr, newsz, &newref);
    if (unlikely(new_var_ptr == 0))
      return 0;

    if (oldsz)
    {
      Uint32 *src = pageP->get_ptr(oldref.m_page_idx);
      ndbassert(oldref.m_page_no != newref.m_page_no);
      ndbassert(pageP->get_entry_len(oldref.m_page_idx) == oldsz);
      memcpy(new_var_ptr, src, 4*oldsz);
      free_var_part(fragPtr, pagePtr, oldref.m_page_idx);
    }
    refptr->assign(&newref);
  }
  return new_var_ptr;
}
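/*
 * Move a var part to another page so that var parts can be packed onto
 * fuller pages. The move is skipped if the entry's size would need a
 * larger-free-space list than the page it already lives on, if no suitable
 * destination page exists, or if the chosen destination is the current page.
 */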
void
Dbtup::move_var_part(Fragrecord* fragPtr, Tablerec* tabPtr, PagePtr pagePtr,
                     Var_part_ref* refptr, Uint32 size)
{
  Var_page* pageP = (Var_page*)pagePtr.p;
  Local_key oldref;
  refptr->copyout(&oldref);

  /* Free list index a destination page would need for this entry size. */
  Uint32 new_index = calculate_free_list_impl(size);

  /* Do not move the entry to a page from a larger free-space list. */
  if (new_index > pageP->list_index)
  {
    return;
  }

  PagePtr new_pagePtr;
  new_pagePtr.i = get_alloc_page(fragPtr, size + 1);
  if (new_pagePtr.i == RNIL)
  {
    return;
  }
  /* Moving within the same page would achieve nothing. */
  if (new_pagePtr.i == pagePtr.i)
  {
    return;
  }

  c_page_pool.getPtr(new_pagePtr);
  Uint32 idx= ((Var_page*)new_pagePtr.p)
    ->alloc_record(size, (Var_page*)ctemp_page, Var_page::CHAIN);
  update_free_page_list(fragPtr, new_pagePtr);

  Uint32 *dst = ((Var_page*)new_pagePtr.p)->get_ptr(idx);
  const Uint32 *src = pageP->get_ptr(oldref.m_page_idx);
  memcpy(dst, src, 4*size);

  free_var_part(fragPtr, pagePtr, oldref.m_page_idx);

  /* Point the fixed part at the new location. */
  Local_key newref;
  newref.m_page_no = new_pagePtr.i;
  newref.m_page_idx = idx;
  refptr->assign(&newref);
}
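/*
 * Find a page with at least alloc_size words free. Start from the free list
 * that guarantees enough space; only if every such list is empty, scan a
 * limited number of pages (16) on the next smaller list.
 */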
Uint32
Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size)
{
  Uint32 i, start_index, loop= 0;
  PagePtr pagePtr;

  start_index= calculate_free_list_impl(alloc_size);
  if (start_index != (MAX_FREE_LIST - 1))
  {
    /* Start from the next list, which guarantees alloc_size words free. */
    ndbrequire(start_index < (MAX_FREE_LIST - 1));
    start_index++;
  }
  for (i= start_index; i < MAX_FREE_LIST; i++)
  {
    if (!fragPtr->free_var_page_array[i].isEmpty())
    {
      return fragPtr->free_var_page_array[i].firstItem;
    }
  }
  /* Nothing guaranteed to fit: scan a few pages on the list below. */
  ndbrequire(start_index > 0);
  i= start_index - 1;
  LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[i]);
  for(list.first(pagePtr); !pagePtr.isNull() && loop < 16; )
  {
    if (pagePtr.p->free_space >= alloc_size) {
      return pagePtr.i;
    }
    loop++;
    list.next(pagePtr);
  }
  return RNIL;
}
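/*
 * Take one page from the common area and prepare its header for use as a
 * var-sized page. Returns RNIL when no page could be allocated.
 */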
Uint32
Dbtup::get_empty_var_page(Fragrecord* fragPtr)
{
  PagePtr ptr;
  Uint32 cnt;
  allocConsPages(1, cnt, ptr.i);
  fragPtr->noOfVarPages+= cnt;
  if (unlikely(cnt == 0))
    return RNIL;

  c_page_pool.getPtr(ptr);
  ptr.p->physical_page_id = ptr.i;
  ptr.p->page_state = ~0;
  ptr.p->nextList = RNIL;
  ptr.p->prevList = RNIL;
  ptr.p->frag_page_id = RNIL;
  return ptr.i;
}
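/*
 * Re-bucket a page after its free space changed: if the free space no longer
 * falls inside its current list's [min, max] range, move the page to the
 * matching list, or to the overflow list (index MAX_FREE_LIST) when less
 * than the smallest useful amount is left.
 */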
void Dbtup::update_free_page_list(Fragrecord* fragPtr,
                                  PagePtr pagePtr)
{
  Uint32 free_space, list_index;
  free_space= pagePtr.p->free_space;
  list_index= pagePtr.p->list_index;
  if ((free_space < c_min_list_size[list_index]) ||
      (free_space > c_max_list_size[list_index])) {
    Uint32 new_list_index= calculate_free_list_impl(free_space);
    {
      LocalDLList<Page>
        list(c_page_pool, fragPtr->free_var_page_array[list_index]);
      list.remove(pagePtr);
    }
    if (free_space < c_min_list_size[new_list_index])
    {
      /*
       * Too little free space to qualify for any free list; park the page on
       * the overflow list so that it is not searched for new inserts.
       */
      ndbrequire(new_list_index == 0);
      new_list_index = MAX_FREE_LIST;
    }
    LocalDLList<Page> list(c_page_pool,
                           fragPtr->free_var_page_array[new_list_index]);
    list.add(pagePtr);
    pagePtr.p->list_index = new_list_index;
  }
}
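/*
 * Map a free-space size (in words) to the first free list whose maximum
 * covers it.
 */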
Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const
{
  Uint32 i;
  for (i = 0; i < MAX_FREE_LIST; i++) {
    if (free_space_size <= c_max_list_size[i]) {
      return i;
    }
  }
  ndbrequire(false);
  return 0;
}
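/*
 * Sum the used words over all var-sized pages of the fragment by walking
 * every free list, including the overflow list.
 */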
Uint64 Dbtup::calculate_used_var_words(Fragrecord* fragPtr)
{
  Uint64 totalUsed= 0;
  for (Uint32 freeList= 0; freeList <= MAX_FREE_LIST; freeList++)
  {
    LocalDLList<Page> list(c_page_pool,
                           fragPtr->free_var_page_array[freeList]);
    Ptr<Page> pagePtr;
    if (list.first(pagePtr))
    {
      do
      {
        totalUsed+= (Tup_varsize_page::DATA_WORDS - pagePtr.p->free_space);
      } while (list.next(pagePtr));
    }
  }
  return totalUsed;
}
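/*
 * Same as alloc_var_rec(), but the fixed part is allocated at a specific
 * rowid via alloc_fix_rowid().
 */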
Uint32*
Dbtup::alloc_var_rowid(Uint32 * err,
                       Fragrecord* fragPtr,
                       Tablerec* tabPtr,
                       Uint32 alloc_size,
                       Local_key* key,
                       Uint32 * out_frag_page_id)
{
  Uint32 *ptr = alloc_fix_rowid(err, fragPtr, tabPtr, key, out_frag_page_id);
  if (unlikely(ptr == 0))
    return 0;

  Local_key varref;
  Tuple_header* tuple = (Tuple_header*)ptr;
  Var_part_ref* dst = (Var_part_ref*)tuple->get_var_part_ref_ptr(tabPtr);
  if (alloc_size)
  {
    if (likely(alloc_var_part(err, fragPtr, tabPtr, alloc_size, &varref) != 0))
    {
      dst->assign(&varref);
      return ptr;
    }
  }
  else
  {
    varref.m_page_no = RNIL;
    dst->assign(&varref);
    return ptr;
  }
  /* Var part allocation failed: undo the fixed part allocation. */
  PagePtr pagePtr;
  c_page_pool.getPtr(pagePtr, key->m_page_no);
  free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p);
  return 0;
}