#define DBTUP_ABORT_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
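
/**
 * TUP_ABORTREQ: abort a single operation.  The operation record index
 * arrives in signal->theData[0]; the actual work is done in
 * do_tup_abortreq().
 */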
void Dbtup::execTUP_ABORTREQ(Signal* signal)
{
  jamEntry();
  do_tup_abortreq(signal, 0);
}
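
/**
 * Undo the changes one operation made to a tuple: return a
 * preallocated disk page, shrink or free a grown var-sized part and
 * restore the header bits of a freshly allocated tuple.  Returns true
 * when the tuple may have been modified, so the caller knows to
 * recompute its checksum.
 */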
bool
Dbtup::do_tup_abort_operation(Signal* signal,
                              Tuple_header *tuple_ptr,
                              Operationrec* opPtrP,
                              Fragrecord* fragPtrP,
                              Tablerec* tablePtrP)
{
  bool change = true;
  Uint32 bits= tuple_ptr->m_header_bits;
  if (opPtrP->op_struct.op_type != ZDELETE)
  {
    Tuple_header *copy= get_copy_tuple(&opPtrP->m_copy_tuple_location);
    if (opPtrP->op_struct.m_disk_preallocated)
    {
      jam();
      Local_key key;
      memcpy(&key, copy->get_disk_ref_ptr(tablePtrP), sizeof(key));
      disk_page_abort_prealloc(signal, fragPtrP, &key, key.m_page_idx);
    }

    if (! (bits & Tuple_header::ALLOC))
    {
      if (bits & Tuple_header::MM_GROWN)
      {
        if (0) ndbout_c("abort grow");
        Ptr<Page> vpage;
        Uint32 idx= opPtrP->m_tuple_location.m_page_idx;
        Uint32 *var_part;

        ndbassert(! (tuple_ptr->m_header_bits & Tuple_header::COPY_TUPLE));

        Var_part_ref *ref = tuple_ptr->get_var_part_ref_ptr(tablePtrP);
        Local_key tmp;
        ref->copyout(&tmp);
        idx= tmp.m_page_idx;
        var_part= get_ptr(&vpage, *ref);
        Var_page* pageP = (Var_page*)vpage.p;
        Uint32 len= pageP->get_entry_len(idx) & ~Var_page::CHAIN;

        /* The length before the grow is kept in the last word of the
         * grown var part: shrink back to it, or free the var part if
         * the tuple had none before this operation. */
        Uint32 sz= var_part[len-1];
        if (sz)
        {
          jam();
          pageP->shrink_entry(idx, sz);
          update_free_page_list(fragPtrP, vpage);
        }
        else
        {
          jam();
          free_var_part(fragPtrP, vpage, tmp.m_page_idx);
          tmp.m_page_no= RNIL;
          ref->assign(&tmp);
          bits &= ~(Uint32)Tuple_header::VAR_PART;
        }
        tuple_ptr->m_header_bits= bits & ~Tuple_header::MM_GROWN;
      }
      else if (bits & Tuple_header::MM_SHRINK)
      {
        if (0) ndbout_c("abort shrink");
      }
    }
    else if (opPtrP->is_first_operation())
    {
      jam();
      /* Aborting the operation that allocated the tuple: mark it freed. */
      tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC;
      tuple_ptr->m_header_bits |= Tuple_header::FREED;
    }
  }
  else if (opPtrP->is_first_operation())
  {
    jam();
    if (bits & Tuple_header::ALLOC)
    {
      jam();
      tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC;
      tuple_ptr->m_header_bits |= Tuple_header::FREED;
    }
  }
  return change;
}
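
/**
 * Abort an operation and every later, still-prepared operation on the
 * same tuple: run the TUX abort triggers (unless ZSKIP_TUX_TRIGGERS is
 * set in flags), undo the tuple changes, free any reserved undo log
 * space and release the operation record.
 */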
void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
{
  OperationrecPtr regOperPtr;
  FragrecordPtr regFragPtr;
  TablerecPtr regTabPtr;

  regOperPtr.i = signal->theData[0];
  c_operation_pool.getPtr(regOperPtr);
  TransState trans_state= get_trans_state(regOperPtr.p);
  ndbrequire((trans_state == TRANS_STARTED) ||
             (trans_state == TRANS_TOO_MUCH_AI) ||
             (trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) ||
             (trans_state == TRANS_IDLE));
  if (regOperPtr.p->op_struct.op_type == ZREAD) {
    jam();
    /* Reads leave the tuple untouched; just release the operation. */
    initOpConnection(regOperPtr.p);
    return;
  }

  regFragPtr.i = regOperPtr.p->fragmentPtr;
  ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);

  regTabPtr.i = regFragPtr.p->fragTableId;
  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  PagePtr page;
  Tuple_header *tuple_ptr= (Tuple_header*)
    get_ptr(&page, &regOperPtr.p->m_tuple_location, regTabPtr.p);

  if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
  {
    jam();
    /* Fire TUX abort triggers for this operation and for every later,
     * not yet aborted operation on the tuple. */
    if (!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
        ! (flags & ZSKIP_TUX_TRIGGERS))
    {
      jam();
      executeTuxAbortTriggers(signal,
                              regOperPtr.p,
                              regFragPtr.p,
                              regTabPtr.p);

      OperationrecPtr loopOpPtr;
      loopOpPtr.i = regOperPtr.p->nextActiveOp;
      while (loopOpPtr.i != RNIL)
      {
        jam();
        c_operation_pool.getPtr(loopOpPtr);
        if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED)
        {
          jam();
          executeTuxAbortTriggers(signal,
                                  loopOpPtr.p,
                                  regFragPtr.p,
                                  regTabPtr.p);
        }
        loopOpPtr.i = loopOpPtr.p->nextActiveOp;
      }
    }

    /* Undo the tuple changes of this operation and of every later,
     * not yet aborted operation. */
    bool change = do_tup_abort_operation(signal,
                                         tuple_ptr,
                                         regOperPtr.p,
                                         regFragPtr.p,
                                         regTabPtr.p);

    OperationrecPtr loopOpPtr;
    loopOpPtr.i = regOperPtr.p->nextActiveOp;
    while (loopOpPtr.i != RNIL)
    {
      jam();
      c_operation_pool.getPtr(loopOpPtr);
      if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED)
      {
        jam();
        change |= do_tup_abort_operation(signal,
                                         tuple_ptr,
                                         loopOpPtr.p,
                                         regFragPtr.p,
                                         regTabPtr.p);
        set_tuple_state(loopOpPtr.p, TUPLE_ALREADY_ABORTED);
      }
      loopOpPtr.i = loopOpPtr.p->nextActiveOp;
    }

    if (change && (regTabPtr.p->m_bits & Tablerec::TR_Checksum))
    {
      jam();
      setChecksum(tuple_ptr, regTabPtr.p);
    }
  }

  if (regOperPtr.p->is_first_operation() && regOperPtr.p->is_last_operation())
  {
    if (regOperPtr.p->m_undo_buffer_space)
    {
      jam();
      D("Logfile_client - do_tup_abortreq");
      Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id);
      lgman.free_log_space(regOperPtr.p->m_undo_buffer_space);
    }
  }

  removeActiveOpList(regOperPtr.p, tuple_ptr);
  initOpConnection(regOperPtr.p);
}
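
/* ---------------------------------------------------------------- */
/* ----------------------------- ABORT ---------------------------- */
/* Map the error_type reported by TUPKEYREQ execution to a TUP error */
/* code in terrorCode, then fail the operation via tupkeyErrorLab(). */
/* ---------------------------------------------------------------- */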
int Dbtup::TUPKEY_abort(KeyReqStruct * req_struct, int error_type)
{
  switch(error_type) {
  case 1:
    // Memory allocation failure
    terrorCode= ZMEM_NOMEM_ERROR;
    break;
  case 15: case 20: case 23: case 24:
  case 26: case 27: case 28:
    terrorCode = ZREGISTER_INIT_ERROR;
    break;
  case 16: case 19:
    terrorCode = ZTRY_TO_UPDATE_ERROR;
    break;
  case 17:
    terrorCode = ZNO_ILLEGAL_NULL_ATTR;
    break;
  case 22:
    terrorCode = ZTOTAL_LEN_ERROR;
    break;
  case 30:
    terrorCode = ZCALL_ERROR;
    break;
  case 31:
    terrorCode = ZSTACK_OVERFLOW_ERROR;
    break;
  case 32:
    terrorCode = ZSTACK_UNDERFLOW_ERROR;
    break;
  case 33:
    terrorCode = ZNO_INSTRUCTION_ERROR;
    break;
  case 34:
    terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR;
    break;
  case 35:
    terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR;
    break;
  case 38:
    terrorCode = ZTEMPORARY_RESOURCE_FAILURE;
    break;
  case 39:
    if (get_trans_state(req_struct->operPtrP) == TRANS_TOO_MUCH_AI) {
      jam();
      terrorCode = ZTOO_MUCH_ATTRINFO_ERROR;
    } else if (get_trans_state(req_struct->operPtrP) ==
               TRANS_ERROR_WAIT_TUPKEYREQ) {
      jam();
      terrorCode = ZSEIZE_ATTRINBUFREC_ERROR;
    } else {
      ndbrequire(false);
    }
    break;
  case 40:
    terrorCode = ZUNSUPPORTED_BRANCH;
    break;
  default:
    ndbrequire(false);
    break;
  }
  tupkeyErrorLab(req_struct);
  return -1;
}
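
/**
 * Error found before the operation was linked into the tuple's active
 * operation list: reset the operation and reply with TUPKEYREF.
 */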
void Dbtup::early_tupkey_error(KeyReqStruct* req_struct)
{
  Operationrec * const regOperPtr = req_struct->operPtrP;
  ndbrequire(!regOperPtr->op_struct.in_active_list);
  set_trans_state(regOperPtr, TRANS_IDLE);
  set_tuple_state(regOperPtr, TUPLE_PREPARED);
  initOpConnection(regOperPtr);
  send_TUPKEYREF(req_struct->signal, regOperPtr);
}
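
/**
 * Common error exit for TUPKEYREQ: free reserved undo log space,
 * unlink the operation from the tuple and reply with TUPKEYREF.
 */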
void Dbtup::tupkeyErrorLab(KeyReqStruct* req_struct)
{
  Operationrec * const regOperPtr = req_struct->operPtrP;
  set_trans_state(regOperPtr, TRANS_IDLE);
  set_tuple_state(regOperPtr, TUPLE_PREPARED);

  FragrecordPtr fragPtr;
  fragPtr.i= regOperPtr->fragmentPtr;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

  TablerecPtr tabPtr;
  tabPtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

  if (regOperPtr->m_undo_buffer_space &&
      (regOperPtr->is_first_operation() && regOperPtr->is_last_operation()))
  {
    jam();
    D("Logfile_client - tupkeyErrorLab");
    Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id);
    lgman.free_log_space(regOperPtr->m_undo_buffer_space);
  }

  Uint32 *ptr = 0;
  if (!regOperPtr->m_tuple_location.isNull())
  {
    PagePtr tmp;
    ptr= get_ptr(&tmp, &regOperPtr->m_tuple_location, tabPtr.p);
  }

  removeActiveOpList(regOperPtr, (Tuple_header*)ptr);
  initOpConnection(regOperPtr);
  send_TUPKEYREF(req_struct->signal, regOperPtr);
}
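
/**
 * Reply to the owning LQH instance with TUPKEYREF carrying the error
 * code currently held in terrorCode.
 */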
void Dbtup::send_TUPKEYREF(Signal* signal,
                           Operationrec* const regOperPtr)
{
  TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend();
  tupKeyRef->userRef = regOperPtr->userpointer;
  tupKeyRef->errorCode = terrorCode;
  BlockReference lqhRef = calcInstanceBlockRef(DBLQH);
  sendSignal(lqhRef, GSN_TUPKEYREF, signal,
             TupKeyRef::SignalLength, JBB);
}
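
/**
 * Unlink an operation from the doubly linked list of active operations
 * anchored in the tuple header, and free its copy tuple if one was
 * allocated.
 */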
void Dbtup::removeActiveOpList(Operationrec* const regOperPtr,
                               Tuple_header *tuple_ptr)
{
  OperationrecPtr raoOperPtr;

  if (!regOperPtr->m_copy_tuple_location.isNull())
  {
    jam();
    c_undo_buffer.free_copy_tuple(&regOperPtr->m_copy_tuple_location);
  }

  if (regOperPtr->op_struct.in_active_list) {
    regOperPtr->op_struct.in_active_list= false;
    if (regOperPtr->nextActiveOp != RNIL) {
      jam();
      raoOperPtr.i= regOperPtr->nextActiveOp;
      c_operation_pool.getPtr(raoOperPtr);
      raoOperPtr.p->prevActiveOp= regOperPtr->prevActiveOp;
    } else {
      jam();
      /* Removed the newest operation: the tuple header now points at
       * the previous one (or RNIL if the list becomes empty). */
      tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp;
    }
    if (regOperPtr->prevActiveOp != RNIL) {
      jam();
      raoOperPtr.i= regOperPtr->prevActiveOp;
      c_operation_pool.getPtr(raoOperPtr);
      raoOperPtr.p->nextActiveOp= regOperPtr->nextActiveOp;
    }
    regOperPtr->prevActiveOp= RNIL;
    regOperPtr->nextActiveOp= RNIL;
  }
}