22 #include <ndb_global.h>
25 #include <OutputStream.hpp>
26 #include <NdbTest.hpp>
29 #include <NdbRestarter.hpp>
71 int m_timeout_retries;
85 const char* m_tnameperf;
104 m_timeout_retries(10),
110 m_pk1off(0x12340000),
130 <<
"usage: testBlobs options [default/max]" << endl
131 <<
" -batch N number of pk ops in batch [" << d.m_batch <<
"]" << endl
132 <<
" -core dump core on error" << endl
133 <<
" -dbg print program debug" << endl
134 <<
" -debug opt also ndb api DBUG (if no ':' becomes d:t:F:L:o,opt)" << endl
135 <<
" -fac fetch across commit in scan delete" << endl
136 <<
" -full read/write only full blob values" << endl
137 <<
" -loop N loop N times 0=forever [" << d.m_loop <<
"]" << endl
138 <<
" -min small blob sizes" << endl
139 <<
" -parts N max parts in blob value [" << d.m_parts <<
"]" << endl
140 <<
" -rows N number of rows [" << d.m_rows <<
"]" << endl
141 <<
" -rowsperf N rows for performace test [" << d.m_rowsperf <<
"]" << endl
142 <<
" -seed N random seed 0=loop number -1=random [" << d.m_seed <<
"]" << endl
143 <<
" -skip xxx skip given tests (see list) [no tests]" << endl
144 <<
" -test xxx only given tests (see list) [all tests]" << endl
145 <<
" -timeoutretries N Number of times to retry in deadlock situations ["
146 << d.m_timeout_retries <<
"]" << endl
147 <<
" -version N blob version 1 or 2 [" << d.m_blob_version <<
"]" << endl
148 <<
"metadata" << endl
149 <<
" -pk2len N native length of PK2, zero omits PK2,PK3 [" << d.m_pk2chr.m_len <<
"]" << endl
150 <<
" -pk2fixed PK2 is Char [default Varchar]" << endl
151 <<
" -pk2binary PK2 is Binary or Varbinary" << endl
152 <<
" -pk2cs PK2 charset or collation [" << d.m_pk2chr.m_cs <<
"]" << endl
153 <<
" -pk2part partition primary table by PK2" << endl
154 <<
" -oneblob only 1 blob attribute [default 2]" << endl
155 <<
" -rbatch N Read parts batchsize (bytes) [default -1] -1=random" << endl
156 <<
" -wbatch N Write parts batchsize (bytes) [default -1] -1=random" << endl
157 <<
"disk or memory storage for blobs. Don't apply to performance test" << endl
158 <<
" m Blob columns stored in memory" << endl
159 <<
" h Blob columns stored on disk" << endl
160 <<
"api styles for test/skip. Don't apply to performance test" << endl
161 <<
" a NdbRecAttr(old) interface" << endl
162 <<
" b NdbRecord interface" << endl
163 <<
"test cases for test/skip" << endl
164 <<
" k primary key ops" << endl
165 <<
" i hash index ops" << endl
166 <<
" s table scans" << endl
167 <<
" r ordered index scans" << endl
168 <<
" p performance test" << endl
169 <<
"operations for test/skip" << endl
170 <<
" u update existing blob value" << endl
171 <<
" n normal insert and update" << endl
172 <<
" w insert and update using writeTuple" << endl
173 <<
" d delete, can skip only for one subtest" << endl
174 <<
" l read with lock and unlock" << endl
175 <<
"blob operation styles for test/skip" << endl
176 <<
" 0 getValue / setValue" << endl
177 <<
" 1 setActiveHook" << endl
178 <<
" 2 readData / writeData" << endl
179 <<
"example: -test makn0 (need all 4 parts)" << endl
180 <<
"example: -test mhabkisrunwd012 (Everything except performance tests" << endl
181 <<
"bug tests" << endl
182 <<
" -bug 4088 ndb api hang with mixed ops on index table" << endl
183 <<
" -bug 27018 middle partial part write clobbers rest of part" << endl
184 <<
" -bug 27370 Potential inconsistent blob reads for ReadCommitted reads" << endl
185 <<
" -bug 36756 Handling execute(.., abortOption) and Blobs " << endl
186 <<
" -bug 45768 execute(Commit) after failing blob batch " << endl
187 <<
" -bug 62321 Blob obscures ignored error codes in batch" << endl
200 (g_opt.m_test == 0 || strchr(g_opt.m_test, x) != 0) &&
201 (g_opt.m_skip == 0 || strchr(g_opt.m_skip, x) == 0);
// ---- file-scope test state shared by all test cases ----
static Ndb* g_ndb = 0;            // NDB API connection handle
static bool g_printerror = true;  // printerror() clears this so only the first failure is reported
static unsigned g_loop = 0;       // current outer test-loop iteration (see -loop option)

// Byte offsets of each column within the NdbRecord row buffers
// (m_key_row / m_row); computed at runtime from the PK2 options.
static unsigned g_pk1_offset= 0;
static unsigned g_pk2_offset= 0;
static unsigned g_pk3_offset= 0;
static unsigned g_blob1_offset= 0;
static unsigned g_blob1_null_offset= 0;
static unsigned g_blob2_offset= 0;
static unsigned g_blob2_null_offset= 0;
static unsigned g_rowsize= 0;     // total NdbRecord row buffer size in bytes

static const char* g_tsName= "DEFAULT-TS";  // tablespace used when blobs are stored on disk
static Uint32 g_batchSize= 0;
static Uint32 g_scanFlags= 0;
static Uint32 g_parallel= 0;
// NOTE(review): declared Uint32 but initialized with the bool literal 'false'
// (converts to 0); presumably used as an on/off flag -- confirm whether it
// should be bool before changing the type.
static Uint32 g_usingDisk=
false;

// User-defined partitioning support: per-fragment node-group mapping passed
// to Table::setFragmentData() in createTable().
static const Uint32 MAX_FRAGS=48 * 8 * 4;
static Uint32 frag_ng_mappings[MAX_FRAGS];
238 static const char* stylename[3] = {
239 "style=getValue/setValue",
240 "style=setActiveHook",
241 "style=readData/writeData"
245 static const char* apiName[2] = {
250 static const char apiSymbol[2] = {
// Index values selecting which NDB API flavour a test case exercises
// (used to index apiName[] / apiSymbol[]).
static const int API_RECATTR=0;    // old NdbRecAttr-based interface ('a' in -test/-skip)
static const int API_NDBRECORD=1;  // NdbRecord-based interface ('b' in -test/-skip)
258 static const char* storageName[2] = {
263 static const char storageSymbol[2] = {
// Index values selecting blob column storage (used to index storageName[] /
// storageSymbol[]); drives StorageTypeMemory vs. StorageTypeDisk in createTable().
static const int STORAGE_MEM=0;   // blob columns stored in memory ('m')
static const int STORAGE_DISK=1;  // blob columns stored on disk ('h')
272 printerror(
int line,
const char*
msg)
274 ndbout <<
"line " << line <<
" FAIL " << msg << endl;
275 if (! g_printerror) {
300 if (ope != g_opr && ope != g_const_opr && ope != g_opx && ope != g_ops)
313 g_printerror =
false;
319 printerror(__LINE__, #x); return -1; \
323 if (! g_opt.m_dbg) break; \
324 ndbout << "line " << __LINE__ << " " << x << endl; \
328 ndbout << "line " << __LINE__ << " " << x << endl; \
// Zero-initialize every member in one shot.
// NOTE(review): memset(this, ...) is only valid while Bcol remains a
// trivially-copyable struct with no virtuals -- confirm if members change.
Bcol() { memset(this, 0, sizeof(*this)); }
345 enum OpState {Normal, Retrying};
353 b.m_version = g_opt.m_blob_version;
354 b.m_nullable =
false;
355 b.m_inline = g_opt.m_min ? 8 : 240;
356 b.m_partsize = g_opt.m_min ? 8 : 2000;
357 b.m_stripe = b.m_version == 1 ? 4 : 0;
362 b.m_version = g_opt.m_blob_version;
364 b.m_inline = g_opt.m_min ? 9 : 99;
365 b.m_partsize = g_opt.m_min ? 5 : 55;
374 g_pk2_offset= g_pk1_offset + 4;
375 g_pk3_offset= g_pk2_offset + g_opt.m_pk2chr.m_totlen;
376 g_blob1_offset= g_pk3_offset + 2;
377 g_blob2_offset= g_blob1_offset +
sizeof(
NdbBlob *);
378 g_blob1_null_offset= g_blob2_offset +
sizeof(
NdbBlob *);
379 g_blob2_null_offset= g_blob1_null_offset + 1;
380 g_rowsize= g_blob2_null_offset + 1;
384 createDefaultTableSpace()
389 if (strcmp(lg.getName(),
"DEFAULT-LG") != 0)
391 lg.setName(
"DEFAULT-LG");
392 lg.setUndoBufferSize(8*1024*1024);
393 res = g_dic->createLogfileGroup(lg);
395 DBG(
"Failed to create logfilegroup:"
402 if (strcmp(uf.getPath(),
"undofile01.dat") != 0)
404 uf.setPath(
"undofile01.dat");
405 uf.setSize(32*1024*1024);
406 uf.setLogfileGroup(
"DEFAULT-LG");
408 res = g_dic->createUndofile(uf,
true);
410 DBG(
"Failed to create undofile:"
418 if (strcmp(uf.getPath(),
"undofile02.dat") != 0)
420 uf.setPath(
"undofile02.dat");
421 uf.setSize(32*1024*1024);
422 uf.setLogfileGroup(
"DEFAULT-LG");
424 res = g_dic->createUndofile(uf,
true);
426 DBG(
"Failed to create undofile:"
433 if (strcmp(ts.getName(), g_tsName) != 0)
435 ts.setName(g_tsName);
436 ts.setExtentSize(1024*1024);
437 ts.setDefaultLogfileGroup(
"DEFAULT-LG");
439 res = g_dic->createTablespace(ts);
441 DBG(
"Failed to create tablespace:"
449 if (strcmp(df.getPath(),
"datafile01.dat") != 0)
451 df.setPath(
"datafile01.dat");
452 df.setSize(64*1024*1024);
453 df.setTablespace(g_tsName);
455 res = g_dic->createDatafile(df,
true);
457 DBG(
"Failed to create datafile:"
466 if (strcmp(df.getPath(),
"datafile02.dat") != 0)
468 df.setPath(
"datafile02.dat");
469 df.setSize(64*1024*1024);
470 df.setTablespace(g_tsName);
472 res = g_dic->createDatafile(df,
true);
474 DBG(
"Failed to create datafile:"
488 if (g_dic->
getTable(g_opt.m_tname) != 0)
489 CHK(g_dic->
dropTable(g_opt.m_tname) == 0);
491 if (g_key_record != NULL)
492 g_dic->releaseRecord(g_key_record);
493 if (g_blob_record != NULL)
494 g_dic->releaseRecord(g_blob_record);
495 if (g_full_record != NULL)
496 g_dic->releaseRecord(g_full_record);
498 if (g_opt.m_pk2chr.m_len != 0)
500 if (g_idx_record != NULL)
501 g_dic->releaseRecord(g_idx_record);
502 if (g_ord_record != NULL)
503 g_dic->releaseRecord(g_ord_record);
518 return n == 0 ? 0 : ndb_rand() %
n;
522 createTable(
int storageType)
525 bool loggingRequired=(storageType == STORAGE_DISK);
526 NdbDictionary::Column::StorageType blobStorageType=
527 (storageType == STORAGE_MEM)?
528 NdbDictionary::Column::StorageTypeMemory :
529 NdbDictionary::Column::StorageTypeDisk;
532 if (storageType == STORAGE_DISK)
533 tab.setTablespaceName(g_tsName);
534 tab.setLogging(loggingRequired);
543 Uint32 fragTypeRange= 1 + (NdbDictionary::Object::HashMapPartition -
544 NdbDictionary::Object::DistrKeyHash);
545 Uint32 fragType= NdbDictionary::Object::DistrKeyHash + urandom(fragTypeRange);
549 fragType= NdbDictionary::Object::UserDefined;
553 if (fragType == NdbDictionary::Object::UserDefined)
558 const Uint32 numNodes= g_ncc->no_db_nodes();
559 const Uint32 numReplicas= 2;
560 const Uint32 guessNumNgs= numNodes/2;
561 const Uint32 numNgs= guessNumNgs?guessNumNgs : 1;
562 const Uint32 numFragsPerNode= 2 + (rand() % 3);
563 const Uint32 numPartitions= numReplicas * numNgs * numFragsPerNode;
565 tab.setFragmentCount(numPartitions);
566 for (Uint32
i=0;
i<numPartitions;
i++)
568 frag_ng_mappings[
i]=
i % numNgs;
570 tab.setFragmentData(frag_ng_mappings, numPartitions);
572 const Chr& pk2chr = g_opt.m_pk2chr;
576 col.setPrimaryKey(
true);
581 const Bcol& b = g_blob1;
583 col.setBlobVersion(b.m_version);
584 col.setNullable(b.m_nullable);
585 col.setInlineSize(b.m_inline);
586 col.setPartSize(b.m_partsize);
587 col.setStripeSize(b.m_stripe);
588 col.setStorageType(blobStorageType);
592 if (pk2chr.m_len != 0)
594 col.setType(pk2chr.m_type);
595 col.setPrimaryKey(
true);
596 col.setLength(pk2chr.m_bytelen);
597 if (pk2chr.m_csinfo != 0)
598 col.setCharset(pk2chr.m_csinfo);
600 col.setPartitionKey(
true);
604 if (! g_opt.m_oneblob)
606 const Bcol& b = g_blob2;
608 col.setBlobVersion(b.m_version);
609 col.setNullable(b.m_nullable);
610 col.setInlineSize(b.m_inline);
611 col.setPartSize(b.m_partsize);
612 col.setStripeSize(b.m_stripe);
613 col.setStorageType(blobStorageType);
617 if (pk2chr.m_len != 0)
620 col.setPrimaryKey(
true);
627 if (g_opt.m_pk2chr.m_len != 0)
630 idx.setLogging(loggingRequired);
631 idx.setTable(g_opt.m_tname);
632 idx.addColumnName(
"PK2");
633 idx.addColumnName(
"PK3");
637 if (g_opt.m_pk2chr.m_len != 0)
640 idx.setLogging(
false);
641 idx.setTable(g_opt.m_tname);
642 idx.addColumnName(
"PK2");
647 unsigned numpks= g_opt.m_pk2chr.m_len == 0 ? 1 : 3;
648 unsigned numblobs= g_opt.m_oneblob ? 1 : 2;
651 CHK((dict_table= g_dic->
getTable(g_opt.m_tname)) != 0);
652 memset(spec, 0,
sizeof(spec));
653 spec[0].column= dict_table->
getColumn(
"PK1");
654 spec[0].offset= g_pk1_offset;
655 spec[numpks].column= dict_table->
getColumn(
"BL1");
656 spec[numpks].offset= g_blob1_offset;
657 spec[numpks].nullbit_byte_offset= g_blob1_null_offset;
658 spec[numpks].nullbit_bit_in_byte= 0;
659 if (g_opt.m_pk2chr.m_len != 0)
661 spec[1].column= dict_table->
getColumn(
"PK2");
662 spec[1].offset= g_pk2_offset;
663 spec[2].column= dict_table->
getColumn(
"PK3");
664 spec[2].offset= g_pk3_offset;
666 if (! g_opt.m_oneblob)
668 spec[numpks+1].column= dict_table->
getColumn(
"BL2");
669 spec[numpks+1].offset= g_blob2_offset;
670 spec[numpks+1].nullbit_byte_offset= g_blob2_null_offset;
671 spec[numpks+1].nullbit_bit_in_byte= 0;
673 CHK((g_key_record= g_dic->createRecord(dict_table, &spec[0], numpks,
674 sizeof(spec[0]))) != 0);
675 CHK((g_blob_record= g_dic->createRecord(dict_table, &spec[numpks], numblobs,
676 sizeof(spec[0]))) != 0);
677 CHK((g_full_record= g_dic->createRecord(dict_table, &spec[0], numpks+numblobs,
678 sizeof(spec[0]))) != 0);
680 if (g_opt.m_pk2chr.m_len != 0)
683 CHK((dict_index= g_dic->
getIndex(g_opt.m_x1name, g_opt.m_tname)) != 0);
684 CHK((g_idx_record= g_dic->createRecord(dict_index, &spec[1], 2,
685 sizeof(spec[0]))) != 0);
686 CHK((dict_index= g_dic->
getIndex(g_opt.m_x2name, g_opt.m_tname)) != 0);
687 CHK((g_ord_record= g_dic->createRecord(dict_index, &spec[1], 1,
688 sizeof(spec[0]))) != 0);
711 ~
Bval() {
delete [] m_val;
delete [] m_buf; }
713 alloc(m_bcol.m_inline + m_bcol.m_partsize * g_opt.m_parts);
715 void alloc(
unsigned buflen) {
718 m_buf =
new char [m_buflen];
721 void copyfrom(
const Bval& v) {
727 m_val = (
char*)memcpy(
new char [m_len], v.m_val, m_len);
731 memset(m_buf,
'x', m_buflen);
739 operator<<(NdbOut& out,
const Bval& v)
741 if (g_opt.m_min && v.m_val != 0) {
742 out <<
"[" << v.m_len <<
"]";
743 for (uint
i = 0;
i < v.m_len;
i++) {
744 const Bcol& b = v.m_bcol;
745 if (
i == b.m_inline ||
746 (
i > b.m_inline && (
i - b.m_inline) % b.m_partsize == 0))
748 out.print(
"%c", v.m_val[
i]);
767 m_pk2(
new char [g_opt.m_pk2chr.m_totlen + 1]),
768 m_pk2eq(
new char [g_opt.m_pk2chr.m_totlen + 1]),
771 m_key_row(
new char[g_rowsize]),
772 m_row(
new char[g_rowsize]),
790 void copyfrom(
const Tup& tup) {
791 assert(m_pk1 == tup.m_pk1);
792 m_bval1.copyfrom(tup.m_bval1);
793 m_bval2.copyfrom(tup.m_bval2);
801 if (g_opt.m_blob_version == 1)
803 return urandom(2) == 0 ? m_pk2 : m_pk2eq;
805 Uint32 getPartitionId(Uint32 numParts)
const {
807 return m_pk1 % numParts;
812 Tup& operator=(
const Tup&);
824 DBG(
"Setting partition id to " << partId <<
" out of " <<
831 setUDpartIdNdbRecord(
const Tup& tup,
835 opts.optionsPresent= 0;
838 opts.optionsPresent= NdbOperation::OperationOptions::OO_PARTITION_ID;
844 calcBval(
const Bcol& b,
Bval& v,
bool keepsize)
846 if (b.m_nullable && urandom(10) == 0) {
850 v.m_buf =
new char [1];
852 if (keepsize && v.m_val != 0)
854 else if (urandom(10) == 0)
855 v.m_len = urandom(b.m_inline);
857 v.m_len = urandom(b.m_inline + g_opt.m_parts * b.m_partsize + 1);
859 v.m_val =
new char [v.m_len + 1];
860 for (
unsigned i = 0;
i < v.m_len;
i++)
861 v.m_val[
i] =
'a' + urandom(26);
862 v.m_val[v.m_len] = 0;
863 v.m_buf =
new char [v.m_len];
865 v.m_buflen = v.m_len;
879 bool isTimeout= ((code == 274) ||
882 ndbout <<
"Connection error is not timeout, but is "
895 calcBval(
Tup& tup,
bool keepsize)
897 calcBval(g_blob1, tup.m_bval1, keepsize);
898 if (! g_opt.m_oneblob)
899 calcBval(g_blob2, tup.m_bval2, keepsize);
904 calcTups(
bool keys,
bool keepsize =
false)
906 for (uint k = 0; k < g_opt.m_rows; k++) {
907 Tup& tup = g_tups[k];
909 tup.m_pk1 = g_opt.m_pk1off + k;
911 const Chr& c = g_opt.m_pk2chr;
912 char*
const p = tup.m_pk2;
913 char*
const q = tup.m_pk2eq;
914 uint len = urandom(c.m_len + 1);
917 *(uchar*)&p[0] = *(uchar*)&q[0] = len;
923 if (urandom(3) == 0) {
924 uint u = urandom(26);
926 q[
i] = c.m_caseins ?
'a' + u :
'A' + u;
928 uint u = urandom(26);
930 q[
i] = c.m_caseins ?
'A' + u :
'a' + u;
935 while (j < c.m_bytelen) {
943 assert(i == c.m_totlen);
946 tup.m_pk3 = (Uint16)k;
948 calcBval(tup, keepsize);
952 static void setBatchSizes()
954 if (g_opt.m_rbatch != 0)
956 Uint32 byteSize = (g_opt.m_rbatch == -1) ?
957 urandom(~Uint32(0)) :
960 DBG(
"Setting read batch size to " << byteSize
962 g_con->setMaxPendingBlobReadBytes(byteSize);
965 if (g_opt.m_wbatch != 0)
967 Uint32 byteSize = (g_opt.m_wbatch == -1) ?
968 urandom(~Uint32(0)) :
971 DBG(
"Setting write batch size to " << byteSize
973 g_con->setMaxPendingBlobWriteBytes(byteSize);
984 if (! g_opt.m_oneblob)
997 if (! g_opt.m_oneblob)
1008 if (! g_opt.m_oneblob)
1015 getBlobLength(
NdbBlob* h,
unsigned& len)
1017 Uint64 len2 = (unsigned)-1;
1019 len = (unsigned)len2;
1020 assert(len == len2);
1022 CHK(h->getNull(isNull) == 0);
1023 DBG(
"getBlobLength " << h->
getColumn()->
getName() <<
" len=" << len <<
" null=" << isNull);
1030 setBlobValue(
NdbBlob* h,
const Bval& v,
int error_code = 0)
1032 bool null = (v.m_val == 0);
1035 DBG(
"setValue " << h->
getColumn()->
getName() <<
" len=" << v.m_len <<
" null=" << null <<
" " << v);
1041 CHK(h->getNull(isNull) == 0 && isNull ==
true);
1042 CHK(getBlobLength(h, len) == 0 && len == 0);
1047 CHK(h->getNull(isNull) == 0 && isNull ==
false);
1048 CHK(getBlobLength(h, len) == 0 && len == v.m_len);
1054 setBlobValue(
const Tup& tup,
int error_code = 0)
1056 CHK(setBlobValue(g_bh1, tup.m_bval1, error_code) == 0);
1057 if (! g_opt.m_oneblob)
1058 CHK(setBlobValue(g_bh2, tup.m_bval2, error_code) == 0);
1066 CHK(h->
getValue(v.m_buf, v.m_buflen) == 0);
1071 getBlobValue(
const Tup& tup)
1073 CHK(getBlobValue(g_bh1, tup.m_bval1) == 0);
1074 if (! g_opt.m_oneblob)
1075 CHK(getBlobValue(g_bh2, tup.m_bval2) == 0);
1089 static int presetBH1(
int rowNumber)
1091 unsigned int variant = urandom(2);
1092 DBG(
"presetBH1 - Variant=" << variant);
1097 CHK(setBlobValue(g_tups[(rowNumber+1) % g_opt.m_rows]) == 0);
1105 bool null = (v.m_val == 0);
1110 CHK(h->getNull(isNull) == 0 && isNull ==
true);
1111 CHK(getBlobLength(h, len) == 0 && len == 0);
1114 CHK(h->getNull(isNull) == 0 && isNull ==
false);
1115 CHK(getBlobLength(h, len) == 0 && len == v.m_len);
1116 for (
unsigned i = 0; i < v.m_len; i++)
1117 CHK(v.m_val[i] == v.m_buf[i]);
1123 verifyBlobValue(
const Tup& tup)
1125 CHK(verifyBlobValue(g_bh1, tup.m_bval1) == 0);
1126 if (! g_opt.m_oneblob)
1127 CHK(verifyBlobValue(g_bh2, tup.m_bval2) == 0);
1136 bool null = (v.m_val == 0);
1139 DBG(
"write " << h->
getColumn()->
getName() <<
" len=" << v.m_len <<
" null=" << null <<
" " << v);
1140 int error_code = v.m_error_code;
1146 CHK(h->getNull(isNull) == 0 && isNull ==
true);
1147 CHK(getBlobLength(h, len) == 0 && len == 0);
1155 unsigned m = g_opt.m_full ? v.m_len : urandom(v.m_len + 1);
1156 if (m > v.m_len - n)
1158 DBG(
"write pos=" << n <<
" cnt=" << m);
1161 }
while (n < v.m_len);
1162 assert(n == v.m_len);
1164 CHK(h->getNull(isNull) == 0 && isNull ==
false);
1165 CHK(getBlobLength(h, len) == 0 && len == v.m_len);
1171 writeBlobData(
Tup& tup,
int error_code = 0)
1173 tup.m_bval1.m_error_code = error_code;
1174 CHK(writeBlobData(g_bh1, tup.m_bval1) == 0);
1175 if (! g_opt.m_oneblob) {
1176 tup.m_bval2.m_error_code = error_code;
1177 CHK(writeBlobData(g_bh2, tup.m_bval2) == 0);
1185 bool null = (v.m_val == 0);
1188 DBG(
"read " << h->
getColumn()->
getName() <<
" len=" << v.m_len <<
" null=" << null);
1191 CHK(h->getNull(isNull) == 0 && isNull ==
true);
1192 CHK(getBlobLength(h, len) == 0 && len == 0);
1195 CHK(h->getNull(isNull) == 0 && isNull ==
false);
1196 CHK(getBlobLength(h, len) == 0 && len == v.m_len);
1199 while (n < v.m_len) {
1200 unsigned m = g_opt.m_full ? v.m_len : urandom(v.m_len + 1);
1201 if (m > v.m_len - n)
1203 DBG(
"read pos=" << n <<
" cnt=" << m);
1204 const unsigned m2 = m;
1205 CHK(h->
readData(v.m_buf + n, m) == 0);
1209 assert(n == v.m_len);
1211 CHK(g_con->
execute(NoCommit) == 0);
1212 for (
unsigned i = 0; i < v.m_len; i++)
1213 CHK(v.m_val[i] == v.m_buf[i]);
1219 readBlobData(
const Tup& tup)
1221 CHK(readBlobData(g_bh1, tup.m_bval1) == 0);
1222 if (! g_opt.m_oneblob)
1223 CHK(readBlobData(g_bh2, tup.m_bval2) == 0);
1232 blobWriteHook(
NdbBlob* h,
void* arg)
1234 DBG(
"blobWriteHook");
1236 CHK(writeBlobData(h, v) == 0);
1242 setBlobWriteHook(
NdbBlob* h,
Bval& v,
int error_code = 0)
1244 DBG(
"setBlobWriteHook");
1245 v.m_error_code = error_code;
1251 setBlobWriteHook(
Tup& tup,
int error_code = 0)
1253 CHK(setBlobWriteHook(g_bh1, tup.m_bval1, error_code) == 0);
1254 if (! g_opt.m_oneblob)
1255 CHK(setBlobWriteHook(g_bh2, tup.m_bval2, error_code) == 0);
1263 blobReadHook(
NdbBlob* h,
void* arg)
1265 DBG(
"blobReadHook");
1268 CHK(getBlobLength(h, len) == 0);
1270 Uint32 maxlen = 0xffffffff;
1271 CHK(h->
readData(v.m_buf, maxlen) == 0);
1272 DBG(
"read " << maxlen <<
" bytes");
1280 DBG(
"setBlobReadHook");
1286 setBlobReadHook(
Tup& tup)
1288 CHK(setBlobReadHook(g_bh1, tup.m_bval1) == 0);
1289 if (! g_opt.m_oneblob)
1290 CHK(setBlobReadHook(g_bh2, tup.m_bval2) == 0);
1295 tryRowLock(
Tup& tup,
bool exclusive)
1304 CHK(testOp->
equal(
"PK1", tup.m_pk1) == 0);
1305 if (g_opt.m_pk2chr.m_len != 0) {
1306 CHK(testOp->
equal(
"PK2", tup.m_pk2) == 0);
1307 CHK(testOp->
equal(
"PK3", tup.m_pk3) == 0);
1309 setUDpartId(tup, testOp);
1311 if (testTrans->
execute(Commit, AbortOnError) == 0)
1327 DBG(
"Error on tryRowLock, exclusive = " << exclusive
1337 verifyRowLocked(
Tup& tup)
1339 CHK(tryRowLock(tup,
true) == -2);
1344 verifyRowNotLocked(
Tup& tup)
1346 CHK(tryRowLock(tup,
true) == 0);
1360 NdbBlob::unpackBlobHead(head, ra->
aRef(), b.m_version);
1361 CHK(head.length == v.m_len);
1362 const char* data = ra->
aRef() + head.headsize;
1363 for (
unsigned i = 0; i < head.length && i < b.m_inline; i++)
1364 CHK(data[i] == v.m_val[i]);
1370 verifyHeadInline(
Tup& tup)
1372 DBG(
"verifyHeadInline pk1=" << hex << tup.m_pk1);
1376 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
1377 if (g_opt.m_pk2chr.m_len != 0) {
1378 CHK(g_opr->
equal(
"PK2", tup.pk2()) == 0);
1379 CHK(g_opr->
equal(
"PK3", (
char*)&tup.m_pk3) == 0);
1381 setUDpartId(tup, g_opr);
1385 CHK((ra1 = g_opr->
getValue(
"BL1")) != 0);
1386 if (! g_opt.m_oneblob)
1387 CHK((ra2 = g_opr->
getValue(
"BL2")) != 0);
1388 CHK((ra_frag = g_opr->
getValue(NdbDictionary::Column::FRAGMENT)) != 0);
1390 CHK(g_con->
execute(Commit, AbortOnError) == 0);
1392 DBG(
"fragment id: " << tup.m_frag);
1393 DBG(
"verifyHeadInline BL1");
1394 CHK(verifyHeadInline(g_blob1, tup.m_bval1, ra1) == 0);
1395 if (! g_opt.m_oneblob) {
1396 DBG(
"verifyHeadInline BL2");
1397 CHK(verifyHeadInline(g_blob2, tup.m_bval2, ra2) == 0);
1400 CHK(g_con->
execute(Commit, AbortOnError) == -1 &&
1410 getvarsize(
const char*
buf)
1412 const unsigned char* p = (
const unsigned char*)buf;
1413 return p[0] + (p[1] << 8);
1417 verifyBlobTable(
const Bval& v, Uint32 pk1, Uint32 frag,
bool exists)
1419 const Bcol& b = v.m_bcol;
1420 DBG(
"verify " << b.m_btname <<
" pk1=" << hex << pk1);
1428 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1429 enum OpState opState;
1440 if (b.m_version == 1) {
1441 CHK((ra_pk = g_ops->
getValue(
"PK")) != 0);
1442 CHK((ra_part = g_ops->
getValue(
"PART")) != 0);
1443 CHK((ra_data = g_ops->
getValue(
"DATA")) != 0);
1445 CHK((ra_pk1 = g_ops->
getValue(
"PK1")) != 0);
1446 if (g_opt.m_pk2chr.m_len != 0) {
1447 CHK((ra_pk2 = g_ops->
getValue(
"PK2")) != 0);
1448 CHK((ra_pk3 = g_ops->
getValue(
"PK3")) != 0);
1450 CHK((ra_part = g_ops->
getValue(
"NDB$PART")) != 0);
1451 CHK((ra_data = g_ops->
getValue(
"NDB$DATA")) != 0);
1458 CHK((ra_frag = g_ops->
getValue(NdbDictionary::Column::FRAGMENT)) != 0);
1459 CHK(g_con->
execute(NoCommit) == 0);
1461 if (! exists || v.m_len <= b.m_inline)
1464 partcount = (v.m_len - b.m_inline + b.m_partsize - 1) / b.m_partsize;
1465 char* seen =
new char [partcount];
1466 memset(seen, 0, partcount);
1472 CHK(conHasTimeoutError());
1477 DISP(
"Parts table scan failed due to timeout("
1478 << conError() <<
"). Retries left : "
1479 << opTimeoutRetries -1);
1480 CHK(--opTimeoutRetries);
1486 CHK(opState == Normal);
1487 CHK((ret == 0) || (ret == 1));
1490 if (b.m_version == 1) {
1499 DBG(
"part " << part <<
" of " << partcount <<
" from fragment " << frag2);
1500 CHK(part < partcount && ! seen[part]);
1502 unsigned n = b.m_inline + part * b.m_partsize;
1503 assert(exists && v.m_val != 0 && n < v.m_len);
1504 unsigned m = v.m_len -
n;
1505 if (m > b.m_partsize)
1507 const char* data = ra_data->
aRef();
1508 if (b.m_version == 1)
1515 unsigned sz = getvarsize(data);
1516 DBG(
"varsize " << sz);
1517 DBG(
"b.m_partsize " << b.m_partsize);
1518 CHK(sz <= b.m_partsize);
1520 if (part + 1 < partcount)
1521 CHK(sz == b.m_partsize);
1526 CHK(memcmp(data, v.m_val + n, m) == 0);
1527 if (b.m_version == 1 ||
1536 while (i < b.m_partsize) {
1537 CHK(data[i] == fillchr);
1541 DBG(
"frags main=" << frag <<
" blob=" << frag2 <<
" stripe=" << b.m_stripe);
1542 if (b.m_stripe == 0)
1546 if (opState == Normal)
1548 for (
unsigned i = 0; i < partcount; i++)
1554 }
while (opState == Retrying);
1562 verifyBlobTable(
const Tup& tup)
1564 CHK(verifyBlobTable(tup.m_bval1, tup.m_pk1, tup.m_frag, tup.m_exists) == 0);
1565 if (! g_opt.m_oneblob)
1566 CHK(verifyBlobTable(tup.m_bval2, tup.m_pk1, tup.m_frag, tup.m_exists) == 0);
1573 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
1574 Tup& tup = g_tups[k];
1575 DBG(
"verifyBlob pk1=" << hex << tup.m_pk1);
1576 CHK(verifyHeadInline(tup) == 0);
1577 CHK(verifyBlobTable(tup) == 0);
1583 rowIsLocked(
Tup& tup)
1592 CHK(testOp->
equal(
"PK1", tup.m_pk1) == 0);
1593 if (g_opt.m_pk2chr.m_len != 0)
1595 CHK(testOp->
equal(
"PK2", tup.m_pk2) == 0);
1596 CHK(testOp->
equal(
"PK3", tup.m_pk3) == 0);
1598 setUDpartId(tup, testOp);
1601 CHK(testTrans->
execute(Commit) == -1);
1614 insertPk(
int style,
int api)
1616 DBG(
"--- insertPk " << stylename[style] <<
" " << apiName[api] <<
" ---");
1619 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1620 enum OpState opState;
1626 for (; k < g_opt.m_rows; k++) {
1627 Tup& tup = g_tups[k];
1628 DBG(
"insertPk pk1=" << hex << tup.m_pk1);
1629 if (api == API_RECATTR)
1633 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
1634 if (g_opt.m_pk2chr.m_len != 0)
1636 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
1637 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
1639 setUDpartId(tup, g_opr);
1640 CHK(getBlobHandles(g_opr) == 0);
1644 memcpy(&tup.m_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
1645 if (g_opt.m_pk2chr.m_len != 0) {
1646 memcpy(&tup.m_row[g_pk2_offset], tup.m_pk2, g_opt.m_pk2chr.m_totlen);
1647 memcpy(&tup.m_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
1650 setUDpartIdNdbRecord(tup,
1653 CHK((g_const_opr = g_con->insertTuple(g_full_record,
1657 sizeof(opts))) != 0);
1658 CHK(getBlobHandles(g_const_opr) == 0);
1660 bool timeout=
false;
1662 CHK(setBlobValue(tup) == 0);
1663 }
else if (style == 1) {
1664 CHK(presetBH1(k) == 0);
1665 CHK(setBlobWriteHook(tup) == 0);
1667 CHK(presetBH1(k) == 0);
1668 CHK(g_con->
execute(NoCommit) == 0);
1669 if (writeBlobData(tup) == -1)
1670 CHK((timeout= conHasTimeoutError()) ==
true);
1674 (++n == g_opt.m_batch)) {
1675 if (g_con->
execute(Commit) == 0)
1683 CHK((timeout = conHasTimeoutError()) ==
true);
1691 DISP(
"Insert failed due to timeout("
1692 << conError() <<
") "
1693 <<
" Operations lost : " << n - 1
1694 <<
" Retries left : "
1695 << opTimeoutRetries -1);
1696 CHK(--opTimeoutRetries);
1707 tup.m_exists =
true;
1709 if (opState == Normal)
1712 CHK(g_con->
execute(Commit) == 0);
1717 }
while (opState == Retrying);
1723 readPk(
int style,
int api)
1725 DBG(
"--- readPk " << stylename[style] <<
" " << apiName[api] <<
" ---");
1726 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
1727 Tup& tup = g_tups[k];
1728 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1734 DBG(
"readPk pk1=" << hex << tup.m_pk1);
1748 if (api == API_RECATTR)
1752 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
1753 if (g_opt.m_pk2chr.m_len != 0)
1755 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
1756 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
1758 setUDpartId(tup, g_opr);
1759 CHK(getBlobHandles(g_opr) == 0);
1763 memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
1764 if (g_opt.m_pk2chr.m_len != 0) {
1765 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
1766 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
1769 setUDpartIdNdbRecord(tup,
1772 CHK((g_const_opr = g_con->readTuple(g_key_record, tup.m_key_row,
1773 g_blob_record, tup.m_row,
1777 sizeof(opts))) != 0);
1779 CHK(getBlobHandles(g_const_opr) == 0);
1781 bool timeout=
false;
1783 CHK(getBlobValue(tup) == 0);
1784 }
else if (style == 1) {
1785 CHK(setBlobReadHook(tup) == 0);
1787 CHK(g_con->
execute(NoCommit) == 0);
1788 if (readBlobData(tup) == -1)
1789 CHK((timeout= conHasTimeoutError()) ==
true);
1793 if (urandom(200) == 0)
1795 if (g_con->
execute(NoCommit) == 0)
1800 CHK(rowIsLocked(tup) == 0);
1801 CHK(g_con->
execute(Commit) == 0);
1805 CHK((timeout= conHasTimeoutError()) ==
true);
1810 if (g_con->
execute(Commit) != 0)
1812 CHK((timeout= conHasTimeoutError()) ==
true);
1818 DISP(
"ReadPk failed due to timeout("
1819 << conError() <<
") Retries left : "
1820 << opTimeoutRetries -1);
1821 CHK(--opTimeoutRetries);
1828 CHK((g_opr?g_opr:g_const_opr)->getLockMode() == NdbOperation::LM_Read);
1830 if (style == 0 || style == 1) {
1831 CHK(verifyBlobValue(tup) == 0);
1835 }
while (opState == Retrying);
1844 readLockPk(
int style,
int api)
1846 DBG(
"--- readLockPk " << stylename[style] <<
" " << apiName[api] <<
" ---");
1847 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
1848 Tup& tup = g_tups[k];
1849 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1855 DBG(
"readLockPk pk1=" << hex << tup.m_pk1);
1875 if (api == API_RECATTR)
1880 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
1881 if (g_opt.m_pk2chr.m_len != 0)
1883 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
1884 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
1886 setUDpartId(tup, g_opr);
1887 CHK(getBlobHandles(g_opr) == 0);
1890 CHK(g_opr->getLockHandle() != NULL);
1895 memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
1896 if (g_opt.m_pk2chr.m_len != 0) {
1897 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
1898 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
1901 setUDpartIdNdbRecord(tup,
1906 opts.optionsPresent |= NdbOperation::OperationOptions::OO_LOCKHANDLE;
1908 CHK((g_const_opr = g_con->readTuple(g_key_record, tup.m_key_row,
1909 g_blob_record, tup.m_row,
1913 sizeof(opts))) != 0);
1914 CHK(getBlobHandles(g_const_opr) == 0);
1916 bool timeout=
false;
1918 CHK(getBlobValue(tup) == 0);
1919 }
else if (style == 1) {
1920 CHK(setBlobReadHook(tup) == 0);
1922 CHK(g_con->
execute(NoCommit) == 0);
1923 if (readBlobData(tup) == -1)
1924 CHK((timeout= conHasTimeoutError()) ==
true);
1928 if (g_con->
execute(NoCommit) == 0)
1937 CHK((lmused == NdbOperation::LM_Read) ||
1940 if (style == 0 || style == 1) {
1941 CHK(verifyBlobValue(tup) == 0);
1945 if (urandom(200) == 0)
1946 CHK(verifyRowLocked(tup) == 0);
1949 CHK(g_bh1->
close() == 0);
1950 CHK(g_bh1->
getState() == NdbBlob::Closed);
1951 if (! g_opt.m_oneblob)
1953 CHK(g_bh2->
close() == 0);
1954 CHK(g_bh2->
getState() == NdbBlob::Closed);
1960 CHK(g_bh1->
readData(&byte, len) != 0);
1962 CHK(g_bh1->
close() != 0);
1964 if(! g_opt.m_oneblob)
1966 CHK(g_bh2->
readData(&byte, len) != 0);
1968 CHK(g_bh2->
close() != 0);
1979 const NdbOperation* readOp = (g_opr?g_opr:g_const_opr);
1983 CHK(unlockOp != NULL);
1990 CHK(g_con->
execute(NoCommit) == 0);
1991 CHK(verifyRowNotLocked(tup) == 0);
1993 if (g_con->
execute(Commit) != 0)
1995 CHK((timeout= conHasTimeoutError()) ==
true);
2000 CHK((timeout= conHasTimeoutError()) ==
true);
2005 DISP(
"ReadLockPk failed due to timeout on read("
2006 << conError() <<
") Retries left : "
2007 << opTimeoutRetries -1);
2008 CHK(--opTimeoutRetries);
2014 }
while (opState == Retrying);
2023 updatePk(
int style,
int api)
2025 DBG(
"--- updatePk " << stylename[style] <<
" " << apiName[api] <<
" ---");
2026 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
2027 Tup& tup = g_tups[k];
2028 DBG(
"updatePk pk1=" << hex << tup.m_pk1);
2029 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2035 int mode = urandom(3);
2036 int error_code = mode == 0 ? 0 : 4275;
2038 if (api == API_RECATTR)
2042 DBG(
"using updateTuple");
2044 }
else if (mode == 1) {
2045 DBG(
"using readTuple exclusive");
2048 DBG(
"using readTuple - will fail and retry");
2051 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
2052 if (g_opt.m_pk2chr.m_len != 0)
2054 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
2055 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
2057 setUDpartId(tup, g_opr);
2058 CHK(getBlobHandles(g_opr) == 0);
2062 memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
2063 if (g_opt.m_pk2chr.m_len != 0) {
2064 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2065 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2068 setUDpartIdNdbRecord(tup,
2072 DBG(
"using updateTuple");
2073 CHK((g_const_opr= g_con->updateTuple(g_key_record, tup.m_key_row,
2074 g_blob_record, tup.m_row,
2075 NULL, &opts,
sizeof(opts))) != 0);
2076 }
else if (mode == 1) {
2077 DBG(
"using readTuple exclusive");
2078 CHK((g_const_opr= g_con->readTuple(g_key_record, tup.m_key_row,
2079 g_blob_record, tup.m_row,
2081 NULL, &opts,
sizeof(opts))) != 0);
2083 DBG(
"using readTuple - will fail and retry");
2084 CHK((g_const_opr= g_con->readTuple(g_key_record, tup.m_key_row,
2085 g_blob_record, tup.m_row,
2086 NdbOperation::LM_Read,
2087 NULL, &opts,
sizeof(opts))) != 0);
2089 CHK(getBlobHandles(g_const_opr) == 0);
2092 bool timeout=
false;
2094 CHK(setBlobValue(tup, error_code) == 0);
2095 }
else if (style == 1) {
2096 CHK(setBlobWriteHook(tup, error_code) == 0);
2098 CHK(g_con->
execute(NoCommit) == 0);
2099 if (writeBlobData(tup, error_code) != 0)
2100 CHK((timeout= conHasTimeoutError()) ==
true);
2103 (error_code == 0)) {
2105 if (g_con->
execute(Commit) != 0)
2106 CHK((timeout= conHasTimeoutError()) ==
true);
2115 DISP(
"UpdatePk failed due to timeout("
2116 << conError() <<
") Retries left : "
2117 << opTimeoutRetries -1);
2118 CHK(--opTimeoutRetries);
2127 }
while (opState == Retrying);
2131 tup.m_exists =
true;
2137 writePk(
int style,
int api)
2139 DBG(
"--- writePk " << stylename[style] <<
" " << apiName[api] <<
" ---");
2140 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
2141 Tup& tup = g_tups[k];
2142 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2143 enum OpState opState;
2148 DBG(
"writePk pk1=" << hex << tup.m_pk1);
2150 if (api == API_RECATTR)
2154 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
2155 if (g_opt.m_pk2chr.m_len != 0)
2157 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
2158 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
2160 setUDpartId(tup, g_opr);
2161 CHK(getBlobHandles(g_opr) == 0);
2165 memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
2166 memcpy(&tup.m_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
2167 if (g_opt.m_pk2chr.m_len != 0) {
2168 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2169 memcpy(&tup.m_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2170 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2171 memcpy(&tup.m_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2174 setUDpartIdNdbRecord(tup,
2177 CHK((g_const_opr= g_con->writeTuple(g_key_record, tup.m_key_row,
2178 g_full_record, tup.m_row,
2179 NULL, &opts,
sizeof(opts))) != 0);
2180 CHK(getBlobHandles(g_const_opr) == 0);
2182 bool timeout=
false;
2184 CHK(setBlobValue(tup) == 0);
2185 }
else if (style == 1) {
2186 CHK(presetBH1(k) == 0);
2187 CHK(setBlobWriteHook(tup) == 0);
2189 CHK(presetBH1(k) == 0);
2190 CHK(g_con->
execute(NoCommit) == 0);
2191 if (writeBlobData(tup) != 0)
2192 CHK((timeout= conHasTimeoutError()) ==
true);
2197 if (g_con->
execute(Commit) != 0)
2198 CHK((timeout= conHasTimeoutError()) ==
true);
2202 DISP(
"WritePk failed due to timeout("
2203 << conError() <<
") Retries left : "
2204 << opTimeoutRetries -1);
2205 CHK(--opTimeoutRetries);
2211 }
while (opState == Retrying);
2216 tup.m_exists =
true;
2224 DBG(
"--- deletePk " << apiName[api] <<
" ---");
2227 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2228 enum OpState opState;
2234 for (; k < g_opt.m_rows; k++) {
2235 Tup& tup = g_tups[k];
2236 DBG(
"deletePk pk1=" << hex << tup.m_pk1);
2237 if (api == API_RECATTR)
2245 setUDpartId(tup, g_opr);
2246 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
2247 if (g_opt.m_pk2chr.m_len != 0)
2249 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
2250 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
2255 memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
2256 if (g_opt.m_pk2chr.m_len != 0) {
2257 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2258 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2261 setUDpartIdNdbRecord(tup,
2264 CHK((g_const_opr= g_con->deleteTuple(g_key_record, tup.m_key_row,
2265 g_full_record, NULL,
2266 NULL, &opts,
sizeof(opts))) != 0);
2268 if (++n == g_opt.m_batch) {
2269 if (g_con->
execute(Commit) != 0)
2271 CHK(conHasTimeoutError());
2272 DISP(
"DeletePk failed due to timeout("
2273 << conError() <<
") Retries left : "
2274 << opTimeoutRetries -1);
2275 CHK(--opTimeoutRetries);
2290 tup.m_exists =
false;
2292 if (opState == Normal)
2295 if (g_con->
execute(Commit) != 0)
2297 CHK(conHasTimeoutError());
2298 DISP(
"DeletePk failed on last batch ("
2299 << conError() <<
") Retries left : "
2300 << opTimeoutRetries -1);
2301 CHK(--opTimeoutRetries);
2311 }
while (opState == Retrying);
2319 DBG(
"--- deleteNoPk ---");
2321 no_tup.m_pk1 = 0xb1ff;
2322 const Chr& pk2chr = g_opt.m_pk2chr;
2323 if (pk2chr.m_len != 0) {
2324 char*
const p = no_tup.m_pk2;
2325 uint len = urandom(pk2chr.m_len + 1);
2327 if (! pk2chr.m_fixed) {
2328 *(uchar*)&p[0] = len;
2333 p[
i] =
"b1ff"[j % 4];
2338 no_tup.m_pk3 = 0xb1ff;
2341 DBG(
"deletePk pk1=" << hex << tup.m_pk1);
2344 setUDpartId(tup, g_opr);
2345 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
2346 if (pk2chr.m_len != 0) {
2347 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
2348 CHK(g_opr->
equal(
"PK3", (
char*)&tup.m_pk2) == 0);
2350 CHK(g_con->
execute(Commit) == -1);
2364 readIdx(
int style,
int api)
2366 DBG(
"--- readIdx " << stylename[style] <<
" " << apiName[api] <<
" ---");
2367 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
2368 Tup& tup = g_tups[k];
2369 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2370 enum OpState opState;
2375 DBG(
"readIdx pk1=" << hex << tup.m_pk1);
2389 if (api == API_RECATTR)
2393 CHK(g_opx->
equal(
"PK2", tup.m_pk2) == 0);
2394 CHK(g_opx->
equal(
"PK3", tup.m_pk3) == 0);
2396 CHK(getBlobHandles(g_opx) == 0);
2400 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2401 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2403 CHK((g_const_opr= g_con->readTuple(g_idx_record, tup.m_key_row,
2404 g_blob_record, tup.m_row,
2406 CHK(getBlobHandles(g_const_opr) == 0);
2409 bool timeout=
false;
2411 CHK(getBlobValue(tup) == 0);
2412 }
else if (style == 1) {
2413 CHK(setBlobReadHook(tup) == 0);
2415 if(g_con->
execute(NoCommit) ||
2417 CHK((timeout= conHasTimeoutError()) ==
true);
2421 if (g_con->
execute(Commit) != 0)
2423 CHK((timeout= conHasTimeoutError()) ==
true);
2429 CHK((g_opx?g_opx:g_const_opr)->getLockMode() == NdbOperation::LM_Read);
2430 if (style == 0 || style == 1) {
2431 CHK(verifyBlobValue(tup) == 0);
2436 DISP(
"Timeout while reading via index ("
2437 << conError() <<
") Retries left : "
2438 << opTimeoutRetries -1);
2439 CHK(--opTimeoutRetries);
2445 }
while (opState == Retrying);
2454 updateIdx(
int style,
int api)
2456 DBG(
"--- updateIdx " << stylename[style] <<
" " << apiName[api] <<
" ---");
2457 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
2458 Tup& tup = g_tups[k];
2459 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2460 enum OpState opState;
2465 DBG(
"updateIdx pk1=" << hex << tup.m_pk1);
2468 if (api == API_RECATTR)
2472 CHK(g_opx->
equal(
"PK2", tup.m_pk2) == 0);
2473 CHK(g_opx->
equal(
"PK3", tup.m_pk3) == 0);
2475 CHK(getBlobHandles(g_opx) == 0);
2479 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2480 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2482 CHK((g_const_opr= g_con->updateTuple(g_idx_record, tup.m_key_row,
2483 g_blob_record, tup.m_row)) != 0);
2484 CHK(getBlobHandles(g_const_opr) == 0);
2486 bool timeout=
false;
2488 CHK(setBlobValue(tup) == 0);
2489 }
else if (style == 1) {
2490 CHK(setBlobWriteHook(tup) == 0);
2492 if (g_con->
execute(NoCommit) ||
2494 CHK((timeout= conHasTimeoutError()) ==
true);
2498 if (g_con->
execute(Commit) != 0)
2499 CHK((timeout= conHasTimeoutError()) ==
true);
2503 DISP(
"Timeout in Index Update ("
2504 << conError() <<
") Retries left : "
2505 << opTimeoutRetries-1);
2506 CHK(--opTimeoutRetries);
2511 }
while (opState == Retrying);
2515 tup.m_exists =
true;
2521 writeIdx(
int style,
int api)
2523 DBG(
"--- writeIdx " << stylename[style] <<
" " << apiName[api] <<
" ---");
2524 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
2525 Tup& tup = g_tups[k];
2526 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2527 enum OpState opState;
2532 DBG(
"writeIdx pk1=" << hex << tup.m_pk1);
2534 if (api == API_RECATTR)
2538 CHK(g_opx->
equal(
"PK2", tup.m_pk2) == 0);
2539 CHK(g_opx->
equal(
"PK3", tup.m_pk3) == 0);
2541 CHK(getBlobHandles(g_opx) == 0);
2545 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2546 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2547 memcpy(&tup.m_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
2548 memcpy(&tup.m_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2549 memcpy(&tup.m_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2551 CHK((g_const_opr= g_con->writeTuple(g_idx_record, tup.m_key_row,
2552 g_full_record, tup.m_row)) != 0);
2553 CHK(getBlobHandles(g_const_opr) == 0);
2555 bool timeout=
false;
2557 CHK(setBlobValue(tup) == 0);
2558 }
else if (style == 1) {
2561 CHK(setBlobWriteHook(tup) == 0);
2565 if (g_con->
execute(NoCommit) ||
2567 CHK((timeout= conHasTimeoutError()) ==
true);
2572 CHK((timeout= conHasTimeoutError()) ==
true);
2576 DISP(
"Timeout in Index Write ("
2577 << conError() <<
") Retries left : "
2578 << opTimeoutRetries-1);
2579 CHK(--opTimeoutRetries);
2584 }
while (opState == Retrying);
2588 tup.m_exists =
true;
2596 DBG(
"--- deleteIdx " << apiName[api] <<
" ---");
2599 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2600 enum OpState opState;
2606 for (; k < g_opt.m_rows; k++) {
2607 Tup& tup = g_tups[k];
2608 DBG(
"deleteIdx pk1=" << hex << tup.m_pk1);
2609 if (api == API_RECATTR)
2613 CHK(g_opx->
equal(
"PK2", tup.m_pk2) == 0);
2614 CHK(g_opx->
equal(
"PK3", tup.m_pk3) == 0);
2619 memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2620 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
2622 CHK((g_const_opr= g_con->deleteTuple(g_idx_record, tup.m_key_row,
2623 g_full_record)) != 0);
2625 if (++n == g_opt.m_batch) {
2628 CHK(conHasTimeoutError());
2629 DISP(
"Timeout deleteing via index ("
2630 << conError() <<
") Retries left :"
2631 << opTimeoutRetries-1);
2632 CHK(--opTimeoutRetries);
2647 tup.m_exists =
false;
2649 if ((opState == Normal) &&
2653 CHK(conHasTimeoutError());
2654 DISP(
"Timeout on last idx delete batch ("
2655 << conError() <<
") Retries left :"
2656 << opTimeoutRetries-1);
2657 CHK(--opTimeoutRetries);
2665 }
while (opState == Retrying);
2675 readScan(
int style,
int api,
bool idx)
2677 DBG(
"--- " <<
"readScan" << (idx ?
"Idx" :
"") <<
" " << stylename[style] <<
" " << apiName[api] <<
" ---");
2681 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2682 enum OpState opState;
2700 if (api == API_RECATTR)
2711 CHK(g_ops->
getValue(
"PK1", (
char*)&tup.m_pk1) != 0);
2712 if (g_opt.m_pk2chr.m_len != 0)
2714 CHK(g_ops->
getValue(
"PK2", tup.m_pk2) != 0);
2715 CHK(g_ops->
getValue(
"PK3", (
char *) &tup.m_pk3) != 0);
2718 CHK(getBlobHandles(g_ops) == 0);
2724 CHK((g_ops= g_con->
scanTable(g_full_record,
2727 CHK((g_ops= g_con->
scanIndex(g_ord_record, g_full_record,
2729 CHK(getBlobHandles(g_ops) == 0);
2733 CHK(getBlobValue(tup) == 0);
2734 }
else if (style == 1) {
2735 CHK(setBlobReadHook(tup) == 0);
2739 CHK(conHasTimeoutError());
2740 DISP(
"Timeout scan read ("
2742 <<
"). Retries left : "
2743 << opTimeoutRetries - 1);
2744 CHK(--opTimeoutRetries);
2756 if (api == API_RECATTR)
2758 tup.m_pk1 = (Uint32)-1;
2759 memset(tup.m_pk2,
'x', g_opt.m_pk2chr.m_len);
2765 const char *out_row= NULL;
2767 if (0 == (ret = g_ops->
nextResult(&out_row,
true,
false)))
2769 memcpy(&tup.m_pk1, &out_row[g_pk1_offset],
sizeof(tup.m_pk1));
2770 if (g_opt.m_pk2chr.m_len != 0)
2772 memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
2773 memcpy(&tup.m_pk3, &out_row[g_pk3_offset],
sizeof(tup.m_pk3));
2781 if (conHasTimeoutError())
2786 DISP(
"Scan read failed due to deadlock timeout ("
2787 << conError() <<
") retries left :"
2788 << opTimeoutRetries -1);
2789 CHK(--opTimeoutRetries);
2796 CHK(opState == Normal);
2797 CHK((ret == 0) || (ret == 1));
2801 DBG(
"readScan" << (idx ?
"Idx" :
"") <<
" pk1=" << hex << tup.m_pk1);
2802 Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
2803 CHK(k < g_opt.m_rows && g_tups[k].m_exists);
2804 tup.copyfrom(g_tups[k]);
2806 CHK(verifyBlobValue(tup) == 0);
2807 }
else if (style == 1) {
2809 CHK(verifyBlobValue(tup) == 0);
2811 if (readBlobData(tup))
2813 CHK(conHasTimeoutError());
2814 DISP(
"Timeout in readScan("
2816 <<
") Retries left : "
2817 << opTimeoutRetries - 1);
2818 CHK(--opTimeoutRetries);
2828 if (opState == Normal)
2829 CHK(g_opt.m_rows == rows);
2831 }
while (opState == Retrying);
2839 updateScan(
int style,
int api,
bool idx)
2841 DBG(
"--- " <<
"updateScan" << (idx ?
"Idx" :
"") <<
" " << stylename[style] <<
" " << apiName[api] <<
" ---");
2845 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2846 enum OpState opState;
2852 if (api == API_RECATTR)
2863 CHK(g_ops->
getValue(
"PK1", (
char*)&tup.m_pk1) != 0);
2864 if (g_opt.m_pk2chr.m_len != 0)
2866 CHK(g_ops->
getValue(
"PK2", tup.m_pk2) != 0);
2867 CHK(g_ops->
getValue(
"PK3", (
char *) &tup.m_pk3) != 0);
2875 CHK((g_ops= g_con->
scanTable(g_key_record,
2878 CHK((g_ops= g_con->
scanIndex(g_ord_record, g_key_record,
2881 CHK(g_con->
execute(NoCommit) == 0);
2884 const char *out_row= NULL;
2887 if (api == API_RECATTR)
2889 tup.m_pk1 = (Uint32)-1;
2890 memset(tup.m_pk2,
'x', g_opt.m_pk2chr.m_totlen);
2897 if(0 == (ret = g_ops->
nextResult(&out_row,
true,
false)))
2899 memcpy(&tup.m_pk1, &out_row[g_pk1_offset],
sizeof(tup.m_pk1));
2900 if (g_opt.m_pk2chr.m_len != 0) {
2901 memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
2902 memcpy(&tup.m_pk3, &out_row[g_pk3_offset],
sizeof(tup.m_pk3));
2910 if (conHasTimeoutError())
2915 DISP(
"Scan update failed due to deadlock timeout ("
2916 << conError() <<
"), retries left :"
2917 << opTimeoutRetries -1);
2918 CHK(--opTimeoutRetries);
2925 CHK(opState == Normal);
2926 CHK((ret == 0) || (ret == 1));
2930 DBG(
"updateScan" << (idx ?
"Idx" :
"") <<
" pk1=" << hex << tup.m_pk1);
2931 Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
2932 CHK(k < g_opt.m_rows && g_tups[k].m_exists);
2934 calcBval(g_tups[k],
false);
2935 tup.copyfrom(g_tups[k]);
2937 if (api == API_RECATTR)
2940 CHK(getBlobHandles(g_opr) == 0);
2944 CHK((g_const_opr = g_ops->
updateCurrentTuple(g_con, g_blob_record, tup.m_row)) != 0);
2945 CHK(getBlobHandles(g_const_opr) == 0);
2947 bool timeout=
false;
2949 CHK(setBlobValue(tup) == 0);
2950 }
else if (style == 1) {
2951 CHK(setBlobWriteHook(tup) == 0);
2953 CHK(g_con->
execute(NoCommit) == 0);
2954 if (writeBlobData(tup))
2955 CHK((timeout= conHasTimeoutError()) ==
true);
2959 CHK((timeout= conHasTimeoutError()) ==
true);
2963 DISP(
"Scan update timeout("
2965 <<
") Retries left : "
2966 << opTimeoutRetries-1);
2967 CHK(opTimeoutRetries--);
2977 if (opState == Normal)
2979 CHK(g_con->
execute(Commit) == 0);
2980 CHK(g_opt.m_rows == rows);
2983 }
while (opState == Retrying);
2990 lockUnlockScan(
int style,
int api,
bool idx)
2992 DBG(
"--- " <<
"lockUnlockScan" << (idx ?
"Idx" :
"") <<
" " << stylename[style] <<
" " << apiName[api] <<
" ---");
2996 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2997 enum OpState opState;
3004 if (urandom(2) == 0)
3007 Uint32 scanFlags = g_scanFlags | NdbScanOperation::SF_KeyInfo;
3009 if (api == API_RECATTR)
3020 CHK(g_ops->
getValue(
"PK1", (
char*)&tup.m_pk1) != 0);
3021 if (g_opt.m_pk2chr.m_len != 0)
3023 CHK(g_ops->
getValue(
"PK2", tup.m_pk2) != 0);
3024 CHK(g_ops->
getValue(
"PK3", (
char *) &tup.m_pk3) != 0);
3031 opts.optionsPresent = NdbScanOperation::ScanOptions::SO_SCANFLAGS;
3032 opts.scan_flags = scanFlags;
3036 CHK((g_ops= g_con->
scanTable(g_key_record,
3037 lm, 0, &opts,
sizeof(opts))) != 0);
3039 CHK((g_ops= g_con->
scanIndex(g_ord_record, g_key_record,
3040 lm, 0, 0, &opts,
sizeof(opts))) != 0);
3042 CHK(g_con->
execute(NoCommit) == 0);
3045 const char *out_row= NULL;
3048 if (api == API_RECATTR)
3050 tup.m_pk1 = (Uint32)-1;
3051 memset(tup.m_pk2,
'x', g_opt.m_pk2chr.m_totlen);
3058 if(0 == (ret = g_ops->
nextResult(&out_row,
true,
false)))
3060 memcpy(&tup.m_pk1, &out_row[g_pk1_offset],
sizeof(tup.m_pk1));
3061 if (g_opt.m_pk2chr.m_len != 0) {
3062 memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
3063 memcpy(&tup.m_pk3, &out_row[g_pk3_offset],
sizeof(tup.m_pk3));
3071 if (conHasTimeoutError())
3076 DISP(
"Scan failed due to deadlock timeout ("
3077 << conError() <<
"), retries left :"
3078 << opTimeoutRetries -1);
3079 CHK(--opTimeoutRetries);
3086 CHK(opState == Normal);
3087 CHK((ret == 0) || (ret == 1));
3091 DBG(
"lockUnlockScan" << (idx ?
"Idx" :
"") <<
" pk1=" << hex << tup.m_pk1);
3093 Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
3094 CHK(k < g_opt.m_rows && g_tups[k].m_exists);
3095 tup.copyfrom(g_tups[k]);
3097 if (api == API_RECATTR)
3100 CHK(g_opr->getLockHandle() != NULL);
3101 CHK(getBlobHandles(g_opr) == 0);
3106 opts.optionsPresent = NdbOperation::OperationOptions::OO_LOCKHANDLE;
3107 CHK((g_const_opr = g_ops->
lockCurrentTuple(g_con, g_blob_record, tup.m_row,
3108 0, &opts,
sizeof(opts))) != 0);
3109 CHK(getBlobHandles(g_const_opr) == 0);
3111 bool timeout=
false;
3113 CHK(getBlobValue(tup) == 0);
3114 }
else if (style == 1) {
3115 CHK(setBlobReadHook(tup) == 0);
3117 CHK(g_con->
execute(NoCommit) == 0);
3118 if (readBlobData(tup))
3119 CHK((timeout= conHasTimeoutError()) ==
true);
3123 if (g_con->
execute(NoCommit) == 0)
3132 CHK((lmused == NdbOperation::LM_Read) ||
3135 if (style == 0 || style == 1) {
3136 CHK(verifyBlobValue(tup) == 0);
3140 if (urandom(200) == 0)
3141 CHK(verifyRowLocked(tup) == 0);
3144 CHK(g_bh1->
close() == 0);
3145 if (! g_opt.m_oneblob)
3146 CHK(g_bh2->
close() == 0);
3154 const NdbOperation* readOp = (g_opr?g_opr:g_const_opr);
3158 CHK(unlockOp != NULL);
3164 CHK(g_con->
execute(NoCommit) == 0);
3168 CHK((timeout= conHasTimeoutError()) ==
true);
3174 DISP(
"Scan read lock unlock timeout("
3176 <<
") Retries left : "
3177 << opTimeoutRetries-1);
3178 CHK(opTimeoutRetries--);
3188 if (opState == Normal)
3194 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
3195 CHK(verifyRowNotLocked(g_tups[k]) == 0);
3198 CHK(g_con->
execute(Commit) == 0);
3199 CHK(g_opt.m_rows == rows);
3202 }
while (opState == Retrying);
3209 deleteScan(
int api,
bool idx)
3211 DBG(
"--- " <<
"deleteScan" << (idx ?
"Idx" :
"") << apiName[api] <<
" ---");
3213 Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
3214 enum OpState opState;
3223 if (api == API_RECATTR)
3234 CHK(g_ops->
getValue(
"PK1", (
char*)&tup.m_pk1) != 0);
3235 if (g_opt.m_pk2chr.m_len != 0)
3237 CHK(g_ops->
getValue(
"PK2", tup.m_pk2) != 0);
3238 CHK(g_ops->
getValue(
"PK3", (
char *) &tup.m_pk3) != 0);
3246 CHK((g_ops= g_con->
scanTable(g_key_record,
3249 CHK((g_ops= g_con->
scanIndex(g_ord_record, g_key_record,
3252 CHK(g_con->
execute(NoCommit) == 0);
3257 if (api == API_RECATTR)
3259 tup.m_pk1 = (Uint32)-1;
3260 memset(tup.m_pk2,
'x', g_opt.m_pk2chr.m_len);
3266 const char *out_row= NULL;
3268 if (0 == (ret = g_ops->
nextResult(&out_row,
true,
false)))
3270 memcpy(&tup.m_pk1, &out_row[g_pk1_offset],
sizeof(tup.m_pk1));
3271 if (g_opt.m_pk2chr.m_len != 0)
3273 memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
3274 memcpy(&tup.m_pk3, &out_row[g_pk3_offset],
sizeof(tup.m_pk3));
3282 if (conHasTimeoutError())
3287 DISP(
"Scan delete failed due to deadlock timeout ("
3288 << conError() <<
") retries left :"
3289 << opTimeoutRetries -1);
3290 CHK(--opTimeoutRetries);
3297 CHK(opState == Normal);
3298 CHK((ret == 0) || (ret == 1));
3303 DBG(
"deleteScan" << (idx ?
"Idx" :
"") <<
" pk1=" << hex << tup.m_pk1);
3304 Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
3305 CHK(k < g_opt.m_rows && g_tups[k].m_exists);
3306 g_tups[k].m_exists =
false;
3307 if (api == API_RECATTR)
3311 tup.m_pk1 = (Uint32)-1;
3312 memset(tup.m_pk2,
'x', g_opt.m_pk2chr.m_len);
3314 if (api == API_RECATTR)
3318 const char *out_row= NULL;
3319 ret = g_ops->
nextResult(&out_row,
false,
false);
3322 memcpy(&tup.m_pk1, &out_row[g_pk1_offset],
sizeof(tup.m_pk1));
3323 if (g_opt.m_pk2chr.m_len != 0)
3325 memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
3326 memcpy(&tup.m_pk3, &out_row[g_pk3_offset],
sizeof(tup.m_pk3));
3334 if (conHasTimeoutError())
3339 DISP(
"Scan delete failed due to deadlock timeout ("
3340 << conError() <<
") retries left :"
3341 << opTimeoutRetries -1);
3342 CHK(--opTimeoutRetries);
3349 CHK(opState == Normal);
3350 CHK((ret == 0) || (ret == 1) || (ret == 2));
3352 if (++n == g_opt.m_batch || ret == 2) {
3353 DBG(
"execute batch: n=" << n <<
" ret=" << ret);
3354 if (! g_opt.m_fac) {
3355 CHK(g_con->
execute(NoCommit) == 0);
3357 CHK(g_con->
execute(Commit) == 0);
3366 if (opState == Retrying)
3369 if (opState == Normal)
3372 CHK(g_con->
execute(Commit) == 0);
3373 CHK(g_opt.m_rows == rows);
3377 }
while (opState == Retrying);
3396 operationName(OpTypes optype)
3418 return "Bad operation type";
3423 aoName(
int abortOption)
3425 if (abortOption == 0)
3426 return "AbortOnError";
3427 return "IgnoreError";
3435 case PkRead:
case PkInsert :
case PkUpdate:
3436 case PkWrite :
case PkDelete :
3475 setUDpartId(tup, op);
3476 CHK(op->
equal(
"PK1", tup.m_pk1) == 0);
3477 if (g_opt.m_pk2chr.m_len != 0)
3479 CHK(op->
equal(
"PK2", tup.m_pk2) == 0);
3480 CHK(op->
equal(
"PK3", tup.m_pk3) == 0);
3485 CHK(op->
equal(
"PK2", tup.m_pk2) == 0);
3486 CHK(op->
equal(
"PK3", tup.m_pk3) == 0);
3489 CHK(getBlobHandles(op) == 0);
3494 CHK(getBlobValue(tup) == 0);
3502 CHK(setBlobValue(tup) == 0);
3540 struct ExpectedOutcome
3543 int transactionErrorCode;
3559 ExpectedOutcome outcomes[9][2]=
3591 DBG(
"bugtest_36756 : IgnoreError Delete of nonexisting tuple aborts");
3592 DBG(
" Also 36851 : Insert IgnoreError of existing tuple aborts");
3594 for (
int iterations=0; iterations < 50; iterations++)
3603 Tup& tupExists = g_tups[0];
3604 Tup& tupDoesNotExist = g_tups[1];
3610 CHK(g_opr->
equal(
"PK1", tupExists.m_pk1) == 0);
3611 if (g_opt.m_pk2chr.m_len != 0)
3613 CHK(g_opr->
equal(
"PK2", tupExists.m_pk2) == 0);
3614 CHK(g_opr->
equal(
"PK3", tupExists.m_pk3) == 0);
3616 setUDpartId(tupExists, g_opr);
3617 CHK(getBlobHandles(g_opr) == 0);
3619 CHK(setBlobValue(tupExists) == 0);
3621 CHK(g_con->
execute(Commit) == 0);
3624 DBG(
"Iteration : " << iterations);
3625 for (
int optype=PkRead; optype <= UkDelete; optype++)
3627 DBG(
" " << operationName((OpTypes)optype));
3629 Tup* tup1= &tupExists;
3630 Tup* tup2= &tupDoesNotExist;
3632 if (optype == PkInsert)
3639 tup1= &tupDoesNotExist;
3643 for (
int abortOption=0; abortOption < 2; abortOption++)
3645 DBG(
" " << aoName(abortOption));
3654 CHK(setupOperation(opr1, (OpTypes)optype, *tup1) == 0);
3657 CHK(setupOperation(opr2, (OpTypes)optype, *tup2) == 0);
3659 ExpectedOutcome eo= outcomes[optype][abortOption];
3663 DBG(
"execute returned " << rc <<
3669 CHK(rc == eo.executeRc);
3683 setUDpartId(tupExists, g_opr);
3684 CHK(g_opr->
equal(
"PK1", tupExists.m_pk1) == 0);
3685 if (g_opt.m_pk2chr.m_len != 0)
3687 CHK(g_opr->
equal(
"PK2", tupExists.m_pk2) == 0);
3688 CHK(g_opr->
equal(
"PK3", tupExists.m_pk3) == 0);
3691 CHK(g_con->
execute(Commit) == 0);
3712 DBG(
"bugtest_45768 : Batched blob transaction with abort followed by commit");
3714 const int numIterations = 5;
3716 for (
int iteration=0; iteration < numIterations; iteration++)
3725 const Uint32 totalRows = 100;
3726 const Uint32 preExistingTupNum = totalRows / 2;
3728 Tup& tupExists = g_tups[ preExistingTupNum ];
3734 CHK(g_opr->
equal(
"PK1", tupExists.m_pk1) == 0);
3735 if (g_opt.m_pk2chr.m_len != 0)
3737 CHK(g_opr->
equal(
"PK2", tupExists.m_pk2) == 0);
3738 CHK(g_opr->
equal(
"PK3", tupExists.m_pk3) == 0);
3740 setUDpartId(tupExists, g_opr);
3741 CHK(getBlobHandles(g_opr) == 0);
3743 CHK(setBlobValue(tupExists) == 0);
3745 CHK(g_con->
execute(Commit) == 0);
3748 DBG(
"Iteration : " << iteration);
3760 for (Uint32 tupNum = 0; tupNum < totalRows ; tupNum++)
3762 Tup& tup = g_tups[ tupNum ];
3765 CHK(g_opr->
equal(
"PK1", tup.m_pk1) == 0);
3766 if (g_opt.m_pk2chr.m_len != 0)
3768 CHK(g_opr->
equal(
"PK2", tup.m_pk2) == 0);
3769 CHK(g_opr->
equal(
"PK3", tup.m_pk3) == 0);
3771 setUDpartId(tup, g_opr);
3773 CHK(getBlobHandles(g_opr) == 0);
3774 CHK(setBlobValue(tup) == 0);
3787 DBG(
"Send Buffers overloaded, retrying");
3790 }
while (retries--);
3807 setUDpartId(tupExists, g_opr);
3808 CHK(g_opr->
equal(
"PK1", tupExists.m_pk1) == 0);
3809 if (g_opt.m_pk2chr.m_len != 0)
3811 CHK(g_opr->
equal(
"PK2", tupExists.m_pk2) == 0);
3812 CHK(g_opr->
equal(
"PK3", tupExists.m_pk3) == 0);
3815 CHK(g_con->
execute(Commit) == 0);
3826 static int bugtest_48040()
3846 DBG(
"bugtest 48040 - Infinite ContinueB loop in TC abort + unique");
3848 restarter.waitConnected();
3850 int rc = restarter.insertErrorInAllNodes(8082);
3852 DBG(
" Initial error insert rc" << rc << endl);
3854 rc = bugtest_45768();
3858 restarter.insertErrorInAllNodes(0);
3864 static int bugtest_62321()
3871 DBG(
"bugtest_62321 : Error code from other ops in batch obscured");
3885 Tup& tupExists = g_tups[0];
3886 Tup& notExists = g_tups[1];
3891 CHK(g_opr->
equal(
"PK1", tupExists.m_pk1) == 0);
3892 if (g_opt.m_pk2chr.m_len != 0)
3894 CHK(g_opr->
equal(
"PK2", tupExists.m_pk2) == 0);
3895 CHK(g_opr->
equal(
"PK3", tupExists.m_pk3) == 0);
3897 setUDpartId(tupExists, g_opr);
3898 CHK(getBlobHandles(g_opr) == 0);
3900 CHK(setBlobValue(tupExists) == 0);
3902 CHK(g_con->
execute(Commit) == 0);
3906 for (
int scenario = 0; scenario < 4; scenario++)
3908 DBG(
" Scenario : " << scenario);
3911 if ((scenario & 0x1) == 0)
3913 DBG(
" Fail op before");
3918 CHK(failOp->
equal(
"PK1", notExists.m_pk1) == 0);
3919 if (g_opt.m_pk2chr.m_len != 0)
3921 CHK(failOp->
equal(
"PK2", notExists.m_pk2) == 0);
3922 CHK(failOp->
equal(
"PK3", notExists.m_pk3) == 0);
3924 setUDpartId(notExists, failOp);
3932 CHK(g_opr->
equal(
"PK1", tupExists.m_pk1) == 0);
3933 if (g_opt.m_pk2chr.m_len != 0)
3935 CHK(g_opr->
equal(
"PK2", tupExists.m_pk2) == 0);
3936 CHK(g_opr->
equal(
"PK3", tupExists.m_pk3) == 0);
3938 setUDpartId(tupExists, g_opr);
3939 CHK(getBlobHandles(g_opr) == 0);
3941 CHK(getBlobValue(tupExists) == 0);
3947 DBG(
" Fail op after");
3951 CHK(failOp->
equal(
"PK1", notExists.m_pk1) == 0);
3952 if (g_opt.m_pk2chr.m_len != 0)
3954 CHK(failOp->
equal(
"PK2", notExists.m_pk2) == 0);
3955 CHK(failOp->
equal(
"PK3", notExists.m_pk3) == 0);
3957 setUDpartId(notExists, failOp);
3968 "NoCommit":
"Commit"));
3975 DBG(
" Error code on transaction as expected");
3989 ndbout << "line " << __LINE__ << " " << x << endl; \
3995 g_ndb =
new Ndb(g_ncc,
"TEST_DB");
3996 CHK(g_ndb->
init(20) == 0);
4001 g_tups =
new Tup [g_opt.m_rows];
4005 createDefaultTableSpace();
4007 if (g_opt.m_seed == -1)
4008 g_opt.m_seed = getpid();
4009 if (g_opt.m_seed != 0) {
4010 DBG(
"random seed = " << g_opt.m_seed);
4011 ndb_srand(g_opt.m_seed);
4013 for (g_loop = 0; g_opt.m_loop == 0 || g_loop < g_opt.m_loop; g_loop++) {
4014 for (
int storage= 0; storage < 2; storage++) {
4015 if (!
testcase(storageSymbol[storage]))
4018 DBG(
"Create table " << storageName[storage]);
4019 CHK(dropTable() == 0);
4020 CHK(createTable(storage) == 0);
4023 DBG(
"FragType: " << g_dic->getTable(g_opt.m_tname)->getFragmentType());
4025 DBG(
"BL1: inline=" << b1.m_inline <<
" part=" << b1.m_partsize <<
" table=" << b1.m_btname);
4026 if (! g_opt.m_oneblob) {
4029 DBG(
"BL2: inline=" << b2.m_inline <<
" part=" << b2.m_partsize <<
" table=" << b2.m_btname);
4036 if (storage == STORAGE_DISK)
4054 DBG(
"Settings : usingdisk " << g_usingDisk
4055 <<
" batchSize " << g_batchSize
4056 <<
" parallel " << g_parallel
4057 <<
" scanFlags " << g_scanFlags);
4061 DBG(
"=== loop " << g_loop <<
" ===");
4062 if (g_opt.m_seed == 0)
4064 if (g_opt.m_bugtest != 0) {
4066 CHK((*g_opt.m_bugtest)() == 0);
4070 for (api = 0; api <=1; api++) {
4074 for (style = 0; style <= 2; style++) {
4077 DBG(
"--- pk ops " << stylename[style] <<
" " << apiName[api] <<
" ---");
4080 CHK(insertPk(style, api) == 0);
4081 CHK(verifyBlob() == 0);
4082 CHK(readPk(style, api) == 0);
4085 CHK(updatePk(style, api) == 0);
4086 CHK(verifyBlob() == 0);
4087 CHK(readPk(style, api) == 0);
4090 CHK(readLockPk(style,api) == 0);
4093 CHK(deletePk(api) == 0);
4094 CHK(deleteNoPk() == 0);
4095 CHK(verifyBlob() == 0);
4100 CHK(writePk(style, api) == 0);
4101 CHK(verifyBlob() == 0);
4102 CHK(readPk(style, api) == 0);
4105 CHK(writePk(style, api) == 0);
4106 CHK(verifyBlob() == 0);
4107 CHK(readPk(style, api) == 0);
4110 CHK(readLockPk(style,api) == 0);
4113 CHK(deletePk(api) == 0);
4114 CHK(deleteNoPk() == 0);
4115 CHK(verifyBlob() == 0);
4121 for (style = 0; style <= 2; style++) {
4124 DBG(
"--- idx ops " << stylename[style] <<
" " << apiName[api] <<
" ---");
4127 CHK(insertPk(style, api) == 0);
4128 CHK(verifyBlob() == 0);
4129 CHK(readIdx(style, api) == 0);
4132 CHK(updateIdx(style, api) == 0);
4133 CHK(verifyBlob() == 0);
4134 CHK(readIdx(style, api) == 0);
4137 CHK(deleteIdx(api) == 0);
4138 CHK(verifyBlob() == 0);
4143 CHK(writePk(style, api) == 0);
4144 CHK(verifyBlob() == 0);
4145 CHK(readIdx(style, api) == 0);
4148 CHK(writeIdx(style, api) == 0);
4149 CHK(verifyBlob() == 0);
4150 CHK(readIdx(style, api) == 0);
4153 CHK(deleteIdx(api) == 0);
4154 CHK(verifyBlob() == 0);
4159 for (style = 0; style <= 2; style++) {
4162 DBG(
"--- table scan " << stylename[style] <<
" " << apiName[api] <<
" ---");
4164 CHK(insertPk(style, api) == 0);
4165 CHK(verifyBlob() == 0);
4166 CHK(readScan(style, api,
false) == 0);
4168 CHK(updateScan(style, api,
false) == 0);
4169 CHK(verifyBlob() == 0);
4172 CHK(lockUnlockScan(style, api,
false) == 0);
4175 CHK(deleteScan(api,
false) == 0);
4176 CHK(verifyBlob() == 0);
4180 for (style = 0; style <= 2; style++) {
4183 DBG(
"--- index scan " << stylename[style] <<
" " << apiName[api] <<
" ---");
4185 CHK(insertPk(style, api) == 0);
4186 CHK(verifyBlob() == 0);
4187 CHK(readScan(style, api,
true) == 0);
4189 CHK(updateScan(style, api,
true) == 0);
4190 CHK(verifyBlob() == 0);
4193 CHK(lockUnlockScan(style, api,
true) == 0);
4196 CHK(deleteScan(api,
true) == 0);
4197 CHK(verifyBlob() == 0);
4214 m_on = m_ms = m_cnt = m_time[0] = m_text[0] = 0;
4218 m_on = NdbTick_CurrentMillisecond();
4220 void off(
unsigned cnt = 0) {
4221 NDB_TICKS off = NdbTick_CurrentMillisecond();
4222 assert(m_on != 0 && off >= m_on);
4227 const char* time() {
4229 sprintf(m_time,
"%u ms", (Uint32)m_ms);
4231 sprintf(m_time,
"%u ms per %u ( %llu ms per 1000 )", (Uint32)m_ms, m_cnt, (1000 * m_ms) / m_cnt);
4234 const char* pct (
const Tmr& t1) {
4236 sprintf(m_text,
"%llu pct", (100 * m_ms) / t1.m_ms);
4238 sprintf(m_text,
"[cannot measure]");
4241 const char* over(
const Tmr& t1) {
4243 if (t1.m_ms <= m_ms)
4244 sprintf(m_text,
"%llu pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms);
4246 sprintf(m_text,
"-%llu pct", (100 * (t1.m_ms - m_ms)) / t1.m_ms);
4248 sprintf(m_text,
"[cannot measure]");
4263 DBG(
"=== perf test ===");
4265 g_ndb =
new Ndb(g_ncc,
"TEST_DB");
4266 CHK(g_ndb->
init() == 0);
4270 if (g_dic->getTable(tab.
getName()) != 0)
4271 CHK(g_dic->dropTable(tab.
getName()) == 0);
4275 col.setPrimaryKey(
true);
4282 col.setNullable(
true);
4288 col.setBlobVersion(g_opt.m_blob_version);
4289 col.setInlineSize(20);
4290 col.setPartSize(512);
4291 col.setStripeSize(1);
4292 col.setNullable(
true);
4296 CHK(g_dic->createTable(tab) == 0);
4297 Uint32 cA = 0, cB = 1, cC = 2;
4303 DBG(
"--- insert char ---");
4307 for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
4310 CHK(g_opr->
equal(cA, (
char*)&k) == 0);
4311 memset(b, 0x20,
sizeof(b));
4314 CHK(g_con->
execute(NoCommit) == 0);
4316 t1.off(g_opt.m_rowsperf);
4317 CHK(g_con->
execute(Rollback) == 0);
4324 DBG(
"--- insert text ---");
4327 for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
4330 CHK(g_opr->
equal(cA, (
char*)&k) == 0);
4332 CHK((g_bh1->
setValue(
"c", 1) == 0));
4333 CHK(g_con->
execute(NoCommit) == 0);
4335 t2.off(g_opt.m_rowsperf);
4336 CHK(g_con->
execute(Rollback) == 0);
4343 DBG(
"insert overhead: " << t2.over(t1));
4348 DBG(
"--- insert for read test ---");
4352 for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
4355 CHK(g_opr->
equal(cA, (
char*)&k) == 0);
4356 memset(b, 0x20,
sizeof(b));
4360 CHK((g_bh1->
setValue(
"c", 1) == 0));
4361 if (++n == g_opt.m_batch) {
4362 CHK(g_con->
execute(Commit) == 0);
4369 CHK(g_con->
execute(Commit) == 0);
4378 DBG(
"--- pk read char ---");
4383 for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
4386 CHK(g_opr->
equal(cA, (
char*)&k) == 0);
4387 CHK(g_opr->
getValue(cA, (
char*)&a) != 0);
4391 CHK(g_con->
execute(NoCommit) == 0);
4392 CHK(a == k && b[0] ==
'b');
4394 CHK(g_con->
execute(Commit) == 0);
4395 t1.off(g_opt.m_rowsperf);
4402 DBG(
"--- pk read text ---");
4407 for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
4409 CHK(g_opr->readTuple() == 0);
4410 CHK(g_opr->equal(cA, (
char*)&k) == 0);
4411 CHK(g_opr->getValue(cA, (
char*)&a) != 0);
4412 CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
4415 CHK(g_con->
execute(NoCommit) == 0);
4418 CHK(a == k && m == 1 && c[0] ==
'c');
4420 CHK(g_con->
execute(Commit) == 0);
4421 t2.off(g_opt.m_rowsperf);
4427 DBG(
"pk read overhead: " << t2.over(t1));
4431 const uint scan_loops = 10;
4433 DBG(
"--- scan read char ---");
4437 for (i = 0; i < scan_loops; i++) {
4440 CHK(g_ops->
readTuples(NdbOperation::LM_Read) == 0);
4441 CHK(g_ops->
getValue(cA, (
char*)&a) != 0);
4443 CHK(g_con->
execute(NoCommit) == 0);
4450 CHK((ret = g_ops->
nextResult(
true)) == 0 || ret == 1);
4453 CHK(a < g_opt.m_rowsperf && b[0] ==
'b');
4456 CHK(n == g_opt.m_rowsperf);
4457 t1.off(g_opt.m_rowsperf);
4465 DBG(
"--- read text ---");
4469 for (i = 0; i < scan_loops; i++) {
4472 CHK(g_ops->
readTuples(NdbOperation::LM_Read) == 0);
4473 CHK(g_ops->
getValue(cA, (
char*)&a) != 0);
4475 CHK(g_con->
execute(NoCommit) == 0);
4482 CHK((ret = g_ops->
nextResult(
true)) == 0 || ret == 1);
4487 CHK(a < g_opt.m_rowsperf && m == 1 && c[0] ==
'c');
4490 CHK(n == g_opt.m_rowsperf);
4491 t2.off(g_opt.m_rowsperf);
4499 DBG(
"scan read overhead: " << t2.over(t1));
4512 DBG(
"bug test 4088 - ndb api hang with mixed ops on index table");
4515 CHK(insertPk(0, API_NDBRECORD) == 0);
4518 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
4519 Tup& tup = g_tups[k];
4521 const unsigned pkcnt = 2;
4523 for (i = 0; i < pkcnt; i++) {
4526 sprintf(name,
"%d/%s", 4, g_opt.m_x1name);
4528 CHK(g_opr->readTuple() == 0);
4529 CHK(g_opr->equal(
"PK2", tup.m_pk2) == 0);
4530 setUDpartId(tup, g_opr);
4531 CHK(g_opr->getValue(
"NDB$PK", (
char*)&pktup[i].m_pk1) != 0);
4536 CHK(g_opx->
equal(
"PK2", tup.m_pk2) == 0);
4537 assert(tup.m_bval1.m_buf != 0);
4538 CHK(g_opx->
getValue(
"BL1", (
char*)tup.m_bval1.m_buf) != 0);
4541 CHK(g_con->
execute(Commit) == 0);
4543 for (i = 0; i < pkcnt; i++) {
4544 CHK(pktup[i].m_pk1 == tup.m_pk1);
4545 CHK(memcmp(pktup[i].m_pk2, tup.m_pk2, g_opt.m_pk2chr.m_len) == 0);
4547 CHK(memcmp(tup.m_bval1.m_val, tup.m_bval1.m_buf, 8 + g_blob1.m_inline) == 0);
4555 DBG(
"bug test 27018 - middle partial part write clobbers rest of part");
4559 CHK(insertPk(0, API_NDBRECORD) == 0);
4561 for (
unsigned k= 0; k < g_opt.m_rows; k++)
4563 Tup& tup= g_tups[k];
4566 Uint32
offset= urandom(tup.m_bval1.m_len + 1);
4567 if (offset == tup.m_bval1.m_len) {
4574 memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1,
sizeof(tup.m_pk1));
4575 if (g_opt.m_pk2chr.m_len != 0) {
4576 memcpy(&tup.m_key_row[g_pk2_offset], tup.m_pk2, g_opt.m_pk2chr.m_totlen);
4577 memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3,
sizeof(tup.m_pk3));
4580 setUDpartIdNdbRecord(tup,
4583 CHK((g_const_opr= g_con->updateTuple(g_key_record, tup.m_key_row,
4584 g_blob_record, tup.m_row,
4587 sizeof(opts))) != 0);
4588 CHK(getBlobHandles(g_const_opr) == 0);
4589 CHK(g_con->
execute(NoCommit) == 0);
4591 tup.m_bval1.m_buf[0]= 0xff ^ tup.m_bval1.m_val[
offset];
4592 CHK(g_bh1->
setPos(offset) == 0);
4593 CHK(g_bh1->
writeData(&(tup.m_bval1.m_buf[0]), 1) == 0);
4594 CHK(g_con->
execute(Commit) == 0);
4598 CHK((g_const_opr= g_con->readTuple(g_key_record, tup.m_key_row,
4599 g_blob_record, tup.m_row,
4600 NdbOperation::LM_Read,
4603 sizeof(opts))) != 0);
4604 CHK(getBlobHandles(g_const_opr) == 0);
4606 CHK(g_bh1->
getValue(tup.m_bval1.m_buf, tup.m_bval1.m_len) == 0);
4607 CHK(g_con->
execute(Commit) == 0);
4610 CHK(g_bh1->
getLength(len) == 0 && len == tup.m_bval1.m_len);
4611 tup.m_bval1.m_buf[
offset]^= 0xff;
4614 while (i < tup.m_bval1.m_len) {
4615 CHK(tup.m_bval1.m_buf[i] == tup.m_bval1.m_val[i]);
4623 CHK(deletePk(API_NDBRECORD) == 0);
4631 char m_current_write_value;
4633 Uint32 m_blob1_size;
// Writer thread for bug test 27370 (inconsistent blob reads under
// ReadCommitted). It repeatedly rewrites the test row's blob with a buffer
// filled with a single incrementing marker byte, so a concurrent reader can
// detect a torn read (mixed marker values within one blob value).
// Returns NULL/0 on clean shutdown; on failure returns a static C string
// describing the failing step (the joining thread prints and checks it).
// NOTE(review): this listing has extraction gaps (original lines 4642-4644,
// 4646, 4649-4651, 4653, 4657-4659, 4661-4662 are absent), so declarations
// of `data`, `con`, `opr`, `bh` and parts of the control flow are not
// visible here — confirm against the full source before relying on this.
4641 void *bugtest_27370_thread(
void *arg)
// Spin until the main test thread sets m_thread_stop.
4645 while (!data->m_thread_stop)
// Fill the write buffer with the current marker byte, then bump the
// marker so each committed write is distinguishable from the previous.
4647 memset(data->m_writebuf, data->m_current_write_value, data->m_blob1_size);
4648 data->m_current_write_value++;
4652 return (
void *)
"Failed to create transaction";
// Refresh the full write row from the key row before each write op.
4654 memcpy(data->m_write_row, data->m_key_row, g_rowsize);
4655 if ((opr= con->writeTuple(g_key_record, data->m_key_row,
4656 g_full_record, data->m_write_row,
4660 return (
void *)
"Failed to create operation";
4663 return (
void *)
"getBlobHandle() failed";
// Write the whole blob value in one setValue(), then commit synchronously
// (AbortOnError, forced send) so the update is visible before looping.
4664 if (bh->
setValue(data->m_writebuf, data->m_blob1_size) != 0)
4665 return (
void *)
"setValue() failed";
4666 if (con->
execute(Commit, AbortOnError, 1) != 0)
4667 return (
void *)
"execute() failed";
4677 DBG(
"bug test 27370 - Potential inconsistent blob reads for ReadCommitted reads");
4681 CHK((data.m_key_row=
new char[g_rowsize*3]) != 0);
4682 data.m_read_row= data.m_key_row + g_rowsize;
4683 data.m_write_row= data.m_read_row + g_rowsize;
4685 data.m_ndb=
new Ndb(g_ncc,
"TEST_DB");
4686 CHK(data.m_ndb->
init(20) == 0);
4689 data.m_current_write_value= 0;
4690 data.m_blob1_size= g_blob1.m_inline + 10 * g_blob1.m_partsize;
4691 CHK((data.m_writebuf=
new char [data.m_blob1_size]) != 0);
4692 Uint32 pk1_value= 27370;
4695 bool isUserDefined= (t->
getFragmentType() == NdbDictionary::Object::UserDefined);
4697 Uint32 udPartId= pk1_value % partCount;
4699 opts.optionsPresent= 0;
4703 opts.optionsPresent= NdbOperation::OperationOptions::OO_PARTITION_ID;
4704 opts.partitionId= udPartId;
4706 memcpy(&data.m_key_row[g_pk1_offset], &pk1_value,
sizeof(pk1_value));
4707 if (g_opt.m_pk2chr.m_len != 0)
4709 memset(&data.m_key_row[g_pk2_offset],
'x', g_opt.m_pk2chr.m_totlen);
4710 if (!g_opt.m_pk2chr.m_fixed)
4711 data.m_key_row[g_pk2_offset]= urandom(g_opt.m_pk2chr.m_len + 1);
4712 Uint16 pk3_value= 27370;
4713 memcpy(&data.m_key_row[g_pk3_offset], &pk3_value,
sizeof(pk3_value));
4715 data.m_thread_stop=
false;
4717 memset(data.m_writebuf, data.m_current_write_value, data.m_blob1_size);
4718 data.m_current_write_value++;
4721 memcpy(data.m_write_row, data.m_key_row, g_rowsize);
4722 CHK((g_const_opr= g_con->writeTuple(g_key_record, data.m_key_row,
4723 g_full_record, data.m_write_row,
4726 sizeof(opts))) != 0);
4727 CHK((g_bh1= g_const_opr->getBlobHandle(
"BL1")) != 0);
4728 CHK(g_bh1->
setValue(data.m_writebuf, data.m_blob1_size) == 0);
4729 CHK(g_con->
execute(Commit) == 0);
4733 pthread_t thread_handle;
4734 CHK(pthread_create(&thread_handle, NULL, bugtest_27370_thread, &data) == 0);
4736 DBG(
"bug test 27370 - PK blob reads");
4737 Uint32 seen_updates= 0;
4738 while (seen_updates < 50)
4741 CHK((g_const_opr= g_con->readTuple(g_key_record, data.m_key_row,
4742 g_blob_record, data.m_read_row,
4746 sizeof(opts))) != 0);
4747 CHK((g_bh1= g_const_opr->getBlobHandle(
"BL1")) != 0);
4748 CHK(g_con->
execute(NoCommit, AbortOnError, 1) == 0);
4750 const Uint32 loop_max= 10;
4752 char original_read_char= 0;
4754 for (readloop= 0;; readloop++)
4761 CHK(read_char == original_read_char);
4770 if (original_read_char != read_char)
4772 original_read_char= read_char;
4775 if (readloop > loop_max)
4778 CHK(g_bh1->
setPos(urandom(data.m_blob1_size)) == 0);
4779 CHK(g_bh1->
readData(&read_char, readSize) == 0);
4781 ExecType commitType= readloop == loop_max ? Commit : NoCommit;
4782 CHK(g_con->
execute(commitType, AbortOnError, 1) == 0);
4788 DBG(
"bug test 27370 - table scan blob reads");
4790 while (seen_updates < 50)
4793 CHK((g_ops= g_con->
scanTable(g_full_record,
4796 CHK(g_con->
execute(NoCommit, AbortOnError, 1) == 0);
4797 const char *out_row= NULL;
4798 CHK(g_ops->
nextResult(&out_row,
true,
false) == 0);
4800 const Uint32 loop_max= 10;
4802 char original_read_char= 0;
4804 for (readloop= 0;; readloop++)
4811 CHK(read_char == original_read_char);
4820 if (original_read_char != read_char)
4822 original_read_char= read_char;
4825 if (readloop > loop_max)
4828 CHK(g_bh1->
setPos(urandom(data.m_blob1_size)) == 0);
4829 CHK(g_bh1->
readData(&read_char, readSize) == 0);
4831 CHK(g_con->
execute(NoCommit, AbortOnError, 1) == 0);
4834 CHK(g_ops->
nextResult(&out_row,
true,
false) == 1);
4839 data.m_thread_stop=
true;
4840 void *thread_return;
4841 CHK(pthread_join(thread_handle, &thread_return) == 0);
4842 DBG(
"bug 27370 - thread return status: " <<
4843 (thread_return ? (
char *)thread_return :
"<null>"));
4844 CHK(thread_return == 0);
4846 delete [] data.m_key_row;
4856 DBG(
"bug test 28116 - Crash in getBlobHandle() when called without full key");
4858 if (g_opt.m_pk2chr.m_len == 0)
4860 DBG(
" ... skipped, requires multi-column primary key.");
4866 for (
unsigned k = 0; k < g_opt.m_rows; k++) {
4867 Tup& tup = g_tups[k];
4870 int reqType = urandom(4);
4875 CHK(g_opr->readTuple() == 0);
4881 CHK(g_opr->insertTuple() == 0);
4887 CHK(g_opr->updateTuple() == 0);
4894 CHK(g_opr->deleteTuple() == 0);
4898 switch (urandom(3)) {
4907 CHK(g_opr->equal(
"PK1", tup.m_pk1) == 0);
4914 if (g_opt.m_pk2chr.m_len != 0)
4916 CHK(g_opr->equal(
"PK2", tup.m_pk2) == 0);
4917 CHK(g_opr->equal(
"PK3", tup.m_pk3) == 0);
4923 CHK(g_opr->getBlobHandle(
"BL1") == 0);
4927 CHK(g_opr->getNdbError().code == 4264);
4940 { 4088, bugtest_4088 },
4941 { 27018, bugtest_27018 },
4942 { 27370, bugtest_27370 },
4943 { 36756, bugtest_36756 },
4944 { 45768, bugtest_45768 },
4945 { 48040, bugtest_48040 },
4946 { 28116, bugtest_28116 },
4947 { 62321, bugtest_62321 }
// Program entry point via the NDBT test-harness NDB_COMMAND macro
// (presumably expands to a main()-like function — confirm in NdbTest.hpp).
// Parses command-line options into g_opt, sets up DBUG tracing and the PK2
// charset, then connects to the cluster and runs testmain()/testperf().
// NOTE(review): this listing has many extraction gaps (e.g. original lines
// 4951-4955, 5103-5105, 5126-5127, 5138-5146 are absent), so closing braces,
// `continue`/`break` statements and some declarations (`cmdline`, `b`, `c`,
// `info`, `e`) are not visible here.
4950 NDB_COMMAND(testOdbcDriver,
"testBlobs",
"testBlobs",
"testBlobs", 65535)
// Reconstruct the invoking command line (basename + args) for logging.
4956 const char* progname =
4957 strchr(argv[0],
'/') ? strrchr(argv[0],
'/') + 1 : argv[0];
4958 strcpy(cmdline, progname);
4959 for (
int i = 1; i < argc; i++) {
4960 strcat(cmdline,
" ");
4961 strcat(cmdline, argv[i]);
// Option parsing loop: each branch matches one flag documented in the
// usage text; flags taking a value consume the next argv entry.
4964 Chr& pk2chr = g_opt.m_pk2chr;
4965 while (++argv, --argc > 0) {
4966 const char* arg = argv[0];
4967 if (strcmp(arg,
"-batch") == 0) {
4968 if (++argv, --argc > 0) {
4969 g_opt.m_batch = atoi(argv[0]);
4973 if (strcmp(arg,
"-core") == 0) {
4974 g_opt.m_core =
true;
4977 if (strcmp(arg,
"-dbg") == 0) {
4981 if (strcmp(arg,
"-debug") == 0) {
4982 if (++argv, --argc > 0) {
4984 g_opt.m_debug = strdup(argv[0]);
4988 if (strcmp(arg,
"-fac") == 0) {
4992 if (strcmp(arg,
"-full") == 0) {
4993 g_opt.m_full =
true;
4996 if (strcmp(arg,
"-loop") == 0) {
4997 if (++argv, --argc > 0) {
4998 g_opt.m_loop = atoi(argv[0]);
5002 if (strcmp(arg,
"-min") == 0) {
5006 if (strcmp(arg,
"-parts") == 0) {
5007 if (++argv, --argc > 0) {
5008 g_opt.m_parts = atoi(argv[0]);
5012 if (strcmp(arg,
"-rows") == 0) {
5013 if (++argv, --argc > 0) {
5014 g_opt.m_rows = atoi(argv[0]);
5018 if (strcmp(arg,
"-rowsperf") == 0) {
5019 if (++argv, --argc > 0) {
5020 g_opt.m_rowsperf = atoi(argv[0]);
5024 if (strcmp(arg,
"-seed") == 0) {
5025 if (++argv, --argc > 0) {
5026 g_opt.m_seed = atoi(argv[0]);
5030 if (strcmp(arg,
"-skip") == 0) {
5031 if (++argv, --argc > 0) {
5032 g_opt.m_skip = strdup(argv[0]);
5036 if (strcmp(arg,
"-test") == 0) {
5037 if (++argv, --argc > 0) {
5038 g_opt.m_test = strdup(argv[0]);
5042 if (strcmp(arg,
"-timeoutretries") == 0) {
5043 if (++argv, --argc > 0) {
5044 g_opt.m_timeout_retries = atoi(argv[0]);
// -version accepts only blob version 1 or 2; other values appear to
// fall through to the error path (gap at original 5052-5055).
5048 if (strcmp(arg,
"-version") == 0) {
5049 if (++argv, --argc > 0) {
5050 g_opt.m_blob_version = atoi(argv[0]);
5051 if (g_opt.m_blob_version == 1 || g_opt.m_blob_version == 2)
5056 if (strcmp(arg,
"-pk2len") == 0) {
5057 if (++argv, --argc > 0) {
5058 pk2chr.m_len = atoi(argv[0]);
5062 if (strcmp(arg,
"-pk2fixed") == 0) {
5063 pk2chr.m_fixed =
true;
5066 if (strcmp(arg,
"-pk2binary") == 0) {
5067 pk2chr.m_binary =
true;
5070 if (strcmp(arg,
"-pk2cs") == 0) {
5071 if (++argv, --argc > 0) {
5072 pk2chr.m_cs = strdup(argv[0]);
5076 if (strcmp(arg,
"-pk2part") == 0) {
5077 g_opt.m_pk2part =
true;
5080 if (strcmp(arg,
"-oneblob") == 0) {
5081 g_opt.m_oneblob =
true;
5084 if (strcmp(arg,
"-rbatch") == 0) {
5085 if (++argv, --argc > 0) {
5086 g_opt.m_rbatch = atoi(argv[0]);
5090 if (strcmp(arg,
"-wbatch") == 0) {
5091 if (++argv, --argc > 0) {
5092 g_opt.m_wbatch = atoi(argv[0]);
// -bug N: look up N in the g_bugtest table and select its test function.
5097 if (strcmp(arg,
"-bug") == 0) {
5098 if (++argv, --argc > 0) {
5099 g_opt.m_bug = atoi(argv[0]);
5100 for (
unsigned i = 0; i <
sizeof(g_bugtest)/
sizeof(g_bugtest[0]); i++) {
5101 if (g_opt.m_bug == g_bugtest[i].m_bug) {
5102 g_opt.m_bugtest = g_bugtest[
i].m_test;
5106 if (g_opt.m_bugtest != 0)
// -?/-h print usage; anything unmatched is reported as unknown and the
// run exits with NDBT_WRONGARGS (bottom of this function).
5110 if (strcmp(arg,
"-?") == 0 || strcmp(arg,
"-h") == 0) {
5114 ndbout <<
"unknown option " << arg << endl;
// If -debug value has no ':', prepend the default DBUG control string
// "d:t:F:L:o," (matches the usage text in the help output).
5117 if (g_opt.m_debug != 0) {
5118 if (strchr(g_opt.m_debug,
':') == 0) {
5119 const char* s =
"d:t:F:L:o,";
5120 char* t =
new char [strlen(s) + strlen(g_opt.m_debug) + 1];
5122 strcat(t, g_opt.m_debug);
5125 DBUG_PUSH(g_opt.m_debug);
// With -pk2len 0 there is no PK2/PK3: extend the skip list accordingly.
5128 if (pk2chr.m_len == 0) {
5131 if (g_opt.m_skip != 0)
5132 strcpy(b, g_opt.m_skip);
5135 g_opt.m_skip = strdup(b);
// Resolve and validate the PK2 character set: look up by collation name
// first, then by charset name; derive byte lengths and case sensitivity.
5137 if (pk2chr.m_len != 0) {
5147 assert(c.m_cs != 0);
5152 c.m_csinfo = get_charset_by_name(c.m_cs, MYF(0));
5153 if (c.m_csinfo == 0)
5154 c.m_csinfo = get_charset_by_csname(c.m_cs, MY_CS_PRIMARY, MYF(0));
5155 if (c.m_csinfo == 0) {
5156 ndbout <<
"unknown charset " << c.m_cs << endl;
5159 c.m_mblen = c.m_csinfo->mbmaxlen;
// Byte length = char length * max bytes per char; NDB keys cap at 255
// bytes here. Varchar adds 1 length byte to the total.
5163 c.m_bytelen = c.m_len * c.m_mblen;
5164 if (c.m_bytelen > 255) {
5165 ndbout <<
"length of pk2 in bytes exceeds 255" << endl;
5169 c.m_totlen = c.m_bytelen;
5171 c.m_totlen = 1 + c.m_bytelen;
5172 c.m_caseins =
false;
// Probe the collation: require ASCII well-formedness, and detect
// case-insensitivity by comparing mixed-case sample strings.
5175 const char* p =
"ABCxyz";
5176 const char* q =
"abcXYZ";
5178 if ((*info->cset->well_formed_len)(info, p, p + 6, 999, &e) != 6) {
5179 ndbout <<
"charset does not contain ascii" << endl;
5182 if ((*info->coll->strcasecmp)(info, p, q) == 0) {
5185 ndbout <<
"charset: " << c.m_cs <<
" caseins: " << c.m_caseins << endl;
5188 ndbout << cmdline << endl;
// Connect to the cluster (30 s) and run the functional and performance
// test drivers; any failure exits with NDBT_FAILED.
5190 if (g_ncc->
connect(30) != 0 || testmain() == -1 || testperf() == -1) {
5191 ndbout <<
"line " << __LINE__ <<
" FAIL loop=" << g_loop << endl;
5192 return NDBT_ProgramExit(NDBT_FAILED);
5197 return NDBT_ProgramExit(NDBT_OK);
// Reached on argument errors (usage shown / unknown option).
5199 return NDBT_ProgramExit(NDBT_WRONGARGS);