24 #include "ha_ndbcluster_glue.h"
26 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
27 #include "ha_ndbcluster.h"
28 #include <ndbapi/NdbApi.hpp>
29 #include <util/Bitmask.hpp>
30 #include <ndbapi/NdbIndexStat.hpp>
31 #include <ndbapi/NdbInterpretedCode.hpp>
32 #include "../storage/ndb/src/ndbapi/NdbQueryBuilder.hpp"
33 #include "../storage/ndb/src/ndbapi/NdbQueryOperation.hpp"
35 #include "ha_ndbcluster_binlog.h"
36 #include "ha_ndbcluster_push.h"
37 #include "ha_ndbcluster_cond.h"
38 #include "ha_ndbcluster_tables.h"
39 #include "ha_ndbcluster_connection.h"
41 #include "ndb_table_guard.h"
42 #include "ndb_global_schema_lock.h"
43 #include "ndb_global_schema_lock_guard.h"
44 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
45 #include "abstract_query_plan.h"
47 #include "ndb_dist_priv_util.h"
48 #include "ha_ndb_index_stat.h"
50 #include <mysql/plugin.h>
51 #include <ndb_version.h>
55 extern "C" void ndb_init_internal();
56 extern "C" void ndb_end_internal();
58 static const int DEFAULT_PARALLELISM= 0;
59 static const ha_rows DEFAULT_AUTO_PREFETCH= 32;
60 static const ulong ONE_YEAR_IN_SECONDS= (ulong) 3600L*24L*365L;
62 ulong opt_ndb_extra_logging;
63 static ulong opt_ndb_wait_connected;
64 ulong opt_ndb_wait_setup;
65 static ulong opt_ndb_cache_check_time;
66 static uint opt_ndb_cluster_connection_pool;
67 static char* opt_ndb_index_stat_option;
68 static char* opt_ndb_connectstring;
69 static uint opt_ndb_nodeid;
71 static MYSQL_THDVAR_UINT(
72 autoincrement_prefetch_sz,
74 "Specify number of autoincrement values that are prefetched.",
84 static MYSQL_THDVAR_BOOL(
87 "Force send of buffers to ndb immediately without waiting for "
95 static MYSQL_THDVAR_BOOL(
98 "Use exact records count during query planning and for fast "
99 "select count(*), disable for faster queries.",
106 static MYSQL_THDVAR_BOOL(
109 "Use transactions for large inserts, if enabled then large "
110 "inserts will be split into several smaller transactions",
117 static MYSQL_THDVAR_BOOL(
118 use_copying_alter_table,
120 "Force ndbcluster to always copy tables at alter table (should "
121 "only be used if on-line alter table fails).",
128 static MYSQL_THDVAR_UINT(
129 optimized_node_selection,
131 "Select nodes for transactions in a more optimal way.",
141 static MYSQL_THDVAR_ULONG(
144 "Batch size in bytes.",
154 static MYSQL_THDVAR_ULONG(
157 "For optimize table, specifies the delay in milliseconds "
158 "for each batch of rows sent.",
167 #if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
168 #define DEFAULT_NDB_INDEX_STAT_ENABLE FALSE
170 #define DEFAULT_NDB_INDEX_STAT_ENABLE TRUE
173 static MYSQL_THDVAR_BOOL(
176 "Use ndb index statistics in query optimization.",
179 DEFAULT_NDB_INDEX_STAT_ENABLE
183 static MYSQL_THDVAR_ULONG(
184 index_stat_cache_entries,
186 "Obsolete (ignored and will be removed later).",
196 static MYSQL_THDVAR_ULONG(
197 index_stat_update_freq,
199 "Obsolete (ignored and will be removed later).",
209 static MYSQL_THDVAR_BOOL(
219 static MYSQL_THDVAR_BOOL(
228 static MYSQL_THDVAR_UINT(
229 blob_read_batch_bytes,
231 "Specifies the bytesize large Blob reads "
232 "should be batched into. 0 == No limit.",
241 static MYSQL_THDVAR_UINT(
242 blob_write_batch_bytes,
244 "Specifies the bytesize large Blob writes "
245 "should be batched into. 0 == No limit.",
254 static MYSQL_THDVAR_UINT(
255 deferred_constraints,
257 "Specified that constraints should be checked deferred (when supported)",
266 #if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
267 #define DEFAULT_NDB_JOIN_PUSHDOWN FALSE
269 #define DEFAULT_NDB_JOIN_PUSHDOWN TRUE
272 static MYSQL_THDVAR_BOOL(
275 "Enable pushing down of join to datanodes",
278 DEFAULT_NDB_JOIN_PUSHDOWN
285 bool ndb_index_stat_get_enable(THD *thd)
287 const bool value = THDVAR(thd, index_stat_enable);
291 static int ndbcluster_end(
handlerton *hton, ha_panic_function flag);
292 static bool ndbcluster_show_status(
handlerton *hton, THD*,
295 static int ndbcluster_alter_tablespace(
handlerton *hton,
298 static int ndbcluster_fill_files_table(
handlerton *hton,
303 #if MYSQL_VERSION_ID >= 50501
317 Item *cond,
enum enum_schema_tables schema_table_idx)
319 if (schema_table_idx == SCH_FILES)
320 return ndbcluster_fill_files_table(hton, thd, tables, cond);
336 ndbcluster_partition_flags()
338 return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
339 HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION);
342 #ifndef NDB_WITHOUT_ONLINE_ALTER
344 ndbcluster_alter_table_flags(uint
flags)
346 if (flags & ALTER_DROP_PARTITION)
349 return (HA_PARTITION_FUNCTION_SUPPORTED);
353 ndbcluster_alter_table_flags(uint flags)
356 HA_PARTITION_FUNCTION_SUPPORTED |
359 if (flags & Alter_info::ALTER_DROP_PARTITION)
366 #define NDB_AUTO_INCREMENT_RETRIES 100
367 #define BATCH_FLUSH_SIZE (32768)
372 #define MAX_CONFLICT_INTERPRETED_PROG_SIZE 16
374 static int ndb_to_mysql_error(
const NdbError *ndberr);
/* Log an NdbError's numeric code and message text to the DBUG trace
   under the "error" keyword. Debug-build diagnostic only. */
376 #define ERR_PRINT(err) \
377 DBUG_PRINT("error", ("%d message: %s", err.code, err.message))
/* Map an NdbError to its MySQL error code and return it from the
   enclosing DBUG-instrumented function.
   NOTE(review): the do { } while(0) wrapper lines are elided in this
   view — confirm against the full source. */
379 #define ERR_RETURN(err) \
381 const NdbError& tmp= err; \
382 DBUG_RETURN(ndb_to_mysql_error(&tmp)); \
/* Convert an NdbError to a MySQL error code, store it in `code`, then
   break out of the enclosing loop/switch (the break and wrapper lines
   are elided in this view). */
385 #define ERR_BREAK(err, code) \
387 const NdbError& tmp= err; \
388 code= ndb_to_mysql_error(&tmp); \
/* Like ERR_BREAK but only assigns the mapped MySQL error code to `code`
   without breaking (wrapper lines elided in this view). */
392 #define ERR_SET(err, code) \
394 const NdbError& tmp= err; \
395 code= ndb_to_mysql_error(&tmp); \
398 static int ndbcluster_inited= 0;
399 int ndbcluster_terminating= 0;
406 int ndb_setup_complete= 0;
407 pthread_cond_t COND_ndb_setup_complete;
411 uchar g_node_id_map[max_ndb_nodes];
414 pthread_mutex_t ndbcluster_mutex;
417 HASH ndbcluster_open_tables;
419 static uchar *ndbcluster_get_key(
NDB_SHARE *share,
size_t *length,
420 my_bool not_used __attribute__((unused)));
422 static void modify_shared_stats(
NDB_SHARE *share,
427 bool have_lock= FALSE,
428 uint part_id= ~(uint)0);
430 THD *injector_thd= 0;
433 pthread_t ndb_util_thread;
434 int ndb_util_thread_running= 0;
435 pthread_mutex_t LOCK_ndb_util_thread;
436 pthread_cond_t COND_ndb_util_thread;
437 pthread_cond_t COND_ndb_util_ready;
438 pthread_handler_t ndb_util_thread_func(
void *arg);
441 pthread_t ndb_index_stat_thread;
442 int ndb_index_stat_thread_running= 0;
443 pthread_mutex_t LOCK_ndb_index_stat_thread;
444 pthread_cond_t COND_ndb_index_stat_thread;
445 pthread_cond_t COND_ndb_index_stat_ready;
446 pthread_mutex_t ndb_index_stat_list_mutex;
447 pthread_mutex_t ndb_index_stat_stat_mutex;
448 pthread_cond_t ndb_index_stat_stat_cond;
449 pthread_handler_t ndb_index_stat_thread_func(
void *arg);
451 extern void ndb_index_stat_free(
NDB_SHARE *share);
452 extern void ndb_index_stat_end();
458 long g_ndb_status_index_stat_cache_query = 0;
459 long g_ndb_status_index_stat_cache_clean = 0;
461 long long g_event_data_count = 0;
462 long long g_event_nondata_count = 0;
463 long long g_event_bytes_count = 0;
465 static long long g_slave_api_client_stats[Ndb::NumClientStatistics];
467 static long long g_server_api_client_stats[Ndb::NumClientStatistics];
/* Snapshot every NDB API client statistic from the slave's Ndb object
   into the global g_slave_api_client_stats[] array (exposed via the
   ndb_status_slave_variables SHOW STATUS table below). */
470 update_slave_api_stats(
Ndb* ndb)
472 for (Uint32
i=0;
i < Ndb::NumClientStatistics;
i++)
473 g_slave_api_client_stats[
i] = ndb->getClientStat(
i);
/* st_ndb_slave_state constructor: zero the per-transaction and total
   conflict-violation counters and mark the slave SQL run id as unknown
   (~Uint32(0)), so the first check_slave_state() treats it as a restart. */
478 st_ndb_slave_state::st_ndb_slave_state()
479 : current_conflict_defined_op_count(0),
480 current_master_server_epoch(0),
481 current_max_rep_epoch(0),
483 sql_run_id(~Uint32(0))
485 memset(current_violation_count, 0,
sizeof(current_violation_count));
486 memset(total_violation_count, 0,
sizeof(total_violation_count));
/* Called when a slave transaction rolls back: discard the per-transaction
   conflict state. Totals are intentionally left untouched. */
490 st_ndb_slave_state::atTransactionAbort()
493 memset(current_violation_count, 0,
sizeof(current_violation_count));
494 current_conflict_defined_op_count = 0;
495 current_max_rep_epoch = 0;
/* Called when a slave transaction commits: fold the per-transaction
   conflict-violation counters into the running totals, then advance
   max_rep_epoch if this transaction applied a newer replicated epoch. */
499 st_ndb_slave_state::atTransactionCommit()
504 for (
int i=0;
i < CFT_NUMBER_OF_CFTS;
i++)
506 total_violation_count[
i]+= current_violation_count[
i];
507 current_violation_count[
i] = 0;
509 current_conflict_defined_op_count = 0;
510 if (current_max_rep_epoch > max_rep_epoch)
512 DBUG_PRINT(
"info", (
"Max replicated epoch increases from %llu to %llu",
514 current_max_rep_epoch));
516 max_rep_epoch = current_max_rep_epoch;
518 current_max_rep_epoch = 0;
/* Track a write to the ndb_apply_status table. A row originating from
   the current master advances current_master_server_epoch; a row whose
   server id is local (or treated as local) may raise
   current_max_rep_epoch — presumably used for replication-loop
   detection; confirm against the full source. */
522 st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id,
523 Uint32 row_server_id,
525 bool is_row_server_id_local)
527 if (row_server_id == master_server_id)
534 current_master_server_epoch = row_epoch;
535 assert(! is_row_server_id_local);
537 else if (is_row_server_id_local)
539 DBUG_PRINT(
"info", (
"Recording application of local server %u epoch %llu "
541 row_server_id, row_epoch,
542 (row_epoch > g_ndb_slave_state.current_max_rep_epoch)?
543 " new highest." :
" older than previously applied"));
544 if (row_epoch > current_max_rep_epoch)
550 current_max_rep_epoch = row_epoch;
/* RESET SLAVE handling: forget the highest replicated epoch seen so far
   (additional resets in this function are elided in this view). */
556 st_ndb_slave_state::atResetSlave()
564 current_max_rep_epoch = 0;
568 static int check_slave_state(THD* thd)
570 DBUG_ENTER(
"check_slave_state");
572 #ifdef HAVE_NDB_BINLOG
573 if (!thd->slave_thread)
576 const Uint32 runId = ndb_mi_get_slave_run_id();
577 DBUG_PRINT(
"info", (
"Slave SQL thread run id is %u",
579 if (unlikely(runId != g_ndb_slave_state.sql_run_id))
581 DBUG_PRINT(
"info", (
"Slave run id changed from %u, "
582 "treating as Slave restart",
583 g_ndb_slave_state.sql_run_id));
584 g_ndb_slave_state.sql_run_id = runId;
595 DBUG_PRINT(
"info", (
"Loading applied epoch information from %s",
598 Uint64 highestAppliedEpoch = 0;
601 Ndb* ndb= check_ndb_in_thd(thd);
607 const NDBTAB* ndbtab= ndbtab_g.get_table();
608 if (unlikely(ndbtab == NULL))
615 if (unlikely(trans == NULL))
624 if (unlikely(sop == NULL))
630 const Uint32 server_id_col_num = 0;
631 const Uint32 epoch_col_num = 1;
636 ((server_id_ra = sop->
getValue(server_id_col_num)) == NULL) ||
637 ((epoch_ra = sop->
getValue(epoch_col_num)) == NULL)))
655 if ((serverid == ::server_id) ||
656 (ndb_mi_get_ignore_server_id(serverid)))
658 highestAppliedEpoch = MAX(epoch, highestAppliedEpoch);
672 if (ndb_error.
code != 0)
674 sql_print_warning(
"NDB Slave : Could not determine maximum replicated epoch from %s.%s "
675 "at Slave start, error %u %s",
686 g_ndb_slave_state.max_rep_epoch = highestAppliedEpoch;
687 sql_print_information(
"NDB Slave : MaxReplicatedEpoch set to %llu (%u/%u) at Slave start",
688 g_ndb_slave_state.max_rep_epoch,
689 (Uint32)(g_ndb_slave_state.max_rep_epoch >> 32),
690 (Uint32)(g_ndb_slave_state.max_rep_epoch & 0xffffffff));
699 static int update_status_variables(
Thd_ndb *thd_ndb,
703 ns->connected_port= c->get_connected_port();
704 ns->connected_host= c->get_connected_host();
705 if (ns->cluster_node_id != (
int) c->node_id())
707 ns->cluster_node_id= c->node_id();
708 if (&g_ndb_status == ns && g_ndb_cluster_connection == c)
709 sql_print_information(
"NDB: NodeID is %lu, management server '%s:%lu'",
710 ns->cluster_node_id, ns->connected_host,
713 ns->number_of_replicas= 0;
715 int n= c->get_no_ready();
716 ns->number_of_ready_data_nodes= n > 0 ? n : 0;
718 ns->number_of_data_nodes= c->no_db_nodes();
719 ns->connect_count= c->get_connect_count();
722 ns->execute_count= thd_ndb->m_execute_count;
723 ns->scan_count= thd_ndb->m_scan_count;
724 ns->pruned_scan_count= thd_ndb->m_pruned_scan_count;
730 for (
int i= 0;
i < MAX_NDB_NODES;
i++)
732 ns->transaction_no_hint_count[
i]= thd_ndb->m_transaction_no_hint_count[
i];
733 ns->transaction_hint_count[
i]= thd_ndb->m_transaction_hint_count[
i];
735 for (
int i=0;
i < Ndb::NumClientStatistics;
i++)
737 ns->api_client_stats[
i] = thd_ndb->ndb->getClientStat(
i);
739 ns->schema_locks_count= thd_ndb->schema_locks_count;
746 #define NDBAPI_COUNTERS(NAME_SUFFIX, ARRAY_LOCATION) \
747 {"api_wait_exec_complete_count" NAME_SUFFIX, \
748 (char*) ARRAY_LOCATION[ Ndb::WaitExecCompleteCount ], \
750 {"api_wait_scan_result_count" NAME_SUFFIX, \
751 (char*) ARRAY_LOCATION[ Ndb::WaitScanResultCount ], \
753 {"api_wait_meta_request_count" NAME_SUFFIX, \
754 (char*) ARRAY_LOCATION[ Ndb::WaitMetaRequestCount ], \
756 {"api_wait_nanos_count" NAME_SUFFIX, \
757 (char*) ARRAY_LOCATION[ Ndb::WaitNanosCount ], \
759 {"api_bytes_sent_count" NAME_SUFFIX, \
760 (char*) ARRAY_LOCATION[ Ndb::BytesSentCount ], \
762 {"api_bytes_received_count" NAME_SUFFIX, \
763 (char*) ARRAY_LOCATION[ Ndb::BytesRecvdCount ], \
765 {"api_trans_start_count" NAME_SUFFIX, \
766 (char*) ARRAY_LOCATION[ Ndb::TransStartCount ], \
768 {"api_trans_commit_count" NAME_SUFFIX, \
769 (char*) ARRAY_LOCATION[ Ndb::TransCommitCount ], \
771 {"api_trans_abort_count" NAME_SUFFIX, \
772 (char*) ARRAY_LOCATION[ Ndb::TransAbortCount ], \
774 {"api_trans_close_count" NAME_SUFFIX, \
775 (char*) ARRAY_LOCATION[ Ndb::TransCloseCount ], \
777 {"api_pk_op_count" NAME_SUFFIX, \
778 (char*) ARRAY_LOCATION[ Ndb::PkOpCount ], \
780 {"api_uk_op_count" NAME_SUFFIX, \
781 (char*) ARRAY_LOCATION[ Ndb::UkOpCount ], \
783 {"api_table_scan_count" NAME_SUFFIX, \
784 (char*) ARRAY_LOCATION[ Ndb::TableScanCount ], \
786 {"api_range_scan_count" NAME_SUFFIX, \
787 (char*) ARRAY_LOCATION[ Ndb::RangeScanCount ], \
789 {"api_pruned_scan_count" NAME_SUFFIX, \
790 (char*) ARRAY_LOCATION[ Ndb::PrunedScanCount ], \
792 {"api_scan_batch_count" NAME_SUFFIX, \
793 (char*) ARRAY_LOCATION[ Ndb::ScanBatchCount ], \
795 {"api_read_row_count" NAME_SUFFIX, \
796 (char*) ARRAY_LOCATION[ Ndb::ReadRowCount ], \
798 {"api_trans_local_read_row_count" NAME_SUFFIX, \
799 (char*) ARRAY_LOCATION[ Ndb::TransLocalReadRowCount ], \
802 SHOW_VAR ndb_status_variables_dynamic[]= {
803 {
"cluster_node_id", (
char*) &g_ndb_status.cluster_node_id, SHOW_LONG},
804 {
"config_from_host", (
char*) &g_ndb_status.connected_host, SHOW_CHAR_PTR},
805 {
"config_from_port", (
char*) &g_ndb_status.connected_port, SHOW_LONG},
807 {
"number_of_data_nodes",(
char*) &g_ndb_status.number_of_data_nodes, SHOW_LONG},
808 {
"number_of_ready_data_nodes",
809 (
char*) &g_ndb_status.number_of_ready_data_nodes, SHOW_LONG},
810 {
"connect_count", (
char*) &g_ndb_status.connect_count, SHOW_LONG},
811 {
"execute_count", (
char*) &g_ndb_status.execute_count, SHOW_LONG},
812 {
"scan_count", (
char*) &g_ndb_status.scan_count, SHOW_LONG},
813 {
"pruned_scan_count", (
char*) &g_ndb_status.pruned_scan_count, SHOW_LONG},
814 {
"schema_locks_count", (
char*) &g_ndb_status.schema_locks_count, SHOW_LONG},
815 NDBAPI_COUNTERS(
"_session", &g_ndb_status.api_client_stats),
816 {
"sorted_scan_count", (
char*) &g_ndb_status.sorted_scan_count, SHOW_LONG},
817 {
"pushed_queries_defined", (
char*) &g_ndb_status.pushed_queries_defined,
819 {
"pushed_queries_dropped", (
char*) &g_ndb_status.pushed_queries_dropped,
821 {
"pushed_queries_executed", (
char*) &g_ndb_status.pushed_queries_executed,
823 {
"pushed_reads", (
char*) &g_ndb_status.pushed_reads, SHOW_LONG},
824 {NullS, NullS, SHOW_LONG}
/* SHOW STATUS table: total conflict-violation counts per conflict
   resolution function (NDB$MAX, NDB$OLD, NDB$MAX_DELETE_WIN, NDB$EPOCH),
   read from g_ndb_slave_state. */
827 SHOW_VAR ndb_status_conflict_variables[]= {
828 {
"fn_max", (
char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_MAX], SHOW_LONGLONG},
829 {
"fn_old", (
char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_OLD], SHOW_LONGLONG},
830 {
"fn_max_del_win", (
char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_MAX_DEL_WIN], SHOW_LONGLONG},
831 {
"fn_epoch", (
char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH], SHOW_LONGLONG},
832 {NullS, NullS, SHOW_LONG}
/* SHOW STATUS table: binlog-injector event counters (data events,
   non-data events, total event bytes received). */
835 SHOW_VAR ndb_status_injector_variables[]= {
836 {
"api_event_data_count_injector", (
char*) &g_event_data_count, SHOW_LONGLONG},
837 {
"api_event_nondata_count_injector", (
char*) &g_event_nondata_count, SHOW_LONGLONG},
838 {
"api_event_bytes_count_injector", (
char*) &g_event_bytes_count, SHOW_LONGLONG},
839 {NullS, NullS, SHOW_LONG}
/* SHOW STATUS table: slave-side NDB API counters (suffix "_slave",
   sourced from g_slave_api_client_stats) plus the highest replicated
   epoch recorded by the slave state. */
842 SHOW_VAR ndb_status_slave_variables[]= {
843 NDBAPI_COUNTERS(
"_slave", &g_slave_api_client_stats),
844 {
"slave_max_replicated_epoch", (
char*) &g_ndb_slave_state.max_rep_epoch, SHOW_LONGLONG},
845 {NullS, NullS, SHOW_LONG}
848 SHOW_VAR ndb_status_server_client_stat_variables[]= {
849 NDBAPI_COUNTERS(
"", &g_server_api_client_stats),
850 {
"api_event_data_count",
851 (
char*) &g_server_api_client_stats[ Ndb::DataEventsRecvdCount ],
853 {
"api_event_nondata_count",
854 (
char*) &g_server_api_client_stats[ Ndb::NonDataEventsRecvdCount ],
856 {
"api_event_bytes_count",
857 (
char*) &g_server_api_client_stats[ Ndb::EventBytesRecvdCount ],
859 {NullS, NullS, SHOW_LONG}
/* SHOW STATUS callback: refresh the server-wide NDB API counters into
   g_server_api_client_stats and expose them as a nested SHOW_ARRAY of
   status variables. */
862 static int show_ndb_server_api_stats(THD *thd,
SHOW_VAR *var,
char *buff)
871 ndb_get_connection_stats((Uint64*) &g_server_api_client_stats[0]);
873 var->type= SHOW_ARRAY;
874 var->value= (
char*) ndb_status_server_client_stat_variables;
/* SHOW STATUS table: index-statistics cache counters (queried bytes and
   bytes pending clean-up). */
879 SHOW_VAR ndb_status_index_stat_variables[]= {
880 {
"cache_query", (
char*) &g_ndb_status_index_stat_cache_query, SHOW_LONG},
881 {
"cache_clean", (
char*) &g_ndb_status_index_stat_cache_clean, SHOW_LONG},
882 {NullS, NullS, SHOW_LONG}
885 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
895 static int ndb_to_mysql_error(
const NdbError *ndberr)
904 case HA_ERR_NO_SUCH_TABLE:
905 case HA_ERR_KEY_NOT_FOUND:
921 if (!current_thd->abort_on_warning)
929 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
930 ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
933 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
934 ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
940 #ifdef HAVE_NDB_BINLOG
943 static int write_conflict_row(
NDB_SHARE *share,
948 DBUG_ENTER(
"write_conflict_row");
951 NDB_CONFLICT_FN_SHARE *cfn_share= share->m_cfn_share;
952 const NDBTAB *ex_tab= cfn_share->m_ex_tab;
953 DBUG_ASSERT(ex_tab != NULL);
968 uint32 server_id= (uint32)::server_id;
969 uint32 master_server_id= (uint32) ndb_mi_get_master_server_id();
970 uint64 master_epoch= (uint64) g_ndb_slave_state.current_master_server_epoch;
971 uint32 count= (uint32)++(cfn_share->m_count);
972 if (ex_op->
setValue((Uint32)0, (
const char *)&(server_id)) ||
973 ex_op->
setValue((Uint32)1, (
const char *)&(master_server_id)) ||
974 ex_op->
setValue((Uint32)2, (
const char *)&(master_epoch)) ||
975 ex_op->
setValue((Uint32)3, (
const char *)&(count)))
983 const int fixed_cols= 4;
984 int nkey= cfn_share->m_pk_cols;
986 for (k= 0; k < nkey; k++)
988 DBUG_ASSERT(row != NULL);
989 const uchar* data= row + cfn_share->m_offset[k];
990 if (ex_op->
setValue((Uint32)(fixed_cols + k), (
const char*)data) == -1)
1001 #ifdef HAVE_NDB_BINLOG
1003 handle_conflict_op_error(
Thd_ndb* thd_ndb,
1009 handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share,
1010 const char* tab_name,
1012 const uchar* pk_row,
1013 enum_conflicting_op_type op_type,
1014 enum_conflict_cause conflict_cause,
1020 static const Uint32 error_op_after_refresh_op = 920;
1029 DBUG_ENTER(
"check_completed_operations_pre_commit");
1031 if (unlikely(first == 0))
1041 #ifdef HAVE_NDB_BINLOG
1042 const NdbOperation* lastUserOp = trans->getLastDefinedOperation();
1047 const bool op_has_conflict_detection = (first->getCustomData() != NULL);
1048 if (!op_has_conflict_detection)
1056 DBUG_PRINT(
"info", (
"err.code == %u", err.
code));
1057 DBUG_RETURN(err.
code);
1060 #ifdef HAVE_NDB_BINLOG
1069 int res = handle_conflict_op_error(thd_ndb,
1087 *ignore_count= ignores;
1088 #ifdef HAVE_NDB_BINLOG
1093 if (trans->getLastDefinedOperation() != lastUserOp)
1095 const NdbOperation* last_conflict_op = trans->getLastDefinedOperation();
1099 thd_ndb->m_force_send))
1112 assert(conflict_op != NULL);
1117 if ((err.
code != 0) &&
1118 (err.
code != (
int) error_op_after_refresh_op))
1127 char msg[FN_REFLEN];
1128 my_snprintf(msg,
sizeof(msg),
"Executing extra operations for "
1129 "conflict handling hit Ndb error %d '%s'",
1131 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_ERROR,
1132 ER_EXCEPTIONS_WRITE_ERROR,
1133 ER(ER_EXCEPTIONS_WRITE_ERROR), msg);
1135 DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR);
1138 }
while (conflict_op != last_conflict_op);
1152 DBUG_ENTER(
"check_completed_operations");
1154 if (unlikely(first == 0))
1170 #ifdef HAVE_NDB_BINLOG
1172 DBUG_ASSERT((err.
code != (
int) error_conflict_fn_violation) &&
1173 (err.
code != (
int) error_op_after_refresh_op));
1175 DBUG_RETURN(err.
code);
1186 *ignore_count= ignores;
/* Hand completed operation and query records back to the NdbTransaction
   so their memory can be reused — keeps long-running transactions from
   accumulating per-operation state. */
1191 ha_ndbcluster::release_completed_operations(
NdbTransaction *trans)
1201 trans->releaseCompletedOperations();
1202 trans->releaseCompletedQueries();
1207 uint *ignore_count= 0);
/* execute_no_commit (function header elided above this view): flush all
   currently defined operations with NoCommit semantics, reset the
   unsent-bytes counter, then validate the completed operations with
   check_completed_operations_pre_commit(). */
1213 DBUG_ENTER(
"execute_no_commit");
1214 ha_ndbcluster::release_completed_operations(trans);
1215 const NdbOperation *first= trans->getFirstDefinedOperation();
1216 const NdbOperation *last= trans->getLastDefinedOperation();
1217 thd_ndb->m_execute_count++;
1218 thd_ndb->m_unsent_bytes= 0;
1219 DBUG_PRINT(
"info", (
"execute_count: %u", thd_ndb->m_execute_count));
1222 thd_ndb->m_force_send))
1229 DBUG_RETURN(check_completed_operations_pre_commit(thd_ndb, trans,
1235 int force_send,
int ignore_error, uint *ignore_count= 0);
/* execute_commit (name/first parameters elided above this view): commit
   the transaction; on a slave thread, update g_ndb_slave_state for the
   abort/commit outcome, then validate completed operations. */
1238 int force_send,
int ignore_error, uint *ignore_count)
1240 DBUG_ENTER(
"execute_commit");
1242 if (thd_ndb->m_unsent_bytes && !ignore_error)
1251 const NdbOperation *first= trans->getFirstDefinedOperation();
1252 const NdbOperation *last= trans->getLastDefinedOperation();
1253 thd_ndb->m_execute_count++;
1254 thd_ndb->m_unsent_bytes= 0;
1255 DBUG_PRINT(
"info", (
"execute_count: %u", thd_ndb->m_execute_count));
1258 if (thd->slave_thread)
1259 g_ndb_slave_state.atTransactionAbort();
1263 if (thd->slave_thread)
1265 g_ndb_slave_state.atTransactionCommit();
1269 DBUG_RETURN(check_completed_operations(thd_ndb, trans, first, last,
/* execute_no_commit_ie (header elided above this view): execute with
   errors ignored ("ie"), releasing completed operations first and
   resetting the unsent-bytes counter afterwards. */
1276 DBUG_ENTER(
"execute_no_commit_ie");
1277 ha_ndbcluster::release_completed_operations(trans);
1280 thd_ndb->m_force_send);
1281 thd_ndb->m_unsent_bytes= 0;
1282 thd_ndb->m_execute_count++;
1283 DBUG_PRINT(
"info", (
"execute_count: %u", thd_ndb->m_execute_count));
/* Hash callback for Thd_ndb::open_tables: return a pointer to the
   THD_NDB_SHARE's key field and report its length. (The st_thd_ndb_share
   struct body is elided in this view.) */
1290 typedef struct st_thd_ndb_share {
1295 uchar *thd_ndb_share_get_key(THD_NDB_SHARE *thd_ndb_share,
size_t *length,
1296 my_bool not_used __attribute__((unused)))
1298 *length=
sizeof(thd_ndb_share->key);
1299 return (uchar*) &thd_ndb_share->key;
1302 Thd_ndb::Thd_ndb(THD* thd) :
1304 schema_locks_count(0)
1306 connection= ndb_get_cluster_connection();
1307 m_connect_count= connection->get_connect_count();
1308 ndb=
new Ndb(connection,
"");
1310 start_stmt_count= 0;
1311 save_point_count= 0;
1317 (void) my_hash_init(&
open_tables, table_alias_charset, 5, 0, 0,
1318 (my_hash_get_key)thd_ndb_share_get_key, 0, 0);
1322 m_pruned_scan_count= 0;
1323 m_sorted_scan_count= 0;
1324 m_pushed_queries_defined= 0;
1325 m_pushed_queries_dropped= 0;
1326 m_pushed_queries_executed= 0;
1328 memset(m_transaction_no_hint_count, 0,
sizeof(m_transaction_no_hint_count));
1329 memset(m_transaction_hint_count, 0,
sizeof(m_transaction_hint_count));
1330 global_schema_lock_trans= NULL;
1331 global_schema_lock_count= 0;
1332 global_schema_lock_error= 0;
1333 init_alloc_root(&m_batch_mem_root, BATCH_FLUSH_SIZE/4, 0);
1338 if (opt_ndb_extra_logging > 1)
1343 for (
int i= 0;
i < MAX_NDB_NODES;
i++)
1345 if (m_transaction_hint_count[
i] > 0 ||
1346 m_transaction_no_hint_count[
i] > 0)
1348 sql_print_information(
"tid %u: node[%u] "
1349 "transaction_hint=%u, transaction_no_hint=%u",
1350 (
unsigned)current_thd->thread_id,
i,
1351 m_transaction_hint_count[
i],
1352 m_transaction_no_hint_count[i]);
1361 changed_tables.empty();
1363 free_root(&m_batch_mem_root, MYF(0));
/* Return the per-THD Ndb object owned by this thread's Thd_ndb. */
1368 Ndb *ha_ndbcluster::get_ndb(THD *thd)
1370 return get_thd_ndb(thd)->ndb;
1377 void ha_ndbcluster::set_rec_per_key()
1379 DBUG_ENTER(
"ha_ndbcluster::set_rec_per_key");
1387 for (uint
i=0 ;
i < table_share->keys ;
i++)
1389 bool is_unique_index=
false;
1390 KEY* key_info= table->key_info +
i;
1391 switch (get_index_type(
i))
1394 case PRIMARY_KEY_INDEX:
1398 is_unique_index=
true;
1401 case UNIQUE_ORDERED_INDEX:
1402 case PRIMARY_KEY_ORDERED_INDEX:
1403 is_unique_index=
true;
1409 THD *thd= current_thd;
1410 const bool index_stat_enable= THDVAR(NULL, index_stat_enable) &&
1411 THDVAR(thd, index_stat_enable);
1412 if (index_stat_enable)
1414 int err= ndb_index_stat_set_rpk(
i);
1417 err != NdbIndexStat::NoIndexStats &&
1419 err != Ndb_index_stat_error_HAS_ERROR)
1421 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
1423 "index stats (RPK) for key %s:"
1424 " unexpected error %d",
1425 key_info->
name, err);
1435 if (is_unique_index)
/* ha_ndbcluster::records (header elided above this view): return the
   exact row count via update_stats(); HA_POS_ERROR on failure. */
1445 DBUG_ENTER(
"ha_ndbcluster::records");
1446 DBUG_PRINT(
"info", (
"id=%d, no_uncommitted_rows_count=%d",
1447 ((
const NDBTAB *)m_table)->getTableId(),
1448 m_table_info->no_uncommitted_rows_count));
1450 if (update_stats(table->in_use, 1) == 0)
1452 DBUG_RETURN(
stats.records);
1456 DBUG_RETURN(HA_POS_ERROR);
/* Flag the current Thd_ndb as errored after an execute failure so the
   uncommitted-row accounting is disregarded for this transaction. */
1460 void ha_ndbcluster::no_uncommitted_rows_execute_failure()
1462 DBUG_ENTER(
"ha_ndbcluster::no_uncommitted_rows_execute_failure");
1463 get_thd_ndb(current_thd)->m_error= TRUE;
/* Adjust this table's count of rows changed but not yet committed by c
   (negative c for deletes). Used to keep records() estimates accurate
   within an open transaction. */
1467 void ha_ndbcluster::no_uncommitted_rows_update(
int c)
1469 DBUG_ENTER(
"ha_ndbcluster::no_uncommitted_rows_update");
1471 local_info->no_uncommitted_rows_count+= c;
1472 DBUG_PRINT(
"info", (
"id=%d, no_uncommitted_rows_count=%d",
1473 ((
const NDBTAB *)m_table)->getTableId(),
1474 local_info->no_uncommitted_rows_count));
/* Reset the per-THD error flag and unsent-bytes counter at the start of
   a new statement/transaction. */
1478 void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
1480 DBUG_ENTER(
"ha_ndbcluster::no_uncommitted_rows_reset");
1481 Thd_ndb *thd_ndb= get_thd_ndb(thd);
1483 thd_ndb->m_error= FALSE;
1484 thd_ndb->m_unsent_bytes= 0;
1492 THD *thd= current_thd;
1495 DBUG_ENTER(
"ndb_err");
1501 m_table->setStatusInvalid();
1504 memset(&table_list, 0,
sizeof(table_list));
1505 table_list.db= m_dbname;
1506 table_list.alias= table_list.table_name= m_tabname;
1507 close_cached_tables(thd, &table_list, have_lock, FALSE, FALSE);
1513 res= ndb_to_mysql_error(&err);
1514 DBUG_PRINT(
"info", (
"transformed ndbcluster error %d to mysql error %d",
1516 if (res == HA_ERR_FOUND_DUPP_KEY)
1518 char *error_data= err.
details;
1519 uint dupkey= MAX_KEY;
1521 for (uint
i= 0;
i < MAX_KEY;
i++)
1523 if (m_index[
i].
type == UNIQUE_INDEX ||
1524 m_index[
i].
type == UNIQUE_ORDERED_INDEX)
1527 (
const NDBINDEX *) m_index[
i].unique_index;
1529 (
char *) unique_index->
getObjectId() == error_data)
1536 if (m_rows_to_insert == 1)
1543 m_dupkey= err.
code == 630 ? table_share->primary_key : dupkey;
1548 m_dupkey= (uint) -1;
/* ha_ndbcluster::get_error_message (header elided above this view):
   format the NDB error text for `error` into buf; the return value
   indicates whether the error is temporary. */
1563 DBUG_ENTER(
"ha_ndbcluster::get_error_message");
1564 DBUG_PRINT(
"enter", (
"error: %d", error));
1566 Ndb *ndb= check_ndb_in_thd(current_thd);
1573 DBUG_PRINT(
"exit", (
"message: %s, temporary: %d", buf->ptr(), temporary));
1574 DBUG_RETURN(temporary);
/* Number of bytes actually occupied by a field value: for VARCHAR it is
   the length-prefix bytes plus the current data length; for all other
   types the fixed pack length. */
1585 uint32 field_used_length(
const Field* field)
1587 if (field->type() == MYSQL_TYPE_VARCHAR)
1590 return f->length_bytes +
const_cast<Field_varstring*
>(f)->data_length();
1593 return field->pack_length();
/* True for MySQL field types that must live in the variable-sized part
   of an NDB row: VAR_STRING/VARCHAR and the BLOB/GEOMETRY family.
   (Return statements and default case are elided in this view.) */
1600 static bool field_type_forces_var_part(enum_field_types
type)
1603 case MYSQL_TYPE_VAR_STRING:
1604 case MYSQL_TYPE_VARCHAR:
1606 case MYSQL_TYPE_TINY_BLOB:
1607 case MYSQL_TYPE_BLOB:
1608 case MYSQL_TYPE_MEDIUM_BLOB:
1609 case MYSQL_TYPE_LONG_BLOB:
1610 case MYSQL_TYPE_GEOMETRY:
/* Account `size` more unsent batched bytes for this statement
   (the accumulation line is elided in this view — confirm) and report
   whether the configured batch size has been reached so the caller can
   flush. Starting a fresh batch recycles the batch mem-root. */
1622 ha_ndbcluster::add_row_check_if_batch_full_size(
Thd_ndb *thd_ndb, uint
size)
1624 if (thd_ndb->m_unsent_bytes == 0)
1625 free_root(&(thd_ndb->m_batch_mem_root), MY_MARK_BLOCKS_FREE);
1627 uint unsent= thd_ndb->m_unsent_bytes;
1629 thd_ndb->m_unsent_bytes= unsent;
1630 return unsent >= thd_ndb->m_batch_size;
/* Allocate `size` bytes from the per-statement batch memory root;
   freed wholesale when the batch is flushed. */
1654 ha_ndbcluster::get_buffer(
Thd_ndb *thd_ndb, uint size)
1656 return (uchar*)alloc_root(&(thd_ndb->m_batch_mem_root), size);
/* Copy `record` into a batch-buffer allocation so the row can outlive
   the caller's buffer until the batch is sent. (The allocation NULL
   check and return are elided in this view.) */
1660 ha_ndbcluster::copy_row_to_buffer(
Thd_ndb *thd_ndb,
const uchar *
record)
1662 uchar *row= get_buffer(thd_ndb, table->s->reclength);
1665 memcpy(row, record, table->s->reclength);
1678 if (error.
code != 0)
1683 if (error.
code != 0)
1688 if (error.
code != 0)
1696 int g_get_ndb_blobs_value(
NdbBlob *ndb_blob,
void *arg)
1699 DBUG_ENTER(
"g_get_ndb_blobs_value");
1700 DBUG_PRINT(
"info", (
"destination row: %p", ha->m_blob_destination_record));
1702 if (ha->m_blob_counter == 0)
1703 ha->m_blobs_row_total_size= 0;
1707 if (ndb_blob->getNull(isNull) != 0)
1714 ha->m_blobs_row_total_size+= (len64 + 7) & ~((Uint64)7);
1715 if (ha->m_blobs_row_total_size > 0xffffffff)
1720 DBUG_PRINT(
"info", (
"Blob number %d needs size %llu, total buffer reqt. now %llu",
1723 ha->m_blobs_row_total_size));
1725 ha->m_blob_counter++;
1731 if (ha->m_blob_counter < ha->m_blob_expected_count_per_row)
1735 ha->m_blob_counter= 0;
1738 if (ha->m_blobs_row_total_size > ha->m_blobs_buffer_size)
1740 my_free(ha->m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
1741 DBUG_PRINT(
"info", (
"allocate blobs buffer size %u",
1742 (uint32)(ha->m_blobs_row_total_size)));
1746 if(((
size_t)ha->m_blobs_row_total_size) != ha->m_blobs_row_total_size)
1748 ha->m_blobs_buffer= NULL;
1749 ha->m_blobs_buffer_size= 0;
1754 (uchar*) my_malloc((
size_t) ha->m_blobs_row_total_size, MYF(MY_WME));
1755 if (ha->m_blobs_buffer == NULL)
1757 ha->m_blobs_buffer_size= 0;
1760 ha->m_blobs_buffer_size= ha->m_blobs_row_total_size;
1769 for (uint
i= 0;
i < ha->table->s->fields;
i++)
1771 Field *field= ha->table->field[
i];
1772 if (! (field->flags & BLOB_FLAG))
1775 if (value.blob == NULL)
1777 DBUG_PRINT(
"info",(
"[%u] skipped",
i));
1781 NdbBlob *ndb_blob= value.blob;
1783 if (ndb_blob->getNull(isNull) != 0)
1789 DBUG_ASSERT(len64 < 0xffffffff);
1790 uchar *buf= ha->m_blobs_buffer +
offset;
1791 uint32 len= (uint32)(ha->m_blobs_buffer_size - offset);
1792 if (ndb_blob->
readData(buf, len) != 0)
1795 if (findBlobError(err, ndb_blob) == 0)
1802 assert(err.
code != 0);
1806 DBUG_PRINT(
"info", (
"[%u] offset: %u buf: 0x%lx len=%u",
1807 i, offset, (
long) buf, len));
1808 DBUG_ASSERT(len == len64);
1809 if (ha->m_blob_destination_record)
1811 my_ptrdiff_t ptrdiff=
1812 ha->m_blob_destination_record - ha->table->record[0];
1813 field_blob->move_field_offset(ptrdiff);
1814 field_blob->set_ptr(len, buf);
1815 field_blob->set_notnull();
1816 field_blob->move_field_offset(-ptrdiff);
1818 offset+= Uint32((len64 + 7) & ~((Uint64)7));
1820 else if (ha->m_blob_destination_record)
1823 my_ptrdiff_t ptrdiff=
1824 ha->m_blob_destination_record - ha->table->record[0];
1825 uchar *buf= ha->m_blobs_buffer +
offset;
1826 field_blob->move_field_offset(ptrdiff);
1827 field_blob->set_ptr((uint32)0, buf);
1828 field_blob->set_null();
1829 field_blob->move_field_offset(-ptrdiff);
1830 DBUG_PRINT(
"info", (
"[%u] isNull=%d",
i, isNull));
1834 if (!ha->m_active_cursor)
1840 for (uint
i= 0;
i < ha->table->s->fields;
i++)
1842 Field *field= ha->table->field[
i];
1843 if (! (field->flags & BLOB_FLAG))
1846 if (value.blob == NULL)
1848 DBUG_PRINT(
"info",(
"[%u] skipped",
i));
1851 NdbBlob *ndb_blob= value.blob;
1853 assert(ndb_blob->
getState() == NdbBlob::Active);
1861 if (ndb_blob->
close(
true) != 0)
1879 ha_ndbcluster::get_blob_values(
const NdbOperation *ndb_op, uchar *dst_record,
1883 DBUG_ENTER(
"ha_ndbcluster::get_blob_values");
1886 m_blob_expected_count_per_row= 0;
1887 m_blob_destination_record= dst_record;
1888 m_blobs_row_total_size= 0;
1890 setMaxPendingBlobReadBytes(THDVAR(current_thd, blob_read_batch_bytes));
1892 for (i= 0; i < table_share->fields; i++)
1894 Field *field= table->field[
i];
1895 if (!(field->flags & BLOB_FLAG))
1898 DBUG_PRINT(
"info", (
"fieldnr=%d", i));
1900 if (bitmap_is_set(bitmap, i))
1905 m_blob_expected_count_per_row++;
1910 m_value[
i].blob= ndb_blob;
/*
  set_blob_values: attach blob data from the mysqld row image (at
  row_offset) to the NDB write operation for every blob column selected by
  `bitmap`.  In batched mode the blob bytes are copied into a transaction
  buffer first, since the caller's row buffer may be reused before execute.
  Returns HA_ERR_OUT_OF_MEM on buffer allocation failure (visible below).
  NOTE(review): extraction fragment — loop header and several braces are
  missing; the embedded original line numbers are extraction artifacts.
*/
1917 ha_ndbcluster::set_blob_values(
const NdbOperation *ndb_op,
1918 my_ptrdiff_t row_offset,
const MY_BITMAP *bitmap,
1919 uint *set_count,
bool batch)
1922 uint *blob_index, *blob_index_end;
1924 DBUG_ENTER(
"ha_ndbcluster::set_blob_values");
/* Fast path: table has no blob columns at all. */
1928 if (table_share->blob_fields == 0)
1932 setMaxPendingBlobWriteBytes(THDVAR(current_thd, blob_write_batch_bytes));
1933 blob_index= table_share->blob_field;
1934 blob_index_end= blob_index + table_share->blob_fields;
1937 field_no= *blob_index;
/* Skip blob columns not selected for writing. */
1939 if (bitmap && !bitmap_is_set(bitmap, field_no))
1941 Field *field= table->field[field_no];
1944 if (ndb_blob == NULL)
1946 if (field->is_real_null(row_offset))
1948 DBUG_PRINT(
"info", (
"Setting Blob %d to NULL", field_no));
1957 const uchar *field_ptr= field->ptr + row_offset;
1958 uint32 blob_len= field_blob->get_length(field_ptr);
1959 uchar* blob_ptr= NULL;
1960 field_blob->get_ptr(&blob_ptr);
/* Empty blob: use a non-NULL dummy pointer so setValue gets valid data. */
1963 if (blob_ptr == NULL) {
1964 DBUG_ASSERT(blob_len == 0);
1965 blob_ptr= (uchar*)
"";
1968 DBUG_PRINT(
"value", (
"set blob ptr: 0x%lx len: %u",
1969 (
long) blob_ptr, blob_len));
1970 DBUG_DUMP(
"value", blob_ptr, MIN(blob_len, 26));
/* Batched write: copy blob bytes so the caller may reuse its buffer. */
1976 if (batch && blob_len > 0)
1978 uchar *tmp_buf= get_buffer(m_thd_ndb, blob_len);
1980 DBUG_RETURN(HA_ERR_OUT_OF_MEM);
1981 memcpy(tmp_buf, blob_ptr, blob_len);
1984 res= ndb_blob->
setValue((
char*)blob_ptr, blob_len);
1990 }
while (++blob_index != blob_index_end);
/*
  get_ndb_blobs_value: two-pass unpacking of blob values into a single
  heap buffer.  Pass 0 (loop==0) sums the 8-byte-aligned sizes of all
  non-NULL blobs; if that total exceeds buffer_size the buffer is
  reallocated.  Pass 1 reads each blob's data into its slice of the buffer
  and points the Field_blob (at ptrdiff from record[0]) at it.
  NOTE(review): extraction fragment — braces/returns missing; embedded
  original line numbers are extraction artifacts.
*/
2000 int get_ndb_blobs_value(
TABLE* table,
NdbValue* value_array,
2001 uchar*& buffer, uint& buffer_size,
2002 my_ptrdiff_t ptrdiff)
2004 DBUG_ENTER(
"get_ndb_blobs_value");
/* loop==0: size pass; loop==1: read pass. */
2008 for (
int loop= 0; loop <= 1; loop++)
2011 for (uint i= 0; i < table->s->fields; i++)
2013 Field *field= table->field[
i];
2015 if (! (field->flags & BLOB_FLAG))
2017 if (value.blob == NULL)
2019 DBUG_PRINT(
"info",(
"[%u] skipped", i));
2023 NdbBlob *ndb_blob= value.blob;
2025 if (ndb_blob->getNull(isNull) != 0)
/* Round each blob slot up to an 8-byte boundary inside the buffer. */
2032 uint32 size= Uint32(len64);
2034 size+= 8 - size % 8;
2037 uchar *buf= buffer +
offset;
/* len is in/out for readData: pass capacity sentinel, get actual length. */
2038 uint32 len= 0xffffffff;
2039 if (ndb_blob->
readData(buf, len) != 0)
2041 DBUG_PRINT(
"info", (
"[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]",
2042 i, offset, (
long) buf, len, (
int)ptrdiff));
2043 DBUG_ASSERT(len == len64);
2045 field_blob->set_ptr_offset(ptrdiff, len, buf);
2052 uchar *buf= buffer +
offset;
2054 field_blob->set_ptr_offset(ptrdiff, len, buf);
2055 DBUG_PRINT(
"info", (
"[%u] isNull=%d", i, isNull));
/* After the sizing pass: grow the shared buffer if it is too small. */
2058 if (loop == 0 && offset > buffer_size)
2060 my_free(buffer, MYF(MY_ALLOW_ZERO_PTR));
2062 DBUG_PRINT(
"info", (
"allocate blobs buffer size %u", offset));
2063 buffer= (uchar*) my_malloc(offset, MYF(MY_WME));
2066 sql_print_error(
"ha_ndbcluster::get_ndb_blobs_value: "
2067 "my_malloc(%u) failed", offset);
/*
  uses_blob_value: true iff any blob column of the table is selected in
  `bitmap` (read_set or write_set).  Iterates the share's blob_field index
  list rather than all fields.
  NOTE(review): extraction fragment — the do/while loop header and the
  returns are among the missing lines.
*/
2081 bool ha_ndbcluster::uses_blob_value(
const MY_BITMAP *bitmap)
const
2083 uint *blob_index, *blob_index_end;
2084 if (table_share->blob_fields == 0)
2087 blob_index= table_share->blob_field;
2088 blob_index_end= blob_index + table_share->blob_fields;
2091 if (bitmap_is_set(bitmap, table->field[*blob_index]->field_index))
2093 }
while (++blob_index != blob_index_end);
/*
  release_blobs_buffer: free the shared blob-unpack buffer (if any) and
  reset the size bookkeeping.  my_free with MY_ALLOW_ZERO_PTR tolerates a
  NULL buffer pointer.
*/
2097 void ha_ndbcluster::release_blobs_buffer()
2099 DBUG_ENTER(
"releaseBlobsBuffer");
2100 if (m_blobs_buffer_size > 0)
2102 DBUG_PRINT(
"info", (
"Deleting blobs buffer, size %llu", m_blobs_buffer_size));
2103 my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
2105 m_blobs_row_total_size= 0;
2106 m_blobs_buffer_size= 0;
/*
  cmp_frm: compare a packed local .frm image against the frm blob stored
  in the NDB dictionary table object.  Differs (non-zero result — return
  lines dropped by extraction) when either length or bytes mismatch.
*/
2123 int cmp_frm(
const NDBTAB *ndbtab,
const void *pack_data,
2126 DBUG_ENTER(
"cmp_frm");
2130 if ((pack_length != ndbtab->getFrmLength()) ||
2131 (memcmp(pack_data, ndbtab->
getFrmData(), pack_length)))
/*
  type_supports_default_value: whether a native NDB default value can be
  stored for this MySQL column type.  Blob types and GEOMETRY cannot carry
  native defaults.
*/
2140 type_supports_default_value(enum_field_types mysql_type)
2142 bool ret = (mysql_type != MYSQL_TYPE_BLOB &&
2143 mysql_type != MYSQL_TYPE_TINY_BLOB &&
2144 mysql_type != MYSQL_TYPE_MEDIUM_BLOB &&
2145 mysql_type != MYSQL_TYPE_LONG_BLOB &&
2146 mysql_type != MYSQL_TYPE_GEOMETRY);
/*
  check_default_values: sanity-check (debug aid) that the column defaults
  stored natively in the NDB dictionary match the defaults recorded in the
  MySQL .frm (table_share->default_values).  Returns 0 when aligned,
  -1 otherwise.  Columns that cannot or must not have native defaults
  (PK columns, NO_DEFAULT_VALUE columns, blob/geometry types) are instead
  verified to have no native default.
  NOTE(review): extraction fragment — several braces and intermediate
  statements are missing.
*/
2160 int ha_ndbcluster::check_default_values(
const NDBTAB* ndbtab)
2165 bool defaults_aligned=
true;
2167 if (ndbtab->hasDefaultValues())
/* Temporarily mark all columns readable so Field accessors work. */
2170 my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
2172 for (uint f=0; f < table_share->fields; f++)
2174 Field* field= table->field[f];
/* Only columns eligible for a native default are compared by value. */
2177 if ((! (field->flags & (PRI_KEY_FLAG |
2178 NO_DEFAULT_VALUE_FLAG))) &&
2179 type_supports_default_value(field->real_type()))
/* Point the Field at the share's default_values image. */
2184 my_ptrdiff_t src_offset= table_share->default_values -
2185 field->table->record[0];
2188 field->move_field_offset(src_offset);
2190 const uchar* ndb_default= (
const uchar*) ndbCol->getDefaultValue();
2192 if (ndb_default == NULL)
2194 defaults_aligned= field->is_null();
2197 if (field->type() != MYSQL_TYPE_BIT)
2199 defaults_aligned= (0 == field->cmp(ndb_default));
/* BIT columns: rebuild the bit pattern into 32-bit words and memcmp. */
2203 longlong value= (
static_cast<Field_bit*
>(field))->val_int();
2208 for (
int b=0; b < 64; b++)
2210 out[b >> 5] |= (value & 1) << (b & 31);
2214 Uint32 defaultLen = field_used_length(field);
2215 defaultLen = ((defaultLen + 3) & ~(Uint32)0x7);
2216 defaults_aligned= (0 == memcmp(ndb_default,
2222 field->move_field_offset(-src_offset);
2224 if (unlikely(!defaults_aligned))
2226 DBUG_PRINT(
"info", (
"Default values differ for column %u",
2227 field->field_index));
/* Ineligible columns must NOT have a native default in NDB. */
2234 if (unlikely(ndbCol->getDefaultValue() != NULL))
2237 DBUG_PRINT(
"info", (
"Column %u has native default, but shouldn't."
2238 " Flags=%u, type=%u",
2239 field->field_index, field->flags, field->real_type()));
2240 defaults_aligned=
false;
2245 tmp_restore_column_map(table->read_set, old_map);
2248 return (defaults_aligned? 0: -1);
/*
  get_metadata: open the NDB dictionary metadata for this table.
  Steps visible below: pack the local .frm and compare it against the
  dictionary copy (HA_ERR_TABLE_DEF_CHANGED on mismatch unless the share
  is in NSS_ALTERED state), set up the column bitmap and NdbRecord
  objects (hidden-PK record when there is no primary key), open all index
  handles, and backfill table_share->tablespace for pre-5.1.20 frms.
  NOTE(review): extraction fragment — error-path labels, returns and many
  braces are missing; embedded original line numbers are artifacts.
*/
2251 int ha_ndbcluster::get_metadata(THD *thd,
const char *path)
2253 Ndb *ndb= get_thd_ndb(thd)->ndb;
2257 DBUG_ENTER(
"get_metadata");
2258 DBUG_PRINT(
"enter", (
"m_tabname: %s, path: %s", m_tabname, path));
2260 DBUG_ASSERT(m_table == NULL);
2261 DBUG_ASSERT(m_table_info == NULL);
2263 uchar *data= NULL, *pack_data= NULL;
2264 size_t length, pack_length;
/* Read and pack the local .frm; on failure free whatever was allocated. */
2270 if (
readfrm(path, &data, &length) ||
2271 packfrm(data, length, &pack_data, &pack_length))
2273 my_free(data, MYF(MY_ALLOW_ZERO_PTR));
2274 my_free(pack_data, MYF(MY_ALLOW_ZERO_PTR));
2280 if (!(tab= ndbtab_g.get_table()))
/* Local frm must match the dictionary copy unless an ALTER is underway. */
2283 if (get_ndb_share_state(m_share) != NSS_ALTERED
2284 && cmp_frm(tab, pack_data, pack_length))
2287 (
"metadata, pack_length: %lu getFrmLength: %d memcmp: %d",
2288 (ulong) pack_length, tab->getFrmLength(),
2289 memcmp(pack_data, tab->
getFrmData(), pack_length)));
2290 DBUG_DUMP(
"pack_data", (uchar*) pack_data, pack_length);
2291 DBUG_DUMP(
"frm", (uchar*) tab->
getFrmData(), tab->getFrmLength());
2292 error= HA_ERR_TABLE_DEF_CHANGED;
2294 my_free((
char*)data, MYF(0));
2295 my_free((
char*)pack_data, MYF(0));
2300 DBUG_ASSERT(check_default_values(tab) == 0);
2305 DBUG_PRINT(
"info", (
"fetched table %s", tab->
getName()));
2308 if (bitmap_init(&m_bitmap, m_bitmap_buf, table_share->fields, 0))
2310 error= HA_ERR_OUT_OF_MEM;
/* No primary key: an extra NdbRecord for the hidden NDB key is needed. */
2313 if (table_share->primary_key == MAX_KEY)
2316 if ((error= add_hidden_pk_ndb_record(dict)) != 0)
2320 if ((error= add_table_ndb_record(dict)) != 0)
/* Rough bytes-per-write estimate used elsewhere for batching decisions
   — presumably; TODO confirm against callers. */
2326 m_bytes_per_write= 12 + tab->getRowSizeInBytes() + 4 * tab->
getNoOfColumns();
2329 if ((error= open_indexes(thd, ndb, table, FALSE)) != 0)
/* Pre-5.1.20 frms lack tablespace info: recover it from the dictionary. */
2336 if (table_share->mysql_version < 50120 &&
2337 !table_share->tablespace )
2340 if (tab->getTablespace(&
id))
2346 const char *tablespace= ts.getName();
2347 const size_t tablespace_len= strlen(tablespace);
2348 if (tablespace_len != 0)
2350 DBUG_PRINT(
"info", (
"Found tablespace '%s'", tablespace));
2351 table_share->tablespace= strmake_root(&table_share->mem_root,
2361 #ifdef HAVE_NDB_BINLOG
2362 ndbcluster_read_binlog_replication(thd, ndb, m_share, m_table,
2363 ::server_id, table, FALSE);
/* Error path: drop the cached dictionary object so it is refetched. */
2369 ndbtab_g.invalidate();
/*
  NOTE(review): body fragment of fix_unique_index_attr_order — the
  function signature was dropped by extraction.  Builds
  data.unique_index_attrid_map: for each key part, the position (j) of
  the matching column in the NDB unique index, found by column-name
  comparison.  255 is the "not found" sentinel, asserted against at the
  end.
*/
2378 DBUG_ENTER(
"fix_unique_index_attr_order");
/* Replace any previous map before allocating a fresh one. */
2381 if (data.unique_index_attrid_map)
2382 my_free((
char*)data.unique_index_attrid_map, MYF(0));
2383 data.unique_index_attrid_map= (uchar*)my_malloc(sz,MYF(MY_WME));
2384 if (data.unique_index_attrid_map == 0)
2386 sql_print_error(
"fix_unique_index_attr_order: my_malloc(%u) failure",
2388 DBUG_RETURN(HA_ERR_OUT_OF_MEM);
2394 for (
unsigned i= 0; key_part != end; key_part++, i++)
2396 const char *field_name= key_part->field->field_name;
/* 255 == sentinel for "column not yet located in the NDB index". */
2398 data.unique_index_attrid_map[
i]= 255;
2400 for (
unsigned j= 0; j < sz; j++)
2403 if (strcmp(field_name, c->
getName()) == 0)
2405 data.unique_index_attrid_map[
i]= j;
2409 DBUG_ASSERT(data.unique_index_attrid_map[i] != 255);
/*
  create_indexes: create one NDB index per MySQL key of the table, using
  the key's name and the index type derived from the table definition.
  Stops logging (and presumably returns the error — return lines dropped
  by extraction) when create_index fails.
*/
2419 int ha_ndbcluster::create_indexes(THD *thd,
Ndb *ndb,
TABLE *tab)
2423 const char *index_name;
2424 KEY* key_info= tab->key_info;
2425 const char **key_name= tab->s->keynames.type_names;
2426 DBUG_ENTER(
"ha_ndbcluster::create_indexes");
2428 for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
2430 index_name= *key_name;
2431 NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
2432 error= create_index(thd, index_name, key_info, idx_type, i);
2435 DBUG_PRINT(
"error", (
"Failed to create index %u", i));
/*
  NOTE(review): two helper bodies whose signatures were dropped by
  extraction.  First (orig 2445-2452): ndb_init_index — reset an
  NDB_INDEX_DATA slot to its pristine state.  Second (orig 2457-2467):
  ndb_clear_index — free the attr-id map, release all NdbRecord objects
  for the index, then re-init the slot.
*/
2445 data.type= UNDEFINED_INDEX;
2446 data.status= UNDEFINED;
2447 data.unique_index= NULL;
2449 data.unique_index_attrid_map= NULL;
2450 data.ndb_record_key= NULL;
2451 data.ndb_unique_record_key= NULL;
2452 data.ndb_unique_record_row= NULL;
/* --- ndb_clear_index body starts here --- */
2457 if (data.unique_index_attrid_map)
2459 my_free((
char*)data.unique_index_attrid_map, MYF(0));
2461 if (data.ndb_unique_record_key)
2462 dict->releaseRecord(data.ndb_unique_record_key);
2463 if (data.ndb_unique_record_row)
2464 dict->releaseRecord(data.ndb_unique_record_row);
2465 if (data.ndb_record_key)
2466 dict->releaseRecord(data.ndb_record_key);
2467 ndb_init_index(data);
/*
  ndb_protect_char: copy `from` into `to` (capacity to_length, always
  NUL-terminated by the caller's convention) replacing every occurrence
  of `protect` with the escape sequence "00<code>" produced by sprintf.
  The tpos < to_length - 5 guard leaves room for the escape expansion.
  NOTE(review): extraction fragment — braces and the trailing
  NUL-termination line are missing.
*/
2471 void ndb_protect_char(
const char* from,
char*
to, uint to_length,
char protect)
2473 uint fpos= 0, tpos= 0;
2475 while(from[fpos] !=
'\0' && tpos < to_length - 1)
2477 if (from[fpos] == protect)
2481 if(tpos < to_length - 5)
2483 len= sprintf(to+tpos,
"00%u", (uint) protect);
2489 to[tpos++]= from[fpos];
/*
  add_index_handle: open the global dictionary handle(s) for MySQL key
  `index_no`.  Ordered index types get the plain index handle; unique
  types additionally open the "<name>$unique" index and build the
  attr-id order map.  Finally creates the NdbRecord objects and marks the
  index ACTIVE.  Index names are escaped with ndb_protect_char('/')
  because '/' is significant in NDB object names.
  NOTE(review): extraction fragment — error checks/returns between the
  visible lines are missing.
*/
2500 int ha_ndbcluster::add_index_handle(THD *thd,
NDBDICT *dict,
KEY *key_info,
2501 const char *key_name, uint index_no)
2503 char index_name[FN_LEN + 1];
2506 NDB_INDEX_TYPE idx_type= get_index_type_from_table(index_no);
2507 m_index[index_no].type= idx_type;
2508 DBUG_ENTER(
"ha_ndbcluster::add_index_handle");
2509 DBUG_PRINT(
"enter", (
"table %s", m_tabname));
/* Escape '/' in the key name to form a valid NDB index name. */
2511 ndb_protect_char(key_name, index_name,
sizeof(index_name) - 1,
'/');
2512 if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX)
2514 DBUG_PRINT(
"info", (
"Get handle to index %s", index_name));
2518 index= dict->getIndexGlobal(index_name, *m_table);
2521 DBUG_PRINT(
"info", (
"index: 0x%lx id: %d version: %d.%d status: %d",
2524 index->getObjectVersion() & 0xFFFFFF,
2525 index->getObjectVersion() >> 24,
2526 index->getObjectStatus()));
2527 DBUG_ASSERT(index->getObjectStatus() ==
2531 m_index[index_no].index=
index;
/* Unique key types also need the generated "$unique" hash index. */
2533 if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
2535 char unique_index_name[FN_LEN + 1];
2536 static const char* unique_suffix=
"$unique";
2537 m_has_unique_index= TRUE;
2538 strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
2539 DBUG_PRINT(
"info", (
"Get handle to unique_index %s", unique_index_name));
2543 index= dict->getIndexGlobal(unique_index_name, *m_table);
2546 DBUG_PRINT(
"info", (
"index: 0x%lx id: %d version: %d.%d status: %d",
2549 index->getObjectVersion() & 0xFFFFFF,
2550 index->getObjectVersion() >> 24,
2551 index->getObjectStatus()));
2552 DBUG_ASSERT(index->getObjectStatus() ==
2556 m_index[index_no].unique_index=
index;
2557 error= fix_unique_index_attr_order(m_index[index_no], index, key_info);
2561 error= add_index_ndb_record(dict, key_info, index_no);
2564 m_index[index_no].status= ACTIVE;
/*
  null_bit_mask_to_bit_number: map a single-bit null mask to its bit
  index (log2 of the mask).
  NOTE(review): extraction fragment — the 0x01-0x08 cases and the
  default/assert are among the missing lines.
*/
2574 null_bit_mask_to_bit_number(uchar bit_mask)
2582 case 0x10:
return 4;
2583 case 0x20:
return 5;
2584 case 0x40:
return 6;
2585 case 0x80:
return 7;
/*
  ndb_set_record_specification: fill one NdbDictionary::RecordSpecification
  entry for a column: NDB column object, byte offset of the field inside
  record[0], and the location of its null bit.  BIT columns store their
  value bits where the null information would be, so the bit pointer is
  used instead; NOT NULL non-bit columns get zeroed null info.
*/
2593 ndb_set_record_specification(uint field_no,
2598 spec->column= ndb_table->
getColumn(field_no);
2599 spec->offset= Uint32(table->field[field_no]->ptr - table->record[0]);
2600 if (table->field[field_no]->
null_ptr)
2602 spec->nullbit_byte_offset=
2603 Uint32(table->field[field_no]->
null_ptr - table->record[0]);
2604 spec->nullbit_bit_in_byte=
2605 null_bit_mask_to_bit_number(table->field[field_no]->null_bit);
/* BIT columns keep value bits via bit_ptr/bit_ofs instead of null info. */
2607 else if (table->field[field_no]->type() == MYSQL_TYPE_BIT)
2611 spec->nullbit_byte_offset=
2612 Uint32(field_bit->bit_ptr - table->record[0]);
2613 spec->nullbit_bit_in_byte= field_bit->bit_ofs;
2617 spec->nullbit_byte_offset= 0;
2618 spec->nullbit_bit_in_byte= 0;
/*
  add_table_ndb_record: build the NdbRecord describing the whole mysqld
  row format (one RecordSpecification per field) and create it in the
  dictionary with mysqld BIT-field layout.
  NOTE(review): extraction fragment — error handling and the assignment
  of `rec` to m_ndb_record are among the missing lines.
*/
2623 ha_ndbcluster::add_table_ndb_record(
NDBDICT *dict)
2625 DBUG_ENTER(
"ha_ndbcluster::add_table_ndb_record()");
2630 for (i= 0; i < table_share->fields; i++)
2632 ndb_set_record_specification(i, &spec[i], table, m_table);
2635 rec= dict->createRecord(m_table, spec, i,
sizeof(spec[0]),
2636 NdbDictionary::RecMysqldBitfield);
/*
  add_hidden_pk_ndb_record: for tables without a MySQL primary key, build
  a one-column NdbRecord over NDB's hidden key column (which sits at
  index table_share->fields, just past the visible columns).
*/
2646 ha_ndbcluster::add_hidden_pk_ndb_record(
NDBDICT *dict)
2648 DBUG_ENTER(
"ha_ndbcluster::add_hidden_pk_ndb_record");
2652 spec[0].column= m_table->
getColumn(table_share->fields);
2654 spec[0].nullbit_byte_offset= 0;
2655 spec[0].nullbit_bit_in_byte= 0;
2657 rec= dict->createRecord(m_table, spec, 1,
sizeof(spec[0]));
2660 m_ndb_hidden_key_record=
rec;
/*
  add_index_ndb_record: create the NdbRecord objects for one MySQL key.
  First a key-format record (packed key layout: nullable key parts get a
  leading null byte at `offset`, value at offset+1) used as
  ndb_record_key / ndb_unique_record_key; then a row-format record (key
  columns at their positions inside record[0]) used as
  ndb_unique_record_row.  The primary key reuses records built on the
  base table instead of a unique index.
  NOTE(review): extraction fragment — the key-part loops' headers, error
  checks and several braces are missing.
*/
2666 ha_ndbcluster::add_index_ndb_record(
NDBDICT *dict,
KEY *key_info, uint index_no)
2668 DBUG_ENTER(
"ha_ndbcluster::add_index_ndb_record");
/* Key-format spec: kp->fieldnr is 1-based, hence the -1. */
2677 spec[
i].column= m_table->
getColumn(kp->fieldnr - 1);
2678 if (! spec[i].column)
/* Nullable key part: null byte first, value follows at offset+1. */
2683 spec[
i].offset= offset + 1;
2684 spec[
i].nullbit_byte_offset=
offset;
2685 spec[
i].nullbit_bit_in_byte= 0;
2691 spec[
i].nullbit_byte_offset= 0;
2692 spec[
i].nullbit_bit_in_byte= 0;
2694 offset+= kp->store_length;
2697 if (m_index[index_no].index)
2704 rec= dict->createRecord(m_index[index_no].index, m_table,
2706 ( NdbDictionary::RecMysqldShrinkVarchar |
2707 NdbDictionary::RecMysqldBitfield ));
2710 m_index[index_no].ndb_record_key=
rec;
2713 m_index[index_no].ndb_record_key= NULL;
2715 if (m_index[index_no].unique_index)
2717 rec= dict->createRecord(m_index[index_no].unique_index, m_table,
2719 ( NdbDictionary::RecMysqldShrinkVarchar |
2720 NdbDictionary::RecMysqldBitfield ));
2723 m_index[index_no].ndb_unique_record_key=
rec;
/* Primary key: key record is built on the base table itself. */
2725 else if (index_no == table_share->primary_key)
2728 rec= dict->createRecord(m_table,
2730 ( NdbDictionary::RecMysqldShrinkVarchar |
2731 NdbDictionary::RecMysqldBitfield ));
2734 m_index[index_no].ndb_unique_record_key=
rec;
2737 m_index[index_no].ndb_unique_record_key= NULL;
/* Row-format spec: key columns addressed at their record[0] offsets. */
2744 spec[
i].offset= kp->offset;
2748 spec[
i].nullbit_byte_offset= kp->null_offset;
2749 spec[
i].nullbit_bit_in_byte= null_bit_mask_to_bit_number(kp->null_bit);
2754 spec[
i].nullbit_byte_offset= 0;
2755 spec[
i].nullbit_bit_in_byte= 0;
2759 if (m_index[index_no].unique_index)
2761 rec= dict->createRecord(m_index[index_no].unique_index, m_table,
2763 NdbDictionary::RecMysqldBitfield);
2766 m_index[index_no].ndb_unique_record_row=
rec;
2768 else if (index_no == table_share->primary_key)
2770 rec= dict->createRecord(m_table,
2772 NdbDictionary::RecMysqldBitfield);
2775 m_index[index_no].ndb_unique_record_row=
rec;
2778 m_index[index_no].ndb_unique_record_row= NULL;
/*
  open_indexes: open a dictionary handle for every MySQL key of the
  table, record which keys support range scans (btree_keys bitmap) and
  which unique indexes contain nullable columns.  On error (unless
  ignore_error) all handles opened so far are released again; error 4243
  ("index not found") is the only error tolerated with ignore_error.
  NOTE(review): extraction fragment — returns and several braces missing.
*/
2786 int ha_ndbcluster::open_indexes(THD *thd,
Ndb *ndb,
TABLE *tab,
2792 KEY* key_info= tab->key_info;
2793 const char **key_name= tab->s->keynames.type_names;
2794 DBUG_ENTER(
"ha_ndbcluster::open_indexes");
2795 m_has_unique_index= FALSE;
2796 btree_keys.clear_all();
2797 for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
2799 if ((error= add_index_handle(thd, dict, key_info, *key_name, i)))
2802 m_index[
i].index= m_index[
i].unique_index= NULL;
2806 m_index[
i].null_in_unique_index= FALSE;
2807 if (check_index_fields_not_null(key_info))
2808 m_index[
i].null_in_unique_index= TRUE;
/* Keys with HA_READ_RANGE capability can serve ordered range scans. */
2810 if (error == 0 &&
test(index_flags(i, 0, 0) & HA_READ_RANGE))
2811 btree_keys.set_bit(i);
/* Roll back: release every handle opened before the failure. */
2814 if (error && !ignore_error)
2819 if (m_index[i].index)
2821 dict->removeIndexGlobal(*m_index[i].index, 1);
2822 m_index[
i].index= NULL;
2824 if (m_index[i].unique_index)
2826 dict->removeIndexGlobal(*m_index[i].unique_index, 1);
2827 m_index[
i].unique_index= NULL;
2832 DBUG_ASSERT(error == 0 || error == 4243);
/*
  renumber_indexes: after an index drop, compact the m_index[] array by
  shifting each TO_BE_DROPPED entry toward the end of the defined range,
  so entry positions again match MySQL key numbers.
  NOTE(review): extraction fragment — the shift loop's surrounding lines
  (e.g. saving/restoring `tmp` at position j-1) are partly missing.
*/
2841 void ha_ndbcluster::renumber_indexes(
Ndb *ndb,
TABLE *tab)
2844 const char *index_name;
2845 KEY* key_info= tab->key_info;
2846 const char **key_name= tab->s->keynames.type_names;
2847 DBUG_ENTER(
"ha_ndbcluster::renumber_indexes");
2849 for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
2851 index_name= *key_name;
2852 NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
2853 m_index[
i].type= idx_type;
2854 if (m_index[i].status == TO_BE_DROPPED)
2856 DBUG_PRINT(
"info", (
"Shifting index %s(%i) out of the list",
/* Bubble the dropped slot past all still-defined entries. */
2861 while(j != MAX_KEY && m_index[j].status != UNDEFINED)
2863 tmp= m_index[j - 1];
2864 m_index[j - 1]= m_index[j];
/*
  drop_indexes: physically drop every index whose status is
  TO_BE_DROPPED — first the ordered index, then (if still error-free) the
  "$unique" companion — releasing the global dictionary handles and
  clearing the m_index[] slot afterwards.
  NOTE(review): extraction fragment — error propagation between the
  visible statements is missing.
*/
2877 int ha_ndbcluster::drop_indexes(
Ndb *ndb,
TABLE *tab)
2881 const char *index_name;
2882 KEY* key_info= tab->key_info;
2884 DBUG_ENTER(
"ha_ndbcluster::drop_indexes");
2886 for (i= 0; i < tab->s->keys; i++, key_info++)
2888 NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
2889 m_index[
i].type= idx_type;
2890 if (m_index[i].status == TO_BE_DROPPED)
2898 DBUG_PRINT(
"info", (
"Dropping index %u: %s", i, index_name));
2900 error= dict->dropIndexGlobal(*index);
2903 dict->removeIndexGlobal(*index, 1);
2904 m_index[
i].index= NULL;
2907 if (!error && unique_index)
2909 index_name= unique_index->
getName();
2910 DBUG_PRINT(
"info", (
"Dropping unique index %u: %s", i, index_name));
2912 error= dict->dropIndexGlobal(*unique_index);
2915 dict->removeIndexGlobal(*unique_index, 1);
2916 m_index[
i].unique_index= NULL;
2921 ndb_clear_index(dict, m_index[i]);
/*
  get_index_type_from_table: classify key `inx` via the shared key_info,
  flagging whether it is the table's primary key.
*/
2933 NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx)
const
2935 return get_index_type_from_key(inx, table_share->key_info,
2936 inx == table_share->primary_key);
/*
  get_index_type_from_key: map a MySQL key definition to an NDB index
  type.  Hash algorithm → plain (non-ordered) variants; HA_NOSAME →
  unique variants; otherwise an ordered index.
  NOTE(review): extraction fragment — the algorithm comparand and the
  final ORDERED_INDEX arm were dropped.
*/
2939 NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx,
2943 bool is_hash_index= (key_info[inx].algorithm ==
2946 return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
2948 return ((key_info[inx].flags & HA_NOSAME) ?
2949 (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) :
/*
  check_index_fields_not_null: scan the key parts; signals (via the
  dropped return lines) whether any key column is nullable.
*/
2953 bool ha_ndbcluster::check_index_fields_not_null(
KEY* key_info)
2957 DBUG_ENTER(
"ha_ndbcluster::check_index_fields_not_null");
2959 for (; key_part != end; key_part++)
2961 Field* field= key_part->field;
2962 if (field->maybe_null())
/*
  release_metadata: release every dictionary object held by this handler
  — the row/hidden-key NdbRecords, the global table object, and all
  per-key index handles.  FLUSH TABLES (SQLCOM_FLUSH) forces
  invalidation of the cached dictionary entries as well.
  NOTE(review): extraction fragment — some braces and cleanup lines
  (e.g. m_ndb_record reset) are missing.
*/
2969 void ha_ndbcluster::release_metadata(THD *thd,
Ndb *ndb)
2973 DBUG_ENTER(
"release_metadata");
2974 DBUG_PRINT(
"enter", (
"m_tabname: %s", m_tabname));
2977 int invalidate_indexes= 0;
2978 if (thd && thd->lex && thd->lex->sql_command == SQLCOM_FLUSH)
2980 invalidate_indexes = 1;
2982 if (m_table != NULL)
2984 if (m_ndb_record != NULL)
2986 dict->releaseRecord(m_ndb_record);
2989 if (m_ndb_hidden_key_record != NULL)
2991 dict->releaseRecord(m_ndb_hidden_key_record);
2992 m_ndb_hidden_key_record= NULL;
2995 invalidate_indexes= 1;
2996 dict->removeTableGlobal(*m_table, invalidate_indexes);
2999 DBUG_ASSERT(m_table_info == NULL);
3003 for (i= 0; i < MAX_KEY; i++)
3005 if (m_index[i].unique_index)
3007 DBUG_ASSERT(m_table != NULL);
3008 dict->removeIndexGlobal(*m_index[i].unique_index, invalidate_indexes);
3010 if (m_index[i].index)
3012 DBUG_ASSERT(m_table != NULL);
3013 dict->removeIndexGlobal(*m_index[i].index, invalidate_indexes);
3015 ndb_clear_index(dict, m_index[i]);
/*
  NOTE(review): two fragments.  First (orig 3029-3031): interior of a
  lock-mode mapping (presumably get_ndb_lock_mode — signature dropped by
  extraction; TODO confirm) branching on thr_lock_type.  Second (orig
  3037-3073): the index_type_flags[] capability table indexed by
  NDB_INDEX_TYPE; most rows were dropped, only HA_ONLY_WHOLE_INDEX
  entries survive here.
*/
3029 if (type >= TL_WRITE_ALLOW_WRITE)
3031 if (type == TL_READ_WITH_SHARED_LOCKS)
3037 static const ulong index_type_flags[]=
3043 HA_ONLY_WHOLE_INDEX,
3058 HA_ONLY_WHOLE_INDEX,
3073 static const int index_flags_size=
sizeof(index_type_flags)/
sizeof(ulong);
/* get_index_type: cached NDB index type of key idx_no. */
3075 inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no)
const
3077 DBUG_ASSERT(idx_no < MAX_KEY);
3078 return m_index[idx_no].type;
/* has_null_in_unique_index: cached "unique key has nullable column" flag. */
3081 inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no)
const
3083 DBUG_ASSERT(idx_no < MAX_KEY);
3084 return m_index[idx_no].null_in_unique_index;
/*
  index_flags: handler capability flags for key idx_no, looked up in the
  static index_type_flags table by NDB index type; HA_KEY_SCAN_NOT_ROR is
  always added (NDB ordered scans are not rowid-ordered).
*/
3095 inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part,
3096 bool all_parts)
const
3098 DBUG_ENTER(
"ha_ndbcluster::index_flags");
3099 DBUG_PRINT(
"enter", (
"idx_no: %u", idx_no));
3100 DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size);
3101 DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)] |
3102 HA_KEY_SCAN_NOT_ROR);
/*
  primary_key_is_clustered: report the PK as clustered only when it has
  an ordered component (ordered scans on it return rows in key order).
  Tables without a PK (MAX_KEY) take the early branch at the top.
*/
3106 ha_ndbcluster::primary_key_is_clustered()
3109 if (table->s->primary_key == MAX_KEY)
3121 const ndb_index_type idx_type =
3122 get_index_type_from_table(table->s->primary_key);
3123 return (idx_type == PRIMARY_KEY_ORDERED_INDEX ||
3124 idx_type == UNIQUE_ORDERED_INDEX ||
3125 idx_type == ORDERED_INDEX);
/*
  check_index_fields_in_write_set: true iff every column of key `keyno`
  is present in the table's write_set (returns dropped by extraction).
*/
3128 bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
3130 KEY* key_info= table->key_info + keyno;
3134 DBUG_ENTER(
"check_index_fields_in_write_set");
3136 for (i= 0; key_part != end; key_part++, i++)
3138 Field* field= key_part->field;
3139 if (!bitmap_is_set(table->write_set, field->field_index))
/*
  pk_read: primary-key point lookup into buf.  When this handler is the
  root of a pushed join, the lookup is issued through the pushed NdbQuery
  (first branch) and the result fetched with fetch_next_pushed();
  otherwise a plain NdbRecord readTuple is defined and executed.  A
  user-defined-partitioning table passes its partition id explicitly.
  NOTE(review): extraction fragment — returns/braces between branches
  are missing.
*/
3153 int ha_ndbcluster::pk_read(
const uchar *key, uint key_len, uchar *buf,
3158 DBUG_ENTER(
"pk_read");
3159 DBUG_PRINT(
"enter", (
"key_len: %u read_set=%x",
3160 key_len, table->read_set->bitmap[0]));
3161 DBUG_DUMP(
"key", key, key_len);
3166 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
/* Pushed-join path: key lookup via the pushed NdbQuery. */
3171 const int error= pk_unique_index_read_key_pushed(table->s->primary_key, key,
3172 (m_user_defined_partitioning ?
3174 if (unlikely(error))
3177 DBUG_ASSERT(m_active_query!=NULL);
3178 if ((res = execute_no_commit_ie(m_thd_ndb, trans)) != 0 ||
3181 table->status= STATUS_NOT_FOUND;
3182 DBUG_RETURN(ndb_err(trans));
3185 int result= fetch_next_pushed();
3186 if (result == NdbQuery::NextResult_gotRow)
3190 else if (result == NdbQuery::NextResult_scanComplete)
3192 DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
3196 DBUG_RETURN(ndb_err(trans));
3202 if (m_pushed_join_operation == PUSHED_ROOT)
/* Plain path: direct NdbRecord primary-key read. */
3208 if (!(op= pk_unique_index_read_key(table->s->primary_key, key, buf, lm,
3209 (m_user_defined_partitioning ?
3214 if ((res = execute_no_commit_ie(m_thd_ndb, trans)) != 0 ||
3217 table->status= STATUS_NOT_FOUND;
3218 DBUG_RETURN(ndb_err(trans));
/*
  ndb_pk_update_row: an UPDATE that changes the primary key (or moves the
  row across partitions) cannot be done in place, so it is executed as
  read (of columns not already in old_data) + delete + insert.  If the
  insert then fails, the half-done transaction state is reset; undoing
  the delete is compiled out (FIXED_OLD_DATA_... guard) because old_data
  is noted as unreliable.
  NOTE(review): extraction fragment — returns and error labels between
  visible statements are missing.
*/
3229 int ha_ndbcluster::ndb_pk_update_row(THD *thd,
3230 const uchar *old_data, uchar *new_data,
3236 DBUG_ENTER(
"ndb_pk_update_row");
3241 options.optionsPresent=0;
3243 DBUG_PRINT(
"info", (
"primary key update or partition change, "
3244 "doing read+delete+insert"));
3248 const uchar *key_row;
3250 if (m_user_defined_partitioning)
3252 options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID;
3253 options.partitionId=old_part_id;
3257 setup_key_ref_for_ndb_record(&key_rec, &key_row, old_data, FALSE);
/* Read the columns the insert will need but old_data does not have:
   m_bitmap = ~(read_set | write_set). */
3259 if (!bitmap_is_set_all(table->read_set))
3268 bitmap_copy(&m_bitmap, table->read_set);
3269 bitmap_union(&m_bitmap, table->write_set);
3270 bitmap_invert(&m_bitmap);
3271 if (!(op= trans->readTuple(key_rec, (
const char *)key_row,
3272 m_ndb_record, (
char *)new_data,
3273 get_ndb_lock_mode(m_lock.type),
3274 (
const unsigned char *)(m_bitmap.bitmap),
3279 if (table_share->blob_fields > 0)
3281 my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
3282 error= get_blob_values(op, new_data, &m_bitmap);
3283 dbug_tmp_restore_column_map(table->read_set, old_map);
3287 if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
3289 table->status= STATUS_NOT_FOUND;
3290 DBUG_RETURN(ndb_err(trans));
/* Step 2: delete the old row. */
3295 error= ndb_delete_row(old_data, TRUE);
3298 DBUG_PRINT(
"info", (
"delete failed"));
3303 DBUG_PRINT(
"info", (
"delete succeded"));
3304 bool batched_update= (m_active_cursor != 0);
/* Keep the autoincrement counter ahead of an explicitly set PK value. */
3309 if (table->found_next_number_field &&
3310 bitmap_is_set(table->write_set,
3311 table->found_next_number_field->field_index) &&
3312 (error= set_auto_inc(thd, table->found_next_number_field)))
/* Step 3: insert the new row image. */
3325 my_bitmap_map *old_map=
3326 tmp_use_all_columns(table, table->write_set);
3327 error= ndb_write_row(new_data, TRUE, batched_update);
3328 tmp_restore_column_map(table->write_set, old_map);
3332 DBUG_PRINT(
"info", (
"insert failed"));
3335 if (thd->slave_thread)
3336 g_ndb_slave_state.atTransactionAbort();
3337 m_thd_ndb->m_unsent_bytes= 0;
3338 m_thd_ndb->m_execute_count++;
3339 DBUG_PRINT(
"info", (
"execute_count: %u", m_thd_ndb->m_execute_count));
/* Undo-delete path compiled out: old_data may not be trustworthy. */
3341 #ifdef FIXED_OLD_DATA_TO_ACTUALLY_CONTAIN_GOOD_DATA
3344 undo_res= ndb_write_row((uchar *)old_data, TRUE, batched_update);
3346 push_warning(table->in_use,
3347 Sql_condition::WARN_LEVEL_WARN,
3349 "NDB failed undoing delete at primary key update");
3354 DBUG_PRINT(
"info", (
"delete+insert succeeded"));
/*
  check_all_operations_for_error: after a batched execute, walk the
  operations from `first` to `last` verifying each failed with the
  expected error code.  For HA_ERR_KEY_NOT_FOUND the duplicate-key index
  (m_dupkey) is identified by matching the operation's index handle, with
  the primary key as fallback.
  NOTE(review): extraction fragment — the operation-iteration statements
  and returns are missing.
*/
3365 bool ha_ndbcluster::check_all_operations_for_error(
NdbTransaction *trans,
3371 DBUG_ENTER(
"ha_ndbcluster::check_all_operations_for_error");
3378 if (ndb_to_mysql_error(&err) != (
int) errcode)
3380 if (op == last)
break;
3388 if (errcode == HA_ERR_KEY_NOT_FOUND)
3393 for(uint i= 0; i<table->s->keys; i++)
3395 if (m_index[i].unique_index == index)
3407 if (errcode == HA_ERR_KEY_NOT_FOUND)
3408 m_dupkey= table->s->primary_key;
/*
  check_null_in_record: true (return lines dropped by extraction) iff any
  key part of key_info is NULL in `record`, tested via the per-part null
  byte/bit.  Below it: file-scope scratch objects — an all-zero column
  mask and a 1-byte dummy row used for lock-only / existence-only reads.
*/
3422 check_null_in_record(
const KEY* key_info,
const uchar *record)
3425 curr_part= key_info->key_part;
3428 while (curr_part != end_part)
3430 if (curr_part->null_bit &&
3431 (record[curr_part->null_offset] & curr_part->null_bit))
3446 static unsigned char empty_mask[(NDB_MAX_ATTRIBUTES_IN_TABLE+7)/8];
3447 static char dummy_row[1];
/*
  peek_indexed_rows: probe for duplicate-key conflicts before a write.
  Issues existence-only reads (dummy_row/empty_mask targets) on the
  primary key (unless updating) and on every unique key whose columns
  overlap the write_set, then executes the batch and classifies the
  per-operation errors via check_all_operations_for_error.  Keys with a
  NULL key column are skipped (NULLs never conflict).
  NOTE(review): extraction fragment — returns/braces between visible
  statements are missing.
*/
3454 int ha_ndbcluster::peek_indexed_rows(
const uchar *record,
3455 NDB_WRITE_OP write_op)
3462 options.optionsPresent = 0;
3465 DBUG_ENTER(
"peek_indexed_rows");
3466 if (unlikely(!(trans= get_transaction(error))))
/* Primary-key probe (not needed for UPDATE — PK unchanged there). */
3472 if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY)
3478 m_index[table->s->primary_key].ndb_unique_record_row;
3480 if (m_user_defined_partitioning)
3484 longlong func_value;
3485 my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
3486 error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
3487 dbug_tmp_restore_column_map(table->read_set, old_map);
3490 m_part_info->err_value= func_value;
3493 options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID;
3494 options.partitionId=part_id;
3498 if (!(op= trans->readTuple(key_rec, (
const char *)record,
3499 m_ndb_record, dummy_row, lm, empty_mask,
/* Unique-key probes: only keys touched by this write. */
3510 for (i= 0, key_info= table->key_info; i < table->s->keys; i++, key_info++)
3512 if (i != table_share->primary_key &&
3513 key_info->
flags & HA_NOSAME &&
3514 bitmap_is_overlapping(table->write_set, m_key_fields[i]))
/* NULL in a unique key can never cause a duplicate — skip. */
3522 if (check_null_in_record(key_info, record))
3524 DBUG_PRINT(
"info", (
"skipping check for key with NULL"));
3527 if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i))
3529 DBUG_PRINT(
"info", (
"skipping check for key %u not in write_set", i));
3534 const NdbRecord *key_rec= m_index[
i].ndb_unique_record_row;
3535 if (!(iop= trans->readTuple(key_rec, (
const char *)record,
3536 m_ndb_record, dummy_row,
3544 last= trans->getLastDefinedOperation();
3546 res= execute_no_commit_ie(m_thd_ndb, trans);
3550 table->status= STATUS_NOT_FOUND;
3551 DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
3553 if (check_all_operations_for_error(trans, first, last,
3554 HA_ERR_KEY_NOT_FOUND))
3556 table->status= STATUS_NOT_FOUND;
3557 DBUG_RETURN(ndb_err(trans));
3561 DBUG_PRINT(
"info", (
"m_dupkey %d", m_dupkey));
/*
  unique_index_read: point lookup via the active unique index, mirroring
  pk_read's structure — pushed-join branch (NdbQuery +
  fetch_next_pushed) first, then the plain readTuple branch.  On the
  plain branch a key-not-found error sets STATUS_NOT_FOUND, any other
  error STATUS_GARBAGE.
  NOTE(review): extraction fragment — returns/braces between branches
  are missing.
*/
3571 int ha_ndbcluster::unique_index_read(
const uchar *key,
3572 uint key_len, uchar *buf)
3575 DBUG_ENTER(
"ha_ndbcluster::unique_index_read");
3576 DBUG_PRINT(
"enter", (
"key_len: %u, index: %u", key_len, active_index));
3577 DBUG_DUMP(
"key", key, key_len);
3582 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
3586 const int error= pk_unique_index_read_key_pushed(active_index, key, NULL);
3587 if (unlikely(error))
3590 DBUG_ASSERT(m_active_query!=NULL);
3591 if (execute_no_commit_ie(m_thd_ndb, trans) != 0 ||
3594 table->status= STATUS_GARBAGE;
3595 DBUG_RETURN(ndb_err(trans));
3598 int result= fetch_next_pushed();
3599 if (result == NdbQuery::NextResult_gotRow)
3603 else if (result == NdbQuery::NextResult_scanComplete)
3605 DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
3609 DBUG_RETURN(ndb_err(trans));
3615 if (m_pushed_join_operation == PUSHED_ROOT)
3622 if (!(op= pk_unique_index_read_key(active_index, key, buf, lm, NULL)))
3625 if (execute_no_commit_ie(m_thd_ndb, trans) != 0 ||
3628 int err= ndb_err(trans);
3629 if(err==HA_ERR_KEY_NOT_FOUND)
3630 table->status= STATUS_NOT_FOUND;
3632 table->status= STATUS_GARBAGE;
/*
  NOTE(review): body fragment of scan_handle_lock_tuple — the signature
  was dropped by extraction.  When the previous scanned row must stay
  locked (m_lock_tuple), a lock-taking operation is defined on the
  cursor (dummy_row/empty_mask: no data read) and its ~12 bytes are
  counted into m_unsent_bytes; the flag is cleared on both paths.
*/
3646 DBUG_ENTER(
"ha_ndbcluster::scan_handle_lock_tuple");
3657 DBUG_PRINT(
"info", (
"Keeping lock on scanned row"));
3660 dummy_row, empty_mask)))
3663 m_lock_tuple= FALSE;
/* Account for the lock op so a later execute flushes it. */
3667 m_thd_ndb->m_unsent_bytes+=12;
3669 m_lock_tuple= FALSE;
/*
  NOTE(review): body fragment of fetch_next — the signature was dropped
  by extraction.  Pulls the next row from the scan cursor.  contact_ndb
  allows nextResult to fetch new batches from the data nodes; it is
  disabled for locking scans (locks must be taken before moving on) and
  re-derived from nextResult's return value (2 = need to contact NDB).
  Pending blob/batched-insert bytes are flushed before fetching.
*/
3675 DBUG_ENTER(
"fetch_next");
3681 if ((error= scan_handle_lock_tuple(cursor, trans)) != 0)
3684 bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE &&
3685 m_lock.type != TL_READ_WITH_SHARED_LOCKS;
3687 DBUG_PRINT(
"info", (
"Call nextResult, contact_ndb: %d", contact_ndb));
/* Flush pending blob-part operations before fetching more rows. */
3691 if (m_thd_ndb->m_unsent_bytes && m_blobs_pending)
3693 if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
3694 DBUG_RETURN(ndb_err(trans));
3701 release_completed_operations(trans);
3703 if ((local_check= cursor->
nextResult(&_m_next_row,
3705 m_thd_ndb->m_force_send)) == 0)
/* Row delivered: remember whether its lock must be kept. */
3711 m_lock_tuple= (m_lock.type == TL_WRITE_ALLOW_WRITE
3713 m_lock.type == TL_READ_WITH_SHARED_LOCKS);
3716 else if (local_check == 1 || local_check == 2)
3726 DBUG_PRINT(
"info", (
"thd_ndb->m_unsent_bytes: %ld",
3727 (
long) m_thd_ndb->m_unsent_bytes));
3728 if (m_thd_ndb->m_unsent_bytes)
3730 if ((error = flush_bulk_insert()) != 0)
3733 contact_ndb= (local_check == 2);
3737 DBUG_RETURN(ndb_err(trans));
3739 }
while (local_check == 2);
/*
  fetch_next_pushed: advance the pushed-join operation one row.  On
  gotRow the row is unpacked into record[0]; on scanComplete
  STATUS_NOT_FOUND is set; any other result is an error
  (STATUS_GARBAGE).  Returns the NdbQuery::NextResult code.
*/
3744 int ha_ndbcluster::fetch_next_pushed()
3746 DBUG_ENTER(
"fetch_next_pushed (from pushed operation)");
3748 DBUG_ASSERT(m_pushed_operation);
3756 if (result == NdbQuery::NextResult_gotRow)
3758 DBUG_ASSERT(m_next_row!=NULL);
3759 DBUG_PRINT(
"info", (
"One more record found"));
3761 unpack_record(table->record[0], m_next_row);
3765 else if (result == NdbQuery::NextResult_scanComplete)
3767 DBUG_ASSERT(m_next_row==NULL);
3768 DBUG_PRINT(
"info", (
"No more records"));
3769 table->status= STATUS_NOT_FOUND;
3775 DBUG_PRINT(
"info", (
"Error from 'nextResult()'"));
3776 table->status= STATUS_GARBAGE;
3780 DBUG_RETURN(result);
/*
  index_read_pushed: deliver the pre-fetched result of a pushed child
  operation for an index lookup.  Falls back to the regular
  index_read_map when the pushed join was not executable after all.
*/
3791 ha_ndbcluster::index_read_pushed(uchar *buf,
const uchar *key,
3792 key_part_map keypart_map)
3794 DBUG_ENTER(
"index_read_pushed");
/* Pushed join was abandoned: use the normal access path. */
3798 if (unlikely(!check_is_pushed()))
3800 DBUG_RETURN(
index_read_map(buf, key, keypart_map, HA_READ_KEY_EXACT));
3807 if (result == NdbQuery::NextResult_gotRow)
3809 DBUG_ASSERT(m_next_row!=NULL);
3810 unpack_record(buf, m_next_row);
3816 DBUG_ASSERT(result!=NdbQuery::NextResult_gotRow);
3817 table->status= STATUS_NOT_FOUND;
3818 DBUG_PRINT(
"info", (
"No record found"));
/*
  index_next_pushed: next row of a pushed child operation; maps the
  NdbQuery result codes to handler errors (scanComplete →
  HA_ERR_END_OF_FILE, other → ndb_err).  Only valid for child
  operations of an executed pushed join (asserts below).
*/
3832 int ha_ndbcluster::index_next_pushed(uchar *buf)
3834 DBUG_ENTER(
"index_next_pushed");
3838 if (unlikely(!check_is_pushed()))
3843 DBUG_ASSERT(m_pushed_join_operation>PUSHED_ROOT);
3844 DBUG_ASSERT(m_active_query==NULL);
3846 int res = fetch_next_pushed();
3847 if (res == NdbQuery::NextResult_gotRow)
3851 else if (res == NdbQuery::NextResult_scanComplete)
3853 DBUG_RETURN(HA_ERR_END_OF_FILE);
3857 DBUG_RETURN(ndb_err(m_thd_ndb->trans));
/*
  next_result: fetch the next row of the current result source — a plain
  scan cursor (fetch_next + unpack_record) or an active pushed query
  (fetch_next_pushed) — into buf.  No active source means end of file.
*/
3872 inline int ha_ndbcluster::next_result(uchar *buf)
3875 DBUG_ENTER(
"next_result");
3877 if (m_active_cursor)
3879 if ((res= fetch_next(m_active_cursor)) == 0)
3881 DBUG_PRINT(
"info", (
"One more record found"));
3883 unpack_record(buf, m_next_row);
3890 table->status= STATUS_NOT_FOUND;
3892 DBUG_PRINT(
"info", (
"No more records"));
3893 DBUG_RETURN(HA_ERR_END_OF_FILE);
3897 DBUG_RETURN(ndb_err(m_thd_ndb->trans));
3900 else if (m_active_query)
3902 res= fetch_next_pushed();
3903 if (res == NdbQuery::NextResult_gotRow)
3907 else if (res == NdbQuery::NextResult_scanComplete)
3909 DBUG_RETURN(HA_ERR_END_OF_FILE);
3913 DBUG_RETURN(ndb_err(m_thd_ndb->trans));
3917 DBUG_RETURN(HA_ERR_END_OF_FILE);
/*
  pk_unique_index_read_key: define (but do not execute) a readTuple on
  the current transaction for a PK or unique-index lookup, using the
  prebuilt key NdbRecord (or the hidden-key record when the table has no
  PK).  Null bytes of buf are pre-set to 0xff, blob read handles are
  registered, and an explicit partition id is honoured.
  NOTE(review): extraction fragment — the returns and some option
  plumbing are missing.
*/
3925 ha_ndbcluster::pk_unique_index_read_key(uint idx,
const uchar *key, uchar *buf,
3927 Uint32 *ppartition_id)
3933 options.optionsPresent= 0;
3936 DBUG_ASSERT(m_thd_ndb->trans);
3939 key_rec= m_index[idx].ndb_unique_record_key;
3941 key_rec= m_ndb_hidden_key_record;
/* Initialize null bits so unfetched nullable columns read as NULL. */
3944 memset(buf, 0xff, table->s->null_bytes);
3946 if (table_share->primary_key == MAX_KEY)
3948 get_hidden_fields_keyop(&options, gets);
3952 if (ppartition_id != NULL)
3954 assert(m_user_defined_partitioning);
3955 options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID;
3956 options.partitionId= *ppartition_id;
3960 op= m_thd_ndb->trans->readTuple(key_rec, (
const char *)key, m_ndb_record,
3962 (uchar *)(table->read_set->bitmap), poptions,
3965 if (uses_blob_value(table->read_set) &&
3966 get_blob_values(op, buf, table->read_set) != 0)
3972 extern void sql_print_information(
const char *format, ...);
3974 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
3977 is_shrinked_varchar(
const Field *field)
3979 if (field->real_type() == MYSQL_TYPE_VARCHAR)
3989 ha_ndbcluster::pk_unique_index_read_key_pushed(uint idx,
3991 Uint32 *ppartition_id)
3993 DBUG_ENTER(
"pk_unique_index_read_key_pushed");
3996 options.optionsPresent= 0;
3999 DBUG_ASSERT(m_thd_ndb->trans);
4000 DBUG_ASSERT(idx < MAX_KEY);
4004 m_active_query->
close(FALSE);
4005 m_active_query= NULL;
4008 if (table_share->primary_key == MAX_KEY)
4010 get_hidden_fields_keyop(&options, gets);
4014 if (ppartition_id != NULL)
4016 assert(m_user_defined_partitioning);
4017 options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID;
4018 options.partitionId= *ppartition_id;
4022 KEY *key_def= &table->key_info[idx];
4031 ndbcluster_build_key_map(m_table, m_index[idx], &table->key_info[idx], map);
4034 for (i = 0, key_part= key_def->key_part; i < key_def->user_defined_key_parts; i++, key_part++)
4036 bool shrinkVarChar= is_shrinked_varchar(key_part->field);
4038 if (key_part->null_bit)
4040 DBUG_ASSERT(idx != table_share->primary_key);
4041 DBUG_ASSERT(*(key+offset)==0);
4049 offset+= key_part->store_length;
// count_key_columns(): number of leading key parts of 'key_info' covered by
// the supplied key_range 'key' (walks parts, accumulating store_length until
// key->length is consumed).
// NOTE(review): declarations of key_part/first_key_part/key_part_end/length
// are in elided lines.
4060 count_key_columns(
const KEY *key_info,
const key_range *key)
4066 for(key_part= first_key_part; key_part < key_part_end; key_part++)
// Stop once the accumulated length covers the whole search key.
4068 if (length >= key->length)
4070 length+= key_part->store_length;
4072 return key_part - first_key_part;
// compute_index_bounds(): translate MySQL's start_key/end_key key_range pair
// into an NdbIndexScanOperation::IndexBound ('bound') with low/high key
// pointers, column counts and inclusiveness flags.
// NOTE(review): the function's opening signature line and several interior
// lines are missing from this extract.
4079 const KEY *key_info,
4083 DBUG_ENTER(
"ha_ndbcluster::compute_index_bounds");
4084 DBUG_PRINT(
"info", (
"from: %d", from));
4087 DBUG_PRINT(
"info", (
"key parts: %u length: %u",
// Debug-dump both ranges (j==0 -> start_key, j==1 -> end_key).
4090 for (uint j= 0; j <= 1; j++)
4092 const key_range* kr= (j == 0 ? start_key : end_key);
4095 DBUG_PRINT(
"info", (
"key range %u: length: %u map: %lx flag: %d",
4096 j, kr->length, kr->keypart_map, kr->flag));
4097 DBUG_DUMP(
"key", kr->key, kr->length);
4101 DBUG_PRINT(
"info", (
"key range %u: none", j));
// Lower bound from start_key: inclusive unless reading strictly after/before.
4109 bound.low_key= (
const char*)start_key->key;
4110 bound.low_key_count= count_key_columns(key_info, start_key);
4111 bound.low_inclusive=
4112 start_key->flag != HA_READ_AFTER_KEY &&
4113 start_key->flag != HA_READ_BEFORE_KEY;
// No start key: scan is unbounded below.
4117 bound.low_key= NULL;
4118 bound.low_key_count= 0;
// Exact/prefix-last reads are equality lookups: mirror the low bound into
// the high bound (condition partially elided).
4123 (start_key->flag == HA_READ_KEY_EXACT ||
4124 start_key->flag == HA_READ_PREFIX_LAST) &&
4127 bound.high_key= bound.low_key;
4128 bound.high_key_count= bound.low_key_count;
4129 bound.high_inclusive= TRUE;
// Upper bound from end_key.
4133 bound.high_key= (
const char*)end_key->key;
4134 bound.high_key_count= count_key_columns(key_info, end_key);
4141 bound.high_inclusive= end_key->flag != HA_READ_BEFORE_KEY;
// Symmetric case: equality expressed via the end key mirrors into the low
// bound (used for descending/reverse-ordered reads).
4142 if (end_key->flag == HA_READ_KEY_EXACT ||
4143 end_key->flag == HA_READ_PREFIX_LAST)
4145 bound.low_key= bound.high_key;
4146 bound.low_key_count= bound.high_key_count;
4147 bound.low_inclusive= TRUE;
// No end key: unbounded above.
4152 bound.high_key= NULL;
4153 bound.high_key_count= 0;
4155 DBUG_PRINT(
"info", (
"start_flag=%d end_flag=%d"
4156 " lo_keys=%d lo_incl=%d hi_keys=%d hi_incl=%d",
4157 start_key?start_key->flag:0, end_key?end_key->flag:0,
4158 bound.low_key_count,
4159 bound.low_key_count?bound.low_inclusive:0,
4160 bound.high_key_count,
4161 bound.high_key_count?bound.high_inclusive:0));
// ordered_index_scan(): start an ordered (optionally sorted/descending) scan
// on the active index, either as a pushed join (NdbQuery) or as a plain
// NdbScanOperation, then return the first row via next_result().
// NOTE(review): many interior lines are missing from this extract; comments
// cover only what is visible.
4169 int ha_ndbcluster::ordered_index_scan(
const key_range *start_key,
4171 bool sorted,
bool descending,
4178 DBUG_ENTER(
"ha_ndbcluster::ordered_index_scan");
4179 DBUG_PRINT(
"enter", (
"index: %u, sorted: %d, descending: %d read_set=0x%x",
4180 active_index, sorted, descending, table->read_set->bitmap[0]));
4181 DBUG_PRINT(
"enter", (
"Starting new ordered scan on %s", m_tabname));
4184 DBUG_ASSERT(sorted == 0 || sorted == 1);
4186 if (unlikely(!(trans= get_transaction(error))))
// Close any scan still open from a previous call before starting a new one.
4191 if ((error= close_scan()))
4196 const NdbRecord *key_rec= m_index[active_index].ndb_record_key;
// Build the index bound; for descending scans start/end are swapped
// (ternary conditions partially elided).
4201 if (start_key != NULL || end_key != NULL)
4207 compute_index_bounds(bound,
4208 table->key_info + active_index,
4210 end_key : start_key),
4212 start_key : end_key),
4218 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
// Pushed-join path: build the NdbQuery, apply ordering and bound, and track
// scan/pruned-scan statistics.
4222 const int error= create_pushed_join();
4223 if (unlikely(error))
4227 if (sorted && query->getQueryOperation((uint)PUSHED_ROOT)
4228 ->
setOrdering(descending ? NdbQueryOptions::ScanOrdering_descending
4229 : NdbQueryOptions::ScanOrdering_ascending))
4234 if (pbound && query->setBound(key_rec, pbound)!=0)
4237 m_thd_ndb->m_scan_count++;
4239 bool prunable =
false;
4240 if (unlikely(query->
isPrunable(prunable) != 0))
4243 m_thd_ndb->m_pruned_scan_count++;
// Blobs are not supported in pushed joins.
4245 DBUG_ASSERT(!uses_blob_value(table->read_set));
4250 if (m_pushed_join_operation == PUSHED_ROOT)
// Non-pushed path: configure ScanOptions flags.
4256 options.optionsPresent=NdbScanOperation::ScanOptions::SO_SCANFLAGS;
4257 options.scan_flags=0;
4260 if (table_share->primary_key == MAX_KEY)
4261 get_hidden_fields_scan(&options, gets);
// SF_KeyInfo / SF_Descending set under conditions elided from this extract.
4264 options.scan_flags|= NdbScanOperation::SF_KeyInfo;
4268 options.scan_flags|= NdbScanOperation::SF_Descending;
// Single-partition pruning when partitioning resolves to one partition.
4271 if (m_use_partition_pruning &&
4272 m_user_defined_partitioning && part_spec != NULL &&
4273 part_spec->start_part == part_spec->end_part)
4276 options.partitionId = part_spec->start_part;
4277 options.optionsPresent |= NdbScanOperation::ScanOptions::SO_PARTITION_ID;
// Push any pending condition down as an interpreted scan filter.
4281 if (m_cond && m_cond->generate_scan_filter(&
code, &options))
4282 ERR_RETURN(
code.getNdbError());
4284 if (!(op= trans->
scanIndex(key_rec, row_rec, lm,
4285 (uchar *)(table->read_set->bitmap),
4291 DBUG_PRINT(
"info", (
"Is scan pruned to 1 partition? : %u", op->
getPruned()));
4292 m_thd_ndb->m_scan_count++;
4293 m_thd_ndb->m_pruned_scan_count += (op->
getPruned()? 1 : 0);
4295 if (uses_blob_value(table->read_set) &&
4296 get_blob_values(op, NULL, table->read_set) != 0)
4299 m_active_cursor= op;
// Send the scan definition to the data nodes and fetch the first row.
4307 if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
4308 DBUG_RETURN(ndb_err(trans));
4310 DBUG_RETURN(next_result(buf));
4322 int ret = tab->
checkColumns(readset->bitmap, no_bytes_in_map(readset));
4326 flags |= NdbScanOperation::SF_DiskScan;
4331 flags |= NdbScanOperation::SF_DiskScan;
// full_table_scan(): start a full table scan (optionally pruned to a single
// user-defined partition, optionally as a pushed join), then return the
// first row via next_result().
// NOTE(review): many interior lines are missing from this extract.
4341 int ha_ndbcluster::full_table_scan(
const KEY* key_info,
4349 bool use_set_part_id= FALSE;
4352 DBUG_ENTER(
"full_table_scan");
4353 DBUG_PRINT(
"enter", (
"Starting new scan on %s", m_tabname));
// With user-defined partitioning, compute the set of partitions that can
// contain qualifying rows and prune the scan accordingly.
4355 if (m_use_partition_pruning && m_user_defined_partitioning)
4357 DBUG_ASSERT(m_pushed_join_operation != PUSHED_ROOT);
4358 part_spec.start_part= 0;
4359 part_spec.end_part= m_part_info->get_tot_partitions() - 1;
4360 prune_partition_set(table, &part_spec);
4361 DBUG_PRINT(
"info", (
"part_spec.start_part: %u part_spec.end_part: %u",
4362 part_spec.start_part, part_spec.end_part));
// Empty partition set: nothing can match.
4367 if (part_spec.start_part > part_spec.end_part)
4369 DBUG_RETURN(HA_ERR_END_OF_FILE);
// Exactly one partition: scan just that partition.
4372 if (part_spec.start_part == part_spec.end_part)
4386 use_set_part_id= TRUE;
4388 if (unlikely(!(trans= get_transaction_part_id(part_spec.start_part,
4394 if (unlikely(!(trans= start_transaction(error))))
4399 options.optionsPresent = (NdbScanOperation::ScanOptions::SO_SCANFLAGS |
4400 NdbScanOperation::ScanOptions::SO_PARALLEL);
4401 options.scan_flags = guess_scan_flags(lm, m_table, table->read_set);
4402 options.parallel= DEFAULT_PARALLELISM;
4404 if (use_set_part_id) {
4405 assert(m_user_defined_partitioning);
4406 options.optionsPresent|= NdbScanOperation::ScanOptions::SO_PARTITION_ID;
4407 options.partitionId = part_spec.start_part;
// Hidden-PK tables also fetch the hidden key columns during the scan.
4410 if (table_share->primary_key == MAX_KEY)
4411 get_hidden_fields_scan(&options, gets);
4413 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
// Pushed-join path.
4416 const int error= create_pushed_join();
4417 if (unlikely(error))
4420 m_thd_ndb->m_scan_count++;
4421 DBUG_ASSERT(!uses_blob_value(table->read_set));
4426 if (m_pushed_join_operation == PUSHED_ROOT)
// Non-pushed path: push the WHERE condition down as a scan filter.
4436 if (m_cond && m_cond->generate_scan_filter(&
code, &options))
4437 ERR_RETURN(
code.getNdbError());
// "Unique index scan": build a scan filter from the key range instead
// (setup lines elided; allocation failure maps to HA_ERR_OUT_OF_MEM).
4442 DBUG_PRINT(
"info", (
"Starting unique index scan"));
4448 my_errno= HA_ERR_OUT_OF_MEM;
4449 DBUG_RETURN(my_errno);
4451 if (m_cond->generate_scan_filter_from_key(&
code, &options, key_info, start_key, end_key, buf))
4452 ERR_RETURN(
code.getNdbError());
4455 if (!(op= trans->
scanTable(m_ndb_record, lm,
4456 (uchar *)(table->read_set->bitmap),
4460 m_thd_ndb->m_scan_count++;
4461 m_thd_ndb->m_pruned_scan_count += (op->
getPruned()? 1 : 0);
4463 DBUG_ASSERT(m_active_cursor==NULL);
4464 m_active_cursor= op;
4466 if (uses_blob_value(table->read_set) &&
4467 get_blob_values(op, NULL, table->read_set) != 0)
// Send the scan definition and fetch the first row.
4471 if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
4472 DBUG_RETURN(ndb_err(trans));
4473 DBUG_PRINT(
"exit", (
"Scan started successfully"));
4474 DBUG_RETURN(next_result(buf));
// set_auto_inc(): push (field value + 1) as the next autoincrement value for
// this table. Temporarily marks the field readable so val_int() is legal.
// NOTE(review): the line restoring/honoring 'read_bit' (presumably an
// "if (!read_bit)" guard before the clear) is elided from this extract.
4478 ha_ndbcluster::set_auto_inc(THD *thd,
Field *field)
4480 DBUG_ENTER(
"ha_ndbcluster::set_auto_inc");
4481 bool read_bit= bitmap_is_set(table->read_set, field->field_index);
4482 bitmap_set_bit(table->read_set, field->field_index);
4483 Uint64 next_val= (Uint64) field->val_int() + 1;
4485 bitmap_clear_bit(table->read_set, field->field_index);
4486 DBUG_RETURN(set_auto_inc_val(thd, next_val));
// set_auto_inc_val(): update NDB's autoincrement counter for this table to
// 'value' if it is ahead of the cached tuple-id range.
// NOTE(review): the guard object 'g' and the error/return handling are in
// elided lines.
4491 ha_ndbcluster::set_auto_inc_val(THD *thd, Uint64 value)
4493 Ndb *ndb= get_ndb(thd);
4494 DBUG_ENTER(
"ha_ndbcluster::set_auto_inc_val");
4498 (
"Trying to set next auto increment value to %s",
4499 llstr(value, buff)));
// Only write through when 'value' actually advances the cached range.
4501 if (ndb->checkUpdateAutoIncrementValue(m_share->tuple_id_range, value))
4504 if (ndb->setAutoIncrementValue(m_table, g.range, value, TRUE)
4519 gets[num_gets].column= get_hidden_key_column();
4520 gets[num_gets].appStorage= &m_ref;
4522 if (m_user_defined_partitioning)
4525 gets[num_gets].column= NdbDictionary::Column::FRAGMENT;
4526 gets[num_gets].appStorage= &m_part_id;
4536 Uint32 num_gets= setup_get_hidden_fields(gets);
4537 options->optionsPresent|= NdbOperation::OperationOptions::OO_GETVALUE;
4538 options->extraGetValues= gets;
4539 options->numExtraGetValues= num_gets;
4546 Uint32 num_gets= setup_get_hidden_fields(gets);
4547 options->optionsPresent|= NdbScanOperation::ScanOptions::SO_GETVALUE;
4548 options->extraGetValues= gets;
4549 options->numExtraGetValues= num_gets;
// eventSetAnyValue(): fill in OperationOptions::anyValue, which rides along
// with each operation into the binlog/event stream. Slave threads propagate
// the originating server id; TNTO_NO_LOGGING marks the op as not-logged.
// NOTE(review): interior lines are missing from this extract.
4553 ha_ndbcluster::eventSetAnyValue(THD *thd,
4556 options->anyValue= 0;
4557 if (unlikely(m_slow_path))
4565 Thd_ndb *thd_ndb= get_thd_ndb(thd);
4566 if (thd->slave_thread)
// Replication slave: tag ops with the original server id so circular
// replication can filter them.
4576 options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE;
4577 options->anyValue = thd_unmasked_server_id(thd);
4579 else if (thd_ndb->trans_options & TNTO_NO_LOGGING)
4581 options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE;
4582 ndbcluster_anyvalue_set_nologging(options->anyValue);
// Debug hook: NDB_TEST_ANYVALUE_USERDATA env var ("0"/"n"/"N" disable it)
// stamps test user-bits into anyValue.
4590 const char* p = getenv(
"NDB_TEST_ANYVALUE_USERDATA");
4591 if (p != 0 && *p != 0 && *p !=
'0' && *p !=
'n' && *p !=
'N')
4593 options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE;
4594 dbug_ndbcluster_anyvalue_set_userbits(options->anyValue);
4599 #ifdef HAVE_NDB_BINLOG
4611 ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type,
4613 const uchar* old_data,
4614 const uchar* new_data,
4618 DBUG_ENTER(
"prepare_conflict_detection");
4621 const st_conflict_fn_def* conflict_fn = m_share->m_cfn_share->m_conflict_fn;
4622 assert( conflict_fn != NULL );
4629 if (op_type != WRITE_ROW)
4631 res = conflict_fn->prep_func(m_share->m_cfn_share,
4641 options->optionsPresent|=NdbOperation::OperationOptions::OO_INTERPRETED;
4642 options->interpretedCode= code;
4646 g_ndb_slave_state.current_conflict_defined_op_count++;
4649 const uchar* row_to_save = (op_type == DELETE_ROW)? old_data : new_data;
4650 Ndb_exceptions_data ex_data;
4651 ex_data.share= m_share;
4652 ex_data.key_rec= key_rec;
4653 ex_data.op_type= op_type;
4658 ex_data.row= copy_row_to_buffer(m_thd_ndb, row_to_save);
4659 uchar* ex_data_buffer= get_buffer(m_thd_ndb,
sizeof(ex_data));
4660 if (ex_data.row == NULL || ex_data_buffer == NULL)
4662 DBUG_RETURN(HA_ERR_OUT_OF_MEM);
4664 memcpy(ex_data_buffer, &ex_data,
sizeof(ex_data));
4667 options->optionsPresent|= NdbOperation::OperationOptions::OO_CUSTOMDATA;
4668 options->customData= (
void*)ex_data_buffer;
4687 handle_conflict_op_error(
Thd_ndb* thd_ndb,
4692 DBUG_ENTER(
"handle_conflict_op_error");
4693 DBUG_PRINT(
"info", (
"ndb error: %d", err.
code));
4695 if ((err.
code == (
int) error_conflict_fn_violation) ||
4696 (err.
code == (
int) error_op_after_refresh_op) ||
4701 (
"err.code %s (int) error_conflict_fn_violation, "
4702 "err.classification %s",
4703 err.
code == (
int) error_conflict_fn_violation ?
"==" :
"!=",
4706 ?
"== NdbError::ConstraintViolation"
4708 ?
"== NdbError::NoDataFound" :
"!=")));
4710 enum_conflict_cause conflict_cause;
4712 if ((err.
code == (
int) error_conflict_fn_violation) ||
4713 (err.
code == (
int) error_op_after_refresh_op))
4715 conflict_cause= ROW_IN_CONFLICT;
4719 conflict_cause= ROW_ALREADY_EXISTS;
4724 conflict_cause= ROW_DOES_NOT_EXIST;
4727 const void* buffer=op->getCustomData();
4729 Ndb_exceptions_data ex_data;
4730 memcpy(&ex_data, buffer,
sizeof(ex_data));
4732 const NdbRecord* key_rec= ex_data.key_rec;
4733 const uchar* row= ex_data.row;
4734 enum_conflicting_op_type op_type = ex_data.op_type;
4735 DBUG_ASSERT(share != NULL && row != NULL);
4737 NDB_CONFLICT_FN_SHARE* cfn_share= share->m_cfn_share;
4740 enum_conflict_fn_type cft = cfn_share->m_conflict_fn->type;
4741 bool haveExTable = cfn_share->m_ex_tab != NULL;
4743 g_ndb_slave_state.current_violation_count[cft]++;
4747 if (handle_row_conflict(cfn_share,
4758 char msg[FN_REFLEN];
4759 my_snprintf(msg,
sizeof(msg),
"Row conflict handling "
4760 "on table %s hit Ndb error %d '%s'",
4768 ERR_RETURN(handle_error);
4772 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
4773 ER_EXCEPTIONS_WRITE_ERROR,
4774 ER(ER_EXCEPTIONS_WRITE_ERROR), msg);
4776 DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR);
4785 if (write_conflict_row(share, trans, row, ex_err))
4787 char msg[FN_REFLEN];
4788 my_snprintf(msg,
sizeof(msg),
"table %s NDB error %d '%s'",
4789 cfn_share->m_ex_tab->getName(),
4796 dict->removeTableGlobal(*(cfn_share->m_ex_tab),
false);
4797 cfn_share->m_ex_tab= NULL;
4806 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
4807 ER_EXCEPTIONS_WRITE_ERROR,
4808 ER(ER_EXCEPTIONS_WRITE_ERROR), msg);
4810 DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR);
4819 DBUG_PRINT(
"info", (
"missing cfn_share"));
4826 DBUG_PRINT(
"info", (
"err.code == %u", err.
code));
4827 DBUG_RETURN(err.
code);
4835 #ifdef HAVE_NDB_BINLOG
// is_serverid_local(): true when 'serverid' is this server's own id or one
// configured to be ignored for replication (treated as locally originated).
4839 static bool is_serverid_local(Uint32 serverid)
4846 return ((serverid == ::server_id) ||
4847 ndb_mi_get_ignore_server_id(serverid));
// write_row(): handler INSERT entry point. On a slave thread writing to the
// ndb_apply_status table, first extract (server_id, epoch) from the row and
// record the apply-status write in the global slave state; then delegate to
// ndb_write_row().
// NOTE(review): row_epoch declaration and the sizeof for its memcpy are in
// elided lines.
4851 int ha_ndbcluster::write_row(uchar *record)
4853 DBUG_ENTER(
"ha_ndbcluster::write_row");
4854 #ifdef HAVE_NDB_BINLOG
4855 if (m_share == ndb_apply_status_share && table->in_use->slave_thread)
4857 uint32 row_server_id, master_server_id= ndb_mi_get_master_server_id();
// field->ptr points into record[0]; offset it to read from 'record'.
4859 memcpy(&row_server_id, table->field[0]->ptr + (record - table->record[0]),
4860 sizeof(row_server_id));
4861 memcpy(&row_epoch, table->field[1]->ptr + (record - table->record[0]),
4863 g_ndb_slave_state.atApplyStatusWrite(master_server_id,
4866 is_serverid_local(row_server_id));
4869 DBUG_RETURN(ndb_write_row(record, FALSE, FALSE));
4875 int ha_ndbcluster::ndb_write_row(uchar *record,
4876 bool primary_key_update,
4877 bool batched_update)
4879 bool has_auto_increment;
4881 THD *thd= table->in_use;
4888 DBUG_ENTER(
"ha_ndbcluster::ndb_write_row");
4890 error = check_slave_state(thd);
4891 if (unlikely(error))
4894 has_auto_increment= (table->next_number_field && record == table->record[0]);
4896 if (has_auto_increment && table_share->primary_key != MAX_KEY)
4901 m_skip_auto_increment= FALSE;
4902 if ((error= update_auto_increment()))
4910 if (!m_use_write && m_ignore_dup_key)
4917 int peek_res= peek_indexed_rows(record, NDB_INSERT);
4921 DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
4923 if (peek_res != HA_ERR_KEY_NOT_FOUND)
4924 DBUG_RETURN(peek_res);
4927 bool uses_blobs= uses_blob_value(table->write_set);
4931 const uchar *key_row;
4932 if (table_share->primary_key == MAX_KEY)
4935 Ndb *ndb= get_ndb(thd);
4936 uint retries= NDB_AUTO_INCREMENT_RETRIES;
4937 int retry_sleep= 30;
4941 if (ndb->getAutoIncrementValue(m_table, g.range, auto_value, 1000) == -1)
4943 if (--retries && !thd->killed &&
4946 do_retry_sleep(retry_sleep);
4953 sets[num_sets].column= get_hidden_key_column();
4954 sets[num_sets].value= &auto_value;
4956 key_rec= m_ndb_hidden_key_record;
4957 key_row= (
const uchar *)&auto_value;
4961 key_rec= m_index[table_share->primary_key].ndb_unique_record_row;
4965 trans= thd_ndb->trans;
4966 if (m_user_defined_partitioning)
4968 DBUG_ASSERT(m_use_partition_pruning);
4969 longlong func_value= 0;
4970 my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
4971 error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
4972 dbug_tmp_restore_column_map(table->read_set, old_map);
4973 if (unlikely(error))
4975 m_part_info->err_value= func_value;
4984 if (func_value >= INT_MAX32)
4985 func_value= INT_MAX32;
4986 sets[num_sets].column= get_partition_id_column();
4987 sets[num_sets].value= &func_value;
4991 if (unlikely(!(trans= start_transaction_part_id(part_id, error))))
4996 if (unlikely(!(trans= start_transaction_row(key_rec, key_row, error))))
5001 ha_statistic_increment(&SSV::ha_write_count);
5008 options.optionsPresent=0;
5010 eventSetAnyValue(thd, &options);
5011 bool need_flush= add_row_check_if_batch_full(thd_ndb);
5013 const Uint32 authorValue = 1;
5014 if ((thd->slave_thread) &&
5015 (m_table->getExtraRowAuthorBits()))
5018 sets[num_sets].column= NdbDictionary::Column::ROW_AUTHOR;
5019 sets[num_sets].value= &authorValue;
5023 if (m_user_defined_partitioning)
5025 options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID;
5026 options.partitionId= part_id;
5030 options.optionsPresent |= NdbOperation::OperationOptions::OO_SETVALUE;
5031 options.extraSetValues= sets;
5032 options.numExtraSetValues= num_sets;
5034 if (thd->slave_thread || THDVAR(thd, deferred_constraints))
5036 options.optionsPresent |=
5037 NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
5040 if (options.optionsPresent != 0)
5043 const Uint32 bitmapSz= (NDB_MAX_ATTRIBUTES_IN_TABLE + 31)/32;
5044 uint32 tmpBitmapSpace[bitmapSz];
5047 #ifdef HAVE_NDB_BINLOG
5048 bool haveConflictFunction =
5049 (thd->slave_thread &&
5050 m_share->m_cfn_share &&
5051 m_share->m_cfn_share->m_conflict_fn);
5055 #ifdef HAVE_NDB_BINLOG
5057 && !haveConflictFunction
5068 bool useWriteSet= isManualBinlogExec(thd);
5070 #ifdef HAVE_NDB_BINLOG
5075 useWriteSet |= thd->slave_thread;
5081 user_cols_written_bitmap= table->write_set;
5082 mask= (uchar *)(user_cols_written_bitmap->bitmap);
5086 user_cols_written_bitmap= NULL;
5090 op= trans->writeTuple(key_rec, (
const char *)key_row, m_ndb_record,
5091 (
char *)record, mask,
5096 #ifdef HAVE_NDB_BINLOG
5097 if (haveConflictFunction)
5100 if (unlikely((error = prepare_conflict_detection(WRITE_ROW,
5112 if (m_table->hasDefaultValues())
5114 DBUG_PRINT(
"info", (
"Not sending values for native defaulted columns"));
5129 user_cols_written_bitmap= &tmpBitmap;
5130 bitmap_init(user_cols_written_bitmap, tmpBitmapSpace,
5131 table->write_set->n_bits,
false);
5132 bitmap_copy(user_cols_written_bitmap, table->write_set);
5134 for (uint i= 0; i < table->s->fields; i++)
5136 Field *field= table->field[
i];
5137 DBUG_PRINT(
"info", (
"Field#%u, (%u), Type : %u "
5138 "NO_DEFAULT_VALUE_FLAG : %u PRI_KEY_FLAG : %u",
5142 field->flags & NO_DEFAULT_VALUE_FLAG,
5143 field->flags & PRI_KEY_FLAG));
5144 if ((field->flags & (NO_DEFAULT_VALUE_FLAG |
5146 ! type_supports_default_value(field->real_type()))
5148 bitmap_set_bit(user_cols_written_bitmap, field->field_index);
5152 mask= (uchar *)(user_cols_written_bitmap->bitmap);
5157 DBUG_PRINT(
"info", (
"No native defaults, sending all values"));
5158 user_cols_written_bitmap= NULL;
5163 op= trans->insertTuple(key_rec, (
const char *)key_row, m_ndb_record,
5164 (
char *)record, mask,
5170 bool do_batch= !need_flush &&
5171 (batched_update || thd_allow_batch(thd));
5173 if (table_share->blob_fields > 0)
5175 my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
5177 int res= set_blob_values(op, record - table->record[0],
5178 user_cols_written_bitmap, &blob_count, do_batch);
5179 dbug_tmp_restore_column_map(table->read_set, old_map);
5194 no_uncommitted_rows_update(1);
5195 if (( (m_rows_to_insert == 1 || uses_blobs) && !do_batch ) ||
5196 primary_key_update ||
5199 int res= flush_bulk_insert();
5202 m_skip_auto_increment= TRUE;
5206 if ((has_auto_increment) && (m_skip_auto_increment))
5209 if ((ret_val= set_auto_inc(thd, table->next_number_field)))
5211 DBUG_RETURN(ret_val);
5214 m_skip_auto_increment= TRUE;
5216 DBUG_PRINT(
"exit",(
"ok"));
// primary_key_cmp(): compare the primary-key columns of two row images;
// used to detect whether an UPDATE changes the PK. Only columns present in
// the write_set are compared. Returns non-zero on difference (exact return
// statements are in elided lines).
// NOTE(review): key_part/end initialization from keynr is elided.
5222 int ha_ndbcluster::primary_key_cmp(
const uchar * old_row,
const uchar * new_row)
5224 uint keynr= table_share->primary_key;
5228 for (; key_part != end ; key_part++)
// Column not being written -> cannot have changed; skip it.
5230 if (!bitmap_is_set(table->write_set, key_part->fieldnr - 1))
// PK parts are never nullable.
5234 DBUG_ASSERT(!key_part->null_bit);
// Variable-length parts need type-aware binary comparison.
5236 if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART))
5239 if (key_part->field->cmp_binary((old_row + key_part->offset),
5240 (new_row + key_part->offset),
5241 (ulong) key_part->length))
// Fixed-length parts compare as raw bytes.
5246 if (memcmp(old_row+key_part->offset, new_row+key_part->offset,
5254 #ifdef HAVE_NDB_BINLOG
5256 handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share,
5259 const uchar* pk_row,
5260 enum_conflicting_op_type op_type,
5261 enum_conflict_cause conflict_cause,
5266 DBUG_ENTER(
"handle_row_conflict");
5268 if (cfn_share->m_flags & CFF_REFRESH_ROWS)
5276 DBUG_PRINT(
"info", (
"Conflict on table %s. Operation type : %s, "
5277 "conflict cause :%s, conflict error : %u : %s",
5279 ((op_type == WRITE_ROW)?
"WRITE_ROW":
5280 (op_type == UPDATE_ROW)?
"UPDATE_ROW":
5282 ((conflict_cause == ROW_ALREADY_EXISTS)?
"ROW_ALREADY_EXISTS":
5283 (conflict_cause == ROW_DOES_NOT_EXIST)?
"ROW_DOES_NOT_EXIST":
5285 conflict_error.
code,
5288 assert(key_rec != NULL);
5289 assert(pk_row != NULL);
5297 if ((conflict_cause == ROW_IN_CONFLICT) &&
5298 (conflict_error.
code == (
int) error_op_after_refresh_op))
5303 DBUG_PRINT(
"info", (
"Operation after refresh error - ignoring"));
5330 if ((op_type == DELETE_ROW) &&
5331 (conflict_cause == ROW_DOES_NOT_EXIST))
5333 DBUG_PRINT(
"info", (
"Delete vs Delete detected, NOT refreshing"));
5342 const NdbOperation* refresh_op= conflict_trans->refreshTuple(key_rec,
5343 (
const char*) pk_row);
// start_bulk_update() body fragment: batching is disabled when duplicate
// keys are ignored without write-mode, because that path peeks rows first.
// NOTE(review): the function signature line and returns are elided.
5362 DBUG_ENTER(
"ha_ndbcluster::start_bulk_update");
5363 if (!m_use_write && m_ignore_dup_key)
5365 DBUG_PRINT(
"info", (
"Batching turned off as duplicate key is "
5366 "ignored by using peek_row"));
// bulk_update_row() fragment: delegates to ndb_update_row() with
// is_bulk_update=1. 'dup_key_found' is part of the handler API signature
// (earlier parameters are in elided lines).
uint *dup_key_found)
5375 DBUG_ENTER(
"ha_ndbcluster::bulk_update_row");
5377 DBUG_RETURN(ndb_update_row(old_data, new_data, 1));
5383 DBUG_ENTER(
"ha_ndbcluster::exec_bulk_update");
5387 assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler ==
this);
5389 if (m_thd_ndb->m_handler &&
5390 m_read_before_write_removal_possible)
5401 DBUG_PRINT(
"info", (
"committing auto-commit+rbwr early"));
5402 uint ignore_count= 0;
5403 const int ignore_error= 1;
5404 if (execute_commit(table->in_use, m_thd_ndb, trans,
5405 m_thd_ndb->m_force_send, ignore_error,
5406 &ignore_count) != 0)
5408 no_uncommitted_rows_execute_failure();
5409 DBUG_RETURN(ndb_err(trans));
5411 DBUG_PRINT(
"info", (
"ignore_count: %u", ignore_count));
5412 assert(m_rows_changed >= ignore_count);
5413 assert(m_rows_updated >= ignore_count);
5414 m_rows_changed-= ignore_count;
5415 m_rows_updated-= ignore_count;
5419 if (m_thd_ndb->m_unsent_bytes == 0)
5421 DBUG_PRINT(
"exit", (
"skip execute - no unsent bytes"));
5425 if (thd_allow_batch(table->in_use))
5431 DBUG_PRINT(
"exit", (
"skip execute - transaction_allow_batching is ON"));
5435 if (m_thd_ndb->m_handler &&
5439 DBUG_PRINT(
"exit", (
"skip execute - simple autocommit"));
5443 uint ignore_count= 0;
5444 if (execute_no_commit(m_thd_ndb, trans,
5445 m_ignore_no_key || m_read_before_write_removal_used,
5446 &ignore_count) != 0)
5448 no_uncommitted_rows_execute_failure();
5449 DBUG_RETURN(ndb_err(trans));
5451 assert(m_rows_changed >= ignore_count);
5452 assert(m_rows_updated >= ignore_count);
5453 m_rows_changed-= ignore_count;
5454 m_rows_updated-= ignore_count;
5460 DBUG_ENTER(
"ha_ndbcluster::end_bulk_update");
// update_row(): handler UPDATE entry point; non-bulk variant of
// ndb_update_row() (is_bulk_update=0).
5464 int ha_ndbcluster::update_row(
const uchar *old_data, uchar *new_data)
5466 return ndb_update_row(old_data, new_data, 0);
// setup_key_ref_for_ndb_record(): choose which NdbRecord and key-row buffer
// identify the row for an update/delete: the active unique index, the
// table's primary key, or the hidden NDB primary key (m_ref) for tables
// without an explicit PK.
// NOTE(review): the *key_row assignments for the first two branches are in
// elided lines.
5470 ha_ndbcluster::setup_key_ref_for_ndb_record(
const NdbRecord **key_rec,
5471 const uchar **key_row,
5472 const uchar *record,
5473 bool use_active_index)
5475 DBUG_ENTER(
"setup_key_ref_for_ndb_record");
5476 if (use_active_index)
// Key taken from the unique index used for the preceding read.
5479 DBUG_PRINT(
"info", (
"Using unique index (%u)", active_index));
5480 *key_rec= m_index[active_index].ndb_unique_record_row;
5483 else if (table_share->primary_key != MAX_KEY)
5486 DBUG_PRINT(
"info", (
"Using primary key"));
5487 *key_rec= m_index[table_share->primary_key].ndb_unique_record_row;
// Hidden-PK case: key is the 64-bit hidden key cached in m_ref.
5493 DBUG_PRINT(
"info", (
"Using hidden primary key (%llu)", m_ref));
// Read-before-write removal needs a real key; incompatible with hidden PK.
5495 DBUG_ASSERT(m_read_before_write_removal_used ==
false);
5496 *key_rec= m_ndb_hidden_key_record;
5497 *key_row= (
const uchar *)(&m_ref);
5507 int ha_ndbcluster::ndb_update_row(
const uchar *old_data, uchar *new_data,
5510 THD *thd= table->in_use;
5515 uint32 old_part_id= ~uint32(0), new_part_id= ~uint32(0);
5517 longlong func_value;
5518 Uint32 func_value_uint32;
5519 bool have_pk= (table_share->primary_key != MAX_KEY);
5520 bool pk_update= (!m_read_before_write_removal_possible &&
5522 bitmap_is_overlapping(table->write_set, m_pk_bitmap_p) &&
5523 primary_key_cmp(old_data, new_data));
5524 bool batch_allowed= !m_update_cannot_batch &&
5525 (is_bulk_update || thd_allow_batch(thd));
5529 DBUG_ENTER(
"ndb_update_row");
5532 error = check_slave_state(thd);
5533 if (unlikely(error))
5540 if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
5541 thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
5543 NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
5544 int peek_res= peek_indexed_rows(new_data, write_op);
5548 DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
5550 if (peek_res != HA_ERR_KEY_NOT_FOUND)
5551 DBUG_RETURN(peek_res);
5554 ha_statistic_increment(&SSV::ha_update_count);
5556 bool skip_partition_for_unique_index= FALSE;
5557 if (m_use_partition_pruning)
5559 if (!cursor && m_read_before_write_removal_used)
5561 ndb_index_type type= get_index_type(active_index);
5568 if (type == UNIQUE_INDEX ||
5569 type == UNIQUE_ORDERED_INDEX)
5571 skip_partition_for_unique_index= TRUE;
5572 goto skip_partition_pruning;
5575 if ((error= get_parts_for_update(old_data, new_data, table->record[0],
5576 m_part_info, &old_part_id, &new_part_id,
5579 m_part_info->err_value= func_value;
5582 DBUG_PRINT(
"info", (
"old_part_id: %u new_part_id: %u", old_part_id, new_part_id));
5583 skip_partition_pruning:
5591 if (pk_update || old_part_id != new_part_id)
5593 DBUG_RETURN(ndb_pk_update_row(thd, old_data, new_data, old_part_id));
5599 if (table->found_next_number_field &&
5600 bitmap_is_set(table->write_set,
5601 table->found_next_number_field->field_index) &&
5602 (error= set_auto_inc(thd, table->found_next_number_field)))
5611 bitmap_copy(&m_bitmap, table->write_set);
5612 bitmap_subtract(&m_bitmap, m_pk_bitmap_p);
5613 uchar *mask= (uchar *)(m_bitmap.bitmap);
5614 DBUG_ASSERT(!pk_update);
5618 options.optionsPresent=0;
5623 if (m_user_defined_partitioning && !skip_partition_for_unique_index)
5625 if (func_value >= INT_MAX32)
5626 func_value_uint32= INT_MAX32;
5628 func_value_uint32= (uint32)func_value;
5629 sets[num_sets].column= get_partition_id_column();
5630 sets[num_sets].value= &func_value_uint32;
5635 options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID;
5636 options.partitionId= new_part_id;
5640 eventSetAnyValue(thd, &options);
5642 bool need_flush= add_row_check_if_batch_full(thd_ndb);
5644 const Uint32 authorValue = 1;
5645 if ((thd->slave_thread) &&
5646 (m_table->getExtraRowAuthorBits()))
5649 sets[num_sets].column= NdbDictionary::Column::ROW_AUTHOR;
5650 sets[num_sets].value= &authorValue;
5656 options.optionsPresent|= NdbOperation::OperationOptions::OO_SETVALUE;
5657 options.extraSetValues= sets;
5658 options.numExtraSetValues= num_sets;
5661 if (thd->slave_thread || THDVAR(thd, deferred_constraints))
5663 options.optionsPresent |=
5664 NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
5676 DBUG_PRINT(
"info", (
"Calling updateTuple on cursor, write_set=0x%x",
5677 table->write_set->bitmap[0]));
5679 if (options.optionsPresent != 0)
5680 poptions = &options;
5683 (
const char*)new_data, mask,
5688 m_lock_tuple= FALSE;
5689 thd_ndb->m_unsent_bytes+= 12;
5694 const uchar *key_row;
5695 setup_key_ref_for_ndb_record(&key_rec, &key_row, new_data,
5696 m_read_before_write_removal_used);
5698 #ifdef HAVE_NDB_BINLOG
5699 Uint32 buffer[ MAX_CONFLICT_INTERPRETED_PROG_SIZE ];
5701 sizeof(buffer)/
sizeof(buffer[0]));
5703 if (thd->slave_thread && m_share->m_cfn_share &&
5704 m_share->m_cfn_share->m_conflict_fn)
5707 if (unlikely((error = prepare_conflict_detection(UPDATE_ROW,
5716 if (options.optionsPresent !=0)
5719 if (!(op= trans->updateTuple(key_rec, (
const char *)key_row,
5720 m_ndb_record, (
const char*)new_data, mask,
5727 if (uses_blob_value(table->write_set))
5729 int row_offset= new_data - table->record[0];
5730 int res= set_blob_values(op, row_offset, table->write_set, &blob_count,
5731 (batch_allowed && !need_flush));
5735 uint ignore_count= 0;
5740 if (m_update_cannot_batch ||
5741 !(cursor || (batch_allowed && have_pk)) ||
5744 if (execute_no_commit(m_thd_ndb, trans,
5745 m_ignore_no_key || m_read_before_write_removal_used,
5746 &ignore_count) != 0)
5748 no_uncommitted_rows_execute_failure();
5749 DBUG_RETURN(ndb_err(trans));
5752 else if (blob_count > 0)
5753 m_blobs_pending= TRUE;
5758 assert(m_rows_changed >= ignore_count);
5759 assert(m_rows_updated >= ignore_count);
5760 m_rows_changed-= ignore_count;
5761 m_rows_updated-= ignore_count;
// delete_row(): handler DELETE entry point; delegates to ndb_delete_row()
// with primary_key_update=FALSE.
5771 int ha_ndbcluster::delete_row(
const uchar *record)
5773 return ndb_delete_row(record, FALSE);
// start_bulk_delete(): mark that subsequent delete_row() calls may be
// batched; paired with end_bulk_delete(). Return value line is elided.
5776 bool ha_ndbcluster::start_bulk_delete()
5778 DBUG_ENTER(
"start_bulk_delete");
5779 m_is_bulk_delete =
true;
5783 int ha_ndbcluster::end_bulk_delete()
5786 DBUG_ENTER(
"end_bulk_delete");
5787 assert(m_is_bulk_delete);
5788 m_is_bulk_delete =
false;
5791 assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler ==
this);
5793 if (m_thd_ndb->m_handler &&
5794 m_read_before_write_removal_possible)
5805 DBUG_PRINT(
"info", (
"committing auto-commit+rbwr early"));
5806 uint ignore_count= 0;
5807 const int ignore_error= 1;
5808 if (execute_commit(table->in_use, m_thd_ndb, trans,
5809 m_thd_ndb->m_force_send, ignore_error,
5810 &ignore_count) != 0)
5812 no_uncommitted_rows_execute_failure();
5813 DBUG_RETURN(ndb_err(trans));
5815 DBUG_PRINT(
"info", (
"ignore_count: %u", ignore_count));
5816 assert(m_rows_deleted >= ignore_count);
5817 m_rows_deleted-= ignore_count;
5821 if (m_thd_ndb->m_unsent_bytes == 0)
5823 DBUG_PRINT(
"exit", (
"skip execute - no unsent bytes"));
5827 if (thd_allow_batch(table->in_use))
5833 DBUG_PRINT(
"exit", (
"skip execute - transaction_allow_batching is ON"));
5837 if (m_thd_ndb->m_handler)
5840 DBUG_PRINT(
"exit", (
"skip execute - simple autocommit"));
5844 uint ignore_count= 0;
5845 if (execute_no_commit(m_thd_ndb, trans,
5846 m_ignore_no_key || m_read_before_write_removal_used,
5847 &ignore_count) != 0)
5849 no_uncommitted_rows_execute_failure();
5850 DBUG_RETURN(ndb_err(trans));
5853 assert(m_rows_deleted >= ignore_count);
5854 m_rows_deleted-= ignore_count;
5855 no_uncommitted_rows_update(ignore_count);
/* ndb_delete_row(): core delete implementation, shared by delete_row() and
   primary-key updates (delete+insert).  primary_key_update=TRUE suppresses
   statistics/batching behaviour specific to plain deletes.
   NOTE(review): this extract omits interior lines (declarations of cursor,
   trans, op, poptions, etc. are not visible); comments below describe only
   what the visible statements show. */
5864 int ha_ndbcluster::ndb_delete_row(
const uchar *record,
5865 bool primary_key_update)
5867 THD *thd= table->in_use;
5868 Thd_ndb *thd_ndb= get_thd_ndb(thd);
5872 uint32 part_id= ~uint32(0);
/* Batching is allowed unless explicitly disabled, and only in bulk-delete
   mode or when the THD permits batching. */
5874 bool allow_batch= !m_delete_cannot_batch &&
5875 (m_is_bulk_delete || thd_allow_batch(thd));
5877 DBUG_ENTER(
"ndb_delete_row");
5880 error = check_slave_state(thd);
5881 if (unlikely(error))
5884 ha_statistic_increment(&SSV::ha_delete_count);
/* Partition pruning: for unique-index access with read-before-write
   removal the partition cannot be derived from the key, so pruning is
   skipped via the label below. */
5887 bool skip_partition_for_unique_index= FALSE;
5888 if (m_use_partition_pruning)
5890 if (!cursor && m_read_before_write_removal_used)
5892 ndb_index_type type= get_index_type(active_index);
5899 if (type == UNIQUE_INDEX ||
5900 type == UNIQUE_ORDERED_INDEX)
5902 skip_partition_for_unique_index= TRUE;
5903 goto skip_partition_pruning;
5906 if ((error= get_part_for_delete(record, table->record[0], m_part_info,
5911 skip_partition_pruning:
5917 options.optionsPresent=0;
5919 eventSetAnyValue(thd, &options);
/* Rough per-delete byte cost used to decide when the batch is full.
   NOTE(review): the constant 12 presumably approximates operation overhead
   — confirm against the full source. */
5924 uint delete_size= 12 + (m_bytes_per_write >> 2);
5925 bool need_flush= add_row_check_if_batch_full_size(thd_ndb, delete_size);
5927 if (thd->slave_thread || THDVAR(thd, deferred_constraints))
5929 options.optionsPresent |=
5930 NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
5935 if (options.optionsPresent != 0)
5936 poptions = &options;
/* Scan-cursor path: delete the row the active cursor is positioned on. */
5945 DBUG_PRINT(
"info", (
"Calling deleteTuple on cursor"));
5952 m_lock_tuple= FALSE;
5953 thd_ndb->m_unsent_bytes+= 12;
5955 no_uncommitted_rows_update(-1);
5958 if (!(primary_key_update || m_delete_cannot_batch))
/* Key-based path: build a key reference and issue deleteTuple directly. */
5967 const uchar *key_row;
5969 if (m_user_defined_partitioning && !skip_partition_for_unique_index)
5971 options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID;
5972 options.partitionId= part_id;
5975 setup_key_ref_for_ndb_record(&key_rec, &key_row, record,
5976 m_read_before_write_removal_used);
/* Replication conflict detection (slave side only). */
5978 #ifdef HAVE_NDB_BINLOG
5979 Uint32 buffer[ MAX_CONFLICT_INTERPRETED_PROG_SIZE ];
5981 sizeof(buffer)/
sizeof(buffer[0]));
5982 if (thd->slave_thread && m_share->m_cfn_share &&
5983 m_share->m_cfn_share->m_conflict_fn)
5986 if (unlikely((error = prepare_conflict_detection(DELETE_ROW,
5995 if (options.optionsPresent != 0)
5998 if (!(op=trans->deleteTuple(key_rec, (
const char *)key_row,
6006 no_uncommitted_rows_update(-1);
6031 table_share->primary_key != MAX_KEY &&
6032 !primary_key_update &&
/* Execute the pending batch now (unless batching deferred it above). */
6040 uint ignore_count= 0;
6041 if (execute_no_commit(m_thd_ndb, trans,
6042 m_ignore_no_key || m_read_before_write_removal_used,
6043 &ignore_count) != 0)
6045 no_uncommitted_rows_execute_failure();
6046 DBUG_RETURN(ndb_err(trans));
6048 if (!primary_key_update)
6050 assert(m_rows_deleted >= ignore_count);
6051 m_rows_deleted-= ignore_count;
6052 no_uncommitted_rows_update(ignore_count);
/* unpack_record(): copy a row fetched from NDB (src_row, laid out relative
   to table->record[0]) into the MySQL record buffer dst_row, handling three
   field classes: BIT fields, BLOBs, and ordinary fields.  Null bytes are
   first set to "all null" and each non-null read column is then materialised.
   NOTE(review): some lines are elided in this extract (e.g. the declarations
   of field_bit/field_blob/res/isNull/len64 are not visible). */
6065 void ha_ndbcluster::unpack_record(uchar *dst_row,
const uchar *src_row)
6068 DBUG_ASSERT(src_row != NULL);
/* Offsets of the two buffers relative to record[0]; Field objects are
   temporarily re-pointed via move_field_offset(). */
6070 my_ptrdiff_t dst_offset= dst_row - table->record[0];
6071 my_ptrdiff_t src_offset= src_row - table->record[0];
/* Start with every column marked NULL; set_notnull() below clears bits
   for columns actually read. */
6074 memset(dst_row, 0xff, table->s->null_bytes);
6076 uchar *blob_ptr= m_blobs_buffer;
6078 for (uint i= 0; i < table_share->fields; i++)
6080 Field *field= table->field[
i];
6081 if (bitmap_is_set(table->read_set, i))
/* BIT fields: value must go through val_int()/store() because the bit
   layout differs between NDB and MySQL record formats. */
6083 if (field->type() == MYSQL_TYPE_BIT)
6086 if (!field->is_real_null(src_offset))
6088 field->move_field_offset(src_offset);
6089 longlong value= field_bit->val_int();
6090 field->move_field_offset(dst_offset-src_offset);
6091 field_bit->set_notnull();
/* store() checks the write_set; temporarily widen it for the debug build. */
6093 my_bitmap_map *old_map=
6094 dbug_tmp_use_all_columns(table, table->write_set);
6095 int res = field_bit->store(value,
true);
6096 assert(res == 0); NDB_IGNORE_VALUE(res);
6097 dbug_tmp_restore_column_map(table->write_set, old_map);
6098 field->move_field_offset(-dst_offset);
/* BLOB fields: data was read into m_blobs_buffer; point the Field_blob
   at the buffered bytes rather than copying. */
6101 else if (field->flags & BLOB_FLAG)
6104 NdbBlob *ndb_blob= m_value[
i].blob;
6110 DBUG_ASSERT(ndb_blob != 0);
6111 DBUG_ASSERT(ndb_blob->
getState() == NdbBlob::Active);
6113 res= ndb_blob->getNull(isNull);
6114 DBUG_ASSERT(res == 0);
6116 field_blob->move_field_offset(dst_offset);
6120 DBUG_ASSERT(res == 0 && len64 <= (Uint64)0xffffffff);
6121 field->set_notnull();
6124 field_blob->set_ptr((uint32)len64, blob_ptr);
6125 field_blob->move_field_offset(-dst_offset);
/* Advance past this blob, keeping 8-byte alignment in the buffer. */
6126 blob_ptr+= (len64 + 7) & ~((Uint64)7);
/* Ordinary fields: memcpy the used length and zero-pad the remainder
   so the destination is fully deterministic. */
6130 field->move_field_offset(src_offset);
6132 if (!field->is_null())
6135 uint32 actual_length= field_used_length(field);
6136 uchar *src_ptr= field->ptr;
6137 field->move_field_offset(dst_offset - src_offset);
6138 field->set_notnull();
6139 memcpy(field->ptr, src_ptr, actual_length);
6147 if (actual_length < field->pack_length())
6148 memset(field->ptr + actual_length, 0,
6149 field->pack_length() - actual_length);
6151 field->move_field_offset(-dst_offset);
6154 field->move_field_offset(-src_offset);
/* get_default_value(): copy a column's default value (taken from
   table->s->default_values) into def_val in the format NDB expects,
   mirroring the per-type handling of unpack_record(): BIT fields are
   repacked into 32-bit words, ordinary fields are memcpy'd and
   zero-padded.  NOTE(review): the BLOB branch body is elided in this
   extract. */
6161 static void get_default_value(
void *def_val,
Field *field)
6167 DBUG_ASSERT(field != NULL);
/* Offset from the current record to the defaults record. */
6169 my_ptrdiff_t src_offset= field->table->s->default_values - field->table->record[0];
6172 if (bitmap_is_set(field->table->read_set, field->field_index))
6174 if (field->type() == MYSQL_TYPE_BIT)
6177 if (!field->is_real_null(src_offset))
6179 field->move_field_offset(src_offset);
6180 longlong value= field_bit->val_int();
/* Repack the 64-bit value into two 32-bit words, bit by bit. */
6185 for (
int b=0; b < 64; b++)
6187 out[b >> 5] |= (value & 1) << (b & 31);
6191 memcpy(def_val, out,
sizeof(longlong));
6192 field->move_field_offset(-src_offset);
6195 else if (field->flags & BLOB_FLAG)
/* Ordinary fields: copy used bytes and zero the tail of the buffer. */
6201 field->move_field_offset(src_offset);
6203 if (!field->is_null())
6206 uint32 actual_length= field_used_length(field);
6207 uchar *src_ptr= field->ptr;
6208 field->set_notnull();
6209 memcpy(def_val, src_ptr, actual_length);
6211 if (actual_length < field->pack_length())
6212 memset(((
char*)def_val) + actual_length, 0,
6213 field->pack_length() - actual_length);
6216 field->move_field_offset(-src_offset);
/* print_results(): debug-only helper that DBUG_PRINTs every field of the
   current row — its type, value, or "not read"/"NULL".  Has no effect on
   query results. */
6227 void ha_ndbcluster::print_results()
6229 DBUG_ENTER(
"print_results");
6233 char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
6234 String type(buf_type,
sizeof(buf_type), &my_charset_bin);
6235 String val(buf_val,
sizeof(buf_val), &my_charset_bin);
6236 for (uint f= 0; f < table_share->fields; f++)
6245 field= table->field[f];
/* Column was never fetched from NDB. */
6246 if (!(value= m_value[f]).ptr)
6248 strmov(buf,
"not read");
6254 if (! (field->flags & BLOB_FLAG))
6258 strmov(buf,
"NULL");
/* Non-blob: format "<sql type> <value>". */
6263 field->sql_type(type);
6264 field->val_str(&val);
6265 my_snprintf(buf,
sizeof(buf),
"%s %s", type.c_ptr(), val.c_ptr());
/* Blob: only NULL-ness is reported via the NdbBlob handle. */
6269 NdbBlob *ndb_blob= value.blob;
6271 assert(ndb_blob->
getState() == NdbBlob::Active);
6272 ndb_blob->getNull(isNull);
6274 strmov(buf,
"NULL");
6278 DBUG_PRINT(
"value", (
"%u,%s: %s", f, field->field_name, buf));
/* include_partition_fields_in_used_fields(): mark every partitioning
   field in read_set so partition pruning has the columns it needs. */
6302 include_partition_fields_in_used_fields(
Field **ptr,
MY_BITMAP *read_set)
6304 DBUG_ENTER(
"include_partition_fields_in_used_fields");
6307 bitmap_set_bit(read_set, (*ptr)->field_index);
/* index_init(): handler callback starting index access on 'index';
   'sorted' requests ordered retrieval.  For hidden-PK tables with
   partition pruning, partitioning columns are forced into the read set. */
6313 int ha_ndbcluster::index_init(uint index,
bool sorted)
6315 DBUG_ENTER(
"ha_ndbcluster::index_init");
6316 DBUG_PRINT(
"enter", (
"index: %u sorted: %d", index, sorted));
6317 active_index=
index;
6324 m_lock_tuple= FALSE;
6325 if (table_share->primary_key == MAX_KEY &&
6326 m_use_partition_pruning)
6327 include_partition_fields_in_used_fields(
6328 m_part_info->full_part_field_array,
/* index_end(): close any open scan when index access finishes. */
6334 int ha_ndbcluster::index_end()
6336 DBUG_ENTER(
"ha_ndbcluster::index_end");
6337 DBUG_RETURN(close_scan());
/* check_null_in_key(): walk the key parts of 'key' (key_len bytes) and
   report whether any nullable part carries the NULL marker byte — used to
   decide whether a unique-index lookup is possible. */
6345 check_null_in_key(
const KEY* key_info,
const uchar *key, uint key_len)
6348 const uchar* end_ptr= key + key_len;
6349 curr_part= key_info->key_part;
6352 for (; curr_part != end_part && key < end_ptr; curr_part++)
/* For nullable parts the first byte of the stored key is the NULL flag. */
6354 if (curr_part->null_bit && *key)
6357 key += curr_part->store_length;
/* index_read_idx_map(): one-shot read on a given index — init, read,
   (end elided in this extract). */
6365 key_part_map keypart_map,
6366 enum ha_rkey_function find_flag)
6368 DBUG_ENTER(
"ha_ndbcluster::index_read_idx_map");
6369 int error= index_init(index, 0);
6370 if (unlikely(error))
/* index_read(): read first row matching 'key' on the active index.
   Backward search flags select a descending ordered scan. */
6377 int ha_ndbcluster::index_read(uchar *buf,
6378 const uchar *key, uint key_len,
6379 enum ha_rkey_function find_flag)
6382 bool descending= FALSE;
6383 DBUG_ENTER(
"ha_ndbcluster::index_read");
6384 DBUG_PRINT(
"enter", (
"active_index: %u, key_len: %u, find_flag: %d",
6385 active_index, key_len, find_flag));
6388 start_key.length= key_len;
6389 start_key.flag= find_flag;
/* These flags all imply scanning backwards from the key. */
6391 switch (find_flag) {
6392 case HA_READ_KEY_OR_PREV:
6393 case HA_READ_BEFORE_KEY:
6394 case HA_READ_PREFIX_LAST:
6395 case HA_READ_PREFIX_LAST_OR_PREV:
6401 const int error= read_range_first_to_buf(&start_key, 0, descending,
6403 table->status=error ? STATUS_NOT_FOUND: 0;
/* index_next(): fetch the next row of the current ordered scan. */
6410 DBUG_ENTER(
"ha_ndbcluster::index_next");
6411 ha_statistic_increment(&SSV::ha_read_next_count);
6412 const int error= next_result(buf);
6413 table->status=error ? STATUS_NOT_FOUND: 0;
/* index_prev(): fetch the previous row (scan was set up descending). */
6420 DBUG_ENTER(
"ha_ndbcluster::index_prev");
6421 ha_statistic_increment(&SSV::ha_read_prev_count);
6422 const int error= next_result(buf);
6423 table->status=error ? STATUS_NOT_FOUND: 0;
/* index_first(): start an ordered scan from the beginning of the index. */
6430 DBUG_ENTER(
"ha_ndbcluster::index_first");
6431 ha_statistic_increment(&SSV::ha_read_first_count);
6435 const int error= ordered_index_scan(0, 0, m_sorted, FALSE, buf, NULL);
6436 table->status=error ? STATUS_NOT_FOUND: 0;
/* index_last(): start an ordered scan from the end (descending=TRUE). */
6443 DBUG_ENTER(
"ha_ndbcluster::index_last");
6444 ha_statistic_increment(&SSV::ha_read_last_count);
6445 const int error= ordered_index_scan(0, 0, m_sorted, TRUE, buf, NULL);
6446 table->status=error ? STATUS_NOT_FOUND: 0;
/* index_read_last(): read the last row matching a key prefix. */
6450 int ha_ndbcluster::index_read_last(uchar * buf,
const uchar * key, uint key_len)
6452 DBUG_ENTER(
"ha_ndbcluster::index_read_last");
6453 DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST));
/* read_first_row(): generic "first row" — falls back to rnd_next() scan
   unless an ordered PK read is clearly cheaper (heuristic below). */
6472 DBUG_ENTER(
"ha_ndbcluster::read_first_row");
6474 ha_statistic_increment(&SSV::ha_read_first_count);
6481 if (
stats.deleted < 10 || primary_key >= MAX_KEY ||
6482 !(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
6485 while ((error=
rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
/* read_range_first_to_buf(): core range-read dispatcher.  Chooses between
   a primary-key read, a unique-index read, a full table scan, or an
   ordered index scan, applying partition pruning when enabled.
   NOTE(review): several lines (error declarations, part_spec declaration,
   some braces) are elided in this extract. */
6497 int ha_ndbcluster::read_range_first_to_buf(
const key_range *start_key,
6499 bool desc,
bool sorted,
6503 ndb_index_type type= get_index_type(active_index);
6504 const KEY* key_info= table->key_info+active_index;
6506 DBUG_ENTER(
"ha_ndbcluster::read_range_first_to_buf");
6507 DBUG_PRINT(
"info", (
"desc: %d, sorted: %d", desc, sorted));
6509 if (unlikely((error= close_scan())))
/* Partition pruning: compute the partition set for the start key. */
6512 if (m_use_partition_pruning)
6514 DBUG_ASSERT(m_pushed_join_operation != PUSHED_ROOT);
6515 get_partition_set(table, buf, active_index, start_key, &part_spec);
6516 DBUG_PRINT(
"info", (
"part_spec.start_part: %u part_spec.end_part: %u",
6517 part_spec.start_part, part_spec.end_part));
/* Empty partition set: no rows can match. */
6524 if (part_spec.start_part > part_spec.end_part)
6526 DBUG_RETURN(HA_ERR_END_OF_FILE);
/* Single partition: start the transaction pinned to that partition. */
6529 if (part_spec.start_part == part_spec.end_part)
6537 if (unlikely(!get_transaction_part_id(part_spec.start_part, error)))
/* Exact full-PK lookup: use a direct pk_read instead of a scan. */
6545 case PRIMARY_KEY_ORDERED_INDEX:
6546 case PRIMARY_KEY_INDEX:
6549 start_key->flag == HA_READ_KEY_EXACT)
6551 if (!m_thd_ndb->trans)
6552 if (unlikely(!start_transaction_key(active_index,
6553 start_key->key, error)))
6555 error= pk_read(start_key->key, start_key->length, buf,
6556 (m_use_partition_pruning)? &(part_spec.start_part) : NULL);
6557 DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
/* Exact full-unique-key lookup (no NULLs in the key): direct unique read. */
6560 case UNIQUE_ORDERED_INDEX:
6562 if (start_key && start_key->length == key_info->
key_length &&
6563 start_key->flag == HA_READ_KEY_EXACT &&
6564 !check_null_in_key(key_info, start_key->key, start_key->length))
6566 if (!m_thd_ndb->trans)
6567 if (unlikely(!start_transaction_key(active_index,
6568 start_key->key, error)))
6570 error= unique_index_read(start_key->key, start_key->length, buf);
6571 DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
/* Plain UNIQUE_INDEX without an exact key cannot be range-scanned:
   fall back to a full table scan filtered by the key. */
6573 else if (type == UNIQUE_INDEX)
6574 DBUG_RETURN(full_table_scan(key_info,
/* Without pruning, still try to pin the transaction to one partition. */
6582 if (!m_use_partition_pruning && !m_thd_ndb->trans)
6584 get_partition_set(table, buf, active_index, start_key, &part_spec);
6585 if (part_spec.start_part == part_spec.end_part)
6586 if (unlikely(!start_transaction_part_id(part_spec.start_part, error)))
/* Default: ordered index scan over [start_key, end_key]. */
6590 DBUG_RETURN(ordered_index_scan(start_key, end_key, sorted, desc, buf,
6591 (m_use_partition_pruning)? &part_spec : NULL));
/* read_range_first(): public wrapper reading into table->record[0]. */
6596 bool eq_r,
bool sorted)
6598 uchar* buf= table->record[0];
6599 DBUG_ENTER(
"ha_ndbcluster::read_range_first");
6600 DBUG_RETURN(read_range_first_to_buf(start_key, end_key, FALSE,
/* read_range_next(): fetch the next row of the current range scan. */
6606 DBUG_ENTER(
"ha_ndbcluster::read_range_next");
6607 DBUG_RETURN(next_result(table->record[0]));
/* rnd_init(): begin a table scan; closes any previous scan and sets up
   index state on the primary key. */
6614 DBUG_ENTER(
"rnd_init");
6615 DBUG_PRINT(
"enter", (
"scan: %d", scan));
6617 if ((error= close_scan()))
6619 index_init(table_share->primary_key, 0);
/* close_scan(): tear down the active query/cursor.  Pending lock-tuple
   operations are flushed with execute_no_commit before the cursor closes.
   NOTE(review): some declarations and braces are elided in this extract. */
6623 int ha_ndbcluster::close_scan()
6631 if (m_thd_ndb == NULL)
6635 DBUG_ENTER(
"close_scan");
6639 m_active_query->
close(m_thd_ndb->m_force_send);
6640 m_active_query= NULL;
6647 cursor = m_multi_cursor;
6652 if ((error= scan_handle_lock_tuple(cursor, trans)) != 0)
/* Unsent operations (e.g. deferred locks) must be executed before the
   scan cursor can be closed. */
6655 if (m_thd_ndb->m_unsent_bytes)
6661 DBUG_PRINT(
"info", (
"thd_ndb->m_unsent_bytes: %ld",
6662 (
long) m_thd_ndb->m_unsent_bytes));
6663 if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
6665 no_uncommitted_rows_execute_failure();
6666 DBUG_RETURN(ndb_err(trans));
6670 cursor->
close(m_thd_ndb->m_force_send, TRUE);
6671 m_active_cursor= NULL;
6672 m_multi_cursor= NULL;
/* rnd_end(): finish a table scan. */
6676 int ha_ndbcluster::rnd_end()
6678 DBUG_ENTER(
"rnd_end");
6679 DBUG_RETURN(close_scan());
/* rnd_next(): next row of a table scan — continue the active cursor or
   pushed query if present, otherwise start a full table scan. */
6685 DBUG_ENTER(
"rnd_next");
6686 ha_statistic_increment(&SSV::ha_read_rnd_next_count);
6689 if (m_active_cursor)
6690 error= next_result(buf);
6691 else if (m_active_query)
6692 error= next_result(buf);
6694 error= full_table_scan(NULL, NULL, NULL, buf);
6696 table->status= error ? STATUS_NOT_FOUND: 0;
/* rnd_pos(): re-read the row identified by 'pos' (a ref built by
   position()).  With user-defined partitioning the partition id is either
   appended to the ref (hidden PK) or derived from the key. */
6709 DBUG_ENTER(
"rnd_pos");
6710 ha_statistic_increment(&SSV::ha_read_rnd_count);
6716 if (m_user_defined_partitioning)
6718 if (table_share->primary_key == MAX_KEY)
/* Hidden PK: partition id is stored after the key bytes in the ref. */
6724 DBUG_DUMP(
"key+part", pos, key_length);
6726 part_spec.start_part= part_spec.end_part= *(uint32 *)(pos + key_length);
/* Real PK: recompute the partition from the key value. */
6731 KEY *key_info= table->key_info + table_share->primary_key;
6733 key_spec.length= key_length;
6734 key_spec.flag= HA_READ_KEY_EXACT;
6735 get_full_part_id_from_key(table, buf, key_info,
6736 &key_spec, &part_spec);
6737 DBUG_ASSERT(part_spec.start_part == part_spec.end_part);
6739 DBUG_PRINT(
"info", (
"partition id %u", part_spec.start_part));
6741 DBUG_DUMP(
"key", pos, key_length);
6742 int res= pk_read(pos, key_length, buf,
6743 (m_user_defined_partitioning) ?
6744 &(part_spec.start_part)
/* A ref that no longer resolves means the row was deleted meanwhile. */
6746 if (res == HA_ERR_KEY_NOT_FOUND)
6760 res= HA_ERR_RECORD_DELETED;
6762 table->status= res ? STATUS_NOT_FOUND: 0;
/* position(): store a re-readable reference for 'record' into this->ref.
   With a real primary key the ref is the packed PK value (VARCHARs are
   normalised); with a hidden key it is the auto-generated 8-byte key, plus
   the partition id when user-defined partitioning is active. */
6774 void ha_ndbcluster::position(
const uchar *record)
6782 DBUG_ENTER(
"position");
6784 if (table_share->primary_key != MAX_KEY)
6787 key_info= table->key_info + table_share->primary_key;
6788 key_part= key_info->key_part;
6792 for (; key_part != end; key_part++)
/* NULL key parts should not occur in a primary key. */
6794 if (key_part->null_bit) {
6796 if (record[key_part->null_offset]
6797 & key_part->null_bit) {
6804 size_t len = key_part->length;
6805 const uchar * ptr = record + key_part->offset;
6806 Field *field = key_part->field;
/* VARCHAR: layout in the ref depends on the length-byte count.
   NOTE(review): the branch condition between the two memcpy forms is
   elided in this extract. */
6807 if (field->type() == MYSQL_TYPE_VARCHAR)
6816 memcpy(buff+2, ptr + 1, len);
6820 memcpy(buff, ptr, len + 2);
/* Fixed-size key part: copy as-is. */
6826 memcpy(buff, ptr, len);
/* Hidden-key path: copy the auto-generated key (and partition id). */
6834 DBUG_PRINT(
"info", (
"Getting hidden key"));
6836 if (m_user_defined_partitioning)
6838 DBUG_PRINT(
"info", (
"Saving partition id %u", m_part_id));
6840 memcpy(ref+key_length, (
void *)&m_part_id,
sizeof(m_part_id));
6845 int hidden_no= table->s->fields;
6846 const NDBTAB *tab= m_table;
6849 hidden_col->getAutoIncrement() &&
6850 key_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH);
6852 memcpy(ref, &m_ref, key_length);
6855 if (table_share->primary_key == MAX_KEY && m_user_defined_partitioning)
6856 DBUG_DUMP(
"key+part", ref, key_length+
sizeof(m_part_id));
6858 DBUG_DUMP(
"ref", ref, key_length);
/* cmp_ref(): compare two refs produced by position().  With a real PK,
   compare key part by key part via Field::key_cmp. */
6863 ha_ndbcluster::cmp_ref(
const uchar * ref1,
const uchar * ref2)
6865 DBUG_ENTER(
"cmp_ref");
6867 if (table_share->primary_key != MAX_KEY)
6869 KEY *key_info= table->key_info + table_share->primary_key;
6873 for (; key_part != end; key_part++)
6877 Field *field= key_part->field;
6878 int result= field->key_cmp(ref1, ref2);
6881 DBUG_RETURN(result);
/* NOTE(review): VARCHAR advance logic appears elided here — the two
   refs are stepped past this key part before the next comparison. */
6884 if (field->type() == MYSQL_TYPE_VARCHAR)
6890 ref1+= key_part->length;
6891 ref2+= key_part->length;
/* info(): handler statistics callback.  Fills this->stats according to
   'flag'; may contact the cluster (update_stats / readAutoIncrementValue)
   unless cached per-transaction counts can be used.
   NOTE(review): the "while" on HA_STATUS_VARIABLE below is presumably a
   single-pass loop used for early break — confirm against full source. */
6901 int ha_ndbcluster::info(uint flag)
6903 THD *thd= table->in_use;
6906 DBUG_PRINT(
"enter", (
"flag: %d", flag));
6908 if (flag & HA_STATUS_POS)
6909 DBUG_PRINT(
"info", (
"HA_STATUS_POS"));
6910 if (flag & HA_STATUS_TIME)
6911 DBUG_PRINT(
"info", (
"HA_STATUS_TIME"));
6912 while (flag & HA_STATUS_VARIABLE)
6916 DBUG_PRINT(
"info", (
"HA_STATUS_VARIABLE"));
6920 if ((my_errno= check_ndb_connection(thd)))
6921 DBUG_RETURN(my_errno);
/* Exact counts are fetched from NDB when requested, when locking is
   allowed, or when no usable cached table info exists. */
6932 bool exact_count= THDVAR(thd, use_exact_count);
6934 !(flag & HA_STATUS_NO_LOCK) ||
6935 m_table_info == NULL ||
6936 m_table_info->records == ~(ha_rows)0)
6938 result= update_stats(thd, (exact_count || !(flag & HA_STATUS_NO_LOCK)));
6940 DBUG_RETURN(result);
/* Otherwise use the cached count adjusted by this transaction's
   uncommitted row delta. */
6945 DBUG_ASSERT(m_table_info->records != ~(ha_rows)0);
6946 stats.records= m_table_info->records +
6947 m_table_info->no_uncommitted_rows_count;
6950 if (thd->lex->sql_command != SQLCOM_SHOW_TABLE_STATUS &&
6951 thd->lex->sql_command != SQLCOM_SHOW_KEYS)
/* Optimizer prefers records >= 2 so it does not assume a 1-row table. */
6958 if (
stats.records < 2)
6964 if (flag & HA_STATUS_VARIABLE)
6967 DBUG_PRINT(
"info", (
"rec_per_key"));
6970 if (flag & HA_STATUS_ERRKEY)
6972 DBUG_PRINT(
"info", (
"HA_STATUS_ERRKEY"));
/* Auto-increment status: read the next value from the cluster. */
6975 if (flag & HA_STATUS_AUTO)
6977 DBUG_PRINT(
"info", (
"HA_STATUS_AUTO"));
6978 if (m_table && table->found_next_number_field)
6982 if ((my_errno= check_ndb_connection(thd)))
6983 DBUG_RETURN(my_errno);
6984 Ndb *ndb= get_ndb(thd);
6987 Uint64 auto_increment_value64;
6988 if (ndb->readAutoIncrementValue(m_table, g.range,
6989 auto_increment_value64) == -1)
6992 sql_print_error(
"Error %lu in readAutoIncrementValue(): %s",
6994 stats.auto_increment_value= ~(ulonglong)0;
6997 stats.auto_increment_value= (ulonglong)auto_increment_value64;
7002 result= HA_ERR_NO_CONNECTION;
7004 DBUG_RETURN(result);
/* get_dynamic_partition_info(): per-partition statistics — refreshes
   stats for one partition and copies them into stat_info. */
7008 void ha_ndbcluster::get_dynamic_partition_info(
PARTITION_STATS *stat_info,
7011 DBUG_PRINT(
"info", (
"ha_ndbcluster::get_dynamic_partition_info"));
7015 THD *thd = table->in_use;
7021 if ((error = check_ndb_connection(thd)))
7024 error = update_stats(thd, 1,
false, part_id);
7028 stat_info->records =
stats.records;
7029 stat_info->mean_rec_length =
stats.mean_rec_length;
7030 stat_info->data_file_length =
stats.data_file_length;
7031 stat_info->delete_length =
stats.delete_length;
7032 stat_info->max_data_file_length =
stats.max_data_file_length;
/* On failure only a warning is printed; stat_info is left as-is. */
7038 DBUG_PRINT(
"warning",
7039 (
"ha_ndbcluster::get_dynamic_partition_info failed with error code %u",
/* extra(): handler hint dispatcher.  Each case toggles a member flag that
   later operations consult; no immediate cluster interaction happens here.
   NOTE(review): break statements and some flag assignments are elided in
   this extract. */
7044 int ha_ndbcluster::extra(
enum ha_extra_function operation)
7046 DBUG_ENTER(
"extra");
7047 switch (operation) {
7048 case HA_EXTRA_IGNORE_DUP_KEY:
7049 DBUG_PRINT(
"info", (
"HA_EXTRA_IGNORE_DUP_KEY"));
7050 DBUG_PRINT(
"info", (
"Ignoring duplicate key"));
7051 m_ignore_dup_key= TRUE;
7053 case HA_EXTRA_NO_IGNORE_DUP_KEY:
7054 DBUG_PRINT(
"info", (
"HA_EXTRA_NO_IGNORE_DUP_KEY"));
7055 m_ignore_dup_key= FALSE;
7057 case HA_EXTRA_IGNORE_NO_KEY:
7058 DBUG_PRINT(
"info", (
"HA_EXTRA_IGNORE_NO_KEY"));
7059 DBUG_PRINT(
"info", (
"Turning on AO_IgnoreError at Commit/NoCommit"));
7060 m_ignore_no_key= TRUE;
7062 case HA_EXTRA_NO_IGNORE_NO_KEY:
7063 DBUG_PRINT(
"info", (
"HA_EXTRA_NO_IGNORE_NO_KEY"));
7064 DBUG_PRINT(
"info", (
"Turning on AO_IgnoreError at Commit/NoCommit"));
7065 m_ignore_no_key= FALSE;
/* REPLACE can use NDB write (upsert) only when no unique index would
   be bypassed, except on slaves / manual binlog execution. */
7067 case HA_EXTRA_WRITE_CAN_REPLACE:
7068 DBUG_PRINT(
"info", (
"HA_EXTRA_WRITE_CAN_REPLACE"));
7069 if (!m_has_unique_index ||
7070 current_thd->slave_thread ||
7071 isManualBinlogExec(current_thd))
7073 DBUG_PRINT(
"info", (
"Turning ON use of write instead of insert"));
7077 case HA_EXTRA_WRITE_CANNOT_REPLACE:
7078 DBUG_PRINT(
"info", (
"HA_EXTRA_WRITE_CANNOT_REPLACE"));
7079 DBUG_PRINT(
"info", (
"Turning OFF use of write instead of insert"));
7082 case HA_EXTRA_DELETE_CANNOT_BATCH:
7083 DBUG_PRINT(
"info", (
"HA_EXTRA_DELETE_CANNOT_BATCH"));
7084 m_delete_cannot_batch= TRUE;
7086 case HA_EXTRA_UPDATE_CANNOT_BATCH:
7087 DBUG_PRINT(
"info", (
"HA_EXTRA_UPDATE_CANNOT_BATCH"));
7088 m_update_cannot_batch= TRUE;
/* Key-read mode disables pushed joins (they need full rows). */
7091 case HA_EXTRA_KEYREAD:
7092 DBUG_PRINT(
"info", (
"HA_EXTRA_KEYREAD"));
7093 m_disable_pushed_join= TRUE;
7095 case HA_EXTRA_NO_KEYREAD:
7096 DBUG_PRINT(
"info", (
"HA_EXTRA_NO_KEYREAD"));
7097 m_disable_pushed_join= FALSE;
/* start_read_removal(): decide whether read-before-write removal (update/
   delete without first reading the row) is possible for this statement.
   Each disqualifying condition DBUG_PRINTs its reason and (in the full
   source) returns early. */
7109 THD *thd= table->in_use;
7110 DBUG_ENTER(
"start_read_removal");
7112 if (uses_blob_value(table->write_set))
7114 DBUG_PRINT(
"exit", (
"No! Blob field in write_set"));
7118 if (thd->lex->sql_command == SQLCOM_DELETE &&
7119 table_share->blob_fields)
7121 DBUG_PRINT(
"exit", (
"No! DELETE from table with blob(s)"));
7125 if (table_share->primary_key == MAX_KEY)
7127 DBUG_PRINT(
"exit", (
"No! Table with hidden key"));
7131 if (bitmap_is_overlapping(table->write_set, m_pk_bitmap_p))
7133 DBUG_PRINT(
"exit", (
"No! Updating primary key"));
/* Updates touching any unique key column also disqualify rbwr. */
7137 if (m_has_unique_index)
7139 for (uint i= 0; i < table_share->keys; i++)
7141 const KEY* key= table->key_info +
i;
7142 if ((key->
flags & HA_NOSAME) &&
7143 bitmap_is_overlapping(table->write_set,
7146 DBUG_PRINT(
"exit", (
"No! Unique key %d is updated", i));
7151 m_read_before_write_removal_possible= TRUE;
7152 DBUG_PRINT(
"exit", (
"Yes, rbwr is possible!"));
/* end_read_removal(): report how many rows the rbwr statement touched. */
7159 DBUG_ENTER(
"end_read_removal");
7160 DBUG_ASSERT(m_read_before_write_removal_possible);
7161 DBUG_PRINT(
"info", (
"updated: %llu, deleted: %llu",
7162 m_rows_updated, m_rows_deleted));
7163 DBUG_RETURN(m_rows_updated + m_rows_deleted);
/* reset(): return the handler to its default per-statement state —
   clears pushed-condition, pushed-join, partition and batching flags. */
7169 DBUG_ENTER(
"ha_ndbcluster::reset");
7172 m_cond->cond_clear();
7174 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
7175 DBUG_ASSERT(m_active_query == NULL);
/* Only the pushed-join root owns (and frees) the join definition. */
7176 if (m_pushed_join_operation==PUSHED_ROOT)
7178 delete m_pushed_join_member;
7180 m_pushed_join_member= NULL;
7181 m_pushed_join_operation= -1;
7182 m_disable_pushed_join= FALSE;
7194 bitmap_set_all(&m_part_info->used_partitions);
7198 m_read_before_write_removal_possible= FALSE;
7199 m_read_before_write_removal_used= FALSE;
7200 m_rows_updated= m_rows_deleted= 0;
7201 m_ignore_dup_key= FALSE;
7203 m_ignore_no_key= FALSE;
7204 m_rows_inserted= (ha_rows) 0;
7205 m_rows_to_insert= (ha_rows) 1;
7206 m_delete_cannot_batch= FALSE;
7207 m_update_cannot_batch= FALSE;
7209 assert(m_is_bulk_delete ==
false);
7210 m_is_bulk_delete =
false;
/* flush_bulk_insert(): send accumulated insert operations to NDB.  In
   transactional mode this is execute_no_commit; with transactions off
   (e.g. LOAD DATA) each flush commits and the tables are marked as
   modified-non-transactional so replication/binlog handle them correctly. */
7225 ha_ndbcluster::flush_bulk_insert(
bool allow_batch)
7228 DBUG_ENTER(
"ha_ndbcluster::flush_bulk_insert");
7229 DBUG_PRINT(
"info", (
"Sending inserts to NDB, rows_inserted: %d",
7230 (
int)m_rows_inserted));
7234 if (! (m_thd_ndb->trans_options & TNTO_TRANSACTIONS_OFF))
7237 execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
7239 no_uncommitted_rows_execute_failure();
7240 DBUG_RETURN(ndb_err(trans));
/* Transactions off: commit each batch immediately. */
7249 THD *thd= table->in_use;
7250 thd->transaction.all.mark_modified_non_trans_table();
7251 thd->transaction.stmt.mark_modified_non_trans_table();
7252 if (execute_commit(thd, m_thd_ndb, trans, m_thd_ndb->m_force_send,
7253 m_ignore_no_key) != 0)
7255 no_uncommitted_rows_execute_failure();
7256 DBUG_RETURN(ndb_err(trans));
/* start_bulk_insert(): size the insert batch.  rows==0 means "unknown",
   so a prefetch-based guess is used; duplicate-key handling via peek_row
   forces row-at-a-time inserts. */
7267 void ha_ndbcluster::start_bulk_insert(ha_rows rows)
7269 DBUG_ENTER(
"start_bulk_insert");
7270 DBUG_PRINT(
"enter", (
"rows: %d", (
int)rows));
7272 m_rows_inserted= (ha_rows) 0;
7273 if (!m_use_write && m_ignore_dup_key)
7280 DBUG_PRINT(
"info", (
"Batching turned off as duplicate key is "
7281 "ignored by using peek_row"));
7282 m_rows_to_insert= 1;
7285 if (rows == (ha_rows) 0)
7289 (m_autoincrement_prefetch > DEFAULT_AUTO_PREFETCH)
7290 ? m_autoincrement_prefetch
7291 : DEFAULT_AUTO_PREFETCH;
7292 m_autoincrement_prefetch= m_rows_to_insert;
7296 m_rows_to_insert= rows;
/* Prefetch at least as many auto-inc values as rows to insert. */
7297 if (m_autoincrement_prefetch < m_rows_to_insert)
7298 m_autoincrement_prefetch= m_rows_to_insert;
/* end_bulk_insert(): flush any remaining batched inserts and reset the
   per-statement insert counters. */
7307 int ha_ndbcluster::end_bulk_insert()
7311 DBUG_ENTER(
"end_bulk_insert");
7314 THD *thd= table->in_use;
7317 if (!thd_allow_batch(thd) && thd_ndb->m_unsent_bytes)
7319 bool allow_batch= (thd_ndb->m_handler != 0);
7320 error= flush_bulk_insert(allow_batch);
7325 m_rows_inserted= (ha_rows) 0;
7326 m_rows_to_insert= (ha_rows) 1;
/* extra_opt(): extra() variant carrying a cache size; the size is only
   logged here and the hint forwarded to extra(). */
7331 int ha_ndbcluster::extra_opt(
enum ha_extra_function operation, ulong cache_size)
7333 DBUG_ENTER(
"extra_opt");
7334 DBUG_PRINT(
"enter", (
"cache_size: %lu", cache_size));
7335 DBUG_RETURN(
extra(operation));
/* File-extension list for this engine (table of suffixes, returned by
   the bas_ext-style callback below). */
7338 static const char *ha_ndbcluster_exts[] = {
7345 return ha_ndbcluster_exts;
/* scan_time(): optimizer cost of a full scan.  NDB scans pay network
   round-trips, hence the large (records * 1000) weighting. */
7355 double ha_ndbcluster::scan_time()
7357 DBUG_ENTER(
"ha_ndbcluster::scan_time()");
7358 double res= rows2double(
stats.records*1000);
7359 DBUG_PRINT(
"exit", (
"table: %s value: %f",
/* store_lock(): map MySQL thread lock requests onto NDB semantics.
   NDB does its own row locking, so most write locks are downgraded to
   TL_WRITE_ALLOW_WRITE to allow concurrency between mysqld servers. */
7374 enum thr_lock_type lock_type)
7376 DBUG_ENTER(
"store_lock");
7377 if (lock_type != TL_IGNORE && m_lock.type == TL_UNLOCK)
7386 const bool in_lock_tables = thd_in_lock_tables(thd);
7387 const uint sql_command = thd_sql_command(thd);
/* Downgrade write locks unless inside an explicit LOCK TABLES. */
7388 if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
7389 lock_type <= TL_WRITE) &&
7390 !(in_lock_tables && sql_command == SQLCOM_LOCK_TABLES))
7391 lock_type= TL_WRITE_ALLOW_WRITE;
7399 if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
/* ALTER TABLE must hold a real write lock. */
7409 if (sql_command == SQLCOM_ALTER_TABLE)
7410 lock_type = TL_WRITE;
7412 m_lock.type=lock_type;
7416 DBUG_PRINT(
"exit", (
"lock_type: %d", lock_type));
/* ndbcluster_update_apply_status(): slave-side only — write/update this
   server's row in mysql.ndb_apply_status with the current replication
   coordinates (columns by position: 0=server_id, 2=log name, 3=start pos,
   4=end pos).  NOTE(review): error-handling lines are elided in this
   extract; each NDB call's result is only DBUG_ASSERTed here. */
7437 #ifdef HAVE_NDB_BINLOG
7438 static int ndbcluster_update_apply_status(THD *thd,
int do_update)
7440 Thd_ndb *thd_ndb= get_thd_ndb(thd);
7441 Ndb *ndb= thd_ndb->ndb;
7447 if (!(ndbtab= ndbtab_g.get_table()))
7454 DBUG_ASSERT(r == 0);
7459 DBUG_ASSERT(r == 0);
/* Column 0: this server's id (primary key of the status row). */
7461 r|= op->
equal(0u, (Uint32)thd->server_id);
7462 DBUG_ASSERT(r == 0);
7467 DBUG_ASSERT(r == 0);
/* Current replication coordinates from the master info structures. */
7469 const char* group_master_log_name =
7470 ndb_mi_get_group_master_log_name();
7471 const Uint64 group_master_log_pos =
7472 ndb_mi_get_group_master_log_pos();
7473 const Uint64 future_event_relay_log_pos =
7474 ndb_mi_get_future_event_relay_log_pos();
7475 const Uint64 group_relay_log_pos =
7476 ndb_mi_get_group_relay_log_pos();
/* Column 2: master log file name, packed as NDB varchar. */
7479 char tmp_buf[FN_REFLEN];
7480 ndb_pack_varchar(ndbtab->
getColumn(2u), tmp_buf,
7481 group_master_log_name, strlen(group_master_log_name));
7483 DBUG_ASSERT(r == 0);
7485 r|= op->
setValue(3u, group_master_log_pos);
7486 DBUG_ASSERT(r == 0);
/* Column 4: estimated end position = start + relay-log delta. */
7488 r|= op->
setValue(4u, group_master_log_pos +
7489 (future_event_relay_log_pos - group_relay_log_pos));
7490 DBUG_ASSERT(r == 0);
/* transaction_checks(): derive per-statement Thd_ndb settings from the
   THD — transactions may be switched off (LOAD DATA, disabled flag, or
   ndb_use_transactions=0), and batch size / force-send are refreshed. */
7495 static void transaction_checks(THD *thd,
Thd_ndb *thd_ndb)
7497 if (thd->lex->sql_command == SQLCOM_LOAD)
7498 thd_ndb->trans_options|= TNTO_TRANSACTIONS_OFF;
7499 else if (!thd->transaction.flags.enabled)
7500 thd_ndb->trans_options|= TNTO_TRANSACTIONS_OFF;
7501 else if (!THDVAR(thd, use_transactions))
7502 thd_ndb->trans_options|= TNTO_TRANSACTIONS_OFF;
7503 thd_ndb->m_force_send= THDVAR(thd, force_send);
/* Slave threads use the global batch size, not the session one. */
7504 if (!thd->slave_thread)
7505 thd_ndb->m_batch_size= THDVAR(thd, batch_size);
7508 thd_ndb->m_batch_size= THDVAR(NULL, batch_size);
7510 THDVAR(thd, optimized_node_selection)=
7511 THDVAR(NULL, optimized_node_selection) & 1;
/* start_statement(): called for the first table lock of a statement.
   Decides whether this handler can own the whole statement (single-table
   autocommit => thd_ndb->m_handler = this, enabling optimisations) and
   lazily starts the NDB transaction.  table_count==0 marks the first
   handler of the statement. */
7515 int ha_ndbcluster::start_statement(THD *thd,
7521 DBUG_ENTER(
"ha_ndbcluster::start_statement");
7524 transaction_checks(thd, m_thd_ndb);
7526 if (table_count == 0)
/* Inside BEGIN / autocommit-off, no single handler owns the statement. */
7529 if (thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
7533 thd_ndb->m_handler= NULL;
7542 thd_ndb->m_handler=
this;
/* Additional tables joined the statement: drop single-handler mode. */
7552 thd_ndb->m_handler= NULL;
7553 if (handler != NULL)
7561 add_handler_to_open_tables(thd, thd_ndb, handler);
7564 if (!trans && table_count == 0)
7566 DBUG_ASSERT(thd_ndb->changed_tables.is_empty() == TRUE);
7567 thd_ndb->trans_options= 0;
7569 DBUG_PRINT(
"trans",(
"Possibly starting transaction"));
7570 const uint opti_node_select = THDVAR(thd, optimized_node_selection);
7571 DBUG_PRINT(
"enter", (
"optimized_node_selection: %u", opti_node_select));
/* Start the transaction now unless delayed-TC-selection (bit 2) is on. */
7572 if (!(opti_node_select & 2) ||
7573 thd->lex->sql_command == SQLCOM_LOAD)
7574 if (unlikely(!start_transaction(error)))
7577 thd_ndb->init_open_tables();
/* Slow path is needed when binlogging is off/STMT or on a slave. */
7578 thd_ndb->m_slow_path= FALSE;
7579 if (!(thd_options(thd) & OPTION_BIN_LOG) ||
7580 thd->variables.binlog_format == BINLOG_FORMAT_STMT)
7582 thd_ndb->trans_options|= TNTO_NO_LOGGING;
7583 thd_ndb->m_slow_path= TRUE;
7585 else if (thd->slave_thread)
7586 thd_ndb->m_slow_path= TRUE;
/* LOCK TABLES is only local to this mysqld — warn the user. */
7594 if (thd_options(thd) & (OPTION_TABLE_LOCK))
7598 DBUG_PRINT(
"info", (
"Locking the table..." ));
7600 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
7601 ER_GET_ERRMSG, ER(ER_GET_ERRMSG), 0,
7602 "Table only locked locally in this mysqld",
"NDB");
/* add_handler_to_open_tables(): register the handler's share in
   thd_ndb->open_tables (a hash keyed by share pointer) and point the
   handler's m_table_info at the per-transaction stats entry, creating or
   refreshing the entry as needed. */
7609 ha_ndbcluster::add_handler_to_open_tables(THD *thd,
7613 DBUG_ENTER(
"ha_ndbcluster::add_handler_to_open_tables");
7614 DBUG_PRINT(
"info", (
"Adding %s", handler->m_share->key));
7619 DBUG_ASSERT(thd_ndb->m_handler == NULL);
7620 const void *key= handler->m_share;
7621 HASH_SEARCH_STATE state;
7622 THD_NDB_SHARE *thd_ndb_share=
7623 (THD_NDB_SHARE*)my_hash_first(&thd_ndb->open_tables,
7624 (
const uchar *)&key,
sizeof(key),
/* Hash buckets may collide; walk until the exact share pointer matches. */
7626 while (thd_ndb_share && thd_ndb_share->key != key)
7629 (THD_NDB_SHARE*)my_hash_next(&thd_ndb->open_tables,
7630 (
const uchar *)&key,
sizeof(key),
/* Not found: allocate a new entry from the transaction mem_root. */
7633 if (thd_ndb_share == 0)
7635 thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root,
7636 sizeof(THD_NDB_SHARE));
7639 mem_alloc_error(
sizeof(THD_NDB_SHARE));
7642 thd_ndb_share->key= key;
7643 thd_ndb_share->stat.last_count= thd_ndb->count;
7644 thd_ndb_share->stat.no_uncommitted_rows_count= 0;
7645 thd_ndb_share->stat.records= ~(ha_rows)0;
7646 my_hash_insert(&thd_ndb->open_tables, (uchar *)thd_ndb_share);
/* Stale entry from a previous transaction: reset its counters. */
7648 else if (thd_ndb_share->stat.last_count != thd_ndb->count)
7650 thd_ndb_share->stat.last_count= thd_ndb->count;
7651 thd_ndb_share->stat.no_uncommitted_rows_count= 0;
7652 thd_ndb_share->stat.records= ~(ha_rows)0;
7655 handler->m_table_info= &thd_ndb_share->stat;
/* init_handler_for_statement(): per-statement handler setup — refresh
   prefetch size, clear blob state, pick up the slow-path flag, and attach
   m_table_info (shared entry in multi-table mode, local 'stat' in
   single-handler mode). */
7659 int ha_ndbcluster::init_handler_for_statement(THD *thd)
7673 DBUG_ENTER(
"ha_ndbcluster::init_handler_for_statement");
7675 DBUG_ASSERT(thd_ndb);
7678 m_autoincrement_prefetch= THDVAR(thd, autoincrement_prefetch_sz);
7681 m_blobs_pending= FALSE;
7682 release_blobs_buffer();
7683 m_slow_path= m_thd_ndb->m_slow_path;
7684 #ifdef HAVE_NDB_BINLOG
/* Slave writes into ndb_apply_status are flagged so commit logic knows
   the status row was already injected. */
7685 if (unlikely(m_slow_path))
7687 if (m_share == ndb_apply_status_share && thd->slave_thread)
7688 m_thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS;
7693 if (thd_ndb->m_handler == 0)
7695 DBUG_ASSERT(m_share);
7696 ret = add_handler_to_open_tables(thd, thd_ndb,
this);
7701 stat.last_count= thd_ndb->count;
7702 stat.no_uncommitted_rows_count= 0;
7703 stat.records= ~(ha_rows)0;
7704 m_table_info= &stat;
/* external_lock(): handler lock/unlock callback.  Lock (lock_type !=
   F_UNLCK) runs start_statement + init_handler_for_statement under a
   per-THD lock_count; unlock decrements it and, on the last unlock,
   releases a never-used transaction and clears per-statement cursors.
   NOTE(review): several braces and intermediate statements are elided in
   this extract. */
7709 int ha_ndbcluster::external_lock(THD *thd,
int lock_type)
7711 DBUG_ENTER(
"external_lock");
7712 if (lock_type != F_UNLCK)
7719 if (check_ndb_connection(thd))
7721 Thd_ndb *thd_ndb= get_thd_ndb(thd);
7723 DBUG_PRINT(
"enter", (
"lock_type != F_UNLCK "
7724 "this: 0x%lx thd: 0x%lx thd_ndb: %lx "
7725 "thd_ndb->lock_count: %d",
7726 (
long)
this, (
long) thd, (
long) thd_ndb,
7727 thd_ndb->lock_count));
/* lock_count is rolled back if statement setup fails. */
7729 if ((error= start_statement(thd, thd_ndb,
7730 thd_ndb->lock_count++)))
7732 thd_ndb->lock_count--;
7735 if ((error= init_handler_for_statement(thd)))
7737 thd_ndb->lock_count--;
/* Unlock path. */
7745 DBUG_ASSERT(thd_ndb);
7747 DBUG_PRINT(
"enter", (
"lock_type == F_UNLCK "
7748 "this: 0x%lx thd: 0x%lx thd_ndb: %lx "
7749 "thd_ndb->lock_count: %d",
7750 (
long)
this, (
long) thd, (
long) thd_ndb,
7751 thd_ndb->lock_count));
/* Query-cache invalidation bookkeeping for changed tables. */
7753 if (m_rows_changed && global_system_variables.query_cache_type)
7755 DBUG_PRINT(
"info", (
"Rows has changed"));
7757 if (thd_ndb->trans &&
7758 thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
7760 DBUG_PRINT(
"info", (
"Add share to list of changed tables, %p",
7763 thd_ndb->changed_tables.push_back(get_share(m_share),
7764 &thd->transaction.mem_root);
7767 if (opt_ndb_cache_check_time)
7769 pthread_mutex_lock(&m_share->mutex);
7770 DBUG_PRINT(
"info", (
"Invalidating commit_count"));
7771 m_share->commit_count= 0;
7772 m_share->commit_count_lock++;
7773 pthread_mutex_unlock(&m_share->mutex);
/* Last unlock of the statement: drop a read-only autocommit transaction
   without a round-trip commit. */
7777 if (!--thd_ndb->lock_count)
7779 DBUG_PRINT(
"trans", (
"Last external_lock"));
7781 if ((!(thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) &&
7791 DBUG_PRINT(
"trans",(
"ending non-updating transaction"));
7793 thd_ndb->trans= NULL;
7794 thd_ndb->m_handler= NULL;
/* Cursors/queries left open at unlock indicate an aborted statement;
   warn and clear them. */
7810 DBUG_PRINT(
"warning", (
"m_active_query != NULL"));
7811 m_active_query= NULL;
7813 if (m_active_cursor)
7814 DBUG_PRINT(
"warning", (
"m_active_cursor != NULL"));
7815 m_active_cursor= NULL;
7818 DBUG_PRINT(
"warning", (
"m_multi_cursor != NULL"));
7819 m_multi_cursor= NULL;
7821 if (m_blobs_pending)
7822 DBUG_PRINT(
"warning", (
"blobs_pending != 0"));
/* unlock_row(): the row just read will not be updated/deleted — drop the
   pending tuple lock flag so the scan does not take a lock. */
7836 void ha_ndbcluster::unlock_row()
7838 DBUG_ENTER(
"unlock_row");
7840 DBUG_PRINT(
"info", (
"Unlocking row"));
7841 m_lock_tuple= FALSE;
/* start_stmt(): statement start inside LOCK TABLES / multi-statement
   transaction (external_lock is not called then); mirrors the lock path
   of external_lock using start_stmt_count. */
7869 int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
7873 DBUG_ENTER(
"start_stmt");
7874 DBUG_ASSERT(thd == table->in_use);
7876 thd_ndb= get_thd_ndb(thd);
7877 if ((error= start_statement(thd, thd_ndb, thd_ndb->start_stmt_count++)))
7879 if ((error= init_handler_for_statement(thd)))
7883 thd_ndb->start_stmt_count--;
/*
  ha_ndbcluster::start_transaction_row() fragment: starts an NDB
  transaction with a hint derived from a row image (ndb_record/record),
  using a stack buffer sized MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY
  words for the hint computation.  Stores and returns the new
  transaction via m_thd_ndb->trans.
  NOTE(review): lossy extraction; the startTransaction() call itself and
  the declaration of 'trans' fall in the dropped lines.
*/
7888 ha_ndbcluster::start_transaction_row(
const NdbRecord *ndb_record,
7889 const uchar *record,
7893 DBUG_ENTER(
"ha_ndbcluster::start_transaction_row");
7894 DBUG_ASSERT(m_thd_ndb);
7895 DBUG_ASSERT(m_thd_ndb->trans == NULL);
7897 transaction_checks(table->in_use, m_thd_ndb);
7899 Ndb *ndb= m_thd_ndb->ndb;
7901 Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1];
7902 char *buf= (
char*)&tmp[0];
7904 (
const char*)record,
7910 DBUG_PRINT(
"info", (
"Delayed allocation of TC"));
7911 DBUG_RETURN(m_thd_ndb->trans= trans);
/*
  ha_ndbcluster::start_transaction_key() fragment: like
  start_transaction_row() but the transaction-distribution hint is built
  from unique-index key data (index inx_no's ndb_unique_record_key).
  NOTE(review): lossy extraction; the startTransaction() call and 'trans'
  declaration fall in the dropped lines.
*/
7919 ha_ndbcluster::start_transaction_key(uint inx_no,
7920 const uchar *key_data,
7924 DBUG_ENTER(
"ha_ndbcluster::start_transaction_key");
7925 DBUG_ASSERT(m_thd_ndb);
7926 DBUG_ASSERT(m_thd_ndb->trans == NULL);
7928 transaction_checks(table->in_use, m_thd_ndb);
7930 Ndb *ndb= m_thd_ndb->ndb;
7931 const NdbRecord *key_rec= m_index[inx_no].ndb_unique_record_key;
7933 Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1];
7934 char *buf= (
char*)&tmp[0];
7936 (
const char*)key_data,
7942 DBUG_PRINT(
"info", (
"Delayed allocation of TC"));
7943 DBUG_RETURN(m_thd_ndb->trans= trans);
/*
  ha_ndbcluster::start_transaction() fragment: starts an unhinted NDB
  transaction.  Reads the per-session 'optimized_node_selection' THDVAR
  and passes its low bit to set_optimized_node_selection() before
  allocating the transaction.
  NOTE(review): lossy extraction; 'trans' allocation is in dropped lines.
*/
7951 ha_ndbcluster::start_transaction(
int &error)
7954 DBUG_ENTER(
"ha_ndbcluster::start_transaction");
7956 DBUG_ASSERT(m_thd_ndb);
7957 DBUG_ASSERT(m_thd_ndb->trans == NULL);
7959 transaction_checks(table->in_use, m_thd_ndb);
7960 const uint opti_node_select= THDVAR(table->in_use, optimized_node_selection);
7961 m_thd_ndb->connection->set_optimized_node_selection(opti_node_select & 1);
7965 DBUG_PRINT(
"info", (
"Delayed allocation of TC"));
7966 DBUG_RETURN(m_thd_ndb->trans= trans);
/*
  ha_ndbcluster::start_transaction_part_id() fragment: starts an NDB
  transaction hinted to a specific partition id.
  NOTE(review): lossy extraction; the actual startTransaction(part_id)
  call is in the dropped lines.
*/
7974 ha_ndbcluster::start_transaction_part_id(Uint32 part_id,
int &error)
7977 DBUG_ENTER(
"ha_ndbcluster::start_transaction_part_id");
7979 DBUG_ASSERT(m_thd_ndb);
7980 DBUG_ASSERT(m_thd_ndb->trans == NULL);
7982 transaction_checks(table->in_use, m_thd_ndb);
7986 DBUG_PRINT(
"info", (
"Delayed allocation of TC"));
7987 DBUG_RETURN(m_thd_ndb->trans= trans);
/*
  ndbcluster_commit() fragment: handlerton commit callback.
  Visible behavior: resets start_stmt_count; for a statement-level commit
  inside an open multi-statement transaction only bumps save_point_count;
  otherwise executes the commit (special-cased for slave threads and for
  the autocommit + read-before-write-removal case), converts NDB errors
  via ndb_to_mysql_error(), updates shared statistics for all open
  tables, clears thd_ndb->trans/m_handler, and invalidates the
  commit_count of every changed share for the query cache.
  NOTE(review): lossy extraction; many control-flow lines (braces,
  returns, 'trans'/'res' declarations) are missing.  Tokens below are
  byte-identical to the extracted text, including the "comitted" /
  "uncomitted" typos in runtime strings, which must not be edited here.
*/
7999 int ndbcluster_commit(
handlerton *hton, THD *thd,
bool all)
8002 Thd_ndb *thd_ndb= get_thd_ndb(thd);
8003 Ndb *ndb= thd_ndb->ndb;
8006 DBUG_ENTER(
"ndbcluster_commit");
8008 DBUG_PRINT(
"enter", (
"Commit %s", (all ?
"all" :
"stmt")));
8009 thd_ndb->start_stmt_count= 0;
8012 DBUG_PRINT(
"info", (
"trans == NULL"));
8015 if (!all && (thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
8027 thd_ndb->save_point_count++;
8028 DBUG_PRINT(
"info", (
"Commit before start or end-of-statement only"));
8031 thd_ndb->save_point_count= 0;
8033 #ifdef HAVE_NDB_BINLOG
8034 if (unlikely(thd_ndb->m_slow_path))
8036 if (thd->slave_thread)
8037 ndbcluster_update_apply_status
8038 (thd, thd_ndb->trans_options & TNTO_INJECTED_APPLY_STATUS);
8042 if (thd->slave_thread)
8044 if (!g_ndb_slave_state.current_conflict_defined_op_count ||
8045 !thd_ndb->m_unsent_bytes ||
8046 !(res= execute_no_commit(thd_ndb, trans, TRUE)))
8047 res= execute_commit(thd, thd_ndb, trans, 1, TRUE);
8049 update_slave_api_stats(thd_ndb->ndb);
8053 if (thd_ndb->m_handler &&
8054 thd_ndb->m_handler->m_read_before_write_removal_possible)
8061 DBUG_PRINT(
"info", (
"autocommit+rbwr, transaction already comitted"));
8064 sql_print_error(
"found uncomitted autocommit+rbwr transaction, "
8070 res= execute_commit(thd, thd_ndb, trans, THDVAR(thd, force_send), FALSE);
8077 res= ndb_to_mysql_error(&err);
8079 ndbcluster_print_error(res, error_op);
8084 if (thd_ndb->m_handler &&
8085 thd_ndb->m_handler->m_share &&
8086 thd_ndb->m_handler->m_table_info)
8088 modify_shared_stats(thd_ndb->m_handler->m_share, thd_ndb->m_handler->m_table_info);
8092 for (uint i= 0; i<thd_ndb->open_tables.records; i++)
8094 THD_NDB_SHARE *thd_share=
8095 (THD_NDB_SHARE*)my_hash_element(&thd_ndb->open_tables, i);
8096 modify_shared_stats((
NDB_SHARE*)thd_share->key, &thd_share->stat);
8101 thd_ndb->trans= NULL;
8102 thd_ndb->m_handler= NULL;
8107 while ((share= it++))
8109 DBUG_PRINT(
"info", (
"Remove share to list of changed tables, %p",
8111 pthread_mutex_lock(&share->mutex);
8112 DBUG_PRINT(
"info", (
"Invalidate commit_count for %s, share->commit_count: %lu",
8113 share->table_name, (ulong) share->commit_count));
8114 share->commit_count= 0;
8115 share->commit_count_lock++;
8116 pthread_mutex_unlock(&share->mutex);
8119 thd_ndb->changed_tables.empty();
/*
  ndbcluster_rollback() fragment: handlerton rollback callback.
  Visible behavior: resets start_stmt_count; a statement-level rollback
  inside an open transaction with save_point_count > 0 marks the whole
  transaction for rollback and raises
  ER_WARN_ENGINE_TRANSACTION_ROLLBACK (NDB cannot roll back to a
  savepoint); otherwise notifies slave state, resets unsent-bytes
  bookkeeping, converts the NDB error, clears trans/m_handler and empties
  the changed-tables list.
  NOTE(review): lossy extraction; the actual trans->execute(Rollback)
  call and several braces fall in the dropped lines.
*/
8129 static int ndbcluster_rollback(
handlerton *hton, THD *thd,
bool all)
8132 Thd_ndb *thd_ndb= get_thd_ndb(thd);
8133 Ndb *ndb= thd_ndb->ndb;
8136 DBUG_ENTER(
"ndbcluster_rollback");
8137 DBUG_PRINT(
"enter", (
"all: %d thd_ndb->save_point_count: %d",
8138 all, thd_ndb->save_point_count));
8140 thd_ndb->start_stmt_count= 0;
8144 DBUG_PRINT(
"info", (
"trans == NULL"));
8147 if (!all && (thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
8148 (thd_ndb->save_point_count > 0))
8156 DBUG_PRINT(
"info", (
"Rollback before start or end-of-statement only"));
8157 mark_transaction_to_rollback(thd, 1);
8158 my_error(ER_WARN_ENGINE_TRANSACTION_ROLLBACK, MYF(0),
"NDB");
8161 thd_ndb->save_point_count= 0;
8162 if (thd->slave_thread)
8163 g_ndb_slave_state.atTransactionAbort();
8164 thd_ndb->m_unsent_bytes= 0;
8165 thd_ndb->m_execute_count++;
8166 DBUG_PRINT(
"info", (
"execute_count: %u", thd_ndb->m_execute_count));
8171 res= ndb_to_mysql_error(&err);
8173 ndbcluster_print_error(res, error_op);
8176 thd_ndb->trans= NULL;
8177 thd_ndb->m_handler= NULL;
8182 while ((share= it++))
8184 DBUG_PRINT(
"info", (
"Remove share to list of changed tables, %p",
8188 thd_ndb->changed_tables.empty();
8190 if (thd->slave_thread)
8191 update_slave_api_stats(thd_ndb->ndb);
/*
  NDB_Modifier fragment plus the two modifier tables.  Tables are
  sentinel-terminated (entry with m_name == 0).  Supported COMMENT
  modifiers: table-level "NOLOGGING", column-level "MAX_BLOB_PART_SIZE",
  both boolean.
  NOTE(review): lossy extraction; the rest of struct NDB_Modifier
  (m_name_len, m_found, value union) falls in dropped lines.
*/
8202 enum { M_BOOL } m_type;
8203 const char * m_name;
8219 struct NDB_Modifier ndb_table_modifiers[] =
8221 { NDB_Modifier::M_BOOL, STRING_WITH_LEN(
"NOLOGGING"), 0, {0} },
8222 { NDB_Modifier::M_BOOL, 0, 0, 0, {0} }
8226 struct NDB_Modifier ndb_column_modifiers[] =
8228 { NDB_Modifier::M_BOOL, STRING_WITH_LEN(
"MAX_BLOB_PART_SIZE"), 0, {0} },
8229 { NDB_Modifier::M_BOOL, 0, 0, 0, {0} }
/*
  NDB_Modifiers class declaration fragment (ctor, parse(), get(),
  m_modifiers, private parse_modifier()) followed by the end_of_token()
  helper: a token ends at NUL, space or comma.
  NOTE(review): lossy extraction; class braces, m_len member and
  end_of_token()'s return type fall in dropped lines.
*/
8241 NDB_Modifiers(
const NDB_Modifier modifiers[]);
8247 int parse(THD* thd,
const char * prefix,
const char * str,
size_t strlen);
8252 const NDB_Modifier *
get(
const char *
name)
const;
8255 struct NDB_Modifier * m_modifiers;
8257 int parse_modifier(THD *thd,
const char * prefix,
8258 struct NDB_Modifier* m,
const char * str);
8263 end_of_token(
const char * str)
8265 return str[0] == 0 || str[0] ==
' ' || str[0] ==
',';
/*
  Constructor: counts sentinel-terminated 'modifiers', heap-copies the
  array into m_modifiers (owned; released by the destructor below).
  NOTE(review): lossy extraction; braces missing.
*/
8268 NDB_Modifiers::NDB_Modifiers(
const NDB_Modifier modifiers[])
8270 for (m_len = 0; modifiers[m_len].m_name != 0; m_len++)
8272 m_modifiers =
new NDB_Modifier[m_len];
8273 memcpy(m_modifiers, modifiers, m_len *
sizeof(NDB_Modifier));
8276 NDB_Modifiers::~NDB_Modifiers()
8278 delete [] m_modifiers;
/*
  NDB_Modifiers::parse_modifier() fragment: parses one "NAME[=val]"
  modifier.  For M_BOOL: bare name or "=1" -> true, "=0" -> false;
  anything else pushes an ER_ILLEGAL_HA_CREATE_OPTION warning (with a
  length-bounded "%.*s" when a delimiter is found, "%s" otherwise).
  Specifying a modifier twice also warns.
  NOTE(review): lossy extraction; m_found assignment, switch header and
  returns fall in dropped lines.
*/
8282 NDB_Modifiers::parse_modifier(THD *thd,
8283 const char * prefix,
8284 struct NDB_Modifier* m,
8289 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8290 ER_ILLEGAL_HA_CREATE_OPTION,
8291 "%s : modifier %s specified twice",
8296 case NDB_Modifier::M_BOOL:
8297 if (end_of_token(str))
8299 m->m_val_bool =
true;
8306 if (str[0] ==
'1' && end_of_token(str+1))
8308 m->m_val_bool =
true;
8312 if (str[0] ==
'0' && end_of_token(str+1))
8314 m->m_val_bool =
false;
8320 const char * end = strpbrk(str,
" ,");
8323 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8324 ER_ILLEGAL_HA_CREATE_OPTION,
8325 "%s : invalid value '%.*s' for %s",
8326 prefix, (
int)(end - str), str, m->m_name);
8330 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8331 ER_ILLEGAL_HA_CREATE_OPTION,
8332 "%s : invalid value '%s' for %s",
8333 prefix, str, m->m_name);
/*
  NDB_Modifiers::parse() fragment: scans a table/column COMMENT string
  for "<prefix>NAME[=val][,NAME...]" and dispatches each recognized name
  to parse_modifier().  If the input is not NUL-terminated within
  _source_len it is copied to a heap buffer first (freed when
  source != _source); OOM and unknown modifiers produce
  ER_ILLEGAL_HA_CREATE_OPTION warnings rather than hard errors.
  NOTE(review): lossy extraction; early returns, the 'pos = end' style
  advances and several braces fall in dropped lines.
*/
8343 NDB_Modifiers::parse(THD *thd,
8344 const char * prefix,
8345 const char * _source,
8348 if (_source == 0 || _source_len == 0)
8351 const char * source = 0;
8356 for (
size_t i = 0; i<_source_len; i++)
8358 if (_source[i] == 0)
8370 char * tmp =
new char[_source_len+1];
8373 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8374 ER_ILLEGAL_HA_CREATE_OPTION,
8375 "%s : unable to parse due to out of memory",
8379 memcpy(tmp, _source, _source_len);
8380 tmp[_source_len] = 0;
8384 const char * pos = source;
8385 if ((pos = strstr(pos, prefix)) == 0)
8387 if (source != _source)
8392 pos += strlen(prefix);
8394 while (pos && pos[0] != 0 && pos[0] !=
' ')
8396 const char * end = strpbrk(pos,
" ,");
8398 for (uint i = 0; i < m_len; i++)
8400 size_t l = m_modifiers[
i].m_name_len;
8401 if (strncmp(pos, m_modifiers[i].m_name, l) == 0)
8407 if (! (end_of_token(pos + l) || pos[l] ==
'='))
8411 int res = parse_modifier(thd, prefix, m_modifiers+i, pos);
8428 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8429 ER_ILLEGAL_HA_CREATE_OPTION,
8430 "%s : unknown modifier: %.*s",
8431 prefix, (
int)(end - pos), pos);
8435 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8436 ER_ILLEGAL_HA_CREATE_OPTION,
8437 "%s : unknown modifier: %s",
8444 if (pos && pos[0] ==
',')
8448 if (source != _source)
/*
  NDB_Modifiers::get() fragment: linear lookup of a modifier by exact
  name; returns a pointer into m_modifiers.
  NOTE(review): lossy extraction; the not-found return (presumably 0)
  falls in the dropped lines -- confirm against upstream.
*/
8454 const NDB_Modifier *
8455 NDB_Modifiers::get(
const char *
name)
const
8457 for (uint i = 0; i < m_len; i++)
8459 if (strcmp(name, m_modifiers[i].m_name) == 0)
8461 return m_modifiers +
i;
/*
  Fragment: reads env var NDB_BLOB_STRIPING (any value except empty,
  '0...', 'n...', 'N...' enables it -- enclosing function lost to
  extraction), then defines OLD_NDB_MAX_TUPLE_SIZE_IN_WORDS: the pre-7.2
  limit 2013 on old versions, otherwise the current
  NDB_MAX_TUPLE_SIZE_IN_WORDS.
*/
8486 const char* p= getenv(
"NDB_BLOB_STRIPING");
8487 if (p != 0 && *p != 0 && *p !=
'0' && *p !=
'n' && *p !=
'N')
8493 #if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
8494 const Uint32 OLD_NDB_MAX_TUPLE_SIZE_IN_WORDS = 2013;
8496 const Uint32 OLD_NDB_MAX_TUPLE_SIZE_IN_WORDS = NDB_MAX_TUPLE_SIZE_IN_WORDS;
/*
  create_ndb_column() fragment: maps one MySQL Field to an NDB column
  definition.  Visible responsibilities:
   - column name, COMMENT modifiers (MAX_BLOB_PART_SIZE),
   - native default values (skipped for primary-key parts; BIT defaults
     padded to 4-byte multiples),
   - the big switch mapping every MYSQL_TYPE_* to an NDB type, including
     blob part-size adjustments and goto-chaining between blob cases,
   - autoincrement setup from create_info->auto_increment_value,
   - storage type (memory/disk) and FIXED/DYNAMIC column format with
     ER_ILLEGAL_HA_CREATE_OPTION warnings for unsupported combinations
     (dynamic disk columns become static/FIXED).
  NOTE(review): heavily lossy extraction -- setType/setLength calls for
  most scalar cases, 'Field_*' casts behind 'f', several returns/braces
  and the parameter list (col, field, create_info) are in dropped lines.
  Tokens kept byte-identical.
*/
8499 static int create_ndb_column(THD *thd,
8503 #ifndef NDB_WITHOUT_COLUMN_FORMAT
8504 , column_format_type
8505 default_format= COLUMN_FORMAT_TYPE_DEFAULT
8509 NDBCOL::StorageType type= NDBCOL::StorageTypeMemory;
8510 bool dynamic= FALSE;
8512 char buf[MAX_ATTR_DEFAULT_VALUE_SIZE];
8513 DBUG_ENTER(
"create_ndb_column");
8515 if (col.
setName(field->field_name))
8517 DBUG_RETURN(my_errno= errno);
8522 const enum enum_field_types mysql_type= field->real_type();
8524 NDB_Modifiers column_modifiers(ndb_column_modifiers);
8525 column_modifiers.parse(thd,
"NDB_COLUMN=",
8527 field->comment.length);
8529 const NDB_Modifier * mod_maxblob = column_modifiers.get(
"MAX_BLOB_PART_SIZE");
8538 bool nativeDefaults =
8540 (! ndb_native_default_support(get_thd_ndb(thd)->
8541 ndb->getMinDbNodeVersion())));
8543 if (likely( nativeDefaults ))
8545 if ((!(field->flags & PRI_KEY_FLAG) ) &&
8546 type_supports_default_value(mysql_type))
8548 if (!(field->flags & NO_DEFAULT_VALUE_FLAG))
8550 my_ptrdiff_t src_offset= field->table->s->default_values
8551 - field->table->record[0];
8552 if ((! field->is_real_null(src_offset)) ||
8553 ((field->flags & NOT_NULL_FLAG)))
8556 memset(buf, 0, MAX_ATTR_DEFAULT_VALUE_SIZE);
8557 get_default_value(buf, field);
8562 Uint32 defaultLen = field_used_length(field);
8563 if(field->type() == MYSQL_TYPE_BIT)
8564 defaultLen = ((defaultLen + 3) /4) * 4;
8571 switch (mysql_type) {
8573 case MYSQL_TYPE_TINY:
8574 if (field->flags & UNSIGNED_FLAG)
8580 case MYSQL_TYPE_SHORT:
8581 if (field->flags & UNSIGNED_FLAG)
8587 case MYSQL_TYPE_LONG:
8588 if (field->flags & UNSIGNED_FLAG)
8594 case MYSQL_TYPE_INT24:
8595 if (field->flags & UNSIGNED_FLAG)
8601 case MYSQL_TYPE_LONGLONG:
8602 if (field->flags & UNSIGNED_FLAG)
8608 case MYSQL_TYPE_FLOAT:
8612 case MYSQL_TYPE_DOUBLE:
8616 case MYSQL_TYPE_DECIMAL:
8619 uint precision= f->pack_length();
8620 uint scale= f->decimals();
8621 if (field->flags & UNSIGNED_FLAG)
8623 col.
setType(NDBCOL::Olddecimalunsigned);
8624 precision-= (scale > 0);
8629 precision-= 1 + (scale > 0);
8636 case MYSQL_TYPE_NEWDECIMAL:
8639 uint precision= f->precision;
8640 uint scale= f->decimals();
8641 if (field->flags & UNSIGNED_FLAG)
8643 col.
setType(NDBCOL::Decimalunsigned);
8655 case MYSQL_TYPE_DATETIME:
8659 case MYSQL_TYPE_DATE:
8663 case MYSQL_TYPE_NEWDATE:
8667 case MYSQL_TYPE_TIME:
8671 case MYSQL_TYPE_YEAR:
8675 case MYSQL_TYPE_TIMESTAMP:
8680 case MYSQL_TYPE_STRING:
8681 if (field->pack_length() == 0)
8686 else if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8698 case MYSQL_TYPE_VAR_STRING:
8699 case MYSQL_TYPE_VARCHAR:
8702 if (f->length_bytes == 1)
8704 if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8711 else if (f->length_bytes == 2)
8713 if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8722 DBUG_RETURN(HA_ERR_UNSUPPORTED);
8728 mysql_type_tiny_blob:
8729 case MYSQL_TYPE_TINY_BLOB:
8730 if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8742 case MYSQL_TYPE_GEOMETRY:
8743 case MYSQL_TYPE_BLOB:
8744 if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8761 goto mysql_type_tiny_blob;
8767 if (mod_maxblob->m_found)
8769 col.
setPartSize(4 * (NDB_MAX_TUPLE_SIZE_IN_WORDS - 13));
8773 goto mysql_type_medium_blob;
8775 goto mysql_type_long_blob;
8778 mysql_type_medium_blob:
8779 case MYSQL_TYPE_MEDIUM_BLOB:
8780 if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8789 if (mod_maxblob->m_found)
8791 col.
setPartSize(4 * (NDB_MAX_TUPLE_SIZE_IN_WORDS - 13));
8794 mysql_type_long_blob:
8795 case MYSQL_TYPE_LONG_BLOB:
8796 if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
8803 col.
setPartSize(4 * (OLD_NDB_MAX_TUPLE_SIZE_IN_WORDS - 13));
8805 if (mod_maxblob->m_found)
8807 col.
setPartSize(4 * (NDB_MAX_TUPLE_SIZE_IN_WORDS - 13));
8811 case MYSQL_TYPE_ENUM:
8815 case MYSQL_TYPE_SET:
8819 case MYSQL_TYPE_BIT:
8821 int no_of_bits= field->field_length;
8829 case MYSQL_TYPE_NULL:
8830 goto mysql_type_unsupported;
8831 mysql_type_unsupported:
8833 DBUG_RETURN(HA_ERR_UNSUPPORTED);
8838 if ((field->flags & FIELD_IN_PART_FUNC_FLAG) != 0)
8844 if (field->flags & AUTO_INCREMENT_FLAG)
8849 col.setAutoIncrement(TRUE);
8850 ulonglong value= create_info->auto_increment_value ?
8851 create_info->auto_increment_value : (ulonglong) 1;
8852 DBUG_PRINT(
"info", (
"Autoincrement key, initial: %s", llstr(value, buff)));
8853 col.setAutoIncrementInitialValue(value);
8856 col.setAutoIncrement(FALSE);
8858 #ifndef NDB_WITHOUT_COLUMN_FORMAT
8859 DBUG_PRINT(
"info", (
"storage: %u format: %u ",
8860 field->field_storage_type(),
8861 field->column_format()));
8862 switch (field->field_storage_type()) {
8863 case(HA_SM_DEFAULT):
8865 if (create_info->storage_media == HA_SM_DISK)
8866 type= NDBCOL::StorageTypeDisk;
8868 type= NDBCOL::StorageTypeMemory;
8871 type= NDBCOL::StorageTypeDisk;
8874 type= NDBCOL::StorageTypeMemory;
8878 switch (field->column_format()) {
8879 case(COLUMN_FORMAT_TYPE_FIXED):
8882 case(COLUMN_FORMAT_TYPE_DYNAMIC):
8885 case(COLUMN_FORMAT_TYPE_DEFAULT):
8887 if (create_info->row_type == ROW_TYPE_DEFAULT)
8888 dynamic= default_format;
8890 dynamic= (create_info->row_type == ROW_TYPE_DYNAMIC);
8894 DBUG_PRINT(
"info", (
"Column %s is declared %s", field->field_name,
8895 (dynamic) ?
"dynamic" :
"static"));
8896 if (type == NDBCOL::StorageTypeDisk)
8900 DBUG_PRINT(
"info", (
"Dynamic disk stored column %s changed to static",
8901 field->field_name));
8905 #ifndef NDB_WITHOUT_COLUMN_FORMAT
8906 if (thd && field->column_format() == COLUMN_FORMAT_TYPE_DYNAMIC)
8908 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8909 ER_ILLEGAL_HA_CREATE_OPTION,
8910 "DYNAMIC column %s with "
8911 "STORAGE DISK is not supported, "
8912 "column will become FIXED",
8918 switch (create_info->row_type) {
8919 case ROW_TYPE_FIXED:
8920 if (thd && (dynamic || field_type_forces_var_part(field->type())))
8922 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
8923 ER_ILLEGAL_HA_CREATE_OPTION,
8924 "Row format FIXED incompatible with "
8925 "dynamic attribute %s",
8929 case ROW_TYPE_DYNAMIC:
8938 DBUG_PRINT(
"info", (
"Format %s, Storage %s", (dynamic)?
"dynamic":
"fixed",(type == NDBCOL::StorageTypeDisk)?
"disk":
"memory"));
8939 col.setStorageType(type);
/*
  ha_ndbcluster::update_create_info() fragment: for SHOW CREATE TABLE /
  ALTER, when the user did not set AUTO_INCREMENT explicitly
  (HA_CREATE_USED_AUTO unset), reads the current autoincrement value
  from NDB (with NDB_AUTO_INCREMENT_RETRIES retries, 30 ms sleep between
  attempts) and publishes it in create_info->auto_increment_value.
  NOTE(review): lossy extraction; retry loop braces, the error object
  and the autoincrement guard 'g' declaration fall in dropped lines.
*/
8945 void ha_ndbcluster::update_create_info(
HA_CREATE_INFO *create_info)
8947 DBUG_ENTER(
"ha_ndbcluster::update_create_info");
8948 THD *thd= current_thd;
8949 const NDBTAB *ndbtab= m_table;
8950 Ndb *ndb= check_ndb_in_thd(thd);
8952 if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
8957 for (uint i= 0; i < table->s->fields; i++)
8959 Field *field= table->field[
i];
8960 if (field->flags & AUTO_INCREMENT_FLAG)
8962 ulonglong auto_value;
8963 uint retries= NDB_AUTO_INCREMENT_RETRIES;
8964 int retry_sleep= 30;
8968 if (ndb->readAutoIncrementValue(ndbtab, g.range, auto_value))
8970 if (--retries && !thd->killed &&
8973 do_retry_sleep(retry_sleep);
8977 sql_print_error(
"Error %lu in ::update_create_info(): %s",
8985 create_info->auto_increment_value= auto_value;
8998 static uint get_no_fragments(ulonglong max_rows)
9000 ulonglong acc_row_size= 25 + 2;
9001 ulonglong acc_fragment_size= 512*1024*1024;
9002 return uint((max_rows*acc_row_size)/acc_fragment_size)+1;
/*
  adjusted_frag_count() fragment: clamps a requested fragment count to
  what the cluster can actually hold.  Derives usable_nodes from the
  replica count and number of nodegroups, caps total replicas at
  8 * usable_nodes * no_threads, and grows reported_frags in
  usable_nodes*no_threads steps while the replica budget allows.
  Returns nonzero (true) when the request could NOT be fully satisfied.
  NOTE(review): lossy extraction; the ndb_apply_status probing code
  around lines 9027..9049 is fragmentary ('tab', 'node', 'replicas[]'
  declarations dropped).
*/
9013 adjusted_frag_count(
Ndb* ndb,
9014 uint requested_frags,
9015 uint &reported_frags)
9017 unsigned no_nodes= g_ndb_cluster_connection->no_db_nodes();
9018 unsigned no_replicas= no_nodes == 1 ? 1 : 2;
9020 unsigned no_threads= 1;
9021 const unsigned no_nodegroups= g_ndb_cluster_connection->max_nodegroup() + 1;
9027 char dbname[FN_HEADLEN+1];
9028 dbname[FN_HEADLEN]= 0;
9035 no_replicas= ndbtab_g.get_table()->getReplicaCount();
9041 const Uint32 frags = tab->getFragmentCount();
9044 for (Uint32 i = 0; i<frags; i++)
9047 if (tab->getFragmentNodes(i, replicas, NDB_ARRAY_SIZE(replicas)))
9049 if (node == replicas[0] || node == 0)
9062 const unsigned usable_nodes = no_replicas * no_nodegroups;
9063 const uint max_replicas = 8 * usable_nodes * no_threads;
9065 reported_frags = usable_nodes * no_threads;
9066 Uint32 replicas = reported_frags * no_replicas;
9071 while (reported_frags < requested_frags &&
9072 (replicas + usable_nodes * no_threads * no_replicas) <= max_replicas)
9074 reported_frags += usable_nodes * no_threads;
9075 replicas += usable_nodes * no_threads * no_replicas;
9078 return (reported_frags < requested_frags);
/*
  ha_ndbcluster::create() fragment: the CREATE TABLE (and TRUNCATE
  re-create) entry point.  Visible phases:
   1. reject temporary tables; verify connection + global schema lock;
   2. create_from_engine short path: write .ndb file + binlog setup;
   3. special-casing for the schema-distribution / apply-status system
      tables (single user mode, ndb_sys_table);
   4. for TRUNCATE: drop the existing table first;
   5. parse COMMENT "NDB_TABLE=..." modifiers (NOLOGGING), read binlog
      replication/conflict-function info (HAVE_NDB_BINLOG);
   6. inside a schema transaction: pack the .frm into the NDB table
      object, map row format to ForceVarPart, create each column via
      create_ndb_column(), force key parts off disk storage, generate a
      hidden shadow key when no primary key exists, shrink blob part
      sizes so rows fit NDB_MAX_TUPLE_SIZE_IN_WORDS;
   7. partition info, fragment-count adjustment with a warning when the
      cluster can't hold max_rows;
   8. on failure: abort the schema transaction and drop the half-created
      table (retry loop on temporary errors);
   9. on success: create indexes, write the .ndb file, set up the NDB
      share + binlog events and distribute the schema operation
      (SOT_CREATE_TABLE / SOT_TRUNCATE_TABLE).
  NOTE(review): heavily lossy extraction -- the 'tab'/'col'/'dict'
  declarations, many braces, returns and label definitions fall in
  dropped lines; tokens kept byte-identical.
*/
9086 int ha_ndbcluster::create(
const char *name,
9090 THD *thd= current_thd;
9093 size_t pack_length, length;
9094 uint
i, pk_length= 0;
9095 uchar *data= NULL, *pack_data= NULL;
9096 bool create_temporary= (create_info->options & HA_LEX_CREATE_TMP_TABLE);
9097 bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
9098 bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
9099 bool use_disk= FALSE;
9100 NdbDictionary::Table::SingleUserMode single_user_mode= NdbDictionary::Table::SingleUserModeLocked;
9101 bool ndb_sys_table= FALSE;
9105 DBUG_ENTER(
"ha_ndbcluster::create");
9106 DBUG_PRINT(
"enter", (
"name: %s", name));
9108 if (create_temporary)
9113 my_errno= ER_ILLEGAL_HA_CREATE_OPTION;
9114 DBUG_PRINT(
"info", (
"Ndb doesn't support temporary tables"));
9115 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
9116 ER_ILLEGAL_HA_CREATE_OPTION,
9117 "Ndb doesn't support temporary tables");
9118 DBUG_RETURN(my_errno);
9121 DBUG_ASSERT(*fn_rext((
char*)name) == 0);
9125 if ((my_errno= check_ndb_connection(thd)))
9126 DBUG_RETURN(my_errno);
9128 Ndb *ndb= get_ndb(thd);
9132 if (create_from_engine)
9139 if ((my_errno= write_ndb_file(name)))
9140 DBUG_RETURN(my_errno);
9142 ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
9143 m_dbname, m_tabname, form);
9144 DBUG_RETURN(my_errno);
9147 Thd_ndb *thd_ndb= get_thd_ndb(thd);
9149 if (!((thd_ndb->options & TNO_NO_LOCK_SCHEMA_OP) ||
9150 thd_ndb->has_required_global_schema_lock(
"ha_ndbcluster::create")))
9152 DBUG_RETURN(HA_ERR_NO_CONNECTION);
9159 if (!ndb_schema_share)
9161 if (!(strcmp(m_dbname, NDB_REP_DB) == 0 &&
9162 strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0))
9164 DBUG_PRINT(
"info", (
"Schema distribution table not setup"));
9165 DBUG_RETURN(HA_ERR_NO_CONNECTION);
9167 single_user_mode = NdbDictionary::Table::SingleUserModeReadWrite;
9168 ndb_sys_table= TRUE;
9171 if (!ndb_apply_status_share)
9173 if ((strcmp(m_dbname, NDB_REP_DB) == 0 &&
9174 strcmp(m_tabname, NDB_APPLY_TABLE) == 0))
9176 ndb_sys_table= TRUE;
9183 ndbtab_g.init(m_tabname);
9184 if (!(m_table= ndbtab_g.get_table()))
9187 DBUG_PRINT(
"info", (
"Dropping and re-creating table for TRUNCATE"));
9189 DBUG_RETURN(my_errno);
9193 NDB_Modifiers table_modifiers(ndb_table_modifiers);
9194 table_modifiers.parse(thd,
"NDB_TABLE=", create_info->comment.str,
9195 create_info->comment.length);
9196 const NDB_Modifier * mod_nologging = table_modifiers.get(
"NOLOGGING");
9198 #ifdef HAVE_NDB_BINLOG
9200 Uint32 binlog_flags;
9201 const st_conflict_fn_def* conflict_fn= NULL;
9202 st_conflict_fn_arg args[MAX_CONFLICT_ARGS];
9203 Uint32 num_args = MAX_CONFLICT_ARGS;
9205 int rep_read_rc= ndbcluster_get_binlog_replication_info(thd,
9215 if (rep_read_rc != 0)
9217 DBUG_RETURN(rep_read_rc);
9224 if (conflict_fn != NULL)
9226 switch(conflict_fn->type)
9233 Uint32 numExtraGciBits = 6;
9234 Uint32 numExtraAuthorBits = 1;
9236 if ((num_args == 1) &&
9237 (args[0].type == CFAT_EXTRA_GCI_BITS))
9239 numExtraGciBits = args[0].extraGciBits;
9241 DBUG_PRINT(
"info", (
"Setting ExtraRowGciBits to %u, "
9242 "ExtraAuthorBits to %u",
9244 numExtraAuthorBits));
9257 DBUG_PRINT(
"info", (
"Failed to start schema transaction"));
9260 DBUG_PRINT(
"info", (
"Started schema transaction"));
9262 DBUG_PRINT(
"table", (
"name: %s", m_tabname));
9270 if (THDVAR(thd, table_temporary))
9272 #ifdef DOES_NOT_WORK_CURRENTLY
9273 tab.setTemporary(TRUE);
9277 else if (THDVAR(thd, table_no_logging))
9282 if (mod_nologging->m_found)
9290 if (
readfrm(name, &data, &length))
9295 if (packfrm(data, length, &pack_data, &pack_length))
9297 my_free((
char*)data, MYF(0));
9302 (
"setFrm data: 0x%lx len: %lu", (
long) pack_data,
9303 (ulong) pack_length));
9304 tab.
setFrm(pack_data, Uint32(pack_length));
9305 my_free((
char*)data, MYF(0));
9306 my_free((
char*)pack_data, MYF(0));
9318 case ROW_TYPE_FIXED:
9319 tab.setForceVarPart(FALSE);
9321 case ROW_TYPE_DYNAMIC:
9325 case ROW_TYPE_DEFAULT:
9326 tab.setForceVarPart(TRUE);
9333 my_bitmap_map *old_map;
9335 restore_record(form, s->default_values);
9336 old_map= tmp_use_all_columns(form, form->read_set);
9339 for (i= 0; i < form->s->fields; i++)
9341 Field *field= form->field[
i];
9342 DBUG_PRINT(
"info", (
"name: %s, type: %u, pack_length: %d",
9343 field->field_name, field->real_type(),
9344 field->pack_length()));
9345 if ((my_errno= create_ndb_column(thd, col, field, create_info)))
9349 col.getStorageType() == NDBCOL::StorageTypeDisk)
9358 pk_length += (field->pack_length() + 3) / 4;
9361 tmp_restore_column_map(form->read_set, old_map);
9365 tab.setTemporary(FALSE);
9366 if (create_info->tablespace)
9367 tab.setTablespaceName(create_info->tablespace);
9369 tab.setTablespaceName(
"DEFAULT-TS");
9373 switch(create_info->storage_media)
9386 DBUG_PRINT(
"info", (
"Table %s is %s stored with tablespace %s",
9388 (use_disk) ?
"disk" :
"memory",
9389 (use_disk) ? tab.getTablespaceName() :
"N/A"));
9392 for (i= 0, key_info= form->key_info; i < form->s->keys; i++, key_info++)
9396 for (; key_part != end; key_part++)
9398 #ifndef NDB_WITHOUT_COLUMN_FORMAT
9399 if (key_part->field->field_storage_type() == HA_SM_DISK)
9401 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
9402 ER_ILLEGAL_HA_CREATE_OPTION,
9403 ER(ER_ILLEGAL_HA_CREATE_OPTION),
9404 ndbcluster_hton_name,
9407 "STORAGE DISK is not supported");
9408 result= HA_ERR_UNSUPPORTED;
9412 tab.
getColumn(key_part->fieldnr-1)->setStorageType(
9413 NdbDictionary::Column::StorageTypeMemory);
9418 if (form->s->primary_key == MAX_KEY)
9420 DBUG_PRINT(
"info", (
"Generating shadow key"));
9430 col.setAutoIncrement(TRUE);
9441 for (i= 0; i < form->s->fields; i++)
9451 switch (form->field[i]->real_type()) {
9452 case MYSQL_TYPE_GEOMETRY:
9453 case MYSQL_TYPE_BLOB:
9454 case MYSQL_TYPE_MEDIUM_BLOB:
9455 case MYSQL_TYPE_LONG_BLOB:
9458 unsigned size= pk_length + (column->
getPartSize()+3)/4 + 7;
9459 unsigned ndb_max= OLD_NDB_MAX_TUPLE_SIZE_IN_WORDS;
9461 ndb_max= NDB_MAX_TUPLE_SIZE_IN_WORDS;
9463 if (size > ndb_max &&
9464 (pk_length+7) < ndb_max)
9466 size= ndb_max - pk_length - 7;
9481 if ((my_errno= set_up_partition_info(form->part_info, tab)))
9485 tab.getDefaultNoPartitionsFlag() &&
9486 (create_info->max_rows != 0 || create_info->min_rows != 0))
9488 ulonglong rows= create_info->max_rows >= create_info->min_rows ?
9489 create_info->max_rows :
9490 create_info->min_rows;
9491 uint no_fragments= get_no_fragments(rows);
9492 uint reported_frags= no_fragments;
9493 if (adjusted_frag_count(ndb, no_fragments, reported_frags))
9495 push_warning(current_thd,
9496 Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
9497 "Ndb might have problems storing the max amount "
9498 "of rows specified");
9507 tab.getDefaultNoPartitionsFlag())
9522 my_errno= ndb_to_mysql_error(&err);
9530 my_errno= ndb_to_mysql_error(&err);
9540 my_errno= ndb_to_mysql_error(&err);
9544 DBUG_PRINT(
"info", (
"Table %s/%s created successfully",
9545 m_dbname, m_tabname));
9550 my_errno= create_indexes(thd, ndb, form);
9560 my_errno= write_ndb_file(name);
9568 DBUG_PRINT(
"info", (
"Aborting schema transaction due to error %i",
9570 if (dict->
endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort)
9572 DBUG_PRINT(
"info", (
"Failed to abort schema transaction, %i",
9575 DBUG_RETURN(my_errno);
9577 DBUG_PRINT(
"info", (
"Aborting schema transaction"));
9578 if (dict->
endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort)
9580 DBUG_PRINT(
"info", (
"Failed to abort schema transaction, %i",
9582 DBUG_RETURN(result);
9592 m_table= ndbtab_g.get_table();
9600 while (!thd->killed)
9603 goto cleanup_failed;
9604 if (dict->dropTableGlobal(*m_table))
9611 if (dict->
endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort)
9613 DBUG_PRINT(
"info", (
"Failed to abort schema transaction, %i",
9615 goto cleanup_failed;
9625 DBUG_PRINT(
"info", (
"Could not cleanup failed create %i",
9632 DBUG_RETURN(my_errno);
9637 pthread_mutex_lock(&ndbcluster_mutex);
9642 uint length= (uint) strlen(name);
9643 if ((share= (
NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
9644 (
const uchar*) name, length)))
9645 handle_trailing_share(thd, share);
9652 if (!(share= get_share(name, form, TRUE, TRUE)))
9654 sql_print_error(
"NDB: allocating table share for %s failed", name);
9659 DBUG_PRINT(
"NDB_SHARE", (
"%s binlog create use_count: %u",
9660 share->key, share->use_count));
9662 pthread_mutex_unlock(&ndbcluster_mutex);
9664 while (!IS_TMP_PREFIX(m_tabname))
9666 #ifdef HAVE_NDB_BINLOG
9670 ndbcluster_apply_binlog_replication_info(thd,
9681 String event_name(INJECTOR_EVENT_LEN);
9682 ndb_rep_event_name(&event_name, m_dbname, m_tabname,
9683 get_binlog_full(share));
9684 int do_event_op= ndb_binlog_running;
9686 if (!ndb_schema_share &&
9687 strcmp(share->db, NDB_REP_DB) == 0 &&
9688 strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
9695 if (!ndbcluster_create_event(thd, ndb, m_table, event_name.c_ptr(), share,
9696 share && do_event_op ? 2 : 1))
9698 if (opt_ndb_extra_logging)
9699 sql_print_information(
"NDB Binlog: CREATE TABLE Event: %s",
9700 event_name.c_ptr());
9702 ndbcluster_create_event_ops(thd, share,
9703 m_table, event_name.c_ptr()))
9705 sql_print_error(
"NDB Binlog: FAILED CREATE TABLE event operations."
9706 " Event: %s", name);
9714 if (share && !do_event_op)
9715 set_binlog_nologging(share);
9716 ndbcluster_log_schema_op(thd,
9717 thd->query(), thd->query_length(),
9718 share->db, share->table_name,
9719 m_table->getObjectId(),
9720 m_table->getObjectVersion(),
9722 SOT_TRUNCATE_TABLE : SOT_CREATE_TABLE,
9729 DBUG_RETURN(my_errno);
/*
  ha_ndbcluster::create_index() fragment: dispatches by NDB index type.
  Unique indexes get a "$unique"-suffixed name; UNIQUE_ORDERED creates
  both an ordered and a unique index; unique indexes on NULLable columns
  only warn (NULL lookups degrade to full table scan); non-unique HASH
  indexes are rejected with HA_ERR_UNSUPPORTED.
  NOTE(review): the DBUG_ENTER tag "ha_ndbcluster::create_ordered_index"
  does not match this function's name -- a pre-existing quirk that a
  doc_update may not change (it is a string literal).  Lossy extraction:
  switch header, breaks and the final return are in dropped lines.
*/
9733 int ha_ndbcluster::create_index(THD *thd,
const char *name,
KEY *key_info,
9734 NDB_INDEX_TYPE idx_type, uint idx_no)
9737 char unique_name[FN_LEN + 1];
9738 static const char* unique_suffix=
"$unique";
9739 DBUG_ENTER(
"ha_ndbcluster::create_ordered_index");
9740 DBUG_PRINT(
"info", (
"Creating index %u: %s", idx_no, name));
9742 if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
9744 strxnmov(unique_name, FN_LEN, name, unique_suffix, NullS);
9745 DBUG_PRINT(
"info", (
"Created unique index name \'%s\' for index %d",
9746 unique_name, idx_no));
9750 case PRIMARY_KEY_INDEX:
9753 case PRIMARY_KEY_ORDERED_INDEX:
9754 error= create_ordered_index(thd, name, key_info);
9756 case UNIQUE_ORDERED_INDEX:
9757 if (!(error= create_ordered_index(thd, name, key_info)))
9758 error= create_unique_index(thd, unique_name, key_info);
9761 if (check_index_fields_not_null(key_info))
9763 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
9764 ER_NULL_COLUMN_IN_INDEX,
9765 "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan");
9767 error= create_unique_index(thd, unique_name, key_info);
9770 if (key_info->algorithm == HA_KEY_ALG_HASH)
9772 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
9773 ER_ILLEGAL_HA_CREATE_OPTION,
9774 ER(ER_ILLEGAL_HA_CREATE_OPTION),
9775 ndbcluster_hton_name,
9776 "Ndb does not support non-unique "
9777 "hash based indexes");
9778 error= HA_ERR_UNSUPPORTED;
9781 error= create_ordered_index(thd, name, key_info);
/* Thin wrapper: forwards to create_ndb_index() with unique == FALSE.
   NOTE(review): the KEY* parameter and braces fall in dropped lines. */
9791 int ha_ndbcluster::create_ordered_index(THD *thd,
const char *name,
9794 DBUG_ENTER(
"ha_ndbcluster::create_ordered_index");
9795 DBUG_RETURN(create_ndb_index(thd, name, key_info, FALSE));
/* Thin wrapper: forwards to create_ndb_index() with unique == TRUE.
   NOTE(review): the KEY* parameter and braces fall in dropped lines. */
9798 int ha_ndbcluster::create_unique_index(THD *thd,
const char *name,
9802 DBUG_ENTER(
"ha_ndbcluster::create_unique_index");
9803 DBUG_RETURN(create_ndb_index(thd, name, key_info, TRUE));
/*
  ha_ndbcluster::create_ndb_index() fragment: builds and creates one NDB
  index.  Escapes '/' in the index name via ndb_protect_char, inherits
  logging/temporary flags from the table, rejects index columns with
  STORAGE DISK (HA_ERR_UNSUPPORTED), then adds each key-part column by
  name.  setName/setTable/addColumnName failures map errno into my_errno.
  NOTE(review): the DBUG_ENTER tag "ha_ndbcluster::create_index"
  mismatches this function's name -- pre-existing quirk left untouched.
  Lossy extraction: ndb_index declaration, unique-flag branch and the
  createIndex() call are in dropped lines.
*/
9814 int ha_ndbcluster::create_ndb_index(THD *thd,
const char *name,
9818 char index_name[FN_LEN + 1];
9819 Ndb *ndb= get_ndb(thd);
9824 DBUG_ENTER(
"ha_ndbcluster::create_index");
9825 DBUG_PRINT(
"enter", (
"name: %s ", name));
9827 ndb_protect_char(name, index_name,
sizeof(index_name) - 1,
'/');
9828 DBUG_PRINT(
"info", (
"index name: %s ", index_name));
9837 ndb_index.setLogging(FALSE);
9839 if (!m_table->getLogging())
9840 ndb_index.setLogging(FALSE);
9841 if (((
NDBTAB*)m_table)->getTemporary())
9842 ndb_index.setTemporary(TRUE);
9843 if (ndb_index.setTable(m_tabname))
9845 DBUG_RETURN(my_errno= errno);
9848 for (; key_part != end; key_part++)
9850 Field *field= key_part->field;
9851 #ifndef NDB_WITHOUT_COLUMN_FORMAT
9852 if (field->field_storage_type() == HA_SM_DISK)
9854 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
9855 ER_ILLEGAL_HA_CREATE_OPTION,
9856 ER(ER_ILLEGAL_HA_CREATE_OPTION),
9857 ndbcluster_hton_name,
9860 "STORAGE DISK is not supported");
9861 DBUG_RETURN(HA_ERR_UNSUPPORTED);
9864 DBUG_PRINT(
"info", (
"attr: %s", field->field_name));
9865 if (ndb_index.addColumnName(field->field_name))
9867 DBUG_RETURN(my_errno= errno);
9875 DBUG_PRINT(
"info", (
"Created index %s", name));
/*
  ha_ndbcluster::add_index_impl() fragment: online ADD INDEX worker.
  Requires the share to be in NSS_ALTERED state; for each new key,
  resolves key-part fields against the current table and calls
  create_index(), stopping at the first error.
  NOTE(review): lossy extraction; 'idx'/'error' declarations, key_part
  iterator setup and the return are in dropped lines.
*/
9879 int ha_ndbcluster::add_index_impl(THD *thd,
TABLE *table_arg,
9880 KEY *key_info, uint num_of_keys)
9884 DBUG_ENTER(
"ha_ndbcluster::add_index");
9885 DBUG_PRINT(
"enter", (
"table %s", table_arg->s->table_name.str));
9886 DBUG_ASSERT(m_share->state == NSS_ALTERED);
9888 for (idx= 0; idx < num_of_keys; idx++)
9890 KEY *key= key_info + idx;
9893 NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key_info,
false);
9894 DBUG_PRINT(
"info", (
"Adding index: '%s'", key_info[idx].name));
9896 for (; key_part != end; key_part++)
9897 key_part->field= table->field[key_part->fieldnr];
9900 if((error= create_index(thd, key_info[idx].name, key, idx_type, idx)))
/*
  ha_ndbcluster::rename_table() fragment (the signature line itself was
  dropped by the extraction; the DBUG_ENTER tag at line 9922 identifies
  the function).  Visible phases:
   - resolve old/new db+table names, verify connection and global schema
     lock; cross-database rename triggers recreate_indexes;
   - distribute SOT_RENAME_TABLE_PREPARE, rename the NDB_SHARE (undone
     via ndbcluster_undo_rename_share() if alterTableGlobal fails);
   - drop/re-create binlog events for the table unless old/new names are
     temporary (#sql) files, skipping distributed-privilege tables;
   - distribute the final schema op (SOT_ALTER_TABLE_COMMIT path for
     ALTER, otherwise the rename op at dropped line 10071);
   - for cross-db renames, re-create each non-system index under the new
     table and drop the old index objects;
   - release the share and return.
  NOTE(review): lossy extraction; 'result', 'ndb_table_id/_version',
  'new_tab', 'index_list' and many braces are in dropped lines.
*/
9912 THD *thd= current_thd;
9914 char old_dbname[FN_HEADLEN];
9915 char new_dbname[FN_HEADLEN];
9916 char new_tabname[FN_HEADLEN];
9919 bool recreate_indexes= FALSE;
9922 DBUG_ENTER(
"ha_ndbcluster::rename_table");
9923 DBUG_PRINT(
"info", (
"Renaming %s to %s", from, to));
9925 if (thd == injector_thd)
9935 set_dbname(from, old_dbname);
9936 set_dbname(to, new_dbname);
9938 set_tabname(to, new_tabname);
9940 if (check_ndb_connection(thd))
9941 DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION);
9943 Thd_ndb *thd_ndb= thd_get_thd_ndb(thd);
9944 if (!thd_ndb->has_required_global_schema_lock(
"ha_ndbcluster::rename_table"))
9945 DBUG_RETURN(HA_ERR_NO_CONNECTION);
9947 Ndb *ndb= get_ndb(thd);
9951 if (!(orig_tab= ndbtab_g.get_table()))
9954 if (my_strcasecmp(system_charset_info, new_dbname, old_dbname))
9957 recreate_indexes= TRUE;
9969 NDB_SHARE *share= get_share(from, 0, FALSE);
9970 int is_old_table_tmpfile= IS_TMP_PREFIX(m_tabname);
9971 int is_new_table_tmpfile= IS_TMP_PREFIX(new_tabname);
9972 if (!is_new_table_tmpfile && !is_old_table_tmpfile)
9978 ndbcluster_log_schema_op(thd, to, strlen(to),
9979 old_dbname, m_tabname,
9980 ndb_table_id, ndb_table_version,
9981 SOT_RENAME_TABLE_PREPARE,
9982 m_dbname, new_tabname);
9986 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
9987 share->key, share->use_count));
9988 ndbcluster_prepare_rename_share(share, to);
9989 int ret = ndbcluster_rename_share(thd, share);
9990 assert(ret == 0); NDB_IGNORE_VALUE(ret);
9995 if (dict->alterTableGlobal(*orig_tab, new_tab) != 0)
10000 int ret = ndbcluster_undo_rename_share(thd, share);
10001 assert(ret == 0); NDB_IGNORE_VALUE(ret);
10003 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
10004 share->key, share->use_count));
10005 free_share(&share);
10007 ERR_RETURN(ndb_error);
10017 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
10018 share->key, share->use_count));
10019 free_share(&share);
10021 DBUG_RETURN(result);
10025 if (!is_old_table_tmpfile)
10027 ndbcluster_drop_event(thd, ndb, share,
"rename table",
10028 old_dbname, m_tabname);
10031 if (!result && !is_new_table_tmpfile)
10034 const NDBTAB *ndbtab= ndbtab_g2.get_table();
10035 #ifdef HAVE_NDB_BINLOG
10037 ndbcluster_read_binlog_replication(thd, ndb, share, ndbtab,
10038 ::server_id, NULL, TRUE);
10041 String event_name(INJECTOR_EVENT_LEN);
10042 ndb_rep_event_name(&event_name, new_dbname, new_tabname,
10043 get_binlog_full(share));
10045 if (!Ndb_dist_priv_util::is_distributed_priv_table(new_dbname,
10047 !ndbcluster_create_event(thd, ndb, ndbtab, event_name.c_ptr(), share,
10048 share && ndb_binlog_running ? 2 : 1))
10050 if (opt_ndb_extra_logging)
10051 sql_print_information(
"NDB Binlog: RENAME Event: %s",
10052 event_name.c_ptr());
10053 if (share && (share->op == 0) &&
10054 ndbcluster_create_event_ops(thd, share, ndbtab, event_name.c_ptr()))
10056 sql_print_error(
"NDB Binlog: FAILED create event operations "
10057 "during RENAME. Event %s", event_name.c_ptr());
10065 if (!is_old_table_tmpfile)
10068 ndbcluster_log_schema_op(thd, thd->query(), thd->query_length(),
10069 old_dbname, m_tabname,
10070 ndb_table_id, ndb_table_version,
10072 m_dbname, new_tabname);
10077 ndbcluster_log_schema_op(thd, thd->query(), thd->query_length(),
10078 m_dbname, new_tabname,
10079 ndb_table_id, ndb_table_version,
10080 SOT_ALTER_TABLE_COMMIT,
10088 if (recreate_indexes)
10090 for (
unsigned i = 0; i < index_list.
count; i++)
10092 NDBDICT::List::Element& index_el = index_list.
elements[
i];
10094 if (my_strcasecmp(system_charset_info,
10095 index_el.database, NDB_SYSTEM_DATABASE))
10099 const NDBINDEX * index= dict->getIndexGlobal(index_el.name, new_tab);
10100 DBUG_PRINT(
"info", (
"Creating index %s/%s",
10101 index_el.database, index->
getName()));
10103 DBUG_PRINT(
"info", (
"Dropping index %s/%s",
10104 index_el.database, index->
getName()));
10107 dict->dropIndexGlobal(*index);
10114 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
10115 share->key, share->use_count));
10116 free_share(&share);
10119 DBUG_RETURN(result);
/*
  delete_table_drop_share(): mark an NDB_SHARE as dropped and release the
  references held on it, serialized by ndbcluster_mutex.
  NOTE(review): this text is a truncated extraction -- interior source lines
  (including the function's storage class / return type) are missing; the
  numbers fused into each line (10129, ...) are original file line numbers.
*/
10129 delete_table_drop_share(
NDB_SHARE* share,
const char * path)
10133 pthread_mutex_lock(&ndbcluster_mutex);
/* First drop: transition the share state and release the "create" ref. */
10135 if (share->state != NSS_DROPPED)
10140 share->state= NSS_DROPPED;
10142 DBUG_PRINT(
"NDB_SHARE", (
"%s create free use_count: %u",
10143 share->key, share->use_count));
10144 free_share(&share, TRUE);
/* Release the temporary reference taken by the caller. */
10147 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
10148 share->key, share->use_count));
10149 free_share(&share, TRUE);
10150 pthread_mutex_unlock(&ndbcluster_mutex);
/* Presumably the share == NULL branch: look the share up by path instead --
   TODO confirm against the full source. */
10154 pthread_mutex_lock(&ndbcluster_mutex);
10155 share= get_share(path, 0, FALSE, TRUE);
10160 pthread_mutex_unlock(&ndbcluster_mutex);
/*
  Tail of the table-drop implementation ("ha_ndbcluster::ndbcluster_delete_table"
  per the DBUG_ENTER tag): drops the NDB table in the data nodes, retrying on
  temporary errors, then distributes the drop via the schema log.
  NOTE(review): truncated extraction -- the signature and many interior lines
  are missing; fused numbers are original file line numbers.
*/
10170 const char *table_name)
10172 DBUG_ENTER(
"ha_ndbcluster::ndbcluster_delete_table");
10174 int ndb_table_id= 0;
10175 int ndb_table_version= 0;
/* Schema distribution must be up before a drop can be replicated. */
10180 if (!ndb_schema_share)
10182 DBUG_PRINT(
"info", (
"Schema distribution table not setup"));
10183 DBUG_RETURN(HA_ERR_NO_CONNECTION);
10186 NDB_SHARE *share= get_share(path, 0, FALSE);
10189 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
10190 share->key, share->use_count));
/* Path 1: a handler instance with cached metadata exists -- drop via it,
   retrying on temporary NDB errors. */
10196 if (h && h->m_table)
10198 retry_temporary_error1:
10199 if (dict->dropTableGlobal(*h->m_table) == 0)
10203 DBUG_PRINT(
"info", (
"success 1"));
10211 goto retry_temporary_error1;
10217 DBUG_PRINT(
"info", (
"error(1) %u", res));
10219 h->release_metadata(thd, ndb);
/* Path 2: no handler -- fetch the table through a table guard and drop it,
   recording id/version for the schema-distribution log entry. */
10227 if (ndbtab_g.get_table())
10229 retry_temporary_error2:
10230 if (dict->dropTableGlobal(*ndbtab_g.get_table()) == 0)
10232 ndb_table_id= ndbtab_g.get_table()->getObjectId();
10233 ndb_table_version= ndbtab_g.get_table()->getObjectVersion();
10234 DBUG_PRINT(
"info", (
"success 2"));
10243 goto retry_temporary_error2;
10248 ndbtab_g.invalidate();
10256 DBUG_PRINT(
"info", (
"error(2) %u", res));
10264 delete_table_drop_share(share, 0);
10281 ndbcluster_handle_drop_table(thd, ndb, share,
"delete table",
10289 ndbcluster_handle_drop_table(thd, ndb, share,
"delete table",
/* Distribute the DROP to other mysqlds, except for temp tables and
   TRUNCATE (which re-creates the table afterwards). */
10294 if (!IS_TMP_PREFIX(table_name) && share &&
10295 thd->lex->sql_command != SQLCOM_TRUNCATE)
10297 ndbcluster_log_schema_op(thd,
10298 thd->query(), thd->query_length(),
10299 share->db, share->table_name,
10300 ndb_table_id, ndb_table_version,
10301 SOT_DROP_TABLE, NULL, NULL);
10304 delete_table_drop_share(share, 0);
/*
  ha_ndbcluster::delete_table(): handler entry point for DROP TABLE.
  Validates connection and global schema lock, then delegates the actual
  drop to drop_table_impl().
  NOTE(review): truncated extraction -- signature and interior lines missing.
*/
10310 THD *thd= current_thd;
10311 Thd_ndb *thd_ndb= get_thd_ndb(thd);
10314 DBUG_ENTER(
"ha_ndbcluster::delete_table");
10315 DBUG_PRINT(
"enter", (
"name: %s", name));
/* The binlog injector thread (or a flagged Thd_ndb) must not drop the table
   in NDB again -- only the local share is released. */
10317 if ((thd == injector_thd) ||
10318 (thd_ndb->options & TNO_NO_NDB_DROP_TABLE))
10325 delete_table_drop_share(0, name);
10336 if (!ndb_schema_share)
10338 DBUG_PRINT(
"info", (
"Schema distribution table not setup"));
10339 error= HA_ERR_NO_CONNECTION;
10343 if (check_ndb_connection(thd))
10345 error= HA_ERR_NO_CONNECTION;
10351 if (!thd_ndb->has_required_global_schema_lock(
"ha_ndbcluster::delete_table"))
10353 error= HA_ERR_NO_CONNECTION;
/* A "no such table" result is treated like success here -- the table is
   gone either way; TODO confirm against the elided lines. */
10362 if (!(error= drop_table_impl(thd,
this, ndb, name,
10363 m_dbname, m_tabname)) ||
10364 error == HA_ERR_NO_SUCH_TABLE)
10373 DBUG_RETURN(error);
/*
  ha_ndbcluster::get_auto_increment(): fetch/reserve auto-increment values
  from NDB, retrying on temporary errors.  On failure *first_value is set to
  ~0 (the server's "no value" marker).
  NOTE(review): truncated extraction -- interior lines (retry condition,
  error variable setup, closing brace) are missing.
*/
10377 void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
10378 ulonglong nb_desired_values,
10379 ulonglong *first_value,
10380 ulonglong *nb_reserved_values)
10383 THD *thd= current_thd;
10384 DBUG_ENTER(
"get_auto_increment");
10385 DBUG_PRINT(
"enter", (
"m_tabname: %s", m_tabname));
10386 Ndb *ndb= get_ndb(table->in_use);
10387 uint retries= NDB_AUTO_INCREMENT_RETRIES;
10388 int retry_sleep= 30;
/* When auto-increment is skipped (value supplied by user), just read the
   current value; otherwise reserve a prefetched range. */
10392 if ((m_skip_auto_increment &&
10393 ndb->readAutoIncrementValue(m_table, g.range, auto_value)) ||
10394 ndb->getAutoIncrementValue(m_table, g.range, auto_value,
10395 Uint32(m_autoincrement_prefetch),
10396 increment, offset))
10398 if (--retries && !thd->killed &&
10401 do_retry_sleep(retry_sleep);
10405 sql_print_error(
"Error %lu in ::get_auto_increment(): %s",
10407 *first_value= ~(ulonglong) 0;
10412 *first_value= (longlong)auto_value;
/* Only one value is reported reserved; NDB handles ranges internally. */
10414 *nb_reserved_values= 1;
/*
  ha_ndbcluster constructor (initializer list + body): default-initializes
  all handler state -- cursors, flags, batching counters, pushed-join state --
  and marks all index slots unused.
  NOTE(review): truncated extraction -- the constructor signature line and
  some initializers are missing.
*/
10424 handler(hton, table_arg),
10426 m_active_cursor(NULL),
10429 m_ndb_hidden_key_record(0),
10430 m_table_info(NULL),
10432 m_key_fields(NULL),
10434 m_user_defined_partitioning(FALSE),
10435 m_use_partition_pruning(FALSE),
10437 m_use_write(FALSE),
10438 m_ignore_dup_key(FALSE),
10439 m_has_unique_index(FALSE),
10440 m_ignore_no_key(FALSE),
10441 m_read_before_write_removal_possible(FALSE),
10442 m_read_before_write_removal_used(FALSE),
10445 m_rows_to_insert((ha_rows) 1),
10446 m_rows_inserted((ha_rows) 0),
10447 m_rows_changed((ha_rows) 0),
10448 m_delete_cannot_batch(FALSE),
10449 m_update_cannot_batch(FALSE),
10450 m_skip_auto_increment(TRUE),
10451 m_blobs_pending(0),
10452 m_is_bulk_delete(false),
10453 m_blobs_row_total_size(0),
10455 m_blobs_buffer_size(0),
10456 m_dupkey((uint) -1),
10457 m_autoincrement_prefetch(DEFAULT_AUTO_PREFETCH),
10458 m_pushed_join_member(NULL),
10459 m_pushed_join_operation(-1),
10460 m_disable_pushed_join(FALSE),
10461 m_active_query(NULL),
10462 m_pushed_operation(NULL),
10464 m_multi_cursor(NULL)
10468 DBUG_ENTER(
"ha_ndbcluster");
10470 m_tabname[0]=
'\0';
/* ~0 records == "unknown row count" until stats are fetched. */
10473 stats.records= ~(ha_rows)0;
10474 stats.block_size= 1024;
10476 for (i= 0; i < MAX_KEY; i++)
10477 ndb_init_index(m_index[i]);
/*
  Destructor: release the NDB_SHARE reference, metadata, blob buffers and
  any pushed-join plan owned by this handler instance.
  NOTE(review): truncated extraction -- interior lines missing.
*/
10487 ha_ndbcluster::~ha_ndbcluster()
10489 THD *thd= current_thd;
/* May run without a THD (server shutdown) -- fall back to the global Ndb. */
10490 Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb;
10491 DBUG_ENTER(
"~ha_ndbcluster");
10496 DBUG_PRINT(
"NDB_SHARE", (
"%s handler free use_count: %u",
10497 m_share->key, m_share->use_count));
10498 free_share(&m_share);
10500 release_metadata(thd, ndb);
10501 release_blobs_buffer();
/* Handler must be detached from any Thd_ndb by now. */
10504 DBUG_ASSERT(m_thd_ndb == NULL);
10507 DBUG_PRINT(
"info", (
"Deleting generated condition"));
10513 DBUG_PRINT(
"info", (
"Deleting pushed joins"));
10514 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
10515 DBUG_ASSERT(m_active_query == NULL);
10516 DBUG_ASSERT(m_active_cursor == NULL);
/* Only the root of a pushed join owns (and deletes) the shared member. */
10517 if (m_pushed_join_operation==PUSHED_ROOT)
10519 delete m_pushed_join_member;
10521 m_pushed_join_member= NULL;
/*
  ha_ndbcluster::open(): open a table handle.  Builds per-key field bitmaps,
  acquires the NDB_SHARE (setting up binlogging if needed), fetches table
  metadata and initial statistics.  On any failure, local_close() unwinds
  whatever was set up so far.
  NOTE(review): truncated extraction -- many interior lines (error returns,
  braces) are missing; fused numbers are original file line numbers.
*/
10538 int ha_ndbcluster::open(
const char *name,
int mode, uint test_if_locked)
10540 THD *thd= current_thd;
10544 uint key_parts,
i, j;
10545 DBUG_ENTER(
"ha_ndbcluster::open");
10546 DBUG_PRINT(
"enter", (
"name: %s mode: %d test_if_locked: %d",
10547 name, mode, test_if_locked));
10549 if (table_share->primary_key != MAX_KEY)
10555 key= table->key_info+table_share->primary_key;
10560 if (m_user_defined_partitioning)
10566 DBUG_PRINT(
"info", (
"ref_length: %d",
ref_length));
/* Allocate one MY_BITMAP per key (plus one for the hidden key when there
   is no primary key) in a single zero-filled allocation:
   [ptr array | bitmap structs]. */
10569 char* bitmap_array;
10570 uint extra_hidden_keys= table_share->primary_key != MAX_KEY ? 0 : 1;
10571 uint n_keys= table_share->keys + extra_hidden_keys;
10572 uint ptr_size=
sizeof(
MY_BITMAP*) * (n_keys + 1 );
10573 uint map_size=
sizeof(
MY_BITMAP) * n_keys;
10574 m_key_fields= (
MY_BITMAP**)my_malloc(ptr_size + map_size,
10575 MYF(MY_WME + MY_ZEROFILL));
10578 local_close(thd, FALSE);
10581 bitmap_array= ((
char*)m_key_fields) + ptr_size;
10582 for (i= 0; i < n_keys; i++)
10584 my_bitmap_map *bitbuf= NULL;
10585 bool is_hidden_key= (i == table_share->keys);
/* The primary key (or hidden key) bitmap uses the preallocated buffer. */
10587 if (is_hidden_key || (i == table_share->primary_key))
10589 m_pk_bitmap_p= m_key_fields[
i];
10590 bitbuf= m_pk_bitmap_buf;
10592 if (bitmap_init(m_key_fields[i], bitbuf,
10593 table_share->fields, FALSE))
10595 m_key_fields[
i]= NULL;
10596 local_close(thd, FALSE);
/* Mark which table fields participate in this key. */
10599 if (!is_hidden_key)
10601 key= table->key_info +
i;
10602 key_part_info= key->key_part;
10604 for (j= 0; j < key_parts; j++, key_part_info++)
10605 bitmap_set_bit(m_key_fields[i], key_part_info->fieldnr-1);
/* Hidden key: set the bit just past the last real field by hand. */
10609 uint field_no= table_share->fields;
10610 ((uchar *)m_pk_bitmap_buf)[field_no>>3]|= (1 << (field_no & 7));
10614 m_key_fields[
i]= NULL;
10620 if ((res= check_ndb_connection(thd)) != 0)
10622 local_close(thd, FALSE);
/* If no share exists yet, run binlog setup and retry the share lookup. */
10628 if ((m_share=get_share(name, table, FALSE)) == 0)
10633 if (opt_ndb_extra_logging > 19)
10635 sql_print_information(
"Calling ndbcluster_create_binlog_setup(%s) in ::open",
10638 Ndb* ndb= check_ndb_in_thd(thd);
10639 ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
10640 m_dbname, m_tabname, table);
10641 if ((m_share=get_share(name, table, FALSE)) == 0)
10643 local_close(thd, FALSE);
10648 DBUG_PRINT(
"NDB_SHARE", (
"%s handler use_count: %u",
10649 m_share->key, m_share->use_count));
10650 thr_lock_data_init(&m_share->lock,&m_lock,(
void*) 0);
10652 if ((res= get_metadata(thd, name)))
10654 local_close(thd, FALSE);
/* Metadata is loaded, so a failing stats fetch must also release it
   (local_close with release_metadata_flag=TRUE). */
10658 if ((res= update_stats(thd, 1,
true)) ||
10659 (res= info(HA_STATUS_CONST)))
10661 local_close(thd, TRUE);
10664 if (ndb_binlog_is_read_only())
10666 table->db_stat|= HA_READ_ONLY;
10667 sql_print_information(
"table '%s' opened read only", name);
/*
  ha_ndbcluster::optimize(): run OPTIMIZE TABLE against NDB (with a
  per-connection throttling delay), then refresh table statistics.
  Returns the optimize error if any, else the stats error.
*/
10677 int ha_ndbcluster::optimize(THD* thd,
HA_CHECK_OPT* check_opt)
10679 ulong error, stats_error= 0;
10680 const uint delay= (uint)THDVAR(thd, optimization_delay);
10682 error= ndb_optimize_table(thd, delay);
10683 stats_error= update_stats(thd, 1);
10684 return (error) ? error : stats_error;
/*
  ha_ndbcluster::ndb_optimize_table(): drive NDB's online OPTIMIZE for the
  table, then for each active ordered index and each unique index, polling
  the optimization handles (th/ih) and sleeping `delay` ms between steps.
  NOTE(review): heavily truncated extraction -- handle setup, loop bodies
  and error paths are missing; do not infer control flow from what remains.
*/
10687 int ha_ndbcluster::ndb_optimize_table(THD* thd, uint delay)
10689 Thd_ndb *thd_ndb= get_thd_ndb(thd);
10690 Ndb *ndb= thd_ndb->ndb;
10692 int result=0, error= 0;
10697 DBUG_ENTER(
"ndb_optimize_table");
10701 (
"Optimze table %s returned %d", m_tabname, error));
/* Poll the table-optimize handle; next()==1 means "more work". */
10704 while((result= th.
next()) == 1)
10708 my_sleep(1000*delay);
10710 if (result == -1 || th.
close() == -1)
10713 (
"Optimize table %s did not complete", m_tabname));
/* Repeat per index slot. */
10716 for (i= 0; i < MAX_KEY; i++)
10720 if (m_index[i].status == ACTIVE)
10730 (
"Optimze index %s returned %d",
10735 while((result= ih.
next()) == 1)
10739 my_sleep(1000*delay);
10741 if (result == -1 || ih.
close() == -1)
10744 (
"Optimize index %s did not complete", index->
getName()));
/* Same polling loop for the unique index, if present. */
10753 (
"Optimze unique index %s returned %d",
10754 unique_index->
getName(), error));
10757 while((result= ih.
next()) == 1)
10761 my_sleep(1000*delay);
10763 if (result == -1 || ih.
close() == -1)
10766 (
"Optimize index %s did not complete", index->
getName()));
/*
  ha_ndbcluster::analyze(): ANALYZE TABLE -- refresh table statistics and,
  when index statistics are enabled both globally and for this session,
  analyze the ordered indexes too.
  NOTE(review): truncated extraction -- return statements are missing.
*/
10775 int ha_ndbcluster::analyze(THD* thd,
HA_CHECK_OPT* check_opt)
10778 if ((err= update_stats(thd, 1)) != 0)
/* Index-stat is on only if enabled at both the global and session level. */
10780 const bool index_stat_enable= THDVAR(NULL, index_stat_enable) &&
10781 THDVAR(thd, index_stat_enable);
10782 if (index_stat_enable)
10784 if ((err= analyze_index(thd)) != 0)
/*
  ha_ndbcluster::analyze_index(): collect the ordered indexes of this table
  (primary-key-ordered, unique-ordered and plain ordered) and hand them to
  ndb_index_stat_analyze() for statistics collection.
  NOTE(review): truncated extraction -- the return type line and the
  function's returns are missing.
*/
10791 ha_ndbcluster::analyze_index(THD *thd)
10793 DBUG_ENTER(
"ha_ndbcluster::analyze_index");
10795 Thd_ndb *thd_ndb= get_thd_ndb(thd);
10796 Ndb *ndb= thd_ndb->ndb;
10798 uint inx_list[MAX_INDEXES];
10802 for (inx= 0; inx < table_share->keys; inx++)
10804 NDB_INDEX_TYPE idx_type= get_index_type(inx);
/* Only ordered index types carry NDB index statistics. */
10806 if ((idx_type == PRIMARY_KEY_ORDERED_INDEX ||
10807 idx_type == UNIQUE_ORDERED_INDEX ||
10808 idx_type == ORDERED_INDEX))
10810 if (inx_count < MAX_INDEXES)
10811 inx_list[inx_count++]= inx;
10815 if (inx_count != 0)
10817 int err= ndb_index_stat_analyze(ndb, inx_list, inx_count);
/*
  ha_ndbcluster::set_part_info(): record the table's partition_info and
  decide whether partition pruning / user-defined partitioning applies.
  NOTE(review): truncated extraction -- interior lines missing.
*/
10838 void ha_ndbcluster::set_part_info(
partition_info *part_info,
bool early)
10840 DBUG_ENTER(
"ha_ndbcluster::set_part_info");
10841 m_part_info= part_info;
10844 m_use_partition_pruning= FALSE;
/* Anything other than non-subpartitioned HASH-by-field-list counts as
   user-defined partitioning and enables pruning. */
10845 if (!(m_part_info->part_type == HASH_PARTITION &&
10846 m_part_info->list_of_part_fields &&
10847 !m_part_info->is_sub_partitioned()))
10854 m_use_partition_pruning= TRUE;
10855 m_user_defined_partitioning= TRUE;
/* HASH by field list with no full partition fields: pruning would be
   ineffective, turn it back off. */
10857 if (m_part_info->part_type == HASH_PARTITION &&
10858 m_part_info->list_of_part_fields &&
10859 partition_info_num_full_part_fields(m_part_info) == 0)
10871 m_use_partition_pruning= FALSE;
10873 DBUG_PRINT(
"info", (
"m_use_partition_pruning = %d",
10874 m_use_partition_pruning));
/*
  ha_ndbcluster::local_close(): free the per-key bitmaps and the share
  reference; optionally release table metadata too.  Used both by close()
  and as the error-unwind path of open().
  NOTE(review): truncated extraction -- interior lines missing.
*/
10884 void ha_ndbcluster::local_close(THD *thd,
bool release_metadata_flag)
10887 DBUG_ENTER(
"ha_ndbcluster::local_close");
/* Only bitmaps with their own heap buffer are freed; the PK bitmap uses
   the preallocated m_pk_bitmap_buf. */
10891 for (inx_bitmap= m_key_fields;
10892 (inx_bitmap != NULL) && ((*inx_bitmap) != NULL);
10894 if ((*inx_bitmap)->bitmap != m_pk_bitmap_buf)
10895 bitmap_free(*inx_bitmap);
10896 my_free((
char*)m_key_fields, MYF(0));
10897 m_key_fields= NULL;
10902 DBUG_PRINT(
"NDB_SHARE", (
"%s handler free use_count: %u",
10903 m_share->key, m_share->use_count));
10904 free_share(&m_share);
10907 if (release_metadata_flag)
/* May run without a THD; fall back to the global Ndb object. */
10909 ndb= thd ? check_ndb_in_thd(thd) : g_ndb;
10910 release_metadata(thd, ndb);
/*
  ha_ndbcluster::close(): handler close -- delegates to local_close() with
  metadata release.  NOTE(review): truncated extraction; the return is missing.
*/
10915 int ha_ndbcluster::close(
void)
10917 DBUG_ENTER(
"close");
10918 THD *thd= table->in_use;
10919 local_close(thd, TRUE);
/*
  ha_ndbcluster::check_ndb_connection(): ensure the session has a usable Ndb
  object; returns HA_ERR_NO_CONNECTION when one cannot be obtained.
  NOTE(review): truncated extraction; success return missing.
*/
10924 int ha_ndbcluster::check_ndb_connection(THD* thd)
10927 DBUG_ENTER(
"check_ndb_connection");
10929 if (!(ndb= check_ndb_in_thd(thd,
true)))
10930 DBUG_RETURN(HA_ERR_NO_CONNECTION);
/*
  ndbcluster_close_connection(): handlerton close_connection hook -- release
  the session's Thd_ndb and detach it from the THD.
  NOTE(review): truncated extraction; return missing.
*/
10939 static int ndbcluster_close_connection(
handlerton *hton, THD *thd)
10941 Thd_ndb *thd_ndb= get_thd_ndb(thd);
10942 DBUG_ENTER(
"ndbcluster_close_connection");
10945 Thd_ndb::release(thd_ndb);
10946 thd_set_thd_ndb(thd, NULL);
/*
  ndbcluster_discover(): handlerton discover hook -- produce the .frm data
  for a table that exists in NDB but not locally.  Uses the locally stored
  frm when the share is in NSS_ALTERED state, otherwise unpacks the frm
  blob stored in the NDB dictionary.
  NOTE(review): truncated extraction -- many interior lines (error handling,
  returns, braces) are missing.
*/
10956 int ndbcluster_discover(
handlerton *hton, THD* thd,
const char *db,
10966 char key[FN_REFLEN + 1];
10967 DBUG_ENTER(
"ndbcluster_discover");
10968 DBUG_PRINT(
"enter", (
"db: %s, name: %s", db, name));
10970 if (!(ndb= check_ndb_in_thd(thd)))
10971 DBUG_RETURN(HA_ERR_NO_CONNECTION);
10977 build_table_filename(key,
sizeof(key) - 1, db, name,
"", 0);
10979 NDB_SHARE *share= get_share(key, 0, FALSE);
10982 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
10983 share->key, share->use_count));
/* Mid-ALTER: the local frm on disk is authoritative. */
10985 if (share && get_ndb_share_state(share) == NSS_ALTERED)
10988 if (
readfrm(key, &data, &len))
10990 DBUG_PRINT(
"error", (
"Could not read frm"));
10998 const NDBTAB *tab= ndbtab_g.get_table();
/* 709/723 == table does not exist in NDB -- not a hard error here. */
11002 if (err.
code == 709 || err.
code == 723)
11005 DBUG_PRINT(
"info", (
"ndb_error.code: %u", ndb_error.
code));
11011 DBUG_PRINT(
"info", (
"ndb_error.code: %u", ndb_error.
code));
11015 DBUG_PRINT(
"info", (
"Found table %s", tab->
getName()));
11017 len= tab->getFrmLength();
11020 DBUG_PRINT(
"error", (
"No frm data found."));
11025 if (unpackfrm(&data, &len, (uchar*) tab->
getFrmData()))
11027 DBUG_PRINT(
"error", (
"Could not unpack table"));
/* A locally-defined (non-NDB) table of the same name shadows the NDB one;
   distributed privilege tables are exempt. */
11032 #ifdef HAVE_NDB_BINLOG
11033 if (ndbcluster_check_if_local_table(db, name) &&
11034 !Ndb_dist_priv_util::is_distributed_priv_table(db, name))
11036 DBUG_PRINT(
"info", (
"ndbcluster_discover: Skipping locally defined table '%s.%s'",
11038 sql_print_error(
"ndbcluster_discover: Skipping locally defined table '%s.%s'",
11050 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
11051 share->key, share->use_count));
11052 free_share(&share);
11057 my_free((
char*)data, MYF(MY_ALLOW_ZERO_PTR));
11061 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
11062 share->key, share->use_count));
11063 free_share(&share);
11066 if (ndb_error.
code)
11068 ERR_RETURN(ndb_error);
11070 DBUG_RETURN(error);
/*
  ndbcluster_table_exists_in_engine(): handlerton hook -- scan the NDB
  dictionary's table list for a case-insensitive db/name match.
  Returns HA_ERR_TABLE_EXIST on a hit, HA_ERR_NO_SUCH_TABLE otherwise.
  NOTE(review): truncated extraction -- list retrieval and element access
  lines are missing.
*/
11077 int ndbcluster_table_exists_in_engine(
handlerton *hton, THD* thd,
11082 DBUG_ENTER(
"ndbcluster_table_exists_in_engine");
11083 DBUG_PRINT(
"enter", (
"db: %s name: %s", db, name));
11085 if (!(ndb= check_ndb_in_thd(thd)))
11086 DBUG_RETURN(HA_ERR_NO_CONNECTION);
11093 for (uint i= 0 ; i < list.
count ; i++)
/* Non-zero my_strcasecmp() == mismatch -> skip this element. */
11096 if (my_strcasecmp(table_alias_charset, elmt.
database, db))
11098 if (my_strcasecmp(table_alias_charset, elmt.
name, name))
11100 DBUG_PRINT(
"info", (
"Found table"));
11101 DBUG_RETURN(HA_ERR_TABLE_EXIST);
11103 DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
/* tables_get_key(): my_hash_get_key callback for the NUL-terminated string
   hashes used below -- the entry itself is the key, length is strlen. */
11108 extern "C" uchar* tables_get_key(
const char *
entry,
size_t *length,
11109 my_bool not_used __attribute__((unused)))
11111 *length= strlen(entry);
11112 return (uchar*)
entry;
/*
  ndbcluster_drop_database_impl(): drop every NDB table belonging to the
  database derived from `path`, then invalidate the database in the
  dictionary cache.  Ignores "table does not exist" (709/723) errors.
  NOTE(review): truncated extraction -- list retrieval, loop braces and the
  final return are missing.
*/
11122 int ndbcluster_drop_database_impl(THD *thd,
const char *path)
11124 DBUG_ENTER(
"ndbcluster_drop_database");
11125 char dbname[FN_HEADLEN];
11132 ha_ndbcluster::set_dbname(path, (
char *)&dbname);
11133 DBUG_PRINT(
"enter", (
"db: %s", dbname));
11135 if (!(ndb= check_ndb_in_thd(thd)))
/* Pass 1: collect names of this database's tables (skipping blob tables). */
11143 for (i= 0 ; i < list.
count ; i++)
11146 DBUG_PRINT(
"info", (
"Found %s/%s in NDB", elmt.
database, elmt.
name));
11151 if (my_strcasecmp(system_charset_info, elmt.
database, dbname) ||
11152 IS_NDB_BLOB_PREFIX(elmt.
name))
11154 DBUG_PRINT(
"info", (
"%s must be dropped", elmt.
name));
11155 drop_list.push_back(thd->strdup(elmt.
name));
/* Pass 2: drop each collected table via drop_table_impl, building the
   filesystem-encoded path in place. */
11158 char full_path[FN_REFLEN + 1];
11159 char *tmp= full_path +
11160 build_table_filename(full_path,
sizeof(full_path) - 1, dbname,
"",
"", 0);
11166 while ((tabname=it++))
11168 tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
11169 if (ha_ndbcluster::drop_table_impl(thd, 0, ndb, full_path, dbname, tabname))
/* 709/723 == already gone; anything else becomes the return code. */
11172 if (err.
code != 709 && err.
code != 723)
11174 ret= ndb_to_mysql_error(&err);
11179 dict->invalidateDbGlobal(dbname);
/*
  ndbcluster_drop_database(): handlerton drop_database hook -- run the
  actual drop, then distribute a SOT_DROP_DB schema operation to other
  mysqld nodes.
  NOTE(review): truncated extraction -- interior lines missing.
*/
11183 static void ndbcluster_drop_database(
handlerton *hton,
char *path)
11185 THD *thd= current_thd;
11186 DBUG_ENTER(
"ndbcluster_drop_database");
11191 if (!ndb_schema_share)
11193 DBUG_PRINT(
"info", (
"Schema distribution table not setup"));
11196 ndbcluster_drop_database_impl(thd, path);
11197 char db[FN_REFLEN];
11198 ha_ndbcluster::set_dbname(path, db);
11199 uint32 table_id= 0, table_version= 0;
/* No real table id/version for a DB drop -- random values keep the schema
   log entries distinguishable. */
11207 table_id = (uint32)rand();
11208 table_version = (uint32)rand();
11209 ndbcluster_log_schema_op(thd,
11210 thd->query(), thd->query_length(),
11211 db,
"", table_id, table_version,
11212 SOT_DROP_DB, NULL, NULL);
/*
  ndb_create_table_from_engine(): create a local table definition from the
  engine copy (discovery).  Copies db/name into local buffers (they may
  point into thd mem that discovery invalidates) and swaps in a fresh LEX
  around the call.
  NOTE(review): severely truncated -- the actual discovery call and LEX
  restore are missing from this extraction.
*/
11216 int ndb_create_table_from_engine(THD *thd,
const char *db,
11217 const char *table_name)
11221 char db_buf[FN_REFLEN + 1];
11222 char table_name_buf[FN_REFLEN + 1];
11223 strnmov(db_buf, db,
sizeof(db_buf));
11224 strnmov(table_name_buf, table_name,
sizeof(table_name_buf));
11226 LEX *old_lex= thd->lex, newlex;
11228 newlex.current_select= NULL;
/*
  ndbcluster_find_all_files(): at startup/reconnect, walk every table in the
  NDB dictionary and make sure the local data dictionary (.frm files) and
  binlog setup agree with it, discovering tables whose frm is missing or
  stale.  Retries up to 5 times while tables remain unhandled.
  NOTE(review): truncated extraction -- loop braces, counters and several
  branches are missing.
*/
11238 int ndbcluster_find_all_files(THD *thd)
11241 char key[FN_REFLEN + 1];
11243 int unhandled, retries= 5, skipped;
11244 DBUG_ENTER(
"ndbcluster_find_all_files");
11246 if (!(ndb= check_ndb_in_thd(thd)))
11247 DBUG_RETURN(HA_ERR_NO_CONNECTION);
11251 LINT_INIT(unhandled);
11252 LINT_INIT(skipped);
11261 for (uint i= 0 ; i < list.
count ; i++)
11263 NDBDICT::List::Element& elmt= list.
elements[
i];
/* Temporary and blob-part tables are internal -- skip them. */
11264 if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
11266 DBUG_PRINT(
"info", (
"Skipping %s.%s in NDB", elmt.database, elmt.name));
11269 DBUG_PRINT(
"info", (
"Found %s.%s in NDB", elmt.database, elmt.name));
11274 sql_print_information(
"NDB: skipping setup table %s.%s, in state %d",
11275 elmt.database, elmt.name, elmt.state);
11282 const NDBTAB *ndbtab= ndbtab_g.get_table();
11286 sql_print_error(
"NDB: failed to setup table %s.%s, error: %d, %s",
11287 elmt.database, elmt.name,
/* No frm stored in NDB: nothing to compare against. */
11294 if (ndbtab->getFrmLength() == 0)
/* Check the local database directory exists before touching files in it. */
11299 build_table_filename(key,
sizeof(key) - 1, elmt.database,
"",
"", 0);
11300 if (my_access(key, F_OK))
11306 end+= tablename_to_filename(elmt.name, end,
11307 sizeof(key)-(end-key));
11308 uchar *data= 0, *pack_data= 0;
11309 size_t length, pack_length;
/* Local frm missing or unreadable -> discover from NDB. */
11311 if (
readfrm(key, &data, &length) ||
11312 packfrm(data, length, &pack_data, &pack_length))
11315 sql_print_information(
"NDB: missing frm for %s.%s, discovering...",
11316 elmt.database, elmt.name);
/* Local frm differs from NDB's copy -> rediscover, unless the share is in
   NSS_ALTERED state (an ALTER is in flight). */
11318 else if (cmp_frm(ndbtab, pack_data, pack_length))
11321 NDB_SHARE *share= get_share(key, 0, FALSE);
11324 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
11325 share->key, share->use_count));
11327 if (!share || get_ndb_share_state(share) != NSS_ALTERED)
11330 sql_print_information(
"NDB: mismatch in frm for %s.%s, discovering...",
11331 elmt.database, elmt.name);
11336 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
11337 share->key, share->use_count));
11338 free_share(&share);
11341 my_free((
char*) data, MYF(MY_ALLOW_ZERO_PTR));
11342 my_free((
char*) pack_data, MYF(MY_ALLOW_ZERO_PTR));
11347 if (ndb_create_table_from_engine(thd, elmt.database, elmt.name))
11355 ndbcluster_create_binlog_setup(thd, ndb, key, end-key,
11356 elmt.database, elmt.name,
11361 while (unhandled && retries);
/* Negative return encodes how many tables could not be handled. */
11363 DBUG_RETURN(-(skipped + unhandled));
/*
  ndbcluster_find_files(): handlerton find_files hook.  Reconciles the list
  of tables the server found on disk for `db` against the NDB dictionary:
  - tables in NDB but missing locally are discovered (create_list),
  - local files with no NDB table are scheduled for removal (delete_list),
  - local non-NDB tables shadowing NDB tables produce warnings,
  and binlog setup is (re)done for each surviving table.  Finally the
  ndb_schema table is hidden from SHOW output in the NDB_REP_DB database.
  NOTE(review): truncated extraction -- list retrieval, several branches,
  braces and returns are missing; fused numbers are original line numbers.
*/
11368 ndbcluster_find_files(
handlerton *hton, THD *thd,
11369 const char *db,
const char *path,
11372 DBUG_ENTER(
"ndbcluster_find_files");
11373 DBUG_PRINT(
"enter", (
"db: %s", db));
11378 char name[FN_REFLEN + 1];
11379 HASH ndb_tables, ok_tables;
11382 if (!(ndb= check_ndb_in_thd(thd)))
11383 DBUG_RETURN(HA_ERR_NO_CONNECTION);
11384 thd_ndb= get_thd_ndb(thd);
/* A global schema lock keeps the dictionary stable while we compare. */
11390 if (ndb_global_schema_lock_guard.lock())
11391 DBUG_RETURN(HA_ERR_NO_CONNECTION);
11399 if (my_hash_init(&ndb_tables, table_alias_charset,list.
count,0,0,
11400 (my_hash_get_key)tables_get_key,0,0))
11402 DBUG_PRINT(
"error", (
"Failed to init HASH ndb_tables"));
11406 if (my_hash_init(&ok_tables, system_charset_info,32,0,0,
11407 (my_hash_get_key)tables_get_key,0,0))
11409 DBUG_PRINT(
"error", (
"Failed to init HASH ok_tables"));
11410 my_hash_free(&ndb_tables);
/* Build ndb_tables = set of NDB table names in this db matching `wild`. */
11414 for (i= 0 ; i < list.
count ; i++)
11416 NDBDICT::List::Element& elmt= list.
elements[
i];
11417 if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
11419 DBUG_PRINT(
"info", (
"Skipping %s.%s in NDB", elmt.database, elmt.name));
11422 DBUG_PRINT(
"info", (
"Found %s/%s in NDB", elmt.database, elmt.name));
11425 if (my_strcasecmp(system_charset_info, elmt.database, db))
11431 if (lower_case_table_names)
11433 if (wild_case_compare(files_charset_info, elmt.name, wild))
11436 else if (wild_compare(elmt.name,wild,0))
11439 DBUG_PRINT(
"info", (
"Inserting %s into ndb_tables hash", elmt.name));
11440 my_hash_insert(&ndb_tables, (uchar*)thd->strdup(elmt.name));
/* Walk the files the server found on disk and classify each. */
11446 char *file_name_str;
11447 while ((file_name=it++))
11449 bool file_on_disk= FALSE;
11450 DBUG_PRINT(
"info", (
"%s", file_name->str));
11451 if (my_hash_search(&ndb_tables,
11452 (
const uchar*)file_name->str, file_name->length))
11454 build_table_filename(name,
sizeof(name) - 1, db,
11455 file_name->str, reg_ext, 0);
/* In NDB but the .frm is missing locally -> discover it now. */
11456 if (my_access(name, F_OK))
11458 DBUG_PRINT(
"info", (
"Table %s listed and need discovery",
11460 if (ndb_create_table_from_engine(thd, db, file_name->str))
11462 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
11463 ER_TABLE_EXISTS_ERROR,
11464 "Discover of table %s.%s failed",
11465 db, file_name->str);
11469 DBUG_PRINT(
"info", (
"%s existed in NDB _and_ on disk ", file_name->str));
11470 file_on_disk= TRUE;
/* Is the on-disk file actually an NDB table (.ndb file present)? */
11474 build_table_filename(name,
sizeof(name) - 1, db,
11475 file_name->str, ha_ndb_ext, 0);
11476 DBUG_PRINT(
"info", (
"Check access for %s", name));
11477 if (my_access(name, F_OK))
11479 DBUG_PRINT(
"info", (
"%s did not exist on disk", name));
/* Local non-NDB table shadows an NDB table of the same name: remove it
   from the NDB set and warn. */
11484 uchar *record= my_hash_search(&ndb_tables,
11485 (
const uchar*) file_name->str,
11486 file_name->length);
11487 DBUG_ASSERT(record);
11488 my_hash_delete(&ndb_tables, record);
11489 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
11490 ER_TABLE_EXISTS_ERROR,
11491 "Local table %s.%s shadows ndb table",
11492 db, file_name->str);
11499 my_hash_insert(&ok_tables, (uchar*) file_name->str);
11502 DBUG_PRINT(
"info", (
"%s existed on disk", name));
/* .ndb file exists but NDB says no such table -> stale file, delete it. */
11505 if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name->str) ==
11506 HA_ERR_NO_SUCH_TABLE)
11508 DBUG_PRINT(
"info", (
"NDB says %s does not exists", file_name->str));
11511 delete_list.push_back(thd->strdup(file_name->str));
/* (Re)do binlog setup for every table confirmed OK. */
11517 char *end, *end1= name +
11518 build_table_filename(name,
sizeof(name) - 1, db,
"",
"", 0);
11519 for (i= 0; i < ok_tables.records; i++)
11521 file_name_str= (
char*)my_hash_element(&ok_tables, i);
11523 tablename_to_filename(file_name_str, end1,
sizeof(name) - (end1 - name));
11524 ndbcluster_create_binlog_setup(thd, ndb, name, end-name,
11525 db, file_name_str, 0);
/* Whatever remains in ndb_tables has no local file -> discover. */
11530 DBUG_PRINT(
"info", (
"Checking for new files to discover"));
11532 for (i= 0 ; i < ndb_tables.records ; i++)
11534 file_name_str= (
char*) my_hash_element(&ndb_tables, i);
11535 if (!my_hash_search(&ok_tables,
11536 (
const uchar*) file_name_str, strlen(file_name_str)))
11538 build_table_filename(name,
sizeof(name) - 1,
11539 db, file_name_str, reg_ext, 0);
11540 if (my_access(name, F_OK))
11542 DBUG_PRINT(
"info", (
"%s must be discovered", file_name_str));
11545 create_list.push_back(thd->strdup(file_name_str));
/* Remove stale local definitions; TNO_NO_NDB_DROP_TABLE prevents the drop
   from being forwarded back into NDB. */
11550 #ifndef NDB_NO_MYSQL_RM_TABLE_PART2
11561 while ((file_name_str= it3++))
11563 DBUG_PRINT(
"info", (
"Removing table %s/%s", db, file_name_str));
11567 file_name_str, strlen(file_name_str),
11570 table_list.mdl_request.
set_type(MDL_EXCLUSIVE);
11575 thd_ndb->options|= TNO_NO_NDB_DROP_TABLE;
11576 (void)mysql_rm_table_part2(thd, &table_list,
11581 thd_ndb->options&= ~TNO_NO_NDB_DROP_TABLE;
11582 trans_commit_implicit(thd);
11583 thd->mdl_context.release_transactional_locks();
11585 thd->clear_error();
/* Discover the missing tables and report them to the caller's file list. */
11591 while ((file_name_str=it2++))
11593 DBUG_PRINT(
"info", (
"Table %s need discovery", file_name_str));
11594 if (ndb_create_table_from_engine(thd, db, file_name_str) == 0)
11597 tmp_file_name= thd->make_lex_string(tmp_file_name, file_name_str,
11598 strlen(file_name_str), TRUE);
11599 files->push_back(tmp_file_name);
11603 my_hash_free(&ok_tables);
11604 my_hash_free(&ndb_tables);
/* Hide the internal ndb_schema table from listings of the rep database. */
11607 if (!strcmp(db, NDB_REP_DB))
11610 while (count++ < files->elements)
11613 if (!strcmp(file_name->str, NDB_SCHEMA_TABLE))
11615 DBUG_PRINT(
"info", (
"skip %s.%s table, it should be hidden to user",
11616 NDB_REP_DB, NDB_SCHEMA_TABLE));
11619 files->push_back(file_name);
/*
  connect_callback(): invoked when the cluster connection is established.
  Refreshes status variables, rebuilds the node-id -> index map and wakes
  the utility thread.  NOTE(review): truncated extraction; return missing.
*/
11633 static int connect_callback()
11635 pthread_mutex_lock(&LOCK_ndb_util_thread);
11636 update_status_variables(NULL, &g_ndb_status,
11637 g_ndb_cluster_connection);
11639 uint node_id, i= 0;
/* 0xFFFF fill marks every slot "no such node" before remapping. */
11641 memset((
void *)g_node_id_map, 0xFFFF,
sizeof(g_node_id_map));
11642 while ((node_id= g_ndb_cluster_connection->get_next_node(node_iter)))
11643 g_node_id_map[node_id]= i++;
11645 pthread_cond_signal(&COND_ndb_util_thread);
11646 pthread_mutex_unlock(&LOCK_ndb_util_thread);
11650 #ifndef NDB_NO_WAIT_SETUP
/*
  ndb_wait_setup_func_impl(): block up to max_wait (units elided in this
  extraction; the 1s timespec suggests seconds -- TODO confirm) for binlog
  setup to complete, waking every second on COND_ndb_setup_complete.
  Returns 0 when setup completed, 1 otherwise.
*/
11651 static int ndb_wait_setup_func_impl(ulong max_wait)
11653 DBUG_ENTER(
"ndb_wait_setup_func_impl");
11655 pthread_mutex_lock(&ndbcluster_mutex);
11658 set_timespec(abstime, 1);
11660 while (!ndb_setup_complete && max_wait)
11662 int rc= pthread_cond_timedwait(&COND_ndb_setup_complete,
/* Timeout is the normal periodic wakeup; re-arm the 1s deadline. */
11667 if (rc == ETIMEDOUT)
11669 DBUG_PRINT(
"info", (
"1s elapsed waiting"));
11671 set_timespec(abstime, 1);
11675 DBUG_PRINT(
"info", (
"Bad pthread_cond_timedwait rc : %u",
11683 pthread_mutex_unlock(&ndbcluster_mutex);
11685 DBUG_RETURN((ndb_setup_complete == 1)? 0 : 1);
/* Function pointer exported to the server; set during ndbcluster_init. */
11688 int(*ndb_wait_setup_func)(ulong) = 0;
11690 extern int ndb_dictionary_is_mysqld;
/*
  ndbcluster_init(): plugin/handlerton initialization.  Sets up mutexes,
  condition variables and global flags, fills in the handlerton method
  table, connects to the cluster, starts the binlog injector, utility and
  index-statistics threads (waiting for each to report ready), and finally
  marks the engine initialized.  All failure paths tear down what was built
  and jump to ndbcluster_init_error, which disables the engine.
  NOTE(review): truncated extraction -- braces and some lines are missing.
*/
11692 static int ndbcluster_init(
void *p)
11694 DBUG_ENTER(
"ndbcluster_init");
11696 if (ndbcluster_inited)
11697 DBUG_RETURN(FALSE);
11699 pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST);
11700 pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST);
11701 pthread_cond_init(&COND_ndb_util_thread, NULL);
11702 pthread_cond_init(&COND_ndb_util_ready, NULL);
11703 pthread_cond_init(&COND_ndb_setup_complete, NULL);
/* -1 == "not yet reported"; threads set 0/1 once they start (or fail). */
11704 ndb_util_thread_running= -1;
11705 pthread_mutex_init(&LOCK_ndb_index_stat_thread, MY_MUTEX_INIT_FAST);
11706 pthread_cond_init(&COND_ndb_index_stat_thread, NULL);
11707 pthread_cond_init(&COND_ndb_index_stat_ready, NULL);
11708 pthread_mutex_init(&ndb_index_stat_list_mutex, MY_MUTEX_INIT_FAST);
11709 pthread_mutex_init(&ndb_index_stat_stat_mutex, MY_MUTEX_INIT_FAST);
11710 pthread_cond_init(&ndb_index_stat_stat_cond, NULL);
11711 ndb_index_stat_thread_running= -1;
11712 ndbcluster_terminating= 0;
11713 ndb_dictionary_is_mysqld= 1;
11714 ndb_setup_complete= 0;
11716 ndbcluster_global_schema_lock_init(ndbcluster_hton);
/* Populate the handlerton method table. */
11720 h->state= SHOW_OPTION_YES;
11721 h->db_type= DB_TYPE_NDBCLUSTER;
11722 h->close_connection= ndbcluster_close_connection;
11723 h->commit= ndbcluster_commit;
11724 h->rollback= ndbcluster_rollback;
11725 h->create= ndbcluster_create_handler;
11726 h->drop_database= ndbcluster_drop_database;
11727 h->panic= ndbcluster_end;
11728 h->show_status= ndbcluster_show_status;
11729 h->alter_tablespace= ndbcluster_alter_tablespace;
11730 h->partition_flags= ndbcluster_partition_flags;
11731 h->alter_table_flags=
11732 ndbcluster_alter_table_flags;
11733 #if MYSQL_VERSION_ID >= 50501
11734 h->fill_is_table= ndbcluster_fill_is_table;
11736 h->fill_files_table= ndbcluster_fill_files_table;
11738 ndbcluster_binlog_init_handlerton();
11739 h->flags= HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED;
11740 h->discover= ndbcluster_discover;
11741 h->find_files= ndbcluster_find_files;
11742 h->table_exists_in_engine= ndbcluster_table_exists_in_engine;
11743 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
11744 h->make_pushed_join= ndbcluster_make_pushed_join;
11749 ndb_init_internal();
/* Connect to the management server(s); low bit of the THDVAR selects
   optimized node selection. */
11752 const uint global_opti_node_select= THDVAR(NULL, optimized_node_selection);
11753 if (ndbcluster_connect(connect_callback, opt_ndb_wait_connected,
11754 opt_ndb_cluster_connection_pool,
11755 (global_opti_node_select & 1),
11756 opt_ndb_connectstring,
11759 DBUG_PRINT(
"error", (
"Could not initiate connection to cluster"));
11760 goto ndbcluster_init_error;
11763 (void) my_hash_init(&ndbcluster_open_tables,table_alias_charset,32,0,0,
11764 (my_hash_get_key) ndbcluster_get_key,0,0);
11766 if (ndbcluster_binlog_start())
11768 DBUG_PRINT(
"error", (
"Could start the injector thread"));
11769 goto ndbcluster_init_error;
/* Utility thread: create, then wait for it to report ready (or dead). */
11774 if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0))
11776 DBUG_PRINT(
"error", (
"Could not create ndb utility thread"));
11777 my_hash_free(&ndbcluster_open_tables);
11778 pthread_mutex_destroy(&ndbcluster_mutex);
11779 pthread_mutex_destroy(&LOCK_ndb_util_thread);
11780 pthread_cond_destroy(&COND_ndb_util_thread);
11781 pthread_cond_destroy(&COND_ndb_util_ready);
11782 pthread_cond_destroy(&COND_ndb_setup_complete);
11783 goto ndbcluster_init_error;
11787 pthread_mutex_lock(&LOCK_ndb_util_thread);
11788 while (ndb_util_thread_running < 0)
11789 pthread_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread);
11790 pthread_mutex_unlock(&LOCK_ndb_util_thread);
11792 if (!ndb_util_thread_running)
11794 DBUG_PRINT(
"error", (
"ndb utility thread exited prematurely"));
11795 my_hash_free(&ndbcluster_open_tables);
11796 pthread_mutex_destroy(&ndbcluster_mutex);
11797 pthread_mutex_destroy(&LOCK_ndb_util_thread);
11798 pthread_cond_destroy(&COND_ndb_util_thread);
11799 pthread_cond_destroy(&COND_ndb_util_ready);
11800 pthread_cond_destroy(&COND_ndb_setup_complete);
11801 goto ndbcluster_init_error;
/* Index-statistics thread: same create-and-wait pattern. */
11806 if (pthread_create(&tmp2, &connection_attrib, ndb_index_stat_thread_func, 0))
11808 DBUG_PRINT(
"error", (
"Could not create ndb index statistics thread"));
11809 my_hash_free(&ndbcluster_open_tables);
11810 pthread_mutex_destroy(&ndbcluster_mutex);
11811 pthread_mutex_destroy(&LOCK_ndb_index_stat_thread);
11812 pthread_cond_destroy(&COND_ndb_index_stat_thread);
11813 pthread_cond_destroy(&COND_ndb_index_stat_ready);
11814 pthread_mutex_destroy(&ndb_index_stat_list_mutex);
11815 pthread_mutex_destroy(&ndb_index_stat_stat_mutex);
11816 pthread_cond_destroy(&ndb_index_stat_stat_cond);
11817 goto ndbcluster_init_error;
11821 pthread_mutex_lock(&LOCK_ndb_index_stat_thread);
11822 while (ndb_index_stat_thread_running < 0)
11823 pthread_cond_wait(&COND_ndb_index_stat_ready, &LOCK_ndb_index_stat_thread);
11824 pthread_mutex_unlock(&LOCK_ndb_index_stat_thread);
11826 if (!ndb_index_stat_thread_running)
11828 DBUG_PRINT(
"error", (
"ndb index statistics thread exited prematurely"));
11829 my_hash_free(&ndbcluster_open_tables);
11830 pthread_mutex_destroy(&ndbcluster_mutex);
11831 pthread_mutex_destroy(&LOCK_ndb_index_stat_thread);
11832 pthread_cond_destroy(&COND_ndb_index_stat_thread);
11833 pthread_cond_destroy(&COND_ndb_index_stat_ready);
11834 pthread_mutex_destroy(&ndb_index_stat_list_mutex);
11835 pthread_mutex_destroy(&ndb_index_stat_stat_mutex);
11836 pthread_cond_destroy(&ndb_index_stat_stat_cond);
11837 goto ndbcluster_init_error;
11840 #ifndef NDB_NO_WAIT_SETUP
11841 ndb_wait_setup_func= ndb_wait_setup_func_impl;
11844 memset(&g_slave_api_client_stats, 0,
sizeof(g_slave_api_client_stats));
11846 ndbcluster_inited= 1;
11847 DBUG_RETURN(FALSE);
11849 ndbcluster_init_error:
11851 ndbcluster_disconnect();
11852 ndbcluster_hton->state= SHOW_OPTION_DISABLED;
11854 ndbcluster_global_schema_lock_deinit();
/* get_share_state_string(): NDB_SHARE_STATE -> human-readable name for log
   messages.  NOTE(review): the switch/case lines were elided by extraction;
   only the returns remain. */
11862 get_share_state_string(NDB_SHARE_STATE s)
11866 return "NSS_INITIAL";
11868 return "NSS_ALTERED";
11870 return "NSS_DROPPED";
11873 return "<unknown>";
11877 int ndbcluster_binlog_end(THD *thd);
/*
  ndbcluster_end(): handlerton panic/shutdown hook.  Stops the index-stat
  thread, ends binlogging, force-frees any leaked NDB_SHARE entries (logging
  each), disconnects from the cluster and destroys all sync primitives.
  NOTE(review): truncated extraction -- braces and some lines are missing.
*/
11879 static int ndbcluster_end(
handlerton *hton, ha_panic_function type)
11881 DBUG_ENTER(
"ndbcluster_end");
11883 if (!ndbcluster_inited)
11885 ndbcluster_inited= 0;
/* Signal the index-stat thread to terminate and wait for it. */
11888 sql_print_information(
"Stopping Cluster Index Statistics thread");
11889 pthread_mutex_lock(&LOCK_ndb_index_stat_thread);
11890 ndbcluster_terminating= 1;
11891 pthread_cond_signal(&COND_ndb_index_stat_thread);
11892 while (ndb_index_stat_thread_running > 0)
11893 pthread_cond_wait(&COND_ndb_index_stat_ready, &LOCK_ndb_index_stat_thread);
11894 pthread_mutex_unlock(&LOCK_ndb_index_stat_thread);
11897 ndbcluster_binlog_end(NULL);
/* Any share still registered at shutdown is a leak: log and force-free.
   `save` only feeds the debug assert below. */
11900 pthread_mutex_lock(&ndbcluster_mutex);
11901 uint save = ndbcluster_open_tables.records; (void)save;
11902 while (ndbcluster_open_tables.records)
11905 (
NDB_SHARE*) my_hash_element(&ndbcluster_open_tables, 0);
11908 "NDB: table share %s with use_count %d state: %s(%u) not freed\n",
11909 share->key, share->use_count,
11910 get_share_state_string(share->state),
11911 (uint)share->state);
11913 ndbcluster_real_free_share(&share);
11915 pthread_mutex_unlock(&ndbcluster_mutex);
11916 DBUG_ASSERT(save == 0);
11918 my_hash_free(&ndbcluster_open_tables);
11920 ndb_index_stat_end();
11921 ndbcluster_disconnect();
11923 ndbcluster_global_schema_lock_deinit();
11926 ndb_end_internal();
11928 pthread_mutex_destroy(&ndbcluster_mutex);
11929 pthread_mutex_destroy(&LOCK_ndb_util_thread);
11930 pthread_cond_destroy(&COND_ndb_util_thread);
11931 pthread_cond_destroy(&COND_ndb_util_ready);
11932 pthread_cond_destroy(&COND_ndb_setup_complete);
11933 pthread_mutex_destroy(&LOCK_ndb_index_stat_thread);
11934 pthread_cond_destroy(&COND_ndb_index_stat_thread);
11935 pthread_cond_destroy(&COND_ndb_index_stat_ready);
/*
  Body of ha_ndbcluster::print_error (per the DBUG_ENTER tag): special-cases
  "no partition found" and a duplicate-key error arriving without a valid
  TABLE, mapping the latter to ER_DUP_KEY directly.
  NOTE(review): truncated extraction -- the signature and fallthrough to the
  base handler::print_error are missing.
*/
11942 DBUG_ENTER(
"ha_ndbcluster::print_error");
11943 DBUG_PRINT(
"enter", (
"error: %d", error));
11945 if (error == HA_ERR_NO_PARTITION_FOUND)
11946 m_part_info->print_no_partition_found(table);
/* Dup-key with no usable TABLE object: report via my_error directly. */
11949 if (error == HA_ERR_FOUND_DUPP_KEY &&
11950 (table == NULL || table->file == NULL))
11963 my_error(ER_DUP_KEY, errflag, table_share->table_name.str, error);
/*
  ndbcluster_print_error(): static helper to report an NDB error when no
  handler instance is available.  Builds a dummy TABLE_SHARE naming the
  failing operation's table so the generic error printer has context.
  NOTE(review): truncated extraction -- the temporary handler construction
  is missing.
*/
11978 void ndbcluster_print_error(
int error,
const NdbOperation *error_op)
11980 DBUG_ENTER(
"ndbcluster_print_error");
11982 const char *tab_name= (error_op) ? error_op->
getTableName() :
"";
/* getTableName() contract reviewed: guard anyway, assert in debug. */
11983 if (tab_name == NULL)
11985 DBUG_ASSERT(tab_name != NULL);
11988 share.db.str= (
char*)
"";
11989 share.db.length= 0;
11990 share.table_name.str= (
char *) tab_name;
11991 share.table_name.length= strlen(tab_name);
11993 error_handler.print_error(error, MYF(0));
/*
  Extract the database name from a table path of the form
  ".../<db>/<table>" (or with backslashes) into dbname, decoding the
  filesystem encoding with filename_to_tablename().
  Scans backwards over two path separators; the component between them
  is the database directory.
  NOTE(review): extraction-damaged block -- the bodies of both scan
  loops and the "end= ptr" assignment between them are missing here.
*/
12001 void ha_ndbcluster::set_dbname(
const char *path_name,
char *dbname)
12003 char *end, *ptr, *tmp_name;
12004 char tmp_buff[FN_REFLEN + 1];
12006 tmp_name= tmp_buff;
/* Scan from the end of the path past the table-name component. */
12008 ptr= strend(path_name)-1;
12009 while (ptr >= path_name && *ptr !=
'\\' && *ptr !=
'/') {
/* Scan further back to the separator preceding the db component. */
12014 while (ptr >= path_name && *ptr !=
'\\' && *ptr !=
'/') {
12017 uint name_len= end - ptr;
12018 memcpy(tmp_name, ptr + 1, name_len);
12019 tmp_name[name_len]=
'\0';
/* Decode the directory name into the caller-supplied buffer. */
12020 filename_to_tablename(tmp_name, dbname,
sizeof(tmp_buff) - 1);
12027 void ha_ndbcluster::set_dbname(
const char *path_name)
12029 set_dbname(path_name, m_dbname);
/*
  Extract the table name (last path component) from a table path into
  tabname, decoding the filesystem encoding with
  filename_to_tablename().
  NOTE(review): extraction-damaged block -- the return-type line, the
  "ptr= end" initialisation and the scan-loop body are missing here.
*/
12037 ha_ndbcluster::set_tabname(
const char *path_name,
char * tabname)
12039 char *end, *ptr, *tmp_name;
12040 char tmp_buff[FN_REFLEN + 1];
12042 tmp_name= tmp_buff;
/* Scan backwards from the end of the path to the last separator. */
12044 end= strend(path_name)-1;
12046 while (ptr >= path_name && *ptr !=
'\\' && *ptr !=
'/') {
12049 uint name_len= end - ptr;
12050 memcpy(tmp_name, ptr + 1, end - ptr);
12051 tmp_name[name_len]=
'\0';
12052 filename_to_tablename(tmp_name, tabname,
sizeof(tmp_buff) - 1);
12059 void ha_ndbcluster::set_tabname(
const char *path_name)
12061 set_tabname(path_name, m_tabname);
/*
  Compile-time switch used by records_in_range(): when true, the
  tree-dive code path (NdbIndexStat::records_in_range) is taken instead
  of relying purely on collected index statistics. Kept false by
  default. (Rejoined the declaration that the mangled extraction had
  split across lines; no semantic change.)
*/
static const bool g_ndb_records_in_range_tree_dive= false;
/*
  Estimate the number of rows between min_key and max_key for index inx.
  Strategy, as visible in this (extraction-damaged) view:
    1. Unique/PK lookups with partial keys cannot be estimated -> error.
    2. A fully-specified equal range on a unique index matches <= 1 row.
    3. For ordered indexes, prefer collected index statistics
       (ndb_index_stat_get_rir); optionally do a real tree dive when
       g_ndb_records_in_range_tree_dive is set.
    4. Otherwise fall back to heuristics scaled from the table row
       count (update_stats refreshes it when unknown).
  NOTE(review): many original lines (braces, else-branches, several
  statements) are missing from this extraction; numeric prefixes are
  artifacts.
*/
12073 ha_ndbcluster::records_in_range(uint inx,
key_range *min_key,
12076 KEY *key_info= table->key_info + inx;
12078 NDB_INDEX_TYPE idx_type= get_index_type(inx);
12080 DBUG_ENTER(
"records_in_range");
/* Partial key on a hash-based unique/PK index: cannot estimate. */
12082 if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
12083 ((min_key && min_key->length < key_length) ||
12084 (max_key && max_key->length < key_length)))
12085 DBUG_RETURN(HA_POS_ERROR);
/* Complete equal bounds on a unique index: at most one row. */
12089 if ((idx_type != ORDERED_INDEX) &&
12090 ((min_key && min_key->length == key_length) &&
12091 (max_key && max_key->length == key_length) &&
12092 (min_key->key==max_key->key ||
12093 memcmp(min_key->key, max_key->key, key_length)==0)))
12097 if ((idx_type == PRIMARY_KEY_ORDERED_INDEX ||
12098 idx_type == UNIQUE_ORDERED_INDEX ||
12099 idx_type == ORDERED_INDEX))
12101 THD *thd= current_thd;
/* Index statistics must be enabled both globally and per-session. */
12102 const bool index_stat_enable= THDVAR(NULL, index_stat_enable) &&
12103 THDVAR(thd, index_stat_enable);
12105 if (index_stat_enable)
12107 ha_rows rows= HA_POS_ERROR;
12108 int err= ndb_index_stat_get_rir(inx, min_key, max_key, &rows);
/* Warn only on unexpected errors; "no stats yet" is normal. */
12121 err != NdbIndexStat::NoIndexStats &&
12123 err != Ndb_index_stat_error_HAS_ERROR)
12125 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
12127 "index stats (RIR) for key %s:"
12128 " unexpected error %d",
12129 key_info->
name, err);
/* Optional debug path: ask the data nodes directly via a tree dive. */
12134 if (g_ndb_records_in_range_tree_dive)
12138 Ndb *ndb= get_ndb(thd);
12146 if ((trans=active_trans) == NULL ||
12149 DBUG_PRINT(
"info", (
"no active trans"));
12156 compute_index_bounds(ib,
12165 if (is.records_in_range(index,
12173 ERR_BREAK(is.getNdbError(), res);
12176 if (trans != active_trans && rows == 0)
12178 if (trans != active_trans && trans != NULL)
/* Heuristic fallback: refresh table row count if unknown/zero. */
12191 if (
stats.records == ~(ha_rows)0 ||
stats.records == 0)
12194 THD *thd= current_thd;
12195 if (update_stats(thd, THDVAR(thd, use_exact_count)))
12200 Uint64 table_rows=
stats.records;
12201 size_t eq_bound_len= 0;
12202 size_t min_key_length= (min_key) ? min_key->length : 0;
12203 size_t max_key_length= (max_key) ? max_key->length : 0;
/* No lower bound: full range or open-ended guesses. */
12207 if (!min_key_length)
12209 rows= (!max_key_length)
12214 else if (!max_key_length)
12216 rows= table_rows/10;
/* Count the leading key parts on which both bounds agree. */
12220 size_t bounds_len= MIN(min_key_length,max_key_length);
12221 uint eq_bound_len= 0;
12222 uint eq_bound_offs= 0;
12226 for (; key_part != end; key_part++)
12228 uint part_length= key_part->store_length;
12229 if (eq_bound_offs+part_length > bounds_len ||
12230 memcmp(&min_key->key[eq_bound_offs],
12231 &max_key->key[eq_bound_offs],
12236 eq_bound_len+= key_part->length;
12237 eq_bound_offs+= part_length;
/* No common prefix: generic range guess. */
12242 rows= table_rows/20;
/* Scale estimate by the fraction of the key that is an equality. */
12251 double eq_fraction = (double)(eq_bound_len) / key_length;
12252 if (idx_type == ORDERED_INDEX)
12253 eq_fraction/= 1.20;
12254 if (eq_fraction >= 1.0)
12257 rows = (Uint64)((
double)table_rows / pow((
double)table_rows, eq_fraction));
12258 if (rows > (table_rows/50))
12259 rows= (table_rows/50);
/* Extra (non-equal) bound parts narrow the range further. */
12261 if (min_key_length > eq_bound_offs)
12263 if (max_key_length > eq_bound_offs)
/* Never estimate below 2 rows for an equality prefix (optimizer
   treats <2 as a unique lookup). */
12269 if (eq_bound_len && rows < 2)
12273 DBUG_RETURN(MIN(rows,table_rows));
/*
  Return the handler capability flags for NDB tables.
  The base flag set is adjusted per-session: under statement-based
  binlogging NDB claims statement capability and drops its own
  binlogging; with join pushdown enabled, const-table optimization is
  blocked so pushed joins stay intact.
  NOTE(review): extraction-damaged block -- the declaration of `f`,
  several flag lines and the return statement are missing here.
*/
12279 ulonglong ha_ndbcluster::table_flags(
void)
const
12281 THD *thd= current_thd;
12283 HA_REC_NOT_IN_SEQ |
12286 HA_NO_PREFIX_CHAR_KEYS |
12287 #ifndef NDB_WITH_NEW_MRR_INTERFACE
12288 HA_NEED_READ_RANGE_BUFFER |
12292 HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
12293 HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
12294 HA_PARTIAL_COLUMN_READ |
12295 HA_HAS_OWN_BINLOGGING |
12296 HA_BINLOG_ROW_CAPABLE |
12298 #ifndef NDB_WITHOUT_ONLINE_ALTER
/* Under SBR, claim statement capability and let the server binlog. */
12307 if (thd->variables.binlog_format == BINLOG_FORMAT_STMT)
12308 f= (f | HA_BINLOG_STMT_CAPABLE) & ~HA_HAS_OWN_BINLOGGING;
/* Keep pushed joins intact: block const-table optimization. */
12314 if (THDVAR(thd, join_pushdown))
12315 f= f | HA_BLOCK_CONST_TABLE;
/* Returns the storage-engine name string.
   NOTE(review): the enclosing function header (presumably
   ha_ndbcluster::table_type()) is missing from this extraction. */
12322 return(
"NDBCLUSTER");
12324 uint ha_ndbcluster::max_supported_record_length()
const
12326 return NDB_MAX_TUPLE_SIZE;
/* Maximum number of indexes per NDB table.
   NOTE(review): the function body (return statement and braces) is
   missing from this extraction -- do not infer the value from here. */
12328 uint ha_ndbcluster::max_supported_keys()
const
12332 uint ha_ndbcluster::max_supported_key_parts()
const
12334 return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY;
12336 uint ha_ndbcluster::max_supported_key_length()
const
12338 return NDB_MAX_KEY_SIZE;
12340 uint ha_ndbcluster::max_supported_key_part_length()
const
12342 return NDB_MAX_KEY_SIZE;
/* Whether row data is stored little-endian-first; result depends on
   host endianness (WORDS_BIGENDIAN).
   NOTE(review): both branch bodies are missing from this extraction. */
12344 bool ha_ndbcluster::low_byte_first()
const
12346 #ifdef WORDS_BIGENDIAN
/*
  Return a human-readable index-type name ("BTREE"/"HASH" style) for
  key key_number, dispatching on the internal NDB index type.
  NOTE(review): the return statements for each case are missing from
  this extraction.
*/
12352 const char* ha_ndbcluster::index_type(uint key_number)
12354 switch (get_index_type(key_number)) {
12355 case ORDERED_INDEX:
12356 case UNIQUE_ORDERED_INDEX:
12357 case PRIMARY_KEY_ORDERED_INDEX:
12360 case PRIMARY_KEY_INDEX:
/* Query-cache behaviour: NDB must be asked per-transaction whether a
   cached result may be used (commit counts can change underneath us).
   NOTE(review): the function header is missing from this extraction. */
12368 DBUG_ENTER(
"ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT");
12369 DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT);
/*
  Fetch the commit count for a table, used by the query cache to detect
  changes. Looks the table up in ndbcluster_open_tables; if
  ndb-cache-check-time is active and the share holds a cached non-zero
  commit count, that is returned. Otherwise the value is read from the
  data nodes via ndb_get_table_statistics() and stored back in the
  share unless commit_count_lock changed meanwhile (a local commit
  invalidated it).
  Returns 0 on success, 1 on failure. The share's use_count is bumped
  for the duration and released with free_share().
  NOTE(review): extraction-damaged block -- braces, return statements
  and some declarations (ndb, stat, ndbtab_g) are missing here.
*/
12383 uint ndb_get_commitcount(THD *thd,
char *norm_name,
12384 Uint64 *commit_count)
12386 char dbname[NAME_LEN + 1];
12388 DBUG_ENTER(
"ndb_get_commitcount");
12390 DBUG_PRINT(
"enter", (
"name: %s", norm_name));
12391 pthread_mutex_lock(&ndbcluster_mutex);
12392 if (!(share=(
NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
12393 (
const uchar*) norm_name,
12394 strlen(norm_name))))
12396 pthread_mutex_unlock(&ndbcluster_mutex);
12397 DBUG_PRINT(
"info", (
"Table %s not found in ndbcluster_open_tables",
/* Pin the share while we work with it. */
12402 share->use_count++;
12403 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
12404 share->key, share->use_count));
12405 pthread_mutex_unlock(&ndbcluster_mutex);
12407 pthread_mutex_lock(&share->mutex);
/* Fast path: serve the cached commit count from the share. */
12408 if (opt_ndb_cache_check_time > 0)
12410 if (share->commit_count != 0)
12412 *commit_count= share->commit_count;
12416 DBUG_PRINT(
"info", (
"Getting commit_count: %s from share",
12417 llstr(share->commit_count, buff)));
12418 pthread_mutex_unlock(&share->mutex);
12420 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
12421 share->key, share->use_count));
12422 free_share(&share);
/* Slow path: read the commit count from the data nodes. */
12426 DBUG_PRINT(
"info", (
"Get commit_count from NDB"));
12428 if (!(ndb= check_ndb_in_thd(thd)))
12431 ha_ndbcluster::set_dbname(norm_name, dbname);
/* Snapshot the lock counter; a local commit will bump it. */
12436 uint lock= share->commit_count_lock;
12437 pthread_mutex_unlock(&share->mutex);
12441 char tblname[NAME_LEN + 1];
12442 ha_ndbcluster::set_tabname(norm_name, tblname);
12444 if (ndbtab_g.get_table() == 0
12445 || ndb_get_table_statistics(thd, NULL,
12448 ndbtab_g.get_table()->getDefaultRecord(),
12452 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
12453 share->key, share->use_count));
12454 free_share(&share);
12459 pthread_mutex_lock(&share->mutex);
/* Only cache the fetched value if no commit happened meanwhile. */
12460 if (share->commit_count_lock == lock)
12465 DBUG_PRINT(
"info", (
"Setting commit_count to %s",
12466 llstr(stat.commit_count, buff)));
12467 share->commit_count= stat.commit_count;
12468 *commit_count= stat.commit_count;
12472 DBUG_PRINT(
"info", (
"Discarding commit_count, comit_count_lock changed"));
12475 pthread_mutex_unlock(&share->mutex);
12477 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
12478 share->key, share->use_count));
12479 free_share(&share);
/*
  Query-cache callback: decide whether a cached result for this table
  may be served. Refuses inside explicit/multi-statement transactions,
  when this transaction itself changed the table, when the commit count
  cannot be fetched, when it is 0 (a local commit is pending binlog),
  or when it differs from the count recorded with the cached result
  (*engine_data). Otherwise the cache entry is still valid.
  NOTE(review): extraction-damaged block -- the return type, braces,
  the `share`/`it` declarations and the final DBUG_RETURN(TRUE) are
  missing from this view.
*/
12513 ndbcluster_cache_retrieval_allowed(THD *thd,
12514 char *full_name, uint full_name_len,
12515 ulonglong *engine_data)
12517 Uint64 commit_count;
12518 char dbname[NAME_LEN + 1];
12519 char tabname[NAME_LEN + 1];
12521 char buff[22], buff2[22];
12524 ha_ndbcluster::set_dbname(full_name, dbname);
12525 ha_ndbcluster::set_tabname(full_name, tabname);
12527 DBUG_ENTER(
"ndbcluster_cache_retrieval_allowed");
12528 DBUG_PRINT(
"enter", (
"dbname: %s, tabname: %s",
/* Never serve cached results inside an open transaction. */
12531 if (thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
12535 if (!check_ndb_in_thd(thd))
12536 DBUG_RETURN(FALSE);
12537 Thd_ndb *thd_ndb= get_thd_ndb(thd);
/* This transaction modified tables: check if ours is among them. */
12538 if (!thd_ndb->changed_tables.is_empty())
12542 while ((share= it++))
12544 if (strcmp(share->table_name, tabname) == 0 &&
12545 strcmp(share->db, dbname) == 0)
12547 DBUG_PRINT(
"exit", (
"No, transaction has changed table"));
12548 DBUG_RETURN(FALSE);
12554 if (ndb_get_commitcount(thd, full_name, &commit_count))
12557 DBUG_PRINT(
"exit", (
"No, could not retrieve commit_count"));
12558 DBUG_RETURN(FALSE);
12560 DBUG_PRINT(
"info", (
"*engine_data: %s, commit_count: %s",
12561 llstr(*engine_data, buff), llstr(commit_count, buff2)));
/* commit_count 0 means a local commit not yet in the binlog. */
12562 if (commit_count == 0)
12565 DBUG_PRINT(
"exit", (
"No, local commit has been performed"));
12566 DBUG_RETURN(FALSE);
/* Table changed since the result was cached: update and refuse. */
12568 else if (*engine_data != commit_count)
12570 *engine_data= commit_count;
12571 DBUG_PRINT(
"exit", (
"No, commit_count has changed"));
12572 DBUG_RETURN(FALSE);
12575 DBUG_PRINT(
"exit", (
"OK to use cache, engine_data: %s",
12576 llstr(*engine_data, buff)));
/*
  ha_ndbcluster::register_query_cache_table() fragment: register this
  table with the query cache. Refuses inside open transactions or when
  this transaction changed the table; otherwise records the current
  commit count in *engine_data and installs
  ndbcluster_cache_retrieval_allowed as the validity callback.
  Returns TRUE (cacheable) only when the commit count is non-zero.
  NOTE(review): the function header line and several braces are missing
  from this extraction.
*/
12604 char *full_name, uint full_name_len,
12605 qc_engine_callback *engine_callback,
12606 ulonglong *engine_data)
12608 Uint64 commit_count;
12612 DBUG_ENTER(
"ha_ndbcluster::register_query_cache_table");
12613 DBUG_PRINT(
"enter",(
"dbname: %s, tabname: %s",
12614 m_dbname, m_tabname));
/* Don't register while inside an open transaction. */
12616 if (thd_options(thd) & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
12620 Thd_ndb *thd_ndb= get_thd_ndb(thd);
12621 if (!thd_ndb->changed_tables.is_empty())
12623 DBUG_ASSERT(m_share);
12626 while ((share= it++))
12628 if (m_share == share)
12630 DBUG_PRINT(
"exit", (
"No, transaction has changed table"));
12631 DBUG_RETURN(FALSE);
12637 if (ndb_get_commitcount(thd, full_name, &commit_count))
12640 DBUG_PRINT(
"exit", (
"Error, could not get commitcount"));
12641 DBUG_RETURN(FALSE);
12643 *engine_data= commit_count;
12644 *engine_callback= ndbcluster_cache_retrieval_allowed;
12645 DBUG_PRINT(
"exit", (
"commit_count: %s", llstr(commit_count, buff)));
12646 DBUG_RETURN(commit_count > 0);
12659 static uchar *ndbcluster_get_key(
NDB_SHARE *share,
size_t *length,
12660 my_bool not_used __attribute__((unused)))
12662 *length= share->key_length;
12663 return (uchar*) share->key;
/*
  Debug helper: dump an NDB_SHARE (key, use count, commit count and,
  when present, the binlog event data's shadow table) to the debug
  trace.
  NOTE(review): extraction-damaged block -- the fprintf(DBUG_FILE,...)
  call heads, braces and the event_data declaration are missing here.
*/
12669 static void print_share(
const char* where,
NDB_SHARE* share)
12672 "%s %s.%s: use_count: %u, commit_count: %lu\n",
12673 where, share->db, share->table_name, share->use_count,
12674 (ulong) share->commit_count);
12676 " - key: %s, key_length: %d\n",
12677 share->key, share->key_length);
/* Prefer the share's own event_data; fall back to the operation's. */
12680 if (share->event_data)
12681 event_data= share->event_data;
12682 else if (share->op)
12687 " - event_data->shadow_table: %p %s.%s\n",
12688 event_data->shadow_table, event_data->shadow_table->s->db.str,
12689 event_data->shadow_table->s->table_name.str);
/*
  Debug helper: dump every NDB_SHARE currently registered in
  ndbcluster_open_tables via print_share(), bracketed by marker lines.
  NOTE(review): braces and the print_share() call line are missing
  from this extraction.
*/
12694 static void print_ndbcluster_open_tables()
12697 fprintf(DBUG_FILE,
">ndbcluster_open_tables\n");
12698 for (uint i= 0; i < ndbcluster_open_tables.records; i++)
12700 (
NDB_SHARE*)my_hash_element(&ndbcluster_open_tables, i));
12701 fprintf(DBUG_FILE,
"<ndbcluster_open_tables\n");
/* Debug-build wrappers: expand to the corresponding print helper only
   when the "info" DBUG keyword is active; no-ops otherwise.
   NOTE(review): dbug_print_share appears truncated (trailing
   continuation backslash with its last line missing). */
12708 #define dbug_print_open_tables() \
12709 DBUG_EXECUTE("info", \
12710 print_ndbcluster_open_tables(););
12712 #define dbug_print_share(t, s) \
12714 DBUG_EXECUTE("info", \
12715 print_share((t), (s));); \
/*
  Deal with an NDB_SHARE that still has users when its table is being
  dropped/renamed. Called with ndbcluster_mutex held. Tries, in order:
    1. close cached MySQL tables referencing it (may drop the last ref),
    2. mark it NSS_DROPPED and release the "open" reference,
    3. as a last resort, rename the share's hash key to a unique
       "#leak<N>" key and re-insert it so a new share can take the name
       (accepting a possible memory leak).
  NOTE(review): extraction-damaged block -- braces, DBUG_RETURN lines
  and some statements are missing; numeric prefixes are artifacts.
*/
12729 int handle_trailing_share(THD *thd,
NDB_SHARE *share)
12731 static ulong trailing_share_id= 0;
12732 DBUG_ENTER(
"handle_trailing_share");
/* Keep the share alive while we drop the mutex below. */
12735 ++share->use_count;
12736 if (opt_ndb_extra_logging > 9)
12737 sql_print_information (
"handle_trailing_share: %s use_count: %u", share->key, share->use_count);
12738 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
12739 share->key, share->use_count));
12740 pthread_mutex_unlock(&ndbcluster_mutex);
/* Force-close cached TABLE objects that may pin this share. */
12743 memset(&table_list, 0,
sizeof(table_list));
12744 table_list.db= share->db;
12745 table_list.alias= table_list.table_name= share->table_name;
12746 close_cached_tables(thd, &table_list, TRUE, FALSE, FALSE);
12748 pthread_mutex_lock(&ndbcluster_mutex);
12750 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
12751 share->key, share->use_count));
/* Our temporary ref was the last one: the share can be freed now. */
12752 if (!--share->use_count)
12754 if (opt_ndb_extra_logging > 9)
12755 sql_print_information (
"handle_trailing_share: %s use_count: %u", share->key, share->use_count);
12756 if (opt_ndb_extra_logging)
12757 sql_print_information(
"NDB_SHARE: trailing share "
12758 "%s(connect_count: %u) "
12759 "released by close_cached_tables at "
12760 "connect_count: %u",
12762 share->connect_count,
12763 g_ndb_cluster_connection->get_connect_count());
12764 ndbcluster_real_free_share(&share);
12767 if (opt_ndb_extra_logging > 9)
12768 sql_print_information (
"handle_trailing_share: %s use_count: %u", share->key, share->use_count);
/* Mark dropped and give up the reference held for the open table. */
12774 if (share->state != NSS_DROPPED)
12776 share->state= NSS_DROPPED;
12778 DBUG_PRINT(
"NDB_SHARE", (
"%s create free use_count: %u",
12779 share->key, share->use_count));
12780 --share->use_count;
12781 if (opt_ndb_extra_logging > 9)
12782 sql_print_information (
"handle_trailing_share: %s use_count: %u", share->key, share->use_count);
12784 if (share->use_count == 0)
12786 if (opt_ndb_extra_logging)
12787 sql_print_information(
"NDB_SHARE: trailing share "
12788 "%s(connect_count: %u) "
12789 "released after NSS_DROPPED check "
12790 "at connect_count: %u",
12792 share->connect_count,
12793 g_ndb_cluster_connection->get_connect_count());
12794 ndbcluster_real_free_share(&share);
12799 DBUG_PRINT(
"info", (
"NDB_SHARE: %s already exists use_count=%d, op=0x%lx.",
12800 share->key, share->use_count, (
long) share->op));
/* Only the util thread holding one ref is an expected leftover. */
12804 if (!((share->use_count == 1) && share->util_thread))
12806 #ifdef NDB_LOG_TRAILING_SHARE_ERRORS
12807 sql_print_warning(
"NDB_SHARE: %s already exists use_count=%d."
12808 " Moving away for safety, but possible memleak.",
12809 share->key, share->use_count);
12812 dbug_print_open_tables();
12818 DBUG_ASSERT(FALSE);
/* Rename the stuck share to a unique "#leak" key so the original
   key becomes available again. */
12826 my_hash_delete(&ndbcluster_open_tables, (uchar*) share);
12833 const uint min_key_length= 10;
12834 if (share->key_length < min_key_length)
12836 share->key= (
char*) alloc_root(&share->mem_root, min_key_length + 1);
12837 share->key_length= min_key_length;
12840 my_snprintf(share->key, min_key_length + 1,
"#leak%lu",
12841 trailing_share_id++);
12844 my_hash_insert(&ndbcluster_open_tables, (uchar*) share);
/*
  Stage a rename of an NDB_SHARE: allocate space for the new key (twice
  new_length+1 -- the buffer also backs the db/table-name pointers set
  up later by ndbcluster_rename_share()) and copy new_key into it.
  NOTE(review): braces and the return statement are missing from this
  extraction.
*/
12852 int ndbcluster_prepare_rename_share(
NDB_SHARE *share,
const char *new_key)
12858 uint new_length= (uint) strlen(new_key);
12859 share->new_key= (
char*) alloc_root(&share->mem_root, 2 * (new_length + 1));
12860 strmov(share->new_key, new_key);
/*
  Roll back a staged share rename: point new_key back at the saved old
  names and replay ndbcluster_rename_share() to restore them.
  NOTE(review): braces and the return statement are missing from this
  extraction.
*/
12864 int ndbcluster_undo_rename_share(THD *thd,
NDB_SHARE *share)
12866 share->new_key= share->old_names;
12867 ndbcluster_rename_share(thd, share);
/*
  Apply a staged rename (share->new_key) to an NDB_SHARE: re-key it in
  the ndbcluster_open_tables hash, recompute the db/table-name pointers
  from the new key, update any binlog shadow table's names, and save
  the old key in old_names so the rename can be undone.
  If a share already occupies the new key it is handed to
  handle_trailing_share() first; if the hash re-insert fails the old
  key is restored.
  NOTE(review): extraction-damaged block -- braces, returns and the
  `tmp`/`event_data` declarations are missing from this view.
*/
12871 int ndbcluster_rename_share(THD *thd,
NDB_SHARE *share)
12874 pthread_mutex_lock(&ndbcluster_mutex);
12875 uint new_length= (uint) strlen(share->new_key);
12876 DBUG_PRINT(
"ndbcluster_rename_share", (
"old_key: %s old__length: %d",
12877 share->key, share->key_length));
/* Evict any share already registered under the new key. */
12878 if ((tmp= (
NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
12879 (
const uchar*) share->new_key,
12881 handle_trailing_share(thd, tmp);
12884 my_hash_delete(&ndbcluster_open_tables, (uchar*) share);
12885 dbug_print_open_tables();
12888 uint old_length= share->key_length;
12889 char *old_key= share->key;
12891 share->key= share->new_key;
12892 share->key_length= new_length;
/* Re-insert under the new key; on failure restore the old key. */
12894 if (my_hash_insert(&ndbcluster_open_tables, (uchar*) share))
12897 DBUG_PRINT(
"error", (
"ndbcluster_rename_share: my_hash_insert %s failed",
12899 share->key= old_key;
12900 share->key_length= old_length;
12901 if (my_hash_insert(&ndbcluster_open_tables, (uchar*) share))
12903 sql_print_error(
"ndbcluster_rename_share: failed to recover %s", share->key);
12904 DBUG_PRINT(
"error", (
"ndbcluster_rename_share: my_hash_insert %s failed",
12907 dbug_print_open_tables();
12908 pthread_mutex_unlock(&ndbcluster_mutex);
12911 dbug_print_open_tables();
/* db and table_name live in the key buffer, after the key string. */
12913 share->db= share->key + new_length + 1;
12914 ha_ndbcluster::set_dbname(share->new_key, share->db);
12915 share->table_name= share->db + strlen(share->db) + 1;
12916 ha_ndbcluster::set_tabname(share->new_key, share->table_name);
12918 dbug_print_share(
"ndbcluster_rename_share:", share);
12920 if (share->event_data)
12921 event_data= share->event_data;
12922 else if (share->op)
/* Keep the binlog shadow table's names in sync (not for temp names). */
12924 if (event_data && event_data->shadow_table)
12926 if (!IS_TMP_PREFIX(share->table_name))
12928 event_data->shadow_table->s->db.str= share->db;
12929 event_data->shadow_table->s->db.length= strlen(share->db);
12930 event_data->shadow_table->s->table_name.str= share->table_name;
12931 event_data->shadow_table->s->table_name.length= strlen(share->table_name);
/* Remember the old key so the rename can be rolled back. */
12944 share->old_names= old_key;
12947 if (opt_ndb_extra_logging > 9)
12948 sql_print_information (
"ndbcluster_rename_share: %s-%s use_count: %u", old_key, share->key, share->use_count);
12950 pthread_mutex_unlock(&ndbcluster_mutex);
/* Reference-taking variant of ndbcluster_get_share(): bump an existing
   share's use_count under ndbcluster_mutex.
   NOTE(review): the function header and return are missing from this
   extraction. */
12960 pthread_mutex_lock(&ndbcluster_mutex);
12961 share->use_count++;
12963 dbug_print_open_tables();
12964 dbug_print_share(
"ndbcluster_get_share:", share);
12965 if (opt_ndb_extra_logging > 9)
12966 sql_print_information (
"ndbcluster_get_share: %s use_count: %u", share->key, share->use_count);
12967 pthread_mutex_unlock(&ndbcluster_mutex);
/*
  Look up (and optionally create) the NDB_SHARE for `key`
  ("./db/tablename"). On create: allocate the share zero-filled, give
  it its own MEM_ROOT, store key/db/table_name contiguously in one
  alloc_root buffer, insert into ndbcluster_open_tables, init its lock
  and mutex, and set up binlogging via ndbcluster_binlog_init_share().
  Returns the share with use_count incremented, or NULL on
  not-found/OOM/init failure.
  NOTE(review): extraction-damaged block -- the function header's first
  lines, braces, DBUG_RETURN(NULL) paths and the old_root save are
  missing from this view.
*/
12988 bool create_if_not_exists,
12992 uint length= (uint) strlen(key);
12993 DBUG_ENTER(
"ndbcluster_get_share");
12994 DBUG_PRINT(
"enter", (
"key: '%s'", key));
12997 pthread_mutex_lock(&ndbcluster_mutex);
12998 if (!(share= (
NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
12999 (
const uchar*) key,
13002 if (!create_if_not_exists)
13004 DBUG_PRINT(
"error", (
"get_share: %s does not exist", key));
13006 pthread_mutex_unlock(&ndbcluster_mutex);
13009 if ((share= (
NDB_SHARE*) my_malloc(
sizeof(*share),
13010 MYF(MY_WME | MY_ZEROFILL))))
/* Give the share its own MEM_ROOT and make it current while
   initialising. */
13013 my_pthread_getspecific_ptr(
MEM_ROOT**, THR_MALLOC);
13015 init_sql_alloc(&share->mem_root, 1024, 0);
13016 *root_ptr= &share->mem_root;
13018 share->state= NSS_INITIAL;
/* Buffer holds key plus room for db/table_name strings after it. */
13020 share->key= (
char*) alloc_root(*root_ptr, 2 * (length + 1));
13021 share->key_length= length;
13022 strmov(share->key, key);
13023 if (my_hash_insert(&ndbcluster_open_tables, (uchar*) share))
13025 free_root(&share->mem_root, MYF(0));
13026 my_free((uchar*) share, 0);
13027 *root_ptr= old_root;
13029 pthread_mutex_unlock(&ndbcluster_mutex);
13032 thr_lock_init(&share->lock);
13033 pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
13034 share->commit_count= 0;
13035 share->commit_count_lock= 0;
13036 share->db= share->key + length + 1;
13037 ha_ndbcluster::set_dbname(key, share->db);
13038 share->table_name= share->db + strlen(share->db) + 1;
13039 ha_ndbcluster::set_tabname(key, share->table_name);
13040 if (ndbcluster_binlog_init_share(current_thd, share, table))
13042 DBUG_PRINT(
"error", (
"get_share: %s could not init share", key));
13043 ndbcluster_real_free_share(&share);
13044 *root_ptr= old_root;
13046 pthread_mutex_unlock(&ndbcluster_mutex);
13049 *root_ptr= old_root;
13053 DBUG_PRINT(
"error", (
"get_share: failed to alloc share"));
13055 pthread_mutex_unlock(&ndbcluster_mutex);
13056 my_error(ER_OUTOFMEMORY, MYF(0), static_cast<int>(
sizeof(*share)));
/* Found or created: hand out a reference. */
13060 share->use_count++;
13061 if (opt_ndb_extra_logging > 9)
13062 sql_print_information (
"ndbcluster_get_share: %s use_count: %u", share->key, share->use_count);
13064 dbug_print_open_tables();
13065 dbug_print_share(
"ndbcluster_get_share:", share);
13067 pthread_mutex_unlock(&ndbcluster_mutex);
13068 DBUG_RETURN(share);
/*
  Actually destroy an NDB_SHARE: drop its index statistics, remove it
  from ndbcluster_open_tables, destroy its lock/mutex, release any
  binlog exception table and event data, then free the MEM_ROOT and
  the share itself. Caller must hold ndbcluster_mutex and own the last
  reference.
  NOTE(review): extraction-damaged block -- braces, the `dict`
  declaration and DBUG_VOID_RETURN are missing from this view.
*/
13072 void ndbcluster_real_free_share(
NDB_SHARE **share)
13074 DBUG_ENTER(
"ndbcluster_real_free_share");
13075 dbug_print_share(
"ndbcluster_real_free_share:", *share);
13077 if (opt_ndb_extra_logging > 9)
13078 sql_print_information (
"ndbcluster_real_free_share: %s use_count: %u", (*share)->key, (*share)->use_count);
13080 ndb_index_stat_free(*share);
13082 my_hash_delete(&ndbcluster_open_tables, (uchar*) *share);
13083 thr_lock_delete(&(*share)->lock);
13084 pthread_mutex_destroy(&(*share)->mutex);
/* Release the global ref on the conflict-exception table, if any. */
13086 #ifdef HAVE_NDB_BINLOG
13087 if ((*share)->m_cfn_share && (*share)->m_cfn_share->m_ex_tab && g_ndb)
13090 dict->removeTableGlobal(*(*share)->m_cfn_share->m_ex_tab, 0);
13091 (*share)->m_cfn_share->m_ex_tab= 0;
13094 (*share)->new_op= 0;
13095 if ((*share)->event_data)
13097 delete (*share)->event_data;
13098 (*share)->event_data= 0;
13100 free_root(&(*share)->mem_root, MYF(0));
13101 my_free((uchar*) *share, MYF(0));
13104 dbug_print_open_tables();
/*
  Release one reference to an NDB_SHARE; destroys it via
  ndbcluster_real_free_share() when the count reaches zero. Takes
  ndbcluster_mutex itself unless the caller already holds it
  (have_lock).
  NOTE(review): braces and the have_lock guard around the lock calls
  are missing from this extraction.
*/
13109 void ndbcluster_free_share(
NDB_SHARE **share,
bool have_lock)
13112 pthread_mutex_lock(&ndbcluster_mutex);
13113 if (!--(*share)->use_count)
13115 if (opt_ndb_extra_logging > 9)
13116 sql_print_information (
"ndbcluster_free_share: %s use_count: %u", (*share)->key, (*share)->use_count);
13117 ndbcluster_real_free_share(share);
13121 if (opt_ndb_extra_logging > 9)
13122 sql_print_information (
"ndbcluster_free_share: %s use_count: %u", (*share)->key, (*share)->use_count);
13123 dbug_print_open_tables();
13124 dbug_print_share(
"ndbcluster_free_share:", *share);
13127 pthread_mutex_unlock(&ndbcluster_mutex);
/* Row layout used when reading per-fragment NDB statistics.
   NOTE(review): the member list is missing from this extraction. */
13131 struct ndb_table_statistics_row {
/*
  Refresh the handler's `stats` fields. When do_read_stat is false and
  a share exists, the cached per-share statistics are used (asserted
  valid in debug); otherwise fresh statistics are fetched from the data
  nodes via ndb_get_table_statistics() and written back to the share.
  The row count is adjusted by this transaction's uncommitted-row delta
  before being published in stats.records.
  NOTE(review): extraction-damaged block -- braces, the `stat`
  declaration, several returns and the do_read_stat fetch guard are
  missing from this view.
*/
13139 int ha_ndbcluster::update_stats(THD *thd,
13145 Thd_ndb *thd_ndb= get_thd_ndb(thd);
13146 DBUG_ENTER(
"ha_ndbcluster::update_stats");
/* Cheap path: copy cached statistics from the share. */
13149 if (m_share && !do_read_stat)
13151 pthread_mutex_lock(&m_share->mutex);
13152 stat= m_share->stat;
13153 pthread_mutex_unlock(&m_share->mutex);
13155 DBUG_ASSERT(stat.row_count != ~(ha_rows)0);
13158 if (stat.row_count != ~(ha_rows)0)
/* Expensive path: scan statistics from the data nodes. */
13163 Ndb *ndb= thd_ndb->ndb;
13166 DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM);
13168 if (
int err= ndb_get_table_statistics(thd,
this, TRUE, ndb,
13169 m_ndb_record, &stat,
13170 have_lock, part_id))
/* Cache the fresh numbers in the share for other handlers. */
13178 pthread_mutex_lock(&m_share->mutex);
13179 m_share->stat= stat;
13180 pthread_mutex_unlock(&m_share->mutex);
/* Fold in rows inserted/deleted by this (uncommitted) transaction. */
13186 int no_uncommitted_rows_count= 0;
13187 if (m_table_info && !thd_ndb->m_error)
13189 m_table_info->records= stat.row_count;
13190 m_table_info->last_count= thd_ndb->count;
13191 no_uncommitted_rows_count= m_table_info->no_uncommitted_rows_count;
13193 stats.mean_rec_length= stat.row_size;
13194 stats.data_file_length= stat.fragment_memory;
13195 stats.records= stat.row_count + no_uncommitted_rows_count;
13196 stats.max_data_file_length= stat.fragment_extent_space;
13197 stats.delete_length= stat.fragment_extent_free_space;
13199 DBUG_PRINT(
"exit", (
"stats.records: %d "
13200 "stat->row_count: %d "
13201 "no_uncommitted_rows_count: %d"
13202 "stat->fragment_extent_space: %u "
13203 "stat->fragment_extent_free_space: %u",
13204 (
int)
stats.records,
13205 (
int)stat.row_count,
13206 (
int)no_uncommitted_rows_count,
13207 (uint)stat.fragment_extent_space,
13208 (uint)stat.fragment_extent_free_space));
/*
  Fold a transaction's local uncommitted-row delta into the shared
  per-table row count (under the share mutex), clamping at zero, then
  reset the local counter. No-op when the delta is zero.
  NOTE(review): braces and the zero-clamp else-value are missing from
  this extraction.
*/
13219 void modify_shared_stats(
NDB_SHARE *share,
13222 if (local_stat->no_uncommitted_rows_count)
13224 pthread_mutex_lock(&share->mutex);
13225 DBUG_ASSERT(share->stat.row_count != ~(ha_rows)0);
13226 if (share->stat.row_count != ~(ha_rows)0)
13228 DBUG_PRINT(
"info", (
"Update row_count for %s, row_count: %lu, with:%d",
13229 share->table_name, (ulong) share->stat.row_count,
13230 local_stat->no_uncommitted_rows_count));
/* Signed add, clamped so the shared count never goes negative. */
13231 share->stat.row_count=
13232 ((Int64)share->stat.row_count+local_stat->no_uncommitted_rows_count > 0)
13233 ? share->stat.row_count+local_stat->no_uncommitted_rows_count
13236 pthread_mutex_unlock(&share->mutex);
13237 local_stat->no_uncommitted_rows_count= 0;
/*
  ndb_get_table_statistics() body: scan the table with an interpreted
  "exit last row" program, collecting per-fragment pseudo-columns
  (ROW_COUNT, COMMIT_COUNT, ROW_SIZE, fragment memory/extent figures,
  FRAGMENT id) as extra get-values, and sum them into *ndbstat.
  When part_id is given, only that fragment is accumulated. On
  temporary NDB errors the operation is retried after a short sleep.
  NOTE(review): the function header, braces, retry-loop framing and
  several declarations (pTrans, pOp, options, code init) are missing
  from this extraction.
*/
13254 Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
13259 int retry_sleep= 30;
13260 const char *dummyRowPtr;
13262 Uint64 rows, commits, fixed_mem, var_mem, ext_space, free_ext_space;
13263 Uint32
size, fragid;
13265 char buff[22], buff2[22], buff3[22], buff4[22], buff5[22], buff6[22];
13267 DBUG_ENTER(
"ndb_get_table_statistics");
13269 DBUG_ASSERT(record != 0);
/* Bind each statistics pseudo-column to a local accumulator. */
13276 extraGets[0].column= NdbDictionary::Column::ROW_COUNT;
13277 extraGets[0].appStorage= &rows;
13278 extraGets[1].column= NdbDictionary::Column::COMMIT_COUNT;
13279 extraGets[1].appStorage= &commits;
13280 extraGets[2].column= NdbDictionary::Column::ROW_SIZE;
13281 extraGets[2].appStorage= &
size;
13282 extraGets[3].column= NdbDictionary::Column::FRAGMENT_FIXED_MEMORY;
13283 extraGets[3].appStorage= &fixed_mem;
13284 extraGets[4].column= NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY;
13285 extraGets[4].appStorage= &var_mem;
13286 extraGets[5].column= NdbDictionary::Column::FRAGMENT_EXTENT_SPACE;
13287 extraGets[5].appStorage= &ext_space;
13288 extraGets[6].column= NdbDictionary::Column::FRAGMENT_FREE_EXTENT_SPACE;
13289 extraGets[6].appStorage= &free_ext_space;
13290 extraGets[7].column= NdbDictionary::Column::FRAGMENT;
13291 extraGets[7].appStorage= &fragid;
/* One-word interpreted program: return only the last row per
   fragment (the pseudo-columns carry the per-fragment totals). */
13293 const Uint32 codeWords= 1;
13294 Uint32 codeSpace[ codeWords ];
13298 if ((code.interpret_exit_last_row() != 0) ||
13302 DBUG_PRINT(
"exit", (
"failed, reterr: %u, NdbError %u(%s)", reterr,
13304 DBUG_RETURN(reterr);
13310 Uint64 sum_rows= 0;
13311 Uint64 sum_commits= 0;
13312 Uint64 sum_row_size= 0;
13314 Uint64 sum_ext_space= 0;
13315 Uint64 sum_free_ext_space= 0;
13326 options.optionsPresent= NdbScanOperation::ScanOptions::SO_BATCH |
13327 NdbScanOperation::ScanOptions::SO_GETVALUE |
13328 NdbScanOperation::ScanOptions::SO_INTERPRETED;
13331 options.extraGetValues= &extraGets[0];
13332 options.numExtraGetValues=
sizeof(extraGets)/
sizeof(extraGets[0]);
13333 options.interpretedCode= &code;
13343 thd_ndb->m_scan_count++;
13344 thd_ndb->m_pruned_scan_count += (pOp->
getPruned()? 1 : 0);
13346 thd_ndb->m_execute_count++;
13347 DBUG_PRINT(
"info", (
"execute_count: %u", thd_ndb->m_execute_count));
/* Accumulate one result row per fragment. */
13356 while ((check= pOp->
nextResult(&dummyRowPtr, TRUE, TRUE)) == 0)
13358 DBUG_PRINT(
"info", (
"nextResult rows: %d commits: %d"
13359 "fixed_mem_size %d var_mem_size %d "
13360 "fragmentid %d extent_space %d free_extent_space %d",
13361 (
int)rows, (
int)commits, (
int)fixed_mem,
13362 (
int)var_mem, (
int)fragid, (
int)ext_space,
13363 (
int)free_ext_space));
/* Restrict to the requested partition when part_id is set. */
13365 if ((part_id != ~(uint)0) && fragid != part_id)
13371 sum_commits+= commits;
13372 if (sum_row_size < size)
13373 sum_row_size=
size;
13374 sum_mem+= fixed_mem + var_mem;
13376 sum_ext_space += ext_space;
13377 sum_free_ext_space += free_ext_space;
/* Single-partition request satisfied: stop scanning early. */
13379 if ((part_id != ~(uint)0) && fragid == part_id)
13395 ndbstat->row_count= sum_rows;
13396 ndbstat->commit_count= sum_commits;
13397 ndbstat->row_size= (ulong)sum_row_size;
13398 ndbstat->fragment_memory= sum_mem;
13399 ndbstat->fragment_extent_space= sum_ext_space;
13400 ndbstat->fragment_extent_free_space= sum_free_ext_space;
13402 DBUG_PRINT(
"exit", (
"records: %s commits: %s "
13403 "row_size: %s mem: %s "
13404 "allocated: %s free: %s "
13406 llstr(sum_rows, buff),
13407 llstr(sum_commits, buff2),
13408 llstr(sum_row_size, buff3),
13409 llstr(sum_mem, buff4),
13410 llstr(sum_ext_space, buff5),
13411 llstr(sum_free_ext_space, buff6),
/* Error path: map the NDB error for the caller. */
13418 if (file && pTrans)
13420 reterr= file->ndb_err(pTrans, have_lock);
13426 reterr= ndb_to_mysql_error(&tmp);
13430 reterr= error.
code;
/* Retry temporary errors until the budget or the query is killed. */
13438 retries-- && !thd->killed)
13440 do_retry_sleep(retry_sleep);
13445 DBUG_PRINT(
"exit", (
"failed, reterr: %u, NdbError %u(%s)", reterr,
13447 DBUG_RETURN(reterr);
/*
  Create the on-disk <name>.ndb marker file that tells the server this
  table lives in NDB. Path is rooted at mysql_data_home except for the
  embedded build. Returns 0 on success, non-zero otherwise.
  NOTE(review): braces, the declarations of `file`/`error` and the
  success assignment are missing from this extraction.
*/
13455 int ha_ndbcluster::write_ndb_file(
const char *name)
13459 char path[FN_REFLEN];
13461 DBUG_ENTER(
"write_ndb_file");
13462 DBUG_PRINT(
"enter", (
"name: %s", name));
13464 #ifndef EMBEDDED_LIBRARY
13465 (void)strxnmov(path, FN_REFLEN-1,
13466 mysql_data_home,
"/",name,ha_ndb_ext,NullS);
13468 (void)strxnmov(path, FN_REFLEN-1, name,ha_ndb_ext, NullS);
/* An empty file is sufficient; only its existence matters. */
13471 if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
13475 my_close(file,MYF(0));
13477 DBUG_RETURN(error);
13480 #ifndef NDB_WITH_NEW_MRR_INTERFACE
/*
  null_value_index_search() body: return TRUE when any range that fits
  the multi-range buffer has a NULL in its start key -- such ranges
  cannot use a unique-index lookup and force the fallback path.
  NOTE(review): the function header, braces and the TRUE-return are
  missing from this extraction.
*/
13486 DBUG_ENTER(
"null_value_index_search");
13487 KEY* key_info= table->key_info + active_index;
13489 ulong reclength= table->s->reclength;
13490 uchar *curr= (uchar*)buffer->buffer;
13491 uchar *end_of_buffer= (uchar*)buffer->buffer_end;
13497 assert(!(range->range_flag & SKIP_RANGE));
/* Walk only as many ranges as the result buffer can hold. */
13499 for (; range<end_range && curr+reclength <= end_of_buffer;
13502 const uchar *key= range->start_key.key;
13503 uint key_len= range->start_key.length;
13504 if (check_null_in_key(key_info, key, key_len))
13508 DBUG_RETURN(FALSE);
/*
  Activate read-before-write removal for this statement: assert the
  optimization was deemed possible, require a primary key and a unique
  chosen index, and mark it in use.
  NOTE(review): braces and intermediate lines are missing from this
  extraction.
*/
13512 void ha_ndbcluster::check_read_before_write_removal()
13514 DBUG_ENTER(
"check_read_before_write_removal");
13517 assert(m_read_before_write_removal_possible);
13518 m_read_before_write_removal_used=
true;
/* Optimization requires a primary key... */
13521 assert(table_share->primary_key != MAX_KEY);
/* ...and a unique index chosen for the access path. */
13524 DBUG_PRINT(
"info", (
"using index %d", active_index));
13525 const KEY *key= table->key_info + active_index;
13526 assert((key->
flags & HA_NOSAME)); NDB_IGNORE_VALUE(key);
13531 #ifndef NDB_WITH_NEW_M
RR_INTERFACE
/*
  Decide whether a multi-range read range needs an ordered-index scan
  (TRUE) or can be served by a primary-key/unique-key lookup (FALSE):
  pure ordered indexes always scan; pure PK/unique never do; hybrid
  indexes scan when the key is partial, not an exact read, or contains
  NULLs on a unique index.
  NOTE(review): braces and the per-branch return statements are
  missing from this extraction.
*/
13539 read_multi_needs_scan(NDB_INDEX_TYPE cur_index_type,
const KEY *key_info,
13542 if (cur_index_type == ORDERED_INDEX)
13544 if (cur_index_type == PRIMARY_KEY_INDEX ||
13545 cur_index_type == UNIQUE_INDEX)
13547 DBUG_ASSERT(cur_index_type == PRIMARY_KEY_ORDERED_INDEX ||
13548 cur_index_type == UNIQUE_ORDERED_INDEX);
/* Partial or non-exact keys cannot use a lookup. */
13549 if (r->start_key.length != key_info->
key_length ||
13550 r->start_key.flag != HA_READ_KEY_EXACT)
/* NULLs never match in a unique hash index: must scan. */
13552 if (cur_index_type == UNIQUE_ORDERED_INDEX &&
13553 check_null_in_key(key_info, r->start_key.key,r->start_key.length))
13565 KEY* key_info= table->key_info + active_index;
13566 NDB_INDEX_TYPE cur_index_type= get_index_type(active_index);
13567 ulong reclength= table_share->reclength;
13571 DBUG_ENTER(
"ha_ndbcluster::read_multi_range_first");
13572 DBUG_PRINT(
"info", (
"blob fields=%d read_set=0x%x", table_share->blob_fields, table->read_set->bitmap[0]));
13578 if (uses_blob_value(table->read_set) ||
13579 (cur_index_type == UNIQUE_INDEX &&
13580 has_null_in_unique_index(active_index) &&
13581 null_value_index_search(ranges, ranges+range_count, buffer))
13582 || (m_pushed_join_operation==PUSHED_ROOT &&
13583 !m_disable_pushed_join &&
13584 !m_pushed_join_member->get_query_def().isScanQuery())
13585 || m_delete_cannot_batch || m_update_cannot_batch)
13587 DBUG_PRINT(
"info", (
"read_multi_range not possible, falling back to default handler implementation"));
13588 m_disable_multi_read= TRUE;
13589 DBUG_RETURN(handler::read_multi_range_first(found_range_p,
13600 if (unlikely((error= close_scan())))
13601 DBUG_RETURN(error);
13603 m_disable_multi_read= FALSE;
13608 m_multi_ranges= ranges;
13609 multi_range_curr= ranges;
13610 multi_range_end= ranges+range_count;
13611 multi_range_sorted= sorted;
13612 multi_range_buffer= buffer;
13636 DBUG_ASSERT(cur_index_type != UNDEFINED_INDEX);
13637 DBUG_ASSERT(m_multi_cursor==NULL);
13638 DBUG_ASSERT(m_active_query==NULL);
13640 const NdbOperation* lastOp= trans ? trans->getLastDefinedOperation() : 0;
13642 uchar *row_buf= (uchar *)buffer->buffer;
13643 const uchar *end_of_buffer= buffer->buffer_end;
13644 uint num_scan_ranges= 0;
13646 bool any_real_read= FALSE;
13648 if (m_read_before_write_removal_possible)
13649 check_read_before_write_removal();
13650 for (i= 0; i < range_count; i++)
13655 if (m_use_partition_pruning)
13657 get_partition_set(table, table->record[0], active_index, &r->start_key,
13659 DBUG_PRINT(
"info", (
"part_spec.start_part: %u part_spec.end_part: %u",
13660 part_spec.start_part, part_spec.end_part));
13665 if (part_spec.start_part > part_spec.end_part)
13671 r->range_flag|= SKIP_RANGE;
13672 row_buf += reclength;
13676 (part_spec.start_part == part_spec.end_part))
13677 if (unlikely(!(trans= start_transaction_part_id(part_spec.start_part,
13679 DBUG_RETURN(error);
13681 r->range_flag&= ~(uint)SKIP_RANGE;
13683 if ((m_pushed_join_operation==PUSHED_ROOT &&
13684 m_pushed_join_member->get_query_def().isScanQuery()) ||
13685 read_multi_needs_scan(cur_index_type, key_info, r))
13690 if (!m_use_partition_pruning)
13692 get_partition_set(table, table->record[0], active_index, &r->start_key,
13694 if (part_spec.start_part == part_spec.end_part)
13696 if (unlikely(!(trans= start_transaction_part_id(part_spec.start_part,
13698 DBUG_RETURN(error);
13700 else if (unlikely(!(trans= start_transaction(error))))
13701 DBUG_RETURN(error);
13703 else if (unlikely(!(trans= start_transaction(error))))
13704 DBUG_RETURN(error);
13707 any_real_read= TRUE;
13708 DBUG_PRINT(
"info", (
"any_real_read= TRUE"));
13714 if (i > NdbIndexScanOperation::MaxRangeNo)
13716 DBUG_PRINT(
"info", (
"Reached the limit of ranges allowed in a single"
13721 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
13725 !m_active_query && sorted))
13727 if (!m_active_query)
13729 const int error= create_pushed_join();
13730 if (unlikely(error))
13731 DBUG_RETURN(error);
13733 NdbQuery*
const query= m_active_query;
13735 query->getQueryOperation((uint)PUSHED_ROOT)->
setOrdering(NdbQueryOptions::ScanOrdering_ascending))
13741 if (!m_multi_cursor)
13743 if (m_pushed_join_operation == PUSHED_ROOT)
13751 options.optionsPresent=
13752 NdbScanOperation::ScanOptions::SO_SCANFLAGS |
13753 NdbScanOperation::ScanOptions::SO_PARALLEL;
13755 options.scan_flags=
13756 NdbScanOperation::SF_ReadRangeNo |
13757 NdbScanOperation::SF_MultiRange;
13760 options.scan_flags|= NdbScanOperation::SF_KeyInfo;
13764 options.parallel= DEFAULT_PARALLELISM;
13767 if (table_share->primary_key == MAX_KEY)
13768 get_hidden_fields_scan(&options, gets);
13770 if (m_cond && m_cond->generate_scan_filter(&code, &options))
13775 (m_index[active_index].ndb_record_key,
13778 (uchar *)(table->read_set->bitmap),
13786 m_multi_cursor= scanOp;
13806 if (m_use_partition_pruning &&
13807 m_user_defined_partitioning &&
13808 (part_spec.start_part == part_spec.end_part))
13810 DBUG_PRINT(
"info", (
"Range on user-def-partitioned table can be pruned to part %u",
13811 part_spec.start_part));
13812 ndbPartitionSpec.type= Ndb::PartitionSpec::PS_USER_DEFINED;
13813 ndbPartitionSpec.UserDefined.partitionId= part_spec.start_part;
13814 ndbPartSpecPtr= &ndbPartitionSpec;
13819 compute_index_bounds(bound, key_info, &r->start_key, &r->end_key, 0);
13822 const NdbRecord *key_rec= m_index[active_index].ndb_record_key;
13823 if (m_active_query)
13825 DBUG_PRINT(
"info", (
"setBound:%d, for pushed join", bound.range_no));
13826 if (m_active_query->setBound(key_rec, &bound))
13833 if (m_multi_cursor->
setBound(key_rec,
13842 r->range_flag&= ~(uint)UNIQUE_RANGE;
13847 if (m_pushed_join_operation == PUSHED_ROOT)
13853 DBUG_ASSERT(active_index != MAX_KEY);
13854 if (unlikely(!(trans= start_transaction_key(active_index,
13857 DBUG_RETURN(error);
13865 if (row_buf + reclength > end_of_buffer)
13868 if (m_read_before_write_removal_used)
13870 r->range_flag|= READ_KEY_FROM_RANGE;
13875 any_real_read= TRUE;
13876 DBUG_PRINT(
"info", (
"m_read_before_write_removal_used == FALSE, "
13877 "any_real_read= TRUE"));
13879 r->range_flag|= UNIQUE_RANGE;
13881 Uint32 partitionId;
13882 Uint32* ppartitionId = NULL;
13884 if (m_user_defined_partitioning &&
13885 (cur_index_type == PRIMARY_KEY_ORDERED_INDEX ||
13886 cur_index_type == PRIMARY_KEY_INDEX))
13888 partitionId=part_spec.start_part;
13889 ppartitionId=&partitionId;
13892 DBUG_PRINT(
"info", (
"Generating Pk/Unique key read for range %u", i));
13897 if (m_pushed_join_operation==PUSHED_ROOT &&
13898 !m_disable_pushed_join &&
13899 !m_pushed_join_member->get_query_def().isScanQuery())
13901 DBUG_ASSERT(
false);
13903 const int error= pk_unique_index_read_key_pushed(active_index,
13906 if (unlikely(error))
13907 DBUG_RETURN(error);
13911 if (m_pushed_join_operation == PUSHED_ROOT)
13913 DBUG_PRINT(
"info", (
"Cannot push join due to incomplete implementation."));
13917 if (!(op= pk_unique_index_read_key(active_index,
13923 row_buf+= reclength;
13926 DBUG_ASSERT(i > 0 || i == range_count);
13927 m_multi_range_defined_end= ranges +
i;
13929 buffer->end_of_used_area= row_buf;
13931 if (m_active_query != NULL &&
13932 m_pushed_join_member->get_query_def().isScanQuery())
13934 m_thd_ndb->m_scan_count++;
13940 bool prunable =
false;
13941 if (unlikely(m_active_query->
isPrunable(prunable) != 0))
13944 m_thd_ndb->m_pruned_scan_count++;
13946 DBUG_PRINT(
"info", (
"Is MRR scan-query pruned to 1 partition? :%u", prunable));
13947 DBUG_ASSERT(!m_multi_cursor);
13949 if (m_multi_cursor)
13951 DBUG_PRINT(
"info", (
"Is MRR scan pruned to 1 partition? :%u",
13953 m_thd_ndb->m_scan_count++;
13954 m_thd_ndb->m_pruned_scan_count += (m_multi_cursor->
getPruned()? 1 : 0);
13964 const NdbOperation* rangeOp= lastOp ? lastOp->next() :
13965 trans->getFirstDefinedOperation();
13967 DBUG_PRINT(
"info", (
"Executing reads"));
13969 if (execute_no_commit_ie(m_thd_ndb, trans) == 0)
13971 m_multi_range_result_ptr= buffer->buffer;
13980 for (;rangeInfo < m_multi_range_defined_end; rangeInfo++)
13982 DBUG_PRINT(
"info", (
"range flag is %u", rangeInfo->range_flag));
13983 if (rangeInfo->range_flag & SKIP_RANGE)
13986 if ((rangeInfo->range_flag & UNIQUE_RANGE) &&
13987 (!(rangeInfo->range_flag & READ_KEY_FROM_RANGE)))
13989 assert(rangeOp != NULL);
13994 rangeInfo->range_flag &= ~(uint)EMPTY_RANGE;
13996 DBUG_PRINT(
"info", (
"Unique range op has result"));
14004 DBUG_RETURN(ndb_err(trans));
14006 DBUG_PRINT(
"info", (
"Unique range op has no result"));
14010 rangeInfo->range_flag |= EMPTY_RANGE;
14024 DBUG_RETURN(read_multi_range_next(found_range_p));
14028 ha_ndbcluster::read_multi_range_next(
KEY_MULTI_RANGE ** multi_range_found_p)
14030 DBUG_ENTER(
"ha_ndbcluster::read_multi_range_next");
14031 if (m_disable_multi_read)
14033 DBUG_RETURN(handler::read_multi_range_next(multi_range_found_p));
14036 const ulong reclength= table_share->reclength;
14038 while (multi_range_curr < m_multi_range_defined_end)
14040 if (multi_range_curr->range_flag & SKIP_RANGE)
14045 m_multi_range_result_ptr += reclength;
14046 multi_range_curr++;
14048 else if (multi_range_curr->range_flag & READ_KEY_FROM_RANGE)
14050 DBUG_PRINT(
"info", (
"using read before write removal optimisation"));
14051 KEY* key_info= table->key_info + active_index;
14052 key_restore(table->record[0], (uchar*)multi_range_curr->start_key.key,
14055 multi_range_curr++;
14058 else if (multi_range_curr->range_flag & UNIQUE_RANGE)
14064 multi_range_curr= old_multi_range_curr + 1;
14070 m_active_cursor= NULL;
14071 const uchar *src_row= m_multi_range_result_ptr;
14072 m_multi_range_result_ptr= src_row + table_share->reclength;
14074 if (!(old_multi_range_curr->range_flag & EMPTY_RANGE))
14076 *multi_range_found_p= old_multi_range_curr;
14077 memcpy(table->record[0], src_row, table_share->reclength);
14088 if ((res= read_multi_range_fetch_next()) != 0)
14097 multi_range_curr++;
14101 int current_range_no= m_current_range_no;
14102 int expected_range_no;
14113 if (!multi_range_sorted ||
14114 (expected_range_no= multi_range_curr - m_multi_ranges)
14115 == current_range_no)
14117 *multi_range_found_p= m_multi_ranges + current_range_no;
14119 unpack_record(table->record[0], m_next_row);
14131 m_active_cursor= m_multi_cursor;
14135 else if (current_range_no > expected_range_no)
14138 multi_range_curr++;
14147 multi_range_curr++;
14153 if (multi_range_curr == multi_range_end)
14155 DBUG_RETURN(HA_ERR_END_OF_FILE);
14163 multi_range_end - multi_range_curr,
14164 multi_range_sorted,
14165 multi_range_buffer));
14177 ha_ndbcluster::read_multi_range_fetch_next()
14179 DBUG_ENTER(
"read_multi_range_fetch_next");
14181 if (m_active_query)
14183 DBUG_PRINT(
"info", (
"read_multi_range_fetch_next from pushed join, m_next_row:%p", m_next_row));
14186 int res= fetch_next_pushed();
14187 if (res == NdbQuery::NextResult_gotRow)
14189 m_current_range_no= 0;
14192 else if (res == NdbQuery::NextResult_scanComplete)
14195 m_active_query->
close(FALSE);
14207 else if (m_multi_cursor)
14212 int res= fetch_next(cursor);
14220 cursor->
close(FALSE, TRUE);
14221 m_active_cursor= 0;
14237 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
14247 int ndbcluster_make_pushed_join(
handlerton *hton,
14251 DBUG_ENTER(
"ndbcluster_make_pushed_join");
14254 if (THDVAR(thd, join_pushdown))
14264 int error= pushed_builder.make_pushed_join(join_root, pushed_join);
14265 if (unlikely(error))
14269 ERR_SET(pushed_builder.getNdbError(),error);
14272 DBUG_RETURN(error);
14277 if (pushed_join != NULL)
14282 error= handler->assign_pushed_join(pushed_join);
14283 if (unlikely(error))
14285 delete pushed_join;
14287 DBUG_RETURN(error);
14304 ha_ndbcluster::assign_pushed_join(
const ndb_pushed_join* pushed_join)
14306 DBUG_ENTER(
"assign_pushed_join");
14312 DBUG_ASSERT(tab->file->ht == ht);
14314 child->m_pushed_join_member= pushed_join;
14315 child->m_pushed_join_operation=
i;
14318 DBUG_PRINT(
"info", (
"Assigned pushed join with %d child operations",
14333 ha_ndbcluster::maybe_pushable_join(
const char*& reason)
const
14336 if (uses_blob_value(table->read_set))
14338 reason=
"select list can't contain BLOB columns";
14341 if (m_user_defined_partitioning)
14343 reason=
"has user defined partioning";
14356 reason=
"lock modes other than 'read committed' not implemented";
14381 #ifndef NDB_WITHOUT_JOIN_PUSHDOWN
14383 ha_ndbcluster::check_if_pushable(
int type,
14385 bool needSorted)
const
14387 if (m_disable_pushed_join)
14389 DBUG_PRINT(
"info", (
"Push disabled (HA_EXTRA_KEYREAD)"));
14392 return m_pushed_join_operation == PUSHED_ROOT
14393 && m_pushed_join_member != NULL
14396 (idx<MAX_KEY) ? &m_index[idx] : NULL,
14401 ha_ndbcluster::create_pushed_join(
const NdbQueryParamValue* keyFieldParams, uint paramCnt)
14403 DBUG_ENTER(
"create_pushed_join");
14404 DBUG_ASSERT(m_pushed_join_member && m_pushed_join_operation == PUSHED_ROOT);
14409 if (unlikely(query==NULL))
14418 DBUG_ASSERT(handler->m_pushed_join_operation==(
int)i);
14420 handler->m_pushed_operation= op;
14423 const NdbRecord*
const resultRec= handler->m_ndb_record;
14424 int res= op->setResultRowRef(
14426 handler->_m_next_row,
14427 (uchar *)(tab->read_set->bitmap));
14432 handler->_m_next_row= 0;
14435 DBUG_ASSERT(m_active_query==NULL);
14436 m_active_query= query;
14449 ha_ndbcluster::check_is_pushed()
const
14451 if (m_pushed_join_member == NULL)
14454 handler *root= m_pushed_join_member->
get_table(PUSHED_ROOT)->file;
14455 return (static_cast<ha_ndbcluster*>(root)->m_active_query);
14461 if (m_pushed_join_member == NULL)
14470 if (m_pushed_join_member == NULL)
14473 return m_pushed_join_member->
get_table(PUSHED_ROOT);
14479 if (m_pushed_join_operation > PUSHED_ROOT)
14481 DBUG_ASSERT(m_pushed_join_member!=NULL);
14482 uint parent_ix= m_pushed_join_member
14483 ->get_query_def().getQueryOperation(m_pushed_join_operation)
14484 ->getParentOperation(0)
14486 return m_pushed_join_member->
get_table(parent_ix);
14498 ha_ndbcluster::update_table_comment(
14500 const char* comment)
14502 THD *thd= current_thd;
14503 uint length= strlen(comment);
14504 if (length > 64000 - 3)
14506 return((
char*)comment);
14510 if (!(ndb= get_ndb(thd)))
14512 return((
char*)comment);
14517 return((
char*)comment);
14519 const NDBTAB* tab= m_table;
14520 DBUG_ASSERT(tab != NULL);
14523 const char *
fmt=
"%s%snumber_of_replicas: %d";
14524 const unsigned fmt_len_plus_extra= length + strlen(fmt);
14525 if ((str= (
char*) my_malloc(fmt_len_plus_extra, MYF(0))) == NULL)
14527 sql_print_error(
"ha_ndbcluster::update_table_comment: "
14528 "my_malloc(%u) failed", (
unsigned int)fmt_len_plus_extra);
14529 return (
char*)comment;
14532 my_snprintf(str,fmt_len_plus_extra,fmt,comment,
14533 length > 0 ?
" ":
"",
14534 tab->getReplicaCount());
14542 pthread_handler_t ndb_util_thread_func(
void *arg __attribute__((unused)))
14547 uint share_list_size= 0;
14551 DBUG_ENTER(
"ndb_util_thread");
14552 DBUG_PRINT(
"enter", (
"cache_check_time: %lu", opt_ndb_cache_check_time));
14554 pthread_mutex_lock(&LOCK_ndb_util_thread);
14559 my_errno= HA_ERR_OUT_OF_MEM;
14562 THD_CHECK_SENTRY(thd);
14563 pthread_detach_this_thread();
14564 ndb_util_thread= pthread_self();
14566 thd->thread_stack= (
char*)&thd;
14567 if (thd->store_globals())
14568 goto ndb_util_thread_fail;
14570 thd->init_for_queries();
14571 thd_set_command(thd, COM_DAEMON);
14572 #ifndef NDB_THD_HAS_NO_VERSION
14573 thd->version=refresh_version;
14575 thd->client_capabilities = 0;
14576 thd->security_ctx->skip_grants();
14580 charset_connection= get_charset_by_csname(
"utf8",
14581 MY_CS_PRIMARY, MYF(MY_WME));
14582 thd->variables.character_set_client= charset_connection;
14583 thd->variables.character_set_results= charset_connection;
14584 thd->variables.collation_connection= charset_connection;
14585 thd->update_charset();
14588 ndb_util_thread_running= 1;
14589 pthread_cond_signal(&COND_ndb_util_ready);
14590 pthread_mutex_unlock(&LOCK_ndb_util_thread);
14596 while (!mysqld_server_started)
14598 set_timespec(abstime, 1);
14601 if (ndbcluster_terminating)
14604 pthread_mutex_lock(&LOCK_ndb_util_thread);
14605 goto ndb_util_thread_end;
14613 pthread_mutex_lock(&LOCK_ndb_util_thread);
14614 while (!g_ndb_status.cluster_node_id && (ndbcluster_hton->slot != ~(uint)0))
14617 pthread_cond_wait(&COND_ndb_util_thread, &LOCK_ndb_util_thread);
14618 if (ndbcluster_terminating)
14619 goto ndb_util_thread_end;
14621 pthread_mutex_unlock(&LOCK_ndb_util_thread);
14624 if (!(thd_ndb= Thd_ndb::seize(thd)))
14626 sql_print_error(
"Could not allocate Thd_ndb object");
14627 pthread_mutex_lock(&LOCK_ndb_util_thread);
14628 goto ndb_util_thread_end;
14630 thd_set_thd_ndb(thd, thd_ndb);
14631 thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
14633 if (opt_ndb_extra_logging && ndb_binlog_running)
14634 sql_print_information(
"NDB Binlog: Ndb tables initially read only.");
14636 set_timespec(abstime, 0);
14639 pthread_mutex_lock(&LOCK_ndb_util_thread);
14640 if (!ndbcluster_terminating)
14641 pthread_cond_timedwait(&COND_ndb_util_thread,
14642 &LOCK_ndb_util_thread,
14644 if (ndbcluster_terminating)
14645 goto ndb_util_thread_end;
14646 pthread_mutex_unlock(&LOCK_ndb_util_thread);
14647 #ifdef NDB_EXTRA_DEBUG_UTIL_THREAD
14648 DBUG_PRINT(
"ndb_util_thread", (
"Started, cache_check_time: %lu",
14649 opt_ndb_cache_check_time));
14657 if (!check_ndb_in_thd(thd,
false))
14659 set_timespec(abstime, 1);
14670 if (!ndb_binlog_setup(thd))
14673 set_timespec(abstime, 1);
14677 if (opt_ndb_cache_check_time == 0)
14680 set_timespec(abstime, 1);
14686 pthread_mutex_lock(&ndbcluster_mutex);
14687 uint
i, open_count, record_count= ndbcluster_open_tables.records;
14688 if (share_list_size < record_count)
14691 if (!new_share_list)
14693 sql_print_warning(
"ndb util thread: malloc failure, "
14694 "query cache not maintained properly");
14695 pthread_mutex_unlock(&ndbcluster_mutex);
14698 delete [] share_list;
14699 share_list_size= record_count;
14700 share_list= new_share_list;
14702 for (i= 0, open_count= 0; i < record_count; i++)
14704 share= (
NDB_SHARE *)my_hash_element(&ndbcluster_open_tables, i);
14705 if ((share->use_count - (
int) (share->op != 0) - (
int) (share->op != 0))
14709 share->use_count++;
14710 share->util_thread=
true;
14711 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary use_count: %u",
14712 share->key, share->use_count));
14713 DBUG_PRINT(
"ndb_util_thread",
14714 (
"Found open table[%d]: %s, use_count: %d",
14715 i, share->table_name, share->use_count));
14718 share_list[open_count++]= share;
14720 pthread_mutex_unlock(&ndbcluster_mutex);
14723 for (i= 0; i < open_count; i++)
14725 share= share_list[
i];
14726 if ((share->use_count - (
int) (share->op != 0) - (
int) (share->op != 0))
14733 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
14734 share->key, share->use_count));
14736 pthread_mutex_lock(&ndbcluster_mutex);
14737 share->util_thread=
false;
14738 free_share(&share,
true);
14739 pthread_mutex_unlock(&ndbcluster_mutex);
14742 DBUG_PRINT(
"ndb_util_thread",
14743 (
"Fetching commit count for: %s", share->key));
14747 pthread_mutex_lock(&share->mutex);
14748 lock= share->commit_count_lock;
14749 pthread_mutex_unlock(&share->mutex);
14752 Ndb* ndb= thd_ndb->ndb;
14758 if (ndbtab_g.get_table() &&
14759 ndb_get_table_statistics(thd, NULL, FALSE, ndb,
14760 ndbtab_g.get_table()->getDefaultRecord(),
14764 char buff[22], buff2[22];
14767 (
"Table: %s commit_count: %s rows: %s",
14769 llstr(stat.commit_count, buff),
14770 llstr(stat.row_count, buff2)));
14774 DBUG_PRINT(
"ndb_util_thread",
14775 (
"Error: Could not get commit count for table %s",
14777 stat.commit_count= 0;
14781 pthread_mutex_lock(&share->mutex);
14782 if (share->commit_count_lock == lock)
14783 share->commit_count= stat.commit_count;
14784 pthread_mutex_unlock(&share->mutex);
14787 DBUG_PRINT(
"NDB_SHARE", (
"%s temporary free use_count: %u",
14788 share->key, share->use_count));
14789 pthread_mutex_lock(&ndbcluster_mutex);
14790 share->util_thread=
false;
14791 free_share(&share,
true);
14792 pthread_mutex_unlock(&ndbcluster_mutex);
14796 set_timespec_nsec(abstime, opt_ndb_cache_check_time * 1000000ULL);
14799 pthread_mutex_lock(&LOCK_ndb_util_thread);
14801 ndb_util_thread_end:
14802 net_end(&thd->net);
14803 ndb_util_thread_fail:
14805 delete [] share_list;
14808 Thd_ndb::release(thd_ndb);
14809 thd_set_thd_ndb(thd, NULL);
14815 ndb_util_thread_running= 0;
14816 pthread_cond_signal(&COND_ndb_util_ready);
14817 pthread_mutex_unlock(&LOCK_ndb_util_thread);
14818 DBUG_PRINT(
"exit", (
"ndb_util_thread"));
14851 DBUG_ENTER(
"ha_ndbcluster::cond_push");
14854 if (cond->used_tables() & ~table->map)
14862 DBUG_EXECUTE(
"where",print_where((
Item *)cond,
"Rejected cond_push", QT_ORDINARY););
14871 DBUG_ASSERT(!(cond->used_tables() & ~table->map));
14877 my_errno= HA_ERR_OUT_OF_MEM;
14880 DBUG_EXECUTE(
"where",print_where((
Item *)cond, m_tabname, QT_ORDINARY););
14881 DBUG_RETURN(m_cond->cond_push(cond, table, (
NDBTAB *)m_table));
14891 m_cond->cond_pop();
14899 ndbcluster_show_status(
handlerton *hton, THD* thd, stat_print_fn *stat_print,
14900 enum ha_stat_type stat_type)
14905 DBUG_ENTER(
"ndbcluster_show_status");
14907 if (stat_type != HA_ENGINE_STATUS)
14909 DBUG_RETURN(FALSE);
14912 Ndb* ndb= check_ndb_in_thd(thd);
14913 Thd_ndb *thd_ndb= get_thd_ndb(thd);
14916 update_status_variables(thd_ndb, &ns, thd_ndb->connection);
14918 update_status_variables(NULL, &ns, g_ndb_cluster_connection);
14921 my_snprintf(buf,
sizeof(buf),
14922 "cluster_node_id=%ld, "
14923 "connected_host=%s, "
14924 "connected_port=%ld, "
14925 "number_of_data_nodes=%ld, "
14926 "number_of_ready_data_nodes=%ld, "
14927 "connect_count=%ld",
14928 ns.cluster_node_id,
14931 ns.number_of_data_nodes,
14932 ns.number_of_ready_data_nodes,
14934 if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
14935 STRING_WITH_LEN(
"connection"), buf, buflen))
14938 for (
int i= 0; i < MAX_NDB_NODES; i++)
14940 if (ns.transaction_hint_count[i] > 0 ||
14941 ns.transaction_no_hint_count[i] > 0)
14943 uint namelen= my_snprintf(name,
sizeof(name),
"node[%d]", i);
14944 buflen= my_snprintf(buf,
sizeof(buf),
14945 "transaction_hint=%ld, transaction_no_hint=%ld",
14946 ns.transaction_hint_count[i],
14947 ns.transaction_no_hint_count[i]);
14948 if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
14949 name, namelen, buf, buflen))
14958 while (ndb->get_free_list_usage(&tmp))
14961 my_snprintf(buf,
sizeof(buf),
14962 "created=%u, free=%u, sizeof=%u",
14963 tmp.m_created, tmp.m_free, tmp.m_sizeof);
14964 if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
14965 tmp.m_name, strlen(tmp.m_name), buf, buflen))
14969 ndbcluster_show_status_binlog(thd, stat_print, stat_type);
14971 DBUG_RETURN(FALSE);
14975 int ha_ndbcluster::get_default_no_partitions(
HA_CREATE_INFO *create_info)
14977 if (unlikely(g_ndb_cluster_connection->get_no_ready() <= 0))
14980 my_error(HA_ERR_NO_CONNECTION, MYF(0));
14984 THD* thd = current_thd;
14987 Thd_ndb * thd_ndb = get_thd_ndb(thd);
14991 ha_rows max_rows, min_rows;
14994 max_rows= create_info->max_rows;
14995 min_rows= create_info->min_rows;
14999 max_rows= table_share->max_rows;
15000 min_rows= table_share->min_rows;
15002 uint no_fragments= get_no_fragments(max_rows >= min_rows ?
15003 max_rows : min_rows);
15004 uint reported_frags;
15005 adjusted_frag_count(thd_ndb->ndb,
15008 return reported_frags;
15011 uint32 ha_ndbcluster::calculate_key_hash_value(
Field **field_array)
15018 Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1];
15019 void *buf= (
void*)&tmp[0];
15020 Ndb *ndb= m_thd_ndb->ndb;
15021 DBUG_ENTER(
"ha_ndbcluster::calculate_key_hash_value");
15025 Field *field= *field_array;
15026 uint len= field->data_length();
15027 DBUG_ASSERT(!field->is_real_null());
15028 if (field->real_type() == MYSQL_TYPE_VARCHAR)
15030 key_data[
i].ptr= field->ptr;
15031 key_data[i++].len= len;
15032 }
while (*(++field_array));
15033 key_data[
i].ptr= 0;
15034 if ((ret_val= ndb->
computeHash(&hash_value, m_table,
15035 key_data_ptr, buf,
sizeof(tmp))))
15037 DBUG_PRINT(
"info", (
"ret_val = %d", ret_val));
15038 DBUG_ASSERT(FALSE);
15041 DBUG_RETURN(hash_value);
15062 enum ndb_distribution_enum {
15063 NDB_DISTRIBUTION_KEYHASH= 0,
15064 NDB_DISTRIBUTION_LINHASH= 1
15066 static const char* distribution_names[]= {
"KEYHASH",
"LINHASH", NullS };
15067 static ulong opt_ndb_distribution;
15068 static TYPELIB distribution_typelib= {
15069 array_elements(distribution_names) - 1,
15071 distribution_names,
15074 static MYSQL_SYSVAR_ENUM(
15076 opt_ndb_distribution,
15077 PLUGIN_VAR_RQCMDARG,
15078 "Default distribution for new tables in ndb",
15081 NDB_DISTRIBUTION_KEYHASH,
15082 &distribution_typelib
15086 void ha_ndbcluster::set_auto_partitions(
partition_info *part_info)
15088 DBUG_ENTER(
"ha_ndbcluster::set_auto_partitions");
15089 part_info->list_of_part_fields= TRUE;
15090 part_info->part_type= HASH_PARTITION;
15091 switch (opt_ndb_distribution)
15093 case NDB_DISTRIBUTION_KEYHASH:
15094 part_info->linear_hash_ind= FALSE;
15096 case NDB_DISTRIBUTION_LINHASH:
15097 part_info->linear_hash_ind= TRUE;
15100 DBUG_ASSERT(
false);
15111 const uint num_parts = partition_info_num_parts(part_info);
15113 bool unsigned_flag= part_info->part_expr->unsigned_flag;
15114 DBUG_ENTER(
"set_range_data");
15116 int32 *range_data= (int32*)my_malloc(num_parts*
sizeof(int32), MYF(0));
15119 mem_alloc_error(num_parts*
sizeof(int32));
15122 for (uint i= 0; i < num_parts; i++)
15124 longlong range_val= part_info->range_int_array[
i];
15126 range_val-= 0x8000000000000000ULL;
15127 if (range_val < INT_MIN32 || range_val >= INT_MAX32)
15129 if ((i != num_parts - 1) ||
15130 (range_val != LONGLONG_MAX))
15132 my_error(ER_LIMITED_PART_RANGE, MYF(0),
"NDB");
15136 range_val= INT_MAX32;
15138 range_data[
i]= (int32)range_val;
15142 my_free((
char*)range_data, MYF(0));
15143 DBUG_RETURN(error);
15151 const uint num_list_values = partition_info_num_list_values(part_info);
15152 int32 *list_data= (int32*)my_malloc(num_list_values*2*
sizeof(int32), MYF(0));
15154 bool unsigned_flag= part_info->part_expr->unsigned_flag;
15155 DBUG_ENTER(
"set_list_data");
15159 mem_alloc_error(num_list_values*2*
sizeof(int32));
15162 for (uint i= 0; i < num_list_values; i++)
15165 longlong list_val= list_entry->list_value;
15167 list_val-= 0x8000000000000000ULL;
15168 if (list_val < INT_MIN32 || list_val > INT_MAX32)
15170 my_error(ER_LIMITED_PART_RANGE, MYF(0),
"NDB");
15174 list_data[2*
i]= (int32)list_val;
15175 list_data[2*i+1]= list_entry->partition_id;
15179 my_free((
char*)list_data, MYF(0));
15180 DBUG_RETURN(error);
15200 uint32 frag_data[MAX_PARTITIONS];
15201 char *ts_names[MAX_PARTITIONS];
15202 ulong fd_index= 0,
i, j;
15207 DBUG_ENTER(
"ha_ndbcluster::set_up_partition_info");
15209 if (part_info->part_type == HASH_PARTITION &&
15210 part_info->list_of_part_fields == TRUE)
15212 Field **fields= part_info->part_field_array;
15214 ftype= NDBTAB::HashMapPartition;
15216 for (i= 0; i < part_info->part_field_list.elements; i++)
15219 DBUG_PRINT(
"info",(
"setting dist key on %s", col->
getName()));
15225 if (!current_thd->variables.new_mode)
15227 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
15228 ER_ILLEGAL_HA_CREATE_OPTION,
15229 ER(ER_ILLEGAL_HA_CREATE_OPTION),
15230 ndbcluster_hton_name,
15231 "LIST, RANGE and HASH partition disabled by default,"
15232 " use --new option to enable");
15233 DBUG_RETURN(HA_ERR_UNSUPPORTED);
15243 DBUG_PRINT(
"info", (
"Generating partition func value field"));
15244 col.
setName(
"$PART_FUNC_VALUE");
15249 col.setAutoIncrement(FALSE);
15251 if (part_info->part_type == RANGE_PARTITION)
15253 if ((error= set_range_data(part_info, ndbtab)))
15255 DBUG_RETURN(error);
15258 else if (part_info->part_type == LIST_PARTITION)
15260 if ((error= set_list_data(part_info, ndbtab)))
15262 DBUG_RETURN(error);
15271 part_elem= part_it++;
15272 if (!part_info->is_sub_partitioned())
15274 ng= part_elem->nodegroup_id;
15275 ts_names[fd_index]= part_elem->tablespace_name;
15276 frag_data[fd_index++]= ng;
15284 part_elem= sub_it++;
15285 ng= part_elem->nodegroup_id;
15286 ts_names[fd_index]= part_elem->tablespace_name;
15287 frag_data[fd_index++]= ng;
15288 }
while (++j < partition_info_num_subparts(part_info));
15290 }
while (++i < partition_info_num_parts(part_info));
15292 const bool use_default_num_parts =
15293 partition_info_use_default_num_partitions(part_info);
15297 ha_rows max_rows= table_share->max_rows;
15298 ha_rows min_rows= table_share->min_rows;
15299 if (max_rows < min_rows)
15300 max_rows= min_rows;
15301 if (max_rows != (ha_rows)0)
15312 #ifndef NDB_WITHOUT_ONLINE_ALTER
15314 HA_ALTER_FLAGS supported_alter_operations()
15316 HA_ALTER_FLAGS alter_flags;
15317 return alter_flags |
15320 HA_ADD_UNIQUE_INDEX |
15321 HA_DROP_UNIQUE_INDEX |
15323 HA_COLUMN_STORAGE |
15326 HA_ALTER_TABLE_REORG |
15327 HA_CHANGE_AUTOINCREMENT_VALUE;
15330 int ha_ndbcluster::check_if_supported_alter(
TABLE *altered_table,
15333 HA_ALTER_FLAGS *alter_flags,
15334 uint table_changes)
15336 THD *thd= current_thd;
15337 HA_ALTER_FLAGS not_supported= ~(supported_alter_operations());
15340 HA_ALTER_FLAGS add_column;
15341 HA_ALTER_FLAGS adding;
15342 HA_ALTER_FLAGS dropping;
15344 DBUG_ENTER(
"ha_ndbcluster::check_if_supported_alter");
15345 add_column= add_column | HA_ADD_COLUMN;
15346 adding= adding | HA_ADD_INDEX | HA_ADD_UNIQUE_INDEX;
15347 dropping= dropping | HA_DROP_INDEX | HA_DROP_UNIQUE_INDEX;
15349 const NDBTAB *old_tab= m_table;
15351 if (THDVAR(thd, use_copying_alter_table))
15353 DBUG_PRINT(
"info", (
"On-line alter table disabled"));
15354 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15358 char dbug_string[HA_MAX_ALTER_FLAGS+1];
15359 alter_flags->print(dbug_string);
15360 DBUG_PRINT(
"info", (
"Not supported %s", dbug_string));
15364 if (alter_flags->is_set(HA_ALTER_TABLE_REORG))
15370 if (part_info->use_default_num_partitions)
15372 alter_flags->clear_bit(HA_COALESCE_PARTITION);
15373 alter_flags->clear_bit(HA_ADD_PARTITION);
15377 if ((*alter_flags & not_supported).is_set())
15380 HA_ALTER_FLAGS tmp = *alter_flags;
15381 tmp&= not_supported;
15382 char dbug_string[HA_MAX_ALTER_FLAGS+1];
15383 tmp.print(dbug_string);
15384 DBUG_PRINT(
"info", (
"Detected unsupported change: %s", dbug_string));
15386 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15389 if (alter_flags->is_set(HA_ADD_COLUMN) ||
15390 alter_flags->is_set(HA_ADD_PARTITION) ||
15391 alter_flags->is_set(HA_ALTER_TABLE_REORG))
15393 Ndb *ndb= get_ndb(thd);
15398 if (alter_flags->is_set(HA_ADD_COLUMN))
15413 add_column.set_bit(HA_COLUMN_STORAGE);
15414 add_column.set_bit(HA_COLUMN_FORMAT);
15415 if ((*alter_flags & ~add_column).is_set())
15417 DBUG_PRINT(
"info", (
"Only add column exclusively can be performed on-line"));
15418 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15424 if (table_share->primary_key == MAX_KEY ||
15425 part_info->part_type != HASH_PARTITION ||
15426 !part_info->list_of_part_fields)
15427 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15430 for (uint i= table->s->fields; i < altered_table->s->fields; i++)
15432 Field *field= altered_table->field[
i];
15433 DBUG_PRINT(
"info", (
"Found new field %s", field->field_name));
15434 DBUG_PRINT(
"info", (
"storage_type %i, column_format %i",
15435 (uint) field->field_storage_type(),
15436 (uint) field->column_format()));
15438 if ((my_errno= create_ndb_column(0, col, field, create_info,
15439 COLUMN_FORMAT_TYPE_DYNAMIC)))
15441 DBUG_PRINT(
"info", (
"create_ndb_column returned %u", my_errno));
15442 DBUG_RETURN(my_errno);
15448 if (alter_flags->is_set(HA_ALTER_TABLE_REORG))
15453 else if (alter_flags->is_set(HA_ADD_PARTITION))
15455 DBUG_PRINT(
"info", (
"Adding partition (%u)", part_info->num_parts));
15459 NDB_Modifiers table_modifiers(ndb_table_modifiers);
15460 table_modifiers.parse(thd,
"NDB_TABLE=", create_info->comment.str,
15461 create_info->comment.length);
15462 const NDB_Modifier* mod_nologging = table_modifiers.get(
"NOLOGGING");
15464 if (mod_nologging->m_found)
15466 new_tab.
setLogging(!mod_nologging->m_val_bool);
15471 DBUG_PRINT(
"info", (
"Adding column(s) supported on-line"));
15475 DBUG_PRINT(
"info",(
"Adding column not supported on-line"));
15476 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15483 if ((*alter_flags & adding).is_set())
15485 if (((altered_table->s->keys - table->s->keys) != 1) ||
15486 (*alter_flags & dropping).is_set())
15488 DBUG_PRINT(
"info",(
"Only one index can be added on-line"));
15489 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15496 if ((*alter_flags & dropping).is_set())
15498 if (((table->s->keys - altered_table->s->keys) != 1) ||
15499 (*alter_flags & adding).is_set())
15501 DBUG_PRINT(
"info",(
"Only one index can be dropped on-line"));
15502 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15506 for (i= 0; i < table->s->fields; i++)
15508 Field *field= table->field[
i];
15512 create_ndb_column(0, new_col, field, create_info);
15514 bool index_on_column =
false;
15521 for (uint j= 0; j<table->s->keys; j++)
15523 KEY* key_info= table->key_info + j;
15526 for (; key_part != end; key_part++)
15528 if (key_part->field->field_index == i)
15530 index_on_column=
true;
15537 if (index_on_column ==
false && (*alter_flags & adding).is_set())
15539 for (uint j= table->s->keys; j<altered_table->s->keys; j++)
15541 KEY* key_info= altered_table->key_info + j;
15544 for (; key_part != end; key_part++)
15546 if (key_part->field->field_index == i)
15548 index_on_column=
true;
15549 j= altered_table->s->keys;
15563 if (index_on_column)
15565 if (field->field_storage_type() == HA_SM_DISK)
15567 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15569 new_col.setStorageType(NdbDictionary::Column::StorageTypeMemory);
15571 else if (field->field_storage_type() == HA_SM_DEFAULT)
15577 new_col.setStorageType(col->getStorageType());
15580 if (col->getStorageType() != new_col.getStorageType())
15582 DBUG_PRINT(
"info", (
"Column storage media is changed"));
15583 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15586 if (field->flags & FIELD_IS_RENAMED)
15588 DBUG_PRINT(
"info", (
"Field has been renamed, copy table"));
15589 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15592 if ((field->flags & FIELD_IN_ADD_INDEX) &&
15593 (col->getStorageType() == NdbDictionary::Column::StorageTypeDisk))
15595 DBUG_PRINT(
"info", (
"add/drop index not supported for disk stored column"));
15596 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15600 if ((*alter_flags & HA_CHANGE_AUTOINCREMENT_VALUE).is_set())
15603 HA_ALTER_FLAGS change_auto_flags=
15604 change_auto_flags | HA_CHANGE_AUTOINCREMENT_VALUE;
15605 if ((*alter_flags & ~change_auto_flags).is_set())
15607 DBUG_PRINT(
"info", (
"Not only auto_increment value changed"));
15608 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15614 if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
15617 DBUG_PRINT(
"info", (
"Row format changed"));
15618 DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
15622 DBUG_PRINT(
"info", (
"Ndb supports ALTER on-line"));
15623 DBUG_RETURN(HA_ALTER_SUPPORTED_WAIT_LOCK);
15626 int ha_ndbcluster::alter_table_phase1(THD *thd,
15627 TABLE *altered_table,
15629 HA_ALTER_INFO *alter_info,
15630 HA_ALTER_FLAGS *alter_flags)
15634 Thd_ndb *thd_ndb= get_thd_ndb(thd);
15635 Ndb *ndb= get_ndb(thd);
15641 HA_ALTER_FLAGS adding;
15642 HA_ALTER_FLAGS dropping;
15644 DBUG_ENTER(
"alter_table_phase1");
15645 adding= adding | HA_ADD_INDEX | HA_ADD_UNIQUE_INDEX;
15646 dropping= dropping | HA_DROP_INDEX | HA_DROP_UNIQUE_INDEX;
15648 if (!thd_ndb->has_required_global_schema_lock(
"ha_ndbcluster::alter_table_phase1"))
15649 DBUG_RETURN(HA_ERR_NO_CONNECTION);
15652 DBUG_RETURN(HA_ERR_OUT_OF_MEM);
15653 old_tab= alter_data->old_table;
15654 new_tab= alter_data->new_table;
15655 alter_info->data= alter_data;
15658 char dbug_string[HA_MAX_ALTER_FLAGS+1];
15659 alter_flags->print(dbug_string);
15660 DBUG_PRINT(
"info", (
"altered_table %s, alter_flags %s",
15661 altered_table->s->table_name.str,
15662 (
char *) dbug_string));
15666 prepare_for_alter();
15670 DBUG_PRINT(
"info", (
"Failed to start schema transaction"));
15677 if ((*alter_flags & adding).is_set())
15685 DBUG_PRINT(
"info", (
"Adding indexes"));
15686 key_info= (
KEY*) thd->alloc(
sizeof(
KEY) * alter_info->index_add_count);
15688 for (idx_p= alter_info->index_add_buffer,
15689 idx_end_p= idx_p + alter_info->index_add_count;
15694 *key= alter_info->key_info_buffer[*idx_p];
15697 for (key_part= key->key_part; key_part < part_end; key_part++)
15698 key_part->field= table->field[key_part->fieldnr];
15700 if ((error= add_index_impl(thd, altered_table, key_info,
15701 alter_info->index_add_count)))
15707 KEY *save_key_info= table->key_info;
15708 table->key_info= key_info;
15710 table->key_info= save_key_info;
15715 if ((*alter_flags & dropping).is_set())
15721 DBUG_PRINT(
"info", (
"Renumbering indexes"));
15723 key_numbers= (uint*) thd->alloc(
sizeof(uint) * alter_info->index_drop_count);
15724 keyno_p= key_numbers;
15726 for (idx_p= alter_info->index_drop_buffer,
15727 idx_end_p= idx_p + alter_info->index_drop_count;
15729 idx_p++, keyno_p++)
15735 if ((error= prepare_drop_index(table, key_numbers,
15736 alter_info->index_drop_count)))
15743 if (alter_flags->is_set(HA_ADD_COLUMN))
15748 for (i= table->s->fields; i < altered_table->s->fields; i++)
15750 Field *field= altered_table->field[
i];
15751 DBUG_PRINT(
"info", (
"Found new field %s", field->field_name));
15752 if ((my_errno= create_ndb_column(thd, col, field, create_info,
15753 COLUMN_FORMAT_TYPE_DYNAMIC)))
15762 if (field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT &&
15763 create_info->
row_type == ROW_TYPE_DEFAULT &&
15766 push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
15767 ER_ILLEGAL_HA_CREATE_OPTION,
15768 "Converted FIXED field to DYNAMIC "
15769 "to enable on-line ADD COLUMN",
15770 field->field_name);
15776 if (alter_flags->is_set(HA_ALTER_TABLE_REORG) || alter_flags->is_set(HA_ADD_PARTITION))
15778 if (alter_flags->is_set(HA_ALTER_TABLE_REORG))
15783 else if (alter_flags->is_set(HA_ADD_PARTITION))
15793 my_errno= ndb_to_mysql_error(&err);
15800 if (dict->
endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort)
15803 DBUG_PRINT(
"info", (
"Failed to abort schema transaction"));
15808 set_ndb_share_state(m_share, NSS_INITIAL);
15810 DBUG_PRINT(
"NDB_SHARE", (
"%s binlog schema free use_count: %u",
15811 m_share->key, m_share->use_count));
15812 free_share(&m_share);
15814 DBUG_RETURN(error);
15817 int ha_ndbcluster::alter_frm(THD *thd,
const char *file,
15820 uchar *data= NULL, *pack_data= NULL;
15821 size_t length, pack_length;
15824 DBUG_ENTER(
"alter_frm");
15826 DBUG_PRINT(
"enter", (
"file: %s", file));
15828 NDBDICT *dict= alter_data->dictionary;
15831 DBUG_ASSERT(m_table != 0);
15833 DBUG_ASSERT(get_ndb_share_state(m_share) == NSS_ALTERED);
15834 if (
readfrm(file, &data, &length) ||
15835 packfrm(data, length, &pack_data, &pack_length))
15837 DBUG_PRINT(
"info", (
"Missing frm for %s", m_tabname));
15838 my_free((
char*)data, MYF(MY_ALLOW_ZERO_PTR));
15839 my_free((
char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
15841 my_error(ER_FILE_NOT_FOUND, MYF(0), file);
15845 DBUG_PRINT(
"info", (
"Table %s has changed, altering frm in ndb",
15847 const NDBTAB *old_tab= alter_data->old_table;
15850 new_tab->
setFrm(pack_data, (Uint32)pack_length);
15851 if (dict->alterTableGlobal(*old_tab, *new_tab))
15853 DBUG_PRINT(
"info", (
"On-line alter of table %s failed", m_tabname));
15855 my_error(error, MYF(0));
15857 my_free((
char*)data, MYF(MY_ALLOW_ZERO_PTR));
15858 my_free((
char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
15862 DBUG_PRINT(
"NDB_SHARE", (
"%s binlog schema(?) free use_count: %u",
15863 m_share->key, m_share->use_count));
15865 DBUG_RETURN(error);
15868 int ha_ndbcluster::alter_table_phase2(THD *thd,
15869 TABLE *altered_table,
15871 HA_ALTER_INFO *alter_info,
15872 HA_ALTER_FLAGS *alter_flags)
15876 Thd_ndb *thd_ndb= get_thd_ndb(thd);
15878 NDBDICT *dict= alter_data->dictionary;
15879 HA_ALTER_FLAGS dropping;
15881 DBUG_ENTER(
"alter_table_phase2");
15882 dropping= dropping | HA_DROP_INDEX | HA_DROP_UNIQUE_INDEX;
15884 if (!thd_ndb->has_required_global_schema_lock(
"ha_ndbcluster::alter_table_phase2"))
15886 error= HA_ERR_NO_CONNECTION;
15890 if ((*alter_flags & dropping).is_set())
15893 if ((error= final_drop_index(table)))
15900 DBUG_PRINT(
"info", (
"getting frm file %s", altered_table->s->path.str));
15902 DBUG_ASSERT(alter_data);
15903 error= alter_frm(thd, altered_table->s->path.str, alter_data);
15912 DBUG_PRINT(
"info", (
"Failed to commit schema transaction, error %u",
15917 if ((*alter_flags & HA_CHANGE_AUTOINCREMENT_VALUE).is_set())
15918 error= set_auto_inc_val(thd, create_info->auto_increment_value);
15921 DBUG_PRINT(
"info", (
"Failed to set auto_increment value"));
15928 if (dict->
endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort)
15931 DBUG_PRINT(
"info", (
"Failed to abort schema transaction"));
15936 DBUG_PRINT(
"NDB_SHARE", (
"%s binlog schema free use_count: %u",
15937 m_share->key, m_share->use_count));
15939 alter_info->data= 0;
15941 set_ndb_share_state(m_share, NSS_INITIAL);
15942 free_share(&m_share);
15943 DBUG_RETURN(error);
15946 int ha_ndbcluster::alter_table_phase3(THD *thd,
TABLE *table,
15948 HA_ALTER_INFO *alter_info,
15949 HA_ALTER_FLAGS *alter_flags)
15951 Thd_ndb *thd_ndb= get_thd_ndb(thd);
15952 DBUG_ENTER(
"alter_table_phase3");
15955 if (!thd_ndb->has_required_global_schema_lock(
"ha_ndbcluster::alter_table_phase3"))
15958 alter_info->data= 0;
15959 DBUG_RETURN(HA_ERR_NO_CONNECTION);
15962 const char *db= table->s->db.str;
15963 const char *name= table->s->table_name.str;
15969 uint32 table_id= 0, table_version= 0;
15970 DBUG_ASSERT(alter_data != 0);
15973 table_id= alter_data->table_id;
15974 table_version= alter_data->old_table_version;
15976 ndbcluster_log_schema_op(thd, thd->query(), thd->query_length(),
15978 table_id, table_version,
15979 SOT_ONLINE_ALTER_TABLE_PREPARE,
15988 Ndb* ndb= get_ndb(thd);
15989 DBUG_ASSERT(ndb != 0);
15994 const NDBTAB *new_tab= ndbtab.get_table();
15995 DBUG_ASSERT(new_tab != 0);
15998 table_id= new_tab->getObjectId();
15999 table_version= new_tab->getObjectVersion();
16008 ndbcluster_log_schema_op(thd, thd->query(), thd->query_length(),
16010 table_id, table_version,
16011 SOT_ONLINE_ALTER_TABLE_COMMIT,
16015 alter_info->data= 0;
16023 if (alter_info->extent_size >= (Uint64(1) << 32))
16028 ndb_ts->setName(alter_info->tablespace_name);
16029 ndb_ts->setExtentSize(Uint32(alter_info->extent_size));
16030 ndb_ts->setDefaultLogfileGroup(alter_info->logfile_group_name);
16037 if (alter_info->max_size > 0)
16039 my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0));
16042 ndb_df->setPath(alter_info->data_file_name);
16043 ndb_df->setSize(alter_info->initial_size);
16044 ndb_df->setTablespace(alter_info->tablespace_name);
16051 if (alter_info->undo_buffer_size >= (Uint64(1) << 32))
16057 ndb_lg->setName(alter_info->logfile_group_name);
16058 ndb_lg->setUndoBufferSize(Uint32(alter_info->undo_buffer_size));
16065 ndb_uf->setPath(alter_info->undo_file_name);
16066 ndb_uf->setSize(alter_info->initial_size);
16067 ndb_uf->setLogfileGroup(alter_info->logfile_group_name);
16071 int ndbcluster_alter_tablespace(
handlerton *hton,
16074 int is_tablespace= 0;
16078 const char *errmsg;
16080 DBUG_ENTER(
"ndbcluster_alter_tablespace");
16083 ndb= check_ndb_in_thd(thd);
16086 DBUG_RETURN(HA_ERR_NO_CONNECTION);
16090 uint32 table_id= 0, table_version= 0;
16091 switch (alter_info->ts_cmd_type){
16092 case (CREATE_TABLESPACE):
16094 error= ER_CREATE_FILEGROUP_FAILED;
16099 if (set_up_tablespace(alter_info, &ndb_ts))
16103 if (set_up_datafile(alter_info, &ndb_df))
16107 errmsg=
"TABLESPACE";
16108 if (dict->createTablespace(ndb_ts, &objid))
16110 DBUG_PRINT(
"error", (
"createTablespace returned %d", error));
16113 table_id = objid.getObjectId();
16116 NdbDictionary::Dictionary::WarnExtentRoundUp)
16118 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16120 "Extent size rounded up to kernel page size");
16122 DBUG_PRINT(
"alter_info", (
"Successfully created Tablespace"));
16123 errmsg=
"DATAFILE";
16124 if (dict->createDatafile(ndb_df))
16132 dict->dropTablespace(tmp);
16135 DBUG_PRINT(
"error", (
"createDatafile returned %d", error));
16139 NdbDictionary::Dictionary::WarnDatafileRoundUp)
16141 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16143 "Datafile size rounded up to extent size");
16147 NdbDictionary::Dictionary::WarnDatafileRoundDown)
16149 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16151 "Datafile size rounded down to extent size");
16156 case (ALTER_TABLESPACE):
16158 error= ER_ALTER_FILEGROUP_FAILED;
16159 if (alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE)
16162 if (set_up_datafile(alter_info, &ndb_df))
16166 errmsg=
" CREATE DATAFILE";
16168 if (dict->createDatafile(ndb_df,
false, &objid))
16172 table_id= objid.getObjectId();
16175 NdbDictionary::Dictionary::WarnDatafileRoundUp)
16177 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16179 "Datafile size rounded up to extent size");
16183 NdbDictionary::Dictionary::WarnDatafileRoundDown)
16185 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16187 "Datafile size rounded down to extent size");
16190 else if(alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
16195 df.getTablespaceId(&objid);
16199 strcmp(df.getPath(), alter_info->data_file_name) == 0)
16201 errmsg=
" DROP DATAFILE";
16202 if (dict->dropDatafile(df))
16209 DBUG_PRINT(
"error", (
"No such datafile"));
16210 my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0),
" NO SUCH FILE");
16216 DBUG_PRINT(
"error", (
"Unsupported alter tablespace: %d",
16217 alter_info->ts_alter_tablespace_type));
16218 DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
16223 case (CREATE_LOGFILE_GROUP):
16225 error= ER_CREATE_FILEGROUP_FAILED;
16229 if (alter_info->undo_file_name == NULL)
16234 DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
16236 if (set_up_logfile_group(alter_info, &ndb_lg))
16240 errmsg=
"LOGFILE GROUP";
16241 if (dict->createLogfileGroup(ndb_lg, &objid))
16245 table_id = objid.getObjectId();
16248 NdbDictionary::Dictionary::WarnUndobufferRoundUp)
16250 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16252 "Undo buffer size rounded up to kernel page size");
16254 DBUG_PRINT(
"alter_info", (
"Successfully created Logfile Group"));
16255 if (set_up_undofile(alter_info, &ndb_uf))
16259 errmsg=
"UNDOFILE";
16260 if (dict->createUndofile(ndb_uf))
16268 dict->dropLogfileGroup(tmp);
16273 NdbDictionary::Dictionary::WarnUndofileRoundDown)
16275 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16277 "Undofile size rounded down to kernel page size");
16281 case (ALTER_LOGFILE_GROUP):
16283 error= ER_ALTER_FILEGROUP_FAILED;
16284 if (alter_info->undo_file_name == NULL)
16289 DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
16292 if (set_up_undofile(alter_info, &ndb_uf))
16296 errmsg=
"CREATE UNDOFILE";
16298 if (dict->createUndofile(ndb_uf,
false, &objid))
16302 table_id = objid.getObjectId();
16305 NdbDictionary::Dictionary::WarnUndofileRoundDown)
16307 push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
16309 "Undofile size rounded down to kernel page size");
16313 case (DROP_TABLESPACE):
16315 error= ER_DROP_FILEGROUP_FAILED;
16316 errmsg=
"TABLESPACE";
16318 dict->getTablespace(alter_info->tablespace_name);
16321 if (dict->dropTablespace(ts))
16328 case (DROP_LOGFILE_GROUP):
16330 error= ER_DROP_FILEGROUP_FAILED;
16331 errmsg=
"LOGFILE GROUP";
16333 dict->getLogfileGroup(alter_info->logfile_group_name);
16336 if (dict->dropLogfileGroup(lg))
16342 case (CHANGE_FILE_TABLESPACE):
16344 DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
16346 case (ALTER_ACCESS_MODE_TABLESPACE):
16348 DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
16352 DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
16356 ndbcluster_log_schema_op(thd,
16357 thd->query(), thd->query_length(),
16358 "", alter_info->tablespace_name,
16359 table_id, table_version,
16360 SOT_TABLESPACE, NULL, NULL);
16362 ndbcluster_log_schema_op(thd,
16363 thd->query(), thd->query_length(),
16364 "", alter_info->logfile_group_name,
16365 table_id, table_version,
16366 SOT_LOGFILE_GROUP, NULL, NULL);
16367 DBUG_RETURN(FALSE);
16372 ndb_to_mysql_error(&err);
16374 my_error(error, MYF(0), errmsg);
16381 THD *thd= current_thd;
16385 DBUG_ENTER(
"ha_ndbcluster::get_no_parts");
16392 if (check_ndb_connection(thd))
16394 err= HA_ERR_NO_CONNECTION;
16400 if (!ndbtab_g.get_table())
16403 DBUG_RETURN(FALSE);
16410 static int ndbcluster_fill_files_table(
handlerton *hton,
16415 TABLE* table= tables->table;
16416 Ndb *ndb= check_ndb_in_thd(thd);
16421 DBUG_ENTER(
"ndbcluster_fill_files_table");
16426 ERR_RETURN(ndberr);
16428 for (i= 0; i < dflist.
count; i++)
16434 g_ndb_cluster_connection->init_get_next_node(iter);
16436 while ((
id= g_ndb_cluster_connection->get_next_alive_node(iter)))
16438 init_fill_schema_files_row(table);
16449 ERR_RETURN(ndberr);
16457 ERR_RETURN(ndberr);
16460 table->field[IS_FILES_FILE_NAME]->set_notnull();
16461 table->field[IS_FILES_FILE_NAME]->store(elt.
name, strlen(elt.
name),
16462 system_charset_info);
16463 table->field[IS_FILES_FILE_TYPE]->set_notnull();
16464 table->field[IS_FILES_FILE_TYPE]->store(
"DATAFILE",8,
16465 system_charset_info);
16466 table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
16467 table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(),
16468 strlen(df.getTablespace()),
16469 system_charset_info);
16470 table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
16471 table->field[IS_FILES_LOGFILE_GROUP_NAME]->
16472 store(ts.getDefaultLogfileGroup(),
16473 strlen(ts.getDefaultLogfileGroup()),
16474 system_charset_info);
16475 table->field[IS_FILES_ENGINE]->set_notnull();
16476 table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
16477 ndbcluster_hton_name_length,
16478 system_charset_info);
16480 table->field[IS_FILES_FREE_EXTENTS]->set_notnull();
16481 table->field[IS_FILES_FREE_EXTENTS]->store(df.getFree()
16482 / ts.getExtentSize(),
true);
16483 table->field[IS_FILES_TOTAL_EXTENTS]->set_notnull();
16484 table->field[IS_FILES_TOTAL_EXTENTS]->store(df.getSize()
16485 / ts.getExtentSize(),
true);
16486 table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
16487 table->field[IS_FILES_EXTENT_SIZE]->store(ts.getExtentSize(),
true);
16488 table->field[IS_FILES_INITIAL_SIZE]->set_notnull();
16489 table->field[IS_FILES_INITIAL_SIZE]->store(df.getSize(),
true);
16490 table->field[IS_FILES_MAXIMUM_SIZE]->set_notnull();
16491 table->field[IS_FILES_MAXIMUM_SIZE]->store(df.getSize(),
true);
16492 table->field[IS_FILES_VERSION]->set_notnull();
16495 table->field[IS_FILES_ROW_FORMAT]->set_notnull();
16496 table->field[IS_FILES_ROW_FORMAT]->store(
"FIXED", 5, system_charset_info);
16499 int len= my_snprintf(extra,
sizeof(extra),
"CLUSTER_NODE=%u",
id);
16500 table->field[IS_FILES_EXTRA]->set_notnull();
16501 table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
16502 schema_table_store_record(thd, table);
16510 ERR_RETURN(ndberr);
16512 for (i= 0; i < tslist.
count; i++)
16522 ERR_RETURN(ndberr);
16525 init_fill_schema_files_row(table);
16526 table->field[IS_FILES_FILE_TYPE]->set_notnull();
16527 table->field[IS_FILES_FILE_TYPE]->store(
"TABLESPACE", 10,
16528 system_charset_info);
16530 table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
16531 table->field[IS_FILES_TABLESPACE_NAME]->store(elt.
name,
16533 system_charset_info);
16534 table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
16535 table->field[IS_FILES_LOGFILE_GROUP_NAME]->
16536 store(ts.getDefaultLogfileGroup(),
16537 strlen(ts.getDefaultLogfileGroup()),
16538 system_charset_info);
16540 table->field[IS_FILES_ENGINE]->set_notnull();
16541 table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
16542 ndbcluster_hton_name_length,
16543 system_charset_info);
16545 table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
16546 table->field[IS_FILES_EXTENT_SIZE]->store(ts.getExtentSize(),
true);
16548 table->field[IS_FILES_VERSION]->set_notnull();
16551 schema_table_store_record(thd, table);
16558 ERR_RETURN(ndberr);
16560 for (i= 0; i < uflist.
count; i++)
16566 g_ndb_cluster_connection->init_get_next_node(iter);
16568 while ((
id= g_ndb_cluster_connection->get_next_alive_node(iter)))
16578 ERR_RETURN(ndberr);
16581 dict->getLogfileGroup(uf.getLogfileGroup());
16587 ERR_RETURN(ndberr);
16590 init_fill_schema_files_row(table);
16591 table->field[IS_FILES_FILE_NAME]->set_notnull();
16592 table->field[IS_FILES_FILE_NAME]->store(elt.
name, strlen(elt.
name),
16593 system_charset_info);
16594 table->field[IS_FILES_FILE_TYPE]->set_notnull();
16595 table->field[IS_FILES_FILE_TYPE]->store(
"UNDO LOG", 8,
16596 system_charset_info);
16598 uf.getLogfileGroupId(&objid);
16599 table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
16600 table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(),
16601 strlen(uf.getLogfileGroup()),
16602 system_charset_info);
16603 table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
16604 table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId(),
true);
16605 table->field[IS_FILES_ENGINE]->set_notnull();
16606 table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
16607 ndbcluster_hton_name_length,
16608 system_charset_info);
16610 table->field[IS_FILES_TOTAL_EXTENTS]->set_notnull();
16611 table->field[IS_FILES_TOTAL_EXTENTS]->store(uf.getSize()/4,
true);
16612 table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
16613 table->field[IS_FILES_EXTENT_SIZE]->store(4,
true);
16615 table->field[IS_FILES_INITIAL_SIZE]->set_notnull();
16616 table->field[IS_FILES_INITIAL_SIZE]->store(uf.getSize(),
true);
16617 table->field[IS_FILES_MAXIMUM_SIZE]->set_notnull();
16618 table->field[IS_FILES_MAXIMUM_SIZE]->store(uf.getSize(),
true);
16620 table->field[IS_FILES_VERSION]->set_notnull();
16624 int len= my_snprintf(extra,
sizeof(extra),
"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
16625 id, (ulong) lfg.getUndoBufferSize());
16626 table->field[IS_FILES_EXTRA]->set_notnull();
16627 table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
16628 schema_table_store_record(thd, table);
16637 ERR_RETURN(ndberr);
16639 for (i= 0; i < lfglist.
count; i++)
16649 ERR_RETURN(ndberr);
16652 init_fill_schema_files_row(table);
16653 table->field[IS_FILES_FILE_TYPE]->set_notnull();
16654 table->field[IS_FILES_FILE_TYPE]->store(
"UNDO LOG", 8,
16655 system_charset_info);
16657 table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
16658 table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.
name,
16660 system_charset_info);
16661 table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
16662 table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.
getObjectId(),
true);
16663 table->field[IS_FILES_ENGINE]->set_notnull();
16664 table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
16665 ndbcluster_hton_name_length,
16666 system_charset_info);
16668 table->field[IS_FILES_FREE_EXTENTS]->set_notnull();
16669 table->field[IS_FILES_FREE_EXTENTS]->store(lfg.getUndoFreeWords(),
true);
16670 table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
16671 table->field[IS_FILES_EXTENT_SIZE]->store(4,
true);
16673 table->field[IS_FILES_VERSION]->set_notnull();
16677 int len= my_snprintf(extra,
sizeof(extra),
16678 "UNDO_BUFFER_SIZE=%lu",
16679 (ulong) lfg.getUndoBufferSize());
16680 table->field[IS_FILES_EXTRA]->set_notnull();
16681 table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
16682 schema_table_store_record(thd, table);
16687 static int show_ndb_vars(THD *thd,
SHOW_VAR *var,
char *buff)
16689 if (!check_ndb_in_thd(thd))
16695 sizeof(ndb_status_variables_dynamic));
16698 memcpy(st_var, &ndb_status_variables_dynamic,
sizeof(ndb_status_variables_dynamic));
16700 SHOW_VAR *tmp= &(ndb_status_variables_dynamic[0]);
16701 for (; tmp->value; tmp++, i++)
16702 st_var[i].value= mem + (tmp->value - (
char*)&g_ndb_status);
16705 Thd_ndb *thd_ndb= get_thd_ndb(thd);
16707 update_status_variables(thd_ndb, st, c);
16709 var->type= SHOW_ARRAY;
16710 var->value= (
char *) st_var;
16714 SHOW_VAR ndb_status_variables_export[]= {
16715 {
"Ndb", (
char*) &show_ndb_vars, SHOW_FUNC},
16716 {
"Ndb_conflict", (
char*) &ndb_status_conflict_variables, SHOW_ARRAY},
16717 {
"Ndb", (
char*) &ndb_status_injector_variables, SHOW_ARRAY},
16718 {
"Ndb", (
char*) &ndb_status_slave_variables, SHOW_ARRAY},
16719 {
"Ndb", (
char*) &show_ndb_server_api_stats, SHOW_FUNC},
16720 {
"Ndb_index_stat", (
char*) &ndb_status_index_stat_variables, SHOW_ARRAY},
16721 {NullS, NullS, SHOW_LONG}
16724 static MYSQL_SYSVAR_ULONG(
16726 opt_ndb_cache_check_time,
16727 PLUGIN_VAR_RQCMDARG,
16728 "A dedicated thread is created to, at the given "
16729 "millisecond interval, invalidate the query cache "
16730 "if another MySQL server in the cluster has changed "
16731 "the data in the database.",
16736 ONE_YEAR_IN_SECONDS,
16741 static MYSQL_SYSVAR_ULONG(
16743 opt_ndb_extra_logging,
16744 PLUGIN_VAR_OPCMDARG,
16745 "Turn on more logging in the error log.",
16755 static MYSQL_SYSVAR_ULONG(
16757 opt_ndb_wait_connected,
16758 PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
16759 "Time (in seconds) for mysqld to wait for connection "
16760 "to cluster management and data nodes.",
16765 ONE_YEAR_IN_SECONDS,
16770 static MYSQL_SYSVAR_ULONG(
16772 opt_ndb_wait_setup,
16773 PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
16774 "Time (in seconds) for mysqld to wait for setup to "
16775 "complete (0 = no wait)",
16780 ONE_YEAR_IN_SECONDS,
16785 static MYSQL_SYSVAR_UINT(
16786 cluster_connection_pool,
16787 opt_ndb_cluster_connection_pool,
16788 PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
16789 "Pool of cluster connections to be used by mysql server.",
16801 ndb_index_stat_option_check(MYSQL_THD,
16806 ndb_index_stat_option_update(MYSQL_THD,
16811 extern char ndb_index_stat_option_buf[];
16813 static MYSQL_SYSVAR_STR(
16815 opt_ndb_index_stat_option,
16816 PLUGIN_VAR_RQCMDARG,
16817 "Comma-separated tunable options for ndb index statistics",
16818 ndb_index_stat_option_check,
16819 ndb_index_stat_option_update,
16820 ndb_index_stat_option_buf
16824 ulong opt_ndb_report_thresh_binlog_epoch_slip;
16825 static MYSQL_SYSVAR_ULONG(
16826 report_thresh_binlog_epoch_slip,
16827 opt_ndb_report_thresh_binlog_epoch_slip,
16828 PLUGIN_VAR_RQCMDARG,
16829 "Threshold on number of epochs to be behind before reporting binlog "
16830 "status. E.g. 3 means that if the difference between what epoch has "
16831 "been received from the storage nodes and what has been applied to "
16832 "the binlog is 3 or more, a status message will be sent to the cluster "
16843 ulong opt_ndb_report_thresh_binlog_mem_usage;
16844 static MYSQL_SYSVAR_ULONG(
16845 report_thresh_binlog_mem_usage,
16846 opt_ndb_report_thresh_binlog_mem_usage,
16847 PLUGIN_VAR_RQCMDARG,
16848 "Threshold on percentage of free memory before reporting binlog "
16849 "status. E.g. 10 means that if amount of available memory for "
16850 "receiving binlog data from the storage nodes goes below 10%, "
16851 "a status message will be sent to the cluster log.",
16861 my_bool opt_ndb_log_update_as_write;
16862 static MYSQL_SYSVAR_BOOL(
16863 log_update_as_write,
16864 opt_ndb_log_update_as_write,
16865 PLUGIN_VAR_OPCMDARG,
16866 "For efficiency log only after image as a write event. "
16867 "Ignore before image. This may cause compatability problems if "
16868 "replicating to other storage engines than ndbcluster.",
16875 my_bool opt_ndb_log_updated_only;
16876 static MYSQL_SYSVAR_BOOL(
16878 opt_ndb_log_updated_only,
16879 PLUGIN_VAR_OPCMDARG,
16880 "For efficiency log only updated columns. Columns are considered "
16881 "as \"updated\" even if they are updated with the same value. "
16882 "This may cause compatability problems if "
16883 "replicating to other storage engines than ndbcluster.",
16890 my_bool opt_ndb_log_orig;
16891 static MYSQL_SYSVAR_BOOL(
16894 PLUGIN_VAR_OPCMDARG,
16895 "Log originating server id and epoch in ndb_binlog_index. Each epoch "
16896 "may in this case have multiple rows in ndb_binlog_index, one for "
16897 "each originating epoch.",
16904 my_bool opt_ndb_log_bin;
16905 static MYSQL_SYSVAR_BOOL(
16908 PLUGIN_VAR_OPCMDARG,
16909 "Log ndb tables in the binary log. Option only has meaning if "
16910 "the binary log has been turned on for the server.",
16917 my_bool opt_ndb_log_binlog_index;
16918 static MYSQL_SYSVAR_BOOL(
16920 opt_ndb_log_binlog_index,
16921 PLUGIN_VAR_OPCMDARG,
16922 "Insert mapping between epochs and binlog positions into the "
16923 "ndb_binlog_index table.",
16930 static my_bool opt_ndb_log_empty_epochs;
16931 static MYSQL_SYSVAR_BOOL(
16933 opt_ndb_log_empty_epochs,
16934 PLUGIN_VAR_OPCMDARG,
16941 bool ndb_log_empty_epochs(
void)
16943 return opt_ndb_log_empty_epochs;
16946 my_bool opt_ndb_log_apply_status;
16947 static MYSQL_SYSVAR_BOOL(
16949 opt_ndb_log_apply_status,
16950 PLUGIN_VAR_OPCMDARG,
16951 "Log ndb_apply_status updates from Master in the Binlog",
16958 static MYSQL_SYSVAR_STR(
16960 opt_ndb_connectstring,
16961 PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
16962 "Connect string for ndbcluster.",
16969 static MYSQL_SYSVAR_STR(
16971 opt_ndb_connectstring,
16972 PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
16973 "Same as --ndb-connectstring",
16980 static MYSQL_SYSVAR_UINT(
16983 PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
16984 "Set nodeid for this node. Overrides node id specified "
16985 "in --ndb-connectstring.",
17000 sql_print_information(
"dbug_check_shares");
17001 for (uint i= 0; i < ndbcluster_open_tables.records; i++)
17004 sql_print_information(
" %s.%s: state: %s(%u) use_count: %u",
17005 share->db, share->table_name,
17006 get_share_state_string(share->state),
17007 (
unsigned)share->state,
17014 for (uint i= 0; i < ndbcluster_open_tables.records; i++)
17017 DBUG_ASSERT(strcmp(share->db,
"mysql") == 0);
17021 static MYSQL_THDVAR_UINT(
17023 PLUGIN_VAR_RQCMDARG,
17024 "Debug, only...check that no shares are lingering...",
17036 MYSQL_SYSVAR(cache_check_time),
17037 MYSQL_SYSVAR(extra_logging),
17038 MYSQL_SYSVAR(wait_connected),
17039 MYSQL_SYSVAR(wait_setup),
17040 MYSQL_SYSVAR(cluster_connection_pool),
17041 MYSQL_SYSVAR(report_thresh_binlog_mem_usage),
17042 MYSQL_SYSVAR(report_thresh_binlog_epoch_slip),
17043 MYSQL_SYSVAR(log_update_as_write),
17044 MYSQL_SYSVAR(log_updated_only),
17045 MYSQL_SYSVAR(log_orig),
17046 MYSQL_SYSVAR(distribution),
17047 MYSQL_SYSVAR(autoincrement_prefetch_sz),
17048 MYSQL_SYSVAR(force_send),
17049 MYSQL_SYSVAR(use_exact_count),
17050 MYSQL_SYSVAR(use_transactions),
17051 MYSQL_SYSVAR(use_copying_alter_table),
17052 MYSQL_SYSVAR(optimized_node_selection),
17053 MYSQL_SYSVAR(batch_size),
17054 MYSQL_SYSVAR(optimization_delay),
17055 MYSQL_SYSVAR(index_stat_enable),
17056 MYSQL_SYSVAR(index_stat_option),
17057 MYSQL_SYSVAR(index_stat_cache_entries),
17058 MYSQL_SYSVAR(index_stat_update_freq),
17059 MYSQL_SYSVAR(table_no_logging),
17060 MYSQL_SYSVAR(table_temporary),
17061 MYSQL_SYSVAR(log_bin),
17062 MYSQL_SYSVAR(log_binlog_index),
17063 MYSQL_SYSVAR(log_empty_epochs),
17064 MYSQL_SYSVAR(log_apply_status),
17065 MYSQL_SYSVAR(connectstring),
17066 MYSQL_SYSVAR(mgmd_host),
17067 MYSQL_SYSVAR(nodeid),
17068 MYSQL_SYSVAR(blob_read_batch_bytes),
17069 MYSQL_SYSVAR(blob_write_batch_bytes),
17070 MYSQL_SYSVAR(deferred_constraints),
17071 MYSQL_SYSVAR(join_pushdown),
17073 MYSQL_SYSVAR(check_shares),
17079 { MYSQL_HANDLERTON_INTERFACE_VERSION };
17082 #include "ha_ndbinfo.h"
17087 { MYSQL_HANDLERTON_INTERFACE_VERSION };
17089 mysql_declare_plugin(ndbcluster)
17091 MYSQL_STORAGE_ENGINE_PLUGIN,
17092 &ndbcluster_storage_engine,
17093 ndbcluster_hton_name,
17095 "Clustered, fault-tolerant tables",
17096 PLUGIN_LICENSE_GPL,
17100 ndb_status_variables_export,
17106 MYSQL_STORAGE_ENGINE_PLUGIN,
17107 &ndbinfo_storage_engine,
17109 "Sun Microsystems Inc.",
17110 "MySQL Cluster system information storage engine",
17111 PLUGIN_LICENSE_GPL,
17116 ndbinfo_system_variables,
17120 mysql_declare_plugin_end;