29 #include "sql_optimizer.h"
30 #include "sql_join_buffer.h"
// add_flag_field_to_join_cache: appends a CACHE_FIELD descriptor for a raw
// byte area (a match flag or a table's null-bytes block) to the cache's
// field-descriptor list. NOTE(review): fragmentary excerpt — most of the
// function body is missing from this view.
66 uint add_flag_field_to_join_cache(uchar *str, uint length,
CACHE_FIELD **field)
// Flag fields are never referenced from another cache, so no reference number.
73 copy->referenced_field_no= 0;
// add_table_data_fields_to_join_cache: for every field of tab's table that is
// set in field_set, fills in a CACHE_FIELD descriptor; BLOB fields apparently
// also get an entry in a separate pointer array. Returns accumulated length.
// NOTE(review): fragmentary excerpt — interior lines are missing.
108 uint add_table_data_fields_to_join_cache(
JOIN_TAB *tab,
// Iterate only until all bits counted in used_fields have been consumed.
119 uint used_fields= bitmap_bits_set(field_set);
120 for (fld_ptr= tab->table->field; used_fields; fld_ptr++)
122 if (bitmap_is_set(field_set, (*fld_ptr)->field_index))
124 len+= (*fld_ptr)->fill_cache_field(copy);
125 if (copy->
type == CACHE_BLOB)
131 copy->field= *fld_ptr;
132 copy->referenced_field_no= 0;
// Hand back the advanced blob-pointer cursor to the caller.
140 *descr_ptr= copy_ptr;
// calc_record_fields: counts how many flag fields, data fields, blobs and
// rowid fields a cached record will contain, over all tables feeding this
// join cache. NOTE(review): fragmentary excerpt — loop header and other
// interior lines are missing.
163 void JOIN_CACHE::calc_record_fields()
181 data_field_ptr_count= 0;
182 referenced_fields= 0;
// One flag field for the table's null bytes (if any nullable/uneven-bit
// fields are used) and one for the null_row marker of a maybe-null table.
187 flag_fields+=
test(tab->used_null_fields || tab->used_uneven_bit_fields);
188 flag_fields+=
test(tab->table->maybe_null);
189 fields+= tab->used_fields;
190 blobs+= tab->used_blobs;
192 fields+= tab->check_rowid_field();
// A match flag is needed for the first inner table of an outer join or of a
// FirstMatch semi-join strategy.
194 if ((with_match_flag= (join_tab->is_first_inner_for_outer_join() ||
195 (join_tab->first_sj_inner_tab == join_tab &&
196 join_tab->get_sj_strategy() == SJ_OPT_FIRST_MATCH))))
198 fields+= flag_fields;
// alloc_fields: allocates the CACHE_FIELD descriptor array plus the blob
// pointer array in one sql_alloc chunk. Returns non-zero on allocation
// failure. NOTE(review): fragmentary excerpt — fields_size computation is
// not visible here.
226 int JOIN_CACHE::alloc_fields(uint external_fields)
228 uint ptr_cnt= external_fields+blobs+1;
230 field_descr= (
CACHE_FIELD*) sql_alloc(fields_size +
// Blob pointer array lives directly after the field descriptors.
232 blob_ptr= (
CACHE_FIELD **) ((uchar *) field_descr + fields_size);
233 return (field_descr == NULL);
// create_flag_fields: builds descriptors for all flag fields of a cached
// record — the match flag ('found'), each table's null-bytes block, and the
// null_row marker of maybe-null tables. NOTE(review): fragmentary excerpt.
273 void JOIN_CACHE::create_flag_fields()
284 length+= add_flag_field_to_join_cache((uchar*) &join_tab->found,
285 sizeof(join_tab->found),
// Walk the tables whose rows are accumulated in this cache.
289 for (tab= join_tab-tables; tab <
join_tab; tab++)
294 if (tab->used_null_fields || tab->used_uneven_bit_fields)
295 length+= add_flag_field_to_join_cache(table->null_flags,
296 table->s->null_bytes,
300 if (table->maybe_null)
301 length+= add_flag_field_to_join_cache((uchar*) &table->null_row,
302 sizeof(table->null_row),
// Record how many descriptors were consumed by flag fields.
307 flag_fields= copy-field_descr;
// create_remaining_fields: adds descriptors for the data fields not yet
// covered (all read fields, or only those outside tmp_set when
// all_read_fields is false), plus a rowid field when the current row id must
// be kept. NOTE(review): fragmentary excerpt — interior lines missing.
348 void JOIN_CACHE:: create_remaining_fields(
bool all_read_fields)
351 CACHE_FIELD *copy= field_descr+flag_fields+data_field_count;
352 CACHE_FIELD **copy_ptr= blob_ptr+data_field_ptr_count;
354 for (tab= join_tab-tables; tab <
join_tab; tab++)
357 TABLE *table= tab->table;
360 rem_field_set= table->read_set;
// Otherwise cache only the read fields NOT already added via tmp_set:
// invert tmp_set and intersect with read_set.
363 bitmap_invert(&table->tmp_set);
364 bitmap_intersect(&table->tmp_set, table->read_set);
365 rem_field_set= &table->tmp_set;
368 length+= add_table_data_fields_to_join_cache(tab, rem_field_set,
369 &data_field_count, &copy,
370 &data_field_ptr_count,
// Optionally append the row id of the record as one more cached field.
374 if (tab->keep_current_rowid)
376 copy->
str= table->file->ref;
380 copy->referenced_field_no= 0;
383 if (tab->copy_current_rowid != NULL)
385 tab->copy_current_rowid= copy;
// set_constants: derives the sizing constants of the cache — whether record
// length prefixes are stored, the buffer size, and the widths of the record
// length / record offset / field offset fields. NOTE(review): fragmentary
// excerpt — several computation lines are missing.
415 void JOIN_CACHE::set_constants()
432 with_length= is_key_access() || with_match_flag;
// Upper bound of one record's footprint, used to size the offsets.
437 uint len= length + fields*
sizeof(uint)+blobs*
sizeof(uchar *) +
438 (prev_cache ? prev_cache->get_size_of_rec_offset() : 0) +
// Buffer must hold at least two maximal records.
440 buff_size= max<size_t>(join->thd->variables.join_buff_size, 2*len);
441 size_of_rec_ofs= offset_size(buff_size);
442 size_of_rec_len= blobs ? size_of_rec_ofs : offset_size(len);
443 size_of_fld_ofs= size_of_rec_len;
449 pack_length= (with_length ? size_of_rec_len : 0) +
450 (prev_cache ? prev_cache->get_size_of_rec_offset() : 0) +
452 pack_length_with_blob_ptrs= pack_length + blobs*
sizeof(uchar *);
// NOTE(review): single surviving line of what appears to be the buffer
// allocation routine — allocates the join buffer itself; surrounding
// error handling is not visible here.
468 buff= (uchar*) my_malloc(buff_size, MYF(0));
// JOIN_CACHE::init (per the DBUG tag): orchestrates cache construction —
// count fields, create flag-field descriptors, then the remaining data
// fields. NOTE(review): fragmentary excerpt.
501 DBUG_ENTER(
"JOIN_CACHE::init");
503 calc_record_fields();
508 create_flag_fields();
510 create_remaining_fields(TRUE);
// JOIN_CACHE_BKA::init (per the DBUG tag): like the base init, but first
// classifies key-argument fields as local (from this cache's tables) or
// external (from previous caches), and marks external ones as referenced
// fields of their caches so they can be read back when the key is built.
// NOTE(review): fragmentary excerpt — many interior lines are missing.
553 local_key_arg_fields= 0;
554 external_key_arg_fields= 0;
555 DBUG_ENTER(
"JOIN_CACHE_BKA::init");
557 calc_record_fields();
// Pass 1: for each cache in the chain, mark in tmp_set the fields used as
// key arguments, and split the counts into local vs. external.
573 for (tab= cache->
join_tab-cache->tables; tab < cache->join_tab ; tab++)
576 bitmap_clear_all(&tab->table->tmp_set);
580 if (!(tab->table->map & ref_item->used_tables()))
582 ref_item->walk(&Item::add_field_to_set_processor, 1,
583 (uchar *) tab->table);
585 if ((key_args= bitmap_bits_set(&tab->table->tmp_set)))
588 local_key_arg_fields+= key_args;
590 external_key_arg_fields+= key_args;
593 cache= cache->prev_cache;
597 if (alloc_fields(external_key_arg_fields))
600 create_flag_fields();
// Pass 2: walk previous caches again and flag every external key argument
// as a referenced field there (extends that cache's record format).
607 uint ext_key_arg_cnt= external_key_arg_fields;
610 while (ext_key_arg_cnt)
612 cache= cache->prev_cache;
613 for (tab= cache->
join_tab-cache->tables; tab < cache->join_tab ; tab++)
616 MY_BITMAP *key_read_set= &tab->table->tmp_set;
618 if (bitmap_is_clear_all(key_read_set))
620 copy_end= cache->field_descr+cache->fields;
621 for (copy= cache->field_descr+cache->flag_fields; copy < copy_end; copy++)
628 copy->field->table == tab->table &&
629 bitmap_is_set(key_read_set, copy->field->field_index))
633 if (!copy->referenced_field_no)
// First reference forces the owning cache to store record lengths and
// widens its pack length by one field-offset slot.
642 copy->referenced_field_no= ++cache->referenced_fields;
643 cache->with_length= TRUE;
644 cache->pack_length+= cache->get_size_of_fld_offset();
645 cache->pack_length_with_blob_ptrs+= cache->get_size_of_fld_offset();
// Local key arguments become ordinary data fields of this cache.
655 copy= field_descr+flag_fields;
656 for (tab= join_tab-tables; tab <
join_tab ; tab++)
658 length+= add_table_data_fields_to_join_cache(tab, &tab->table->tmp_set,
659 &data_field_count, &copy,
660 &data_field_ptr_count,
664 use_emb_key= check_emb_key_usage();
666 create_remaining_fields(FALSE);
// check_emb_key_usage: decides whether the cached field layout can serve
// directly as the MRR lookup key ("embedded key") without copying — only
// possible when all key arguments are local plain fields matching the key
// parts exactly. NOTE(review): fragmentary excerpt — several disqualifying
// checks and the reordering logic are only partially visible.
713 bool JOIN_CACHE_BKA::check_emb_key_usage()
721 TABLE *table= join_tab->table;
723 KEY *keyinfo= table->key_info+ref->
key;
// Any external key argument rules out embedded keys.
731 if (external_key_arg_fields != 0)
737 if (local_key_arg_fields != ref->
key_parts)
750 item= ref->
items[
i]->real_item();
751 if (item->type() != Item::FIELD_ITEM)
753 key_part= keyinfo->key_part+
i;
// Partial key segments and nullable key fields cannot be used verbatim.
754 if (key_part->key_part_flag & HA_PART_KEY_SEG)
758 if (key_part->field->maybe_null())
769 copy= field_descr+flag_fields;
770 copy_end= copy+local_key_arg_fields;
771 for ( ; copy < copy_end; copy++)
// BIT fields with uneven bits are stored differently and disqualify too
// (presumably — condition continuation not visible).
783 if (copy->field->type() == MYSQL_TYPE_BIT &&
// Reorder the local descriptors so their order matches the key parts.
802 for (j= i, copy= init_copy; i < local_key_arg_fields; i++, copy++)
804 if (fld->eq(copy->field))
810 *init_copy= key_part_copy;
// Fragment of what appears to be aux_buffer_incr: estimates how much extra
// MRR buffer space one more cached record requires, based on the expected
// rows per key. NOTE(review): fragmentary excerpt.
840 TABLE *tab= join_tab->table;
// Guard against rec_per_key statistics of 0.
842 set_if_bigger(rec_per_key, 1);
851 incr+= tab->file->stats.mrr_length_per_rec * rec_per_key;
// Fragment — returns the handler's per-record MRR length as the minimum
// auxiliary buffer size (enclosing signature not visible).
868 return join_tab->table->file->stats.mrr_length_per_rec;
// skip_index_tuple: MRR callback — restores the cached record identified by
// range_info and evaluates the pushed index condition; returns true when the
// index tuple must be skipped. NOTE(review): fragmentary excerpt.
909 bool JOIN_CACHE_BKA::skip_index_tuple(range_seq_t rseq,
char *range_info)
911 DBUG_ENTER(
"JOIN_CACHE_BKA::skip_index_tuple");
913 cache->get_record_by_pos((uchar*)range_info);
914 DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
// bka_skip_index_tuple: free-function trampoline handed to the MRR interface;
// forwards to JOIN_CACHE_BKA::skip_index_tuple. NOTE(review): fragment —
// the line recovering 'cache' from rseq is not visible.
939 bool bka_skip_index_tuple(range_seq_t rseq,
char *range_info)
941 DBUG_ENTER(
"bka_skip_index_tuple");
943 DBUG_RETURN(cache->skip_index_tuple(rseq, range_info));
// write_record_data: serializes the current row into the join buffer —
// optional record-length prefix, optional back-reference into prev_cache,
// flag fields, then data fields (with special packing for blobs and
// variable-length/stripped strings), and finally the offsets of referenced
// fields. Sets *is_full when the buffer cannot take another record.
// Returns the number of bytes written. NOTE(review): fragmentary excerpt —
// many interior lines are missing; comments below are per-fragment.
992 uint JOIN_CACHE::write_record_data(uchar *
link,
bool *is_full)
1000 uchar *rec_len_ptr= 0;
// Reserve auxiliary (MRR) buffer space for this record, capped by the
// remaining free space.
1007 uint incr= aux_buffer_incr();
1008 ulong rem= rem_space();
1009 aux_buff_size+= len+incr < rem ? incr : rem;
// Snapshot each non-null blob's length/pointer into its descriptor.
1021 for ( ; copy_ptr < copy_ptr_end; copy_ptr++)
1024 if (!blob_field->is_null())
1026 uint blob_len= blob_field->get_length();
1027 (*copy_ptr)->blob_length= blob_len;
1029 blob_field->get_ptr(&(*copy_ptr)->str);
// Is this the last record that fits (counting blob pointers)?
1044 last_record= (len+pack_length_with_blob_ptrs) > rem_space();
1054 cp+= size_of_rec_len;
// Store the reference to the matching record in the previous cache.
1063 cp+= prev_cache->get_size_of_rec_offset();
1064 prev_cache->store_rec_ref(cp, link);
1071 if (with_match_flag)
// Copy the flag fields verbatim.
1075 copy_end= field_descr+flag_fields;
1076 for ( ; copy < copy_end; copy++)
// Copy the data fields; NULL fields are skipped (offset 0 marks NULL for
// referenced fields — see read_referenced_field).
1083 copy_end= field_descr+fields;
1084 for ( ; copy < copy_end; copy++)
1086 Field *field= copy->field;
1087 if (field && field->maybe_null() && field->is_null())
1090 if (copy->referenced_field_no)
1095 if (copy->referenced_field_no)
1096 copy->
offset= cp-curr_rec_pos;
1098 if (copy->
type == CACHE_BLOB)
// Blob data for the last record may stay in the record buffer; store the
// blob image (length + pointer) instead of the data itself.
1103 last_rec_blob_data_is_in_rec_buff= 1;
1105 blob_field->get_image(cp, copy->
length+
sizeof(
char*),
1106 blob_field->charset());
1107 cp+= copy->
length+
sizeof(
char*);
1112 blob_field->get_image(cp, copy->
length,
1113 blob_field->charset());
// Non-blob packing depends on the cache-field type.
1120 switch (copy->
type) {
// VARCHAR with 1-byte length prefix.
1123 len= (uint) copy->
str[0] + 1;
1124 memcpy(cp, copy->
str, len);
// VARCHAR with 2-byte length prefix.
1129 len= uint2korr(copy->
str) + 2;
1130 memcpy(cp, copy->
str, len);
// CHAR stored with trailing spaces stripped; actual length is stored.
1133 case CACHE_STRIPPED:
1140 for (str= copy->
str, end= str+copy->
length;
1141 end > str && end[-1] ==
' ';
1143 len=(uint) (end-str);
1145 memcpy(cp+2, str, len);
// Trailer: offsets of all referenced fields, newest-numbered first.
1158 if (referenced_fields)
1161 for (copy= field_descr+flag_fields; copy < copy_end ; copy++)
1163 if (copy->referenced_field_no)
1165 store_fld_offset(cp+size_of_fld_ofs*(copy->referenced_field_no-1),
1170 cp+= size_of_fld_ofs*cnt;
// Backpatch the record length now that the full extent is known.
1174 store_rec_length(rec_len_ptr, (ulong) (cp-rec_len_ptr-size_of_rec_len));
1175 last_rec_pos= curr_rec_pos;
1177 *is_full= last_record;
1178 return (uint) (cp-init_pos);
// Fragment of what appears to be JOIN_CACHE::reset — clears the marker that
// says the last record's blob data lives in the record buffer.
1211 last_rec_blob_data_is_in_rec_buff= 0;
// put_record_in_cache: writes the current record, linking it to the current
// record of the previous cache (if chained); presumably returns whether the
// buffer is now full. NOTE(review): fragmentary excerpt.
1235 bool JOIN_CACHE::put_record_in_cache()
1240 link= prev_cache->get_curr_rec_link();
1241 write_record_data(link, &is_full);
// get_record: sequentially reads the next cached record into the table
// record buffers, chaining into the previous cache for its part of the row.
// NOTE(review): fragmentary excerpt.
1271 bool JOIN_CACHE::get_record()
1274 uchar *prev_rec_ptr= 0;
// Skip the record-length prefix, then pick up the prev-cache reference.
1276 pos+= size_of_rec_len;
1279 pos+= prev_cache->get_size_of_rec_offset();
1280 prev_rec_ptr= prev_cache->get_rec_ref(pos);
// Skip the referenced-field offset trailer of the previous record layout.
1286 pos+= referenced_fields*size_of_fld_ofs;
1293 prev_cache->get_record_by_pos(prev_rec_ptr);
// get_record_by_pos: positional (random-access) variant of get_record;
// restores 'pos' afterwards (save_pos) and recurses into the previous cache.
// NOTE(review): fragmentary excerpt.
1318 void JOIN_CACHE::get_record_by_pos(uchar *rec_ptr)
1320 uchar *save_pos= pos;
1326 uchar *prev_rec_ptr= prev_cache->get_rec_ref(rec_ptr);
1327 prev_cache->get_record_by_pos(prev_rec_ptr);
// get_match_flag_by_pos: returns the match flag of the record at rec_ptr;
// when this cache stores no match flag, the flag is looked up in the chained
// previous cache. NOTE(review): fragmentary excerpt.
1350 bool JOIN_CACHE::get_match_flag_by_pos(uchar *rec_ptr)
// The match flag, when present, is the first byte of the record.
1352 if (with_match_flag)
1353 return test(*rec_ptr);
1356 uchar *prev_rec_ptr= prev_cache->get_rec_ref(rec_ptr);
1357 return prev_cache->get_match_flag_by_pos(prev_rec_ptr);
// Fragment of what appears to be read_all_record_fields: reads every data
// field of the current record via read_record_field and returns the number
// of bytes consumed. NOTE(review): enclosing signature not visible.
1387 uchar *init_pos= pos;
1389 if (pos > last_rec_pos || !records)
// Blob data location matters for how blob fields are restored.
1398 bool blob_in_rec_buff= blob_data_is_in_rec_buff(init_pos);
1399 for ( ; copy < copy_end; copy++)
1400 read_record_field(copy, blob_in_rec_buff);
1402 return (uint) (pos-init_pos);
// Fragment — loop over flag-field descriptors (presumably read_flag_fields);
// the loop body is not visible.
1424 for ( ; copy < copy_end; copy++)
// read_record_field: unpacks one cached field at 'pos' back into its
// destination (mirror of the packing in write_record_data): blob image or
// pointer, 1/2-byte length-prefixed varchars, stripped CHAR with space
// re-padding, or a plain fixed-length copy. NOTE(review): fragmentary
// excerpt — case labels and pos advancement lines are partly missing.
1453 uint JOIN_CACHE::read_record_field(
CACHE_FIELD *copy,
bool blob_in_rec_buff)
// NULL fields were not written, so nothing to read.
1457 if (copy->field && copy->field->maybe_null() && copy->field->is_null())
1459 if (copy->
type == CACHE_BLOB)
// Blob image (length + data pointer) was stored inline …
1466 if (blob_in_rec_buff)
1468 blob_field->set_image(pos, copy->
length+
sizeof(
char*),
1469 blob_field->charset());
1470 len= copy->
length+
sizeof(
char*);
// … or the blob data follows the length bytes directly in the buffer.
1474 blob_field->set_ptr(pos, pos+copy->
length);
1475 len= copy->
length+blob_field->get_length();
1480 switch (copy->
type) {
// VARCHAR, 1-byte length prefix.
1483 len= (uint) pos[0] + 1;
1484 memcpy(copy->
str, pos, len);
// VARCHAR, 2-byte length prefix.
1488 len= uint2korr(pos) + 2;
1489 memcpy(copy->
str, pos, len);
// Stripped CHAR: restore trailing spaces up to the declared length.
1491 case CACHE_STRIPPED:
1493 len= uint2korr(pos);
1494 memcpy(copy->
str, pos+2, len);
1495 memset(copy->
str+len,
' ', copy->
length-len);
// Default: fixed-length field, plain copy.
1501 memcpy(copy->
str, pos, len);
// read_referenced_field: reads one referenced field of the record at rec_ptr
// using the field-offset trailer written by write_record_data; an offset of
// 0 encodes NULL. NOTE(review): fragmentary excerpt.
1534 bool JOIN_CACHE::read_referenced_field(
CACHE_FIELD *copy,
// Sanity check: the descriptor must belong to this cache.
1540 if (copy < field_descr || copy >= field_descr+fields)
1545 uchar *len_ptr= rec_ptr;
1547 len_ptr-= prev_cache->get_size_of_rec_offset();
1548 *len= get_rec_length(len_ptr-size_of_rec_len);
// Locate this field's offset slot at the end of the record.
1551 ptr= rec_ptr-(prev_cache ? prev_cache->get_size_of_rec_offset() : 0);
1552 offset= get_fld_offset(ptr+ *len -
1554 (referenced_fields+1-copy->referenced_field_no));
1555 bool is_null= FALSE;
// Offset 0 can only mean NULL (flag fields always precede data fields).
1556 if (offset == 0 && flag_fields)
1559 copy->field->set_null();
1562 uchar *save_pos= pos;
1563 copy->field->set_notnull();
1565 read_record_field(copy, blob_data_is_in_rec_buff(rec_ptr));
// skip_record_if_match: during sequential reading, skips the current record
// entirely when its match flag is set (used for null-complementing passes).
// Requires both a match flag and stored record lengths.
1589 bool JOIN_CACHE::skip_record_if_match()
1591 DBUG_ASSERT(with_match_flag && with_length);
// The match flag sits after the length prefix and the prev-cache reference.
1592 uint offset= size_of_rec_len;
1594 offset+= prev_cache->get_size_of_rec_offset();
1596 if (
test(*(pos+offset)))
// Jump over the whole record using the stored length.
1598 pos+= size_of_rec_len + get_rec_length(pos);
// restore_last_record: re-reads the last cached record into the table
// buffers (used after the cache has been iterated). NOTE(review): fragment.
1622 void JOIN_CACHE::restore_last_record()
1625 get_record_by_pos(last_rec_pos);
// join_records: drives the join of all buffered records with the records of
// the join_tab table — first the matching pass, then (for outer/semi joins)
// the null-complementing pass — and cascades into next_cache. Table status
// bits are saved across the operation and restored afterwards.
// NOTE(review): fragmentary excerpt — error paths and several statements are
// missing; comments are per-fragment.
1664 enum_nested_loop_state JOIN_CACHE::join_records(
bool skip_last)
1666 enum_nested_loop_state rc= NESTED_LOOP_OK;
1667 DBUG_ENTER(
"JOIN_CACHE::join_records");
// Save GARBAGE / NOT_FOUND / NULL_ROW status bits of all cached tables.
1669 table_map saved_status_bits[3]= {0, 0, 0};
1670 for (
int cnt= 1; cnt <= static_cast<int>(tables); cnt++)
1681 TABLE *
const table= join_tab[- cnt].table;
1682 const uint8 status= table->status;
1683 const table_map map= table->map;
1684 DBUG_ASSERT((status & (STATUS_DELETED | STATUS_UPDATED)) == 0);
1685 if (status & STATUS_GARBAGE)
1686 saved_status_bits[0]|= map;
1687 if (status & STATUS_NOT_FOUND)
1688 saved_status_bits[1]|= map;
1689 if (status & STATUS_NULL_ROW)
1690 saved_status_bits[2]|= map;
// For the first inner table of an outer join, start assuming the
// non-null complement holds.
1694 const bool outer_join_first_inner=
1695 join_tab->is_first_inner_for_outer_join();
1696 if (outer_join_first_inner && !join_tab->first_unmatched)
1697 join_tab->not_null_compl= TRUE;
1698 if (!join_tab->first_unmatched)
1699 if (!join_tab->first_unmatched)
1702 rc= join_matching_records(skip_last);
1703 if (rc != NESTED_LOOP_OK)
1705 if (outer_join_first_inner)
// Cascade into the next chained cache before null-complementing.
1716 rc= next_cache->join_records(skip_last);
1717 if (rc != NESTED_LOOP_OK)
1720 join_tab->not_null_compl= FALSE;
1723 tab <= join_tab->last_inner; tab++)
1727 if (join_tab->first_unmatched)
1729 if (is_key_access())
1730 restore_last_record();
// Null-complementing pass for unmatched records.
1737 rc= join_null_complements(skip_last);
1738 if (rc != NESTED_LOOP_OK)
1749 rc= next_cache->join_records(skip_last);
1750 if (rc != NESTED_LOOP_OK)
1756 DBUG_ASSERT(!is_key_access());
1765 if (outer_join_first_inner)
1773 tab <= join_tab->last_inner; tab++)
// Restore the saved status bits of every cached table.
1776 for (
int cnt= 1; cnt <= static_cast<int>(tables); cnt++)
1782 TABLE *
const table= join_tab[- cnt].table;
1783 const table_map map= table->map;
1785 if (saved_status_bits[0] & map)
1786 status|= STATUS_GARBAGE;
1787 if (saved_status_bits[1] & map)
1788 status|= STATUS_NOT_FOUND;
1789 if (saved_status_bits[2] & map)
1790 status|= STATUS_NULL_ROW;
1791 table->status= status;
1793 restore_last_record();
// JOIN_CACHE_BNL::join_matching_records: Block Nested Loop — scans the
// join_tab table once and, for each row that passes the attached condition,
// iterates over all buffered records generating extensions.
// NOTE(review): fragmentary excerpt — the empty-cache check, outer loop
// structure and error labels are partly missing.
1827 enum_nested_loop_state JOIN_CACHE_BNL::join_matching_records(
bool skip_last)
1832 enum_nested_loop_state rc= NESTED_LOOP_OK;
1835 join_tab->table->null_row= 0;
// Nothing buffered — nothing to join.
1839 return NESTED_LOOP_OK;
1848 put_record_in_cache();
// A leftover dynamic-range quick select must be discarded before the scan.
1850 if (join_tab->use_quick == QS_DYNAMIC_RANGE && join_tab->select->quick)
1852 join_tab->select->set_quick(NULL);
1855 if ((error= (*join_tab->read_first_record)(join_tab)))
1856 return error < 0 ? NESTED_LOOP_OK : NESTED_LOOP_ERROR;
1858 info= &join_tab->read_record;
1861 if (join_tab->keep_current_rowid)
1862 join_tab->table->file->position(join_tab->table->record[0]);
// Honor KILL between rows.
1864 if (join->thd->killed)
1867 join->thd->send_kill_message();
1868 return NESTED_LOOP_KILLED;
1875 if (rc == NESTED_LOOP_OK)
// Apply the pushed-down condition before pairing with cached records.
1878 bool consider_record= (!select ||
1879 (!select->skip_record(join->thd, &skip_record) &&
1881 if (select && join->thd->is_error())
1882 return NESTED_LOOP_ERROR;
1883 if (consider_record)
// Pair this table row with each buffered record (optionally skipping
// the last, partially-joined one).
1889 for (cnt= records -
test(skip_last) ; cnt; cnt--)
1898 rc= generate_full_extensions(get_curr_rec());
1899 if (rc != NESTED_LOOP_OK)
1905 }
while (!(error= info->read_record(info)));
1908 rc= NESTED_LOOP_ERROR;
// set_match_flag_if_none: sets the match flag for the record of first_inner,
// walking back through the cache chain to the cache that owns that table.
// Presumably returns whether the flag was already set. NOTE(review):
// fragmentary excerpt.
1937 bool JOIN_CACHE::set_match_flag_if_none(
JOIN_TAB *first_inner,
// Table not using a cache: the flag lives directly on the JOIN_TAB.
1940 if (!first_inner->op)
1946 if (first_inner->
found)
1950 first_inner->
found= 1;
// Walk the chain of caches back to first_inner's cache, translating the
// record reference at each step.
1955 while (cache->
join_tab != first_inner)
1957 cache= cache->prev_cache;
1959 rec_ptr= cache->get_rec_ref(rec_ptr);
1961 if (rec_ptr[0] == 0)
1964 first_inner->
found= 1;
// generate_full_extensions: for a cached record that matches the current
// table row, checks remaining conditions/weed-out and, if it survives,
// forwards the combined row to the next join step. NOTE(review):
// fragmentary excerpt.
1988 enum_nested_loop_state JOIN_CACHE::generate_full_extensions(uchar *rec_ptr)
1990 enum_nested_loop_state rc= NESTED_LOOP_OK;
1996 if (check_match(rec_ptr))
1999 if (!join_tab->check_weed_out_table ||
// Make this cached record current so later steps can reference it.
2002 set_curr_rec_link(rec_ptr);
2003 rc= (join_tab->next_select)(join, join_tab+1, 0);
2004 if (rc != NESTED_LOOP_OK)
2012 rc= NESTED_LOOP_ERROR;
// check_match: evaluates the join condition for the extended record and, for
// outer/semi joins, maintains the match flags of the first inner tables.
// NOTE(review): fragmentary excerpt — the embedding-loop over outer joins is
// only partially visible.
2043 bool JOIN_CACHE::check_match(uchar *rec_ptr)
2047 if (join_tab->select &&
2048 (join_tab->select->skip_record(join->thd, &skip_record) || skip_record))
// Only the last inner table of an outer join / FirstMatch semi-join needs
// the match-flag bookkeeping below.
2051 if (!((join_tab->first_inner &&
2052 join_tab->first_inner->last_inner == join_tab) ||
2053 (join_tab->last_sj_inner_tab == join_tab &&
2054 join_tab->get_sj_strategy() == SJ_OPT_FIRST_MATCH)))
2064 ((join_tab->get_sj_strategy() == SJ_OPT_FIRST_MATCH) ?
2065 join_tab->first_sj_inner_tab : NULL);
2069 set_match_flag_if_none(first_inner, rec_ptr);
2071 !join_tab->first_inner)
// Re-check conditions attached to embedding outer joins (fragment).
2086 (tab->select->skip_record(join->thd, &skip_record) || skip_record))
// join_null_complements: for every buffered record that never found a match,
// generates the null-complemented row of the outer join (table row replaced
// with default values and marked as a null row). NOTE(review): fragmentary
// excerpt — loop structure and exit labels are partly missing.
2123 enum_nested_loop_state JOIN_CACHE::join_null_complements(
bool skip_last)
2126 enum_nested_loop_state rc= NESTED_LOOP_OK;
2127 bool is_first_inner= join_tab == join_tab->first_unmatched;
2128 DBUG_ENTER(
"JOIN_CACHE::join_null_complements");
2132 DBUG_RETURN(NESTED_LOOP_OK);
// Key-access caches never skip the last record here.
2134 cnt= records - (is_key_access() ? 0 :
test(skip_last));
2137 DBUG_ASSERT(join_tab->first_inner);
// Rowid copier must point at the handler's ref buffer before use.
2140 if (join_tab->copy_current_rowid &&
2141 !join_tab->copy_current_rowid->buffer_is_bound())
2142 join_tab->copy_current_rowid->bind_buffer(join_tab->table->file->ref);
2146 if (join->thd->killed)
2149 join->thd->send_kill_message();
2150 rc= NESTED_LOOP_KILLED;
// Records whose match flag is set need no null complement.
2154 if (!is_first_inner || !skip_record_if_match())
2158 restore_record(join_tab->table, s->default_values);
2159 mark_as_null_row(join_tab->table);
2160 rc= generate_full_extensions(get_curr_rec());
2161 if (rc != NESTED_LOOP_OK)
// bka_range_seq_init: MRR range-sequence init callback for BKA — the
// sequence handle is simply the cache passed as init_param.
// NOTE(review): fragmentary excerpt.
2193 range_seq_t bka_range_seq_init(
void *init_param, uint n_ranges, uint
flags)
2195 DBUG_ENTER(
"bka_range_seq_init");
2198 DBUG_RETURN((range_seq_t) init_param);
// bka_range_seq_next (per the DBUG tag): produces the next lookup range from
// the cache's next key; range->ptr carries the cached record's position so
// the fetched row can be re-associated. NOTE(review): fragmentary excerpt —
// the signature and cache/ref recovery lines are not visible.
2226 DBUG_ENTER(
"bka_range_seq_next");
2229 key_range *start_key= &range->start_key;
2230 if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
// Equality range over all key parts of the ref access.
2232 start_key->keypart_map= (1 << ref->
key_parts) - 1;
2233 start_key->flag= HA_READ_KEY_EXACT;
2234 range->end_key= *start_key;
2235 range->end_key.flag= HA_READ_AFTER_KEY;
2236 range->ptr= (
char *) cache->get_curr_rec();
2237 range->range_flag= EQ_RANGE;
// bka_range_seq_skip_record: MRR callback — a range can be skipped when the
// associated cached record already has its match flag set (semi-join /
// FirstMatch). NOTE(review): fragmentary excerpt.
2271 bool bka_range_seq_skip_record(range_seq_t rseq,
char *range_info, uchar *rowid)
2273 DBUG_ENTER(
"bka_range_seq_skip_record");
2275 bool res= cache->get_match_flag_by_pos((uchar *) range_info);
// JOIN_CACHE_BKA::join_matching_records: Batched Key Access — feeds all
// buffered keys to the handler's MRR interface and, for each fetched row,
// re-associates it with its cached record and generates extensions.
// NOTE(review): fragmentary excerpt — the multi_range_read loop header and
// several lines are missing.
2329 enum_nested_loop_state JOIN_CACHE_BKA::join_matching_records(
bool skip_last)
// BKA never skips the last record: keys were built for all of them.
2332 DBUG_ASSERT(!skip_last);
2336 return NESTED_LOOP_OK;
// Range-sequence callbacks (skip-record / skip-index-tuple are optional).
2342 bka_range_seq_skip_record : 0,
2343 join_tab->cache_idx_cond ?
2344 bka_skip_index_tuple : 0 };
2346 if (init_join_matching_records(&seq_funcs, records))
2347 return NESTED_LOOP_ERROR;
2351 enum_nested_loop_state rc= NESTED_LOOP_OK;
2352 uchar *rec_ptr= NULL;
2356 if (join->thd->killed)
2359 join->thd->send_kill_message();
2360 return NESTED_LOOP_KILLED;
2362 if (join_tab->keep_current_rowid)
2363 join_tab->table->file->position(join_tab->table->record[0]);
2369 if (rc == NESTED_LOOP_OK &&
// Restore the cached partial row that produced this key, then extend it.
2372 get_record_by_pos(rec_ptr);
2373 rc= generate_full_extensions(rec_ptr);
2374 if (rc != NESTED_LOOP_OK)
2379 if (error > 0 && error != HA_ERR_END_OF_FILE)
2380 return NESTED_LOOP_ERROR;
// init_join_matching_records: prepares the handler for the MRR scan over the
// buffered keys (ranges). NOTE(review): fragmentary excerpt — return type
// and the multi_range_read_init call are only partially visible.
2414 JOIN_CACHE_BKA::init_join_matching_records(
RANGE_SEQ_IF *seq_funcs, uint ranges)
2416 handler *file= join_tab->table->file;
2418 join_tab->table->null_row= 0;
// BKA is incompatible with dynamic-range access on this table.
2421 DBUG_ASSERT(join_tab->use_quick != QS_DYNAMIC_RANGE);
2440 mrr_mode, &mrr_buff);
// NOTE(review): unidentified fragment (original lines 2453-2460) — saves the
// read position and resolves a record reference into the previous cache;
// the enclosing function signature is not visible.
2453 uchar *
const save_pos= pos;
2460 rec_ptr= prev_cache->get_rec_ref(rec_ptr);
// JOIN_CACHE_BKA::get_next_key: advances over the buffered records and
// builds (or points at, for embedded keys) the next lookup key; external key
// arguments are pulled from previous caches via read_referenced_field.
// Returns the key length (0 presumably meaning a rejected/NULL key).
// NOTE(review): fragmentary excerpt — many interior lines are missing.
2498 uint JOIN_CACHE_BKA::get_next_key(uchar ** key)
// Record lengths must be stored to step from record to record.
2509 DBUG_ASSERT(with_length);
// Loop until a non-rejected key is produced or the buffer is exhausted.
2517 for(len= 0 ; (len == 0) && pos < last_rec_pos ; pos= init_pos + rec_len)
2520 rec_len= get_rec_length(pos);
2521 pos+= size_of_rec_len;
2525 uchar *prev_rec_ptr= NULL;
2528 pos+= prev_cache->get_size_of_rec_offset();
2530 prev_rec_ptr= prev_cache->get_rec_ref(pos);
// Embedded key: the cached field layout itself is the key.
2544 len= emb_key_length;
2545 DBUG_ASSERT(len != 0);
// Otherwise read external key arguments from the chain of previous caches.
2553 if (external_key_arg_fields)
2555 uchar *rec_ptr= curr_rec_pos;
2556 uint key_arg_count= external_key_arg_fields;
2558 for (cache= prev_cache; key_arg_count; cache= cache->prev_cache)
2562 rec_ptr= cache->get_rec_ref(rec_ptr);
// Skip caches that expose no referenced fields.
2563 while (!cache->referenced_fields)
2565 cache= cache->prev_cache;
2567 rec_ptr= cache->get_rec_ref(rec_ptr);
2569 while (key_arg_count &&
2570 cache->read_referenced_field(*copy_ptr, rec_ptr, &len2))
// Then read the local key argument fields of the current record.
2585 bool blob_in_rec_buff= blob_data_is_in_rec_buff(curr_rec_pos);
2586 for ( ; copy < copy_end; copy++)
2587 read_record_field(copy, blob_in_rec_buff);
// Null-rejected keys are skipped (len stays 0).
2592 DBUG_PRINT(
"info", (
"JOIN_CACHE_BKA::get_next_key null_rejected"));
// Build the key image in the ref buffer from the restored field values.
2599 cp_buffer_from_ref(join->thd, join_tab->table, ref);
2602 DBUG_ASSERT(len != 0);
// JOIN_CACHE_BKA_UNIQUE::init (per the DBUG tag): extends BKA init with a
// hash table of unique keys at the top of the buffer — sizes the key-entry
// and key-offset fields, places the hash table, and precomputes the offsets
// used to navigate record chains. NOTE(review): fragmentary excerpt.
2644 DBUG_ENTER(
"JOIN_CACHE_BKA_UNIQUE::init");
// Each record additionally stores a next-record reference.
2655 pack_length+= get_size_of_rec_offset();
2658 uint max_size_of_key_ofs= max(2
U, get_size_of_rec_offset());
// Iterate key-offset widths until one is large enough.
2659 for (size_of_key_ofs= 2;
2660 size_of_key_ofs <= max_size_of_key_ofs;
2661 size_of_key_ofs+= 2)
2663 key_entry_length= get_size_of_rec_offset() +
2665 (use_emb_key ? get_size_of_rec_offset() : key_length);
2667 uint
n= buff_size / (pack_length+key_entry_length+size_of_key_ofs);
2673 uint max_n= buff_size / (pack_length-length+
2674 key_entry_length+size_of_key_ofs);
// ~0.7 load factor for the hash table.
2676 hash_entries= (uint) (n / 0.7);
2678 if (offset_size(max_n*key_entry_length) <=
// Hash table occupies the top of the join buffer.
2684 hash_table= buff + (buff_size-hash_entries*size_of_key_ofs);
2685 cleanup_hash_table();
2686 curr_key_entry= hash_table;
2688 pack_length+= key_entry_length;
2689 pack_length_with_blob_ptrs+= get_size_of_rec_offset() + key_entry_length;
// Offset from a record-chain entry to the start of the record fields.
2691 rec_fields_offset= get_size_of_rec_offset()+get_size_of_rec_length()+
2692 (prev_cache ? prev_cache->get_size_of_rec_offset() : 0);
2694 data_fields_offset= 0;
2699 for ( ; copy < copy_end; copy++)
2700 data_fields_offset+= copy->
length;
// Fragment of what appears to be the reset routine — clears the hash table
// when the buffer is being reset for writing.
2727 if (for_writing && hash_table)
2728 cleanup_hash_table();
2729 curr_key_entry= hash_table;
// JOIN_CACHE_BKA_UNIQUE::put_record_in_cache: writes the record prefixed
// with a next-record reference, then links it into the hash table — either
// appended to an existing key's record chain or under a freshly stored key
// entry. NOTE(review): fragmentary excerpt.
2759 JOIN_CACHE_BKA_UNIQUE::put_record_in_cache()
2762 uint key_len= key_length;
// Reserve space for the chain link before the record itself.
2765 uchar *next_ref_ptr= pos;
2766 pos+= get_size_of_rec_offset();
2769 bool is_full= JOIN_CACHE::put_record_in_cache();
2773 key= get_curr_emb_key();
// Non-embedded key: materialize it in the ref buffer.
2779 cp_buffer_from_ref(join->thd, join_tab->table, ref);
// Null-rejected keys are not entered in the hash table.
2790 DBUG_PRINT(
"info", (
"JOIN_CACHE_BKA_UNIQUE::put_record null_rejected"));
2796 if (key_search(key, key_len, &key_ref_ptr))
2798 uchar *last_next_ref_ptr;
// Key exists: splice this record into the circular record chain.
2806 last_next_ref_ptr= get_next_rec_ref(key_ref_ptr+get_size_of_key_offset());
2808 memcpy(next_ref_ptr, last_next_ref_ptr, get_size_of_rec_offset());
2810 store_next_rec_ref(last_next_ref_ptr, next_ref_ptr);
2812 store_next_rec_ref(key_ref_ptr+get_size_of_key_offset(), next_ref_ptr);
// New key: carve a key entry from the top of the buffer, make it the head
// of its hash bucket, and start a one-element record chain.
2822 uchar *cp= last_key_entry;
2823 cp-= get_size_of_rec_offset()+get_size_of_key_offset();
2824 store_next_key_ref(key_ref_ptr, cp);
2825 store_null_key_ref(cp);
2826 store_next_rec_ref(next_ref_ptr, next_ref_ptr);
2827 store_next_rec_ref(cp+get_size_of_key_offset(), next_ref_ptr);
2830 cp-= get_size_of_rec_offset();
2831 store_emb_key_ref(cp, key);
2836 memcpy(cp, key, key_len);
// get_record: skips the per-record chain link, then delegates to the base
// sequential reader.
2862 bool JOIN_CACHE_BKA_UNIQUE::get_record()
2864 pos+= get_size_of_rec_offset();
2865 return this->JOIN_CACHE::get_record();
// skip_record_if_match: same as the base version but accounts for the chain
// link preceding each record; presumably restores pos when nothing was
// skipped. NOTE(review): fragmentary excerpt.
2885 bool JOIN_CACHE_BKA_UNIQUE::skip_record_if_match()
2887 uchar *save_pos= pos;
2888 pos+= get_size_of_rec_offset();
2889 if (!this->JOIN_CACHE::skip_record_if_match())
// key_search: looks up 'key' in the hash table; walks the bucket's chain
// comparing key bytes. On return *key_ref_ptr points at the matching entry
// (or at the bucket head usable for insertion). NOTE(review): fragmentary
// excerpt.
2923 bool JOIN_CACHE_BKA_UNIQUE::key_search(uchar *key, uint key_len,
2924 uchar **key_ref_ptr)
2926 bool is_found= FALSE;
2927 uint idx= get_hash_idx(key, key_length);
2928 uchar *ref_ptr= hash_table+size_of_key_ofs*idx;
2929 while (!is_null_key_ref(ref_ptr))
2932 ref_ptr= get_next_key_ref(ref_ptr);
// Embedded keys are stored as a reference preceding the entry.
2933 next_key= use_emb_key ? get_emb_key(ref_ptr-get_size_of_rec_offset()) :
2936 if (memcmp(next_key, key, key_len) == 0)
2942 *key_ref_ptr= ref_ptr;
// get_hash_idx: simple byte-wise hash of the key, reduced modulo the number
// of hash-table entries. NOTE(review): fragmentary excerpt — initialization
// of nr/nr2 is not visible.
2963 uint JOIN_CACHE_BKA_UNIQUE::get_hash_idx(uchar* key, uint key_len)
2968 uchar *end= key+key_len;
2969 for (; pos < end ; pos++)
2971 nr^= (ulong) ((((uint) nr & 63)+nr2)*((uint) *pos))+ (nr << 8);
2974 return nr % hash_entries;
// cleanup_hash_table: zeroes the hash-bucket area (null key refs) and resets
// the key-entry watermark to the table's start.
2994 void JOIN_CACHE_BKA_UNIQUE:: cleanup_hash_table()
2996 last_key_entry= hash_table;
2997 memset(hash_table, 0, (buff+buff_size)-hash_table);
// bka_unique_range_seq_init: MRR init callback for BKA_UNIQUE — the sequence
// handle is the cache passed as init_param. NOTE(review): fragmentary
// excerpt.
3025 range_seq_t bka_unique_range_seq_init(
void *init_param, uint n_ranges,
3028 DBUG_ENTER(
"bka_unique_range_seq_init");
3031 DBUG_RETURN((range_seq_t) init_param);
// bka_unique_range_seq_next: like bka_range_seq_next, but range->ptr carries
// the key's record-chain head so all records sharing the key can be
// extended from one fetched row. NOTE(review): fragmentary excerpt.
3057 uint bka_unique_range_seq_next(range_seq_t rseq,
KEY_MULTI_RANGE *range)
3059 DBUG_ENTER(
"bka_unique_range_seq_next");
3062 key_range *start_key= &range->start_key;
3063 if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
3065 start_key->keypart_map= (1 << ref->
key_parts) - 1;
3066 start_key->flag= HA_READ_KEY_EXACT;
3067 range->end_key= *start_key;
3068 range->end_key.flag= HA_READ_AFTER_KEY;
3069 range->ptr= (
char *) cache->get_curr_key_chain();
3070 range->range_flag= EQ_RANGE;
// bka_unique_range_seq_skip_record: a key range is skippable only when every
// record in its chain already has the match flag set. NOTE(review):
// fragmentary excerpt.
3102 bool bka_unique_range_seq_skip_record(range_seq_t rseq,
char *range_info,
3105 DBUG_ENTER(
"bka_unique_range_seq_skip_record");
3107 bool res= cache->check_all_match_flags_for_key((uchar *) range_info);
// JOIN_CACHE_BKA_UNIQUE::skip_index_tuple (per the DBUG tag): the index
// tuple can be skipped only if the pushed index condition fails for EVERY
// cached record chained under the key. NOTE(review): fragmentary excerpt.
3155 DBUG_ENTER(
"JOIN_CACHE_BKA_UNIQUE::skip_index_tuple");
3157 uchar *last_rec_ref_ptr= cache->get_next_rec_ref((uchar*) range_info);
3158 uchar *next_rec_ref_ptr= last_rec_ref_ptr;
// Walk the circular record chain of this key.
3161 next_rec_ref_ptr= cache->get_next_rec_ref(next_rec_ref_ptr);
3162 uchar *rec_ptr= next_rec_ref_ptr + cache->rec_fields_offset;
3163 cache->get_record_by_pos(rec_ptr);
// One satisfied record is enough to keep the tuple.
3164 if (join_tab->cache_idx_cond->val_int())
3166 }
while(next_rec_ref_ptr != last_rec_ref_ptr);
// bka_unique_skip_index_tuple: free-function trampoline for the MRR
// interface; presumably forwards to the cache's skip_index_tuple.
// NOTE(review): fragmentary excerpt — the forwarding call is not visible.
3192 bool bka_unique_skip_index_tuple(range_seq_t rseq,
char *range_info)
3194 DBUG_ENTER(
"bka_unique_skip_index_tuple");
// JOIN_CACHE_BKA_UNIQUE::join_matching_records: BKA with key deduplication —
// runs MRR over the unique keys and, for each fetched table row, walks the
// whole record chain of the matching key, extending each cached record.
// NOTE(review): fragmentary excerpt — the MRR fetch loop header and the
// no_association re-lookup path are only partially visible.
3230 enum_nested_loop_state
3231 JOIN_CACHE_BKA_UNIQUE::join_matching_records(
bool skip_last)
3234 DBUG_ASSERT(!skip_last);
3238 return NESTED_LOOP_OK;
// Some engines cannot return the association pointer with each row.
3240 const bool no_association= mrr_mode & HA_MRR_NO_ASSOCIATION;
3243 bka_unique_range_seq_next,
3245 bka_unique_range_seq_skip_record : 0,
3246 join_tab->cache_idx_cond ?
3247 bka_unique_skip_index_tuple : 0 };
3249 if (init_join_matching_records(&seq_funcs, key_entries))
3250 return NESTED_LOOP_ERROR;
3253 uchar *key_chain_ptr;
3254 handler *file= join_tab->table->file;
3255 enum_nested_loop_state rc= NESTED_LOOP_OK;
3262 TABLE *table= join_tab->table;
3264 KEY *keyinfo= table->key_info+ref->
key;
// Without association info, re-find the key's chain in the hash table.
3273 key_chain_ptr= key_ref_ptr+get_size_of_key_offset();
3276 if (join_tab->keep_current_rowid)
3277 join_tab->table->file->position(join_tab->table->record[0]);
// Walk the circular record chain of the key.
3279 uchar *last_rec_ref_ptr= get_next_rec_ref(key_chain_ptr);
3280 uchar *next_rec_ref_ptr= last_rec_ref_ptr;
3283 next_rec_ref_ptr= get_next_rec_ref(next_rec_ref_ptr);
3284 uchar *rec_ptr= next_rec_ref_ptr+rec_fields_offset;
3286 if (join->thd->killed)
3289 join->thd->send_kill_message();
3290 return NESTED_LOOP_KILLED;
3297 if (rc == NESTED_LOOP_OK &&
3300 get_record_by_pos(rec_ptr);
3301 rc= generate_full_extensions(rec_ptr);
3302 if (rc != NESTED_LOOP_OK)
3306 while (next_rec_ref_ptr != last_rec_ref_ptr);
3309 if (error > 0 && error != HA_ERR_END_OF_FILE)
3310 return NESTED_LOOP_ERROR;
// check_all_match_flags_for_key: true only if every record chained under the
// key has its match flag set (so the whole key can be skipped).
// NOTE(review): fragmentary excerpt.
3333 bool JOIN_CACHE_BKA_UNIQUE::check_all_match_flags_for_key(uchar *key_chain_ptr)
3335 uchar *last_rec_ref_ptr= get_next_rec_ref(key_chain_ptr);
3336 uchar *next_rec_ref_ptr= last_rec_ref_ptr;
3339 next_rec_ref_ptr= get_next_rec_ref(next_rec_ref_ptr);
3340 uchar *rec_ptr= next_rec_ref_ptr+rec_fields_offset;
// One unmatched record means the key must still be probed.
3341 if (!get_match_flag_by_pos(rec_ptr))
3344 while (next_rec_ref_ptr != last_rec_ref_ptr);
// JOIN_CACHE_BKA_UNIQUE::get_next_key: iterates over the key entries stored
// downward from the top of the buffer; returns the next unique key (embedded
// or stored). NOTE(review): fragmentary excerpt.
3368 uint JOIN_CACHE_BKA_UNIQUE::get_next_key(uchar ** key)
3370 if (curr_key_entry == last_key_entry)
// Key entries grow downward, so step back by one entry.
3373 curr_key_entry-= key_entry_length;
3375 *key = use_emb_key ? get_emb_key(curr_key_entry) : curr_key_entry;
3377 DBUG_ASSERT(*key >= buff && *key < hash_table);
// NOTE(review): trailing fragment (original lines 3396-3397) — evaluates the
// pushed index condition; the enclosing function is not visible in this
// excerpt.
3396 if (join_tab->cache_idx_cond != NULL &&
3397 !join_tab->cache_idx_cond->val_int())