MySQL 5.0.11 sources for tomato
[tomato.git] / release / src / router / mysql / sql / sql_update.cc
blob0e75fdabf499af9352c638414a28ee33a603eeb7
1 /* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; version 2 of the License.
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
12 You should have received a copy of the GNU General Public License
13 along with this program; if not, write to the Free Software
14 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
18 Single table and multi table updates of tables.
19 Multi-table updates were introduced by Sinisa & Monty
22 #include "mysql_priv.h"
23 #include "sql_select.h"
24 #include "sp_head.h"
25 #include "sql_trigger.h"
26 #include "debug_sync.h"
29 /**
30 True if the table's input and output record buffers are comparable using
31 compare_records(TABLE*).
33 bool records_are_comparable(const TABLE *table) {
34 return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
35 bitmap_is_subset(table->write_set, table->read_set);
39 /**
40 Compares the input and output record buffers of the table to see if a row
41 has changed. The algorithm iterates over updated columns and if they are
42 nullable compares NULL bits in the buffer before comparing actual
43 data. Special care must be taken to compare only the relevant NULL bits and
44 mask out all others as they may be undefined. The storage engine will not
45 and should not touch them.
47 @param table The table to evaluate.
49 @return true if row has changed.
50 @return false otherwise.
52 bool compare_records(const TABLE *table)
54 DBUG_ASSERT(records_are_comparable(table));
56 if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
59 Storage engine may not have read all columns of the record. Fields
60 (including NULL bits) not in the write_set may not have been read and
61 can therefore not be compared.
62 */
63 for (Field **ptr= table->field ; *ptr != NULL; ptr++)
65 Field *field= *ptr;
66 if (bitmap_is_set(table->write_set, field->field_index))
68 if (field->real_maybe_null())
70 uchar null_byte_index= field->null_ptr - table->record[0];
72 if (((table->record[0][null_byte_index]) & field->null_bit) !=
73 ((table->record[1][null_byte_index]) & field->null_bit))
74 return TRUE;
76 if (field->cmp_binary_offset(table->s->rec_buff_length))
77 return TRUE;
80 return FALSE;
83 /*
84 The storage engine has read all columns, so it's safe to compare all bits
85 including those not in the write_set. This is cheaper than the field-by-field
86 comparison done above.
87 */
88 if (table->s->blob_fields + table->s->varchar_fields == 0)
89 // Fixed-size record: do bitwise comparison of the records
90 return cmp_record(table,record[1]);
91 /* Compare null bits */
92 if (memcmp(table->null_flags,
93 table->null_flags+table->s->rec_buff_length,
94 table->s->null_bytes))
95 return TRUE; // Diff in NULL value
96 /* Compare updated fields */
97 for (Field **ptr= table->field ; *ptr ; ptr++)
99 if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
100 (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
101 return TRUE;
103 return FALSE;
108 check that all fields are real fields
110 SYNOPSIS
111 check_fields()
112 thd thread handler
113 items Items for check
115 RETURN
116 TRUE Items can't be used in UPDATE
117 FALSE Items are OK
120 static bool check_fields(THD *thd, List<Item> &items)
122 List_iterator<Item> it(items);
123 Item *item;
124 Item_field *field;
126 while ((item= it++))
128 if (!(field= item->filed_for_view_update()))
130 /* item has name, because it comes from VIEW SELECT list */
131 my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
132 return TRUE;
135 we make temporary copy of Item_field, to avoid influence of changing
136 result_field on Item_ref which refer on this field
138 thd->change_item_tree(it.ref(), new Item_field(thd, field));
140 return FALSE;
145 Re-read record if more columns are needed for error message.
147 If we got a duplicate key error, we want to write an error
148 message containing the value of the duplicate key. If we do not have
149 all fields of the key value in record[0], we need to re-read the
150 record with a proper read_set.
152 @param[in] error error number
153 @param[in] table table
156 static void prepare_record_for_error_message(int error, TABLE *table)
158 Field **field_p;
159 Field *field;
160 uint keynr;
161 MY_BITMAP unique_map; /* Fields in offended unique. */
162 my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
163 DBUG_ENTER("prepare_record_for_error_message");
166 Only duplicate key errors print the key value.
167 If storage engine does always read all columns, we have the value alraedy.
169 if ((error != HA_ERR_FOUND_DUPP_KEY) ||
170 !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
171 DBUG_VOID_RETURN;
174 Get the number of the offended index.
175 We will see MAX_KEY if the engine cannot determine the affected index.
177 if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
178 DBUG_VOID_RETURN;
180 /* Create unique_map with all fields used by that index. */
181 bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
182 table->mark_columns_used_by_index_no_reset(keynr, &unique_map);
184 /* Subtract read_set and write_set. */
185 bitmap_subtract(&unique_map, table->read_set);
186 bitmap_subtract(&unique_map, table->write_set);
189 If the unique index uses columns that are neither in read_set
190 nor in write_set, we must re-read the record.
191 Otherwise no need to do anything.
193 if (bitmap_is_clear_all(&unique_map))
194 DBUG_VOID_RETURN;
196 /* Get identifier of last read record into table->file->ref. */
197 table->file->position(table->record[0]);
198 /* Add all fields used by unique index to read_set. */
199 bitmap_union(table->read_set, &unique_map);
200 /* Tell the engine about the new set. */
201 table->file->column_bitmaps_signal();
202 /* Read record that is identified by table->file->ref. */
203 (void) table->file->rnd_pos(table->record[1], table->file->ref);
204 /* Copy the newly read columns into the new record. */
205 for (field_p= table->field; (field= *field_p); field_p++)
206 if (bitmap_is_set(&unique_map, field->field_index))
207 field->copy_from_tmp(table->s->rec_buff_length);
209 DBUG_VOID_RETURN;
214 Process usual UPDATE
216 SYNOPSIS
217 mysql_update()
218 thd thread handler
219 fields fields for update
220 values values of fields for update
221 conds WHERE clause expression
222 order_num number of elements in ORDER BY clause
223 order ORDER BY clause list
224 limit limit clause
225 handle_duplicates how to handle duplicates
227 RETURN
228 0 - OK
229 2 - privilege check and opening table passed, but we need to convert to
230 multi-update because of view substitution
231 1 - error
234 int mysql_update(THD *thd,
235 TABLE_LIST *table_list,
236 List<Item> &fields,
237 List<Item> &values,
238 COND *conds,
239 uint order_num, ORDER *order,
240 ha_rows limit,
241 enum enum_duplicates handle_duplicates, bool ignore)
243 bool using_limit= limit != HA_POS_ERROR;
244 bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
245 bool used_key_is_modified, transactional_table, will_batch;
246 int res;
247 int error, loc_error;
248 uint used_index= MAX_KEY, dup_key_found;
249 bool need_sort= TRUE;
250 #ifndef NO_EMBEDDED_ACCESS_CHECKS
251 uint want_privilege;
252 #endif
253 uint table_count= 0;
254 ha_rows updated, found;
255 key_map old_covering_keys;
256 TABLE *table;
257 SQL_SELECT *select;
258 READ_RECORD info;
259 SELECT_LEX *select_lex= &thd->lex->select_lex;
260 bool need_reopen;
261 ulonglong id;
262 List<Item> all_fields;
263 THD::killed_state killed_status= THD::NOT_KILLED;
264 DBUG_ENTER("mysql_update");
266 for ( ; ; )
268 if (open_tables(thd, &table_list, &table_count, 0))
269 DBUG_RETURN(1);
271 if (table_list->multitable_view)
273 DBUG_ASSERT(table_list->view != 0);
274 DBUG_PRINT("info", ("Switch to multi-update"));
275 /* pass counter value */
276 thd->lex->table_count= table_count;
277 /* convert to multiupdate */
278 DBUG_RETURN(2);
280 if (!lock_tables(thd, table_list, table_count, &need_reopen))
281 break;
282 if (!need_reopen)
283 DBUG_RETURN(1);
284 close_tables_for_reopen(thd, &table_list);
287 if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||
288 (thd->fill_derived_tables() &&
289 mysql_handle_derived(thd->lex, &mysql_derived_filling)))
290 DBUG_RETURN(1);
292 thd_proc_info(thd, "init");
293 table= table_list->table;
295 /* Calculate "table->covering_keys" based on the WHERE */
296 table->covering_keys= table->s->keys_in_use;
297 table->quick_keys.clear_all();
299 #ifndef NO_EMBEDDED_ACCESS_CHECKS
300 /* Force privilege re-checking for views after they have been opened. */
301 want_privilege= (table_list->view ? UPDATE_ACL :
302 table_list->grant.want_privilege);
303 #endif
304 if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
305 DBUG_RETURN(1);
307 old_covering_keys= table->covering_keys; // Keys used in WHERE
308 /* Check the fields we are going to modify */
309 #ifndef NO_EMBEDDED_ACCESS_CHECKS
310 table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
311 table_list->register_want_access(want_privilege);
312 #endif
313 if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
314 DBUG_RETURN(1); /* purecov: inspected */
315 if (table_list->view && check_fields(thd, fields))
317 DBUG_RETURN(1);
319 if (!table_list->updatable || check_key_in_view(thd, table_list))
321 my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
322 DBUG_RETURN(1);
324 if (table->timestamp_field)
326 // Don't set timestamp column if this is modified
327 if (bitmap_is_set(table->write_set,
328 table->timestamp_field->field_index))
329 table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
330 else
332 if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
333 table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
334 bitmap_set_bit(table->write_set,
335 table->timestamp_field->field_index);
339 #ifndef NO_EMBEDDED_ACCESS_CHECKS
340 /* Check values */
341 table_list->grant.want_privilege= table->grant.want_privilege=
342 (SELECT_ACL & ~table->grant.privilege);
343 #endif
344 if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
346 free_underlaid_joins(thd, select_lex);
347 DBUG_RETURN(1); /* purecov: inspected */
350 if (select_lex->inner_refs_list.elements &&
351 fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
352 DBUG_RETURN(1);
354 if (conds)
356 Item::cond_result cond_value;
357 conds= remove_eq_conds(thd, conds, &cond_value);
358 if (cond_value == Item::COND_FALSE)
359 limit= 0; // Impossible WHERE
363 If a timestamp field settable on UPDATE is present then to avoid wrong
364 update force the table handler to retrieve write-only fields to be able
365 to compare records and detect data change.
367 if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ &&
368 table->timestamp_field &&
369 (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
370 table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
371 bitmap_union(table->read_set, table->write_set);
372 // Don't count on usage of 'only index' when calculating which key to use
373 table->covering_keys.clear_all();
375 #ifdef WITH_PARTITION_STORAGE_ENGINE
376 if (prune_partitions(thd, table, conds))
378 free_underlaid_joins(thd, select_lex);
379 my_ok(thd); // No matching records
380 DBUG_RETURN(0);
382 #endif
383 /* Update the table->file->stats.records number */
384 table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
386 select= make_select(table, 0, 0, conds, 0, &error);
387 if (error || !limit ||
388 (select && select->check_quick(thd, safe_update, limit)))
390 delete select;
391 free_underlaid_joins(thd, select_lex);
393 There was an error or the error was already sent by
394 the quick select evaluation.
395 TODO: Add error code output parameter to Item::val_xxx() methods.
396 Currently they rely on the user checking DA for
397 errors when unwinding the stack after calling Item::val_xxx().
399 if (error || thd->is_error())
401 DBUG_RETURN(1); // Error in where
403 my_ok(thd); // No matching records
404 DBUG_RETURN(0);
406 if (!select && limit != HA_POS_ERROR)
408 if ((used_index= get_index_for_order(table, order, limit)) != MAX_KEY)
409 need_sort= FALSE;
411 /* If running in safe sql mode, don't allow updates without keys */
412 if (table->quick_keys.is_clear_all())
414 thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
415 if (safe_update && !using_limit)
417 my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
418 ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
419 goto err;
422 init_ftfuncs(thd, select_lex, 1);
424 table->mark_columns_needed_for_update();
426 /* Check if we are modifying a key that we are used to search with */
428 if (select && select->quick)
430 used_index= select->quick->index;
431 used_key_is_modified= (!select->quick->unique_key_range() &&
432 select->quick->is_keys_used(table->write_set));
434 else
436 used_key_is_modified= 0;
437 if (used_index == MAX_KEY) // no index for sort order
438 used_index= table->file->key_used_on_scan;
439 if (used_index != MAX_KEY)
440 used_key_is_modified= is_key_used(table, used_index, table->write_set);
444 #ifdef WITH_PARTITION_STORAGE_ENGINE
445 if (used_key_is_modified || order ||
446 partition_key_modified(table, table->write_set))
447 #else
448 if (used_key_is_modified || order)
449 #endif
452 We can't update table directly; We must first search after all
453 matching rows before updating the table!
455 if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
456 table->add_read_columns_used_by_index(used_index);
457 else
459 table->use_all_columns();
462 /* note: We avoid sorting avoid if we sort on the used index */
463 if (order && (need_sort || used_key_is_modified))
466 Doing an ORDER BY; Let filesort find and sort the rows we are going
467 to update
468 NOTE: filesort will call table->prepare_for_position()
470 uint length= 0;
471 SORT_FIELD *sortorder;
472 ha_rows examined_rows;
474 table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
475 MYF(MY_FAE | MY_ZEROFILL));
476 if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
477 (table->sort.found_records= filesort(thd, table, sortorder, length,
478 select, limit, 1,
479 &examined_rows))
480 == HA_POS_ERROR)
482 goto err;
484 thd->examined_row_count+= examined_rows;
486 Filesort has already found and selected the rows we want to update,
487 so we don't need the where clause
489 delete select;
490 select= 0;
492 else
495 We are doing a search on a key that is updated. In this case
496 we go trough the matching rows, save a pointer to them and
497 update these in a separate loop based on the pointer.
500 IO_CACHE tempfile;
501 if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
502 DISK_BUFFER_SIZE, MYF(MY_WME)))
503 goto err;
505 /* If quick select is used, initialize it before retrieving rows. */
506 if (select && select->quick && select->quick->reset())
507 goto err;
508 table->file->try_semi_consistent_read(1);
511 When we get here, we have one of the following options:
512 A. used_index == MAX_KEY
513 This means we should use full table scan, and start it with
514 init_read_record call
515 B. used_index != MAX_KEY
516 B.1 quick select is used, start the scan with init_read_record
517 B.2 quick select is not used, this is full index scan (with LIMIT)
518 Full index scan must be started with init_read_record_idx
521 if (used_index == MAX_KEY || (select && select->quick))
522 init_read_record(&info, thd, table, select, 0, 1, FALSE);
523 else
524 init_read_record_idx(&info, thd, table, 1, used_index);
526 thd_proc_info(thd, "Searching rows for update");
527 ha_rows tmp_limit= limit;
529 while (!(error=info.read_record(&info)) && !thd->killed)
531 thd->examined_row_count++;
532 bool skip_record= FALSE;
533 if (select && select->skip_record(thd, &skip_record))
535 error= 1;
536 table->file->unlock_row();
537 break;
539 if (!skip_record)
541 if (table->file->was_semi_consistent_read())
542 continue; /* repeat the read of the same row if it still exists */
544 table->file->position(table->record[0]);
545 if (my_b_write(&tempfile,table->file->ref,
546 table->file->ref_length))
548 error=1; /* purecov: inspected */
549 break; /* purecov: inspected */
551 if (!--limit && using_limit)
553 error= -1;
554 break;
557 else
558 table->file->unlock_row();
560 if (thd->killed && !error)
561 error= 1; // Aborted
562 limit= tmp_limit;
563 table->file->try_semi_consistent_read(0);
564 end_read_record(&info);
566 /* Change select to use tempfile */
567 if (select)
569 delete select->quick;
570 if (select->free_cond)
571 delete select->cond;
572 select->quick=0;
573 select->cond=0;
575 else
577 select= new SQL_SELECT;
578 select->head=table;
580 if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
581 error=1; /* purecov: inspected */
582 select->file=tempfile; // Read row ptrs from this file
583 if (error >= 0)
584 goto err;
586 if (table->key_read)
587 table->restore_column_maps_after_mark_index();
590 if (ignore)
591 table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
593 if (select && select->quick && select->quick->reset())
594 goto err;
595 table->file->try_semi_consistent_read(1);
596 init_read_record(&info, thd, table, select, 0, 1, FALSE);
598 updated= found= 0;
600 Generate an error (in TRADITIONAL mode) or warning
601 when trying to set a NOT NULL field to NULL.
603 thd->count_cuted_fields= CHECK_FIELD_WARN;
604 thd->cuted_fields=0L;
605 thd_proc_info(thd, "Updating");
607 transactional_table= table->file->has_transactions();
608 thd->abort_on_warning= test(!ignore &&
609 (thd->variables.sql_mode &
610 (MODE_STRICT_TRANS_TABLES |
611 MODE_STRICT_ALL_TABLES)));
612 if (table->triggers &&
613 table->triggers->has_triggers(TRG_EVENT_UPDATE,
614 TRG_ACTION_AFTER))
617 The table has AFTER UPDATE triggers that might access to subject
618 table and therefore might need update to be done immediately.
619 So we turn-off the batching.
621 (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
622 will_batch= FALSE;
624 else
625 will_batch= !table->file->start_bulk_update();
628 Assure that we can use position()
629 if we need to create an error message.
631 if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
632 table->prepare_for_position();
634 while (!(error=info.read_record(&info)) && !thd->killed)
636 thd->examined_row_count++;
637 bool skip_record;
638 if (!select || (!select->skip_record(thd, &skip_record) && !skip_record))
640 if (table->file->was_semi_consistent_read())
641 continue; /* repeat the read of the same row if it still exists */
643 store_record(table,record[1]);
644 if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
645 table->triggers,
646 TRG_EVENT_UPDATE))
647 break; /* purecov: inspected */
649 found++;
651 if (!records_are_comparable(table) || compare_records(table))
653 if ((res= table_list->view_check_option(thd, ignore)) !=
654 VIEW_CHECK_OK)
656 found--;
657 if (res == VIEW_CHECK_SKIP)
658 continue;
659 else if (res == VIEW_CHECK_ERROR)
661 error= 1;
662 break;
665 if (will_batch)
668 Typically a batched handler can execute the batched jobs when:
669 1) When specifically told to do so
670 2) When it is not a good idea to batch anymore
671 3) When it is necessary to send batch for other reasons
672 (One such reason is when READ's must be performed)
674 1) is covered by exec_bulk_update calls.
675 2) and 3) is handled by the bulk_update_row method.
677 bulk_update_row can execute the updates including the one
678 defined in the bulk_update_row or not including the row
679 in the call. This is up to the handler implementation and can
680 vary from call to call.
682 The dup_key_found reports the number of duplicate keys found
683 in those updates actually executed. It only reports those if
684 the extra call with HA_EXTRA_IGNORE_DUP_KEY have been issued.
685 If this hasn't been issued it returns an error code and can
686 ignore this number. Thus any handler that implements batching
687 for UPDATE IGNORE must also handle this extra call properly.
689 If a duplicate key is found on the record included in this
690 call then it should be included in the count of dup_key_found
691 and error should be set to 0 (only if these errors are ignored).
693 error= table->file->ha_bulk_update_row(table->record[1],
694 table->record[0],
695 &dup_key_found);
696 limit+= dup_key_found;
697 updated-= dup_key_found;
699 else
701 /* Non-batched update */
702 error= table->file->ha_update_row(table->record[1],
703 table->record[0]);
705 if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
707 if (error != HA_ERR_RECORD_IS_THE_SAME)
708 updated++;
709 else
710 error= 0;
712 else if (!ignore ||
713 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
716 If (ignore && error is ignorable) we don't have to
717 do anything; otherwise...
719 if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
720 thd->fatal_error(); /* Other handler errors are fatal */
722 prepare_record_for_error_message(error, table);
723 table->file->print_error(error,MYF(0));
724 error= 1;
725 break;
729 if (table->triggers &&
730 table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
731 TRG_ACTION_AFTER, TRUE))
733 error= 1;
734 break;
737 if (!--limit && using_limit)
740 We have reached end-of-file in most common situations where no
741 batching has occurred and if batching was supposed to occur but
742 no updates were made and finally when the batch execution was
743 performed without error and without finding any duplicate keys.
744 If the batched updates were performed with errors we need to
745 check and if no error but duplicate key's found we need to
746 continue since those are not counted for in limit.
748 if (will_batch &&
749 ((error= table->file->exec_bulk_update(&dup_key_found)) ||
750 dup_key_found))
752 if (error)
754 /* purecov: begin inspected */
756 The handler should not report error of duplicate keys if they
757 are ignored. This is a requirement on batching handlers.
759 prepare_record_for_error_message(error, table);
760 table->file->print_error(error,MYF(0));
761 error= 1;
762 break;
763 /* purecov: end */
766 Either an error was found and we are ignoring errors or there
767 were duplicate keys found. In both cases we need to correct
768 the counters and continue the loop.
770 limit= dup_key_found; //limit is 0 when we get here so need to +
771 updated-= dup_key_found;
773 else
775 error= -1; // Simulate end of file
776 break;
780 else
781 table->file->unlock_row();
782 thd->row_count++;
783 if (thd->is_error())
785 error= 1;
786 break;
789 table->auto_increment_field_not_null= FALSE;
790 dup_key_found= 0;
792 Caching the killed status to pass as the arg to query event constuctor;
793 The cached value can not change whereas the killed status can
794 (externally) since this point and change of the latter won't affect
795 binlogging.
796 It's assumed that if an error was set in combination with an effective
797 killed status then the error is due to killing.
799 killed_status= thd->killed; // get the status of the volatile
800 // simulated killing after the loop must be ineffective for binlogging
801 DBUG_EXECUTE_IF("simulate_kill_bug27571",
803 thd->killed= THD::KILL_QUERY;
804 };);
805 error= (killed_status == THD::NOT_KILLED)? error : 1;
807 if (error &&
808 will_batch &&
809 (loc_error= table->file->exec_bulk_update(&dup_key_found)))
811 An error has occurred when a batched update was performed and returned
812 an error indication. It cannot be an allowed duplicate key error since
813 we require the batching handler to treat this as a normal behavior.
815 Otherwise we simply remove the number of duplicate keys records found
816 in the batched update.
819 /* purecov: begin inspected */
820 thd->fatal_error();
821 prepare_record_for_error_message(loc_error, table);
822 table->file->print_error(loc_error,MYF(0));
823 error= 1;
824 /* purecov: end */
826 else
827 updated-= dup_key_found;
828 if (will_batch)
829 table->file->end_bulk_update();
830 table->file->try_semi_consistent_read(0);
832 if (!transactional_table && updated > 0)
833 thd->transaction.stmt.modified_non_trans_table= TRUE;
835 end_read_record(&info);
836 delete select;
837 thd_proc_info(thd, "end");
838 VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
841 Invalidate the table in the query cache if something changed.
842 This must be before binlog writing and ha_autocommit_...
844 if (updated)
846 query_cache_invalidate3(thd, table_list, 1);
850 error < 0 means really no error at all: we processed all rows until the
851 last one without error. error > 0 means an error (e.g. unique key
852 violation and no IGNORE or REPLACE). error == 0 is also an error (if
853 preparing the record or invoking before triggers fails). See
854 ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
855 Sometimes we want to binlog even if we updated no rows, in case user used
856 it to be sure master and slave are in same state.
858 if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
860 if (mysql_bin_log.is_open())
862 int errcode= 0;
863 if (error < 0)
864 thd->clear_error();
865 else
866 errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
868 if (thd->binlog_query(THD::ROW_QUERY_TYPE,
869 thd->query(), thd->query_length(),
870 transactional_table, FALSE, errcode))
872 error=1; // Rollback update
875 if (thd->transaction.stmt.modified_non_trans_table)
876 thd->transaction.all.modified_non_trans_table= TRUE;
878 DBUG_ASSERT(transactional_table || !updated || thd->transaction.stmt.modified_non_trans_table);
879 free_underlaid_joins(thd, select_lex);
881 /* If LAST_INSERT_ID(X) was used, report X */
882 id= thd->arg_of_last_insert_id_function ?
883 thd->first_successful_insert_id_in_prev_stmt : 0;
885 if (error < 0)
887 char buff[MYSQL_ERRMSG_SIZE];
888 my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
889 (ulong) thd->cuted_fields);
890 thd->row_count_func=
891 (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
892 my_ok(thd, (ulong) thd->row_count_func, id, buff);
893 DBUG_PRINT("info",("%ld records updated", (long) updated));
895 thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
896 thd->abort_on_warning= 0;
897 DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
899 err:
900 delete select;
901 free_underlaid_joins(thd, select_lex);
902 table->set_keyread(FALSE);
903 thd->abort_on_warning= 0;
904 DBUG_RETURN(1);
908 Prepare items in UPDATE statement
910 SYNOPSIS
911 mysql_prepare_update()
912 thd - thread handler
913 table_list - global/local table list
914 conds - conditions
915 order_num - number of ORDER BY list entries
916 order - ORDER BY clause list
918 RETURN VALUE
919 FALSE OK
920 TRUE error
922 bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
923 Item **conds, uint order_num, ORDER *order)
925 Item *fake_conds= 0;
926 #ifndef NO_EMBEDDED_ACCESS_CHECKS
927 TABLE *table= table_list->table;
928 #endif
929 List<Item> all_fields;
930 SELECT_LEX *select_lex= &thd->lex->select_lex;
931 DBUG_ENTER("mysql_prepare_update");
934 Statement-based replication of UPDATE ... LIMIT is not safe as order of
935 rows is not defined, so in mixed mode we go to row-based.
937 Note that we may consider a statement as safe if ORDER BY primary_key
938 is present. However it may confuse users to see very similiar statements
939 replicated differently.
941 if (thd->lex->current_select->select_limit)
943 thd->lex->set_stmt_unsafe();
944 thd->set_current_stmt_binlog_row_based_if_mixed();
946 #ifndef NO_EMBEDDED_ACCESS_CHECKS
947 table_list->grant.want_privilege= table->grant.want_privilege=
948 (SELECT_ACL & ~table->grant.privilege);
949 table_list->register_want_access(SELECT_ACL);
950 #endif
952 thd->lex->allow_sum_func= 0;
954 if (setup_tables_and_check_access(thd, &select_lex->context,
955 &select_lex->top_join_list,
956 table_list,
957 &select_lex->leaf_tables,
958 FALSE, UPDATE_ACL, SELECT_ACL) ||
959 setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
960 select_lex->setup_ref_array(thd, order_num) ||
961 setup_order(thd, select_lex->ref_pointer_array,
962 table_list, all_fields, all_fields, order) ||
963 setup_ftfuncs(select_lex))
964 DBUG_RETURN(TRUE);
966 /* Check that we are not using table that we are updating in a sub select */
968 TABLE_LIST *duplicate;
969 if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
971 update_non_unique_table_error(table_list, "UPDATE", duplicate);
972 my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
973 DBUG_RETURN(TRUE);
976 select_lex->fix_prepare_information(thd, conds, &fake_conds);
977 DBUG_RETURN(FALSE);
981 /***************************************************************************
982 Update multiple tables from join
983 ***************************************************************************/
986 Get table map for list of Item_field
989 static table_map get_table_map(List<Item> *items)
991 List_iterator_fast<Item> item_it(*items);
992 Item_field *item;
993 table_map map= 0;
995 while ((item= (Item_field *) item_it++))
996 map|= item->used_tables();
997 DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
998 return map;
1003 make update specific preparation and checks after opening tables
1005 SYNOPSIS
1006 mysql_multi_update_prepare()
1007 thd thread handler
1009 RETURN
1010 FALSE OK
1011 TRUE Error
1014 int mysql_multi_update_prepare(THD *thd)
1016 LEX *lex= thd->lex;
1017 TABLE_LIST *table_list= lex->query_tables;
1018 TABLE_LIST *tl, *leaves;
1019 List<Item> *fields= &lex->select_lex.item_list;
1020 table_map tables_for_update;
1021 bool update_view= 0;
1023 if this multi-update was converted from usual update, here is table
1024 counter else junk will be assigned here, but then replaced with real
1025 count in open_tables()
1027 uint table_count= lex->table_count;
1028 const bool using_lock_tables= thd->locked_tables != 0;
1029 bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
1030 bool need_reopen= FALSE;
1031 DBUG_ENTER("mysql_multi_update_prepare");
1033 /* following need for prepared statements, to run next time multi-update */
1034 thd->lex->sql_command= SQLCOM_UPDATE_MULTI;
1036 reopen_tables:
1038 /* open tables and create derived ones, but do not lock and fill them */
1039 if (((original_multiupdate || need_reopen) &&
1040 open_tables(thd, &table_list, &table_count, 0)) ||
1041 mysql_handle_derived(lex, &mysql_derived_prepare))
1042 DBUG_RETURN(TRUE);
1044 setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables()
1045 second time, but this call will do nothing (there are check for second
1046 call in setup_tables()).
1049 if (setup_tables_and_check_access(thd, &lex->select_lex.context,
1050 &lex->select_lex.top_join_list,
1051 table_list,
1052 &lex->select_lex.leaf_tables, FALSE,
1053 UPDATE_ACL, SELECT_ACL))
1054 DBUG_RETURN(TRUE);
1056 if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
1057 DBUG_RETURN(TRUE);
1059 for (tl= table_list; tl ; tl= tl->next_local)
1061 if (tl->view)
1063 update_view= 1;
1064 break;
1068 if (update_view && check_fields(thd, *fields))
1070 DBUG_RETURN(TRUE);
1073 thd->table_map_for_update= tables_for_update= get_table_map(fields);
1076 Setup timestamp handling and locking mode
1078 leaves= lex->select_lex.leaf_tables;
1079 for (tl= leaves; tl; tl= tl->next_leaf)
1081 TABLE *table= tl->table;
1082 /* Only set timestamp column if this is not modified */
1083 if (table->timestamp_field &&
1084 bitmap_is_set(table->write_set,
1085 table->timestamp_field->field_index))
1086 table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
1088 /* if table will be updated then check that it is unique */
1089 if (table->map & tables_for_update)
1091 if (!tl->updatable || check_key_in_view(thd, tl))
1093 my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE");
1094 DBUG_RETURN(TRUE);
1097 DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
1099 If table will be updated we should not downgrade lock for it and
1100 leave it as is.
1103 else
1105 DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias));
1107 If we are using the binary log, we need TL_READ_NO_INSERT to get
1108 correct order of statements. Otherwise, we use a TL_READ lock to
1109 improve performance.
1111 tl->lock_type= read_lock_type_for_table(thd, lex, tl);
1112 tl->updating= 0;
1113 /* Update TABLE::lock_type accordingly. */
1114 if (!tl->placeholder() && !using_lock_tables)
1115 tl->table->reginfo.lock_type= tl->lock_type;
1118 for (tl= table_list; tl; tl= tl->next_local)
1120 /* Check access privileges for table */
1121 if (!tl->derived)
1123 uint want_privilege= tl->updating ? UPDATE_ACL : SELECT_ACL;
1124 if (check_access(thd, want_privilege,
1125 tl->db, &tl->grant.privilege, 0, 0,
1126 test(tl->schema_table)) ||
1127 check_grant(thd, want_privilege, tl, 0, 1, 0))
1128 DBUG_RETURN(TRUE);
1132 /* check single table update for view compound from several tables */
1133 for (tl= table_list; tl; tl= tl->next_local)
1135 if (tl->effective_algorithm == VIEW_ALGORITHM_MERGE)
1137 TABLE_LIST *for_update= 0;
1138 if (tl->check_single_table(&for_update, tables_for_update, tl))
1140 my_error(ER_VIEW_MULTIUPDATE, MYF(0),
1141 tl->view_db.str, tl->view_name.str);
1142 DBUG_RETURN(-1);
1147 /* now lock and fill tables */
1148 if (!thd->stmt_arena->is_stmt_prepare() &&
1149 lock_tables(thd, table_list, table_count, &need_reopen))
1151 if (!need_reopen)
1152 DBUG_RETURN(TRUE);
1154 DBUG_PRINT("info", ("lock_tables failed, reopening"));
1157 We have to reopen tables since some of them were altered or dropped
1158 during lock_tables() or something was done with their triggers.
1159 Let us do some cleanups to be able do setup_table() and setup_fields()
1160 once again.
1162 List_iterator_fast<Item> it(*fields);
1163 Item *item;
1164 while ((item= it++))
1165 item->cleanup();
1167 /* We have to cleanup translation tables of views. */
1168 for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global)
1169 tbl->cleanup_items();
1172 To not to hog memory (as a result of the
1173 unit->reinit_exec_mechanism() call below):
1175 lex->unit.cleanup();
1177 for (SELECT_LEX *sl= lex->all_selects_list;
1179 sl= sl->next_select_in_list())
1181 SELECT_LEX_UNIT *unit= sl->master_unit();
1182 unit->reinit_exec_mechanism(); // reset unit->prepared flags
1184 Reset 'clean' flag back to force normal execution of
1185 unit->cleanup() in Prepared_statement::cleanup_stmt()
1186 (call to lex->unit.cleanup() above sets this flag to TRUE).
1188 unit->unclean();
1192 Also we need to cleanup Natural_join_column::table_field items.
1193 To not to traverse a join tree we will cleanup whole
1194 thd->free_list (in PS execution mode that list may not contain
1195 items from 'fields' list, so the cleanup above is necessary to.
1197 cleanup_items(thd->free_list);
1198 cleanup_items(thd->stmt_arena->free_list);
1199 close_tables_for_reopen(thd, &table_list);
1201 DEBUG_SYNC(thd, "multi_update_reopen_tables");
1203 goto reopen_tables;
1207 Check that we are not using table that we are updating, but we should
1208 skip all tables of UPDATE SELECT itself
1210 lex->select_lex.exclude_from_table_unique_test= TRUE;
1211 /* We only need SELECT privilege for columns in the values list */
1212 for (tl= leaves; tl; tl= tl->next_leaf)
1214 TABLE *table= tl->table;
1215 TABLE_LIST *tlist;
1216 if (!(tlist= tl->top_table())->derived)
1218 tlist->grant.want_privilege=
1219 (SELECT_ACL & ~tlist->grant.privilege);
1220 table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
1222 DBUG_PRINT("info", ("table: %s want_privilege: %u", tl->alias,
1223 (uint) table->grant.want_privilege));
1224 if (tl->lock_type != TL_READ &&
1225 tl->lock_type != TL_READ_NO_INSERT)
1227 TABLE_LIST *duplicate;
1228 if ((duplicate= unique_table(thd, tl, table_list, 0)))
1230 update_non_unique_table_error(table_list, "UPDATE", duplicate);
1231 DBUG_RETURN(TRUE);
1236 Set exclude_from_table_unique_test value back to FALSE. It is needed for
1237 further check in multi_update::prepare whether to use record cache.
1239 lex->select_lex.exclude_from_table_unique_test= FALSE;
1241 if (thd->fill_derived_tables() &&
1242 mysql_handle_derived(lex, &mysql_derived_filling))
1243 DBUG_RETURN(TRUE);
1245 DBUG_RETURN (FALSE);
1250 Setup multi-update handling and call SELECT to do the join
1253 bool mysql_multi_update(THD *thd,
1254 TABLE_LIST *table_list,
1255 List<Item> *fields,
1256 List<Item> *values,
1257 COND *conds,
1258 ulonglong options,
1259 enum enum_duplicates handle_duplicates, bool ignore,
1260 SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
1262 multi_update *result;
1263 bool res;
1264 DBUG_ENTER("mysql_multi_update");
1266 if (!(result= new multi_update(table_list,
1267 thd->lex->select_lex.leaf_tables,
1268 fields, values,
1269 handle_duplicates, ignore)))
1270 DBUG_RETURN(TRUE);
1272 thd->abort_on_warning= test(thd->variables.sql_mode &
1273 (MODE_STRICT_TRANS_TABLES |
1274 MODE_STRICT_ALL_TABLES));
1276 List<Item> total_list;
1278 res= mysql_select(thd, &select_lex->ref_pointer_array,
1279 table_list, select_lex->with_wild,
1280 total_list,
1281 conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
1282 (ORDER *)NULL,
1283 options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
1284 OPTION_SETUP_TABLES_DONE,
1285 result, unit, select_lex);
1287 DBUG_PRINT("info",("res: %d report_error: %d", res, (int) thd->is_error()));
1288 res|= thd->is_error();
1289 if (unlikely(res))
1291 /* If we had a another error reported earlier then this will be ignored */
1292 result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
1293 result->abort();
1295 delete result;
1296 thd->abort_on_warning= 0;
1297 DBUG_RETURN(FALSE);
1301 multi_update::multi_update(TABLE_LIST *table_list,
1302 TABLE_LIST *leaves_list,
1303 List<Item> *field_list, List<Item> *value_list,
1304 enum enum_duplicates handle_duplicates_arg,
1305 bool ignore_arg)
1306 :all_tables(table_list), leaves(leaves_list), update_tables(0),
1307 tmp_tables(0), updated(0), found(0), fields(field_list),
1308 values(value_list), table_count(0), copy_field(0),
1309 handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
1310 transactional_tables(0), ignore(ignore_arg), error_handled(0)
1315 Connect fields with tables and create list of tables that are updated
1318 int multi_update::prepare(List<Item> &not_used_values,
1319 SELECT_LEX_UNIT *lex_unit)
1321 TABLE_LIST *table_ref;
1322 SQL_I_List<TABLE_LIST> update;
1323 table_map tables_to_update;
1324 Item_field *item;
1325 List_iterator_fast<Item> field_it(*fields);
1326 List_iterator_fast<Item> value_it(*values);
1327 uint i, max_fields;
1328 uint leaf_table_count= 0;
1329 DBUG_ENTER("multi_update::prepare");
1331 thd->count_cuted_fields= CHECK_FIELD_WARN;
1332 thd->cuted_fields=0L;
1333 thd_proc_info(thd, "updating main table");
1335 tables_to_update= get_table_map(fields);
1337 if (!tables_to_update)
1339 my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
1340 DBUG_RETURN(1);
1344 We gather the set of columns read during evaluation of SET expression in
1345 TABLE::tmp_set by pointing TABLE::read_set to it and then restore it after
1346 setup_fields().
1348 for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1350 TABLE *table= table_ref->table;
1351 if (tables_to_update & table->map)
1353 DBUG_ASSERT(table->read_set == &table->def_read_set);
1354 table->read_set= &table->tmp_set;
1355 bitmap_clear_all(table->read_set);
1360 We have to check values after setup_tables to get covering_keys right in
1361 reference tables
1364 int error= setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0);
1366 for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1368 TABLE *table= table_ref->table;
1369 if (tables_to_update & table->map)
1371 table->read_set= &table->def_read_set;
1372 bitmap_union(table->read_set, &table->tmp_set);
1374 If a timestamp field settable on UPDATE is present then to avoid wrong
1375 update force the table handler to retrieve write-only fields to be able
1376 to compare records and detect data change.
1378 if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ &&
1379 table->timestamp_field &&
1380 (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
1381 table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
1382 bitmap_union(table->read_set, table->write_set);
1386 if (error)
1387 DBUG_RETURN(1);
1390 Save tables beeing updated in update_tables
1391 update_table->shared is position for table
1392 Don't use key read on tables that are updated
1395 update.empty();
1396 for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1398 /* TODO: add support of view of join support */
1399 TABLE *table=table_ref->table;
1400 leaf_table_count++;
1401 if (tables_to_update & table->map)
1403 TABLE_LIST *tl= (TABLE_LIST*) thd->memdup(table_ref,
1404 sizeof(*tl));
1405 if (!tl)
1406 DBUG_RETURN(1);
1407 update.link_in_list(tl, &tl->next_local);
1408 tl->shared= table_count++;
1409 table->no_keyread=1;
1410 table->covering_keys.clear_all();
1411 table->pos_in_table_list= tl;
1412 if (table->triggers &&
1413 table->triggers->has_triggers(TRG_EVENT_UPDATE,
1414 TRG_ACTION_AFTER))
1417 The table has AFTER UPDATE triggers that might access to subject
1418 table and therefore might need update to be done immediately.
1419 So we turn-off the batching.
1421 (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
1427 table_count= update.elements;
1428 update_tables= update.first;
1430 tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
1431 tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
1432 table_count);
1433 fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1434 table_count);
1435 values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1436 table_count);
1437 if (thd->is_fatal_error)
1438 DBUG_RETURN(1);
1439 for (i=0 ; i < table_count ; i++)
1441 fields_for_table[i]= new List_item;
1442 values_for_table[i]= new List_item;
1444 if (thd->is_fatal_error)
1445 DBUG_RETURN(1);
1447 /* Split fields into fields_for_table[] and values_by_table[] */
1449 while ((item= (Item_field *) field_it++))
1451 Item *value= value_it++;
1452 uint offset= item->field->table->pos_in_table_list->shared;
1453 fields_for_table[offset]->push_back(item);
1454 values_for_table[offset]->push_back(value);
1456 if (thd->is_fatal_error)
1457 DBUG_RETURN(1);
1459 /* Allocate copy fields */
1460 max_fields=0;
1461 for (i=0 ; i < table_count ; i++)
1462 set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
1463 copy_field= new Copy_field[max_fields];
1464 DBUG_RETURN(thd->is_fatal_error != 0);
1469 Check if table is safe to update on fly
1471 SYNOPSIS
1472 safe_update_on_fly()
1473 thd Thread handler
1474 join_tab How table is used in join
1475 all_tables List of tables
1477 NOTES
1478 We can update the first table in join on the fly if we know that
1479 a row in this table will never be read twice. This is true under
1480 the following conditions:
1482 - No column is both written to and read in SET expressions.
1484 - We are doing a table scan and the data is in a separate file (MyISAM) or
1485 if we don't update a clustered key.
1487 - We are doing a range scan and we don't update the scan key or
1488 the primary key for a clustered table handler.
1490 - Table is not joined to itself.
1492 This function gets information about fields to be updated from
1493 the TABLE::write_set bitmap.
1495 WARNING
1496 This code is a bit dependent of how make_join_readinfo() works.
1498 The field table->tmp_set is used for keeping track of which fields are
1499 read during evaluation of the SET expression. See multi_update::prepare.
1501 RETURN
1502 0 Not safe to update
1503 1 Safe to update
1506 static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
1507 TABLE_LIST *table_ref, TABLE_LIST *all_tables)
1509 TABLE *table= join_tab->table;
1510 if (unique_table(thd, table_ref, all_tables, 0))
1511 return 0;
1512 switch (join_tab->type) {
1513 case JT_SYSTEM:
1514 case JT_CONST:
1515 case JT_EQ_REF:
1516 return TRUE; // At most one matching row
1517 case JT_REF:
1518 case JT_REF_OR_NULL:
1519 return !is_key_used(table, join_tab->ref.key, table->write_set);
1520 case JT_ALL:
1521 if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
1522 return FALSE;
1523 /* If range search on index */
1524 if (join_tab->quick)
1525 return !join_tab->quick->is_keys_used(table->write_set);
1526 /* If scanning in clustered key */
1527 if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
1528 table->s->primary_key < MAX_KEY)
1529 return !is_key_used(table, table->s->primary_key, table->write_set);
1530 return TRUE;
1531 default:
1532 break; // Avoid compler warning
1534 return FALSE;
1540 Initialize table for multi table
1542 IMPLEMENTATION
1543 - Update first table in join on the fly, if possible
1544 - Create temporary tables to store changed values for all other tables
1545 that are updated (and main_table if the above doesn't hold).
1548 bool
1549 multi_update::initialize_tables(JOIN *join)
1551 TABLE_LIST *table_ref;
1552 DBUG_ENTER("initialize_tables");
1554 if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
1555 DBUG_RETURN(1);
1556 main_table=join->join_tab->table;
1557 table_to_update= 0;
1559 /* Any update has at least one pair (field, value) */
1560 DBUG_ASSERT(fields->elements);
1562 Only one table may be modified by UPDATE of an updatable view.
1563 For an updatable view first_table_for_update indicates this
1564 table.
1565 For a regular multi-update it refers to some updated table.
1567 TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;
1569 /* Create a temporary table for keys to all tables, except main table */
1570 for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
1572 TABLE *table=table_ref->table;
1573 uint cnt= table_ref->shared;
1574 List<Item> temp_fields;
1575 ORDER group;
1576 TMP_TABLE_PARAM *tmp_param;
1578 if (ignore)
1579 table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
1580 if (table == main_table) // First table in join
1582 if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
1584 table->mark_columns_needed_for_update();
1585 table_to_update= table; // Update table on the fly
1586 continue;
1589 table->mark_columns_needed_for_update();
1590 table->prepare_for_position();
1593 enable uncacheable flag if we update a view with check option
1594 and check option has a subselect, otherwise, the check option
1595 can be evaluated after the subselect was freed as independent
1596 (See full_local in JOIN::join_free()).
1598 if (table_ref->check_option && !join->select_lex->uncacheable)
1600 SELECT_LEX_UNIT *tmp_unit;
1601 SELECT_LEX *sl;
1602 for (tmp_unit= join->select_lex->first_inner_unit();
1603 tmp_unit;
1604 tmp_unit= tmp_unit->next_unit())
1606 for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
1608 if (sl->master_unit()->item)
1610 join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
1611 goto loop_end;
1616 loop_end:
1618 if (table == first_table_for_update && table_ref->check_option)
1620 table_map unupdated_tables= table_ref->check_option->used_tables() &
1621 ~first_table_for_update->map;
1622 for (TABLE_LIST *tbl_ref =leaves;
1623 unupdated_tables && tbl_ref;
1624 tbl_ref= tbl_ref->next_leaf)
1626 if (unupdated_tables & tbl_ref->table->map)
1627 unupdated_tables&= ~tbl_ref->table->map;
1628 else
1629 continue;
1630 if (unupdated_check_opt_tables.push_back(tbl_ref->table))
1631 DBUG_RETURN(1);
1635 tmp_param= tmp_table_param+cnt;
1638 Create a temporary table to store all fields that are changed for this
1639 table. The first field in the temporary table is a pointer to the
1640 original row so that we can find and update it. For the updatable
1641 VIEW a few following fields are rowids of tables used in the CHECK
1642 OPTION condition.
1645 List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
1646 TABLE *tbl= table;
1649 Field_string *field= new Field_string(tbl->file->ref_length, 0,
1650 tbl->alias, &my_charset_bin);
1651 if (!field)
1652 DBUG_RETURN(1);
1653 field->init(tbl);
1655 The field will be converted to varstring when creating tmp table if
1656 table to be updated was created by mysql 4.1. Deny this.
1658 field->can_alter_field_type= 0;
1659 Item_field *ifield= new Item_field((Field *) field);
1660 if (!ifield)
1661 DBUG_RETURN(1);
1662 ifield->maybe_null= 0;
1663 if (temp_fields.push_back(ifield))
1664 DBUG_RETURN(1);
1665 } while ((tbl= tbl_it++));
1667 temp_fields.concat(fields_for_table[cnt]);
1669 /* Make an unique key over the first field to avoid duplicated updates */
1670 bzero((char*) &group, sizeof(group));
1671 group.asc= 1;
1672 group.item= (Item**) temp_fields.head_ref();
1674 tmp_param->quick_group=1;
1675 tmp_param->field_count=temp_fields.elements;
1676 tmp_param->group_parts=1;
1677 tmp_param->group_length= table->file->ref_length;
1678 if (!(tmp_tables[cnt]=create_tmp_table(thd,
1679 tmp_param,
1680 temp_fields,
1681 (ORDER*) &group, 0, 0,
1682 TMP_TABLE_ALL_COLUMNS,
1683 HA_POS_ERROR,
1684 (char *) "")))
1685 DBUG_RETURN(1);
1686 tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
1688 DBUG_RETURN(0);
1692 multi_update::~multi_update()
1694 TABLE_LIST *table;
1695 for (table= update_tables ; table; table= table->next_local)
1697 table->table->no_keyread= table->table->no_cache= 0;
1698 if (ignore)
1699 table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
1702 if (tmp_tables)
1704 for (uint cnt = 0; cnt < table_count; cnt++)
1706 if (tmp_tables[cnt])
1708 free_tmp_table(thd, tmp_tables[cnt]);
1709 tmp_table_param[cnt].cleanup();
1713 if (copy_field)
1714 delete [] copy_field;
1715 thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting
1716 DBUG_ASSERT(trans_safe || !updated ||
1717 thd->transaction.all.modified_non_trans_table);
1721 bool multi_update::send_data(List<Item> &not_used_values)
1723 TABLE_LIST *cur_table;
1724 DBUG_ENTER("multi_update::send_data");
1726 for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
1728 TABLE *table= cur_table->table;
1729 uint offset= cur_table->shared;
1731 Check if we are using outer join and we didn't find the row
1732 or if we have already updated this row in the previous call to this
1733 function.
1735 The same row may be presented here several times in a join of type
1736 UPDATE t1 FROM t1,t2 SET t1.a=t2.a
1738 In this case we will do the update for the first found row combination.
1739 The join algorithm guarantees that we will not find the a row in
1740 t1 several times.
1742 if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
1743 continue;
1745 if (table == table_to_update)
1747 table->status|= STATUS_UPDATED;
1748 store_record(table,record[1]);
1749 if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
1750 *values_for_table[offset], 0,
1751 table->triggers,
1752 TRG_EVENT_UPDATE))
1753 DBUG_RETURN(1);
1756 Reset the table->auto_increment_field_not_null as it is valid for
1757 only one row.
1759 table->auto_increment_field_not_null= FALSE;
1760 found++;
1761 if (!records_are_comparable(table) || compare_records(table))
1763 int error;
1764 if ((error= cur_table->view_check_option(thd, ignore)) !=
1765 VIEW_CHECK_OK)
1767 found--;
1768 if (error == VIEW_CHECK_SKIP)
1769 continue;
1770 else if (error == VIEW_CHECK_ERROR)
1771 DBUG_RETURN(1);
1773 if (!updated++)
1776 Inform the main table that we are going to update the table even
1777 while we may be scanning it. This will flush the read cache
1778 if it's used.
1780 main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
1782 if ((error=table->file->ha_update_row(table->record[1],
1783 table->record[0])) &&
1784 error != HA_ERR_RECORD_IS_THE_SAME)
1786 updated--;
1787 if (!ignore ||
1788 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
1791 If (ignore && error == is ignorable) we don't have to
1792 do anything; otherwise...
1794 if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
1795 thd->fatal_error(); /* Other handler errors are fatal */
1797 prepare_record_for_error_message(error, table);
1798 table->file->print_error(error,MYF(0));
1799 DBUG_RETURN(1);
1802 else
1804 if (error == HA_ERR_RECORD_IS_THE_SAME)
1806 error= 0;
1807 updated--;
1809 /* non-transactional or transactional table got modified */
1810 /* either multi_update class' flag is raised in its branch */
1811 if (table->file->has_transactions())
1812 transactional_tables= 1;
1813 else
1815 trans_safe= 0;
1816 thd->transaction.stmt.modified_non_trans_table= TRUE;
1820 if (table->triggers &&
1821 table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
1822 TRG_ACTION_AFTER, TRUE))
1823 DBUG_RETURN(1);
1825 else
1827 int error;
1828 TABLE *tmp_table= tmp_tables[offset];
1830 For updatable VIEW store rowid of the updated table and
1831 rowids of tables used in the CHECK OPTION condition.
1833 uint field_num= 0;
1834 List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
1835 TABLE *tbl= table;
1838 tbl->file->position(tbl->record[0]);
1839 memcpy((char*) tmp_table->field[field_num]->ptr,
1840 (char*) tbl->file->ref, tbl->file->ref_length);
1842 For outer joins a rowid field may have no NOT_NULL_FLAG,
1843 so we have to reset NULL bit for this field.
1844 (set_notnull() resets NULL bit only if available).
1846 tmp_table->field[field_num]->set_notnull();
1847 field_num++;
1848 } while ((tbl= tbl_it++));
1850 /* Store regular updated fields in the row. */
1851 fill_record(thd,
1852 tmp_table->field + 1 + unupdated_check_opt_tables.elements,
1853 *values_for_table[offset], 1);
1855 /* Write row, ignoring duplicated updates to a row */
1856 error= tmp_table->file->ha_write_row(tmp_table->record[0]);
1857 if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
1859 if (error &&
1860 create_myisam_from_heap(thd, tmp_table,
1861 tmp_table_param + offset, error, 1))
1863 do_update= 0;
1864 DBUG_RETURN(1); // Not a table_is_full error
1866 found++;
1870 DBUG_RETURN(0);
1874 void multi_update::send_error(uint errcode,const char *err)
1876 /* First send error what ever it is ... */
1877 my_error(errcode, MYF(0), err);
1881 void multi_update::abort()
1883 /* the error was handled or nothing deleted and no side effects return */
1884 if (error_handled ||
1885 (!thd->transaction.stmt.modified_non_trans_table && !updated))
1886 return;
1888 /* Something already updated so we have to invalidate cache */
1889 if (updated)
1890 query_cache_invalidate3(thd, update_tables, 1);
1892 If all tables that has been updated are trans safe then just do rollback.
1893 If not attempt to do remaining updates.
1896 if (! trans_safe)
1898 DBUG_ASSERT(thd->transaction.stmt.modified_non_trans_table);
1899 if (do_update && table_count > 1)
1901 /* Add warning here */
1903 todo/fixme: do_update() is never called with the arg 1.
1904 should it change the signature to become argless?
1906 VOID(do_updates());
1909 if (thd->transaction.stmt.modified_non_trans_table)
1912 The query has to binlog because there's a modified non-transactional table
1913 either from the query's list or via a stored routine: bug#13270,23333
1915 if (mysql_bin_log.is_open())
1918 THD::killed status might not have been set ON at time of an error
1919 got caught and if happens later the killed error is written
1920 into repl event.
1922 int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
1923 /* the error of binary logging is ignored */
1924 (void)thd->binlog_query(THD::ROW_QUERY_TYPE,
1925 thd->query(), thd->query_length(),
1926 transactional_tables, FALSE, errcode);
1928 thd->transaction.all.modified_non_trans_table= TRUE;
1930 DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
1934 int multi_update::do_updates()
1936 TABLE_LIST *cur_table;
1937 int local_error= 0;
1938 ha_rows org_updated;
1939 TABLE *table, *tmp_table;
1940 List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
1941 DBUG_ENTER("multi_update::do_updates");
1943 do_update= 0; // Don't retry this function
1944 if (!found)
1945 DBUG_RETURN(0);
1946 for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
1948 uint offset= cur_table->shared;
1950 table = cur_table->table;
1951 if (table == table_to_update)
1952 continue; // Already updated
1953 org_updated= updated;
1954 tmp_table= tmp_tables[cur_table->shared];
1955 tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
1956 (void) table->file->ha_rnd_init(0);
1957 table->file->extra(HA_EXTRA_NO_CACHE);
1959 check_opt_it.rewind();
1960 while(TABLE *tbl= check_opt_it++)
1962 if (tbl->file->ha_rnd_init(1))
1963 goto err;
1964 tbl->file->extra(HA_EXTRA_CACHE);
1968 Setup copy functions to copy fields from temporary table
1970 List_iterator_fast<Item> field_it(*fields_for_table[offset]);
1971 Field **field= tmp_table->field +
1972 1 + unupdated_check_opt_tables.elements; // Skip row pointers
1973 Copy_field *copy_field_ptr= copy_field, *copy_field_end;
1974 for ( ; *field ; field++)
1976 Item_field *item= (Item_field* ) field_it++;
1977 (copy_field_ptr++)->set(item->field, *field, 0);
1979 copy_field_end=copy_field_ptr;
1981 if ((local_error = tmp_table->file->ha_rnd_init(1)))
1982 goto err;
1984 for (;;)
1986 if (thd->killed && trans_safe)
1987 goto err;
1988 if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
1990 if (local_error == HA_ERR_END_OF_FILE)
1991 break;
1992 if (local_error == HA_ERR_RECORD_DELETED)
1993 continue; // May happen on dup key
1994 goto err;
1997 /* call rnd_pos() using rowids from temporary table */
1998 check_opt_it.rewind();
1999 TABLE *tbl= table;
2000 uint field_num= 0;
2003 if((local_error=
2004 tbl->file->rnd_pos(tbl->record[0],
2005 (uchar *) tmp_table->field[field_num]->ptr)))
2006 goto err;
2007 field_num++;
2008 } while((tbl= check_opt_it++));
2010 table->status|= STATUS_UPDATED;
2011 store_record(table,record[1]);
2013 /* Copy data from temporary table to current table */
2014 for (copy_field_ptr=copy_field;
2015 copy_field_ptr != copy_field_end;
2016 copy_field_ptr++)
2017 (*copy_field_ptr->do_copy)(copy_field_ptr);
2019 if (table->triggers &&
2020 table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2021 TRG_ACTION_BEFORE, TRUE))
2022 goto err2;
2024 if (!records_are_comparable(table) || compare_records(table))
2026 int error;
2027 if ((error= cur_table->view_check_option(thd, ignore)) !=
2028 VIEW_CHECK_OK)
2030 if (error == VIEW_CHECK_SKIP)
2031 continue;
2032 else if (error == VIEW_CHECK_ERROR)
2033 goto err;
2035 if ((local_error=table->file->ha_update_row(table->record[1],
2036 table->record[0])) &&
2037 local_error != HA_ERR_RECORD_IS_THE_SAME)
2039 if (!ignore ||
2040 table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
2041 goto err;
2043 if (local_error != HA_ERR_RECORD_IS_THE_SAME)
2044 updated++;
2045 else
2046 local_error= 0;
2049 if (table->triggers &&
2050 table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2051 TRG_ACTION_AFTER, TRUE))
2052 goto err2;
2055 if (updated != org_updated)
2057 if (table->file->has_transactions())
2058 transactional_tables= 1;
2059 else
2061 trans_safe= 0; // Can't do safe rollback
2062 thd->transaction.stmt.modified_non_trans_table= TRUE;
2065 (void) table->file->ha_rnd_end();
2066 (void) tmp_table->file->ha_rnd_end();
2067 check_opt_it.rewind();
2068 while (TABLE *tbl= check_opt_it++)
2069 tbl->file->ha_rnd_end();
2072 DBUG_RETURN(0);
2074 err:
2076 thd->fatal_error();
2077 prepare_record_for_error_message(local_error, table);
2078 table->file->print_error(local_error,MYF(0));
2081 err2:
2082 (void) table->file->ha_rnd_end();
2083 (void) tmp_table->file->ha_rnd_end();
2084 check_opt_it.rewind();
2085 while (TABLE *tbl= check_opt_it++)
2086 tbl->file->ha_rnd_end();
2088 if (updated != org_updated)
2090 if (table->file->has_transactions())
2091 transactional_tables= 1;
2092 else
2094 trans_safe= 0;
2095 thd->transaction.stmt.modified_non_trans_table= TRUE;
2098 DBUG_RETURN(1);
2102 /* out: 1 if error, 0 if success */
2104 bool multi_update::send_eof()
2106 char buff[STRING_BUFFER_USUAL_SIZE];
2107 ulonglong id;
2108 THD::killed_state killed_status= THD::NOT_KILLED;
2109 DBUG_ENTER("multi_update::send_eof");
2110 thd_proc_info(thd, "updating reference tables");
2113 Does updates for the last n - 1 tables, returns 0 if ok;
2114 error takes into account killed status gained in do_updates()
2116 int local_error = (table_count) ? do_updates() : 0;
2118 if local_error is not set ON until after do_updates() then
2119 later carried out killing should not affect binlogging.
2121 killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
2122 thd_proc_info(thd, "end");
2124 /* We must invalidate the query cache before binlog writing and
2125 ha_autocommit_... */
2127 if (updated)
2129 query_cache_invalidate3(thd, update_tables, 1);
2132 Write the SQL statement to the binlog if we updated
2133 rows and we succeeded or if we updated some non
2134 transactional tables.
2136 The query has to binlog because there's a modified non-transactional table
2137 either from the query's list or via a stored routine: bug#13270,23333
2140 DBUG_ASSERT(trans_safe || !updated ||
2141 thd->transaction.stmt.modified_non_trans_table);
2142 if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table)
2144 if (mysql_bin_log.is_open())
2146 int errcode= 0;
2147 if (local_error == 0)
2148 thd->clear_error();
2149 else
2150 errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
2151 if (thd->binlog_query(THD::ROW_QUERY_TYPE,
2152 thd->query(), thd->query_length(),
2153 transactional_tables, FALSE, errcode))
2155 local_error= 1; // Rollback update
2158 if (thd->transaction.stmt.modified_non_trans_table)
2159 thd->transaction.all.modified_non_trans_table= TRUE;
2161 if (local_error != 0)
2162 error_handled= TRUE; // to force early leave from ::send_error()
2164 if (local_error > 0) // if the above log write did not fail ...
2166 /* Safety: If we haven't got an error before (can happen in do_updates) */
2167 my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
2168 MYF(0));
2169 DBUG_RETURN(TRUE);
2172 id= thd->arg_of_last_insert_id_function ?
2173 thd->first_successful_insert_id_in_prev_stmt : 0;
2174 my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO),
2175 (ulong) found, (ulong) updated, (ulong) thd->cuted_fields);
2176 thd->row_count_func=
2177 (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
2178 ::my_ok(thd, (ulong) thd->row_count_func, id, buff);
2179 DBUG_RETURN(FALSE);