/*****************************************************************************

Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/

/**************************************************//**
@file row/row0vers.c
Row versions

Created 2/6/1997 Heikki Tuuri
*******************************************************/

#include "row0vers.h"

#ifdef UNIV_NONINL
#include "row0vers.ic"
#endif

#include "dict0dict.h"
#include "dict0boot.h"
#include "btr0btr.h"
#include "mach0data.h"
#include "trx0rseg.h"
#include "trx0trx.h"
#include "trx0roll.h"
#include "trx0undo.h"
#include "trx0purge.h"
#include "trx0rec.h"
#include "que0que.h"
#include "row0row.h"
#include "row0upd.h"
#include "rem0cmp.h"
#include "read0read.h"
#include "lock0lock.h"

/*****************************************************************//**
Finds out if an active transaction has inserted or modified a secondary
index record. NOTE: the kernel mutex is temporarily released in this
function!
@return NULL if committed, else the active transaction */
UNIV_INTERN
trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
        const rec_t*    rec,    /*!< in: record in a secondary index */
        dict_index_t*   index,  /*!< in: the secondary index */
        const ulint*    offsets)/*!< in: rec_get_offsets(rec, index) */
{
        dict_index_t*   clust_index;
        rec_t*          clust_rec;
        ulint*          clust_offsets;
        rec_t*          version;
        trx_id_t        trx_id;
        mem_heap_t*     heap;
        mem_heap_t*     heap2;
        dtuple_t*       row;
        dtuple_t*       entry   = NULL; /* assignment to eliminate compiler
                                        warning */
        trx_t*          trx;
        ulint           rec_del;
#ifdef UNIV_DEBUG
        ulint           err;
#endif /* UNIV_DEBUG */
        mtr_t           mtr;
        ulint           comp;

        ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

        mutex_exit(&kernel_mutex);

        mtr_start(&mtr);

        /* Search for the clustered index record: this is a time-consuming
        operation: therefore we release the kernel mutex; also, the release
        is required by the latching order convention. The latch on the
        clustered index locks the top of the stack of versions. We also
        reserve purge_latch to lock the bottom of the version stack. */

        clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
                                      &clust_index, &mtr);
        if (!clust_rec) {
                /* In a rare case it is possible that no clust rec is found
                for a secondary index record: if in row0umod.c
                row_undo_mod_remove_clust_low() we have already removed the
                clust rec, while purge is still cleaning and removing
                secondary index records associated with earlier versions of
                the clustered index record. In that case there cannot be
                any implicit lock on the secondary index record, because
                an active transaction which has modified the secondary index
                record has also modified the clustered index record. And in
                a rollback we always undo the modifications to secondary index
                records before the clustered index record. */

                mutex_enter(&kernel_mutex);
                mtr_commit(&mtr);

                return(NULL);
        }

        heap = mem_heap_create(1024);
        clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
                                        ULINT_UNDEFINED, &heap);
        trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

        mtr_s_lock(&(purge_sys->latch), &mtr);

        mutex_enter(&kernel_mutex);

        trx = NULL;
        if (!trx_is_active(trx_id)) {
                /* The transaction that modified or inserted clust_rec is no
                longer active: no implicit lock on rec */
                goto exit_func;
        }

        if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
                                      clust_offsets, TRUE)) {
                /* Corruption noticed: try to avoid a crash by returning */
                goto exit_func;
        }

        comp = page_rec_is_comp(rec);
        ut_ad(index->table == clust_index->table);
        ut_ad(!!comp == dict_table_is_comp(index->table));
        ut_ad(!comp == !page_rec_is_comp(clust_rec));

        /* We look up if some earlier version of the clustered index
        record, one modified by the trx_id transaction, would require rec
        to be in a different state (delete marked or unmarked, or have
        different field values, or not existing). If there is such a
        version, then rec was modified by the trx_id transaction, and it
        has an implicit x-lock on rec. Note that if clust_rec itself would
        require rec to be in a different state, then the trx_id transaction
        has not yet had time to modify rec, and does not necessarily have
        an implicit x-lock on rec. */

        rec_del = rec_get_deleted_flag(rec, comp);
        trx = NULL;

        version = clust_rec;

        for (;;) {
                rec_t*          prev_version;
                ulint           vers_del;
                row_ext_t*      ext;
                trx_id_t        prev_trx_id;

                mutex_exit(&kernel_mutex);

                /* While we retrieve an earlier version of clust_rec, we
                release the kernel mutex, because it may take time to access
                the disk. After the release, we have to check if the trx_id
                transaction is still active. We keep the semaphore in mtr on
                the clust_rec page, so that no other transaction can update
                it and get an implicit x-lock on rec. */
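
                /* Rotate the heaps: the previous version is built into
                a fresh heap, and the old heap, which still holds the
                current version and clust_offsets, is freed once the
                build is done. */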
                heap2 = heap;
                heap = mem_heap_create(1024);
#ifdef UNIV_DEBUG
                err =
#endif /* UNIV_DEBUG */
                trx_undo_prev_version_build(clust_rec, &mtr, version,
                                            clust_index, clust_offsets,
                                            heap, &prev_version);
                mem_heap_free(heap2); /* free version and clust_offsets */

                if (prev_version == NULL) {
                        mutex_enter(&kernel_mutex);

                        if (!trx_is_active(trx_id)) {
                                /* Transaction no longer active: no
                                implicit x-lock */

                                break;
                        }

                        /* If the transaction is still active,
                        clust_rec must be a fresh insert, because no
                        previous version was found. */
                        ut_ad(err == DB_SUCCESS);

                        /* It was a freshly inserted version: there is an
                        implicit x-lock on rec */

                        trx = trx_get_on_id(trx_id);

                        break;
                }

                clust_offsets = rec_get_offsets(prev_version, clust_index,
                                                NULL, ULINT_UNDEFINED, &heap);

                vers_del = rec_get_deleted_flag(prev_version, comp);
                prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
                                                 clust_offsets);
                /* The stack of versions is locked by mtr. Thus, it
                is safe to fetch the prefixes for externally stored
                columns. */
                row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
                                clust_offsets, NULL, &ext, heap);
                entry = row_build_index_entry(row, ext, index, heap);
                /* entry may be NULL if a record was inserted in place
                of a deleted record, and the BLOB pointers of the new
                record were not initialized yet. But in that case,
                prev_version should be NULL. */
                ut_a(entry);

                mutex_enter(&kernel_mutex);

                if (!trx_is_active(trx_id)) {
                        /* Transaction no longer active: no implicit x-lock */

                        break;
                }

                /* If we get here, we know that the trx_id transaction is
                still active and it has modified prev_version. Let us check
                if prev_version would require rec to be in a different
                state. */

                /* The previous version of clust_rec must be
                accessible, because the transaction is still active
                and clust_rec was not a fresh insert. */
                ut_ad(err == DB_SUCCESS);

                /* We check if entry and rec are identical in the
                alphabetical ordering */
                if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
                        /* The delete marks of rec and prev_version should be
                        equal for rec to be in the state required by
                        prev_version */

                        if (rec_del != vers_del) {
                                trx = trx_get_on_id(trx_id);

                                break;
                        }

                        /* It is possible that the row was updated so that the
                        secondary index record remained the same in
                        alphabetical ordering, but the field values still
                        changed. For example, 'abc' -> 'ABC'. Check also
                        that. */

                        dtuple_set_types_binary(entry,
                                                dtuple_get_n_fields(entry));
                        if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

                                trx = trx_get_on_id(trx_id);

                                break;
                        }
                } else if (!rec_del) {
                        /* The delete mark should be set in rec for it to be
                        in the state required by prev_version */

                        trx = trx_get_on_id(trx_id);

                        break;
                }

                if (0 != ut_dulint_cmp(trx_id, prev_trx_id)) {
                        /* The versions modified by the trx_id transaction end
                        at prev_version: no implicit x-lock */

                        break;
                }

                version = prev_version;
        }/* for (;;) */

exit_func:
        mtr_commit(&mtr);
        mem_heap_free(heap);

        return(trx);
}
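
/* NOTE: the function above is invoked from the lock system while
holding kernel_mutex, e.g. when it must be decided whether an implicit
x-lock on a secondary index record has to be converted to an explicit
one (see lock0lock.c). */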

/*****************************************************************//**
Finds out if we must preserve a delete marked earlier version of a clustered
index record, because it is >= the purge view.
@return TRUE if earlier version should be preserved */
UNIV_INTERN
ibool
row_vers_must_preserve_del_marked(
/*==============================*/
        trx_id_t        trx_id, /*!< in: transaction id in the version */
        mtr_t*          mtr)    /*!< in: mtr holding the latch on the
                                clustered index record; it will also
                                hold the latch on purge_view */
{
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

        mtr_s_lock(&(purge_sys->latch), mtr);

        if (trx_purge_update_undo_must_exist(trx_id)) {

                /* A purge operation is not yet allowed to remove this
                delete marked record */

                return(TRUE);
        }

        return(FALSE);
}

/*****************************************************************//**
Finds out if a version of the record, where the version >= the current
purge view, should have ientry as its secondary index entry. We check
if there is any not delete marked version of the record where the trx
id >= purge view, and the secondary index entry and ientry are identical
in the alphabetical ordering; exactly in this case we return TRUE.
@return TRUE if earlier version should have the index entry */
UNIV_INTERN
ibool
row_vers_old_has_index_entry(
/*=========================*/
        ibool           also_curr,/*!< in: TRUE if also rec is included in the
                                versions to search; otherwise only versions
                                prior to it are searched */
        const rec_t*    rec,    /*!< in: record in the clustered index; the
                                caller must have a latch on the page */
        mtr_t*          mtr,    /*!< in: mtr holding the latch on rec; it will
                                also hold the latch on purge_view */
        dict_index_t*   index,  /*!< in: the secondary index */
        const dtuple_t* ientry) /*!< in: the secondary index entry */
{
        const rec_t*    version;
        rec_t*          prev_version;
        dict_index_t*   clust_index;
        ulint*          clust_offsets;
        mem_heap_t*     heap;
        mem_heap_t*     heap2;
        const dtuple_t* row;
        const dtuple_t* entry;
        ulint           err;
        ulint           comp;

        ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
              || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
        mtr_s_lock(&(purge_sys->latch), mtr);

        clust_index = dict_table_get_first_index(index->table);

        comp = page_rec_is_comp(rec);
        ut_ad(!dict_table_is_comp(index->table) == !comp);
        heap = mem_heap_create(1024);
        clust_offsets = rec_get_offsets(rec, clust_index, NULL,
                                        ULINT_UNDEFINED, &heap);

        if (also_curr && !rec_get_deleted_flag(rec, comp)) {
                row_ext_t*      ext;

                /* The stack of versions is locked by mtr.
                Thus, it is safe to fetch the prefixes for
                externally stored columns. */
                row = row_build(ROW_COPY_POINTERS, clust_index,
                                rec, clust_offsets, NULL, &ext, heap);
                entry = row_build_index_entry(row, ext, index, heap);

                /* If entry == NULL, the record contains unset BLOB
                pointers. This must be a freshly inserted record. If
                this is called from
                row_purge_remove_sec_if_poss_low(), the thread will
                hold latches on the clustered index and the secondary
                index. Because the insert works in three steps:

                (1) insert the record to the clustered index
                (2) store the BLOBs and update the BLOB pointers
                (3) insert records to the secondary indexes

                the purge thread can safely ignore freshly inserted
                records and delete the secondary index record. The
                thread that inserted the new record will be inserting
                the secondary index records. */

                /* NOTE that we cannot do the comparison as binary
                fields because the row may be in the middle of being
                modified so that the clustered index record has already
                been updated to a different binary value in a char
                field, but the collation identifies the old and new
                value anyway! */
                if (entry && !dtuple_coll_cmp(ientry, entry)) {

                        mem_heap_free(heap);

                        return(TRUE);
                }
        }

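        /* Walk the stack of versions of the clustered index record,
        newest first, checking whether some not delete marked version
        that is >= the purge view would have ientry as its secondary
        index entry. */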
        version = rec;

        for (;;) {
                heap2 = heap;
                heap = mem_heap_create(1024);
                err = trx_undo_prev_version_build(rec, mtr, version,
                                                  clust_index, clust_offsets,
                                                  heap, &prev_version);
                mem_heap_free(heap2); /* free version and clust_offsets */

                if (err != DB_SUCCESS || !prev_version) {
                        /* Versions end here */

                        mem_heap_free(heap);

                        return(FALSE);
                }

                clust_offsets = rec_get_offsets(prev_version, clust_index,
                                                NULL, ULINT_UNDEFINED, &heap);

                if (!rec_get_deleted_flag(prev_version, comp)) {
                        row_ext_t*      ext;

                        /* The stack of versions is locked by mtr.
                        Thus, it is safe to fetch the prefixes for
                        externally stored columns. */
                        row = row_build(ROW_COPY_POINTERS, clust_index,
                                        prev_version, clust_offsets,
                                        NULL, &ext, heap);
                        entry = row_build_index_entry(row, ext, index, heap);

                        /* If entry == NULL, the record contains unset
                        BLOB pointers. This must be a freshly
                        inserted record that we can safely ignore.
                        For the justification, see the comments after
                        the previous row_build_index_entry() call. */

                        /* NOTE that we cannot do the comparison as binary
                        fields because maybe the secondary index record has
                        already been updated to a different binary value in
                        a char field, but the collation identifies the old
                        and new value anyway! */

                        if (entry && !dtuple_coll_cmp(ientry, entry)) {

                                mem_heap_free(heap);

                                return(TRUE);
                        }
                }

                version = prev_version;
        }
}

/*****************************************************************//**
Constructs the version of a clustered index record which a consistent
read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
        const rec_t*    rec,    /*!< in: record in a clustered index; the
                                caller must have a latch on the page; this
                                latch locks the top of the stack of versions
                                of this record */
        mtr_t*          mtr,    /*!< in: mtr holding the latch on rec */
        dict_index_t*   index,  /*!< in: the clustered index */
        ulint**         offsets,/*!< in/out: offsets returned by
                                rec_get_offsets(rec, index) */
        read_view_t*    view,   /*!< in: the consistent read view */
        mem_heap_t**    offset_heap,/*!< in/out: memory heap from which
                                the offsets are allocated */
        mem_heap_t*     in_heap,/*!< in: memory heap from which the memory for
                                *old_vers is allocated; memory for possible
                                intermediate versions is allocated and freed
                                locally within the function */
        rec_t**         old_vers)/*!< out, own: old version, or NULL if the
                                record does not exist in the view, that is,
                                it was freshly inserted afterwards */
{
        const rec_t*    version;
        rec_t*          prev_version;
        trx_id_t        trx_id;
        mem_heap_t*     heap            = NULL;
        byte*           buf;
        ulint           err;

        ut_ad(dict_index_is_clust(index));
        ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
              || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

        ut_ad(rec_offs_validate(rec, index, *offsets));

        trx_id = row_get_rec_trx_id(rec, index, *offsets);

        ut_ad(!read_view_sees_trx_id(view, trx_id));
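
        /* The S-latch on purge_sys prevents the purge view from
        changing, so that the undo log records needed for building
        the earlier versions cannot be truncated while we traverse
        the version stack. */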
        rw_lock_s_lock(&(purge_sys->latch));
        version = rec;

        for (;;) {
                mem_heap_t*     heap2   = heap;
                trx_undo_rec_t* undo_rec;
                roll_ptr_t      roll_ptr;
                undo_no_t       undo_no;
                heap = mem_heap_create(1024);

                /* If we have a high-granularity consistent read view and
                the creating transaction of the view is the same as trx_id
                in the record, we see this record only in the case when
                undo_no of the record is < undo_no in the view. */

                if (view->type == VIEW_HIGH_GRANULARITY
                    && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {

                        roll_ptr = row_get_rec_roll_ptr(version, index,
                                                        *offsets);
                        undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
                        undo_no = trx_undo_rec_get_undo_no(undo_rec);
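                        /* The undo log record was needed only for its
                        undo number; empty the heap so that it can be
                        reused below. */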
                        mem_heap_empty(heap);

                        if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
                                /* The view already sees this version: we can
                                copy it to in_heap and return */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
                                ut_a(!rec_offs_any_null_extern(
                                             version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

                                buf = mem_heap_alloc(in_heap,
                                                     rec_offs_size(*offsets));
                                *old_vers = rec_copy(buf, version, *offsets);
                                rec_offs_make_valid(*old_vers, index,
                                                    *offsets);
                                err = DB_SUCCESS;

                                break;
                        }
                }

                err = trx_undo_prev_version_build(rec, mtr, version, index,
                                                  *offsets, heap,
                                                  &prev_version);
                if (heap2) {
                        mem_heap_free(heap2); /* free version */
                }

                if (err != DB_SUCCESS) {
                        break;
                }

                if (prev_version == NULL) {
                        /* It was a freshly inserted version */
                        *old_vers = NULL;
                        err = DB_SUCCESS;

                        break;
                }

                *offsets = rec_get_offsets(prev_version, index, *offsets,
                                           ULINT_UNDEFINED, offset_heap);

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
                ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

                trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

                if (read_view_sees_trx_id(view, trx_id)) {

                        /* The view already sees this version: we can copy
                        it to in_heap and return */

                        buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets));
                        *old_vers = rec_copy(buf, prev_version, *offsets);
                        rec_offs_make_valid(*old_vers, index, *offsets);
                        err = DB_SUCCESS;

                        break;
                }

                version = prev_version;
        }/* for (;;) */

        mem_heap_free(heap);
        rw_lock_s_unlock(&(purge_sys->latch));

        return(err);
}

/*****************************************************************//**
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
        const rec_t*    rec,    /*!< in: record in a clustered index; the
                                caller must have a latch on the page; this
                                latch locks the top of the stack of versions
                                of this record */
        mtr_t*          mtr,    /*!< in: mtr holding the latch on rec */
        dict_index_t*   index,  /*!< in: the clustered index */
        ulint**         offsets,/*!< in/out: offsets returned by
                                rec_get_offsets(rec, index) */
        mem_heap_t**    offset_heap,/*!< in/out: memory heap from which
                                the offsets are allocated */
        mem_heap_t*     in_heap,/*!< in: memory heap from which the memory for
                                *old_vers is allocated; memory for possible
                                intermediate versions is allocated and freed
                                locally within the function */
        const rec_t**   old_vers)/*!< out: rec, old version, or NULL if the
                                record does not exist in the view, that is,
                                it was freshly inserted afterwards */
{
        const rec_t*    version;
        mem_heap_t*     heap            = NULL;
        byte*           buf;
        ulint           err;
        trx_id_t        rec_trx_id      = ut_dulint_zero;

        ut_ad(dict_index_is_clust(index));
        ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
              || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

        ut_ad(rec_offs_validate(rec, index, *offsets));

        rw_lock_s_lock(&(purge_sys->latch));
        /* The S-latch on purge_sys prevents the purge view from
        changing. Thus, if we have an uncommitted transaction at
        this point, then purge cannot remove its undo log even if
        the transaction could commit now. */

        version = rec;

        for (;;) {
                trx_t*          version_trx;
                mem_heap_t*     heap2;
                rec_t*          prev_version;
                trx_id_t        version_trx_id;

                version_trx_id = row_get_rec_trx_id(version, index, *offsets);
                if (rec == version) {
                        rec_trx_id = version_trx_id;
                }

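                /* Look up the transaction that created this version:
                a transaction that has already been committed in memory
                or was never started no longer holds the record, and the
                version counts as committed. */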
                mutex_enter(&kernel_mutex);
                version_trx = trx_get_on_id(version_trx_id);
                if (version_trx
                    && (version_trx->conc_state == TRX_COMMITTED_IN_MEMORY
                        || version_trx->conc_state == TRX_NOT_STARTED)) {

                        version_trx = NULL;
                }
                mutex_exit(&kernel_mutex);

                if (!version_trx) {

                        /* We found a version that belongs to a
                        committed transaction: return it. */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
                        ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

                        if (rec == version) {
                                *old_vers = rec;
                                err = DB_SUCCESS;
                                break;
                        }

                        /* We assume that a rolled-back transaction stays in
                        TRX_ACTIVE state until all the changes have been
                        rolled back and the transaction is removed from
                        the global list of transactions. */

                        if (!ut_dulint_cmp(rec_trx_id, version_trx_id)) {
                                /* The transaction was committed while
                                we searched for earlier versions.
                                Return the current version as a
                                semi-consistent read. */

                                version = rec;
                                *offsets = rec_get_offsets(version,
                                                           index, *offsets,
                                                           ULINT_UNDEFINED,
                                                           offset_heap);
                        }

                        buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets));
                        *old_vers = rec_copy(buf, version, *offsets);
                        rec_offs_make_valid(*old_vers, index, *offsets);
                        err = DB_SUCCESS;

                        break;
                }

                heap2 = heap;
                heap = mem_heap_create(1024);

                err = trx_undo_prev_version_build(rec, mtr, version, index,
                                                  *offsets, heap,
                                                  &prev_version);
                if (heap2) {
                        mem_heap_free(heap2); /* free version */
                }

                if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
                        break;
                }

                if (prev_version == NULL) {
                        /* It was a freshly inserted version */
                        *old_vers = NULL;
                        err = DB_SUCCESS;

                        break;
                }

                version = prev_version;
                *offsets = rec_get_offsets(version, index, *offsets,
                                           ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
                ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
        }/* for (;;) */

        if (heap) {
                mem_heap_free(heap);
        }
        rw_lock_s_unlock(&(purge_sys->latch));

        return(err);
}