/*
 * mySQL 5.0.11 sources for tomato
 * [tomato.git] / release / src / router / mysql / storage / ndb / src / kernel / blocks / dbtup / DbtupIndex.cpp
 * blob ec0c1422274bf2b7e90d1e59cda895d3ef28f373
 */
/* Copyright (c) 2003-2008 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
16 #define DBTUP_C
17 #define DBTUP_INDEX_CPP
18 #include <Dblqh.hpp>
19 #include "Dbtup.hpp"
20 #include <RefConvert.hpp>
21 #include <ndb_limits.h>
22 #include <pc.hpp>
23 #include <AttributeDescriptor.hpp>
24 #include "AttributeOffset.hpp"
25 #include <AttributeHeader.hpp>
26 #include <signaldata/TuxMaint.hpp>
28 // methods used by ordered index
30 void
31 Dbtup::tuxGetTupAddr(Uint32 fragPtrI,
32 Uint32 pageId,
33 Uint32 pageIndex,
34 Uint32& tupAddr)
36 jamEntry();
37 PagePtr pagePtr;
38 c_page_pool.getPtr(pagePtr, pageId);
39 Uint32 fragPageId= pagePtr.p->frag_page_id;
40 tupAddr= (fragPageId << MAX_TUPLES_BITS) | pageIndex;
43 int
44 Dbtup::tuxAllocNode(Signal* signal,
45 Uint32 fragPtrI,
46 Uint32& pageId,
47 Uint32& pageOffset,
48 Uint32*& node)
50 jamEntry();
51 FragrecordPtr fragPtr;
52 fragPtr.i= fragPtrI;
53 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
54 TablerecPtr tablePtr;
55 tablePtr.i= fragPtr.p->fragTableId;
56 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
57 terrorCode= 0;
59 Local_key key;
60 Uint32* ptr, frag_page_id;
61 if ((ptr= alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0)
63 jam();
64 terrorCode = ZMEM_NOMEM_ERROR; // caller sets error
65 return terrorCode;
67 pageId= key.m_page_no;
68 pageOffset= key.m_page_idx;
69 Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
70 Uint32 attrDataOffset= AttributeOffset::getOffset(
71 tableDescriptor[attrDescIndex + 1].tabDescr);
72 node= ptr + attrDataOffset;
73 return 0;
#if 0
/*
 * Release an ordered-index node previously handed out by tuxAllocNode.
 * NOTE: currently compiled out (#if 0) — kept for reference only.
 */
void
Dbtup::tuxFreeNode(Signal* signal,
                   Uint32 fragPtrI,
                   Uint32 pageId,
                   Uint32 pageOffset,
                   Uint32* node)
{
  jamEntry();
  FragrecordPtr fragPtr;
  fragPtr.i = fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i = fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  PagePtr pagePtr;
  pagePtr.i = pageId;
  ptrCheckGuard(pagePtr, cnoOfPage, cpage);
  // sanity: the node pointer must match the record at pageOffset
  const Uint32 attrDescIndex = tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  const Uint32 attrDataOffset =
    AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr);
  ndbrequire(node == &pagePtr.p->pageWord[pageOffset] + attrDataOffset);
  freeTh(fragPtr.p, tablePtr.p, signal, pagePtr.p, pageOffset);
}
#endif
101 void
102 Dbtup::tuxGetNode(Uint32 fragPtrI,
103 Uint32 pageId,
104 Uint32 pageOffset,
105 Uint32*& node)
107 jamEntry();
108 FragrecordPtr fragPtr;
109 fragPtr.i= fragPtrI;
110 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
111 TablerecPtr tablePtr;
112 tablePtr.i= fragPtr.p->fragTableId;
113 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
114 PagePtr pagePtr;
115 c_page_pool.getPtr(pagePtr, pageId);
116 Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
117 Uint32 attrDataOffset= AttributeOffset::getOffset(
118 tableDescriptor[attrDescIndex + 1].tabDescr);
119 node= ((Fix_page*)pagePtr.p)->
120 get_ptr(pageOffset, tablePtr.p->m_offsets[MM].m_fix_header_size) +
121 attrDataOffset;
124 Dbtup::tuxReadAttrs(Uint32 fragPtrI,
125 Uint32 pageId,
126 Uint32 pageIndex,
127 Uint32 tupVersion,
128 const Uint32* attrIds,
129 Uint32 numAttrs,
130 Uint32* dataOut)
132 jamEntry();
133 // use own variables instead of globals
134 FragrecordPtr fragPtr;
135 fragPtr.i= fragPtrI;
136 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
137 TablerecPtr tablePtr;
138 tablePtr.i= fragPtr.p->fragTableId;
139 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
141 // search for tuple version if not original
143 Operationrec tmpOp;
144 KeyReqStruct req_struct;
145 tmpOp.m_tuple_location.m_page_no= pageId;
146 tmpOp.m_tuple_location.m_page_idx= pageIndex;
148 setup_fixed_part(&req_struct, &tmpOp, tablePtr.p);
149 Tuple_header *tuple_ptr= req_struct.m_tuple_ptr;
150 if (tuple_ptr->get_tuple_version() != tupVersion)
152 jam();
153 OperationrecPtr opPtr;
154 opPtr.i= tuple_ptr->m_operation_ptr_i;
155 Uint32 loopGuard= 0;
156 while (opPtr.i != RNIL) {
157 c_operation_pool.getPtr(opPtr);
158 if (opPtr.p->tupVersion == tupVersion) {
159 jam();
160 if (!opPtr.p->m_copy_tuple_location.isNull()) {
161 req_struct.m_tuple_ptr= (Tuple_header*)
162 c_undo_buffer.get_ptr(&opPtr.p->m_copy_tuple_location);
164 break;
166 jam();
167 opPtr.i= opPtr.p->prevActiveOp;
168 ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
171 // read key attributes from found tuple version
172 // save globals
173 TablerecPtr tabptr_old= tabptr;
174 FragrecordPtr fragptr_old= fragptr;
175 OperationrecPtr operPtr_old= operPtr;
176 // new globals
177 tabptr= tablePtr;
178 fragptr= fragPtr;
179 operPtr.i= RNIL;
180 operPtr.p= NULL;
181 prepare_read(&req_struct, tablePtr.p, false);
183 // do it
184 int ret = readAttributes(&req_struct,
185 attrIds,
186 numAttrs,
187 dataOut,
188 ZNIL,
189 true);
191 // restore globals
192 tabptr= tabptr_old;
193 fragptr= fragptr_old;
194 operPtr= operPtr_old;
195 // done
196 if (ret == -1) {
197 ret = terrorCode ? (-(int)terrorCode) : -1;
199 return ret;
202 Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
204 jamEntry();
205 // use own variables instead of globals
206 FragrecordPtr fragPtr;
207 fragPtr.i= fragPtrI;
208 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
209 TablerecPtr tablePtr;
210 tablePtr.i= fragPtr.p->fragTableId;
211 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
213 Operationrec tmpOp;
214 tmpOp.m_tuple_location.m_page_no= pageId;
215 tmpOp.m_tuple_location.m_page_idx= pageIndex;
217 KeyReqStruct req_struct;
219 PagePtr page_ptr;
220 Uint32* ptr= get_ptr(&page_ptr, &tmpOp.m_tuple_location, tablePtr.p);
221 req_struct.m_page_ptr = page_ptr;
222 req_struct.m_tuple_ptr = (Tuple_header*)ptr;
224 int ret = 0;
225 if (! (req_struct.m_tuple_ptr->m_header_bits & Tuple_header::FREE))
227 req_struct.check_offset[MM]= tablePtr.p->get_check_offset(MM);
228 req_struct.check_offset[DD]= tablePtr.p->get_check_offset(DD);
230 Uint32 num_attr= tablePtr.p->m_no_of_attributes;
231 Uint32 descr_start= tablePtr.p->tabDescriptor;
232 TableDescriptor *tab_descr= &tableDescriptor[descr_start];
233 ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
234 req_struct.attr_descr= tab_descr;
236 if(req_struct.m_tuple_ptr->m_header_bits & Tuple_header::ALLOC)
238 Uint32 opPtrI= req_struct.m_tuple_ptr->m_operation_ptr_i;
239 Operationrec* opPtrP= c_operation_pool.getPtr(opPtrI);
240 ndbassert(!opPtrP->m_copy_tuple_location.isNull());
241 req_struct.m_tuple_ptr= (Tuple_header*)
242 c_undo_buffer.get_ptr(&opPtrP->m_copy_tuple_location);
244 prepare_read(&req_struct, tablePtr.p, false);
246 const Uint32* attrIds= &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
247 const Uint32 numAttrs= tablePtr.p->noOfKeyAttr;
248 // read pk attributes from original tuple
250 // save globals
251 TablerecPtr tabptr_old= tabptr;
252 FragrecordPtr fragptr_old= fragptr;
253 OperationrecPtr operPtr_old= operPtr;
255 // new globals
256 tabptr= tablePtr;
257 fragptr= fragPtr;
258 operPtr.i= RNIL;
259 operPtr.p= NULL;
261 // do it
262 ret = readAttributes(&req_struct,
263 attrIds,
264 numAttrs,
265 dataOut,
266 ZNIL,
267 xfrmFlag);
268 // restore globals
269 tabptr= tabptr_old;
270 fragptr= fragptr_old;
271 operPtr= operPtr_old;
272 // done
273 if (ret != -1) {
274 // remove headers
275 Uint32 n= 0;
276 Uint32 i= 0;
277 while (n < numAttrs) {
278 const AttributeHeader ah(dataOut[i]);
279 Uint32 size= ah.getDataSize();
280 ndbrequire(size != 0);
281 for (Uint32 j= 0; j < size; j++) {
282 dataOut[i + j - n]= dataOut[i + j + 1];
284 n+= 1;
285 i+= 1 + size;
287 ndbrequire((int)i == ret);
288 ret -= numAttrs;
289 } else {
290 ret= terrorCode ? (-(int)terrorCode) : -1;
293 if (tablePtr.p->m_bits & Tablerec::TR_RowGCI)
295 dataOut[ret] = *req_struct.m_tuple_ptr->get_mm_gci(tablePtr.p);
297 else
299 dataOut[ret] = 0;
301 return ret;
305 Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
307 jamEntry();
308 // get table
309 TablerecPtr tablePtr;
310 tablePtr.i = tableId;
311 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
312 // get fragment
313 FragrecordPtr fragPtr;
314 getFragmentrec(fragPtr, fragId, tablePtr.p);
315 // get real page id and tuple offset
317 Uint32 pageId = getRealpid(fragPtr.p, fragPageId);
318 // use TUX routine - optimize later
319 int ret = tuxReadPk(fragPtr.i, pageId, pageIndex, dataOut, xfrmFlag);
320 return ret;
/*
 * TUX index contains all tuple versions.  A scan in TUX has scanned
 * one of them and asks if it can be returned as scan result.  This
 * depends on trans id, dirty read flag, and savepoint within trans.
 *
 * Previously this faked a ZREAD operation and used getPage().
 * In TUP getPage() is run after ACC locking, but TUX comes here
 * before ACC access.  Instead of modifying getPage() it is more
 * clear to do the full check here.
 */
333 bool
334 Dbtup::tuxQueryTh(Uint32 fragPtrI,
335 Uint32 pageId,
336 Uint32 pageIndex,
337 Uint32 tupVersion,
338 Uint32 transId1,
339 Uint32 transId2,
340 bool dirty,
341 Uint32 savepointId)
343 jamEntry();
344 FragrecordPtr fragPtr;
345 fragPtr.i= fragPtrI;
346 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
347 TablerecPtr tablePtr;
348 tablePtr.i= fragPtr.p->fragTableId;
349 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
350 PagePtr pagePtr;
351 pagePtr.i = pageId;
352 c_page_pool.getPtr(pagePtr);
354 KeyReqStruct req_struct;
357 Operationrec tmpOp;
358 tmpOp.m_tuple_location.m_page_no = pageId;
359 tmpOp.m_tuple_location.m_page_idx = pageIndex;
360 setup_fixed_part(&req_struct, &tmpOp, tablePtr.p);
363 Tuple_header* tuple_ptr = req_struct.m_tuple_ptr;
365 OperationrecPtr currOpPtr;
366 currOpPtr.i = tuple_ptr->m_operation_ptr_i;
367 if (currOpPtr.i == RNIL) {
368 jam();
369 // tuple has no operation, any scan can see it
370 return true;
372 c_operation_pool.getPtr(currOpPtr);
374 const bool sameTrans =
375 c_lqh->is_same_trans(currOpPtr.p->userpointer, transId1, transId2);
377 bool res = false;
378 OperationrecPtr loopOpPtr = currOpPtr;
380 if (!sameTrans) {
381 jam();
382 if (!dirty) {
383 jam();
384 if (currOpPtr.p->nextActiveOp == RNIL) {
385 jam();
386 // last op - TUX makes ACC lock request in same timeslice
387 res = true;
390 else {
391 // loop to first op (returns false)
392 find_savepoint(loopOpPtr, 0);
393 const Uint32 op_type = loopOpPtr.p->op_struct.op_type;
395 if (op_type != ZINSERT) {
396 jam();
397 // read committed version
398 const Uint32 origVersion = tuple_ptr->get_tuple_version();
399 if (origVersion == tupVersion) {
400 jam();
401 res = true;
406 else {
407 jam();
408 // for own trans, ignore dirty flag
410 if (find_savepoint(loopOpPtr, savepointId)) {
411 jam();
412 const Uint32 op_type = loopOpPtr.p->op_struct.op_type;
414 if (op_type != ZDELETE) {
415 jam();
416 // check if this op has produced the scanned version
417 Uint32 loopVersion = loopOpPtr.p->tupVersion;
418 if (loopVersion == tupVersion) {
419 jam();
420 res = true;
426 return res;
// ordered index build

//#define TIME_MEASUREMENT
#ifdef TIME_MEASUREMENT
// simple instrumentation counters for timing TUX_MAINT_REQ rounds
static Uint32 time_events;
NDB_TICKS tot_time_passed;
Uint32 number_events;
#endif
437 void
438 Dbtup::execBUILDINDXREQ(Signal* signal)
440 jamEntry();
441 #ifdef TIME_MEASUREMENT
442 time_events= 0;
443 tot_time_passed= 0;
444 number_events= 1;
445 #endif
446 // get new operation
447 BuildIndexPtr buildPtr;
448 if (! c_buildIndexList.seize(buildPtr)) {
449 jam();
450 BuildIndexRec buildRec;
451 memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request));
452 buildRec.m_errorCode= BuildIndxRef::Busy;
453 buildIndexReply(signal, &buildRec);
454 return;
456 memcpy(buildPtr.p->m_request,
457 signal->theData,
458 sizeof(buildPtr.p->m_request));
459 // check
460 buildPtr.p->m_errorCode= BuildIndxRef::NoError;
461 do {
462 const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request;
463 if (buildReq->getTableId() >= cnoOfTablerec) {
464 jam();
465 buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable;
466 break;
468 TablerecPtr tablePtr;
469 tablePtr.i= buildReq->getTableId();
470 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
471 if (tablePtr.p->tableStatus != DEFINED) {
472 jam();
473 buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable;
474 break;
476 // memory page format
477 buildPtr.p->m_build_vs =
478 tablePtr.p->m_attributes[MM].m_no_of_varsize > 0;
479 if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) {
480 jam();
481 const DLList<TupTriggerData>& triggerList =
482 tablePtr.p->tuxCustomTriggers;
484 TriggerPtr triggerPtr;
485 triggerList.first(triggerPtr);
486 while (triggerPtr.i != RNIL) {
487 if (triggerPtr.p->indexId == buildReq->getIndexId()) {
488 jam();
489 break;
491 triggerList.next(triggerPtr);
493 if (triggerPtr.i == RNIL) {
494 jam();
495 // trigger was not created
496 buildPtr.p->m_errorCode = BuildIndxRef::InternalError;
497 break;
499 buildPtr.p->m_indexId = buildReq->getIndexId();
500 buildPtr.p->m_buildRef = DBTUX;
501 } else if(buildReq->getIndexId() == RNIL) {
502 jam();
503 // REBUILD of acc
504 buildPtr.p->m_indexId = RNIL;
505 buildPtr.p->m_buildRef = DBACC;
506 } else {
507 jam();
508 buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType;
509 break;
512 // set to first tuple position
513 const Uint32 firstTupleNo = 0;
514 buildPtr.p->m_fragNo= 0;
515 buildPtr.p->m_pageId= 0;
516 buildPtr.p->m_tupleNo= firstTupleNo;
517 // start build
518 buildIndex(signal, buildPtr.i);
519 return;
520 } while (0);
521 // check failed
522 buildIndexReply(signal, buildPtr.p);
523 c_buildIndexList.release(buildPtr);
526 void
527 Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
529 // get build record
530 BuildIndexPtr buildPtr;
531 buildPtr.i= buildPtrI;
532 c_buildIndexList.getPtr(buildPtr);
533 const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request;
534 // get table
535 TablerecPtr tablePtr;
536 tablePtr.i= buildReq->getTableId();
537 ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
539 const Uint32 firstTupleNo = 0;
540 const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;
542 #ifdef TIME_MEASUREMENT
543 MicroSecondTimer start;
544 MicroSecondTimer stop;
545 NDB_TICKS time_passed;
546 #endif
547 do {
548 // get fragment
549 FragrecordPtr fragPtr;
550 if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) {
551 jam();
552 // build ready
553 buildIndexReply(signal, buildPtr.p);
554 c_buildIndexList.release(buildPtr);
555 return;
557 ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE);
558 fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
559 if (fragPtr.i == RNIL) {
560 jam();
561 buildPtr.p->m_fragNo++;
562 buildPtr.p->m_pageId= 0;
563 buildPtr.p->m_tupleNo= firstTupleNo;
564 break;
566 ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
567 // get page
568 PagePtr pagePtr;
569 if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) {
570 jam();
571 buildPtr.p->m_fragNo++;
572 buildPtr.p->m_pageId= 0;
573 buildPtr.p->m_tupleNo= firstTupleNo;
574 break;
576 Uint32 realPageId= getRealpid(fragPtr.p, buildPtr.p->m_pageId);
577 c_page_pool.getPtr(pagePtr, realPageId);
578 Uint32 pageState= pagePtr.p->page_state;
579 // skip empty page
580 if (pageState == ZEMPTY_MM) {
581 jam();
582 buildPtr.p->m_pageId++;
583 buildPtr.p->m_tupleNo= firstTupleNo;
584 break;
586 // get tuple
587 Uint32 pageIndex = ~0;
588 const Tuple_header* tuple_ptr = 0;
589 pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
590 if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
591 jam();
592 buildPtr.p->m_pageId++;
593 buildPtr.p->m_tupleNo= firstTupleNo;
594 break;
596 tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
597 // skip over free tuple
598 if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
599 jam();
600 buildPtr.p->m_tupleNo++;
601 break;
603 Uint32 tupVersion= tuple_ptr->get_tuple_version();
604 OperationrecPtr pageOperPtr;
605 pageOperPtr.i= tuple_ptr->m_operation_ptr_i;
606 #ifdef TIME_MEASUREMENT
607 NdbTick_getMicroTimer(&start);
608 #endif
609 // add to index
610 TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
611 req->errorCode = RNIL;
612 req->tableId = tablePtr.i;
613 req->indexId = buildPtr.p->m_indexId;
614 req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
615 req->pageId = realPageId;
616 req->tupVersion = tupVersion;
617 req->opInfo = TuxMaintReq::OpAdd;
618 req->tupFragPtrI = fragPtr.i;
619 req->fragPageId = buildPtr.p->m_pageId;
620 req->pageIndex = pageIndex;
622 if (pageOperPtr.i == RNIL)
624 EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
625 signal, TuxMaintReq::SignalLength+2);
627 else
630 If there is an ongoing operation on the tuple then it is either a
631 copy tuple or an original tuple with an ongoing transaction. In
632 both cases realPageId and pageOffset refer to the original tuple.
633 The tuple address stored in TUX will always be the original tuple
634 but with the tuple version of the tuple we found.
636 This is necessary to avoid having to update TUX at abort of
637 update. If an update aborts then the copy tuple is copied to
638 the original tuple. The build will however have found that
639 tuple as a copy tuple. The original tuple is stable and is thus
640 preferrable to store in TUX.
642 jam();
645 * Since copy tuples now can't be found on real pages.
646 * we will here build all copies of the tuple
648 * Note only "real" tupVersion's should be added
649 * i.e delete's shouldnt be added
650 * (unless it's the first op, when "original" should be added)
654 c_operation_pool.getPtr(pageOperPtr);
655 if(pageOperPtr.p->op_struct.op_type != ZDELETE ||
656 pageOperPtr.p->is_first_operation())
658 req->errorCode = RNIL;
659 req->tupVersion= pageOperPtr.p->tupVersion;
660 EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
661 signal, TuxMaintReq::SignalLength+2);
663 else
665 req->errorCode= 0;
667 pageOperPtr.i= pageOperPtr.p->prevActiveOp;
668 } while(req->errorCode == 0 && pageOperPtr.i != RNIL);
671 jamEntry();
672 if (req->errorCode != 0) {
673 switch (req->errorCode) {
674 case TuxMaintReq::NoMemError:
675 jam();
676 buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure;
677 break;
678 default:
679 ndbrequire(false);
680 break;
682 buildIndexReply(signal, buildPtr.p);
683 c_buildIndexList.release(buildPtr);
684 return;
686 #ifdef TIME_MEASUREMENT
687 NdbTick_getMicroTimer(&stop);
688 time_passed= NdbTick_getMicrosPassed(start, stop);
689 if (time_passed < 1000) {
690 time_events++;
691 tot_time_passed += time_passed;
692 if (time_events == number_events) {
693 NDB_TICKS mean_time_passed= tot_time_passed /
694 (NDB_TICKS)number_events;
695 ndbout << "Number of events= " << number_events;
696 ndbout << " Mean time passed= " << mean_time_passed << endl;
697 number_events <<= 1;
698 tot_time_passed= (NDB_TICKS)0;
699 time_events= 0;
702 #endif
703 // next tuple
704 buildPtr.p->m_tupleNo++;
705 break;
706 } while (0);
707 signal->theData[0]= ZBUILD_INDEX;
708 signal->theData[1]= buildPtr.i;
709 sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
712 void
713 Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP)
715 const BuildIndxReq* const buildReq=
716 (const BuildIndxReq*)buildPtrP->m_request;
717 // conf is subset of ref
718 BuildIndxRef* rep= (BuildIndxRef*)signal->getDataPtr();
719 rep->setUserRef(buildReq->getUserRef());
720 rep->setConnectionPtr(buildReq->getConnectionPtr());
721 rep->setRequestType(buildReq->getRequestType());
722 rep->setTableId(buildReq->getTableId());
723 rep->setIndexType(buildReq->getIndexType());
724 rep->setIndexId(buildReq->getIndexId());
725 // conf
726 if (buildPtrP->m_errorCode == BuildIndxRef::NoError) {
727 jam();
728 sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF,
729 signal, BuildIndxConf::SignalLength, JBB);
730 return;
732 // ref
733 rep->setErrorCode(buildPtrP->m_errorCode);
734 sendSignal(rep->getUserRef(), GSN_BUILDINDXREF,
735 signal, BuildIndxRef::SignalLength, JBB);