/***************************************************************************
 *   Copyright (C) 2007 by www.databasecache.com                           *
 *   Contact: praba_tuty@databasecache.com                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 ***************************************************************************/
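/* HashIndex.cxx: hash index maintenance for csql -- bucket computation
   (hashpjw variants), unique-key checking, and insert/remove/update of
   index nodes with logical undo logging. */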
#include<Index.h>
#include<CatalogTables.h>
#include<Lock.h>
#include<Debug.h>
#include<Table.h>
#include<TableImpl.h>
#include<Predicate.h>
#include<PredicateImpl.h>
/* Defines `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
   1986, 1987 Bell Telephone Laboratories, Inc.] */
unsigned int hashString(char *strVal)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
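/* A quick worked example of the fold: hashString("ab") accumulates
   'a' (0x61) into hval = 0x61, then shifts and adds 'b' (0x62) to get
   hval = (0x61 << 4) + 0x62 = 0x672. The top nibble stays zero for such
   short strings, so the xor-folding branch never fires and 0x672 is the
   final hash. */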
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    int iter = 0;
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
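/* hashBinary is the length-bounded variant of hashpjw: it hashes exactly
   `length' bytes, so keys that may contain embedded NUL bytes (binary and
   composite key buffers) hash correctly where hashString would stop early. */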
unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
{
    if (typeInt == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeString == type || typeVarchar == type) {
        unsigned int val = hashString((char*)key);
        return val % noOfBuckets;
    }else if (typeShort == type) {
        short val = *(short*) key;
        return val % noOfBuckets;
    }else if (typeLong == type) {
        long val = *(long*) key;
        return val % noOfBuckets;
    }else if (typeLongLong == type) {
        long long val = *(long long*) key;
        return val % noOfBuckets;
    }else if (typeByteInt == type) {
        ByteInt val = *(ByteInt*)key;
        return val % noOfBuckets;
    }else if (typeDate == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeTime == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeComposite == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    }else if (typeBinary == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    }else if (typeULong == type) {
        unsigned long val = *(unsigned long*)key;
        return val % noOfBuckets;
    }
    printError(ErrSysFatal,"Type not supported for hashing\n");
    return -1;
}
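/* Usage sketch (hypothetical values): for an int key k = 42 spread over
   1024 buckets, computeHashBucket(typeInt, &k, 1024, sizeof(int)) simply
   returns 42 % 1024 = 42; string, binary and composite keys are first
   folded through the hashpjw variants above before the modulo. */

/* Scans the bucket chain headed by `head' and returns true (after logging
   ErrUnique) if a tuple with the same key value as `tuple' already exists;
   composite keys are compared field by field. */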
bool HashIndex::checkForUniqueKey(IndexNode *head, HashIndexInfo *info, void *tuple)
{
    if (!head) return false;
    int offset = info->fldOffset;
    DataType type = info->type;
    BucketList list(head);
    BucketIter iter = list.getIterator();
    IndexNode *node;
    void *bucketTuple;
    printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
    bool res = false;

    while ((node = iter.next()) != NULL)
    {
        bucketTuple = node->ptrToTuple_;
        if (type == typeComposite) {
            FieldIterator fldIter = info->idxFldList.getIterator();
            while (fldIter.hasElement()) {
                FieldDef *def = fldIter.nextElement();
                if (def->type_ != typeVarchar) {
                    res = AllDataType::compareVal(
                              (char *)bucketTuple + def->offset_,
                              (char *)tuple + def->offset_,
                              OpEquals, def->type_, def->length_);
                } else {
                    char *tvcptr = (char *) *(long *)
                                       ((char *)tuple + def->offset_);
                    char *btvcptr = (char *) *(long *)
                                       ((char *)bucketTuple + def->offset_);
                    res = AllDataType::compareVal(tvcptr, btvcptr,
                              OpEquals, def->type_, def->length_);
                }
                if (!res) break;
            }
        }
        else {
            if (type != typeVarchar)
                res = AllDataType::compareVal((void*)((char*)bucketTuple +offset),
                          (void*)((char*)tuple +offset), OpEquals,
                          type, info->compLength);
            else
                res = AllDataType::compareVal((void*)*(long *)((char*)bucketTuple +offset),
                          (void*)*(long *)((char*)tuple +offset),
                          OpEquals, type, info->compLength);
        }
        if (res)
        {
            if (type == typeLongLong)
                printError(ErrUnique, "Unique key violation for id:%lld",
                    *(long long*) ((char*)tuple +offset));
            else
                printError(ErrUnique, "Unique key violation");
            return true;
        }
    }
    return false;
}
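/* Inserts an index node for `tuple': computes the target bucket, optionally
   enforces uniqueness, installs the first node with a CAS on the bucket
   head (or appends through BucketList), and finally writes a logical undo
   record unless the table is being bulk loaded (loadFlag). */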
DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    CINDEX *iptr = (CINDEX*)indexPtr;
    DbRetVal rc = OK;
    int noOfBuckets = info->noOfBuckets;
    int offset = info->fldOffset;
    DataType type = info->type;

    printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    void *keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void* keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            if (def->type_ != typeVarchar) {
                AllDataType::copyVal(keyBuffer, keyPtr, def->type_,
                                     def->length_);
            } else {
                void *ptr = (void *) *(long *) keyPtr;
                if (ptr)
                    AllDataType::copyVal(keyBuffer, ptr, def->type_,
                                         def->length_);
            }
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets,
                                     info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        if (type != typeVarchar)
            bucketNo =
                computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
        else {
            void *ptr = (void *) *(long *) keyPtr;
            if (ptr)
                bucketNo = computeHashBucket(type, ptr, noOfBuckets,
                                             info->compLength);
            else bucketNo = 0;
        }
    }
    printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
    Bucket *bucket = &(buckets[bucketNo]);
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    IndexNode *head = (IndexNode*) bucket->bucketList_;
    if (info->isUnique)
    {
        bool isKeyPresent = checkForUniqueKey(head, info, tuple);
        if (isKeyPresent) return ErrUnique;
    }
    Chunk *hIdxNodeChunk = (Chunk*)iptr->hashNodeChunk_;
    printDebug(DM_HashIndex, "HashIndex insert into bucket list");
    if (!head)
    {
        printDebug(DM_HashIndex, "HashIndex insert head is empty");
        DbRetVal rv = OK;
        IndexNode *firstNode = NULL;

        int tries = 0;
        int totalTries = Conf::config.getMutexRetries();
        while (tries < totalTries)
        {
            rv = OK;
            firstNode = (IndexNode*) hIdxNodeChunk->allocate(tbl->db_, &rv);
            if (firstNode != NULL) break;
            if (rv != ErrLockTimeOut)
            {
                printError(rv, "Unable to allocate hash index node");
                return rv;
            }
            tries++;
        }
        if (firstNode == NULL) {
            printError(rv, "Unable to allocate hash index node after %d retries", tries);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        if (0 != Mutex::CASL((long*)&bucket->bucketList_, 0, (long)firstNode)) {
            printError(ErrLockTimeOut, "Hash Index bucket lock timeout.. retry");
            hIdxNodeChunk->free(tbl->db_, firstNode);
            return ErrLockTimeOut;
        }
        printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
    }
    else
    {
        BucketList list(head);
        rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        if (rc != OK) {
            printError(rc, "unable to insert into bucketlist rv:%d", rc);
            return rc;
        }
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            printError(rc, "Unable to append logical log before rc:%d", rc);
            BucketList list(head);
            DbRetVal rv = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            //bucket->bucketList_ = list.getBucketListHead();
            if (rv == SplCase) {
                printError(ErrWarning, "SplCase occurred");
                if (0 != Mutex::CASL((long*)&bucket->bucketList_,
                    (long)bucket->bucketList_, (long)list.getBucketListHead())) {
                    printError(ErrSysFatal, "Double failure, may lead to hash node leak\n");
                }
            } else if (rv != OK) printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
        }
    }
    return rc;
}
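/* Removes the index node for `tuple': recomputes the bucket from the key,
   unlinks the node from the bucket chain (patching the bucket head with a
   CAS when the head node itself was removed -- the SplCase path), and
   appends a logical undo record unless loadFlag is set. */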
DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    void *keyPtr = (void*)((char*)tuple + offset);
    int bucket = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void *keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            if (def->type_ != typeVarchar) {
                AllDataType::copyVal(keyBuffer, keyPtr, def->type_,
                                     def->length_);
            } else {
                void *ptr = (void *) *(long *) keyPtr;
                if (ptr)
                    AllDataType::copyVal(keyBuffer, ptr, def->type_,
                                         def->length_);
            }
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucket = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        if (type != typeVarchar)
            bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets,
                                                  info->compLength);
        else {
            void *ptr = (void *) *(long *) keyPtr;
            if (ptr)
                bucket = HashIndex::computeHashBucket(type, ptr, noOfBuckets,
                                                      info->compLength);
            else bucket = 0;
        }
    }

    Bucket *bucket1 = &buckets[bucket];
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket1;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    IndexNode *head = (IndexNode*) bucket1->bucketList_;

    if (!head) {
        printError(ErrNotExists, "Hash index does not exist:should never happen\n");
        return ErrNotExists;
    }
    BucketList list(head);
    printDebug(DM_HashIndex, "Removing hash index node from head %x", head);

    DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
    if (SplCase == rc)
    {
        printDebug(DM_HashIndex, "Removing hash index node from head ");
        //bucket1->bucketList_ = list.getBucketListHead();
        if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
            (long)head, (long)list.getBucketListHead())) {
            printError(ErrSysFatal, "Lock time out for hash bucket. retry\n");
            return ErrLockTimeOut;
        }
        rc = OK;
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            //bucket1->bucketList_ = list.getBucketListHead();
            if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                (long)bucket1->bucketList_, (long)list.getBucketListHead())) {
                printError(ErrSysFatal, "Double failure on index insert");
            }
        }
    }
    return rc;
}
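/* Re-keys the index entry for `tuple' on update: builds the old and new key
   images, returns early if the key fields are unchanged, then moves the
   node from the old bucket to the new one under both bucket mutexes,
   logging a logical delete+insert pair so the move can be undone. */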
DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;

    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    //check whether the index key is updated or not
    //if it is not updated return from here
    void *keyPtr = (void*)((char*)tuple + offset);
    char *kPtr = (char*)keyPtr;

    //creating old key value buffer for composite primary keys
    char *oldKeyBuffer = (char*) malloc(info->compLength);
    memset(oldKeyBuffer, 0, info->compLength);
    void *oldKeyStartBuffer = oldKeyBuffer;
    FieldIterator iter = info->idxFldList.getIterator();
    while (iter.hasElement()) {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(oldKeyBuffer, keyPtr, def->type_, def->length_);
        oldKeyBuffer = oldKeyBuffer + AllDataType::size(def->type_, def->length_);
    }
    keyPtr = (void *) kPtr;
    //Iterate through the bind list and check
    FieldIterator idxFldIter = info->idxFldList.getIterator();
    char *keyBindBuffer;
    if (type == typeBinary) {
        keyBindBuffer = (char*) malloc(2 * info->compLength);
        memset(keyBindBuffer, 0, 2 * info->compLength);
    } else {
        keyBindBuffer = (char*) malloc(info->compLength);
        memset(keyBindBuffer, 0, info->compLength);
    }
    void *keyStartBuffer = (void*) keyBindBuffer;
    bool keyUpdated = false;

    while (idxFldIter.hasElement()) {
        FieldDef *idef = idxFldIter.nextElement();
        FieldIterator fldIter = tbl->fldList_.getIterator();
        while (fldIter.hasElement()) {
            FieldDef *def = fldIter.nextElement();
            if (0 == strcmp(def->fldName_, idef->fldName_)) {
                if (NULL != def->bindVal_) {
                    if (type == typeBinary) {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                        def->type_, 2*def->length_);
                        keyStartBuffer = calloc(1, info->compLength);
                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
                        free(keyBindBuffer);
                    } else {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                        def->type_, def->length_);
                        keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                    }
                } else {
                    AllDataType::copyVal(keyBindBuffer, (char *) tuple + def->offset_, def->type_, def->length_);
                    keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                }
                keyUpdated = true;
                break;
            }
        }
    }
    if (!keyUpdated) {
        //printf("DEBUG::key not updated\n");
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    //printf("DEBUG::it is wrong coming here\n");
    bool result = false;
    if (type == typeComposite)
        result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
                     OpEquals, info->type, info->compLength);
    else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
                     OpEquals, info->type, info->compLength);
    if (result) {
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    //remove the node whose key is updated
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
    else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
    Bucket *bucket = &buckets[bucketNo];
    HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
    hInfo1->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo1->bucket_ = bucket;
    hInfo1->tuple_ = tuple;
    hInfo1->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo1->keyPtr_ = keyPtr;

    //it may run into deadlock when two threads update tuples which fall in
    //the same buckets. So take both mutexes one after another, which will
    //reduce the deadlock window.
    int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo1;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
        return ErrLockTimeOut;
    }
    //insert node for the updated key value
    int newBucketNo = computeHashBucket(type,
                          keyStartBuffer, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);

    Bucket *bucket1 = &buckets[newBucketNo];
    HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
    hInfo2->metaData_ = tbl->db_->getMetaDataPtr();
    //undoing this insert must unlink the node from the new bucket
    hInfo2->bucket_ = bucket1;
    hInfo2->tuple_ = tuple;
    hInfo2->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo2->keyPtr_ = keyPtr;
    ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",newBucketNo);
        return ErrLockTimeOut;
    }
    IndexNode *head1 = (IndexNode*) bucket->bucketList_;
    if (head1)
    {
        BucketList list1(head1);
        printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
        list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
        bucket->bucketList_ = list1.getBucketListHead();
    }
    else
    {
        printError(ErrSysInternal,"Update: Bucket list is null");
        bucket1->mutex_.releaseLock(tbl->db_->procSlot);
        bucket->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return ErrSysInternal;
    }
    DbRetVal rc = OK;
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            BucketList list((IndexNode*) bucket->bucketList_);
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            bucket->bucketList_ = list.getBucketListHead();
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rc;
        }
    }
    IndexNode *head2 = (IndexNode*) bucket1->bucketList_;
    //Note: the tuple stays at the same address, so the keyptr and tuple
    //pointers do not change during the append; only the bucket in which
    //this node resides changes when the index key is updated.
    if (!head2)
    {
        DbRetVal rv = OK;
        IndexNode *firstNode = (IndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
        if (firstNode == NULL)
        {
            printError(rv, "Error in allocating hash node");
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        bucket1->bucketList_ = (IndexNode*)firstNode;
        printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
    }
    else
    {
        BucketList list2(head2);
        printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
        list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        bucket1->bucketList_ = list2.getBucketListHead();
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //reverting back the changes: delete the new node and add the old
            //node + remove the logical undo log of the DeleteHashIndexOperation
            BucketList list1((IndexNode*) bucket->bucketList_);
            BucketList list2((IndexNode*) bucket1->bucketList_);
            list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            bucket->bucketList_ = list1.getBucketListHead();
            bucket1->bucketList_ = list2.getBucketListHead();
            UndoLogInfo *logInfo = tr->popUndoLog();
            Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
            chunk->free(tbl->sysDB_, logInfo);
        }
    }
    bucket1->mutex_.releaseLock(tbl->db_->procSlot);
    bucket->mutex_.releaseLock(tbl->db_->procSlot);
    delete hInfo1;
    delete hInfo2;
    free(keyStartBuffer);
    free(oldKeyStartBuffer);
    return rc;
}
//Following three methods are used to undo Logical Hash Indexes
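/* Each undo handler rebuilds a Database handle from the metadata pointer
   captured in the HashUndoLogInfo record and replays the inverse
   bucket-list operation against the recorded bucket. */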
DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *) info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rv = list.insert(hChunk, &db, info->keyPtr_, info->tuple_);
    if (rv != OK)
    {
        printError(ErrLockTimeOut, "Unable to add to bucket..retry\n");
        return ErrLockTimeOut;
    }
    //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    if (0 != Mutex::CASL((long*)& (((Bucket *)info->bucket_)->bucketList_),
        (long)(((Bucket *)info->bucket_)->bucketList_),
        (long)list.getBucketListHead()))
    {
        printError(ErrLockTimeOut, "Unable to add to bucket..retry\n");
        return ErrLockTimeOut;
    }
    return OK;
}
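/* Inverse of insertLogicalUndoLog: unlinks the node recorded in the undo
   entry, patching the bucket head with a CAS when the head node itself was
   removed (the SplCase return). */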
DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *)info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rc = list.remove(hChunk, &db, info->keyPtr_);
    //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    if (SplCase == rc) {
        if (0 != Mutex::CASL((long*)& (((Bucket *)info->bucket_)->bucketList_),
            (long)(((Bucket *)info->bucket_)->bucketList_),
            (long)list.getBucketListHead()))
        {
            printError(ErrLockTimeOut, "Unable to set the head of hash index bucket\n");
            return ErrLockTimeOut;
        }
    }else if (rc != OK) {
        printError(ErrLockTimeOut, "Unable to remove hash index node");
        return ErrLockTimeOut;
    }
    return OK;
}