Hash Index Implementation
[csql.git] / src / storage / HashIndex.cxx
/***************************************************************************
 *   Copyright (C) 2007 by www.databasecache.com                           *
 *   Contact: praba_tuty@databasecache.com                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 ***************************************************************************/
#include<Index.h>
#include<CatalogTables.h>
#include<Lock.h>
#include<Debug.h>
#include<Table.h>
#include<TableImpl.h>
#include<Predicate.h>
#include<PredicateImpl.h>
/* `hashpjw' hash function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools, 1986]. */
unsigned int hashString(char *strVal)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    int iter = 0;
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
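
/* Illustrative sketch, not part of the original source: both helpers run the
   same hashpjw loop. hashString stops at the NUL terminator, while hashBinary
   trusts an explicit length, which is why composite and binary keys (whose
   bytes may legally contain zeros) are routed through hashBinary. A caller
   with, say, 1024 buckets reduces the hash exactly as computeHashBucket below
   does:

       unsigned int h = hashString((char *)"f01");       // hypothetical key
       unsigned int bucketNo = h % 1024;                  // 0..1023

       char raw[4] = {0x12, 0x00, 0x34, 0x00};            // embedded zero bytes
       unsigned int b = hashBinary(raw, sizeof(raw)) % 1024;
*/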
unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
{
    if (typeInt == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    } else if (typeString == type || typeVarchar == type) {
        unsigned int val = hashString((char*)key);
        return val % noOfBuckets;
    } else if (typeShort == type) {
        short val = *(short*) key;
        return val % noOfBuckets;
    } else if (typeLong == type) {
        long val = *(long*) key;
        return val % noOfBuckets;
    } else if (typeLongLong == type) {
        long long val = *(long long*) key;
        return val % noOfBuckets;
    } else if (typeByteInt == type) {
        ByteInt val = *(ByteInt*)key;
        return val % noOfBuckets;
    } else if (typeDate == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    } else if (typeTime == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    } else if (typeComposite == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    } else if (typeBinary == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    } else if (typeULong == type) {
        unsigned long val = *(unsigned long*)key;
        return val % noOfBuckets;
    }
    printError(ErrSysFatal, "Type not supported for hashing\n");
    return -1;
}
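
/* Usage sketch (illustrative, values hypothetical): callers hand over the raw
   key bytes, the bucket count, and a length that is consulted only for the
   variable-length cases (composite and binary; strings find their own end):

       int id = 42;
       unsigned int b = HashIndex::computeHashBucket(typeInt, &id, 1024, 0);
       // b == 42 % 1024 == 42

   Note the latent assumption: the signed branches compute val % noOfBuckets
   directly, so a negative key would produce a negative "bucket number", and
   the callers in this file index buckets[] with the result unchecked. */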
bool HashIndex::checkForUniqueKey(IndexNode *head, HashIndexInfo *info, void *tuple)
{
    if (!head) return false;
    int offset = info->fldOffset;
    DataType type = info->type;
    BucketList list(head);
    BucketIter iter = list.getIterator();
    IndexNode *node;
    void *bucketTuple;
    printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
    bool res = false;
    while ((node = iter.next()) != NULL)
    {
        bucketTuple = node->ptrToTuple_;
        if (type == typeComposite) {
            FieldIterator fldIter = info->idxFldList.getIterator();
            while (fldIter.hasElement()) {
                FieldDef *def = fldIter.nextElement();
                if (def->type_ != typeVarchar) {
                    res = AllDataType::compareVal(
                              (char *)bucketTuple + def->offset_,
                              (char *)tuple + def->offset_,
                              OpEquals, def->type_, def->length_);
                } else {
                    char *tvcptr = (char *) *(long *)
                                   ((char *)tuple + def->offset_);
                    char *btvcptr = (char *) *(long *)
                                    ((char *)bucketTuple + def->offset_);
                    res = AllDataType::compareVal(tvcptr, btvcptr,
                              OpEquals, def->type_, def->length_);
                }
                if (!res) break;
            }
        }
        else {
            if (type != typeVarchar)
                res = AllDataType::compareVal((void*)((char*)bucketTuple + offset),
                          (void*)((char*)tuple + offset), OpEquals, type, info->compLength);
            else
                res = AllDataType::compareVal((void*)*(long *)((char*)bucketTuple + offset),
                          (void*)*(long *)((char*)tuple + offset), OpEquals, type, info->compLength);
        }
        if (res)
        {
            if (type == typeLongLong)
                printError(ErrUnique, "Unique key violation for id:%lld",
                           *(long long*)((char*)tuple + offset));
            else
                printError(ErrUnique, "Unique key violation");
            return true;
        }
    }
    return false;
}
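
/* The scan above, restated as a sketch (comment only, not compiled): walk
   every node chained into the target bucket and compare the indexed field(s)
   of the incoming tuple against each resident tuple; any full match is a
   unique-key violation. For a composite index every member field must match,
   which is why the inner loop breaks at the first non-equal field:

       for each node in bucket list:
           if all indexed fields of node->ptrToTuple_ equal those of tuple:
               return true    // duplicate key
       return false
*/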
DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    CINDEX *iptr = (CINDEX*)indexPtr;
    DbRetVal rc = OK;
    int noOfBuckets = info->noOfBuckets;
    int offset = info->fldOffset;
    DataType type = info->type;

    printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    void *keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void* keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            if (def->type_ != typeVarchar) {
                AllDataType::copyVal(keyBuffer, keyPtr, def->type_,
                                     def->length_);
            } else {
                //varchar fields store a pointer to the actual value
                void *ptr = (void *) *(long *) keyPtr;
                if (ptr)
                    AllDataType::copyVal(keyBuffer, ptr, def->type_,
                                         def->length_);
            }
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets,
                                     info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        if (type != typeVarchar)
            bucketNo =
                computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
        else {
            void *ptr = (void *) *(long *) keyPtr;
            if (ptr)
                bucketNo = computeHashBucket(type, ptr, noOfBuckets,
                                             info->compLength);
            else bucketNo = 0;   //NULL varchar keys all map to bucket 0
        }
    }
    printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
    Bucket *bucket = &(buckets[bucketNo]);
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    IndexNode *head = (IndexNode*) bucket->bucketList_;
    if (info->isUnique)
    {
        bool isKeyPresent = checkForUniqueKey(head, info, tuple);
        if (isKeyPresent) return ErrUnique;
    }
    Chunk *hIdxNodeChunk = (Chunk*)iptr->hashNodeChunk_;
    printDebug(DM_HashIndex, "HashIndex insert into bucket list");
    if (!head)
    {
        printDebug(DM_HashIndex, "HashIndex insert head is empty");
        DbRetVal rv = OK;
        IndexNode *firstNode = NULL;
        int tries = 0;
        int totalTries = Conf::config.getMutexRetries();
        while (tries < totalTries)
        {
            rv = OK;
            firstNode = (IndexNode*) hIdxNodeChunk->allocate(tbl->db_, &rv);
            if (firstNode != NULL) break;
            if (rv != ErrLockTimeOut)
            {
                printError(rv, "Unable to allocate hash index node");
                return rv;
            }
            tries++;
        }
        if (firstNode == NULL) {
            printError(rv, "Unable to allocate hash index node after %d retries", tries);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        if (0 != Mutex::CASL((long*)&bucket->bucketList_, 0, (long)firstNode)) {
            printError(ErrLockTimeOut, "Hash index bucket lock timeout.. retry");
            hIdxNodeChunk->free(tbl->db_, firstNode);
            return ErrLockTimeOut;
        }
        printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
    }
    else
    {
        BucketList list(head);
        rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        if (rc != OK) {
            printError(rc, "unable to insert into bucketlist rv:%d", rc);
            return rc;
        }
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            printError(rc, "Unable to append logical undo log rc:%d", rc);
            BucketList list(head);
            DbRetVal rv = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            if (rv == SplCase) {
                printError(ErrWarning, "SplCase occurred");
                if (0 != Mutex::CASL((long*)&bucket->bucketList_,
                        (long)bucket->bucketList_, (long)list.getBucketListHead())) {
                    printError(ErrSysFatal, "Double failure, may lead to hash node leak\n");
                }
            } else if (rv != OK) {
                printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
            }
        }
    }
    return rc;
}
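
/* Design note with a minimal sketch (assuming only that Mutex::CASL is a
   compare-and-swap on a long-sized word, as its uses in this file suggest):
   the first node of an empty bucket is published lock-free. The CAS succeeds
   only while bucketList_ is still NULL, so a concurrent inserter that loses
   the race frees its node and reports ErrLockTimeOut rather than overwriting
   the winner:

       if (0 == Mutex::CASL((long*)&bucket->bucketList_, 0, (long)node)) {
           // node published; readers now see it as the bucket head
       } else {
           hIdxNodeChunk->free(tbl->db_, node);  // lost the race; caller retries
       }
*/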
DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    void *keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void *keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            if (def->type_ != typeVarchar) {
                AllDataType::copyVal(keyBuffer, keyPtr, def->type_,
                                     def->length_);
            } else {
                void *ptr = (void *) *(long *) keyPtr;
                if (ptr)
                    AllDataType::copyVal(keyBuffer, ptr, def->type_,
                                         def->length_);
            }
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucketNo = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        if (type != typeVarchar)
            bucketNo = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets,
                                                    info->compLength);
        else {
            void *ptr = (void *) *(long *) keyPtr;
            if (ptr)
                bucketNo = HashIndex::computeHashBucket(type, ptr, noOfBuckets,
                                                        info->compLength);
            else bucketNo = 0;
        }
    }
    Bucket *bucket1 = &buckets[bucketNo];
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket1;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    IndexNode *head = (IndexNode*) bucket1->bucketList_;
    if (!head) {
        printError(ErrNotExists, "Hash index node does not exist: should never happen\n");
        return ErrNotExists;
    }
    BucketList list(head);
    printDebug(DM_HashIndex, "Removing hash index node from head %x", head);

    DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
    if (SplCase == rc)
    {
        printDebug(DM_HashIndex, "Removing hash index node from head ");
        if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                (long)head, (long)list.getBucketListHead())) {
            printError(ErrSysFatal, "Lock timeout for hash bucket.. retry\n");
            return ErrLockTimeOut;
        }
        rc = OK;
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                    (long)bucket1->bucketList_, (long)list.getBucketListHead())) {
                printError(ErrSysFatal, "Double failure on index insert");
            }
        }
    }
    return rc;
}
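
/* A hedged reading of SplCase, inferred from its handling here and in the
   undo-log paths below: BucketList::remove appears to return SplCase when the
   removed node was the list head, meaning the bucket's own head pointer must
   be swung to the new head. Doing that swing with CASL against the old head
   value keeps it safe against a concurrent lock-free insert into the same
   bucket; if the CAS fails, someone else changed the head first:

       // sketch: publish the new head only if nobody moved it meanwhile
       Mutex::CASL((long*)&bucket1->bucketList_, (long)head,
                   (long)list.getBucketListHead());
*/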
DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    //check whether the index key is updated or not;
    //if it is not updated, return from here
    void *keyPtr = (void*)((char*)tuple + offset);
    char *kPtr = (char*)keyPtr;

    //build the old key value buffer (for composite primary keys this
    //concatenates every indexed field of the current tuple)
    char *oldKeyBuffer = (char*) malloc(info->compLength);
    memset(oldKeyBuffer, 0, info->compLength);
    void *oldKeyStartBuffer = oldKeyBuffer;
    FieldIterator iter = info->idxFldList.getIterator();
    while (iter.hasElement()) {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(oldKeyBuffer, keyPtr, def->type_, def->length_);
        oldKeyBuffer = oldKeyBuffer + AllDataType::size(def->type_, def->length_);
    }
    keyPtr = (void *) kPtr;

    //iterate through the bind list and build the new key value
    FieldIterator idxFldIter = info->idxFldList.getIterator();
    char *keyBindBuffer;
    if (type == typeBinary) {
        keyBindBuffer = (char*) malloc(2 * info->compLength);
        memset(keyBindBuffer, 0, 2 * info->compLength);
    } else {
        keyBindBuffer = (char*) malloc(info->compLength);
        memset(keyBindBuffer, 0, info->compLength);
    }
    void *keyStartBuffer = (void*) keyBindBuffer;
    bool keyUpdated = false;

    while (idxFldIter.hasElement()) {
        FieldDef *idef = idxFldIter.nextElement();
        FieldIterator fldIter = tbl->fldList_.getIterator();
        while (fldIter.hasElement()) {
            FieldDef *def = fldIter.nextElement();
            if (0 == strcmp(def->fldName_, idef->fldName_)) {
                if (NULL != def->bindVal_) {
                    if (type == typeBinary) {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                             def->type_, 2 * def->length_);
                        keyStartBuffer = calloc(1, info->compLength);
                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
                        free(keyBindBuffer);
                    } else {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                             def->type_, def->length_);
                        keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                    }
                } else {
                    AllDataType::copyVal(keyBindBuffer, (char *) tuple + def->offset_, def->type_, def->length_);
                    keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                }
                keyUpdated = true;
                break;
            }
        }
    }
    if (!keyUpdated) {
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    bool result = false;
    if (type == typeComposite)
        result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
                                         OpEquals, info->type, info->compLength);
    else
        result = AllDataType::compareVal(keyPtr, keyStartBuffer,
                                         OpEquals, info->type, info->compLength);
    if (result) {
        //old and new key values are equal: nothing to move
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    //remove the node whose key is updated
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
    else
        bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
    Bucket *bucket = &buckets[bucketNo];

    HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
    hInfo1->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo1->bucket_ = bucket;
    hInfo1->tuple_ = tuple;
    hInfo1->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo1->keyPtr_ = keyPtr;

    //two threads updating tuples that hash to the same pair of buckets may
    //deadlock, so take both mutexes one after another, which narrows the
    //deadlock window
    int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo1;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut, "Unable to acquire bucket mutex for bucket %d", bucketNo);
        return ErrLockTimeOut;
    }
    //insert node for the updated key value
    int newBucketNo = computeHashBucket(type,
                          keyStartBuffer, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);

    Bucket *bucket1 = &buckets[newBucketNo];
    HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
    hInfo2->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo2->bucket_ = bucket1;   //fix: undo info for the insert must record the new bucket
    hInfo2->tuple_ = tuple;
    hInfo2->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo2->keyPtr_ = keyPtr;
    ret = bucket1->mutex_.getLock(tbl->db_->procSlot);  //fix: result was previously ignored
    if (ret != 0)
    {
        bucket->mutex_.releaseLock(tbl->db_->procSlot); //fix: drop the first lock on failure
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut, "Unable to acquire bucket mutex for bucket %d", newBucketNo);
        return ErrLockTimeOut;
    }
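
    /* Sketch of the hazard the comment above mentions (bucket numbers are
       hypothetical): thread A moves a key from bucket 3 to bucket 7 and locks
       3 then 7, while thread B moves a key from 7 to 3 and locks 7 then 3;
       each then blocks on the other's first mutex. Taking the two mutexes in
       a fixed global order would close the window rather than shrink it:

           Bucket *first  = (bucketNo < newBucketNo) ? bucket  : bucket1;
           Bucket *second = (bucketNo < newBucketNo) ? bucket1 : bucket;
           // lock first, then second; release in the reverse order
    */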
    IndexNode *head1 = (IndexNode*) bucket->bucketList_;
    if (head1)
    {
        BucketList list1(head1);
        printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
        list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
        bucket->bucketList_ = list1.getBucketListHead();
    }
    else
    {
        printError(ErrSysInternal, "Update: Bucket list is null");
        bucket1->mutex_.releaseLock(tbl->db_->procSlot);
        bucket->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return ErrSysInternal;
    }
    DbRetVal rc = OK;
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //undo the removal done above
            BucketList list((IndexNode*) bucket->bucketList_);
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            bucket->bucketList_ = list.getBucketListHead();
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rc;
        }
    }
    IndexNode *head2 = (IndexNode*) bucket1->bucketList_;
    //Note: the tuple stays at the same address, so keyPtr and tuple do not
    //change during the move; only the bucket holding the node changes when
    //the index key is updated.
    if (!head2)
    {
        DbRetVal rv = OK;
        IndexNode *firstNode = (IndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
        if (firstNode == NULL)
        {
            printError(rv, "Error in allocating hash node");
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        bucket1->bucketList_ = (IndexNode*)firstNode;
        printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x: Head is empty", firstNode);
    }
    else
    {
        BucketList list2(head2);
        printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
        list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        bucket1->bucketList_ = list2.getBucketListHead();
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //revert the changes: delete the new node, add back the old node,
            //and remove the logical undo log of the DeleteHashIndexOperation
            BucketList list1((IndexNode*) bucket->bucketList_);
            BucketList list2((IndexNode*) bucket1->bucketList_);
            list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            bucket->bucketList_ = list1.getBucketListHead();
            bucket1->bucketList_ = list2.getBucketListHead();
            UndoLogInfo *logInfo = tr->popUndoLog();
            Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
            chunk->free(tbl->sysDB_, logInfo);
        }
    }
    bucket1->mutex_.releaseLock(tbl->db_->procSlot);
    bucket->mutex_.releaseLock(tbl->db_->procSlot);
    delete hInfo1;
    delete hInfo2;
    free(keyStartBuffer);
    free(oldKeyStartBuffer);
    return rc;
}
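
/* Design note (restating the flow above, no new behavior): a key update is
   modeled as a delete from the old bucket plus an insert into the new one,
   each with its own logical undo record:

       appendLogicalHashUndoLog(DeleteHashIndexOperation, hInfo1)  // old bucket
       appendLogicalHashUndoLog(InsertHashIndexOperation, hInfo2)  // new bucket

   If the second append fails, the code reverts both bucket lists by hand and
   pops the first record, so the undo log never describes a half-applied
   move. */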
//The following methods undo logical hash index operations
DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *) info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rv = list.insert(hChunk, &db, info->keyPtr_, info->tuple_);
    if (rv != OK)
    {
        printError(ErrLockTimeOut, "Unable to add to bucket.. retry\n");
        return ErrLockTimeOut;
    }
    if (0 != Mutex::CASL((long*)&(((Bucket *)info->bucket_)->bucketList_),
            (long)(((Bucket *)info->bucket_)->bucketList_),
            (long)list.getBucketListHead()))
    {
        printError(ErrLockTimeOut, "Unable to add to bucket.. retry\n");
        return ErrLockTimeOut;
    }
    return OK;
}
DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *)info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rc = list.remove(hChunk, &db, info->keyPtr_);
    if (SplCase == rc) {
        if (0 != Mutex::CASL((long*)&(((Bucket *)info->bucket_)->bucketList_),
                (long)(((Bucket *)info->bucket_)->bucketList_),
                (long)list.getBucketListHead()))
        {
            printError(ErrLockTimeOut, "Unable to set the head of hash index bucket\n");
            return ErrLockTimeOut;
        }
    } else if (rc != OK) {
        printError(ErrLockTimeOut, "Unable to remove hash index node");
        return ErrLockTimeOut;
    }
    return OK;
}
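
/* Recovery-side sketch (hypothetical driver, assuming the HashUndoLogInfo
   fields captured above): each undo handler rebuilds a throwaway Database
   handle from the metadata pointer recorded at log time, then replays the
   inverse list operation against the recorded bucket:

       HashUndoLogInfo *u = (HashUndoLogInfo *) logRecordData;  // from the log
       Database db;
       db.setMetaDataPtr((DatabaseMetaData *) u->metaData_);
       db.setProcSlot(sysdb->procSlot);
       // undoing an insert removes the key; undoing a delete re-inserts it
*/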