/***************************************************************************
 *   Copyright (C) 2007 by www.databasecache.com                           *
 *   Contact: praba_tuty@databasecache.com                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 ***************************************************************************/
#include<Index.h>
#include<CatalogTables.h>
#include<Lock.h>
#include<Debug.h>
#include<Table.h>
#include<TableImpl.h>
#include<Predicate.h>
#include<PredicateImpl.h>
/* Defines `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools] */
unsigned int hashString(char *strVal)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
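
/* Same hashpjw mixing as hashString, but over a fixed number of bytes
   instead of stopping at the NUL terminator; used below for binary and
   composite key images. */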
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    int iter = 0;
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
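
/* Maps a key to a bucket slot. Integral types hash by value, strings via
   hashpjw, and binary/composite keys via hashBinary; the result is always
   taken modulo noOfBuckets. For example, an int key of 42 with 1024 buckets
   lands in bucket 42 % 1024 = 42. */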
unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
{
    if (typeInt == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeString == type) {
        unsigned int val = hashString((char*)key);
        return val % noOfBuckets;
    }else if (typeShort == type) {
        short val = *(short*) key;
        return val % noOfBuckets;
    }else if (typeLong == type) {
        long val = *(long*) key;
        return val % noOfBuckets;
    }else if (typeLongLong == type) {
        long long val = *(long long*) key;
        return val % noOfBuckets;
    }else if (typeByteInt == type) {
        ByteInt val = *(ByteInt*)key;
        return val % noOfBuckets;
    }else if (typeDate == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeTime == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeComposite == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    }else if (typeBinary == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    }else if (typeULong == type) {
        unsigned long val = *(unsigned long*)key;
        return val % noOfBuckets;
    }
    printError(ErrSysFatal, "Type not supported for hashing\n");
    return -1;
}
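
/* Inserts an index node for a tuple: computes the target bucket (building a
   contiguous key buffer first for composite keys), enforces uniqueness when
   the index requires it, then links the node in -- via CAS when the bucket is
   empty, or through BucketList::insert otherwise -- and records a logical
   undo log entry unless called from the loader (loadFlag). */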
DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    CINDEX *iptr = (CINDEX*)indexPtr;
    DbRetVal rc = OK;
    int noOfBuckets = info->noOfBuckets;
    int offset = info->fldOffset;
    DataType type = info->type;

    printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    void *keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void *keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    }
    printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
    Bucket *bucket = &(buckets[bucketNo]);
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
    if (head && info->isUnique)
    {
        BucketList list(head);
        BucketIter iter = list.getIterator();
        HashIndexNode *node;
        void *bucketTuple;
        printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
        bool res = false;

        while ((node = iter.next()) != NULL)
        {
            bucketTuple = node->ptrToTuple_;
            if (type == typeComposite) {
                FieldIterator fldIter = info->idxFldList.getIterator();
                while (fldIter.hasElement()) {
                    FieldDef *def = fldIter.nextElement();
                    res = AllDataType::compareVal((char *)bucketTuple + def->offset_, (char *)tuple + def->offset_, OpEquals, def->type_, def->length_);
                    if (!res) break;
                }
            }
            else res = AllDataType::compareVal((void*)((char*)bucketTuple + offset), (void*)((char*)tuple + offset), OpEquals, type, info->compLength);
            if (res)
            {
                printError(ErrUnique, "Unique key violation");
                if (type == typeLongLong) printError(ErrUnique, "Unique key violation for id:%lld", *(long long*)((char*)tuple + offset));
                return ErrUnique;
            }
        }
    }

    Chunk *hIdxNodeChunk = (Chunk*)iptr->hashNodeChunk_;
    printDebug(DM_HashIndex, "HashIndex insert into bucket list");
    if (!head)
    {
        printDebug(DM_HashIndex, "HashIndex insert head is empty");
        DbRetVal rv = OK;
        HashIndexNode *firstNode = NULL;

        int tries = 0;
        int totalTries = Conf::config.getMutexRetries();
        while (tries < totalTries)
        {
            rv = OK;
            firstNode = (HashIndexNode*) hIdxNodeChunk->allocate(tbl->db_, &rv);
            if (firstNode != NULL) break;
            if (rv != ErrLockTimeOut)
            {
                printError(rv, "Unable to allocate hash index node");
                return rv;
            }
            tries++;
        }
        if (firstNode == NULL) {
            printError(rv, "Unable to allocate hash index node after %d retry", tries);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        if (0 != Mutex::CASL((long*)&bucket->bucketList_, 0, (long)firstNode)) {
            printError(ErrLockTimeOut, "Hash Index bucket lock timeout.. retry");
            hIdxNodeChunk->free(tbl->db_, firstNode);
            return ErrLockTimeOut;
        }
        printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
    }
    else
    {
        BucketList list(head);
        rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        if (rc != OK) {
            printError(rc, "unable to insert into bucketlist rv:%d", rc);
            return rc;
        }
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            printError(rc, "Unable to append logical log before rc:%d", rc);
            BucketList list(head);
            DbRetVal rv = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            //bucket->bucketList_ = list.getBucketListHead();
            if (rv == SplCase) {
                printError(ErrWarning, "SplCase occured");
                if (0 != Mutex::CASL((long*)&bucket->bucketList_,
                        (long)bucket->bucketList_, (long)list.getBucketListHead())) {
                    printError(ErrSysFatal, "Double failure, may lead to hash node leak\n");
                }
            } else if (rv != OK) printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
        }
    }
    return rc;
}
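
/* Removes the index node for a tuple: locates the bucket the same way as
   insert(), unlinks the node through BucketList::remove (SplCase means the
   head node itself was removed, so the bucket head is swung via CAS), and
   appends a DeleteHashIndexOperation undo record unless loading. */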
DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;

    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    void *keyPtr = (void*)((char*)tuple + offset);
    int bucket = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void *keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucket = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    }

    Bucket *bucket1 = &buckets[bucket];
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket1;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;

    if (!head) {
        printError(ErrNotExists, "Hash index does not exist:should never happen\n");
        return ErrNotExists;
    }
    BucketList list(head);
    printDebug(DM_HashIndex, "Removing hash index node from head %x", head);

    DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
    if (SplCase == rc)
    {
        printDebug(DM_HashIndex, "Removing hash index node from head ");
        //bucket1->bucketList_ = list.getBucketListHead();
        if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                (long)head, (long)list.getBucketListHead())) {
            printError(ErrSysFatal, "Lock time out for hash bucket. retry\n");
            return ErrLockTimeOut;
        }
        rc = OK;
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            //bucket1->bucketList_ = list.getBucketListHead();
            if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                    (long)bucket1->bucketList_, (long)list.getBucketListHead())) {
                printError(ErrSysFatal, "Double failure on index insert");
            }
        }
    }
    return rc;
}
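
/* Handles an update that may move a key between buckets: builds old and new
   key images, returns early if no index field was bound or the key value is
   unchanged, then locks both the old and new buckets (taken one after
   another to narrow the deadlock window), unlinks the node from the old
   bucket and relinks it in the new one, logging a delete+insert undo pair. */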
DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;

    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    //check whether the index key is updated or not
    //if it is not updated return from here
    void *keyPtr = (void*)((char*)tuple + offset);
    char *kPtr = (char*)keyPtr;

    //creating old key value buffer for composite primary keys
    char *oldKeyBuffer = (char*) malloc(info->compLength);
    memset(oldKeyBuffer, 0, info->compLength);
    void *oldKeyStartBuffer = oldKeyBuffer;
    FieldIterator iter = info->idxFldList.getIterator();
    while (iter.hasElement()) {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(oldKeyBuffer, keyPtr, def->type_, def->length_);
        oldKeyBuffer = oldKeyBuffer + AllDataType::size(def->type_, def->length_);
    }

    keyPtr = (void *) kPtr;
    //Iterate through the bind list and check
    FieldIterator idxFldIter = info->idxFldList.getIterator();
    char *keyBindBuffer;
    if (type == typeBinary) {
        keyBindBuffer = (char*) malloc(2 * info->compLength);
        memset(keyBindBuffer, 0, 2 * info->compLength);
    } else {
        keyBindBuffer = (char*) malloc(info->compLength);
        memset(keyBindBuffer, 0, info->compLength);
    }
    void *keyStartBuffer = (void*) keyBindBuffer;
    bool keyUpdated = false;

    while (idxFldIter.hasElement()) {
        FieldDef *idef = idxFldIter.nextElement();
        FieldIterator fldIter = tbl->fldList_.getIterator();
        while (fldIter.hasElement()) {
            FieldDef *def = fldIter.nextElement();
            if (0 == strcmp(def->fldName_, idef->fldName_)) {
                if (NULL != def->bindVal_) {
                    if (type == typeBinary) {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                        def->type_, 2*def->length_);
                        keyStartBuffer = calloc(1, info->compLength);
                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
                        free(keyBindBuffer);
                    } else {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                        def->type_, def->length_);
                        keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                    }
                } else {
                    AllDataType::copyVal(keyBindBuffer, (char *) tuple + def->offset_, def->type_, def->length_);
                    keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                }
                keyUpdated = true;
                break;
            }
        }
    }
    if (!keyUpdated) {
        //printf("DEBUG::key not updated\n");
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    bool result = false;
    if (type == typeComposite)
        result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
                                  OpEquals, info->type, info->compLength);
    else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
                                  OpEquals, info->type, info->compLength);
    if (result) {
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    //remove the node whose key is updated
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
    else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
    Bucket *bucket = &buckets[bucketNo];

    HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
    hInfo1->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo1->bucket_ = bucket;
    hInfo1->tuple_ = tuple;
    hInfo1->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo1->keyPtr_ = keyPtr;

    //it may run into deadlock, when two threads update tuples which fall in
    //the same buckets. So take both the mutexes one after another, which will
    //reduce the deadlock window.
    int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo1;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut, "Unable to acquire bucket Mutex for bucket %d", bucketNo);
        return ErrLockTimeOut;
    }
    //insert node for the updated key value
    int newBucketNo = computeHashBucket(type,
                        keyStartBuffer, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);
    Bucket *bucket1 = &buckets[newBucketNo];
    HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
    hInfo2->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo2->bucket_ = bucket1; //undo of the insert must target the new bucket
    hInfo2->tuple_ = tuple;
    hInfo2->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo2->keyPtr_ = keyPtr;
    ret = bucket1->mutex_.getLock(tbl->db_->procSlot); //was discarded, leaving ret stale
    if (ret != 0)
    {
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut, "Unable to acquire bucket Mutex for bucket %d", newBucketNo);
        return ErrLockTimeOut;
    }
    HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
    if (head1)
    {
        BucketList list1(head1);
        printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
        list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
        bucket->bucketList_ = list1.getBucketListHead();
    }
    else
    {
        printError(ErrSysInternal, "Update: Bucket list is null");
        bucket1->mutex_.releaseLock(tbl->db_->procSlot);
        bucket->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return ErrSysInternal;
    }
    DbRetVal rc = OK;
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            BucketList list((HashIndexNode*) bucket->bucketList_);
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            bucket->bucketList_ = list.getBucketListHead();
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rc;
        }
    }

    HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
    //Note:: the tuple will be in the same address location
    //so not changing the keyptr and tuple during append
    //only the bucket where this node resides will change
    //if the index key is updated.
    if (!head2)
    {
        DbRetVal rv = OK;
        HashIndexNode *firstNode = (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
        if (firstNode == NULL)
        {
            printError(rv, "Error in allocating hash node");
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        bucket1->bucketList_ = (HashIndexNode*)firstNode;
        printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
    }
    else
    {
        BucketList list2(head2);
        printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
        list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        bucket1->bucketList_ = list2.getBucketListHead();
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //reverting back the changes:delete new node and add the old
            //node + remove logical undo log of the DeleteHashIndexOperation
            BucketList list1((HashIndexNode*) bucket->bucketList_);
            BucketList list2((HashIndexNode*) bucket1->bucketList_);
            list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            bucket->bucketList_ = list1.getBucketListHead();
            bucket1->bucketList_ = list2.getBucketListHead();
            UndoLogInfo *logInfo = tr->popUndoLog();
            Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
            chunk->free(tbl->sysDB_, logInfo);
        }
    }
    bucket1->mutex_.releaseLock(tbl->db_->procSlot);
    bucket->mutex_.releaseLock(tbl->db_->procSlot);
    delete hInfo1;
    delete hInfo2;
    free(keyStartBuffer);
    free(oldKeyStartBuffer);
    return rc;
}
//Following three methods are used to undo Logical Hash Indexes
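/* insertLogicalUndoLog re-links the logged key's node into its bucket (the
   undo of a removal). Each handler rebuilds a Database handle from the
   metadata pointer captured in HashUndoLogInfo, presumably so undo can run
   during recovery in a process other than the one that wrote the record. */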
DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *) info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rv = list.insert(hChunk, &db, info->keyPtr_, info->tuple_);
    if (rv != OK)
    {
        printError(ErrLockTimeOut, "Unable to add to bucket..retry\n");
        return ErrLockTimeOut;
    }
    //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    if (0 != Mutex::CASL((long*)&(((Bucket *)info->bucket_)->bucketList_),
            (long)(((Bucket *)info->bucket_)->bucketList_),
            (long)list.getBucketListHead()))
    {
        printError(ErrLockTimeOut, "Unable to add to bucket..retry\n");
        return ErrLockTimeOut;
    }
    return OK;
}
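
/* Counterpart of the above: removes the logged key's node from its bucket
   (the undo of an insert). On SplCase -- the list head itself was removed --
   the bucket head is swung to the new list head via CAS. */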
DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *)info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rc = list.remove(hChunk, &db, info->keyPtr_);
    //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    if (SplCase == rc) {
        if (0 != Mutex::CASL((long*)&(((Bucket *)info->bucket_)->bucketList_),
                (long)(((Bucket *)info->bucket_)->bucketList_),
                (long)list.getBucketListHead()))
        {
            printError(ErrLockTimeOut, "Unable to set the head of hash index bucket\n");
            return ErrLockTimeOut;
        }
    }else if (rc != OK) {
        printError(ErrLockTimeOut, "Unable to remove hash index node");
        return ErrLockTimeOut;
    }
    return OK;
}