/***************************************************************************
 *   Copyright (C) 2007 by www.databasecache.com                           *
 *   Contact: praba_tuty@databasecache.com                                 *
 *                                                                         *
 *  This program is free software; you can redistribute it and/or modify  *
 *  it under the terms of the GNU General Public License as published by  *
 *  the Free Software Foundation; either version 2 of the License, or     *
 *  (at your option) any later version.                                   *
 *                                                                         *
 *  This program is distributed in the hope that it will be useful,       *
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *  GNU General Public License for more details.                          *
 *                                                                         *
 ***************************************************************************/
#include<Index.h>
#include<CatalogTables.h>
#include<Lock.h>
#include<Debug.h>
#include<Table.h>
#include<TableImpl.h>
#include<Predicate.h>
#include<PredicateImpl.h>
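
/* Hash index implementation: hashString()/hashBinary() implement the
 * hashpjw hash, computeHashBucket() maps typed keys to buckets, and
 * insert()/remove()/update() maintain the per-bucket HashIndexNode
 * chains under per-bucket mutexes with logical undo logging. */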
/* Defines the `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools] */
unsigned int hashString(char *strVal)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    int iter = 0;
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
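
/* Picks the bucket for a key: integral and date/time types are taken
 * modulo noOfBuckets directly; string keys go through hashString(),
 * and binary/composite keys through hashBinary(), which is the only
 * case that consults `length'. Unsupported types log ErrSysFatal. */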
unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
{
    if (typeInt == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeString == type) {
        unsigned int val = hashString((char*)key);
        return val % noOfBuckets;
    }else if (typeShort == type) {
        short val = *(short*) key;
        return val % noOfBuckets;
    }else if (typeLong == type) {
        long val = *(long*) key;
        return val % noOfBuckets;
    }else if (typeLongLong == type) {
        long long val = *(long long*) key;
        return val % noOfBuckets;
    }else if (typeByteInt == type) {
        ByteInt val = *(ByteInt*)key;
        return val % noOfBuckets;
    }else if (typeDate == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeTime == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    }else if (typeComposite == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    }else if (typeBinary == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    }else if (typeULong == type) {
        unsigned long val = *(unsigned long*)key;
        return val % noOfBuckets;
    }
    printError(ErrSysFatal,"Type not supported for hashing\n");
    return -1;
}
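
/* Inserts an index entry for `tuple'. The composite key, if any, is
 * assembled field by field into keyBuffer; the key is hashed to a
 * bucket, the bucket mutex is taken, and for unique indexes the
 * bucket chain is scanned for a duplicate before the node is linked
 * in. Unless loadFlag is set, a logical undo record is appended so
 * the insert can be rolled back; if appending the record fails, the
 * node is unlinked again before returning. */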
DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    CINDEX *iptr = (CINDEX*)indexPtr;
    DbRetVal rc = OK;
    int noOfBuckets = info->noOfBuckets;
    int offset = info->fldOffset;
    DataType type = info->type;
    char *keyBuffer = (char*) malloc(info->compLength);
    memset(keyBuffer, 0, info->compLength);
    void *keyStartBuffer = keyBuffer, *keyPtr;
    FieldIterator iter = info->idxFldList.getIterator();
    while(iter.hasElement())
    {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
        keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
    }
    printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
    else
        bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
    Bucket *bucket = &(buckets[bucketNo]);
    HashUndoLogInfo *hInfo = new HashUndoLogInfo();
    hInfo->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo->bucket_ = bucket;
    hInfo->tuple_ = tuple;
    hInfo->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo->keyPtr_ = keyPtr;
    int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo;
        free(keyStartBuffer);
        printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
        return ErrLockTimeOut;
    }
    HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
    if (head && info->isUnique)
    {
        BucketList list(head);
        BucketIter iter = list.getIterator();
        HashIndexNode *node;
        void *bucketTuple;
        printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
        bool res = false;
        while((node = iter.next()) != NULL)
        {
            bucketTuple = node->ptrToTuple_;
            if (type == typeComposite) {
                FieldIterator fldIter = info->idxFldList.getIterator();
                while (fldIter.hasElement()) {
                    FieldDef *def = fldIter.nextElement();
                    res = AllDataType::compareVal((char *)bucketTuple + def->offset_, (char *)tuple + def->offset_, OpEquals, def->type_, def->length_);
                    if (!res) break;
                }
            }
            else res = AllDataType::compareVal((void*)((char*)bucketTuple +offset), (void*)((char*)tuple +offset), OpEquals,type, info->compLength);
            if (res)
            {
                delete hInfo;
                free(keyStartBuffer);
                printError(ErrUnique, "Unique key violation");
                bucket->mutex_.releaseLock(tbl->db_->procSlot);
                return ErrUnique;
            }
        }
    }
    printDebug(DM_HashIndex, "HashIndex insert into bucket list");
    if (!head)
    {
        printDebug(DM_HashIndex, "HashIndex insert head is empty");
        DbRetVal rv = OK;
        HashIndexNode *firstNode = (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
        if (firstNode == NULL)
        {
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo;
            free(keyStartBuffer);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        bucket->bucketList_ = (HashIndexNode*)firstNode;
        printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
    }
    else
    {
        BucketList list(head);
        rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        if (rc != OK) {
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo;
            free(keyStartBuffer);
            return rc;
        }
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            BucketList list(head);
            rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
            bucket->bucketList_ = list.getBucketListHead();
        }
    }
    free(keyStartBuffer);
    delete hInfo; hInfo = NULL;
    bucket->mutex_.releaseLock(tbl->db_->procSlot);
    return rc;
}
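
/* Removes the index entry for `tuple'. The key is rebuilt and hashed
 * exactly as in insert() so the same bucket is found, and the node is
 * unlinked from the bucket chain under the bucket mutex. Unless
 * loadFlag is set, a logical undo record is appended; a failure to
 * append re-inserts the node so the index stays consistent. */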
DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    char *keyBuffer = (char*) malloc(info->compLength);
    memset(keyBuffer, 0, info->compLength);
    void *keyStartBuffer = keyBuffer, *keyPtr;
    FieldIterator iter = info->idxFldList.getIterator();
    while(iter.hasElement())
    {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
        keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
    }
    keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
    else bucketNo = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    Bucket *bucket1 = &buckets[bucketNo];
    HashUndoLogInfo *hInfo = new HashUndoLogInfo();
    hInfo->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo->bucket_ = bucket1;
    hInfo->tuple_ = tuple;
    hInfo->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo->keyPtr_ = keyPtr;
    int ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo;
        free(keyStartBuffer);
        printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
        return ErrLockTimeOut;
    }
    HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;
    if (!head) {
        printError(ErrNotExists, "Hash index does not exist: should never happen\n");
        bucket1->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo;
        free(keyStartBuffer);
        return ErrNotExists;
    }
    BucketList list(head);
    printDebug(DM_HashIndex, "Removing hash index node from head %x", head);
    DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
    if (SplCase == rc)
    {
        printDebug(DM_HashIndex, "Removing hash index node from head ");
        bucket1->bucketList_ = list.getBucketListHead();
        rc = OK;
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            bucket1->bucketList_ = list.getBucketListHead();
        }
    }
    bucket1->mutex_.releaseLock(tbl->db_->procSlot);
    delete hInfo;
    free(keyStartBuffer);
    return rc;
}
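
/* Re-keys the index entry for `tuple' after an UPDATE. The old key is
 * rebuilt from the tuple and the new key from the bound field values;
 * if they compare equal, the index needs no change. Otherwise the
 * node is removed from the old key's bucket and inserted into the new
 * key's bucket, with both bucket mutexes held and a Delete/Insert
 * pair of logical undo records written. */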
DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;
    //Check whether the index key is updated or not;
    //if it is not updated, return from here.
    void *keyPtr = (void*)((char*)tuple + offset);
    char *kPtr = (char*)keyPtr;
    //Create the old key value buffer for composite primary keys.
    char *oldKeyBuffer = (char*) malloc(info->compLength);
    memset(oldKeyBuffer, 0, info->compLength);
    void *oldKeyStartBuffer = oldKeyBuffer;
    FieldIterator iter = info->idxFldList.getIterator();
    while(iter.hasElement()) {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(oldKeyBuffer, keyPtr, def->type_, def->length_);
        oldKeyBuffer = oldKeyBuffer + AllDataType::size(def->type_, def->length_);
    }
    keyPtr = (void *) kPtr;
    //Iterate through the bind list and build the new key value.
    FieldIterator idxFldIter = info->idxFldList.getIterator();
    char *keyBindBuffer;
    if(type == typeBinary) {
        keyBindBuffer = (char*) malloc(2 * info->compLength);
        memset(keyBindBuffer, 0, 2 * info->compLength);
    }else {
        keyBindBuffer = (char*) malloc(info->compLength);
        memset(keyBindBuffer, 0, info->compLength);
    }
    void *keyStartBuffer = (void*) keyBindBuffer;
    bool keyUpdated = false;
    while (idxFldIter.hasElement()) {
        FieldDef *idef = idxFldIter.nextElement();
        FieldIterator fldIter = tbl->fldList_.getIterator();
        while (fldIter.hasElement()) {
            FieldDef *def = fldIter.nextElement();
            if (0 == strcmp(def->fldName_, idef->fldName_)) {
                if (NULL != def->bindVal_) {
                    if(type == typeBinary) {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                             def->type_, 2*def->length_);
                        keyStartBuffer = calloc(1, info->compLength);
                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
                        free(keyBindBuffer);
                    } else {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                             def->type_, def->length_);
                        keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                    }
                } else {
                    AllDataType::copyVal(keyBindBuffer, (char *) tuple + def->offset_, def->type_, def->length_);
                    keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                }
                keyUpdated = true;
                break;
            }
        }
    }
    if (!keyUpdated) {
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    bool result = false;
    if (type == typeComposite)
        result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
                                         OpEquals, info->type, info->compLength);
    else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
                                          OpEquals, info->type, info->compLength);
    if (result) {
        //Key value is unchanged; the index entry can stay where it is.
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    //Remove the node whose key is updated.
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
    else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
    Bucket *bucket = &buckets[bucketNo];
    HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
    hInfo1->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo1->bucket_ = bucket;
    hInfo1->tuple_ = tuple;
    hInfo1->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_; //the hash node chunk, consistent with insert()/remove()
    hInfo1->keyPtr_ = keyPtr;
    //This may deadlock when two threads update tuples that fall into the
    //same pair of buckets. Taking the two mutexes one after another only
    //narrows the deadlock window.
    int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo1;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
        return ErrLockTimeOut;
    }
    //Insert a node for the updated key value.
    int newBucketNo = computeHashBucket(type,
                        keyStartBuffer, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);
    Bucket *bucket1 = &buckets[newBucketNo];
    HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
    hInfo2->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo2->bucket_ = bucket1; //the bucket that receives the new node
    hInfo2->tuple_ = tuple;
    hInfo2->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo2->keyPtr_ = keyPtr;
    ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        bucket->mutex_.releaseLock(tbl->db_->procSlot); //release the old-bucket mutex taken above
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",newBucketNo);
        return ErrLockTimeOut;
    }
    HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
    if (head1)
    {
        BucketList list1(head1);
        printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
        list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
        bucket->bucketList_ = list1.getBucketListHead();
    }
    else
    {
        printError(ErrSysInternal,"Update: Bucket list is null");
        bucket1->mutex_.releaseLock(tbl->db_->procSlot);
        bucket->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return ErrSysInternal;
    }
    DbRetVal rc = OK;
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //Put the node back into the old bucket before bailing out.
            BucketList list((HashIndexNode*) bucket->bucketList_);
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
            bucket->bucketList_ = list.getBucketListHead();
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rc;
        }
    }
    HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
    //Note: the tuple stays at the same address, so keyPtr and tuple are
    //not changed during the append; only the bucket in which the node
    //resides changes when the index key is updated.
    if (!head2)
    {
        DbRetVal rv = OK;
        HashIndexNode *firstNode = (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
        if (firstNode == NULL)
        {
            printError(rv, "Error in allocating hash node");
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        bucket1->bucketList_ = (HashIndexNode*)firstNode;
        printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
    }
    else
    {
        BucketList list2(head2);
        printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
        list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        bucket1->bucketList_ = list2.getBucketListHead();
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //Revert the changes: delete the new node, re-add the old node,
            //and pop the logical undo log of the DeleteHashIndexOperation.
            BucketList list1((HashIndexNode*) bucket->bucketList_);
            BucketList list2((HashIndexNode*) bucket1->bucketList_);
            list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            bucket->bucketList_ = list1.getBucketListHead();
            bucket1->bucketList_ = list2.getBucketListHead();
            UndoLogInfo *logInfo = tr->popUndoLog();
            Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
            chunk->free(tbl->sysDB_, logInfo);
        }
    }
    bucket1->mutex_.releaseLock(tbl->db_->procSlot);
    bucket->mutex_.releaseLock(tbl->db_->procSlot);
    delete hInfo1;
    delete hInfo2;
    free(keyStartBuffer);
    free(oldKeyStartBuffer);
    return rc;
}
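
/* The logical undo handlers below run during rollback, where only the
 * system database handle is available. Each one rebuilds a lightweight
 * Database wrapper around the metadata pointer captured in the
 * HashUndoLogInfo record and replays the inverse bucket-list
 * operation. */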
//Following three methods are used to undo Logical Hash Indexes
DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database *db = new Database();
    db->setMetaDataPtr((DatabaseMetaData *) info->metaData_);
    db->setProcSlot(sysdb->procSlot);
    HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    list.insert(hChunk, db, info->keyPtr_, info->tuple_);
    ((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    delete db; //the wrapper is heap-allocated above and must be freed
    return OK;
}
DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database *db = new Database();
    db->setMetaDataPtr((DatabaseMetaData *)info->metaData_);
    db->setProcSlot(sysdb->procSlot);
    HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rc = list.remove(hChunk, db, info->keyPtr_);
    if (SplCase == rc) {
        ((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    }
    delete db; //the wrapper is heap-allocated above and must be freed
    return OK;
}
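
/* A minimal, compile-time-excluded sketch of how the hashpjw helpers
 * above distribute keys across buckets. HASHPJW_DEMO is a hypothetical
 * flag used only for this illustration; it is not part of the CSQL
 * build. Compiling this translation unit with -DHASHPJW_DEMO yields a
 * standalone driver that mirrors the typeString branch of
 * computeHashBucket(). */
#ifdef HASHPJW_DEMO
#include <cstdio>

int main()
{
    const int noOfBuckets = 1024;   //same order of magnitude as a real index
    const char *keys[] = { "f001", "f002", "f003", "f004" };
    for (int i = 0; i < 4; i++)
    {
        unsigned int hval = hashString((char *)keys[i]);
        //Bucket selection mirrors computeHashBucket() for typeString.
        printf("%s -> hval=%u bucket=%u\n", keys[i], hval, hval % noOfBuckets);
    }
    return 0;
}
#endif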