changing global prepare mutex to process level mutex
src/storage/HashIndex.cxx
/***************************************************************************
 *   Copyright (C) 2007 by www.databasecache.com                           *
 *   Contact: praba_tuty@databasecache.com                                 *
 *                                                                         *
 *  This program is free software; you can redistribute it and/or modify  *
 *  it under the terms of the GNU General Public License as published by  *
 *  the Free Software Foundation; either version 2 of the License, or     *
 *  (at your option) any later version.                                   *
 *                                                                         *
 *  This program is distributed in the hope that it will be useful,       *
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *  GNU General Public License for more details.                          *
 *                                                                         *
 ***************************************************************************/
#include<Index.h>
#include<CatalogTables.h>
#include<Lock.h>
#include<Debug.h>
#include<Table.h>
#include<TableImpl.h>
#include<Predicate.h>
#include<PredicateImpl.h>
/* Defines `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
   1986, 1987 Bell Telephone Laboratories, Inc.] */
unsigned int hashString(char *strVal)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
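
/* Illustrative walk-through (editorial note, not part of the original
 * source): for the key "abc", hashString folds each byte into a rolling
 * 32-bit value:
 *   'a': hval = (0    << 4) + 97 = 97
 *   'b': hval = (97   << 4) + 98 = 1650
 *   'c': hval = (1650 << 4) + 99 = 26499
 * The high-nibble fold through g only fires once hval grows past 2^28,
 * so it never triggers for keys this short. Callers then map the hash to
 * a bucket as hashString(key) % noOfBuckets.
 */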
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    int iter = 0;
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
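
/* Editorial note: hashBinary is the length-bounded variant of hashString.
 * It applies the same hashpjw fold but walks exactly `length` bytes
 * instead of stopping at a NUL terminator, which makes it safe for
 * typeBinary and typeComposite keys that may contain embedded zeros.
 */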
unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
{
    if (typeInt == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    } else if (typeString == type || typeVarchar == type) {
        unsigned int val = hashString((char*)key);
        return val % noOfBuckets;
    } else if (typeShort == type) {
        short val = *(short*) key;
        return val % noOfBuckets;
    } else if (typeLong == type) {
        long val = *(long*) key;
        return val % noOfBuckets;
    } else if (typeLongLong == type) {
        long long val = *(long long*) key;
        return val % noOfBuckets;
    } else if (typeByteInt == type) {
        ByteInt val = *(ByteInt*)key;
        return val % noOfBuckets;
    } else if (typeDate == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    } else if (typeTime == type) {
        int val = *(int*)key;
        return val % noOfBuckets;
    } else if (typeComposite == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    } else if (typeBinary == type) {
        unsigned int val = hashBinary((char*)key, length);
        return val % noOfBuckets;
    } else if (typeULong == type) {
        unsigned long val = *(unsigned long*)key;
        return val % noOfBuckets;
    }
    printError(ErrSysFatal, "Type not supported for hashing\n");
    return -1;
}
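
/* Editorial summary of the insert path below: the bucket is computed from
 * the key (composite keys are first flattened into a contiguous buffer),
 * the chain is scanned for duplicates when the index is unique, and a new
 * HashIndexNode is linked in. An empty bucket head is claimed lock-free
 * via Mutex::CASL; if the CAS loses a race, the freshly allocated node is
 * returned to its chunk. Unless loadFlag is set, a logical undo record is
 * appended, and a failure there unlinks the node again.
 */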
DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    CINDEX *iptr = (CINDEX*)indexPtr;
    DbRetVal rc = OK;
    int noOfBuckets = info->noOfBuckets;
    int offset = info->fldOffset;
    DataType type = info->type;

    printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();
    void *keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void* keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        if (type != typeVarchar)
            bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
        else bucketNo = computeHashBucket(type, (void *) *(long *) keyPtr, noOfBuckets,
                info->compLength);
    }
    printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
    Bucket *bucket = &(buckets[bucketNo]);
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
    if (head && info->isUnique)
    {
        BucketList list(head);
        BucketIter iter = list.getIterator();
        HashIndexNode *node;
        void *bucketTuple;
        printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
        bool res = false;

        while ((node = iter.next()) != NULL)
        {
            bucketTuple = node->ptrToTuple_;
            if (type == typeComposite) {
                FieldIterator fldIter = info->idxFldList.getIterator();
                while (fldIter.hasElement()) {
                    FieldDef *def = fldIter.nextElement();
                    res = AllDataType::compareVal((char *)bucketTuple + def->offset_, (char *)tuple + def->offset_, OpEquals, def->type_, def->length_);
                    if (!res) break;
                }
            }
            else {
                if (type != typeVarchar)
                    res = AllDataType::compareVal((void*)((char*)bucketTuple + offset), (void*)((char*)tuple + offset), OpEquals, type, info->compLength);
                else res = AllDataType::compareVal((void*)*(long *)((char*)bucketTuple + offset), (void*)*(long *)((char*)tuple + offset), OpEquals, type, info->compLength);
            }
            if (res)
            {
                printError(ErrUnique, "Unique key violation");
                if (type == typeLongLong) printError(ErrUnique, "Unique key violation for id:%lld", *(long long*) ((char*)tuple + offset));
                return ErrUnique;
            }
        }
    }
    Chunk *hIdxNodeChunk = (Chunk*)iptr->hashNodeChunk_;
    printDebug(DM_HashIndex, "HashIndex insert into bucket list");
    if (!head)
    {
        printDebug(DM_HashIndex, "HashIndex insert head is empty");
        DbRetVal rv = OK;
        HashIndexNode *firstNode = NULL;

        int tries = 0;
        int totalTries = Conf::config.getMutexRetries();
        while (tries < totalTries)
        {
            rv = OK;
            firstNode = (HashIndexNode*) hIdxNodeChunk->allocate(tbl->db_, &rv);
            if (firstNode != NULL) break;
            if (rv != ErrLockTimeOut)
            {
                printError(rv, "Unable to allocate hash index node");
                return rv;
            }
            tries++;
        }
        if (firstNode == NULL) {
            printError(rv, "Unable to allocate hash index node after %d retries", tries);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        if (0 != Mutex::CASL((long*)&bucket->bucketList_, 0, (long)firstNode)) {
            printError(ErrLockTimeOut, "Hash Index bucket lock timeout.. retry");
            hIdxNodeChunk->free(tbl->db_, firstNode);
            return ErrLockTimeOut;
        }
        printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
    }
    else
    {
        BucketList list(head);
        rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        if (rc != OK) {
            printError(rc, "Unable to insert into bucket list rv:%d", rc);
            return rc;
        }
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            printError(rc, "Unable to append logical undo log rc:%d", rc);
            BucketList list(head);
            DbRetVal rv = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            //bucket->bucketList_ = list.getBucketListHead();
            if (rv == SplCase) {
                printError(ErrWarning, "SplCase occurred");
                if (0 != Mutex::CASL((long*)&bucket->bucketList_,
                        (long)bucket->bucketList_, (long)list.getBucketListHead())) {
                    printError(ErrSysFatal, "Double failure, may lead to hash node leak\n");
                }
            } else if (rv != OK) printError(ErrSysFatal, "Double failure on undo log insert followed by hash bucket list remove\n");
        }
    }
    return rc;
}
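
/* Editorial summary of the remove path below: the bucket is recomputed
 * exactly as in insert, the node is unlinked via BucketList::remove, and
 * when the head node itself was removed (SplCase) the new head is
 * published with Mutex::CASL. Unless loadFlag is set, a logical undo
 * record is appended; if that fails, the node is re-inserted so the index
 * and the undo log stay consistent.
 */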
DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;

    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    void *keyPtr = (void*)((char*)tuple + offset);
    int bucketNo = 0;
    if (type == typeComposite) {
        char *keyBuffer = (char*) malloc(info->compLength);
        memset(keyBuffer, 0, info->compLength);
        void *keyStartBuffer = keyBuffer;
        FieldIterator iter = info->idxFldList.getIterator();
        while (iter.hasElement())
        {
            FieldDef *def = iter.nextElement();
            keyPtr = (char *)tuple + def->offset_;
            AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
            keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
        }
        bucketNo = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
        ::free(keyStartBuffer);
    }
    else {
        if (type != typeVarchar)
            bucketNo = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets,
                    info->compLength);
        else bucketNo = HashIndex::computeHashBucket(type, (void *) *(long *)keyPtr,
                    noOfBuckets, info->compLength);
    }
    Bucket *bucket1 = &buckets[bucketNo];
    HashUndoLogInfo hInfo;
    hInfo.metaData_ = tbl->db_->getMetaDataPtr();
    hInfo.bucket_ = bucket1;
    hInfo.tuple_ = tuple;
    hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo.keyPtr_ = keyPtr;

    HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;

    if (!head) {
        printError(ErrNotExists, "Hash index node does not exist: should never happen\n");
        return ErrNotExists;
    }
    BucketList list(head);
    printDebug(DM_HashIndex, "Removing hash index node from head %x", head);

    DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
    if (SplCase == rc)
    {
        printDebug(DM_HashIndex, "Removing hash index node from head ");
        //bucket1->bucketList_ = list.getBucketListHead();
        if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                (long)head, (long)list.getBucketListHead())) {
            printError(ErrSysFatal, "Lock timeout for hash bucket. retry\n");
            return ErrLockTimeOut;
        }
        rc = OK;
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, &hInfo, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "Double failure on undo log remove followed by hash bucket list insert\n");
            //bucket1->bucketList_ = list.getBucketListHead();
            if (0 != Mutex::CASL((long*)&bucket1->bucketList_,
                    (long)bucket1->bucketList_, (long)list.getBucketListHead())) {
                printError(ErrSysFatal, "Double failure on index insert");
            }
        }
    }
    return rc;
}
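
/* Editorial summary of the update path below: the old and new key images
 * are rebuilt side by side; if no indexed field was actually rebound or
 * the key value is unchanged, the method returns early. Otherwise the
 * node is moved from the old bucket to the new one with both bucket
 * mutexes held; see the in-code note about the deadlock window when two
 * writers touch the same pair of buckets.
 */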
DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag)
{
    CINDEX *iptr = (CINDEX*)indexPtr;

    HashIndexInfo *info = (HashIndexInfo*) indInfo;
    DataType type = info->type;
    int offset = info->fldOffset;
    int noOfBuckets = info->noOfBuckets;

    //check whether the index key is updated or not
    //if it is not updated, return from here
    void *keyPtr = (void*)((char*)tuple + offset);
    char *kPtr = (char*)keyPtr;

    //creating old key value buffer for composite primary keys
    char *oldKeyBuffer = (char*) malloc(info->compLength);
    memset(oldKeyBuffer, 0, info->compLength);
    void *oldKeyStartBuffer = oldKeyBuffer;
    FieldIterator iter = info->idxFldList.getIterator();
    while (iter.hasElement()) {
        FieldDef *def = iter.nextElement();
        keyPtr = (char *)tuple + def->offset_;
        AllDataType::copyVal(oldKeyBuffer, keyPtr, def->type_, def->length_);
        oldKeyBuffer = oldKeyBuffer + AllDataType::size(def->type_, def->length_);
    }
    keyPtr = (void *) kPtr;
    //Iterate through the bind list and build the new key image
    //from the bound field values
    FieldIterator idxFldIter = info->idxFldList.getIterator();
    char *keyBindBuffer;
    if (type == typeBinary) {
        keyBindBuffer = (char*) malloc(2 * info->compLength);
        memset(keyBindBuffer, 0, 2 * info->compLength);
    } else {
        keyBindBuffer = (char*) malloc(info->compLength);
        memset(keyBindBuffer, 0, info->compLength);
    }
    void *keyStartBuffer = (void*) keyBindBuffer;
    bool keyUpdated = false;

    while (idxFldIter.hasElement()) {
        FieldDef *idef = idxFldIter.nextElement();
        FieldIterator fldIter = tbl->fldList_.getIterator();
        while (fldIter.hasElement()) {
            FieldDef *def = fldIter.nextElement();
            if (0 == strcmp(def->fldName_, idef->fldName_)) {
                if (NULL != def->bindVal_) {
                    if (type == typeBinary) {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                def->type_, 2*def->length_);
                        keyStartBuffer = calloc(1, info->compLength);
                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
                        free(keyBindBuffer);
                    } else {
                        AllDataType::copyVal(keyBindBuffer, def->bindVal_,
                                def->type_, def->length_);
                        keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                    }
                } else {
                    AllDataType::copyVal(keyBindBuffer, (char *) tuple + def->offset_, def->type_, def->length_);
                    keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
                }
                keyUpdated = true;
                break;
            }
        }
    }
    if (!keyUpdated) {
        //printf("DEBUG::key not updated\n");
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    //printf("DEBUG::it is wrong coming here\n");
    bool result = false;
    if (type == typeComposite)
        result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
                OpEquals, info->type, info->compLength);
    else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
                OpEquals, info->type, info->compLength);
    if (result) {
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return OK;
    }
    printDebug(DM_HashIndex, "Updating hash index node: key value is updated");

    ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
    Bucket* buckets = (Bucket*)citer.nextElement();

    //remove the node whose key is updated
    int bucketNo = 0;
    if (type == typeComposite)
        bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
    else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: bucket for old value is %d", bucketNo);
    Bucket *bucket = &buckets[bucketNo];

    HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
    hInfo1->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo1->bucket_ = bucket;
    hInfo1->tuple_ = tuple;
    hInfo1->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo1->keyPtr_ = keyPtr;

    //Two threads updating tuples that hash to the same pair of buckets may
    //deadlock, so take both mutexes one after another, which narrows the
    //deadlock window.
    int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        delete hInfo1;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut, "Unable to acquire bucket mutex for bucket %d", bucketNo);
        return ErrLockTimeOut;
    }
    //insert node for the updated key value
    int newBucketNo = computeHashBucket(type,
            keyStartBuffer, noOfBuckets, info->compLength);
    printDebug(DM_HashIndex, "Updating hash index node: bucket for new value is %d", newBucketNo);

    Bucket *bucket1 = &buckets[newBucketNo];
    HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
    hInfo2->metaData_ = tbl->db_->getMetaDataPtr();
    hInfo2->bucket_ = bucket1;
    hInfo2->tuple_ = tuple;
    hInfo2->hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_;
    hInfo2->keyPtr_ = keyPtr;
    ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
    if (ret != 0)
    {
        bucket->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        printError(ErrLockTimeOut, "Unable to acquire bucket mutex for bucket %d", newBucketNo);
        return ErrLockTimeOut;
    }
    HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
    if (head1)
    {
        BucketList list1(head1);
        printDebug(DM_HashIndex, "Updating hash index node: removing node from list with head %x", head1);
        list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
        bucket->bucketList_ = list1.getBucketListHead();
    }
    else
    {
        printError(ErrSysInternal, "Update: bucket list is null");
        bucket1->mutex_.releaseLock(tbl->db_->procSlot);
        bucket->mutex_.releaseLock(tbl->db_->procSlot);
        delete hInfo1;
        delete hInfo2;
        free(keyStartBuffer);
        free(oldKeyStartBuffer);
        return ErrSysInternal;
    }
    DbRetVal rc = OK;
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            BucketList list((HashIndexNode*) bucket->bucketList_);
            rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            if (rc != OK) printError(ErrSysFatal, "Double failure on undo log remove followed by hash bucket list insert\n");
            bucket->bucketList_ = list.getBucketListHead();
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rc;
        }
    }
    HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
    //Note: the tuple stays at the same address, so keyPtr and tuple are
    //not changed during the append; only the bucket in which this node
    //resides changes when the index key is updated.
    if (!head2)
    {
        DbRetVal rv = OK;
        HashIndexNode *firstNode = (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
        if (firstNode == NULL)
        {
            printError(rv, "Error in allocating hash node");
            bucket1->mutex_.releaseLock(tbl->db_->procSlot);
            bucket->mutex_.releaseLock(tbl->db_->procSlot);
            delete hInfo1;
            delete hInfo2;
            free(keyStartBuffer);
            free(oldKeyStartBuffer);
            return rv;
        }
        firstNode->ptrToKey_ = keyPtr;
        firstNode->ptrToTuple_ = tuple;
        firstNode->next_ = NULL;
        bucket1->bucketList_ = (HashIndexNode*)firstNode;
        printDebug(DM_HashIndex, "Updating hash index node: adding new node %x: head is empty", firstNode);
    }
    else
    {
        BucketList list2(head2);
        printDebug(DM_HashIndex, "Updating hash index node: adding node to list with head %x", head2);
        list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
        bucket1->bucketList_ = list2.getBucketListHead();
    }
    if (!loadFlag) {
        rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
        if (rc != OK)
        {
            //revert the changes: delete the new node, add the old node
            //back, and remove the logical undo log of the
            //DeleteHashIndexOperation
            BucketList list1((HashIndexNode*) bucket->bucketList_);
            BucketList list2((HashIndexNode*) bucket1->bucketList_);
            list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
            list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
            bucket->bucketList_ = list1.getBucketListHead();
            bucket1->bucketList_ = list2.getBucketListHead();
            UndoLogInfo *logInfo = tr->popUndoLog();
            Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
            chunk->free(tbl->sysDB_, logInfo);
        }
    }
    bucket1->mutex_.releaseLock(tbl->db_->procSlot);
    bucket->mutex_.releaseLock(tbl->db_->procSlot);
    delete hInfo1;
    delete hInfo2;
    free(keyStartBuffer);
    free(oldKeyStartBuffer);
    return rc;
}
580 //Following three methods are used to undo Logical Hash Indexes
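/* Editorial note: this undo helper re-inserts a key into its bucket. It
 * rebuilds a process-local Database handle from the metadata pointer
 * saved in the HashUndoLogInfo record, links the node back through
 * BucketList::insert, and publishes the resulting head via Mutex::CASL.
 */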
DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *) info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rv = list.insert(hChunk, &db, info->keyPtr_, info->tuple_);
    if (rv != OK)
    {
        printError(ErrLockTimeOut, "Unable to add to bucket..retry\n");
        return ErrLockTimeOut;
    }
    //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    if (0 != Mutex::CASL((long*)&(((Bucket *)info->bucket_)->bucketList_),
            (long)(((Bucket *)info->bucket_)->bucketList_),
            (long)list.getBucketListHead()))
    {
        printError(ErrLockTimeOut, "Unable to add to bucket..retry\n");
        return ErrLockTimeOut;
    }
    return OK;
}
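
/* Editorial note: the counterpart of insertLogicalUndoLog. It removes the
 * key through BucketList::remove and, when the head node itself was
 * removed (SplCase), publishes the new head via Mutex::CASL.
 */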
DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
{
    HashUndoLogInfo *info = (HashUndoLogInfo *) data;
    Chunk *hChunk = (Chunk *) info->hChunk_;
    Database db;
    db.setMetaDataPtr((DatabaseMetaData *)info->metaData_);
    db.setProcSlot(sysdb->procSlot);
    HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
    BucketList list(head);
    DbRetVal rc = list.remove(hChunk, &db, info->keyPtr_);
    //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
    if (SplCase == rc) {
        if (0 != Mutex::CASL((long*)&(((Bucket *)info->bucket_)->bucketList_),
                (long)(((Bucket *)info->bucket_)->bucketList_),
                (long)list.getBucketListHead()))
        {
            printError(ErrLockTimeOut, "Unable to set the head of hash index bucket\n");
            return ErrLockTimeOut;
        }
    } else if (rc != OK) {
        printError(ErrLockTimeOut, "Unable to remove hash index node");
        return ErrLockTimeOut;
    }
    return OK;
}