1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 ***************************************************************************/
17 #include<CatalogTables.h>
23 #include<PredicateImpl.h>
/* Defines the `hashpjw' hash function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
   1986, pp. 435-436]. */
/// hashpjw over a NUL-terminated string.
/// @param strVal NUL-terminated key (not modified; non-const for legacy callers).
/// @return 32-bit hash value (0 for the empty string).
unsigned int hashString(char *strVal)
{
    unsigned int hval = 0;
    unsigned int g;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        // Fold the top nibble back into the low bits so long keys
        // keep influencing the result (classic hashpjw step).
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
48 unsigned int hashBinary(char *strVal
, int length
)
54 while (iter
!= length
)
57 hval
+= (unsigned int) *str
++;
58 g
= hval
& ((unsigned int) 0xf << (32 - 4));
61 hval
^= g
>> (32 - 8);
69 unsigned int HashIndex::computeHashBucket(DataType type
, void *key
, int noOfBuckets
, int length
)
72 if (typeInt
== type
) {
74 return val
% noOfBuckets
;
75 }else if (typeString
== type
|| typeVarchar
== type
) {
76 unsigned int val
= hashString((char*)key
);
77 return val
% noOfBuckets
;
78 }else if (typeShort
== type
) {
79 short val
= *(short*) key
;
80 return val
% noOfBuckets
;
81 }else if (typeLong
== type
) {
82 long val
= *(long*) key
;
83 return val
% noOfBuckets
;
84 }else if (typeLongLong
== type
) {
85 long long val
= *(long long*) key
;
86 return val
% noOfBuckets
;
87 }else if (typeByteInt
== type
) {
88 ByteInt val
= *(ByteInt
*)key
;
89 return val
% noOfBuckets
;
90 }else if (typeDate
== type
) {
92 return val
% noOfBuckets
;
93 }else if (typeTime
== type
) {
95 return val
% noOfBuckets
;
96 }else if (typeComposite
== type
) {
97 unsigned int val
= hashBinary((char*)key
, length
);
98 return val
% noOfBuckets
;
99 }else if (typeBinary
== type
) {
100 unsigned int val
= hashBinary((char*)key
, length
);
101 return val
% noOfBuckets
;
102 }else if (typeULong
== type
) {
103 unsigned long val
= *(unsigned long*)key
;
104 return val
% noOfBuckets
;
106 printError(ErrSysFatal
,"Type not supported for hashing\n");
110 bool HashIndex::checkForUniqueKey(IndexNode
*head
, HashIndexInfo
*info
, void *tuple
)
112 if (!head
) return false;
113 int offset
= info
->fldOffset
;
114 DataType type
= info
->type
;
115 BucketList
list(head
);
116 BucketIter iter
= list
.getIterator();
119 printDebug(DM_HashIndex
, "HashIndex insert Checking for unique");
122 while((node
= iter
.next()) != NULL
)
124 bucketTuple
= node
->ptrToTuple_
;
125 if (type
== typeComposite
) {
126 FieldIterator fldIter
= info
->idxFldList
.getIterator();
128 while (fldIter
.hasElement()) {
129 FieldDef
*def
= fldIter
.nextElement();
130 if (def
->type_
!= typeVarchar
) {
131 res
= AllDataType::compareVal(
132 (char *)bucketTuple
+ def
->offset_
,
133 (char *)tuple
+ def
->offset_
,
134 OpEquals
, def
->type_
, def
->length_
);
136 char *tvcptr
= (char *) *(long *)
137 ((char *)tuple
+ def
->offset_
);
138 char *btvcptr
= (char *) *(long *)
139 ((char *)bucketTuple
+ def
->offset_
);
140 res
= AllDataType::compareVal(tvcptr
, btvcptr
,
141 OpEquals
, def
->type_
, def
->length_
);
147 if (type
!= typeVarchar
)
148 res
= AllDataType::compareVal((void*)((char*)bucketTuple
+offset
), (void*)((char*)tuple
+offset
), OpEquals
,type
, info
->compLength
);
149 else res
= AllDataType::compareVal((void*)*(long *)((char*)bucketTuple
+offset
), (void*)*(long *)((char*)tuple
+offset
), OpEquals
,type
, info
->compLength
);
153 if (type
== typeLongLong
)
154 printError(ErrUnique
, "Unique key violation for id:%lld",*(long long*) ((char*)tuple
+offset
) );
156 printError(ErrUnique
, "Unique key violation");
163 DbRetVal
HashIndex::insert(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool loadFlag
)
165 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
166 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
168 int noOfBuckets
= info
->noOfBuckets
;
169 int offset
= info
->fldOffset
;
170 DataType type
= info
->type
;
172 printDebug(DM_HashIndex
, "Inserting hash index node for %s", iptr
->indName_
);
173 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
174 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
175 void *keyPtr
=(void*)((char*)tuple
+ offset
);
177 if (type
== typeComposite
) {
178 char *keyBuffer
= (char*) malloc(info
->compLength
);
179 memset(keyBuffer
, 0, info
->compLength
);
180 void* keyStartBuffer
= keyBuffer
;
181 FieldIterator iter
= info
->idxFldList
.getIterator();
182 while(iter
.hasElement())
184 FieldDef
*def
= iter
.nextElement();
185 keyPtr
= (char *)tuple
+ def
->offset_
;
186 if (def
->type_
!= typeVarchar
) {
187 AllDataType::copyVal(keyBuffer
, keyPtr
, def
->type_
,
190 void *ptr
= (void *) *(long *) keyPtr
;
192 AllDataType::copyVal(keyBuffer
, ptr
, def
->type_
,
195 keyBuffer
= keyBuffer
+ AllDataType::size(def
->type_
,def
->length_
);
197 bucketNo
= computeHashBucket(type
, keyStartBuffer
, noOfBuckets
,
199 ::free(keyStartBuffer
);
202 if (type
!= typeVarchar
)
204 computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
206 void *ptr
= (void *) *(long *) keyPtr
;
208 bucketNo
= computeHashBucket(type
, ptr
, noOfBuckets
,
213 printDebug(DM_HashIndex
, "HashIndex insert bucketno %d", bucketNo
);
214 Bucket
*bucket
= &(buckets
[bucketNo
]);
215 HashUndoLogInfo hInfo
;
216 hInfo
.metaData_
= tbl
->db_
->getMetaDataPtr();
217 hInfo
.bucket_
= bucket
;
218 hInfo
.tuple_
= tuple
;
219 hInfo
.hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
220 hInfo
.keyPtr_
= keyPtr
;
222 IndexNode
*head
= (IndexNode
*) bucket
->bucketList_
;
225 bool isKeyPresent
= checkForUniqueKey(head
, info
, tuple
);
226 if (isKeyPresent
) return ErrUnique
;
228 Chunk
*hIdxNodeChunk
= (Chunk
*)iptr
->hashNodeChunk_
;
229 printDebug(DM_HashIndex
, "HashIndex insert into bucket list");
232 printDebug(DM_HashIndex
, "HashIndex insert head is empty");
234 IndexNode
*firstNode
= NULL
;
237 int totalTries
= Conf::config
.getMutexRetries();
238 while (tries
< totalTries
)
241 firstNode
= (IndexNode
*) hIdxNodeChunk
->allocate(tbl
->db_
, &rv
);
242 if (firstNode
!=NULL
) break;
243 if (rv
!= ErrLockTimeOut
)
245 printError(rv
, "Unable to allocate hash index node");
250 if (firstNode
== NULL
){
251 printError(rv
, "Unable to allocate hash index node after %d retry", tries
);
254 firstNode
->ptrToKey_
= keyPtr
;
255 firstNode
->ptrToTuple_
= tuple
;
256 firstNode
->next_
= NULL
;
257 if (0 != Mutex::CASL((long*)&bucket
->bucketList_
, 0, (long)firstNode
)) {
258 printError(ErrLockTimeOut
, "Hash Index bucket lock timeout.. retry");
259 hIdxNodeChunk
->free(tbl
->db_
, firstNode
);
260 return ErrLockTimeOut
;
263 printDebug(DM_HashIndex
, "HashIndex insert new node %x in empty bucket", bucket
->bucketList_
);
267 BucketList
list(head
);
268 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
270 printError(rc
, "unable to insert into bucketlist rv:%d", rc
);
276 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, &hInfo
, sizeof(HashUndoLogInfo
));
279 printError(rc
, "Unable to append logical log before rc:%d", rc
);
280 BucketList
list(head
);
281 DbRetVal rv
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
282 //bucket->bucketList_ = list.getBucketListHead();
284 printError(ErrWarning
, "SplCase occured");
285 if (0 != Mutex::CASL((long*)&bucket
->bucketList_
,
286 (long)bucket
->bucketList_
, (long)list
.getBucketListHead())) {
287 printError(ErrSysFatal
, "Double failure, may lead to hash node leak\n");
289 }else if (rv
!=OK
) printError(ErrSysFatal
, "double failure on undo log insert followed by hash bucket list remove\n");
296 DbRetVal
HashIndex::remove(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool loadFlag
)
298 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
300 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
301 DataType type
= info
->type
;
302 int offset
= info
->fldOffset
;
303 int noOfBuckets
= info
->noOfBuckets
;
305 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
306 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
308 void *keyPtr
=(void*)((char*)tuple
+ offset
);
310 if (type
== typeComposite
) {
311 char *keyBuffer
= (char*) malloc(info
->compLength
);
312 memset(keyBuffer
, 0, info
->compLength
);
313 void *keyStartBuffer
= keyBuffer
;
314 FieldIterator iter
= info
->idxFldList
.getIterator();
315 while(iter
.hasElement())
317 FieldDef
*def
= iter
.nextElement();
318 keyPtr
= (char *)tuple
+ def
->offset_
;
319 if (def
->type_
!= typeVarchar
) {
320 AllDataType::copyVal(keyBuffer
, keyPtr
, def
->type_
,
323 void *ptr
= (void *) *(long *) keyPtr
;
325 AllDataType::copyVal(keyBuffer
, ptr
, def
->type_
,
328 keyBuffer
= keyBuffer
+ AllDataType::size(def
->type_
,def
->length_
);
330 bucket
= HashIndex::computeHashBucket(type
, keyStartBuffer
, noOfBuckets
, info
->compLength
);
331 ::free(keyStartBuffer
);
334 if (type
!= typeVarchar
)
335 bucket
= HashIndex::computeHashBucket(type
, keyPtr
, noOfBuckets
,
338 void *ptr
= (void *) *(long *) keyPtr
;
340 bucket
= HashIndex::computeHashBucket(type
, ptr
, noOfBuckets
,
346 Bucket
*bucket1
= &buckets
[bucket
];
347 HashUndoLogInfo hInfo
;
348 hInfo
.metaData_
= tbl
->db_
->getMetaDataPtr();
349 hInfo
.bucket_
= bucket1
;
350 hInfo
.tuple_
= tuple
;
351 hInfo
.hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
352 hInfo
.keyPtr_
= keyPtr
;
354 IndexNode
*head
= (IndexNode
*) bucket1
->bucketList_
;
356 if (!head
) { printError(ErrNotExists
, "Hash index does not exist:should never happen\n");
359 BucketList
list(head
);
360 printDebug(DM_HashIndex
, "Removing hash index node from head %x", head
);
362 DbRetVal rc
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
365 printDebug(DM_HashIndex
, "Removing hash index node from head ");
366 //bucket1->bucketList_ = list.getBucketListHead();
367 if (0 != Mutex::CASL((long*)&bucket1
->bucketList_
,
368 (long)head
, (long)list
.getBucketListHead())) {
369 printError(ErrSysFatal
, "Lock time out for hash bucket. retry\n");
370 return ErrLockTimeOut
;
375 rc
=tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, &hInfo
, sizeof(HashUndoLogInfo
));
378 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
379 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
380 //bucket1->bucketList_ = list.getBucketListHead();
381 if (0 != Mutex::CASL((long*)&bucket1
->bucketList_
,
382 (long)bucket1
->bucketList_
, (long)list
.getBucketListHead())) {
383 printError(ErrSysFatal
, "Double failure on index insert");
390 DbRetVal
HashIndex::update(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool loadFlag
)
392 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
394 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
395 DataType type
= info
->type
;
396 int offset
= info
->fldOffset
;
397 int noOfBuckets
= info
->noOfBuckets
;
399 //check whether the index key is updated or not
400 //if it is not updated return from here
401 void *keyPtr
=(void*)((char*)tuple
+ offset
);
402 char *kPtr
= (char*)keyPtr
;
404 //creating old key value buffer for composite primary keys
405 char *oldKeyBuffer
= (char*) malloc(info
->compLength
);
406 memset(oldKeyBuffer
, 0, info
->compLength
);
407 void *oldKeyStartBuffer
= oldKeyBuffer
;
408 FieldIterator iter
= info
->idxFldList
.getIterator();
409 while(iter
.hasElement()) {
410 FieldDef
*def
= iter
.nextElement();
411 keyPtr
= (char *)tuple
+ def
->offset_
;
412 AllDataType::copyVal(oldKeyBuffer
, keyPtr
, def
->type_
, def
->length_
);
413 oldKeyBuffer
= oldKeyBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
416 keyPtr
= (void *) kPtr
;
417 //Iterate through the bind list and check
418 FieldIterator idxFldIter
= info
->idxFldList
.getIterator();
419 char *keyBindBuffer
;
420 if(type
==typeBinary
) {
421 keyBindBuffer
= (char*) malloc(2 * info
->compLength
);
422 memset(keyBindBuffer
, 0, 2 * info
->compLength
);
424 keyBindBuffer
= (char*) malloc(info
->compLength
);
425 memset(keyBindBuffer
, 0, info
->compLength
);
427 void *keyStartBuffer
= (void*) keyBindBuffer
;
428 bool keyUpdated
= false;
430 while (idxFldIter
.hasElement()) {
431 FieldDef
*idef
= idxFldIter
.nextElement();
432 FieldIterator fldIter
= tbl
->fldList_
.getIterator();
433 while (fldIter
.hasElement()) {
434 FieldDef
*def
= fldIter
.nextElement();
435 if (0 == strcmp(def
->fldName_
, idef
->fldName_
)) {
436 if (NULL
!= def
->bindVal_
) {
437 if(type
==typeBinary
) {
438 AllDataType::copyVal(keyBindBuffer
, def
->bindVal_
,
439 def
->type_
, 2*def
->length_
);
440 keyStartBuffer
=calloc(1,info
->compLength
);
441 AllDataType::convertToBinary(keyStartBuffer
, keyBindBuffer
, typeString
, info
->compLength
);
444 AllDataType::copyVal(keyBindBuffer
, def
->bindVal_
,
445 def
->type_
, def
->length_
);
446 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
449 AllDataType::copyVal(keyBindBuffer
, (char *) tuple
+ def
->offset_
, def
->type_
, def
->length_
);
450 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
458 //printf("DEBUG::key not updated\n");
459 free(keyStartBuffer
);
460 free(oldKeyStartBuffer
);
463 //printf("DEBUG::it is wrong coming here\n");
465 if (type
== typeComposite
)
466 result
= AllDataType::compareVal(oldKeyStartBuffer
, keyStartBuffer
,
467 OpEquals
, info
->type
, info
->compLength
);
468 else result
= AllDataType::compareVal(keyPtr
, keyStartBuffer
,
469 OpEquals
, info
->type
, info
->compLength
);
471 free(keyStartBuffer
);
472 free(oldKeyStartBuffer
);
475 printDebug(DM_HashIndex
, "Updating hash index node: Key value is updated");
477 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
479 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
481 //remove the node whose key is updated
483 if (type
== typeComposite
)
484 bucketNo
= computeHashBucket(type
, oldKeyStartBuffer
, noOfBuckets
, info
->compLength
);
485 else bucketNo
= computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
486 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for old value is %d", bucketNo
);
487 Bucket
*bucket
= &buckets
[bucketNo
];
489 HashUndoLogInfo
*hInfo1
= new HashUndoLogInfo();
490 hInfo1
->metaData_
= tbl
->db_
->getMetaDataPtr();
491 hInfo1
->bucket_
= bucket
;
492 hInfo1
->tuple_
= tuple
;
493 hInfo1
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
494 hInfo1
->keyPtr_
= keyPtr
;
496 //it may run into deadlock, when two threads updates tuples which falls in
497 //same buckets.So take both the mutex one after another, which will reduce the
499 int ret
= bucket
->mutex_
.getLock(tbl
->db_
->procSlot
);
503 free(keyStartBuffer
);
504 free(oldKeyStartBuffer
);
505 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucketNo
);
506 return ErrLockTimeOut
;
508 //insert node for the updated key value
509 int newBucketNo
= computeHashBucket(type
,
510 keyStartBuffer
, noOfBuckets
, info
->compLength
);
511 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for new value is %d", newBucketNo
);
513 Bucket
*bucket1
= &buckets
[newBucketNo
];
514 HashUndoLogInfo
*hInfo2
= new HashUndoLogInfo();
515 hInfo2
->metaData_
= tbl
->db_
->getMetaDataPtr();
516 hInfo2
->bucket_
= bucket
;
517 hInfo2
->tuple_
= tuple
;
518 hInfo2
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
519 hInfo2
->keyPtr_
= keyPtr
;
520 bucket1
->mutex_
.getLock(tbl
->db_
->procSlot
);
525 free(keyStartBuffer
);
526 free(oldKeyStartBuffer
);
527 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",newBucketNo
);
528 return ErrLockTimeOut
;
531 IndexNode
*head1
= (IndexNode
*) bucket
->bucketList_
;
534 BucketList
list1(head1
);
535 printDebug(DM_HashIndex
, "Updating hash index node: Removing node from list with head %x", head1
);
536 list1
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
537 bucket
->bucketList_
=list1
.getBucketListHead();
541 printError(ErrSysInternal
,"Update: Bucket list is null");
542 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
543 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
546 free(keyStartBuffer
);
547 free(oldKeyStartBuffer
);
548 return ErrSysInternal
;
552 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, hInfo1
, sizeof(HashUndoLogInfo
));
555 BucketList
list((IndexNode
*) bucket
->bucketList_
);
556 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
557 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
558 bucket
->bucketList_
= list
.getBucketListHead();
559 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
560 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
563 free(keyStartBuffer
);
564 free(oldKeyStartBuffer
);
568 IndexNode
*head2
= (IndexNode
*) bucket1
->bucketList_
;
569 //Note:: the tuple will be in the same address location
570 //so not changing the keyptr and tuple during append
571 //only bucket where this node resides will only change
572 //if the index key is updated.
576 IndexNode
*firstNode
= (IndexNode
*)(((Chunk
*)iptr
->hashNodeChunk_
)->allocate(tbl
->db_
, &rv
));
577 if (firstNode
== NULL
)
579 printError(rv
, "Error in allocating hash node");
580 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
581 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
584 free(keyStartBuffer
);
585 free(oldKeyStartBuffer
);
588 firstNode
->ptrToKey_
= keyPtr
;
589 firstNode
->ptrToTuple_
= tuple
;
590 firstNode
->next_
= NULL
;
591 bucket1
->bucketList_
= (IndexNode
*)firstNode
;
592 printDebug(DM_HashIndex
, "Updating hash index node: Adding new node %x:Head is empty", firstNode
);
596 BucketList
list2(head2
);
597 printDebug(DM_HashIndex
, "Updating hash index node: Adding node to list with head %x", head2
);
598 list2
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
599 bucket1
->bucketList_
= list2
.getBucketListHead();
603 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, hInfo2
, sizeof(HashUndoLogInfo
));
606 //reverting back the changes:delete new node and add the old
607 //node + remove logical undo log of the DeleteHashIndexOperation
608 BucketList
list1((IndexNode
*) bucket
->bucketList_
);
609 BucketList
list2((IndexNode
*) bucket1
->bucketList_
);
610 list1
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
611 list2
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
612 bucket
->bucketList_
= list1
.getBucketListHead();
613 bucket1
->bucketList_
= list2
.getBucketListHead();
614 UndoLogInfo
*logInfo
= tr
->popUndoLog();
615 Chunk
*chunk
= tbl
->sysDB_
->getSystemDatabaseChunk(UndoLogTableID
);
616 chunk
->free(tbl
->sysDB_
, logInfo
);
619 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
620 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
623 free(keyStartBuffer
);
624 free(oldKeyStartBuffer
);
628 //Following three methods are used to undo Logical Hash Indexes
629 DbRetVal
HashIndex::insertLogicalUndoLog(Database
*sysdb
, void *data
)
631 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
632 Chunk
*hChunk
= (Chunk
*) info
->hChunk_
;
634 db
.setMetaDataPtr((DatabaseMetaData
*) info
->metaData_
);
635 db
.setProcSlot(sysdb
->procSlot
);
636 IndexNode
*head
= (IndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
637 BucketList
list(head
);
638 DbRetVal rv
= list
.insert(hChunk
, &db
, info
->keyPtr_
, info
->tuple_
);
641 printError(ErrLockTimeOut
, "Unable to add to bucket..retry\n");
642 return ErrLockTimeOut
;
644 //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
645 if (0 != Mutex::CASL((long*)& (((Bucket
*)info
->bucket_
)->bucketList_
),
646 (long)(((Bucket
*)info
->bucket_
)->bucketList_
),
647 (long)list
.getBucketListHead()))
649 printError(ErrLockTimeOut
, "Unable to add to bucket..retry\n");
650 return ErrLockTimeOut
;
655 DbRetVal
HashIndex::deleteLogicalUndoLog(Database
*sysdb
, void *data
)
657 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
658 Chunk
*hChunk
= (Chunk
*) info
->hChunk_
;
660 db
.setMetaDataPtr((DatabaseMetaData
*)info
->metaData_
);
661 db
.setProcSlot(sysdb
->procSlot
);
662 IndexNode
*head
= (IndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
663 BucketList
list(head
);
664 DbRetVal rc
= list
.remove(hChunk
, &db
, info
->keyPtr_
);
665 //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
667 if (0 != Mutex::CASL((long*)& (((Bucket
*)info
->bucket_
)->bucketList_
),
668 (long)(((Bucket
*)info
->bucket_
)->bucketList_
),
669 (long)list
.getBucketListHead()))
671 printError(ErrLockTimeOut
, "Unable to set the head of hash index bucket\n");
672 return ErrLockTimeOut
;
674 }else if (rc
!= OK
) {
675 printError(ErrLockTimeOut
, "Unable to remove hash index node");
676 return ErrLockTimeOut
;