1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 ***************************************************************************/
17 #include<CatalogTables.h>
23 #include<PredicateImpl.h>
/* Defines the `hashpjw' hash function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools, 1986]. */
/*
 * hashString - P.J. Weinberger's `hashpjw' hash over a NUL-terminated string.
 * Shifts the accumulator 4 bits per character, adds the character, and when
 * the top nibble becomes non-zero XOR-folds it back into the low bits so
 * long keys keep mixing instead of overflowing silently.
 *
 * @param strVal NUL-terminated key bytes (read-only; non-const for legacy API).
 * @return 32-bit hash value; callers reduce it modulo the bucket count.
 */
unsigned int hashString(char *strVal)
{
    unsigned int hval = 0;
    unsigned int g;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        // Isolate the 4 bits that would be shifted out next iteration.
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            // Fold the high nibble back in, then clear it.
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
/*
 * hashBinary - hashpjw over an explicit-length byte buffer.
 * Identical mixing to hashString() but bounded by `length` instead of a
 * terminating NUL, so it is safe for binary and composite keys that may
 * contain embedded zero bytes.
 *
 * @param strVal pointer to `length` key bytes (read-only).
 * @param length number of bytes to hash; 0 yields hash 0.
 * @return 32-bit hash value; callers reduce it modulo the bucket count.
 */
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval = 0;
    unsigned int g;
    char *str = strVal;
    int iter = 0;
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        // Isolate the 4 bits that would be shifted out next iteration.
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            // Fold the high nibble back in, then clear it.
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
69 unsigned int HashIndex::computeHashBucket(DataType type
, void *key
, int noOfBuckets
, int length
)
72 if (typeInt
== type
) {
74 return val
% noOfBuckets
;
75 }else if (typeString
== type
|| typeVarchar
== type
) {
76 unsigned int val
= hashString((char*)key
);
77 return val
% noOfBuckets
;
78 }else if (typeShort
== type
) {
79 short val
= *(short*) key
;
80 return val
% noOfBuckets
;
81 }else if (typeLong
== type
) {
82 long val
= *(long*) key
;
83 return val
% noOfBuckets
;
84 }else if (typeLongLong
== type
) {
85 long long val
= *(long long*) key
;
86 return val
% noOfBuckets
;
87 }else if (typeByteInt
== type
) {
88 ByteInt val
= *(ByteInt
*)key
;
89 return val
% noOfBuckets
;
90 }else if (typeDate
== type
) {
92 return val
% noOfBuckets
;
93 }else if (typeTime
== type
) {
95 return val
% noOfBuckets
;
96 }else if (typeComposite
== type
) {
97 unsigned int val
= hashBinary((char*)key
, length
);
98 return val
% noOfBuckets
;
99 }else if (typeBinary
== type
) {
100 unsigned int val
= hashBinary((char*)key
, length
);
101 return val
% noOfBuckets
;
102 }else if (typeULong
== type
) {
103 unsigned long val
= *(unsigned long*)key
;
104 return val
% noOfBuckets
;
106 printError(ErrSysFatal
,"Type not supported for hashing\n");
110 bool HashIndex::checkForUniqueKey(IndexNode
*head
, HashIndexInfo
*info
, void *tuple
)
112 if (!head
) return false;
113 int offset
= info
->fldOffset
;
114 DataType type
= info
->type
;
115 BucketList
list(head
);
116 BucketIter iter
= list
.getIterator();
119 printDebug(DM_HashIndex
, "HashIndex insert Checking for unique");
122 while((node
= iter
.next()) != NULL
)
124 bucketTuple
= node
->ptrToTuple_
;
125 if (type
== typeComposite
) {
126 FieldIterator fldIter
= info
->idxFldList
.getIterator();
128 while (fldIter
.hasElement()) {
129 FieldDef
*def
= fldIter
.nextElement();
130 if (def
->type_
!= typeVarchar
) {
131 res
= AllDataType::compareVal(
132 (char *)bucketTuple
+ def
->offset_
,
133 (char *)tuple
+ def
->offset_
,
134 OpEquals
, def
->type_
, def
->length_
);
136 char *tvcptr
= (char *) *(long *)
137 ((char *)tuple
+ def
->offset_
);
138 char *btvcptr
= (char *) *(long *)
139 ((char *)bucketTuple
+ def
->offset_
);
140 res
= AllDataType::compareVal(tvcptr
, btvcptr
,
141 OpEquals
, def
->type_
, def
->length_
);
147 if (type
!= typeVarchar
)
148 res
= AllDataType::compareVal((void*)((char*)bucketTuple
+offset
),
149 (void*)((char*)tuple
+offset
), OpEquals
,
150 type
, info
->compLength
);
152 res
= AllDataType::compareVal((void*)*(long *)((char*)bucketTuple
+offset
),
153 (void*)*(long *)((char*)tuple
+offset
),
154 OpEquals
,type
, info
->compLength
);
158 if (type
== typeLongLong
)
159 printError(ErrUnique
, "Unique key violation for id:%lld",
160 *(long long*) ((char*)tuple
+offset
) );
162 printError(ErrUnique
, "Unique key violation");
169 DbRetVal
HashIndex::insert(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool loadFlag
)
171 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
172 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
174 int noOfBuckets
= info
->noOfBuckets
;
175 int offset
= info
->fldOffset
;
176 DataType type
= info
->type
;
178 printDebug(DM_HashIndex
, "Inserting hash index node for %s", iptr
->indName_
);
179 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
180 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
181 void *keyPtr
=(void*)((char*)tuple
+ offset
);
183 if (type
== typeComposite
) {
184 char *keyBuffer
= (char*) malloc(info
->compLength
);
185 memset(keyBuffer
, 0, info
->compLength
);
186 void* keyStartBuffer
= keyBuffer
;
187 FieldIterator iter
= info
->idxFldList
.getIterator();
188 while(iter
.hasElement())
190 FieldDef
*def
= iter
.nextElement();
191 keyPtr
= (char *)tuple
+ def
->offset_
;
192 if (def
->type_
!= typeVarchar
) {
193 AllDataType::copyVal(keyBuffer
, keyPtr
, def
->type_
,
196 void *ptr
= (void *) *(long *) keyPtr
;
198 AllDataType::copyVal(keyBuffer
, ptr
, def
->type_
,
201 keyBuffer
= keyBuffer
+ AllDataType::size(def
->type_
,def
->length_
);
203 bucketNo
= computeHashBucket(type
, keyStartBuffer
, noOfBuckets
,
205 ::free(keyStartBuffer
);
208 if (type
!= typeVarchar
)
210 computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
212 void *ptr
= (void *) *(long *) keyPtr
;
214 bucketNo
= computeHashBucket(type
, ptr
, noOfBuckets
,
219 printDebug(DM_HashIndex
, "HashIndex insert bucketno %d", bucketNo
);
220 Bucket
*bucket
= &(buckets
[bucketNo
]);
221 HashUndoLogInfo hInfo
;
222 hInfo
.metaData_
= tbl
->db_
->getMetaDataPtr();
223 hInfo
.bucket_
= bucket
;
224 hInfo
.tuple_
= tuple
;
225 hInfo
.hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
226 hInfo
.keyPtr_
= keyPtr
;
228 IndexNode
*head
= (IndexNode
*) bucket
->bucketList_
;
231 bool isKeyPresent
= checkForUniqueKey(head
, info
, tuple
);
232 if (isKeyPresent
) return ErrUnique
;
234 Chunk
*hIdxNodeChunk
= (Chunk
*)iptr
->hashNodeChunk_
;
235 printDebug(DM_HashIndex
, "HashIndex insert into bucket list");
238 printDebug(DM_HashIndex
, "HashIndex insert head is empty");
240 IndexNode
*firstNode
= NULL
;
243 int totalTries
= Conf::config
.getMutexRetries();
244 while (tries
< totalTries
)
247 firstNode
= (IndexNode
*) hIdxNodeChunk
->allocate(tbl
->db_
, &rv
);
248 if (firstNode
!=NULL
) break;
249 if (rv
!= ErrLockTimeOut
)
251 printError(rv
, "Unable to allocate hash index node");
256 if (firstNode
== NULL
){
257 printError(rv
, "Unable to allocate hash index node after %d retry", tries
);
260 firstNode
->ptrToKey_
= keyPtr
;
261 firstNode
->ptrToTuple_
= tuple
;
262 firstNode
->next_
= NULL
;
263 if (0 != Mutex::CASL((long*)&bucket
->bucketList_
, 0, (long)firstNode
)) {
264 printError(ErrLockTimeOut
, "Hash Index bucket lock timeout.. retry");
265 hIdxNodeChunk
->free(tbl
->db_
, firstNode
);
266 return ErrLockTimeOut
;
269 printDebug(DM_HashIndex
, "HashIndex insert new node %x in empty bucket", bucket
->bucketList_
);
273 BucketList
list(head
);
274 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
276 printError(rc
, "unable to insert into bucketlist rv:%d", rc
);
282 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, &hInfo
, sizeof(HashUndoLogInfo
));
285 printError(rc
, "Unable to append logical log before rc:%d", rc
);
286 BucketList
list(head
);
287 DbRetVal rv
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
288 //bucket->bucketList_ = list.getBucketListHead();
290 printError(ErrWarning
, "SplCase occured");
291 if (0 != Mutex::CASL((long*)&bucket
->bucketList_
,
292 (long)bucket
->bucketList_
, (long)list
.getBucketListHead())) {
293 printError(ErrSysFatal
, "Double failure, may lead to hash node leak\n");
295 }else if (rv
!=OK
) printError(ErrSysFatal
, "double failure on undo log insert followed by hash bucket list remove\n");
302 DbRetVal
HashIndex::remove(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool loadFlag
)
304 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
306 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
307 DataType type
= info
->type
;
308 int offset
= info
->fldOffset
;
309 int noOfBuckets
= info
->noOfBuckets
;
311 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
312 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
314 void *keyPtr
=(void*)((char*)tuple
+ offset
);
316 if (type
== typeComposite
) {
317 char *keyBuffer
= (char*) malloc(info
->compLength
);
318 memset(keyBuffer
, 0, info
->compLength
);
319 void *keyStartBuffer
= keyBuffer
;
320 FieldIterator iter
= info
->idxFldList
.getIterator();
321 while(iter
.hasElement())
323 FieldDef
*def
= iter
.nextElement();
324 keyPtr
= (char *)tuple
+ def
->offset_
;
325 if (def
->type_
!= typeVarchar
) {
326 AllDataType::copyVal(keyBuffer
, keyPtr
, def
->type_
,
329 void *ptr
= (void *) *(long *) keyPtr
;
331 AllDataType::copyVal(keyBuffer
, ptr
, def
->type_
,
334 keyBuffer
= keyBuffer
+ AllDataType::size(def
->type_
,def
->length_
);
336 bucket
= HashIndex::computeHashBucket(type
, keyStartBuffer
, noOfBuckets
, info
->compLength
);
337 ::free(keyStartBuffer
);
340 if (type
!= typeVarchar
)
341 bucket
= HashIndex::computeHashBucket(type
, keyPtr
, noOfBuckets
,
344 void *ptr
= (void *) *(long *) keyPtr
;
346 bucket
= HashIndex::computeHashBucket(type
, ptr
, noOfBuckets
,
352 Bucket
*bucket1
= &buckets
[bucket
];
353 HashUndoLogInfo hInfo
;
354 hInfo
.metaData_
= tbl
->db_
->getMetaDataPtr();
355 hInfo
.bucket_
= bucket1
;
356 hInfo
.tuple_
= tuple
;
357 hInfo
.hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
358 hInfo
.keyPtr_
= keyPtr
;
360 IndexNode
*head
= (IndexNode
*) bucket1
->bucketList_
;
362 if (!head
) { printError(ErrNotExists
, "Hash index does not exist:should never happen\n");
365 BucketList
list(head
);
366 printDebug(DM_HashIndex
, "Removing hash index node from head %x", head
);
368 DbRetVal rc
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
371 printDebug(DM_HashIndex
, "Removing hash index node from head ");
372 //bucket1->bucketList_ = list.getBucketListHead();
373 if (0 != Mutex::CASL((long*)&bucket1
->bucketList_
,
374 (long)head
, (long)list
.getBucketListHead())) {
375 printError(ErrSysFatal
, "Lock time out for hash bucket. retry\n");
376 return ErrLockTimeOut
;
381 rc
=tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, &hInfo
, sizeof(HashUndoLogInfo
));
384 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
385 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
386 //bucket1->bucketList_ = list.getBucketListHead();
387 if (0 != Mutex::CASL((long*)&bucket1
->bucketList_
,
388 (long)bucket1
->bucketList_
, (long)list
.getBucketListHead())) {
389 printError(ErrSysFatal
, "Double failure on index insert");
396 DbRetVal
HashIndex::update(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool loadFlag
)
398 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
400 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
401 DataType type
= info
->type
;
402 int offset
= info
->fldOffset
;
403 int noOfBuckets
= info
->noOfBuckets
;
405 //check whether the index key is updated or not
406 //if it is not updated return from here
407 void *keyPtr
=(void*)((char*)tuple
+ offset
);
408 char *kPtr
= (char*)keyPtr
;
410 //creating old key value buffer for composite primary keys
411 char *oldKeyBuffer
= (char*) malloc(info
->compLength
);
412 memset(oldKeyBuffer
, 0, info
->compLength
);
413 void *oldKeyStartBuffer
= oldKeyBuffer
;
414 FieldIterator iter
= info
->idxFldList
.getIterator();
415 while(iter
.hasElement()) {
416 FieldDef
*def
= iter
.nextElement();
417 keyPtr
= (char *)tuple
+ def
->offset_
;
418 AllDataType::copyVal(oldKeyBuffer
, keyPtr
, def
->type_
, def
->length_
);
419 oldKeyBuffer
= oldKeyBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
422 keyPtr
= (void *) kPtr
;
423 //Iterate through the bind list and check
424 FieldIterator idxFldIter
= info
->idxFldList
.getIterator();
425 char *keyBindBuffer
;
426 if(type
==typeBinary
) {
427 keyBindBuffer
= (char*) malloc(2 * info
->compLength
);
428 memset(keyBindBuffer
, 0, 2 * info
->compLength
);
430 keyBindBuffer
= (char*) malloc(info
->compLength
);
431 memset(keyBindBuffer
, 0, info
->compLength
);
433 void *keyStartBuffer
= (void*) keyBindBuffer
;
434 bool keyUpdated
= false;
436 while (idxFldIter
.hasElement()) {
437 FieldDef
*idef
= idxFldIter
.nextElement();
438 FieldIterator fldIter
= tbl
->fldList_
.getIterator();
439 while (fldIter
.hasElement()) {
440 FieldDef
*def
= fldIter
.nextElement();
441 if (0 == strcmp(def
->fldName_
, idef
->fldName_
)) {
442 if (NULL
!= def
->bindVal_
) {
443 if(type
==typeBinary
) {
444 AllDataType::copyVal(keyBindBuffer
, def
->bindVal_
,
445 def
->type_
, 2*def
->length_
);
446 keyStartBuffer
=calloc(1,info
->compLength
);
447 AllDataType::convertToBinary(keyStartBuffer
, keyBindBuffer
, typeString
, info
->compLength
);
450 AllDataType::copyVal(keyBindBuffer
, def
->bindVal_
,
451 def
->type_
, def
->length_
);
452 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
455 AllDataType::copyVal(keyBindBuffer
, (char *) tuple
+ def
->offset_
, def
->type_
, def
->length_
);
456 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
464 //printf("DEBUG::key not updated\n");
465 free(keyStartBuffer
);
466 free(oldKeyStartBuffer
);
469 //printf("DEBUG::it is wrong coming here\n");
471 if (type
== typeComposite
)
472 result
= AllDataType::compareVal(oldKeyStartBuffer
, keyStartBuffer
,
473 OpEquals
, info
->type
, info
->compLength
);
474 else result
= AllDataType::compareVal(keyPtr
, keyStartBuffer
,
475 OpEquals
, info
->type
, info
->compLength
);
477 free(keyStartBuffer
);
478 free(oldKeyStartBuffer
);
481 printDebug(DM_HashIndex
, "Updating hash index node: Key value is updated");
483 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
485 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
487 //remove the node whose key is updated
489 if (type
== typeComposite
)
490 bucketNo
= computeHashBucket(type
, oldKeyStartBuffer
, noOfBuckets
, info
->compLength
);
491 else bucketNo
= computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
492 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for old value is %d", bucketNo
);
493 Bucket
*bucket
= &buckets
[bucketNo
];
495 HashUndoLogInfo
*hInfo1
= new HashUndoLogInfo();
496 hInfo1
->metaData_
= tbl
->db_
->getMetaDataPtr();
497 hInfo1
->bucket_
= bucket
;
498 hInfo1
->tuple_
= tuple
;
499 hInfo1
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
500 hInfo1
->keyPtr_
= keyPtr
;
502 //it may run into deadlock, when two threads updates tuples which falls in
503 //same buckets.So take both the mutex one after another, which will reduce the
505 int ret
= bucket
->mutex_
.getLock(tbl
->db_
->procSlot
);
509 free(keyStartBuffer
);
510 free(oldKeyStartBuffer
);
511 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucketNo
);
512 return ErrLockTimeOut
;
514 //insert node for the updated key value
515 int newBucketNo
= computeHashBucket(type
,
516 keyStartBuffer
, noOfBuckets
, info
->compLength
);
517 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for new value is %d", newBucketNo
);
519 Bucket
*bucket1
= &buckets
[newBucketNo
];
520 HashUndoLogInfo
*hInfo2
= new HashUndoLogInfo();
521 hInfo2
->metaData_
= tbl
->db_
->getMetaDataPtr();
522 hInfo2
->bucket_
= bucket
;
523 hInfo2
->tuple_
= tuple
;
524 hInfo2
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
525 hInfo2
->keyPtr_
= keyPtr
;
526 bucket1
->mutex_
.getLock(tbl
->db_
->procSlot
);
531 free(keyStartBuffer
);
532 free(oldKeyStartBuffer
);
533 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",newBucketNo
);
534 return ErrLockTimeOut
;
537 IndexNode
*head1
= (IndexNode
*) bucket
->bucketList_
;
540 BucketList
list1(head1
);
541 printDebug(DM_HashIndex
, "Updating hash index node: Removing node from list with head %x", head1
);
542 list1
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
543 bucket
->bucketList_
=list1
.getBucketListHead();
547 printError(ErrSysInternal
,"Update: Bucket list is null");
548 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
549 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
552 free(keyStartBuffer
);
553 free(oldKeyStartBuffer
);
554 return ErrSysInternal
;
558 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, hInfo1
, sizeof(HashUndoLogInfo
));
561 BucketList
list((IndexNode
*) bucket
->bucketList_
);
562 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
563 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
564 bucket
->bucketList_
= list
.getBucketListHead();
565 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
566 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
569 free(keyStartBuffer
);
570 free(oldKeyStartBuffer
);
574 IndexNode
*head2
= (IndexNode
*) bucket1
->bucketList_
;
575 //Note:: the tuple will be in the same address location
576 //so not changing the keyptr and tuple during append
577 //only bucket where this node resides will only change
578 //if the index key is updated.
582 IndexNode
*firstNode
= (IndexNode
*)(((Chunk
*)iptr
->hashNodeChunk_
)->allocate(tbl
->db_
, &rv
));
583 if (firstNode
== NULL
)
585 printError(rv
, "Error in allocating hash node");
586 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
587 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
590 free(keyStartBuffer
);
591 free(oldKeyStartBuffer
);
594 firstNode
->ptrToKey_
= keyPtr
;
595 firstNode
->ptrToTuple_
= tuple
;
596 firstNode
->next_
= NULL
;
597 bucket1
->bucketList_
= (IndexNode
*)firstNode
;
598 printDebug(DM_HashIndex
, "Updating hash index node: Adding new node %x:Head is empty", firstNode
);
602 BucketList
list2(head2
);
603 printDebug(DM_HashIndex
, "Updating hash index node: Adding node to list with head %x", head2
);
604 list2
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
605 bucket1
->bucketList_
= list2
.getBucketListHead();
609 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, hInfo2
, sizeof(HashUndoLogInfo
));
612 //reverting back the changes:delete new node and add the old
613 //node + remove logical undo log of the DeleteHashIndexOperation
614 BucketList
list1((IndexNode
*) bucket
->bucketList_
);
615 BucketList
list2((IndexNode
*) bucket1
->bucketList_
);
616 list1
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
617 list2
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
618 bucket
->bucketList_
= list1
.getBucketListHead();
619 bucket1
->bucketList_
= list2
.getBucketListHead();
620 UndoLogInfo
*logInfo
= tr
->popUndoLog();
621 Chunk
*chunk
= tbl
->sysDB_
->getSystemDatabaseChunk(UndoLogTableID
);
622 chunk
->free(tbl
->sysDB_
, logInfo
);
625 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
626 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
629 free(keyStartBuffer
);
630 free(oldKeyStartBuffer
);
634 //Following three methods are used to undo Logical Hash Indexes
635 DbRetVal
HashIndex::insertLogicalUndoLog(Database
*sysdb
, void *data
)
637 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
638 Chunk
*hChunk
= (Chunk
*) info
->hChunk_
;
640 db
.setMetaDataPtr((DatabaseMetaData
*) info
->metaData_
);
641 db
.setProcSlot(sysdb
->procSlot
);
642 IndexNode
*head
= (IndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
643 BucketList
list(head
);
644 DbRetVal rv
= list
.insert(hChunk
, &db
, info
->keyPtr_
, info
->tuple_
);
647 printError(ErrLockTimeOut
, "Unable to add to bucket..retry\n");
648 return ErrLockTimeOut
;
650 //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
651 if (0 != Mutex::CASL((long*)& (((Bucket
*)info
->bucket_
)->bucketList_
),
652 (long)(((Bucket
*)info
->bucket_
)->bucketList_
),
653 (long)list
.getBucketListHead()))
655 printError(ErrLockTimeOut
, "Unable to add to bucket..retry\n");
656 return ErrLockTimeOut
;
661 DbRetVal
HashIndex::deleteLogicalUndoLog(Database
*sysdb
, void *data
)
663 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
664 Chunk
*hChunk
= (Chunk
*) info
->hChunk_
;
666 db
.setMetaDataPtr((DatabaseMetaData
*)info
->metaData_
);
667 db
.setProcSlot(sysdb
->procSlot
);
668 IndexNode
*head
= (IndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
669 BucketList
list(head
);
670 DbRetVal rc
= list
.remove(hChunk
, &db
, info
->keyPtr_
);
671 //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
673 if (0 != Mutex::CASL((long*)& (((Bucket
*)info
->bucket_
)->bucketList_
),
674 (long)(((Bucket
*)info
->bucket_
)->bucketList_
),
675 (long)list
.getBucketListHead()))
677 printError(ErrLockTimeOut
, "Unable to set the head of hash index bucket\n");
678 return ErrLockTimeOut
;
680 }else if (rc
!= OK
) {
681 printError(ErrLockTimeOut
, "Unable to remove hash index node");
682 return ErrLockTimeOut
;