1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 ***************************************************************************/
17 #include<CatalogTables.h>
23 #include<PredicateImpl.h>
25 /* Defines `hashpjw' function by P.J. Weinberger
26 [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
1986]. */
// hashString: P.J. Weinberger's `hashpjw' over a NUL-terminated string.
// Each byte is folded into a 32-bit accumulator; whenever the top nibble
// becomes non-zero it is XOR-folded back into the low bits so that long
// keys keep mixing instead of shifting information off the top.
// NOTE(review): this copy of the file was garbled (declarations, loop
// header, shift and return were missing); reconstructed around the
// surviving fragments (`hval += ...`, the 0xf<<(32-4) mask, g>>(32-8)).
unsigned int hashString(char *strVal)
{
    unsigned int hval = 0;   // running hash value
    unsigned int g;          // overflowed top nibble to fold back in
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}
// hashBinary: hashpjw variant for fixed-length byte strings (which may
// contain embedded NULs): hashes exactly `length` bytes of strVal using
// the same top-nibble fold as hashString above.
// NOTE(review): this copy of the file was garbled (declarations, loop
// counter, shift and return were missing); reconstructed around the
// surviving fragments (`while (iter != length)`, the 0xf<<(32-4) mask).
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval = 0;   // running hash value
    unsigned int g;          // overflowed top nibble to fold back in
    char *str = strVal;
    int iter = 0;            // byte counter: loop runs exactly `length` times
    while (iter != length)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
        iter++;
    }
    return hval;
}
// HashIndex::computeHashBucket -- maps a key value of the given DataType to
// a bucket number in [0, noOfBuckets) by reading the key at its native width
// and taking `value % noOfBuckets`; strings go through hashString() and
// binary keys through hashBinary() first. `length` is only used for binary.
// NOTE(review): this copy of the file is garbled -- the switch/case labels
// (and braces) selecting each per-type branch are missing, so which
// DataType each fragment below belongs to cannot be confirmed from here;
// restore the file from version control before building.
69 unsigned int HashIndex::computeHashBucket(DataType type
, void *key
, int noOfBuckets
, int length
)
// first visible branch: returns val % noOfBuckets (its declaration of
// `val` is among the missing lines)
77 return val
% noOfBuckets
;
// presumably the typeLong case -- TODO confirm against original source
82 long val
= *(long*)key
;
83 return val
% noOfBuckets
;
// presumably the unsigned-long case
88 unsigned long val
= *(unsigned long*)key
;
89 return val
% noOfBuckets
;
// presumably the long-long case
94 long long val
= *(long long*)key
;
95 return val
% noOfBuckets
;
// presumably the short case
100 short val
= *(short*)key
;
101 return val
% noOfBuckets
;
// presumably the ByteInt (1-byte integer) case
106 ByteInt val
= *(ByteInt
*)key
;
107 return val
% noOfBuckets
;
// two int-width branches follow; their case labels are missing, so the
// exact types they serve (date/time stored as int?) is unverified here
112 int val
= *(int*)key
;
113 return val
% noOfBuckets
;
118 int val
= *(int*)key
;
119 return val
% noOfBuckets
;
// TimeStamp keys: value is read but hashing is not implemented (see TODO)
124 TimeStamp val
= *(TimeStamp
*)key
;
125 //TODO return val % noOfBuckets;
// string keys: hash the NUL-terminated text, then reduce modulo buckets
144 unsigned int val
= hashString((char*)key
);
145 return val
% noOfBuckets
;
// binary keys: hash exactly `length` bytes, then reduce modulo buckets
150 unsigned int val
= hashBinary((char*)key
, length
);
151 return val
% noOfBuckets
;
// HashIndex::insert -- adds a <key, tuple> entry for `tuple` into the hash
// index identified by indexPtr/indInfo: builds the key image, hashes it to
// a bucket, takes that bucket's mutex, optionally enforces uniqueness by
// scanning the bucket chain, links a new HashIndexNode, and (per undoFlag,
// presumably -- the guarding `if` line is missing here) appends a logical
// undo-log record so the insert can be rolled back.
// NOTE(review): this copy of the file is garbled -- braces, several `if`
// headers, local declarations (rc, rv, bucketNo, node, bucketTuple, res)
// and `return` statements are missing, and statements are split across
// lines; restore from version control before building. Comments below
// annotate only what the visible fragments show.
161 DbRetVal
HashIndex::insert(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool undoFlag
)
// cast the opaque index arguments to their concrete catalog types
163 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
164 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
166 int noOfBuckets
= info
->noOfBuckets
;
167 int offset
= info
->fldOffset
;
168 DataType type
= info
->type
;
// assemble a contiguous image of the (possibly composite) key by copying
// each indexed field out of the tuple; keyStartBuffer keeps the base so
// the malloc can be freed on every exit path visible below
169 char *keyBuffer
= (char*) malloc(info
->compLength
);
170 void *keyStartBuffer
= keyBuffer
, *keyPtr
;
171 FieldIterator iter
= info
->idxFldList
.getIterator();
172 while(iter
.hasElement())
174 FieldDef def
= iter
.nextElement();
175 keyPtr
= (char *)tuple
+ def
.offset_
;
176 AllDataType::copyVal(keyBuffer
, keyPtr
, def
.type_
, def
.length_
);
// advance write cursor past the field just copied
177 keyBuffer
= keyBuffer
+ AllDataType::size(def
.type_
, def
.length_
);
180 printDebug(DM_HashIndex
, "Inserting hash index node for %s", iptr
->indName_
);
// the bucket array is the first element of the index's chunk
181 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
182 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
183 keyPtr
=(void*)((char*)tuple
+ offset
);
// composite keys hash the assembled buffer; single-field keys hash the
// field in place within the tuple
185 if (type
== typeComposite
)
186 bucketNo
= computeHashBucket(type
, keyStartBuffer
, noOfBuckets
, info
->compLength
);
188 bucketNo
= computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
189 printDebug(DM_HashIndex
, "HashIndex insert bucketno %d", bucketNo
);
190 Bucket
*bucket
= &(buckets
[bucketNo
]);
// heap-allocated undo-log payload; the matching `delete` is visible near
// the end of this function
191 HashUndoLogInfo
*hInfo
= new HashUndoLogInfo();
192 hInfo
->tblPtr_
= tbl
;
193 hInfo
->bucket_
= bucket
;
194 hInfo
->tuple_
= tuple
;
195 hInfo
->indexPtr_
= indexPtr
;
196 hInfo
->keyPtr_
= keyPtr
;
// serialize all writers of this bucket chain
197 int ret
= bucket
->mutex_
.getLock(tbl
->db_
->procSlot
);
// lock-failure path (the `if (ret != 0)` header is among the missing lines)
201 free(keyStartBuffer
);
202 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucketNo
);
203 return ErrLockTimeOut
;
205 HashIndexNode
*head
= (HashIndexNode
*) bucket
->bucketList_
;
// unique index: scan the existing chain for an equal key before linking
206 if (head
&& info
->isUnique
)
208 BucketList
list(head
);
209 BucketIter iter
= list
.getIterator();
212 printDebug(DM_HashIndex
, "HashIndex insert Checking for unique");
215 while((node
= iter
.next()) != NULL
)
217 bucketTuple
= node
->ptrToTuple_
;
// composite keys compare field by field; others compare the raw field
218 if (type
== typeComposite
) {
219 FieldIterator fldIter
= info
->idxFldList
.getIterator();
221 while (fldIter
.hasElement()) {
222 FieldDef def
= fldIter
.nextElement();
223 res
= AllDataType::compareVal((char *)bucketTuple
+ def
.offset_
, (char *)tuple
+ def
.offset_
, OpEquals
, def
.type_
, def
.length_
);
227 else res
= AllDataType::compareVal((void*)((char*)bucketTuple
+offset
), (void*)((char*)tuple
+offset
), OpEquals
,type
, info
->compLength
);
// duplicate found: release resources and fail (the guarding `if (res)`
// and the `return ErrUnique` are among the missing lines)
231 free(keyStartBuffer
);
232 printError(ErrUnique
, "Unique key violation");
233 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
239 printDebug(DM_HashIndex
, "HashIndex insert into bucket list");
// empty bucket: allocate the first node directly from the node chunk
242 printDebug(DM_HashIndex
, "HashIndex insert head is empty");
244 HashIndexNode
*firstNode
= (HashIndexNode
*)(((Chunk
*)iptr
->hashNodeChunk_
)->allocate(tbl
->db_
, &rv
));
// allocation-failure path: unlock and bail out (return among missing lines)
245 if (firstNode
== NULL
)
247 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
249 free(keyStartBuffer
);
// publish the new node as the bucket chain head
252 firstNode
->ptrToKey_
= keyPtr
;
253 firstNode
->ptrToTuple_
= tuple
;
254 firstNode
->next_
= NULL
;
255 bucket
->bucketList_
= (HashIndexNode
*)firstNode
;
256 printDebug(DM_HashIndex
, "HashIndex insert new node %x in empty bucket", bucket
->bucketList_
);
// non-empty bucket: append through the BucketList helper
260 BucketList
list(head
);
261 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
// insert-failure cleanup (guarding `if (rc != OK)` among missing lines)
263 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
265 free(keyStartBuffer
);
// append a logical undo record so a rollback can remove this entry;
// presumably guarded by `undoFlag` -- TODO confirm against original
271 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, hInfo
, sizeof(HashUndoLogInfo
));
// undo-log append failed: compensate by removing the just-inserted node
274 BucketList
list(head
);
275 rc
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
276 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log insert followed by hash bucket list remove\n");
277 bucket
->bucketList_
= list
.getBucketListHead();
// common exit: release key buffer, undo payload, and the bucket mutex
// (the final `return rc;` is among the missing lines)
280 free(keyStartBuffer
);
281 delete hInfo
; hInfo
= NULL
;
282 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
// HashIndex::remove -- deletes the index entry for `tuple`: rebuilds the key
// image, hashes it to the owning bucket, locks that bucket, unlinks the
// matching HashIndexNode from the chain, and (presumably under undoFlag --
// the guarding `if` line is missing here) appends a DeleteHashIndexOperation
// undo record, re-inserting the node if the undo append fails.
// NOTE(review): this copy of the file is garbled -- braces, `if` headers,
// some declarations and `return` statements are missing, and statements are
// split across lines; restore from version control before building.
287 DbRetVal
HashIndex::remove(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool undoFlag
)
289 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
291 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
292 DataType type
= info
->type
;
293 int offset
= info
->fldOffset
;
294 int noOfBuckets
= info
->noOfBuckets
;
// bucket array lives at the front of the index chunk
296 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
297 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
// rebuild the contiguous (possibly composite) key image, same as insert()
299 char *keyBuffer
= (char*) malloc(info
->compLength
);
300 void *keyStartBuffer
= keyBuffer
, *keyPtr
;
301 FieldIterator iter
= info
->idxFldList
.getIterator();
302 while(iter
.hasElement())
304 FieldDef def
= iter
.nextElement();
305 keyPtr
= (char *)tuple
+ def
.offset_
;
306 AllDataType::copyVal(keyBuffer
, keyPtr
, def
.type_
, def
.length_
);
307 keyBuffer
= keyBuffer
+ AllDataType::size(def
.type_
, def
.length_
);
310 keyPtr
=(void*)((char*)tuple
+ offset
);
// NOTE: here `bucket` is the bucket NUMBER (an index), while `bucket1`
// below is the Bucket pointer -- confusing but what the fragments show
312 if (type
== typeComposite
)
313 bucket
= HashIndex::computeHashBucket(type
, keyStartBuffer
, noOfBuckets
, info
->compLength
);
314 else bucket
= HashIndex::computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
316 Bucket
*bucket1
= &buckets
[bucket
];
// undo-log payload describing this removal
317 HashUndoLogInfo
*hInfo
= new HashUndoLogInfo();
318 hInfo
->tblPtr_
= tbl
;
319 hInfo
->bucket_
= bucket1
;
320 hInfo
->tuple_
= tuple
;
321 hInfo
->indexPtr_
= indexPtr
;
322 hInfo
->keyPtr_
= keyPtr
;
// serialize writers of this bucket chain
324 int ret
= bucket1
->mutex_
.getLock(tbl
->db_
->procSlot
);
// lock-failure path (the `if (ret != 0)` header is among the missing lines)
328 free(keyStartBuffer
);
329 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucket
);
330 return ErrLockTimeOut
;
332 HashIndexNode
*head
= (HashIndexNode
*) bucket1
->bucketList_
;
// an entry being removed must exist; empty chain indicates corruption
334 if (!head
) { printError(ErrNotExists
, "Hash index does not exist:should never happen\n");
335 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
337 free(keyStartBuffer
);
// unlink the node carrying this key from the chain
340 BucketList
list(head
);
341 printDebug(DM_HashIndex
, "Removing hash index node from head %x", head
);
343 DbRetVal rc
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
346 printDebug(DM_HashIndex
, "Removing hash index node from head ");
// the remove may have changed the chain head; republish it
347 bucket1
->bucketList_
= list
.getBucketListHead();
// log the delete so rollback can re-insert; presumably guarded by
// undoFlag -- TODO confirm against original
351 rc
=tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, hInfo
, sizeof(HashUndoLogInfo
));
// undo-log append failed: compensate by re-inserting the removed entry
354 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
355 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
356 bucket1
->bucketList_
= list
.getBucketListHead();
// common exit: unlock and free (final `return rc;` among missing lines)
359 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
361 free(keyStartBuffer
);
// HashIndex::update -- re-homes a tuple's index entry when its key changes:
// captures the OLD key image from the tuple, assembles the NEW key from the
// bound field values, compares them (no-op if equal), then locks BOTH the
// old and new buckets, removes the node from the old chain, inserts it into
// the new one, and writes Delete+Insert undo records; visible fragments
// also show a compensation path that reverts both chains and pops the undo
// log if the second append fails.
// NOTE(review): this copy of the file is garbled -- braces, `if`/`else`
// headers, several declarations (rc, rv, result, bucketNo) and `return`
// statements are missing, and statements are split across lines; restore
// from version control before building. This function's locking order
// (old bucket then new bucket) is deadlock-sensitive -- see the surviving
// comment fragments near the getLock calls.
365 DbRetVal
HashIndex::update(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool undoFlag
)
367 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
369 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
370 DataType type
= info
->type
;
371 int offset
= info
->fldOffset
;
372 int noOfBuckets
= info
->noOfBuckets
;
374 //check whether the index key is updated or not
375 //if it is not updated return from here
376 void *keyPtr
=(void*)((char*)tuple
+ offset
);
// kPtr remembers the in-tuple key location; keyPtr is reused as a field
// cursor in the loop below and restored from kPtr afterwards
377 char *kPtr
= (char*)keyPtr
;
379 //creating old key value buffer for composite primary keys
380 char *oldKeyBuffer
= (char*) malloc(info
->compLength
);
381 void *oldKeyStartBuffer
= oldKeyBuffer
;
382 FieldIterator iter
= info
->idxFldList
.getIterator();
383 while(iter
.hasElement()) {
384 FieldDef def
= iter
.nextElement();
385 keyPtr
= (char *)tuple
+ def
.offset_
;
386 AllDataType::copyVal(oldKeyBuffer
, keyPtr
, def
.type_
, def
.length_
);
387 oldKeyBuffer
= oldKeyBuffer
+ AllDataType::size(def
.type_
, def
.length_
);
// restore keyPtr to the start of the in-tuple key
390 keyPtr
= (void *) kPtr
;
391 //Iterate through the bind list and check
392 FieldIterator idxFldIter
= info
->idxFldList
.getIterator();
// new-key staging buffer; binary keys need 2x space for the text form
// that is later converted back to binary
393 char *keyBindBuffer
;
394 if(type
==typeBinary
) keyBindBuffer
= (char*) malloc(2 * info
->compLength
);
395 else keyBindBuffer
= (char*) malloc(info
->compLength
);
396 void *keyStartBuffer
= (void*) keyBindBuffer
;
397 bool keyUpdated
= false;
// for each indexed field, find its FieldDef in the table's field list by
// name and stage either the newly bound value or the current tuple value
399 while (idxFldIter
.hasElement()) {
400 FieldDef idef
= idxFldIter
.nextElement();
401 FieldIterator fldIter
= tbl
->fldList_
.getIterator();
402 while (fldIter
.hasElement()) {
403 FieldDef def
= fldIter
.nextElement();
404 if (0 == strcmp(def
.fldName_
, idef
.fldName_
)) {
405 if (NULL
!= def
.bindVal_
) {
// binary: copy the bound text form, then convert into a fresh
// calloc'd buffer (keyStartBuffer is re-pointed here -- note the
// original malloc'd buffer appears to leak on this path)
406 if(type
==typeBinary
) {
407 AllDataType::copyVal(keyBindBuffer
, def
.bindVal_
,
408 def
.type_
, 2*def
.length_
);
409 keyStartBuffer
=calloc(1,info
->compLength
);
410 AllDataType::convertToBinary(keyStartBuffer
, keyBindBuffer
, typeString
, info
->compLength
);
// non-binary bound value: stage it and advance the cursor
413 AllDataType::copyVal(keyBindBuffer
, def
.bindVal_
,
414 def
.type_
, def
.length_
);
415 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
.type_
, def
.length_
);
// field not bound: stage the tuple's current value unchanged
418 AllDataType::copyVal(keyBindBuffer
, (char *) tuple
+ def
.offset_
, def
.type_
, def
.length_
);
419 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
.type_
, def
.length_
);
// fast path: no indexed field was bound, key cannot have changed
// (the guarding `if (!keyUpdated)` and `return OK` are missing here)
427 //printf("PRABA::key not updated\n");
428 free(keyStartBuffer
);
429 free(oldKeyStartBuffer
);
432 //printf("PRABA::it is wrong coming here\n");
// compare old vs new key images; equal keys mean nothing to re-home
434 if (type
== typeComposite
)
435 result
= AllDataType::compareVal(oldKeyStartBuffer
, keyStartBuffer
,
436 OpEquals
, info
->type
, info
->compLength
);
437 else result
= AllDataType::compareVal(keyPtr
, keyStartBuffer
,
438 OpEquals
, info
->type
, info
->compLength
);
// equal-key exit path (guarding `if (result)` among missing lines)
440 free(keyStartBuffer
);
441 free(oldKeyStartBuffer
);
444 printDebug(DM_HashIndex
, "Updating hash index node: Key value is updated");
446 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
448 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
450 //remove the node whose key is updated
// old bucket: hash the OLD key image (or the in-tuple key)
452 if (type
== typeComposite
)
453 bucketNo
= computeHashBucket(type
, oldKeyStartBuffer
, noOfBuckets
, info
->compLength
);
454 else bucketNo
= computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
455 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for old value is %d", bucketNo
);
456 Bucket
*bucket
= &buckets
[bucketNo
];
// undo payload #1: the delete from the old bucket
458 HashUndoLogInfo
*hInfo1
= new HashUndoLogInfo();
459 hInfo1
->tblPtr_
= tbl
;
460 hInfo1
->bucket_
= bucket
;
461 hInfo1
->tuple_
= tuple
;
462 hInfo1
->indexPtr_
= indexPtr
;
463 hInfo1
->keyPtr_
= keyPtr
;
465 //it may run into deadlock, when two threads updates tuples which falls in
466 //same buckets.So take both the mutex one after another, which will reduce the
468 int ret
= bucket
->mutex_
.getLock(tbl
->db_
->procSlot
);
// old-bucket lock failure (guarding `if (ret != 0)` among missing lines)
472 free(keyStartBuffer
);
473 free(oldKeyStartBuffer
);
474 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucketNo
);
475 return ErrLockTimeOut
;
477 //insert node for the updated key value
// new bucket: hash the NEW (staged) key image
478 int newBucketNo
= computeHashBucket(type
,
479 keyStartBuffer
, noOfBuckets
, info
->compLength
);
480 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for new value is %d", newBucketNo
);
482 Bucket
*bucket1
= &buckets
[newBucketNo
];
// undo payload #2: the insert into the new bucket
// NOTE(review): bucket_ is set to the OLD bucket here, not bucket1 --
// looks suspicious; verify against the original source
483 HashUndoLogInfo
*hInfo2
= new HashUndoLogInfo();
484 hInfo2
->tblPtr_
= tbl
;
485 hInfo2
->bucket_
= bucket
;
486 hInfo2
->tuple_
= tuple
;
487 hInfo2
->indexPtr_
= indexPtr
;
488 hInfo2
->keyPtr_
= keyPtr
;
// second lock (new bucket); return value is not captured in this copy
489 bucket1
->mutex_
.getLock(tbl
->db_
->procSlot
);
// new-bucket lock failure path (its `if` header is among missing lines)
494 free(keyStartBuffer
);
495 free(oldKeyStartBuffer
);
496 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",newBucketNo
);
497 return ErrLockTimeOut
;
500 HashIndexNode
*head1
= (HashIndexNode
*) bucket
->bucketList_
;
// unlink the node from the old bucket's chain
503 BucketList
list1(head1
);
504 printDebug(DM_HashIndex
, "Updating hash index node: Removing node from list with head %x", head1
);
505 list1
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
506 bucket
->bucketList_
=list1
.getBucketListHead();
// old chain empty when it should not be: internal error, unlock both
// buckets in reverse order and bail out
510 printError(ErrSysInternal
,"Update: Bucket list is null");
511 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
512 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
515 free(keyStartBuffer
);
516 free(oldKeyStartBuffer
);
517 return ErrSysInternal
;
// undo record for the removal from the old bucket; presumably guarded by
// undoFlag -- TODO confirm against original
521 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, hInfo1
, sizeof(HashUndoLogInfo
));
// append failed: compensate by re-inserting into the old chain
524 BucketList
list((HashIndexNode
*) bucket
->bucketList_
);
525 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
526 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
527 bucket
->bucketList_
= list
.getBucketListHead();
528 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
529 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
532 free(keyStartBuffer
);
533 free(oldKeyStartBuffer
);
537 HashIndexNode
*head2
= (HashIndexNode
*) bucket1
->bucketList_
;
538 //Note:: the tuple will be in the same address location
539 //so not changing the keyptr and tuple during append
540 //only bucket where this node resides will only change
541 //if the index key is updated.
// new bucket empty: allocate and publish the first node directly
545 HashIndexNode
*firstNode
= (HashIndexNode
*)(((Chunk
*)iptr
->hashNodeChunk_
)->allocate(tbl
->db_
, &rv
));
546 if (firstNode
== NULL
)
548 printError(rv
, "Error in allocating hash node");
549 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
550 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
553 free(keyStartBuffer
);
554 free(oldKeyStartBuffer
);
557 firstNode
->ptrToKey_
= keyPtr
;
558 firstNode
->ptrToTuple_
= tuple
;
559 firstNode
->next_
= NULL
;
560 bucket1
->bucketList_
= (HashIndexNode
*)firstNode
;
561 printDebug(DM_HashIndex
, "Updating hash index node: Adding new node %x:Head is empty", firstNode
);
// new bucket non-empty: append via the BucketList helper
565 BucketList
list2(head2
);
566 printDebug(DM_HashIndex
, "Updating hash index node: Adding node to list with head %x", head2
);
567 list2
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
568 bucket1
->bucketList_
= list2
.getBucketListHead();
// undo record for the insert into the new bucket
572 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, hInfo2
, sizeof(HashUndoLogInfo
));
575 //reverting back the changes:delete new node and add the old
576 //node + remove logical undo log of the DeleteHashIndexOperation
577 BucketList
list1((HashIndexNode
*) bucket
->bucketList_
);
578 BucketList
list2((HashIndexNode
*) bucket1
->bucketList_
);
579 list1
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
580 list2
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
581 bucket
->bucketList_
= list1
.getBucketListHead();
582 bucket1
->bucketList_
= list2
.getBucketListHead();
// discard the now-orphaned DeleteHashIndexOperation undo record
583 UndoLogInfo
*logInfo
= tr
->popUndoLog();
584 Chunk
*chunk
= tbl
->sysDB_
->getSystemDatabaseChunk(UndoLogTableID
);
585 chunk
->free(tbl
->sysDB_
, logInfo
);
// common exit: release both bucket mutexes (new first, then old) and the
// key buffers (final `return rc;` is among the missing lines)
588 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
589 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
592 free(keyStartBuffer
);
593 free(oldKeyStartBuffer
);
597 //Following three methods are used to undo Logical Hash Indexes
// HashIndex::insertLogicalUndoLog -- undo handler: re-links the <keyPtr_,
// tuple_> entry recorded in a HashUndoLogInfo back into its bucket's chain
// (used to roll back a logical hash-index delete).
// NOTE(review): this copy of the file is garbled -- the function's braces
// and trailing `return` are missing; restore from version control before
// building. `sysdb` is not referenced by the visible fragments.
598 DbRetVal
HashIndex::insertLogicalUndoLog(Database
*sysdb
, void *data
)
// the undo payload captured at operation time
600 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
601 TableImpl
* tbl
= (TableImpl
*)info
->tblPtr_
;
// node storage chunk of the index this entry belongs to
602 Chunk
*hChunk
= (Chunk
*) ((CINDEX
*)info
->indexPtr_
)->hashNodeChunk_
;
603 HashIndexNode
*head
= (HashIndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
// re-insert the recorded key/tuple pair and republish the chain head
604 BucketList
list(head
);
605 list
.insert(hChunk
, tbl
->db_
, info
->keyPtr_
, info
->tuple_
);
606 ((Bucket
*)info
->bucket_
)->bucketList_
= list
.getBucketListHead();
610 DbRetVal
HashIndex::deleteLogicalUndoLog(Database
*sysdb
, void *data
)
612 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
613 TableImpl
* tbl
= (TableImpl
*)info
->tblPtr_
;
614 Chunk
*hChunk
= (Chunk
*) ((CINDEX
*)info
->indexPtr_
)->hashNodeChunk_
;
615 HashIndexNode
*head
= (HashIndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
616 BucketList
list(head
);
617 list
.remove(hChunk
, tbl
->db_
, info
->keyPtr_
);
618 ((Bucket
*)info
->bucket_
)->bucketList_
= list
.getBucketListHead();