Fixing leaks and JDBCTest performance improvement
[csql.git] / src / storage / HashIndex.cxx
blob10bd596d1e333af72a20ac0172b7ff4fc9eea907
1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 ***************************************************************************/
16 #include<Index.h>
17 #include<CatalogTables.h>
18 #include<Lock.h>
19 #include<Debug.h>
20 #include<Table.h>
21 #include<TableImpl.h>
22 #include<Predicate.h>
23 #include<PredicateImpl.h>
25 /* Defines the `hashpjw' hash function by P.J. Weinberger
26 [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools] */
/* hashpjw (P.J. Weinberger) hash over a NUL-terminated string.
 * Each character is folded into the accumulator four bits at a time;
 * whenever the top nibble becomes non-zero it is redistributed into the
 * low bits so long keys keep mixing well. */
unsigned int hashString(char *strVal)
{
    unsigned int accum = 0;
    for (char *cp = strVal; *cp != '\0'; cp++)
    {
        unsigned int topNibble;
        accum = (accum << 4) + (unsigned int) *cp;
        topNibble = accum & ((unsigned int) 0xf << (32 - 4));
        if (topNibble != 0)
        {
            accum ^= topNibble >> (32 - 8);
            accum ^= topNibble;  // clears the top nibble again
        }
    }
    return accum;
}
/* hashpjw (P.J. Weinberger) hash over exactly `length` bytes.
 * Identical mixing to hashString, but bounded by an explicit byte count
 * instead of a NUL terminator, so embedded zero bytes are hashed too. */
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int accum = 0;
    char *cp = strVal;
    /* `pos != length` (not `<`) preserves the original loop condition */
    for (int pos = 0; pos != length; pos++)
    {
        unsigned int topNibble;
        accum = (accum << 4) + (unsigned int) *cp++;
        topNibble = accum & ((unsigned int) 0xf << (32 - 4));
        if (topNibble != 0)
        {
            accum ^= topNibble >> (32 - 8);
            accum ^= topNibble;  // clears the top nibble again
        }
    }
    return accum;
}
69 unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
72 switch(type)
74 case typeInt:
76 int val = *(int*)key;
77 return val % noOfBuckets;
78 break;
80 case typeLong:
82 long val = *(long*)key;
83 return val % noOfBuckets;
84 break;
86 case typeULong:
88 unsigned long val = *(unsigned long*)key;
89 return val % noOfBuckets;
90 break;
92 case typeLongLong:
94 long long val = *(long long*)key;
95 return val % noOfBuckets;
96 break;
98 case typeShort:
100 short val = *(short*)key;
101 return val % noOfBuckets;
102 break;
104 case typeByteInt:
106 ByteInt val = *(ByteInt*)key;
107 return val % noOfBuckets;
108 break;
110 case typeDate:
112 int val = *(int*)key;
113 return val % noOfBuckets;
114 break;
116 case typeTime:
118 int val = *(int*)key;
119 return val % noOfBuckets;
120 break;
122 case typeTimeStamp:
124 TimeStamp val = *(TimeStamp*)key;
125 //TODO return val % noOfBuckets;
126 break;
128 case typeDouble:
130 //TODO
131 break;
133 case typeFloat:
135 //TODO
136 break;
138 case typeDecimal:
140 //TODO::for porting
142 case typeString:
144 unsigned int val = hashString((char*)key);
145 return val % noOfBuckets;
147 case typeComposite:
148 case typeBinary:
150 unsigned int val = hashBinary((char*)key, length);
151 return val % noOfBuckets;
153 default:
155 break;
158 return -1;
// Insert a hash-index entry pointing at `tuple`.
// Builds the (possibly composite) key image, hashes it to a bucket,
// enforces uniqueness when the index is unique, links a new node into the
// bucket chain under the bucket mutex, and (when undoFlag is set) appends
// a logical undo record so the insert can be rolled back.
// Returns OK, ErrLockTimeOut, ErrUnique, or an allocation/undo-log error.
161 DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
163 HashIndexInfo *info = (HashIndexInfo*) indInfo;
164 CINDEX *iptr = (CINDEX*)indexPtr;
165 DbRetVal rc = OK;
166 int noOfBuckets = info->noOfBuckets;
167 int offset = info->fldOffset;
168 DataType type = info->type;
// Build a contiguous key image by concatenating every indexed field of
// the tuple; used when the key is composite.  Freed on every exit path.
169 char *keyBuffer = (char*) malloc(info->compLength);
170 void *keyStartBuffer = keyBuffer, *keyPtr;
171 FieldIterator iter = info->idxFldList.getIterator();
172 while(iter.hasElement())
174 FieldDef def = iter.nextElement();
175 keyPtr = (char *)tuple + def.offset_;
176 AllDataType::copyVal(keyBuffer, keyPtr, def.type_, def.length_);
177 keyBuffer = keyBuffer + AllDataType::size(def.type_, def.length_);
180 printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
// First chunk page of the index holds the bucket array.
181 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
182 Bucket* buckets = (Bucket*)citer.nextElement();
// For single-field keys hash the field in place; for composite keys
// hash the concatenated key image built above.
183 keyPtr =(void*)((char*)tuple + offset);
184 int bucketNo = 0;
185 if (type == typeComposite)
186 bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
187 else
188 bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
189 printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
190 Bucket *bucket = &(buckets[bucketNo]);
// Undo-log payload describing this insert; deleted on every exit path.
191 HashUndoLogInfo *hInfo = new HashUndoLogInfo();
192 hInfo->tblPtr_ = tbl;
193 hInfo->bucket_ = bucket;
194 hInfo->tuple_ = tuple;
195 hInfo->indexPtr_ = indexPtr;
196 hInfo->keyPtr_ = keyPtr;
// Serialize all access to this bucket's chain.
197 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
198 if (ret != 0)
200 delete hInfo;
201 free(keyStartBuffer);
202 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
203 return ErrLockTimeOut;
// Unique index: scan the existing chain for an equal key before linking.
205 HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
206 if (head && info->isUnique)
208 BucketList list(head);
209 BucketIter iter = list.getIterator();
210 HashIndexNode *node;
211 void *bucketTuple;
212 printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
213 bool res = false;
215 while((node = iter.next()) != NULL)
217 bucketTuple = node->ptrToTuple_;
// Composite keys compare field by field; any mismatch ends the check.
218 if (type == typeComposite) {
219 FieldIterator fldIter = info->idxFldList.getIterator();
220 int i = 0;
221 while (fldIter.hasElement()) {
222 FieldDef def = fldIter.nextElement();
223 res = AllDataType::compareVal((char *)bucketTuple + def.offset_, (char *)tuple + def.offset_, OpEquals, def.type_, def.length_);
224 if (!res) break;
227 else res = AllDataType::compareVal((void*)((char*)bucketTuple +offset), (void*)((char*)tuple +offset), OpEquals,type, info->compLength);
228 if (res)
// Duplicate key found: release the bucket and report the violation.
230 delete hInfo;
231 free(keyStartBuffer);
232 printError(ErrUnique, "Unique key violation");
233 bucket->mutex_.releaseLock(tbl->db_->procSlot);
234 return ErrUnique;
239 printDebug(DM_HashIndex, "HashIndex insert into bucket list");
// Empty bucket: allocate the first node directly; otherwise append via
// BucketList, which handles chain linkage.
240 if (!head)
242 printDebug(DM_HashIndex, "HashIndex insert head is empty");
243 DbRetVal rv = OK;
244 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
245 if (firstNode == NULL)
247 bucket->mutex_.releaseLock(tbl->db_->procSlot);
248 delete hInfo;
249 free(keyStartBuffer);
250 return rv;
252 firstNode->ptrToKey_ = keyPtr;
253 firstNode->ptrToTuple_ = tuple;
254 firstNode->next_ = NULL;
255 bucket->bucketList_ = (HashIndexNode*)firstNode;
256 printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
258 else
260 BucketList list(head);
261 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
262 if (rc !=OK) {
263 bucket->mutex_.releaseLock(tbl->db_->procSlot);
264 delete hInfo;
265 free(keyStartBuffer);
266 return rc;
// Record the logical undo entry; if that fails, unlink the node just
// inserted so index and undo log stay consistent.
270 if (undoFlag) {
271 rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo, sizeof(HashUndoLogInfo));
272 if (rc !=OK)
274 BucketList list(head);
275 rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
276 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
277 bucket->bucketList_ = list.getBucketListHead();
// Normal exit: free the scratch key image and undo payload, then unlock.
280 free(keyStartBuffer);
281 delete hInfo; hInfo = NULL;
282 bucket->mutex_.releaseLock(tbl->db_->procSlot);
283 return rc;
// Remove the hash-index entry for `tuple`.
// Rebuilds the key image, hashes it to its bucket, unlinks the matching
// node from the bucket chain under the bucket mutex, and (when undoFlag
// is set) appends a logical undo record; if the undo append fails the
// node is re-inserted.  Returns OK, ErrLockTimeOut, ErrNotExists, or an
// undo-log error.
287 DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
289 CINDEX *iptr = (CINDEX*)indexPtr;
291 HashIndexInfo *info = (HashIndexInfo*) indInfo;
292 DataType type = info->type;
293 int offset = info->fldOffset;
294 int noOfBuckets = info->noOfBuckets;
// First chunk page of the index holds the bucket array.
296 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
297 Bucket* buckets = (Bucket*)citer.nextElement();
// Build the concatenated key image (needed for composite keys); freed on
// every exit path.
299 char *keyBuffer = (char*) malloc(info->compLength);
300 void *keyStartBuffer = keyBuffer, *keyPtr;
301 FieldIterator iter = info->idxFldList.getIterator();
302 while(iter.hasElement())
304 FieldDef def = iter.nextElement();
305 keyPtr = (char *)tuple + def.offset_;
306 AllDataType::copyVal(keyBuffer, keyPtr, def.type_, def.length_);
307 keyBuffer = keyBuffer + AllDataType::size(def.type_, def.length_);
// Single-field keys hash in place; composite keys hash the image above.
310 keyPtr =(void*)((char*)tuple + offset);
311 int bucket = 0;
312 if (type == typeComposite)
313 bucket = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
314 else bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
316 Bucket *bucket1 = &buckets[bucket];
// Undo-log payload describing this removal; deleted on every exit path.
317 HashUndoLogInfo *hInfo = new HashUndoLogInfo();
318 hInfo->tblPtr_ = tbl;
319 hInfo->bucket_ = bucket1;
320 hInfo->tuple_ = tuple;
321 hInfo->indexPtr_ = indexPtr;
322 hInfo->keyPtr_ = keyPtr;
// Serialize all access to this bucket's chain.
324 int ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
325 if (ret != 0)
327 delete hInfo;
328 free(keyStartBuffer);
329 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucket);
330 return ErrLockTimeOut;
332 HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;
// An indexed tuple must have a node in its bucket; an empty chain means
// index corruption.
334 if (!head) { printError(ErrNotExists, "Hash index does not exist:should never happen\n");
335 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
336 delete hInfo;
337 free(keyStartBuffer);
338 return ErrNotExists;
340 BucketList list(head);
341 printDebug(DM_HashIndex, "Removing hash index node from head %x", head);
343 DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
// SplCase appears to signal that the chain head itself was removed, so
// the bucket's head pointer must be refreshed -- presumably; confirm
// against BucketList::remove.
344 if (SplCase == rc)
346 printDebug(DM_HashIndex, "Removing hash index node from head ")
347 bucket1->bucketList_ = list.getBucketListHead();
348 rc = OK;
// Record the logical undo entry; if that fails, re-insert the node so
// index and undo log stay consistent.
350 if (undoFlag) {
351 rc =tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo, sizeof(HashUndoLogInfo));
352 if (rc !=OK)
354 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
355 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
356 bucket1->bucketList_ = list.getBucketListHead();
// Normal exit: unlock, then free the undo payload and scratch key image.
359 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
360 delete hInfo;
361 free(keyStartBuffer);
362 return rc;
// Move a tuple's hash-index entry when an UPDATE changes the key value.
// Builds the old key image and the new (bound) key image, returns OK
// early when no indexed field is bound or the key value is unchanged,
// otherwise removes the node from the old bucket and inserts it into the
// new bucket under both bucket mutexes, logging a logical undo record
// for each half when undoFlag is set.
365 DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
367 CINDEX *iptr = (CINDEX*)indexPtr;
369 HashIndexInfo *info = (HashIndexInfo*) indInfo;
370 DataType type = info->type;
371 int offset = info->fldOffset;
372 int noOfBuckets = info->noOfBuckets;
374 //check whether the index key is updated or not
375 //if it is not updated return from here
376 void *keyPtr =(void*)((char*)tuple + offset);
377 char *kPtr= (char*)keyPtr;
379 //creating old key value buffer for composite primary keys
// Snapshot of the CURRENT (pre-update) key, concatenated field by field.
380 char *oldKeyBuffer = (char*) malloc(info->compLength);
381 void *oldKeyStartBuffer = oldKeyBuffer;
382 FieldIterator iter = info->idxFldList.getIterator();
383 while(iter.hasElement()) {
384 FieldDef def = iter.nextElement();
385 keyPtr = (char *)tuple + def.offset_;
386 AllDataType::copyVal(oldKeyBuffer, keyPtr, def.type_, def.length_);
387 oldKeyBuffer = oldKeyBuffer + AllDataType::size(def.type_, def.length_);
390 keyPtr = (void *) kPtr;
391 //Iterate through the bind list and check
// Build the NEW key image: for each indexed field take the bound value
// if one is set, else the tuple's current value.  keyUpdated stays false
// only if no indexed field was matched at all.
392 FieldIterator idxFldIter = info->idxFldList.getIterator();
393 char *keyBindBuffer ;
394 if(type==typeBinary) keyBindBuffer = (char*) malloc(2 * info->compLength);
395 else keyBindBuffer = (char*) malloc(info->compLength);
396 void *keyStartBuffer = (void*) keyBindBuffer;
397 bool keyUpdated = false;
399 while (idxFldIter.hasElement()) {
400 FieldDef idef = idxFldIter.nextElement();
401 FieldIterator fldIter = tbl->fldList_.getIterator();
402 while (fldIter.hasElement()) {
403 FieldDef def = fldIter.nextElement();
404 if (0 == strcmp(def.fldName_, idef.fldName_)) {
405 if (NULL != def.bindVal_) {
// Binary keys: copy the bound string then convert it into a fresh
// calloc'd binary image; the original text buffer is released here.
406 if(type==typeBinary) {
407 AllDataType::copyVal(keyBindBuffer, def.bindVal_,
408 def.type_, 2*def.length_);
409 keyStartBuffer=calloc(1,info->compLength);
410 AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
411 free(keyBindBuffer);
412 } else {
413 AllDataType::copyVal(keyBindBuffer, def.bindVal_,
414 def.type_, def.length_);
415 keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_, def.length_);
417 } else {
418 AllDataType::copyVal(keyBindBuffer, (char *) tuple + def.offset_, def.type_, def.length_);
419 keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_, def.length_);
421 keyUpdated = true;
422 break;
// No indexed field involved in this update: nothing to move.
426 if (!keyUpdated) {
427 //printf("PRABA::key not updated\n");
428 free(keyStartBuffer);
429 free(oldKeyStartBuffer);
430 return OK;
432 //printf("PRABA::it is wrong coming here\n");
// Key value unchanged: the node stays in its bucket.
433 bool result = false;
434 if (type == typeComposite)
435 result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
436 OpEquals, info->type, info->compLength);
437 else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
438 OpEquals, info->type, info->compLength);
439 if (result) {
440 free(keyStartBuffer);
441 free(oldKeyStartBuffer);
442 return OK;
444 printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");
// First chunk page of the index holds the bucket array.
446 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
448 Bucket* buckets = (Bucket*)citer.nextElement();
450 //remove the node whose key is updated
// Old bucket: where the node currently lives (hashed from the old key).
451 int bucketNo = 0;
452 if (type == typeComposite)
453 bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
454 else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
455 printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
456 Bucket *bucket = &buckets[bucketNo];
// Undo payload for the delete-from-old-bucket half.
458 HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
459 hInfo1->tblPtr_ = tbl;
460 hInfo1->bucket_ = bucket;
461 hInfo1->tuple_ = tuple;
462 hInfo1->indexPtr_ = indexPtr;
463 hInfo1->keyPtr_ = keyPtr;
465 //it may run into deadlock, when two threads updates tuples which falls in
466 //same buckets.So take both the mutex one after another, which will reduce the
467 //deadlock window.
468 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
469 if (ret != 0)
471 delete hInfo1;
472 free(keyStartBuffer);
473 free(oldKeyStartBuffer);
474 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
475 return ErrLockTimeOut;
477 //insert node for the updated key value
// New bucket: where the node must move to (hashed from the new key).
478 int newBucketNo = computeHashBucket(type,
479 keyStartBuffer, noOfBuckets, info->compLength);
480 printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);
482 Bucket *bucket1 = &buckets[newBucketNo];
// Undo payload for the insert-into-new-bucket half.
483 HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
484 hInfo2->tblPtr_ = tbl;
// NOTE(review): hInfo2 records `bucket` (the OLD bucket) rather than
// `bucket1` (the new one); looks like a copy-paste slip -- confirm what
// insertLogicalUndoLog expects here.
485 hInfo2->bucket_ = bucket;
486 hInfo2->tuple_ = tuple;
487 hInfo2->indexPtr_ = indexPtr;
488 hInfo2->keyPtr_ = keyPtr;
// NOTE(review): the return value of this getLock is discarded and the
// following `if` re-tests the stale `ret` from the FIRST lock, so a
// failure to lock the new bucket goes undetected -- looks like a bug;
// should read `ret = bucket1->mutex_.getLock(...)`.
489 bucket1->mutex_.getLock(tbl->db_->procSlot);
490 if (ret != 0)
492 delete hInfo1;
493 delete hInfo2;
494 free(keyStartBuffer);
495 free(oldKeyStartBuffer);
496 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",newBucketNo);
497 return ErrLockTimeOut;
// Unlink the node from the old bucket's chain.
500 HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
501 if (head1)
503 BucketList list1(head1);
504 printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
505 list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
506 bucket->bucketList_=list1.getBucketListHead();
508 else
// Old bucket empty although the tuple is indexed: corruption; bail out.
510 printError(ErrSysInternal,"Update: Bucket list is null");
511 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
512 bucket->mutex_.releaseLock(tbl->db_->procSlot);
513 delete hInfo1;
514 delete hInfo2;
515 free(keyStartBuffer);
516 free(oldKeyStartBuffer);
517 return ErrSysInternal;
// Undo record for the delete half; on failure put the node back and exit.
519 DbRetVal rc = OK;
520 if (undoFlag) {
521 rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
522 if (rc !=OK)
524 BucketList list((HashIndexNode*) bucket->bucketList_);
525 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
526 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
527 bucket->bucketList_ = list.getBucketListHead();
528 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
529 bucket->mutex_.releaseLock(tbl->db_->procSlot);
530 delete hInfo1;
531 delete hInfo2;
532 free(keyStartBuffer);
533 free(oldKeyStartBuffer);
534 return rc;
// Link the node into the new bucket's chain.
537 HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
538 //Note:: the tuple will be in the same address location
539 //so not changing the keyptr and tuple during append
540 //only bucket where this node resides will only change
541 //if the index key is updated.
542 if (!head2)
544 DbRetVal rv = OK;
545 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
546 if (firstNode == NULL)
548 printError(rv, "Error in allocating hash node");
549 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
550 bucket->mutex_.releaseLock(tbl->db_->procSlot);
551 delete hInfo1;
552 delete hInfo2;
553 free(keyStartBuffer);
554 free(oldKeyStartBuffer);
555 return rv;
557 firstNode->ptrToKey_ = keyPtr;
558 firstNode->ptrToTuple_ = tuple;
559 firstNode->next_ = NULL;
560 bucket1->bucketList_ = (HashIndexNode*)firstNode;
561 printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
563 else
565 BucketList list2(head2);
566 printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
567 list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
568 bucket1->bucketList_ = list2.getBucketListHead();
// Undo record for the insert half; on failure revert the move entirely
// (delete new node, restore old node, pop the delete-half undo record).
570 if (undoFlag) {
572 rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
573 if (rc !=OK)
575 //reverting back the changes:delete new node and add the old
576 //node + remove logical undo log of the DeleteHashIndexOperation
577 BucketList list1((HashIndexNode*) bucket->bucketList_);
578 BucketList list2((HashIndexNode*) bucket1->bucketList_);
579 list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
580 list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
581 bucket->bucketList_ = list1.getBucketListHead();
582 bucket1->bucketList_ = list2.getBucketListHead();
583 UndoLogInfo *logInfo = tr->popUndoLog();
584 Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
585 chunk->free(tbl->sysDB_, logInfo);
// Normal exit: release both buckets, free undo payloads and key images.
588 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
589 bucket->mutex_.releaseLock(tbl->db_->procSlot);
590 delete hInfo1;
591 delete hInfo2;
592 free(keyStartBuffer);
593 free(oldKeyStartBuffer);
594 return rc;
597 //Following three methods are used to undo Logical Hash Indexes
598 DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
600 HashUndoLogInfo *info = (HashUndoLogInfo *) data;
601 TableImpl * tbl = (TableImpl *)info->tblPtr_;
602 Chunk *hChunk = (Chunk *) ((CINDEX *)info->indexPtr_)->hashNodeChunk_;
603 HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
604 BucketList list(head);
605 list.insert(hChunk, tbl->db_, info->keyPtr_, info->tuple_);
606 ((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
607 return OK;
610 DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
612 HashUndoLogInfo *info = (HashUndoLogInfo *) data;
613 TableImpl * tbl = (TableImpl *)info->tblPtr_;
614 Chunk *hChunk = (Chunk *) ((CINDEX *)info->indexPtr_)->hashNodeChunk_;
615 HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
616 BucketList list(head);
617 list.remove(hChunk, tbl->db_, info->keyPtr_);
618 ((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
619 return OK;