show table implemented partially
[csql.git] / src / storage / HashIndex.cxx
blobd572546ae0de4b55e0f48471bb73a893dac0e0ba
1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 ***************************************************************************/
16 #include<Index.h>
17 #include<CatalogTables.h>
18 #include<Lock.h>
19 #include<Debug.h>
20 #include<Table.h>
21 #include<TableImpl.h>
22 #include<Predicate.h>
23 #include<PredicateImpl.h>
/* Defines `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
   1986, p. 436]. */
/* Computes the hashpjw value of a NUL-terminated string.
 * Folds each character into a rolling 32-bit accumulator; whenever the
 * top nibble fills up it is redistributed into the low bits and cleared,
 * which keeps the hash well-mixed for short keys. */
unsigned int hashString(char *strVal)
{
    // Robustness: hash a NULL key to 0 instead of dereferencing it.
    if (strVal == 0) return 0;
    unsigned int hval = 0, g;
    for (char *str = strVal; *str != '\0'; ++str)
    {
        hval <<= 4;
        hval += (unsigned int) *str;
        g = hval & ((unsigned int) 0xf << (32 - 4));   // top 4 bits just shifted in
        if (g != 0)
        {
            hval ^= g >> (32 - 8);   // fold the high nibble back near the low end
            hval ^= g;               // clear the top bits (same as hval &= ~g)
        }
    }
    return hval;
}
/* Computes the hashpjw value of a fixed-length byte buffer (no NUL
 * terminator assumed); used for binary and composite keys.
 * Bug fix: the original loop condition was `iter != length`, which for a
 * negative length never terminates normally and reads far past the
 * buffer; `iter < length` makes a non-positive length hash to 0. */
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval = 0, g;
    char *str = strVal;
    for (int iter = 0; iter < length; ++iter)
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        g = hval & ((unsigned int) 0xf << (32 - 4));   // top 4 bits just shifted in
        if (g != 0)
        {
            hval ^= g >> (32 - 8);   // fold the high nibble back near the low end
            hval ^= g;               // clear the top bits (same as hval &= ~g)
        }
    }
    return hval;
}
69 unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
72 if (typeInt == type) {
73 int val = *(int*)key;
74 return val % noOfBuckets;
75 }else if (typeString == type) {
76 unsigned int val = hashString((char*)key);
77 return val % noOfBuckets;
78 }else if (typeShort == type) {
79 short val = *(short*) key;
80 return val % noOfBuckets;
81 }else if (typeLong == type) {
82 long val = *(long*) key;
83 return val % noOfBuckets;
84 }else if (typeLongLong == type) {
85 long long val = *(long long*) key;
86 return val % noOfBuckets;
87 }else if (typeByteInt == type) {
88 ByteInt val = *(ByteInt*)key;
89 return val % noOfBuckets;
90 }else if (typeDate == type) {
91 int val = *(int*)key;
92 return val % noOfBuckets;
93 }else if (typeTime == type) {
94 int val = *(int*)key;
95 return val % noOfBuckets;
96 }else if (typeComposite == type) {
97 unsigned int val = hashBinary((char*)key, length);
98 return val % noOfBuckets;
99 }else if (typeBinary == type) {
100 unsigned int val = hashBinary((char*)key, length);
101 return val % noOfBuckets;
102 }else if (typeULong == type) {
103 unsigned long val = *(unsigned long*)key;
104 return val % noOfBuckets;
106 printError(ErrSysFatal,"Type not supported for hashing\n");
107 return -1;
110 DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
112 HashIndexInfo *info = (HashIndexInfo*) indInfo;
113 CINDEX *iptr = (CINDEX*)indexPtr;
114 DbRetVal rc = OK;
115 int noOfBuckets = info->noOfBuckets;
116 int offset = info->fldOffset;
117 DataType type = info->type;
118 char *keyBuffer = (char*) malloc(info->compLength);
119 void *keyStartBuffer = keyBuffer, *keyPtr;
120 FieldIterator iter = info->idxFldList.getIterator();
121 while(iter.hasElement())
123 FieldDef *def = iter.nextElement();
124 keyPtr = (char *)tuple + def->offset_;
125 AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
126 keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
129 printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
130 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
131 Bucket* buckets = (Bucket*)citer.nextElement();
132 keyPtr =(void*)((char*)tuple + offset);
133 int bucketNo = 0;
134 if (type == typeComposite)
135 bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
136 else
137 bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
138 printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
139 Bucket *bucket = &(buckets[bucketNo]);
140 HashUndoLogInfo *hInfo = new HashUndoLogInfo();
141 hInfo->tblPtr_ = tbl;
142 hInfo->bucket_ = bucket;
143 hInfo->tuple_ = tuple;
144 hInfo->indexPtr_ = indexPtr;
145 hInfo->keyPtr_ = keyPtr;
146 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
147 if (ret != 0)
149 delete hInfo;
150 free(keyStartBuffer);
151 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
152 return ErrLockTimeOut;
154 HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
155 if (head && info->isUnique)
157 BucketList list(head);
158 BucketIter iter = list.getIterator();
159 HashIndexNode *node;
160 void *bucketTuple;
161 printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
162 bool res = false;
164 while((node = iter.next()) != NULL)
166 bucketTuple = node->ptrToTuple_;
167 if (type == typeComposite) {
168 FieldIterator fldIter = info->idxFldList.getIterator();
169 int i = 0;
170 while (fldIter.hasElement()) {
171 FieldDef *def = fldIter.nextElement();
172 res = AllDataType::compareVal((char *)bucketTuple + def->offset_, (char *)tuple + def->offset_, OpEquals, def->type_, def->length_);
173 if (!res) break;
176 else res = AllDataType::compareVal((void*)((char*)bucketTuple +offset), (void*)((char*)tuple +offset), OpEquals,type, info->compLength);
177 if (res)
179 delete hInfo;
180 free(keyStartBuffer);
181 printError(ErrUnique, "Unique key violation");
182 bucket->mutex_.releaseLock(tbl->db_->procSlot);
183 return ErrUnique;
188 printDebug(DM_HashIndex, "HashIndex insert into bucket list");
189 if (!head)
191 printDebug(DM_HashIndex, "HashIndex insert head is empty");
192 DbRetVal rv = OK;
193 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
194 if (firstNode == NULL)
196 bucket->mutex_.releaseLock(tbl->db_->procSlot);
197 delete hInfo;
198 free(keyStartBuffer);
199 return rv;
201 firstNode->ptrToKey_ = keyPtr;
202 firstNode->ptrToTuple_ = tuple;
203 firstNode->next_ = NULL;
204 bucket->bucketList_ = (HashIndexNode*)firstNode;
205 printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
207 else
209 BucketList list(head);
210 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
211 if (rc !=OK) {
212 bucket->mutex_.releaseLock(tbl->db_->procSlot);
213 delete hInfo;
214 free(keyStartBuffer);
215 return rc;
219 if (undoFlag) {
220 rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo, sizeof(HashUndoLogInfo));
221 if (rc !=OK)
223 BucketList list(head);
224 rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
225 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
226 bucket->bucketList_ = list.getBucketListHead();
229 free(keyStartBuffer);
230 delete hInfo; hInfo = NULL;
231 bucket->mutex_.releaseLock(tbl->db_->procSlot);
232 return rc;
236 DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
238 CINDEX *iptr = (CINDEX*)indexPtr;
240 HashIndexInfo *info = (HashIndexInfo*) indInfo;
241 DataType type = info->type;
242 int offset = info->fldOffset;
243 int noOfBuckets = info->noOfBuckets;
245 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
246 Bucket* buckets = (Bucket*)citer.nextElement();
248 char *keyBuffer = (char*) malloc(info->compLength);
249 void *keyStartBuffer = keyBuffer, *keyPtr;
250 FieldIterator iter = info->idxFldList.getIterator();
251 while(iter.hasElement())
253 FieldDef *def = iter.nextElement();
254 keyPtr = (char *)tuple + def->offset_;
255 AllDataType::copyVal(keyBuffer, keyPtr, def->type_, def->length_);
256 keyBuffer = keyBuffer + AllDataType::size(def->type_, def->length_);
259 keyPtr =(void*)((char*)tuple + offset);
260 int bucket = 0;
261 if (type == typeComposite)
262 bucket = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
263 else bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
265 Bucket *bucket1 = &buckets[bucket];
266 HashUndoLogInfo *hInfo = new HashUndoLogInfo();
267 hInfo->tblPtr_ = tbl;
268 hInfo->bucket_ = bucket1;
269 hInfo->tuple_ = tuple;
270 hInfo->indexPtr_ = indexPtr;
271 hInfo->keyPtr_ = keyPtr;
273 int ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
274 if (ret != 0)
276 delete hInfo;
277 free(keyStartBuffer);
278 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucket);
279 return ErrLockTimeOut;
281 HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;
283 if (!head) { printError(ErrNotExists, "Hash index does not exist:should never happen\n");
284 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
285 delete hInfo;
286 free(keyStartBuffer);
287 return ErrNotExists;
289 BucketList list(head);
290 printDebug(DM_HashIndex, "Removing hash index node from head %x", head);
292 DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
293 if (SplCase == rc)
295 printDebug(DM_HashIndex, "Removing hash index node from head ");
296 bucket1->bucketList_ = list.getBucketListHead();
297 rc = OK;
299 if (undoFlag) {
300 rc =tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo, sizeof(HashUndoLogInfo));
301 if (rc !=OK)
303 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
304 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
305 bucket1->bucketList_ = list.getBucketListHead();
308 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
309 delete hInfo;
310 free(keyStartBuffer);
311 return rc;
314 DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
316 CINDEX *iptr = (CINDEX*)indexPtr;
318 HashIndexInfo *info = (HashIndexInfo*) indInfo;
319 DataType type = info->type;
320 int offset = info->fldOffset;
321 int noOfBuckets = info->noOfBuckets;
323 //check whether the index key is updated or not
324 //if it is not updated return from here
325 void *keyPtr =(void*)((char*)tuple + offset);
326 char *kPtr= (char*)keyPtr;
328 //creating old key value buffer for composite primary keys
329 char *oldKeyBuffer = (char*) malloc(info->compLength);
330 void *oldKeyStartBuffer = oldKeyBuffer;
331 FieldIterator iter = info->idxFldList.getIterator();
332 while(iter.hasElement()) {
333 FieldDef *def = iter.nextElement();
334 keyPtr = (char *)tuple + def->offset_;
335 AllDataType::copyVal(oldKeyBuffer, keyPtr, def->type_, def->length_);
336 oldKeyBuffer = oldKeyBuffer + AllDataType::size(def->type_, def->length_);
339 keyPtr = (void *) kPtr;
340 //Iterate through the bind list and check
341 FieldIterator idxFldIter = info->idxFldList.getIterator();
342 char *keyBindBuffer ;
343 if(type==typeBinary) keyBindBuffer = (char*) malloc(2 * info->compLength);
344 else keyBindBuffer = (char*) malloc(info->compLength);
345 void *keyStartBuffer = (void*) keyBindBuffer;
346 bool keyUpdated = false;
348 while (idxFldIter.hasElement()) {
349 FieldDef *idef = idxFldIter.nextElement();
350 FieldIterator fldIter = tbl->fldList_.getIterator();
351 while (fldIter.hasElement()) {
352 FieldDef *def = fldIter.nextElement();
353 if (0 == strcmp(def->fldName_, idef->fldName_)) {
354 if (NULL != def->bindVal_) {
355 if(type==typeBinary) {
356 AllDataType::copyVal(keyBindBuffer, def->bindVal_,
357 def->type_, 2*def->length_);
358 keyStartBuffer=calloc(1,info->compLength);
359 AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
360 free(keyBindBuffer);
361 } else {
362 AllDataType::copyVal(keyBindBuffer, def->bindVal_,
363 def->type_, def->length_);
364 keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
366 } else {
367 AllDataType::copyVal(keyBindBuffer, (char *) tuple + def->offset_, def->type_, def->length_);
368 keyBindBuffer = keyBindBuffer + AllDataType::size(def->type_, def->length_);
370 keyUpdated = true;
371 break;
375 if (!keyUpdated) {
376 //printf("PRABA::key not updated\n");
377 free(keyStartBuffer);
378 free(oldKeyStartBuffer);
379 return OK;
381 //printf("PRABA::it is wrong coming here\n");
382 bool result = false;
383 if (type == typeComposite)
384 result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
385 OpEquals, info->type, info->compLength);
386 else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
387 OpEquals, info->type, info->compLength);
388 if (result) {
389 free(keyStartBuffer);
390 free(oldKeyStartBuffer);
391 return OK;
393 printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");
395 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
397 Bucket* buckets = (Bucket*)citer.nextElement();
399 //remove the node whose key is updated
400 int bucketNo = 0;
401 if (type == typeComposite)
402 bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
403 else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
404 printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
405 Bucket *bucket = &buckets[bucketNo];
407 HashUndoLogInfo *hInfo1 = new HashUndoLogInfo();
408 hInfo1->tblPtr_ = tbl;
409 hInfo1->bucket_ = bucket;
410 hInfo1->tuple_ = tuple;
411 hInfo1->indexPtr_ = indexPtr;
412 hInfo1->keyPtr_ = keyPtr;
414 //it may run into deadlock, when two threads updates tuples which falls in
415 //same buckets.So take both the mutex one after another, which will reduce the
416 //deadlock window.
417 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
418 if (ret != 0)
420 delete hInfo1;
421 free(keyStartBuffer);
422 free(oldKeyStartBuffer);
423 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
424 return ErrLockTimeOut;
426 //insert node for the updated key value
427 int newBucketNo = computeHashBucket(type,
428 keyStartBuffer, noOfBuckets, info->compLength);
429 printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);
431 Bucket *bucket1 = &buckets[newBucketNo];
432 HashUndoLogInfo *hInfo2 = new HashUndoLogInfo();
433 hInfo2->tblPtr_ = tbl;
434 hInfo2->bucket_ = bucket;
435 hInfo2->tuple_ = tuple;
436 hInfo2->indexPtr_ = indexPtr;
437 hInfo2->keyPtr_ = keyPtr;
438 bucket1->mutex_.getLock(tbl->db_->procSlot);
439 if (ret != 0)
441 delete hInfo1;
442 delete hInfo2;
443 free(keyStartBuffer);
444 free(oldKeyStartBuffer);
445 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",newBucketNo);
446 return ErrLockTimeOut;
449 HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
450 if (head1)
452 BucketList list1(head1);
453 printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
454 list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
455 bucket->bucketList_=list1.getBucketListHead();
457 else
459 printError(ErrSysInternal,"Update: Bucket list is null");
460 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
461 bucket->mutex_.releaseLock(tbl->db_->procSlot);
462 delete hInfo1;
463 delete hInfo2;
464 free(keyStartBuffer);
465 free(oldKeyStartBuffer);
466 return ErrSysInternal;
468 DbRetVal rc = OK;
469 if (undoFlag) {
470 rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo));
471 if (rc !=OK)
473 BucketList list((HashIndexNode*) bucket->bucketList_);
474 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
475 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n");
476 bucket->bucketList_ = list.getBucketListHead();
477 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
478 bucket->mutex_.releaseLock(tbl->db_->procSlot);
479 delete hInfo1;
480 delete hInfo2;
481 free(keyStartBuffer);
482 free(oldKeyStartBuffer);
483 return rc;
486 HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
487 //Note:: the tuple will be in the same address location
488 //so not changing the keyptr and tuple during append
489 //only bucket where this node resides will only change
490 //if the index key is updated.
491 if (!head2)
493 DbRetVal rv = OK;
494 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
495 if (firstNode == NULL)
497 printError(rv, "Error in allocating hash node");
498 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
499 bucket->mutex_.releaseLock(tbl->db_->procSlot);
500 delete hInfo1;
501 delete hInfo2;
502 free(keyStartBuffer);
503 free(oldKeyStartBuffer);
504 return rv;
506 firstNode->ptrToKey_ = keyPtr;
507 firstNode->ptrToTuple_ = tuple;
508 firstNode->next_ = NULL;
509 bucket1->bucketList_ = (HashIndexNode*)firstNode;
510 printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
512 else
514 BucketList list2(head2);
515 printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
516 list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
517 bucket1->bucketList_ = list2.getBucketListHead();
519 if (undoFlag) {
521 rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, InsertHashIndexOperation, hInfo2, sizeof(HashUndoLogInfo));
522 if (rc !=OK)
524 //reverting back the changes:delete new node and add the old
525 //node + remove logical undo log of the DeleteHashIndexOperation
526 BucketList list1((HashIndexNode*) bucket->bucketList_);
527 BucketList list2((HashIndexNode*) bucket1->bucketList_);
528 list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
529 list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
530 bucket->bucketList_ = list1.getBucketListHead();
531 bucket1->bucketList_ = list2.getBucketListHead();
532 UndoLogInfo *logInfo = tr->popUndoLog();
533 Chunk *chunk = tbl->sysDB_->getSystemDatabaseChunk(UndoLogTableID);
534 chunk->free(tbl->sysDB_, logInfo);
537 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
538 bucket->mutex_.releaseLock(tbl->db_->procSlot);
539 delete hInfo1;
540 delete hInfo2;
541 free(keyStartBuffer);
542 free(oldKeyStartBuffer);
543 return rc;
546 //Following three methods are used to undo Logical Hash Indexes
547 DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data)
549 HashUndoLogInfo *info = (HashUndoLogInfo *) data;
550 TableImpl * tbl = (TableImpl *)info->tblPtr_;
551 Chunk *hChunk = (Chunk *) ((CINDEX *)info->indexPtr_)->hashNodeChunk_;
552 HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
553 BucketList list(head);
554 list.insert(hChunk, tbl->db_, info->keyPtr_, info->tuple_);
555 ((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
556 return OK;
559 DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data)
561 HashUndoLogInfo *info = (HashUndoLogInfo *) data;
562 TableImpl * tbl = (TableImpl *)info->tblPtr_;
563 Chunk *hChunk = (Chunk *) ((CINDEX *)info->indexPtr_)->hashNodeChunk_;
564 HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_;
565 BucketList list(head);
566 list.remove(hChunk, tbl->db_, info->keyPtr_);
567 ((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead();
568 return OK;