/* code reorg
 * [csql.git] / src / server / HashIndex.cxx
 * blob 735902fc9eb239fe3ecbf0ecc29e6b03b3f12934
 */
1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 ***************************************************************************/
16 #include<Index.h>
17 #include<CatalogTables.h>
18 #include<Lock.h>
19 #include<Debug.h>
20 #include<Table.h>
21 #include<TableImpl.h>
22 #include<Predicate.h>
23 #include<PredicateImpl.h>
/* `hashpjw' hash function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools] */
unsigned int hashString(char *strVal)
{
    unsigned int hval = 0;
    // Fold each character into the accumulator, 4 bits at a time; the top
    // nibble is mixed back into the low bits so long strings keep dispersing.
    for (char *cp = strVal; *cp != '\0'; ++cp)
    {
        hval = (hval << 4) + (unsigned int) *cp;
        unsigned int topNibble = hval & ((unsigned int) 0xf << (32 - 4));
        if (topNibble != 0)
        {
            hval ^= topNibble >> (32 - 8);
            hval ^= topNibble;
        }
    }
    return hval;
}
/* hashpjw over a length-delimited byte buffer (keys that may contain
 * embedded NULs: binary and composite index keys). */
unsigned int hashBinary(char *strVal, int length)
{
    unsigned int hval = 0;
    char *cp = strVal;
    // Same folding scheme as hashString, but driven by an explicit byte
    // count instead of a NUL terminator.  The `i != length` form preserves
    // the original loop condition exactly.
    for (int i = 0; i != length; ++i)
    {
        hval = (hval << 4) + (unsigned int) *cp++;
        unsigned int topNibble = hval & ((unsigned int) 0xf << (32 - 4));
        if (topNibble != 0)
        {
            hval ^= topNibble >> (32 - 8);
            hval ^= topNibble;
        }
    }
    return hval;
}
68 unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length)
71 switch(type)
73 case typeInt:
75 int val = *(int*)key;
76 return val % noOfBuckets;
77 break;
79 case typeLong:
81 long val = *(long*)key;
82 return val % noOfBuckets;
83 break;
85 case typeULong:
87 unsigned long val = *(unsigned long*)key;
88 return val % noOfBuckets;
89 break;
91 case typeLongLong:
93 long long val = *(long long*)key;
94 return val % noOfBuckets;
95 break;
97 case typeShort:
99 short val = *(short*)key;
100 return val % noOfBuckets;
101 break;
103 case typeByteInt:
105 ByteInt val = *(ByteInt*)key;
106 return val % noOfBuckets;
107 break;
109 case typeDate:
111 int val = *(int*)key;
112 return val % noOfBuckets;
113 break;
115 case typeTime:
117 int val = *(int*)key;
118 return val % noOfBuckets;
119 break;
121 case typeTimeStamp:
123 TimeStamp val = *(TimeStamp*)key;
124 //TODO return val % noOfBuckets;
125 break;
127 case typeDouble:
129 //TODO
130 break;
132 case typeFloat:
134 //TODO
135 break;
137 case typeDecimal:
139 //TODO::for porting
141 case typeString:
143 unsigned int val = hashString((char*)key);
144 return val % noOfBuckets;
146 case typeComposite:
147 case typeBinary:
149 unsigned int val = hashBinary((char*)key, length);
150 return val % noOfBuckets;
152 default:
154 break;
157 return -1;
160 DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
162 HashIndexInfo *info = (HashIndexInfo*) indInfo;
163 INDEX *iptr = (INDEX*)indexPtr;
164 DbRetVal rc = OK;
165 int noOfBuckets = info->noOfBuckets;
166 int offset = info->fldOffset;
167 DataType type = info->type;
168 printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
169 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
170 Bucket* buckets = (Bucket*)citer.nextElement();
171 void *keyPtr =(void*)((char*)tuple + offset);
173 int bucketNo = computeHashBucket(type,
174 keyPtr, noOfBuckets, info->compLength);
175 printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
176 Bucket *bucket = &(buckets[bucketNo]);
178 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
179 if (ret != 0)
181 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
182 return ErrLockTimeOut;
184 HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
185 if (info->isUnique)
187 BucketList list(head);
188 BucketIter iter = list.getIterator();
189 HashIndexNode *node;
190 void *bucketTuple;
191 printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
192 while((node = iter.next()) != NULL)
194 bucketTuple = node->ptrToTuple_;
195 if (AllDataType::compareVal((void*)((char*)bucketTuple +offset),
196 (void*)((char*)tuple +offset), OpEquals,type, info->compLength))
198 printError(ErrUnique, "Unique key violation");
199 bucket->mutex_.releaseLock(tbl->db_->procSlot);
200 return ErrUnique;
205 printDebug(DM_HashIndex, "HashIndex insert into bucket list");
206 if (!head)
208 printDebug(DM_HashIndex, "HashIndex insert head is empty");
209 DbRetVal rv = OK;
210 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
211 if (firstNode == NULL)
213 bucket->mutex_.releaseLock(tbl->db_->procSlot);
214 return rv;
216 firstNode->ptrToKey_ = keyPtr;
217 firstNode->ptrToTuple_ = tuple;
218 firstNode->next_ = NULL;
219 bucket->bucketList_ = (HashIndexNode*)firstNode;
220 printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
222 else
224 BucketList list(head);
225 rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
226 if (rc !=OK) {
227 bucket->mutex_.releaseLock(tbl->db_->procSlot);
228 return rc;
232 if (undoFlag) {
234 rc = tr->appendLogicalUndoLog(tbl->sysDB_, InsertHashIndexOperation,
235 tuple, sizeof(void*), indexPtr);
236 if (rc !=OK)
238 BucketList list(head);
239 rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
240 if (rc !=OK) printError(ErrSysFatal, "double failure on undo log insert followed by hash bucket list remove\n");
244 bucket->mutex_.releaseLock(tbl->db_->procSlot);
245 return rc;
249 DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
251 INDEX *iptr = (INDEX*)indexPtr;
253 HashIndexInfo *info = (HashIndexInfo*) indInfo;
254 DataType type = info->type;
255 int offset = info->fldOffset;
256 int noOfBuckets = info->noOfBuckets;
258 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
259 Bucket* buckets = (Bucket*)citer.nextElement();
260 void *keyPtr =(void*)((char*)tuple + offset);
262 int bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
264 Bucket *bucket1 = &buckets[bucket];
266 int ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
267 if (ret != 0)
269 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucket);
270 return ErrLockTimeOut;
272 HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;
274 if (!head) { printError(ErrNotExists, "Hash index does not exist:should never happen\n"); return ErrNotExists; }
275 BucketList list(head);
276 printDebug(DM_HashIndex, "Removing hash index node from head %x", head);
278 DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
279 if (SplCase == rc)
281 printDebug(DM_HashIndex, "Removing hash index node from head with only none node");
282 bucket1->bucketList_ = 0;
283 rc = OK;
285 if (undoFlag) {
286 rc =tr->appendLogicalUndoLog(tbl->sysDB_, DeleteHashIndexOperation,
287 tuple, sizeof(void*), indexPtr);
288 if (rc !=OK)
290 //TODO::add it back to the bucketlist
293 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
295 return rc;
298 DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
300 INDEX *iptr = (INDEX*)indexPtr;
302 HashIndexInfo *info = (HashIndexInfo*) indInfo;
303 DataType type = info->type;
304 int offset = info->fldOffset;
305 int noOfBuckets = info->noOfBuckets;
307 //check whether the index key is updated or not
308 //if it is not updated return from here
309 void *keyPtr =(void*)((char*)tuple + offset);
310 char *kPtr= (char*)keyPtr;
311 //Iterate through the bind list and check
312 FieldIterator idxFldIter = info->idxFldList.getIterator();
313 char *keyBindBuffer = (char*) malloc(info->compLength);
314 void *keyStartBuffer = (void*) keyBindBuffer;
315 bool keyUpdated = false;
316 while (idxFldIter.hasElement())
318 FieldDef idef = idxFldIter.nextElement();
319 FieldIterator fldIter = tbl->fldList_.getIterator();
320 while (fldIter.hasElement())
322 FieldDef def = fldIter.nextElement();
323 if (0 == strcmp(def.fldName_, idef.fldName_))
325 if (NULL != def.bindVal_) {
326 AllDataType::copyVal(keyBindBuffer, def.bindVal_,
327 def.type_, def.length_);
328 keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_,
329 def.length_);
330 keyUpdated = true;
331 break;
336 if (!keyUpdated)
338 //printf("PRABA::key not updated\n");
339 free(keyStartBuffer);
340 return OK;
342 //printf("PRABA::it is wrong coming here\n");
343 bool result = AllDataType::compareVal(kPtr, keyStartBuffer,
344 OpEquals, info->type, info->compLength);
345 if (result) return OK;
347 printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");
349 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
351 Bucket* buckets = (Bucket*)citer.nextElement();
353 //remove the node whose key is updated
354 int bucketNo = computeHashBucket(type,
355 keyPtr, noOfBuckets, info->compLength);
356 printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
357 Bucket *bucket = &buckets[bucketNo];
359 //it may run into deadlock, when two threads updates tuples which falls in
360 //same buckets.So take both the mutex one after another, which will reduce the
361 //deadlock window.
362 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
363 if (ret != 0)
365 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
366 return ErrLockTimeOut;
368 //insert node for the updated key value
369 int newBucketNo = computeHashBucket(type,
370 keyStartBuffer, noOfBuckets);
371 printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);
373 Bucket *bucket1 = &buckets[newBucketNo];
374 bucket1->mutex_.getLock(tbl->db_->procSlot);
375 if (ret != 0)
377 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",newBucketNo);
378 return ErrLockTimeOut;
381 HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
382 if (head1)
384 BucketList list1(head1);
385 printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
386 list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
388 else
390 printError(ErrSysInternal,"Update: Bucket list is null");
391 return ErrSysInternal;
393 DbRetVal rc = OK;
394 if (undoFlag) {
395 rc = tr->appendLogicalUndoLog(tbl->sysDB_, DeleteHashIndexOperation,
396 tuple, sizeof(void*), indexPtr);
397 if (rc !=OK)
399 //TODO::add it back to the bucket list
400 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
401 bucket->mutex_.releaseLock(tbl->db_->procSlot);
402 return rc;
405 HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
406 //Note:: the tuple will be in the same address location
407 //so not changing the keyptr and tuple during append
408 //only bucket where this node resides will only change
409 //if the index key is updated.
410 if (!head2)
412 DbRetVal rv = OK;
413 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv));
414 if (firstNode == NULL)
416 printError(rv, "Error in allocating hash node");
417 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
418 bucket->mutex_.releaseLock(tbl->db_->procSlot);
419 return rv;
421 firstNode->ptrToKey_ = keyPtr;
422 firstNode->ptrToTuple_ = tuple;
423 firstNode->next_ = NULL;
424 bucket1->bucketList_ = (HashIndexNode*)firstNode;
425 printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
427 else
429 BucketList list2(head2);
430 printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
431 list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
433 if (undoFlag) {
435 rc = tr->appendLogicalUndoLog(tbl->sysDB_, InsertHashIndexOperation,
436 tuple, sizeof(void*), indexPtr);
437 if (rc !=OK)
439 //TODO::revert back the changes:delete and add the node + remove the logical undo log
440 //of the DeleteHashIndexOperation
443 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
444 bucket->mutex_.releaseLock(tbl->db_->procSlot);
446 return rc;