1919184 DMLTest delete fails when setUndoLogging is disabled
[csql.git] / src / server / HashIndex.cxx
blob6419d62c0bea5f8380f38a828a7f07ac1da1e2b7
1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 ***************************************************************************/
16 #include<Index.h>
17 #include<CatalogTables.h>
18 #include<Lock.h>
19 #include<Debug.h>
20 #include<Table.h>
21 #include<TableImpl.h>
22 #include<Predicate.h>
23 #include<PredicateImpl.h>
/* Defines the `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
   1986, p. 436].
   Folds each character of the NUL-terminated string into a 32-bit
   accumulator; the high nibble is mixed back in whenever it becomes
   non-zero so long strings keep dispersing. Returns the raw hash
   (callers reduce it modulo the bucket count). */
unsigned int hashString(char *strVal)
{
    unsigned int hval, g;
    hval = 0;
    char *str = strVal;
    while (*str != '\0')
    {
        hval <<= 4;
        hval += (unsigned int) *str++;
        // g = top 4 bits of hval; non-zero means we are about to lose bits
        // on the next shift, so fold them back into the low bits first.
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g; // clears the top nibble (x ^ x = 0 on those bits)
        }
    }
    return hval;
}
48 unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets)
51 switch(type)
53 case typeInt:
55 int val = *(int*)key;
56 return val % noOfBuckets;
57 break;
59 case typeLong:
61 long val = *(long*)key;
62 return val % noOfBuckets;
63 break;
65 case typeULong:
67 unsigned long val = *(unsigned long*)key;
68 return val % noOfBuckets;
69 break;
71 case typeLongLong:
73 long long val = *(long long*)key;
74 return val % noOfBuckets;
75 break;
77 case typeShort:
79 short val = *(short*)key;
80 return val % noOfBuckets;
81 break;
83 case typeByteInt:
85 ByteInt val = *(ByteInt*)key;
86 return val % noOfBuckets;
87 break;
89 case typeDate:
91 int val = *(int*)key;
92 return val % noOfBuckets;
93 break;
95 case typeTime:
97 int val = *(int*)key;
98 return val % noOfBuckets;
99 break;
101 case typeTimeStamp:
103 TimeStamp val = *(TimeStamp*)key;
104 //TODO return val % noOfBuckets;
105 break;
107 case typeDouble:
109 //TODO
110 break;
112 case typeFloat:
114 //TODO
115 break;
117 case typeDecimal:
119 //TODO::for porting
121 case typeString:
123 unsigned int val = hashString((char*)key);
124 return val % noOfBuckets;
126 case typeBinary:
128 //TODO
130 default:
132 break;
135 return -1;
138 //TODO::composite keys are not supported currently
139 DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
141 SingleFieldHashIndexInfo *info = (SingleFieldHashIndexInfo*) indInfo;
142 INDEX *iptr = (INDEX*)indexPtr;
144 DataType type = info->type;
145 char *name = info->fldName;
146 int offset = info->offset;
147 int noOfBuckets = info->noOfBuckets;
148 int length = info->length;
150 printDebug(DM_HashIndex, "Inserting hash index node for field %s", name);
151 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
152 Bucket* buckets = (Bucket*)citer.nextElement();
153 void *keyPtr =(void*)((char*)tuple + offset);
155 int bucketNo = computeHashBucket(type,
156 keyPtr, noOfBuckets);
157 printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);
158 Bucket *bucket = &(buckets[bucketNo]);
160 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
161 if (ret != 0)
163 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
164 return ErrLockTimeOut;
166 HashIndexNode *head = (HashIndexNode*) bucket->bucketList_;
167 if (info->isUnique)
169 BucketList list(head);
170 BucketIter iter = list.getIterator();
171 HashIndexNode *node;
172 void *bucketTuple;
173 printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
174 while((node = iter.next()) != NULL)
176 bucketTuple = node->ptrToTuple_;
177 if (AllDataType::compareVal((void*)((char*)bucketTuple +offset),
178 (void*)((char*)tuple +offset), OpEquals,type, length))
180 printError(ErrUnique, "Unique key violation");
181 bucket->mutex_.releaseLock(tbl->db_->procSlot);
182 return ErrUnique;
187 printDebug(DM_HashIndex, "HashIndex insert into bucket list");
188 if (!head)
190 printDebug(DM_HashIndex, "HashIndex insert head is empty");
191 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_));
192 firstNode->ptrToKey_ = keyPtr;
193 firstNode->ptrToTuple_ = tuple;
194 firstNode->next_ = NULL;
195 bucket->bucketList_ = (HashIndexNode*)firstNode;
196 printDebug(DM_HashIndex, "HashIndex insert new node %x in empty bucket", bucket->bucketList_);
198 else
200 BucketList list(head);
201 list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
203 DbRetVal rc = OK;
204 if (undoFlag) {
206 rc = tr->appendLogicalUndoLog(tbl->sysDB_, InsertHashIndexOperation,
207 tuple, sizeof(void*), indexPtr);
208 if (rc !=OK)
210 //TODO::remove it from the bucketlist
214 bucket->mutex_.releaseLock(tbl->db_->procSlot);
215 return rc;
219 //TODO::composite keys are not supported currently
220 DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
222 INDEX *iptr = (INDEX*)indexPtr;
224 SingleFieldHashIndexInfo *info = (SingleFieldHashIndexInfo*) indInfo;
225 DataType type = info->type;
226 char *name = info->fldName;
227 int offset = info->offset;
228 int noOfBuckets = info->noOfBuckets;
230 printDebug(DM_HashIndex, "Removing hash index node for field %s", name);
231 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
232 Bucket* buckets = (Bucket*)citer.nextElement();
233 void *keyPtr =(void*)((char*)tuple + offset);
235 int bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets);
237 Bucket *bucket1 = &buckets[bucket];
239 int ret = bucket1->mutex_.getLock(tbl->db_->procSlot);
240 if (ret != 0)
242 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucket);
243 return ErrLockTimeOut;
245 HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_;
247 if (!head) { printError(ErrNotExists, "Hash index does not exist:should never happen\n"); return ErrNotExists; }
248 BucketList list(head);
249 printDebug(DM_HashIndex, "Removing hash index node from head %x", head);
251 DbRetVal rc = list.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
252 if (SplCase == rc)
254 printDebug(DM_HashIndex, "Removing hash index node from head with only none node");
255 bucket1->bucketList_ = 0;
256 rc = OK;
258 if (undoFlag) {
259 rc =tr->appendLogicalUndoLog(tbl->sysDB_, DeleteHashIndexOperation,
260 tuple, sizeof(void*), indexPtr);
261 if (rc !=OK)
263 //TODO::add it back to the bucketlist
266 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
267 return rc;
270 //TODO::composite keys are not supported currently
271 DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag)
273 INDEX *iptr = (INDEX*)indexPtr;
275 SingleFieldHashIndexInfo *info = (SingleFieldHashIndexInfo*) indInfo;
276 DataType type = info->type;
277 char *name = info->fldName;
278 int offset = info->offset;
279 int noOfBuckets = info->noOfBuckets;
281 printDebug(DM_HashIndex, "Updating hash index node for field %s", name);
283 //check whether the index key is updated or not
284 //if it is not updated return from here
285 void *keyPtr =(void*)((char*)tuple + offset);
286 //Iterate through the bind list and check
287 FieldIterator fldIter = tbl->fldList_.getIterator();
288 void *newKey = NULL;
289 while (fldIter.hasElement())
291 FieldDef def = fldIter.nextElement();
292 if (0 == strcmp(def.fldName_, name))
294 if (NULL == def.bindVal_)
295 return OK;
296 bool result = AllDataType::compareVal(keyPtr, def.bindVal_,
297 OpEquals, def.type_, def.length_);
298 if (result) return OK; else newKey = def.bindVal_;
301 printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");
303 if (newKey == NULL)
305 printError(ErrSysInternal,"Internal Error:: newKey is Null");
306 return ErrSysInternal;
308 ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
310 Bucket* buckets = (Bucket*)citer.nextElement();
312 //remove the node whose key is updated
313 int bucketNo = computeHashBucket(type,
314 keyPtr, noOfBuckets);
315 printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
316 Bucket *bucket = &buckets[bucketNo];
318 //it may run into deadlock, when two threads updates tuples which falls in
319 //same buckets.So take both the mutex one after another, which will reduce the
320 //deadlock window.
321 int ret = bucket->mutex_.getLock(tbl->db_->procSlot);
322 if (ret != 0)
324 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",bucketNo);
325 return ErrLockTimeOut;
327 //insert node for the updated key value
328 int newBucketNo = computeHashBucket(type,
329 newKey, noOfBuckets);
330 printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo);
332 Bucket *bucket1 = &buckets[newBucketNo];
333 bucket1->mutex_.getLock(tbl->db_->procSlot);
334 if (ret != 0)
336 printError(ErrLockTimeOut,"Unable to acquire bucket Mutex for bucket %d",newBucketNo);
337 return ErrLockTimeOut;
340 HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_;
341 if (head1)
343 BucketList list1(head1);
344 printDebug(DM_HashIndex, "Updating hash index node: Removing node from list with head %x", head1);
345 list1.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr);
347 else
349 printError(ErrSysInternal,"Update: Bucket list is null");
350 return ErrSysInternal;
352 DbRetVal rc = OK;
353 if (undoFlag) {
354 rc = tr->appendLogicalUndoLog(tbl->sysDB_, DeleteHashIndexOperation,
355 tuple, sizeof(void*), indexPtr);
356 if (rc !=OK)
358 //TODO::add it back to the bucket list
359 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
360 bucket->mutex_.releaseLock(tbl->db_->procSlot);
361 return rc;
364 HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_;
365 //Note:: the tuple will be in the same address location
366 //so not changing the keyptr and tuple during append
367 //only bucket where this node resides will only change
368 //if the index key is updated.
369 if (!head2)
371 HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_));
372 firstNode->ptrToKey_ = keyPtr;
373 firstNode->ptrToTuple_ = tuple;
374 firstNode->next_ = NULL;
375 bucket1->bucketList_ = (HashIndexNode*)firstNode;
376 printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode);
378 else
380 BucketList list2(head2);
381 printDebug(DM_HashIndex, "Updating hash index node: Adding node to list with head %x", head2);
382 list2.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple);
384 if (undoFlag) {
386 rc = tr->appendLogicalUndoLog(tbl->sysDB_, InsertHashIndexOperation,
387 tuple, sizeof(void*), indexPtr);
388 if (rc !=OK)
390 //TODO::revert back the changes:delete and add the node + remove the logical undo log
391 //of the DeleteHashIndexOperation
394 bucket1->mutex_.releaseLock(tbl->db_->procSlot);
395 bucket->mutex_.releaseLock(tbl->db_->procSlot);
396 return rc;