From c4b5500a4698d9066ec4902d2b08539f7eb079ad Mon Sep 17 00:00:00 2001 From: prabatuty Date: Sun, 27 Mar 2011 18:42:25 +0000 Subject: [PATCH] Trie Implementation --- include/Allocator.h | 1 + include/Database.h | 1 + include/DatabaseManagerImpl.h | 10 ++ include/Debug.h | 10 +- include/Index.h | 53 ++++-- include/Info.h | 1 + include/Table.h | 1 + include/Transaction.h | 9 +- src/storage/BucketIter.cxx | 4 +- src/storage/BucketList.cxx | 19 ++- src/storage/CatalogTables.cxx | 3 +- src/storage/Chunk.cxx | 19 +++ src/storage/DatabaseManagerImpl.cxx | 242 +++++++++++++++++++++----- src/storage/Debug.cxx | 5 +- src/storage/HashIndex.cxx | 30 ++-- src/storage/Index.cxx | 5 + src/storage/TableImpl.cxx | 32 +++- src/storage/Transaction.cxx | 23 +++ src/storage/TrieIndex.cxx | 330 ++++++++++++++++++++++++++++++++++++ src/storage/TupleIterator.cxx | 48 +++++- 20 files changed, 750 insertions(+), 96 deletions(-) create mode 100644 src/storage/TrieIndex.cxx diff --git a/include/Allocator.h b/include/Allocator.h index 3f390da6..452b57bc 100644 --- a/include/Allocator.h +++ b/include/Allocator.h @@ -175,6 +175,7 @@ class Chunk void setFirstPage(void *fp) { firstPage_ = fp;} void setCurPage(void *cp) { curPage_ = cp;} PageInfo* getPageInfo(Database *db, void *ptr); + void* tryAllocate(Database *db, DbRetVal *status); void* allocate(Database *db, DbRetVal *status); void* allocate(Database *db, size_t size, DbRetVal *status); diff --git a/include/Database.h b/include/Database.h index 68a9fbb6..d91284ca 100644 --- a/include/Database.h +++ b/include/Database.h @@ -198,6 +198,7 @@ class DllExport Database friend class Table; friend class TreeIndex; friend class HashIndex; + friend class TrieIndex; friend class Transaction; }; diff --git a/include/DatabaseManagerImpl.h b/include/DatabaseManagerImpl.h index 1a4d91f1..2d37140f 100644 --- a/include/DatabaseManagerImpl.h +++ b/include/DatabaseManagerImpl.h @@ -81,10 +81,20 @@ class DllExport DatabaseManagerImpl : public DatabaseManager 
FieldNameList &fldList, int bucketSize, bool isUnique, bool isPrimary = false); DbRetVal createTreeIndex(const char *indName, const char *tableName, FieldNameList &fldList, int bucketSize, bool isUnique, bool isPrimary = false); + DbRetVal createTrieIndex(const char *indName, const char *tableName, + FieldNameList &fldList, bool isUnique, bool isPrimary = false); void initHashBuckets(Bucket *buck, int bucketSize); DbRetVal dropIndexInt(const char *name, bool takeLock); DbRetVal writeSchemaFile(); + DbRetVal updateIndexCatalogTables(const char *indName,void *tptr, + char **fptr, FieldNameList &fldList, bool isUnique, + Chunk* chunkInfo, Chunk* hChunk); + DbRetVal validateIndex(const char *tblName, FieldNameList &fldList, + void **tptr, char ***fptr, bool isPrimary); + DbRetVal removeIndexCatalogTables(const char *name, void *chunk, void* hchunk, void *tptr); + DbRetVal removeIndexChunks(void* chunk, void* hchunk, IndexType iType); + public: Database* db() { return db_; } diff --git a/include/Debug.h b/include/Debug.h index dd48d5c2..0797d2bf 100644 --- a/include/Debug.h +++ b/include/Debug.h @@ -19,6 +19,7 @@ extern int DebugDM_RedoLog; extern int DebugDM_Index; extern int DebugDM_HashIndex; extern int DebugDM_TreeIndex; +extern int DebugDM_TrieIndex; extern int DebugDM_SystemDatabase; extern int DebugDM_Database; extern int DebugDM_Table; @@ -35,9 +36,9 @@ extern int DebugDM_Warning; int printStackTrace(); #ifdef WINNT -DllExport int printError1(DbRetVal val, char* fname, int lno, char *format, ...); +DllExport int printError1(DbRetVal val, char* fname, int lno, const char *format, ...); #else -extern int printError1(DbRetVal val, char* fname, int lno, char *format, ...); +extern int printError1(DbRetVal val, char* fname, int lno, const char *format, ...); #endif #define printError(a, ...) 
printError1(a, __FILE__, __LINE__, __VA_ARGS__) @@ -52,6 +53,7 @@ enum DebugModule DM_Index, DM_HashIndex, DM_TreeIndex, + DM_TrieIndex, DM_SystemDatabase, DM_Database, DM_Table, @@ -69,12 +71,12 @@ enum DebugModule static char moduleNames[][20] = { "Alloc", "VariableAlloc", "Lock", "Trans", "UndoLog", "RedoLog", "Index", - "HashIndex", "TreeIndex", "SysDb", "Db", "Table", "Predicate", "Iter", + "HashIndex", "TreeIndex", "TrieIndex", "SysDb", "Db", "Table", "Predicate", "Iter", "Procmgmt", "Network", "Gateway", "Adapter", "SqlLog", "CacheServer", "TEST", "Warning" }; -extern int printDebug1(int module, char *fname, int lineno, char *format, ...); +extern int printDebug1(int module, char *fname, int lineno, const char *format, ...); #ifdef DEBUG diff --git a/include/Index.h b/include/Index.h index 1a8190d5..bc44a48f 100644 --- a/include/Index.h +++ b/include/Index.h @@ -18,6 +18,8 @@ #include #include #include +#define TRIE_SIZE 10 +#define TRIE_MAX_LENGTH 64 class Database; class Chunk; @@ -31,12 +33,19 @@ class Bucket Mutex mutex_; void *bucketList_; }; -class HashIndexNode +class IndexNode { public: void *ptrToKey_; void *ptrToTuple_; - HashIndexNode *next_; + IndexNode *next_; +}; + +class TrieNode +{ + public: + IndexNode *head_[TRIE_SIZE]; + TrieNode *next_[TRIE_SIZE]; }; class IndexInfo; @@ -65,31 +74,32 @@ class TreeNode class BucketIter { - HashIndexNode *iter; - HashIndexNode *head; + IndexNode *iter; + IndexNode *head; bool isUnique; bool recordsOver; public: BucketIter(){iter = head = NULL; isUnique=false; recordsOver = false;} - void setHead(HashIndexNode *hd) { iter = head = hd; recordsOver=false;} - BucketIter(HashIndexNode *head) { iter = head = head; + void setHead(IndexNode *hd) { iter = head = hd; recordsOver=false;} + BucketIter(IndexNode *head) { iter = head = head; isUnique=false; recordsOver = false;} void setUnique() { isUnique = true; } bool getUnique() { return isUnique; } - HashIndexNode* next(); + IndexNode* next(); void reset() { iter = 
head; recordsOver=false;} friend class BucketList; }; class BucketList { - HashIndexNode *head; + IndexNode *head; public: BucketList(){ head = NULL;} - BucketList(HashIndexNode *h){ head = h; } + BucketList(IndexNode *h){ head = h; } void *getBucketListHead(){ return head;} DbRetVal insert(Chunk *chunk, Database *db, void *key, void *tuple); DbRetVal remove(Chunk *chunk, Database *db, void *key); + void print(); BucketIter getIterator() { BucketIter it; @@ -101,6 +111,7 @@ class HashIndex; class IndexInfo; class HashIndexInfo; class TreeIndex; +class TrieIndex; class Index { // create (one) object for each indexing mechanisms here @@ -108,6 +119,7 @@ class Index // accordingly for new index machanism. static HashIndex *hIdx; static TreeIndex *tIdx; + static TrieIndex *iIdx; static long usageCount; public: static Index* getIndex(IndexType type); @@ -124,7 +136,7 @@ class HashIndex : public Index DbRetVal insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *info, void *tuple, bool undoFlag); DbRetVal remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *info, void *tuple, bool undoFlag); DbRetVal update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *info, void *tuple, bool undoFlag); - bool checkForUniqueKey(HashIndexNode *head,HashIndexInfo *info, void *tuple); + bool checkForUniqueKey(IndexNode *head,HashIndexInfo *info, void *tuple); static unsigned int computeHashBucket(DataType type, void *key, int noOfBuckets, int length=0); static DbRetVal insertLogicalUndoLog(Database *sysdb, void *info); static DbRetVal deleteLogicalUndoLog(Database *sysdb, void *info); @@ -145,6 +157,24 @@ class TreeIndex : public Index static DbRetVal upgradeTreeNodeMutex(TreeNode*, int procSlot); }; + +class TrieIndex: public Index +{ + public: + DbRetVal insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *info, void *tuple, bool undoFlag); + DbRetVal remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *info, void 
*tuple, bool undoFlag); + DbRetVal update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *info, void *tuple, bool undoFlag); + //bool checkForUniqueKey(IndexNode *head, HashIndexInfo *info, void *tuple); + static void computeHashValues(DataType type, void *key, char *in, int length=0); + static DbRetVal insertLogicalUndoLog(Database *sysdb, void *info); + static DbRetVal deleteLogicalUndoLog(Database *sysdb, void *info); + void displayAll(TrieNode *node, int level =1); + void printTrieNode(TrieNode *node, int level); + private: + DbRetVal addToValueList(Database*, void**, Chunk*, void*, void*); + DbRetVal removeFromValueList(Database*, void**, Chunk*, void*, void*); + +}; class TreeIter { TreeNode *iter; @@ -188,7 +218,8 @@ enum IndexIntType { hashOneField = 1, hash = 2, - tree = 3 + tree = 3, + trie = 4 }; class IndexInfo diff --git a/include/Info.h b/include/Info.h index 4293d766..c5101c38 100644 --- a/include/Info.h +++ b/include/Info.h @@ -191,6 +191,7 @@ enum IndexType { hashIndex = 0, /** -HashIndexNode* BucketIter::next() +IndexNode* BucketIter::next() { if (iter == NULL) return NULL; if(recordsOver) return NULL; - HashIndexNode *node = iter; + IndexNode *node = iter; iter = iter ->next_; printDebug(DM_HashIndex,"BucketIter::next returns %x",node); if (isUnique) recordsOver = true; diff --git a/src/storage/BucketList.cxx b/src/storage/BucketList.cxx index dbac6a60..b4af334a 100644 --- a/src/storage/BucketList.cxx +++ b/src/storage/BucketList.cxx @@ -21,13 +21,13 @@ DbRetVal BucketList::insert(Chunk *chunk, Database *db, void *key, void*tuple) { DbRetVal rv = OK; - HashIndexNode *newNode;// (HashIndexNode*) chunk->allocate(db, &rv); + IndexNode *newNode;// (IndexNode*) chunk->allocate(db, &rv); int tries=0; int totalTries = Conf::config.getMutexRetries(); while (tries < totalTries) { rv = OK; - newNode= (HashIndexNode*) chunk->allocate(db, &rv); + newNode= (IndexNode*) chunk->allocate(db, &rv); if (newNode !=NULL) break; if (rv != ErrLockTimeOut) 
{ @@ -59,7 +59,7 @@ DbRetVal BucketList::insert(Chunk *chunk, Database *db, void *key, void*tuple) return OK; } - HashIndexNode *it = head; + IndexNode *it = head; while (NULL != it->next_) it = it->next_; //it->next_ = newNode; if ( 0 != Mutex::CASL((long*)&it->next_, 0, (long)newNode)) { @@ -71,11 +71,22 @@ DbRetVal BucketList::insert(Chunk *chunk, Database *db, void *key, void*tuple) if (rv != OK) printError(ErrSysFatal, "rv is not OK %d\n", rv); return rv; } +void BucketList::print() +{ + if (NULL == head) return ; + IndexNode *ite = head, *prev = head; + while (ite != NULL) + { + printf( "%d ", *((int*)ite->ptrToKey_)); + ite = ite->next_; + } + return; +} //Returns 2 if the head itself is removed. DbRetVal BucketList::remove(Chunk *chunk, Database *db, void *keyPtr) { if (NULL == head) return ErrNotFound; - HashIndexNode *ite = head, *prev = head; + IndexNode *ite = head, *prev = head; while (ite != NULL) { if (ite->ptrToKey_ == keyPtr) diff --git a/src/storage/CatalogTables.cxx b/src/storage/CatalogTables.cxx index 0729f7c4..820dc5e1 100644 --- a/src/storage/CatalogTables.cxx +++ b/src/storage/CatalogTables.cxx @@ -388,9 +388,10 @@ DbRetVal CatalogTableINDEX::insert(const char *name, void *tptr, int numFlds, bo indexInfo->tblPtr_ = tptr; indexInfo->numFlds_ = numFlds; if (NULL == hChunk) - indexInfo->indexType_ = treeIndex; + indexInfo->indexType_ = treeIndex; else indexInfo->indexType_ = hashIndex; + if (0 == bucketSize) indexInfo->indexType_ = trieIndex; indexInfo->chunkPtr_ = chunk; indexInfo->hashNodeChunk_ = hChunk; indexInfo->noOfBuckets_ = bucketSize; diff --git a/src/storage/Chunk.cxx b/src/storage/Chunk.cxx index e5d9c84e..10bc512b 100644 --- a/src/storage/Chunk.cxx +++ b/src/storage/Chunk.cxx @@ -304,6 +304,25 @@ void* Chunk::allocate(Database *db, DbRetVal *status) //releaseChunkMutex(db->procSlot); return data + sizeof(InUse); } +void* Chunk::tryAllocate(Database *db, DbRetVal *status) +{ + int tries=0; + int totalTries = 
Conf::config.getMutexRetries(); + void *node = NULL; + while (tries < totalTries) + { + *status = OK; + node= allocate(db, status); + if (NULL != node) break; + if (*status != ErrLockTimeOut) + { + printError(*status, "Unable to allocate node"); + return NULL; + } + tries++; + } + return node; +} void Chunk::setPageDirty(Database *db, void *ptr) { diff --git a/src/storage/DatabaseManagerImpl.cxx b/src/storage/DatabaseManagerImpl.cxx index fa338f65..b9a2d5b5 100644 --- a/src/storage/DatabaseManagerImpl.cxx +++ b/src/storage/DatabaseManagerImpl.cxx @@ -230,7 +230,7 @@ DbRetVal DatabaseManagerImpl::createDatabase(const char *name, size_t size) if (0 == strcmp(name, SYSTEMDB)) return OK; /*Allocate new chunk to store hash index nodes - Chunk *chunkInfo = createUserChunk(sizeof(HashIndexNode)); + Chunk *chunkInfo = createUserChunk(sizeof(IndexNode)); if (NULL == chunkInfo) { printError(ErrSysInternal, "Failed to allocate hash index nodes chunk"); @@ -1048,6 +1048,12 @@ DbRetVal DatabaseManagerImpl::createIndex(const char *indName, IndexInitInfo *in HashIndexInitInfo *hInfo = (HashIndexInitInfo*) info; rv = createTreeIndex(indName, info->tableName, info->list, hInfo->bucketSize, info->isUnique, info->isPrimary); + } + else if (info->indType == trieIndex) + { + HashIndexInitInfo *hInfo = (HashIndexInitInfo*) info; + rv = createTrieIndex(indName, info->tableName, info->list, + info->isUnique, info->isPrimary); }else { printError(ErrBadCall, "Index type not supported\n"); @@ -1163,7 +1169,7 @@ DbRetVal DatabaseManagerImpl::createHashIndex(const char *indName, const char *t initHashBuckets(buck, bucketSize); //create chunk to store the hash index nodes - Chunk* hChunk = createUserChunk(sizeof(HashIndexNode)); + Chunk* hChunk = createUserChunk(sizeof(IndexNode)); if (NULL == hChunk) { delete[] fptr; @@ -1393,45 +1399,138 @@ DbRetVal DatabaseManagerImpl::createTreeIndex(const char *indName, const char *t return OK; } +DbRetVal DatabaseManagerImpl::createTrieIndex(const char 
*indName, const char *tblName, + FieldNameList &fldList, bool isUnique, bool isPrimary) +{ + int totFlds = fldList.size(); + void *tptr =NULL; + char **fptr = new char* [totFlds]; + DbRetVal rv = validateIndex(tblName, fldList, &tptr, &fptr, isPrimary); + if (OK != rv) + { + delete[] fptr; + return rv; + } + rv = systemDatabase_->getXCheckpointMutex(); + if (OK != rv) + { + printError(ErrSysInternal, "Unable to get database mutex"); + return ErrSysInternal; + } + //below statements are actually setting values in the catalog table + //thats why mutex is taken before this stmt. Do not change the order + CFIELD* fInfo = (CFIELD*)fptr[0]; + if(isPrimary){fInfo->isPrimary_=true;fInfo->isUnique_=true;} + if(isUnique){fInfo->isUnique_=true;} -void DatabaseManagerImpl::initHashBuckets(Bucket *buck, int bucketSize) -{ - os::memset((void*)buck, 0, bucketSize * sizeof(Bucket)); + printDebug(DM_TrieIndex, "Creating chunk for storing trie nodes\n" ); + Chunk* chunkInfo = createUserChunk(sizeof(TrieNode)); - for (int i=0; i < bucketSize ; i++) + //chunk to store the linked list of trie values + Chunk* hChunk = createUserChunk(sizeof(IndexNode)); + if (NULL == chunkInfo || NULL == hChunk) { - buck[i].mutex_.init("Bucket"); + delete[] fptr; + if (chunkInfo) deleteUserChunk(chunkInfo); + systemDatabase_->releaseCheckpointMutex(); + printError(ErrSysInternal, "Unable to create trie node chunk"); + return ErrSysInternal; } - return; + chunkInfo->setChunkName(indName); + hChunk->setChunkName(indName); + rv = updateIndexCatalogTables(indName,tptr, fptr, fldList, isUnique, + chunkInfo, hChunk ); + delete[] fptr; + if (OK != rv) { + printError(ErrSysInternal, "Catalog table updation failed"); + deleteUserChunk(chunkInfo); + deleteUserChunk(hChunk); + systemDatabase_->releaseCheckpointMutex(); + return rv; + } + systemDatabase_->releaseCheckpointMutex(); + //TODO:: create index nodes if records already exist in the table + return OK; } - -DbRetVal DatabaseManagerImpl::dropIndex(const char 
*name) +DbRetVal DatabaseManagerImpl::validateIndex(const char *tblName, + FieldNameList &fldList, void **tptr, char ***fptr, + bool isPrimary) { - return dropIndexInt(name, true); + int totFlds = fldList.size(); + if (totFlds != 1) + { + printError(ErrBadCall, "No Field name specified or composite fields specified"); + return ErrBadCall; + } + void *chunk = NULL; + void *vcchunk = NULL; + //check whether table exists + CatalogTableTABLE cTable(systemDatabase_); + cTable.getChunkAndTblPtr(tblName, chunk, *tptr, vcchunk); + if (NULL == *tptr) + { + printError(ErrNotExists, "Table does not exist %s", tblName); + return ErrNotExists; + } + + //check whether field exists + CatalogTableFIELD cField(systemDatabase_); + DbRetVal rv = cField.getFieldPtrs(fldList, *tptr, *fptr); + if (OK != rv) + { + if (rv != ErrBadCall) { + printError(ErrNotExists, "Field does not exist"); + return ErrNotExists; + } + } + CFIELD* fInfo = (CFIELD*)(*fptr)[0]; + if (fInfo->type_ == typeFloat || fInfo->type_ == typeDouble || fInfo->type_ == typeTimeStamp) + { + printError(ErrBadArg, "Trie Index cannot be created for float or double or timestamp type"); + return ErrBadArg; + } + if (!fInfo->isNull_ && isPrimary ) + { + printError(ErrBadArg, "Primary Index cannot be created on field without NOTNULL constraint"); + return ErrBadArg; + } + + return OK; } -DbRetVal DatabaseManagerImpl::dropIndexInt(const char *name, bool takeLock) +DbRetVal DatabaseManagerImpl::updateIndexCatalogTables(const char *indName, + void *tptr, char **fptr, FieldNameList &fldList, + bool isUnique, Chunk* chunkInfo, Chunk* hChunk ) { - DbRetVal rv = OK; - void *chunk = NULL, *hchunk = NULL; - void *tptr =NULL; - int ret = 0; - if (takeLock) { - rv = systemDatabase_->getXCheckpointMutex(); - if (OK != rv) - { - printError(ErrSysInternal, "Unable to get database mutex"); - return ErrSysInternal; - } + void *tupleptr = NULL; + CatalogTableINDEX cIndex(systemDatabase_); + DbRetVal rv = cIndex.insert(indName, tptr, fldList.size(),
isUnique, + chunkInfo, 0, hChunk, tupleptr); + if (OK != rv) + { + printError(ErrSysInternal, "Catalog table updation failed in INDEX table"); + return ErrSysInternal; } + CatalogTableINDEXFIELD cIndexField(systemDatabase_); + rv = cIndexField.insert(fldList, tupleptr, tptr, fptr); + if (OK != rv) + { + //rollback the previous operation + cIndex.remove(indName, (void *&)chunkInfo, (void *&)hChunk, (void *&)tupleptr); + printError(ErrSysInternal, "Catalog table updation failed in INDEXFIELD table"); + return ErrSysInternal; + } + return rv; +} +DbRetVal DatabaseManagerImpl::removeIndexCatalogTables(const char *name, void *chunk, void* hchunk, void *tptr) +{ //remove the entry in INDEX CatalogTableINDEX cIndex(systemDatabase_); - rv = cIndex.remove(name, chunk, hchunk, tptr); + DbRetVal rv = cIndex.remove(name, chunk, hchunk, tptr); if (OK != rv) { - if (takeLock) systemDatabase_->releaseCheckpointMutex(); printError(ErrSysInternal, "Catalog table updation failed for INDEX table"); return ErrSysInternal; } @@ -1441,39 +1540,79 @@ DbRetVal DatabaseManagerImpl::dropIndexInt(const char *name, bool takeLock) rv = cIndexField.remove(tptr); if (OK != rv) { - if (takeLock) systemDatabase_->releaseCheckpointMutex(); printError(ErrSysInternal, "Catalog table updation failed for INDEX table"); return ErrSysInternal; } printDebug(DM_Database, "Removing from INDEXFIELD %s",name); - - //delete the index chunk - CINDEX *iptr = (CINDEX*)tptr; - rv = deleteUserChunk((Chunk*)chunk); + return OK; +} +DbRetVal DatabaseManagerImpl::removeIndexChunks(void* chunk, void* hchunk, IndexType iType) +{ + DbRetVal rv = deleteUserChunk((Chunk*)chunk); if (OK != rv) { - if (takeLock) systemDatabase_->releaseCheckpointMutex(); printError(ErrSysInternal, "Unable to delete the index chunk"); return ErrSysInternal; } //delete the index hash node chunk - if (iptr->indexType_ == hashIndex) { + if (iType == hashIndex || iType == trieIndex) { rv = deleteUserChunk((Chunk*)hchunk); if (OK != rv) { - if 
(takeLock) systemDatabase_->releaseCheckpointMutex(); printError(ErrSysInternal, "Unable to delete the index hash node chunk"); return ErrSysInternal; } } - if (takeLock) systemDatabase_->releaseCheckpointMutex(); + return OK; +} - //TODO::If tuples present in this table, then - //free all hash index nodes for this table. - //free all nodes in list of all buckets - //Take table lock +void DatabaseManagerImpl::initHashBuckets(Bucket *buck, int bucketSize) +{ + os::memset((void*)buck, 0, bucketSize * sizeof(Bucket)); + + for (int i=0; i < bucketSize ; i++) + { + buck[i].mutex_.init("Bucket"); + } + return; +} - printDebug(DM_Database, "Dropped hash index %s",name); +DbRetVal DatabaseManagerImpl::dropIndex(const char *name) +{ + return dropIndexInt(name, true); +} + +DbRetVal DatabaseManagerImpl::dropIndexInt(const char *name, bool takeLock) +{ + DbRetVal rv = OK; + void *chunk = NULL, *hchunk = NULL; + void *tptr =NULL; + int ret = 0; + if (takeLock) { + rv = systemDatabase_->getXCheckpointMutex(); + if (OK != rv) + { + printError(ErrSysInternal, "Unable to get database mutex"); + return ErrSysInternal; + } + } + rv = removeIndexCatalogTables(name, chunk, hchunk, tptr); + if (OK != rv) + { + if (takeLock) systemDatabase_->releaseCheckpointMutex(); + return rv; + } + + CINDEX *iptr = (CINDEX*)tptr; + rv = removeIndexChunks(chunk, hchunk, iptr->indexType_); + if (OK != rv) + { + if (takeLock) systemDatabase_->releaseCheckpointMutex(); + return rv; + } + if (takeLock) systemDatabase_->releaseCheckpointMutex(); + + printDebug(DM_Database, "Dropped index %s",name); logFinest(Conf::logger, "Deleted Index %s", name); return OK; } @@ -1657,6 +1796,8 @@ DbRetVal DatabaseManagerImpl::printIndexInfo(char *name) printf(" Hash Index \n"); else if (treeIndex == iType) printf(" Tree Index \n"); + else if (trieIndex == iType) + printf(" Trie Index \n"); else printf(" Unknown Index \n"); @@ -1665,21 +1806,32 @@ DbRetVal DatabaseManagerImpl::printIndexInfo(char *name) printf(" %d \n", 
ch->totalPages()); printf(" %d \n", CatalogTableINDEX::getNoOfBuckets(tptr)); printf("\n"); - if(treeIndex != iType){ + printf("\n"); + if(hashIndex == iType){ ch = (Chunk*) hchunk; - printf("\n"); printf(" %d \n", ch->totalPages()); printf(" %d \n", ch->getTotalDataNodes()); - printf("\n"); - }else{ - printf("\n"); + } else if (treeIndex == iType) { printf(" %d \n", ch->getTotalDataNodes()); if(hchunk) printf(" %lld \n",((TreeNode*) hchunk)->getTotalElements()); else printf(" 0 \n"); - printf("\n"); + } else if (trieIndex == iType) + { + printf(" \n"); + printf(" %d \n", ch->totalPages()); + printf(" %d \n", ch->getTotalDataNodes()); + printf(" \n \n"); + ch = (Chunk*) hchunk; + printf(" %d \n", ch->totalPages()); + printf(" %d \n", ch->getTotalDataNodes()); + printf(" \n"); + } else + { + printf("Unknown Index type\n"); } + printf("\n"); return OK; } diff --git a/src/storage/Debug.cxx b/src/storage/Debug.cxx index 79628d22..93880355 100644 --- a/src/storage/Debug.cxx +++ b/src/storage/Debug.cxx @@ -25,6 +25,7 @@ int DebugDM_RedoLog = 0; int DebugDM_Index = 0; int DebugDM_HashIndex = 0; int DebugDM_TreeIndex = 0; +int DebugDM_TrieIndex = 0; int DebugDM_SystemDatabase = 0; int DebugDM_Database = 0; int DebugDM_Table = 0; @@ -48,7 +49,7 @@ int printStackTrace() return 0; } -int printError1(DbRetVal val, char* fname, int lno, char *format, ...) +int printError1(DbRetVal val, char* fname, int lno, const char *format, ...) { va_list ap; int fd = -1; @@ -86,7 +87,7 @@ int printError1(DbRetVal val, char* fname, int lno, char *format, ...) return 0; } -int printDebug1(int module, char *fname, int lno, char *format, ...) +int printDebug1(int module, char *fname, int lno, const char *format, ...)
{ switch(module) { case DM_Alloc: { if (!DebugDM_Alloc) return 1; break; } diff --git a/src/storage/HashIndex.cxx b/src/storage/HashIndex.cxx index 66639fd5..232561e6 100644 --- a/src/storage/HashIndex.cxx +++ b/src/storage/HashIndex.cxx @@ -107,14 +107,14 @@ unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuck return -1; } -bool HashIndex::checkForUniqueKey(HashIndexNode *head, HashIndexInfo *info, void *tuple) +bool HashIndex::checkForUniqueKey(IndexNode *head, HashIndexInfo *info, void *tuple) { if (!head) return false; int offset = info->fldOffset; DataType type = info->type; BucketList list(head); BucketIter iter = list.getIterator(); - HashIndexNode *node; + IndexNode *node; void *bucketTuple; printDebug(DM_HashIndex, "HashIndex insert Checking for unique"); bool res = false; @@ -219,7 +219,7 @@ DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_; hInfo.keyPtr_ = keyPtr; - HashIndexNode *head = (HashIndexNode*) bucket->bucketList_; + IndexNode *head = (IndexNode*) bucket->bucketList_; if (info->isUnique) { bool isKeyPresent = checkForUniqueKey(head, info, tuple); @@ -231,14 +231,14 @@ DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde { printDebug(DM_HashIndex, "HashIndex insert head is empty"); DbRetVal rv = OK; - HashIndexNode *firstNode= NULL; + IndexNode *firstNode= NULL; int tries=0; int totalTries = Conf::config.getMutexRetries(); while (tries < totalTries) { rv = OK; - firstNode= (HashIndexNode*) hIdxNodeChunk->allocate(tbl->db_, &rv); + firstNode= (IndexNode*) hIdxNodeChunk->allocate(tbl->db_, &rv); if (firstNode !=NULL) break; if (rv != ErrLockTimeOut) { @@ -351,7 +351,7 @@ DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde hInfo.hChunk_ = ((CINDEX *)indexPtr)->hashNodeChunk_; hInfo.keyPtr_ = keyPtr; - HashIndexNode *head = (HashIndexNode*) bucket1->bucketList_; + IndexNode *head = 
(IndexNode*) bucket1->bucketList_; if (!head) { printError(ErrNotExists, "Hash index does not exist:should never happen\n"); return ErrNotExists; @@ -528,7 +528,7 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde return ErrLockTimeOut; } - HashIndexNode *head1 = (HashIndexNode*) bucket->bucketList_; + IndexNode *head1 = (IndexNode*) bucket->bucketList_; if (head1) { BucketList list1(head1); @@ -552,7 +552,7 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde rc = tr->appendLogicalHashUndoLog(tbl->sysDB_, DeleteHashIndexOperation, hInfo1, sizeof(HashUndoLogInfo)); if (rc !=OK) { - BucketList list((HashIndexNode*) bucket->bucketList_); + BucketList list((IndexNode*) bucket->bucketList_); rc = list.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple); if (rc !=OK) printError(ErrSysFatal, "double failure on undo log remove followed by hash bucket list insert\n"); bucket->bucketList_ = list.getBucketListHead(); @@ -565,7 +565,7 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde return rc; } } - HashIndexNode *head2 = (HashIndexNode*) bucket1->bucketList_; + IndexNode *head2 = (IndexNode*) bucket1->bucketList_; //Note:: the tuple will be in the same address location //so not changing the keyptr and tuple during append //only bucket where this node resides will only change @@ -573,7 +573,7 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde if (!head2) { DbRetVal rv = OK; - HashIndexNode *firstNode= (HashIndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv)); + IndexNode *firstNode= (IndexNode*)(((Chunk*)iptr->hashNodeChunk_)->allocate(tbl->db_, &rv)); if (firstNode == NULL) { printError(rv, "Error in allocating hash node"); @@ -588,7 +588,7 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde firstNode->ptrToKey_ = keyPtr; firstNode->ptrToTuple_ = tuple; firstNode->next_ = NULL; - 
bucket1->bucketList_ = (HashIndexNode*)firstNode; + bucket1->bucketList_ = (IndexNode*)firstNode; printDebug(DM_HashIndex, "Updating hash index node: Adding new node %x:Head is empty", firstNode); } else @@ -605,8 +605,8 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde { //reverting back the changes:delete new node and add the old //node + remove logical undo log of the DeleteHashIndexOperation - BucketList list1((HashIndexNode*) bucket->bucketList_); - BucketList list2((HashIndexNode*) bucket1->bucketList_); + BucketList list1((IndexNode*) bucket->bucketList_); + BucketList list2((IndexNode*) bucket1->bucketList_); list1.insert((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr, tuple); list2.remove((Chunk*)iptr->hashNodeChunk_, tbl->db_, keyPtr); bucket->bucketList_ = list1.getBucketListHead(); @@ -633,7 +633,7 @@ DbRetVal HashIndex::insertLogicalUndoLog(Database *sysdb, void *data) Database db; db.setMetaDataPtr((DatabaseMetaData *) info->metaData_); db.setProcSlot(sysdb->procSlot); - HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_; + IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_; BucketList list(head); DbRetVal rv = list.insert(hChunk, &db, info->keyPtr_, info->tuple_); if (rv != OK) @@ -659,7 +659,7 @@ DbRetVal HashIndex::deleteLogicalUndoLog(Database *sysdb, void *data) Database db; db.setMetaDataPtr((DatabaseMetaData *)info->metaData_); db.setProcSlot(sysdb->procSlot); - HashIndexNode *head = (HashIndexNode *)((Bucket *)info->bucket_)->bucketList_; + IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_; BucketList list(head); DbRetVal rc = list.remove(hChunk, &db, info->keyPtr_); //((Bucket *)info->bucket_)->bucketList_ = list.getBucketListHead(); diff --git a/src/storage/Index.cxx b/src/storage/Index.cxx index b139793a..afe1868a 100644 --- a/src/storage/Index.cxx +++ b/src/storage/Index.cxx @@ -20,6 +20,7 @@ HashIndex* Index::hIdx = NULL; TreeIndex* 
Index::tIdx = NULL; +TrieIndex* Index::iIdx = NULL; long Index::usageCount = 0; Index* Index::getIndex(IndexType type) @@ -31,6 +32,9 @@ Index* Index::getIndex(IndexType type) }else if (type == treeIndex) { if (NULL == tIdx) tIdx = new TreeIndex(); return tIdx; + }else if (type == trieIndex) { + if (NULL == iIdx) iIdx = new TrieIndex(); + return iIdx; } return NULL; } @@ -40,6 +44,7 @@ void Index::destroy() if(!usageCount) { if(!hIdx) { delete hIdx; hIdx=NULL; } if(!tIdx) { delete tIdx; tIdx=NULL; } + if(!iIdx) { delete iIdx; iIdx=NULL; } } } diff --git a/src/storage/TableImpl.cxx b/src/storage/TableImpl.cxx index f6e7c8b8..360f15bd 100644 --- a/src/storage/TableImpl.cxx +++ b/src/storage/TableImpl.cxx @@ -336,8 +336,12 @@ DbRetVal TableImpl::createPlan() { if (!isAllFldPointLookup) break; printDebug(DM_Predicate, "point lookup involved for field %s",def->fldName_); - if(hashIndex == info->indType) scanType_ = hashIndexScan; - else scanType_ = treeIndexScan; + if(hashIndex == info->indType) + scanType_ = hashIndexScan; + else if (trieIndex == info->indType) + scanType_ = trieIndexScan; + else + scanType_ = treeIndexScan; isPointLook = true; useIndex_ = i; } @@ -1560,6 +1564,7 @@ void TableImpl::printSQLIndexString(FILE *fp, int fd) obj.bucketChunk = cIter.nextElement(); } else obj.bucketChunk = NULL; } + //TODO::Trie void *buf = &obj; write(fd, buf, sizeof(obj)); } @@ -1576,10 +1581,15 @@ void TableImpl::printSQLIndexString(FILE *fp, int fd) } fldList.removeAll(); fprintf(fp, " ) "); + if (iptr->indexType_ == hashIndex) fprintf(fp, " HASH "); - else fprintf(fp, " TREE "); - if (((HashIndexInfo*) idxInfo[i])->isUnique) fprintf(fp, " UNIQUE"); - if(((HashIndexInfo*) idxInfo[i])->noOfBuckets != 1009 ) fprintf(fp, " SIZE %d ",((HashIndexInfo*) idxInfo[i])->noOfBuckets ); + else if (iptr->indexType_ == treeIndex) fprintf(fp, " TREE "); + else fprintf(fp, " TRIE "); + + HashIndexInfo* hInfo = (HashIndexInfo*)idxInfo[i]; + if (hInfo->isUnique) fprintf(fp, " UNIQUE"); +
if(hInfo->noOfBuckets != 1009 && + hInfo->noOfBuckets !=0) fprintf(fp, " SIZE %d ",((HashIndexInfo*) idxInfo[i])->noOfBuckets ); fprintf(fp, ";\n"); } } @@ -2088,12 +2098,22 @@ DbRetVal TableImpl::compactIndexNode( void *indexPtr) if(ret1!=0){ return ErrLockTimeOut; } - }else + }else if (treeIndex == (iptr->indexType_)) + { + ret1 =((Chunk*)iptr->chunkPtr_)->compact(db_->procSlot); + if(ret1!=0){ + return ErrLockTimeOut; + } + } else if ( trieIndex == (iptr->indexType_)) { ret1 =((Chunk*)iptr->chunkPtr_)->compact(db_->procSlot); if(ret1!=0){ return ErrLockTimeOut; } + ret1 =((Chunk*)iptr->hashNodeChunk_)->compact(db_->procSlot); + if(ret1!=0){ + return ErrLockTimeOut; + } } return OK; } diff --git a/src/storage/Transaction.cxx b/src/storage/Transaction.cxx index cb350c1e..b6e2fa70 100644 --- a/src/storage/Transaction.cxx +++ b/src/storage/Transaction.cxx @@ -181,6 +181,17 @@ DbRetVal Transaction::appendLogicalTreeUndoLog(Database *sysdb, OperationType ty printDebug(DM_Transaction, "creating logical undo log and append %x optype:%d", logInfo, type); return rv; } +DbRetVal Transaction::appendLogicalTrieUndoLog(Database *sysdb, OperationType type, void *data, size_t size) +{ + DbRetVal rv = OK; + TrieUndoLogInfo *hInfo = (TrieUndoLogInfo *) data; + UndoLogInfo *logInfo = createUndoLog(sysdb, type, hInfo->tuple_, size, &rv); + if (logInfo == NULL) return rv; + memcpy((char*)logInfo + sizeof(UndoLogInfo), data, sizeof(TrieUndoLogInfo)); + addAtBegin(logInfo); + printDebug(DM_Transaction, "creating logical undo log and append %x optype:%d", logInfo, type); + return rv; +} UndoLogInfo* Transaction::createUndoLog(Database *sysdb, OperationType type, void *data, size_t size, DbRetVal *rv) @@ -371,6 +382,18 @@ DbRetVal Transaction::applyUndoLogs(Database *sysdb) + sizeof(UndoLogInfo)); break; } + case InsertTrieIndexOperation: + { + TrieIndex::deleteLogicalUndoLog(sysdb, (char *)logInfo + + sizeof(UndoLogInfo)); + break; + } + case DeleteTrieIndexOperation: + { + 
TrieIndex::insertLogicalUndoLog(sysdb, (char *)logInfo + + sizeof(UndoLogInfo)); + break; + } default: { printError(ErrSysFatal, "Illegal undo log type"); diff --git a/src/storage/TrieIndex.cxx b/src/storage/TrieIndex.cxx new file mode 100644 index 00000000..555c76fe --- /dev/null +++ b/src/storage/TrieIndex.cxx @@ -0,0 +1,330 @@ +/*************************************************************************** + * Copyright (C) 2007 by www.databasecache.com * + * Contact: praba_tuty@databasecache.com * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. 
* + * * + ***************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include + +//converts the given value into 0 to TRIE_SIZE +//value could be 0-9, A-Z, a-z and special chars +char hashString(char value) +{ + if (value > 47 && value <58) return value - 48; + else if (value > 64 && value <91 ) return (value - 64)/3; + else if (value > 96 && value <123) return (value - 96)/3; + //for all special and other chars return 9 + return 9; +} + +void TrieIndex::computeHashValues(DataType type, void *key, char *in, int length) +{ + if (typeInt == type) { + int val = *(int*)key; + sprintf(in, "%d", val); + int i=0; + while (in[i] != '\0') + { + in[i] = hashString(in[i]); + printDebug(DM_TrieIndex, "Hash Value Pos:%d Hash:%d\n", i, in[i]); + i++; + } + in[i] = -1; + }else if (typeLongLong == type) { + long val = *(long*)key; + sprintf(in, "%lld", val); + int i=0; + while (in[i] != '\0') + { + in[i] = hashString(in[i]); + printDebug(DM_TrieIndex, "Hash Value Pos:%d Hash:%d\n", i, in[i]); + i++; + } + in[i] = -1; + }else if (typeString == type || typeVarchar == type) { + char *val = (char*) key; + int i=0; + while (*val != '\0') + { + in[i] = hashString(*val); + printDebug(DM_TrieIndex, "Hash Value Pos:%d Hash:%d\n", i, in[i]); + val++; + i++; + } + in[i] = (char)-1; + }else + printError(ErrSysFatal,"Type not supported for trie hashing\n"); + return ; +} +/* +bool HashIndex::checkForUniqueKey(IndexNode *head, HashIndexInfo *info, void *tuple) +{ + return false; +} +*/ + +DbRetVal TrieIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag) +{ + DbRetVal rv = OK; + HashIndexInfo *info = (HashIndexInfo*) indInfo; + CINDEX *iptr = (CINDEX*)indexPtr; + char hashValue[TRIE_MAX_LENGTH]; + void *keyPtr =(void*)((char*)tuple + info->fldOffset); + //for varchar ptr to value is stored in tuple + if (info->type == typeVarchar) keyPtr = (void *) 
*(long *) keyPtr; + computeHashValues(info->type, keyPtr, hashValue, info->compLength); + Chunk *hIdxNodeChunk = (Chunk*)iptr->hashNodeChunk_; + Chunk *chunk = (Chunk*) iptr->chunkPtr_; + ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr); + TrieNode* start = (TrieNode*)citer.nextElement(); + if(start) displayAll(start); + int cnt=0; + if (NULL == start) { + //first value is inserted into the trie index + start = (TrieNode*) chunk->tryAllocate(tbl->db_, &rv); + if (NULL == start) { + printError(ErrSysInternal, "Could not allocate trie node %d\n", rv); + return rv; + } + } + char **prev = NULL; + while(-1 != hashValue[cnt+1]) { + prev = (char**)&(start->next_[hashValue[cnt]]); + if (*prev) + { + start = (TrieNode*) *prev; + cnt++; + continue; + } + //allocate trie node + TrieNode *newNode = (TrieNode*) chunk->tryAllocate(tbl->db_, &rv); + if (NULL == newNode) { + printError(ErrSysInternal, "Could not allocate trie node %d\n", rv); + return rv; + } + //set the previous trie node ptr to this node + *prev = (char*)newNode; + start = newNode; + cnt++; + } + void **ptr = (void**)&(start->head_[hashValue[cnt]]); + rv = addToValueList(tbl->db_, ptr, hIdxNodeChunk, tuple, keyPtr); + if (OK != rv) return rv; + + //create logical undo log + TrieUndoLogInfo hInfo; + hInfo.metaData_ = tbl->db_->getMetaDataPtr(); + hInfo.bucket_ = NULL; + hInfo.tuple_ = tuple; + hInfo.keyPtr_ = keyPtr; + + if (!loadFlag) { + rv = tr->appendLogicalTrieUndoLog(tbl->sysDB_, InsertTrieIndexOperation, &hInfo, sizeof(TrieUndoLogInfo)); + //TODO:: if it fails need to remove the element from the bucket + } + return rv; +} +DbRetVal TrieIndex::addToValueList(Database *db, void **ptr, Chunk *hIdxNodeChunk, void *tuple, void *keyPtr) +{ + IndexNode *head = (IndexNode*) *ptr; + printDebug(DM_TrieIndex, "TrieIndex insert into bucket list"); + DbRetVal rv = OK; + if (!head) + { + printDebug(DM_TrieIndex, "TrieIndex insert head is empty"); + IndexNode *firstNode= NULL; + firstNode= (IndexNode*) 
hIdxNodeChunk->tryAllocate(db, &rv); + if (firstNode == NULL){ + printError(rv, "Unable to allocate index node for Trie index after retry"); + return rv; + } + firstNode->ptrToKey_ = keyPtr; + firstNode->ptrToTuple_ = tuple; + firstNode->next_ = NULL; + if (0 != Mutex::CASL((long*)ptr, 0, (long)firstNode)) { + printError(ErrLockTimeOut, "Trie Index bucket lock timeout.. retry"); + hIdxNodeChunk->free(db, firstNode); + return ErrLockTimeOut; + } + printDebug(DM_TrieIndex, "TrieIndex insert new node %x in empty bucket", head); + } + else + { + BucketList list(head); + rv = list.insert(hIdxNodeChunk, db, keyPtr, tuple); + if (rv !=OK) { + printError(rv, "unable to insert into Trie bucketlist rv:%d", rv); + return rv; + } + } + return OK; +} + +DbRetVal TrieIndex::removeFromValueList(Database *db, void **ptr, Chunk *hIdxNodeChunk, void *tuple, void *keyPtr) +{ + IndexNode *head = (IndexNode*) *ptr; + printDebug(DM_TrieIndex, "TrieIndex remove from bucket list"); + DbRetVal rv = OK; + if (!head) + { + printError(rv, "Trie value list head is empty"); + return rv; + } + else + { + BucketList list(head); + rv = list.remove(hIdxNodeChunk, db, keyPtr); + if (SplCase == rv) + { + rv = OK; + printDebug(DM_TrieIndex, "Removing trie index node from head "); + if (0 != Mutex::CASL((long*)ptr, + (long)head, (long)list.getBucketListHead())) { + printError(ErrSysFatal, "Lock time out for trie bucket. 
retry\n"); + return ErrLockTimeOut; + } + } + if (rv !=OK) { + printError(rv, "unable to remove from Trie bucketlist rv:%d", rv); + return rv; + } + } + return OK; + +} + +DbRetVal TrieIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag) +{ + DbRetVal rv = OK; + HashIndexInfo *info = (HashIndexInfo*) indInfo; + CINDEX *iptr = (CINDEX*)indexPtr; + char hashValue[TRIE_MAX_LENGTH]; + void *keyPtr =(void*)((char*)tuple + info->fldOffset); + Chunk *hIdxNodeChunk = (Chunk*)iptr->hashNodeChunk_; + //for varchar ptr to value is stored in tuple + if (info->type == typeVarchar) keyPtr = (void *) *(long *) keyPtr; + computeHashValues(info->type, keyPtr, hashValue, info->compLength); + ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr); + TrieNode* start = (TrieNode*)citer.nextElement(); + int cnt=0; + if (NULL == start) { + printError(ErrSysInternal, "No trie nodes found %d\n", rv); + return rv; + } + char **iter = NULL; + while(-1 != hashValue[cnt+1]) { + iter = (char**)&(start->next_[hashValue[cnt]]); + if (! 
*iter) + { + printError(ErrNotFound, "No trie node found %d\n", rv); + return ErrNotFound; + } + //traverse till the end + start = (TrieNode*) *iter; + cnt++; + } + void **ptr = (void**)&(start->head_[hashValue[cnt]]); + rv = removeFromValueList(tbl->db_, ptr, hIdxNodeChunk, tuple, keyPtr); + if (OK != rv) return rv; + + //create logical undo log + TrieUndoLogInfo hInfo; + hInfo.metaData_ = tbl->db_->getMetaDataPtr(); + hInfo.bucket_ = NULL; + hInfo.tuple_ = tuple; + hInfo.keyPtr_ = keyPtr; + + if (!loadFlag) { + rv = tr->appendLogicalTrieUndoLog(tbl->sysDB_, DeleteTrieIndexOperation, &hInfo, sizeof(TrieUndoLogInfo)); + //TODO:: if it fails need to remove the element from the bucket + } + return rv; +} + +DbRetVal TrieIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool loadFlag) +{ + DbRetVal rv = OK; + return rv; +} + +DbRetVal TrieIndex::insertLogicalUndoLog(Database *sysdb, void *data) +{ + TrieUndoLogInfo *info = (TrieUndoLogInfo *) data; + Chunk *hChunk = (Chunk *) info->hChunk_; + Database db; + db.setMetaDataPtr((DatabaseMetaData *) info->metaData_); + db.setProcSlot(sysdb->procSlot); + IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_; + BucketList list(head); + DbRetVal rv = list.insert(hChunk, &db, info->keyPtr_, info->tuple_); + if (rv != OK) + { + printError(ErrLockTimeOut, "Unable to add to bucket..retry\n"); + return ErrLockTimeOut; + } + return OK; +} + +DbRetVal TrieIndex::deleteLogicalUndoLog(Database *sysdb, void *data) +{ + TrieUndoLogInfo *info = (TrieUndoLogInfo *) data; + Chunk *hChunk = (Chunk *) info->hChunk_; + Database db; + db.setMetaDataPtr((DatabaseMetaData *)info->metaData_); + db.setProcSlot(sysdb->procSlot); + IndexNode *head = (IndexNode *)((Bucket *)info->bucket_)->bucketList_; + BucketList list(head); + DbRetVal rc = list.remove(hChunk, &db, info->keyPtr_); + if (SplCase == rc) { + ;//TODO + }else if (rc != OK) { + printError(ErrLockTimeOut, "Unable to remove 
hash index node"); + return ErrLockTimeOut; + } + return OK; +} +void TrieIndex::displayAll(TrieNode *start, int level) +{ + printTrieNode(start, level); + level++; + for (int i=0; i < TRIE_SIZE; i++) + { + if (start->next_[i]) displayAll(start->next_[i], level); + } +} +void TrieIndex::printTrieNode(TrieNode *node, int level) +{ + printf("Trie %x Level %d child:", node, level); + for (int i=0; i < TRIE_SIZE; i++) + { + printf("%x ", node->next_[i]); + } + printf("bucket:", node); + for (int i=0; i < TRIE_SIZE; i++) + { + printf("%x ", node->head_[i]); + if ( node->head_[i]) { + printf("{ "); + BucketList list(node->head_[i]); + list.print(); + printf("} "); + } + } + printf("\n"); +} diff --git a/src/storage/TupleIterator.cxx b/src/storage/TupleIterator.cxx index e3c5302d..4f84eb06 100644 --- a/src/storage/TupleIterator.cxx +++ b/src/storage/TupleIterator.cxx @@ -59,7 +59,6 @@ DbRetVal TupleIterator::open() }else if (hashIndexScan == scanType_) { HashIndexInfo *hIdxInfo = (HashIndexInfo*)info; - bool isPtr = false; FieldIterator iter = hIdxInfo->idxFldList.getIterator(); int offset = hIdxInfo->fldOffset; if(!keyBuffer) keyBuffer = (char*) malloc(hIdxInfo->compLength); @@ -81,7 +80,7 @@ DbRetVal TupleIterator::open() int bucketNo = HashIndex::computeHashBucket(hIdxInfo->type, keyBuffer, hIdxInfo->noOfBuckets, hIdxInfo->compLength); Bucket *bucket = &(hIdxInfo->buckets[bucketNo]); - HashIndexNode *head = (HashIndexNode*) bucket->bucketList_; + IndexNode *head = (IndexNode*) bucket->bucketList_; if (!head) { bIter->setHead(head); @@ -89,6 +88,45 @@ DbRetVal TupleIterator::open() } printDebug(DM_HashIndex, "open:head for bucket %x is :%x", bucket, head); bIter->setHead(head); + }else if (trieIndexScan == scanType_) + { + HashIndexInfo *indInfo = (HashIndexInfo*)info; + char hashValue[TRIE_MAX_LENGTH]; + FieldIterator iter = indInfo->idxFldList.getIterator(); + FieldDef *def = iter.nextElement(); + void* keyPtr = 
(void*)predImpl->valPtrForIndexField(def->fldName_,false); + if (NULL == keyPtr) { + printError(ErrSysFatal, "Fatal: Should not come here"); + } + TrieIndex::computeHashValues(indInfo->type, keyPtr, hashValue, indInfo->compLength); + TrieNode* start = (TrieNode*)indInfo->buckets; + if (NULL == start) + { + bIter->setHead(NULL); + return OK; + } + char **next = NULL; + int cnt = 0; + while(-1 != hashValue[cnt+1]) { + next = (char**)&(start->next_[hashValue[cnt]]); + if (! *next) + { + printError(ErrNotFound, "No trie node found \n"); + return ErrNotFound; + } + //traverse till the end + start = (TrieNode*) *next; + cnt++; + } + void **ptr = (void**)&(start->head_[hashValue[cnt]]); + IndexNode *head = (IndexNode*) *ptr; + if (!head) + { + bIter->setHead(head); + return OK; + } + bIter->setHead(head); + }else if (treeIndexScan == scanType_) { HashIndexInfo *hIdxInfo = (HashIndexInfo*)info; @@ -190,15 +228,15 @@ void* TupleIterator::next() predImpl->evaluateForTable(result, (char*)tuple); } } - }else if (hashIndexScan == scanType_) + }else if (hashIndexScan == scanType_ || trieIndexScan == scanType_) { //evaluate till it succeeds bool result = false; while (!result) { - HashIndexNode *node = bIter->next(); + IndexNode *node = bIter->next(); if (node == NULL) return NULL; - printDebug(DM_HashIndex, "next: returned HashIndexNode: %x", node); + printDebug(DM_HashIndex, "next: returned IndexNode: %x", node); tuple = node->ptrToTuple_; if(NULL == tuple) { printDebug(DM_HashIndex, "next::tuple is null"); -- 2.11.4.GIT