From 179ab4474078f10de8d48b3e010301e0033f5b2b Mon Sep 17 00:00:00 2001 From: prabatuty Date: Fri, 20 Jun 2008 04:38:33 +0000 Subject: [PATCH] 1501526 Composite primary keys --- src/odbc/Makefile | 18 +++--- src/server/CatalogTables.cxx | 35 ++++++++++++ src/server/DataType.cxx | 1 + src/server/DatabaseManagerImpl.cxx | 29 +++++++--- src/server/HashIndex.cxx | 110 +++++++++++++++++++++++-------------- src/server/TableImpl.cxx | 32 +++++++---- src/server/TupleIterator.cxx | 18 +++++- 7 files changed, 171 insertions(+), 72 deletions(-) diff --git a/src/odbc/Makefile b/src/odbc/Makefile index 3092657a..2c3f9936 100644 --- a/src/odbc/Makefile +++ b/src/odbc/Makefile @@ -77,14 +77,14 @@ HEADERS = $(noinst_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = ${SHELL} /home/csql/latest/csql/missing --run aclocal-1.9 +ACLOCAL = ${SHELL} /home/csql/latest/compkey/csql/missing --run aclocal-1.9 AMDEP_FALSE = # AMDEP_TRUE = -AMTAR = ${SHELL} /home/csql/latest/csql/missing --run tar +AMTAR = ${SHELL} /home/csql/latest/compkey/csql/missing --run tar AR = ar -AUTOCONF = ${SHELL} /home/csql/latest/csql/missing --run autoconf -AUTOHEADER = ${SHELL} /home/csql/latest/csql/missing --run autoheader -AUTOMAKE = ${SHELL} /home/csql/latest/csql/missing --run automake-1.9 +AUTOCONF = ${SHELL} /home/csql/latest/compkey/csql/missing --run autoconf +AUTOHEADER = ${SHELL} /home/csql/latest/compkey/csql/missing --run autoheader +AUTOMAKE = ${SHELL} /home/csql/latest/compkey/csql/missing --run automake-1.9 AWK = gawk CC = gcc CCDEPMODE = depmode=gcc3 @@ -94,7 +94,7 @@ CPPFLAGS = CXX = g++ CXXCPP = g++ -E CXXDEPMODE = depmode=gcc3 -CXXFLAGS = -g -I/opt/java/jdk1.6.0_04/include -I/opt/java/jdk1.6.0_04/include/linux +CXXFLAGS = -g -I/home/csql/jdk1.5.0_14/include -I/home/csql/jdk1.5.0_14/include/linux CYGPATH_W = echo DEFS = -DHAVE_CONFIG_H DEPDIR = .deps @@ -120,7 +120,7 @@ LIBS = LIBTOOL = $(SHELL) $(top_builddir)/libtool LN_S = ln -s LTLIBOBJS = -MAKEINFO = ${SHELL} /home/csql/latest/csql/missing --run makeinfo +MAKEINFO = ${SHELL} /home/csql/latest/compkey/csql/missing --run makeinfo OBJEXT = o PACKAGE = csql PACKAGE_BUGREPORT = @@ -167,7 +167,7 @@ host_vendor = pc htmldir = ${docdir} includedir = ${prefix}/include infodir = ${datarootdir}/info -install_sh = /home/csql/latest/csql/install-sh +install_sh = /home/csql/latest/compkey/csql/install-sh libdir = ${exec_prefix}/lib libexecdir = ${exec_prefix}/libexec localedir = ${datarootdir}/locale @@ -176,7 +176,7 @@ mandir = ${datarootdir}/man mkdir_p = mkdir -p -- oldincludedir = /usr/include pdfdir = ${docdir} -prefix = /home/csql/latest/csql/install +prefix = /home/csql/latest/compkey/csql/install program_transform_name = s,x,x, psdir = ${docdir} sbindir = ${exec_prefix}/sbin diff --git a/src/server/CatalogTables.cxx b/src/server/CatalogTables.cxx index 3ab29eba..032496a0 100644 --- a/src/server/CatalogTables.cxx +++ b/src/server/CatalogTables.cxx @@ -465,6 +465,41 @@ DbRetVal CatalogTableINDEXFIELD::getFieldNameAndType(void *index, return ErrNotExists; } +DbRetVal CatalogTableINDEXFIELD::getFieldInfo(void *index, FieldList &list) +{ + Chunk *ifChunk; + ifChunk = systemDatabase_->getSystemDatabaseChunk(IndexFieldTableId); + ChunkIterator ifIter = ifChunk->getIterator(); + void *data = NULL; + int rowCount =0; + while ((data = ifIter.nextElement())!= NULL) + { + if (((INDEXFIELD*)data)->indexPtr == index) + { + //add the information to the field list + FIELD *fTuple = 
(FIELD*)(((INDEXFIELD*)data)->fieldPtr); + FieldDef fldDef; + strcpy(fldDef.fldName_, fTuple->fldName_); + fldDef.fldName_[IDENTIFIER_LENGTH] = '\0'; + fldDef.type_ = fTuple->type_; + fldDef.length_ = fTuple->length_; + fldDef.isDefault_ = fTuple->isDefault_; + os::memcpy(fldDef.defaultValueBuf_, fTuple->defaultValueBuf_, + DEFAULT_VALUE_BUF_LENGTH); + fldDef.isNull_ = fTuple->isNull_; + fldDef.isUnique_ = fTuple->isUnique_; + fldDef.isPrimary_ = fTuple->isPrimary_; + list.append(fldDef); + } + rowCount++; + } + if (!rowCount) { + printError(ErrNotExists,"Index %x not exists in catalog table", index); + return ErrNotExists; + } + return OK; +} + DbRetVal CatalogTableUSER::insert(const char *name, const char *pass) { Chunk *tChunk = systemDatabase_->getSystemDatabaseChunk(UserTableId); diff --git a/src/server/DataType.cxx b/src/server/DataType.cxx index d5edd2ec..cfecce77 100644 --- a/src/server/DataType.cxx +++ b/src/server/DataType.cxx @@ -651,6 +651,7 @@ bool AllDataType::compareVal(void *val1, void *val2, ComparisionOp op, case typeString: result = AllDataType::compareStringVal(val1, val2, op); break; + case typeComposite: case typeBinary: result = AllDataType::compareBinaryVal(val1, val2, op, length); break; diff --git a/src/server/DatabaseManagerImpl.cxx b/src/server/DatabaseManagerImpl.cxx index 411d05f8..6b06e5e5 100644 --- a/src/server/DatabaseManagerImpl.cxx +++ b/src/server/DatabaseManagerImpl.cxx @@ -663,16 +663,31 @@ Table* DatabaseManagerImpl::openTable(const char *name) cIndex.getIndexPtrs(tptr, table->indexPtr_); for (int i =0 ; i < table->numIndexes_; i++ ) { - SingleFieldHashIndexInfo *hIdxInfo = new SingleFieldHashIndexInfo(); + HashIndexInfo *hIdxInfo = new HashIndexInfo(); CatalogTableINDEXFIELD cIndexField(systemDatabase_); - cIndexField.getFieldNameAndType(table->indexPtr_[i], hIdxInfo->fldName, - hIdxInfo->type); + cIndexField.getFieldInfo(table->indexPtr_[i], hIdxInfo->idxFldList); ChunkIterator citer = CatalogTableINDEX::getIterator(table->indexPtr_[i]); hIdxInfo->noOfBuckets = CatalogTableINDEX::getNoOfBuckets(table->indexPtr_[i]); + FieldIterator fIter = hIdxInfo->idxFldList.getIterator(); + bool firstFld = true; + while (fIter.hasElement()) + { + FieldDef def = fIter.nextElement(); + if (firstFld) + { + hIdxInfo->fldOffset = table->fldList_.getFieldOffset(def.fldName_); + hIdxInfo->type = table->fldList_.getFieldType(def.fldName_); + hIdxInfo->compLength = table->fldList_.getFieldLength(def.fldName_); + firstFld = false; + }else { + hIdxInfo->type = typeComposite; + hIdxInfo->compLength = hIdxInfo->compLength + + table->fldList_.getFieldLength(def.fldName_); + } + } + hIdxInfo->isUnique = CatalogTableINDEX::getUnique(table->indexPtr_[i]); hIdxInfo->buckets = (Bucket*)citer.nextElement(); - hIdxInfo->offset = table->fldList_.getFieldOffset(hIdxInfo->fldName); - hIdxInfo->length = table->fldList_.getFieldLength(hIdxInfo->fldName); table->idxInfo[i] = (IndexInfo*) hIdxInfo; } systemDatabase_->releaseDatabaseMutex(); @@ -764,9 +779,6 @@ DbRetVal DatabaseManagerImpl::createHashIndex(const char *indName, const char *t { printError(ErrBadCall, "No Field name specified"); return ErrBadCall; - }else if (totFlds >1) { - printError(ErrNotYet, "Composite keys not supported"); - return ErrNotYet; } void *tptr =NULL; void *chunk = NULL; @@ -861,7 +873,6 @@ DbRetVal DatabaseManagerImpl::createHashIndex(const char *indName, const char *t return ErrSysInternal; } - //add row to INDEX void *tupleptr = NULL; CatalogTableINDEX cIndex(systemDatabase_); diff --git 
a/src/server/HashIndex.cxx b/src/server/HashIndex.cxx index 0d04278e..735902fc 100644 --- a/src/server/HashIndex.cxx +++ b/src/server/HashIndex.cxx @@ -44,8 +44,28 @@ unsigned int hashString(char *strVal) } return hval; } +unsigned int hashBinary(char *strVal, int length) +{ + unsigned int hval, g; + hval = 0; + char *str =strVal; + int iter = 0; + while (iter != length) + { + hval <<= 4; + hval += (unsigned int) *str++; + g = hval & ((unsigned int) 0xf << (32 - 4)); + if (g != 0) + { + hval ^= g >> (32 - 8); + hval ^= g; + } + iter++; + } + return hval; +} -unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets) +unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuckets, int length) { switch(type) @@ -123,9 +143,11 @@ unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuck unsigned int val = hashString((char*)key); return val % noOfBuckets; } + case typeComposite: case typeBinary: { - //TODO + unsigned int val = hashBinary((char*)key, length); + return val % noOfBuckets; } default: { @@ -135,25 +157,21 @@ unsigned int HashIndex::computeHashBucket(DataType type, void *key, int noOfBuck return -1; } -//TODO::composite keys are not supported currently DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag) { - SingleFieldHashIndexInfo *info = (SingleFieldHashIndexInfo*) indInfo; + HashIndexInfo *info = (HashIndexInfo*) indInfo; INDEX *iptr = (INDEX*)indexPtr; DbRetVal rc = OK; - DataType type = info->type; - char *name = info->fldName; - int offset = info->offset; int noOfBuckets = info->noOfBuckets; - int length = info->length; - - printDebug(DM_HashIndex, "Inserting hash index node for field %s", name); + int offset = info->fldOffset; + DataType type = info->type; + printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_); ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr); Bucket* buckets = (Bucket*)citer.nextElement(); void *keyPtr =(void*)((char*)tuple + offset); int bucketNo = computeHashBucket(type, - keyPtr, noOfBuckets); + keyPtr, noOfBuckets, info->compLength); printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo); Bucket *bucket = &(buckets[bucketNo]); @@ -175,7 +193,7 @@ DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde { bucketTuple = node->ptrToTuple_; if (AllDataType::compareVal((void*)((char*)bucketTuple +offset), - (void*)((char*)tuple +offset), OpEquals,type, length)) + (void*)((char*)tuple +offset), OpEquals,type, info->compLength)) { printError(ErrUnique, "Unique key violation"); bucket->mutex_.releaseLock(tbl->db_->procSlot); @@ -228,23 +246,20 @@ DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde } -//TODO::composite keys are not supported currently DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag) { INDEX *iptr = (INDEX*)indexPtr; - SingleFieldHashIndexInfo *info = (SingleFieldHashIndexInfo*) indInfo; + HashIndexInfo *info = (HashIndexInfo*) indInfo; DataType type = info->type; - char *name = info->fldName; - int offset = info->offset; + int offset = info->fldOffset; int noOfBuckets = info->noOfBuckets; - printDebug(DM_HashIndex, "Removing hash index node for field %s", name); ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr); Bucket* buckets = (Bucket*)citer.nextElement(); void *keyPtr =(void*)((char*)tuple + offset); - int bucket = 
HashIndex::computeHashBucket(type, keyPtr, noOfBuckets); + int bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength); Bucket *bucket1 = &buckets[bucket]; @@ -276,54 +291,68 @@ DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde } } bucket1->mutex_.releaseLock(tbl->db_->procSlot); + return rc; } -//TODO::composite keys are not supported currently DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, IndexInfo *indInfo, void *tuple, bool undoFlag) { INDEX *iptr = (INDEX*)indexPtr; - SingleFieldHashIndexInfo *info = (SingleFieldHashIndexInfo*) indInfo; + HashIndexInfo *info = (HashIndexInfo*) indInfo; DataType type = info->type; - char *name = info->fldName; - int offset = info->offset; + int offset = info->fldOffset; int noOfBuckets = info->noOfBuckets; - printDebug(DM_HashIndex, "Updating hash index node for field %s", name); - //check whether the index key is updated or not //if it is not updated return from here void *keyPtr =(void*)((char*)tuple + offset); + char *kPtr= (char*)keyPtr; //Iterate through the bind list and check - FieldIterator fldIter = tbl->fldList_.getIterator(); - void *newKey = NULL; - while (fldIter.hasElement()) + FieldIterator idxFldIter = info->idxFldList.getIterator(); + char *keyBindBuffer = (char*) malloc(info->compLength); + void *keyStartBuffer = (void*) keyBindBuffer; + bool keyUpdated = false; + while (idxFldIter.hasElement()) { + FieldDef idef = idxFldIter.nextElement(); + FieldIterator fldIter = tbl->fldList_.getIterator(); + while (fldIter.hasElement()) + { FieldDef def = fldIter.nextElement(); - if (0 == strcmp(def.fldName_, name)) + if (0 == strcmp(def.fldName_, idef.fldName_)) { - if (NULL == def.bindVal_) - return OK; - bool result = AllDataType::compareVal(keyPtr, def.bindVal_, - OpEquals, def.type_, def.length_); - if (result) return OK; else newKey = def.bindVal_; + if (NULL != def.bindVal_) { + AllDataType::copyVal(keyBindBuffer, def.bindVal_, + def.type_, def.length_); + keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_, + def.length_); + keyUpdated = true; + break; + } } + } } - printDebug(DM_HashIndex, "Updating hash index node: Key value is updated"); - - if (newKey == NULL) + if (!keyUpdated) { - printError(ErrSysInternal,"Internal Error:: newKey is Null"); - return ErrSysInternal; + //printf("PRABA::key not updated\n"); + free(keyStartBuffer); + return OK; } + //printf("PRABA::it is wrong coming here\n"); + bool result = AllDataType::compareVal(kPtr, keyStartBuffer, + OpEquals, info->type, info->compLength); + if (result) return OK; + + printDebug(DM_HashIndex, "Updating hash index node: Key value is updated"); + ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr); Bucket* buckets = (Bucket*)citer.nextElement(); //remove the node whose key is updated int bucketNo = computeHashBucket(type, - keyPtr, noOfBuckets); + keyPtr, noOfBuckets, info->compLength); printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo); Bucket *bucket = &buckets[bucketNo]; @@ -338,7 +367,7 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde } //insert node for the updated key value int newBucketNo = computeHashBucket(type, - newKey, noOfBuckets); + keyStartBuffer, noOfBuckets); printDebug(DM_HashIndex, "Updating hash index node: Bucket for new value is %d", newBucketNo); Bucket *bucket1 = &buckets[newBucketNo]; @@ -413,5 +442,6 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void 
*indexPtr, Inde } bucket1->mutex_.releaseLock(tbl->db_->procSlot); bucket->mutex_.releaseLock(tbl->db_->procSlot); + return rc; } diff --git a/src/server/TableImpl.cxx b/src/server/TableImpl.cxx index 0b7ebbce..5d7d98e5 100644 --- a/src/server/TableImpl.cxx +++ b/src/server/TableImpl.cxx @@ -173,16 +173,26 @@ DbRetVal TableImpl::createPlan() printDebug(DM_Predicate, "predicate does not involve NOT , OR operator"); for (int i =0; i < numIndexes_; i++) { - char *fName = ((SingleFieldHashIndexInfo*)idxInfo[i])->fldName; - if (pred->pointLookupInvolved(fName)) + HashIndexInfo* info = (HashIndexInfo*) idxInfo[i]; + FieldIterator iter = info->idxFldList.getIterator(); + while(iter.hasElement()) { - printDebug(DM_Predicate, "point lookup involved for field %s",fName); - scanType_ = hashIndexScan; - useIndex_ = i; - isPlanCreated = true; - return OK; - } - } + FieldDef def = iter.nextElement(); + if (pred->pointLookupInvolved(def.fldName_)) + { + printDebug(DM_Predicate, "point lookup involved for field %s",def.fldName_); + scanType_ = hashIndexScan; + isPlanCreated = true; + useIndex_ = i; + } + else + { + useIndex_ = -1; + break; + } + }//while iter.hasElement() + if (useIndex_ != -1) return OK; + }//for } } scanType_ = fullTableScan; @@ -667,7 +677,7 @@ void TableImpl::printSQLIndexString() INDEX *iptr = (INDEX*) indexPtr_[i]; cIndexField.getFieldNameAndType((void*)iptr, fldName, type); printf("CREATE INDEX %s on %s ( %s ) ", iptr->indName_, getName(), fldName); - if (((SingleFieldHashIndexInfo*) idxInfo[i])->isUnique) printf(" UNIQUE;\n"); else printf(";\n"); + if (((HashIndexInfo*) idxInfo[i])->isUnique) printf(" UNIQUE;\n"); else printf(";\n"); } } @@ -680,7 +690,7 @@ DbRetVal TableImpl::updateIndexNode(Transaction *tr, void *indexPtr, IndexInfo * //TODO::currently it updates irrespective of whether the key changed or not //because of this commenting the whole index update code. 
relook at it and uncomment - //ret = idx->update(this, tr, indexPtr, info, tuple, undoFlag); + ret = idx->update(this, tr, indexPtr, info, tuple, undoFlag); return ret; } diff --git a/src/server/TupleIterator.cxx b/src/server/TupleIterator.cxx index da608696..2ef266d4 100644 --- a/src/server/TupleIterator.cxx +++ b/src/server/TupleIterator.cxx @@ -29,13 +29,24 @@ DbRetVal TupleIterator::open() *cIter = ((Chunk*)chunkPtr_)->getIterator(); }else if (hashIndexScan == scanType_) { - SingleFieldHashIndexInfo *hIdxInfo = (SingleFieldHashIndexInfo*)info; + HashIndexInfo *hIdxInfo = (HashIndexInfo*)info; PredicateImpl *predImpl = (PredicateImpl*) pred_; bool isPtr = false; - void *keyPtr = (void*)predImpl->valPtrForIndexField(hIdxInfo->fldName); + FieldIterator iter = hIdxInfo->idxFldList.getIterator(); + char *keyBuffer; + keyBuffer = (char*) malloc(hIdxInfo->compLength); + void *keyStartBuffer = (void*) keyBuffer, *keyPtr; + while(iter.hasElement()) + { + FieldDef def = iter.nextElement(); + keyPtr = (void*)predImpl->valPtrForIndexField(def.fldName_); + AllDataType::copyVal(keyBuffer, keyPtr, def.type_, def.length_); + keyBuffer = keyBuffer + AllDataType::size(def.type_, def.length_); + } int bucketNo = HashIndex::computeHashBucket(hIdxInfo->type, - keyPtr, hIdxInfo->noOfBuckets); + keyStartBuffer, hIdxInfo->noOfBuckets, hIdxInfo->compLength); + free(keyStartBuffer); Bucket *bucket = &(hIdxInfo->buckets[bucketNo]); int ret = bucket->mutex_.getLock(procSlot); if (ret != 0) @@ -53,6 +64,7 @@ DbRetVal TupleIterator::open() printDebug(DM_HashIndex, "open:head for bucket %x is :%x", bucket, head); bIter = new BucketIter(head); bucket->mutex_.releaseLock(procSlot); + } return OK; } -- 2.11.4.GIT
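
The hunks above lean on a small set of composite-key building blocks: pack the indexed field values into one contiguous buffer, hash that buffer with its total length (compLength), compare it byte-wise, and let the planner pick the index only when every field is covered. The sketches below restate those steps outside the patch. First, the packing step used by TupleIterator::open and HashIndex::update: copy each indexed field into a single key buffer in index-definition order. This is a hedged sketch with a simplified KeyPart descriptor standing in for CSQL's FieldDef/FieldList; the names packCompositeKey and KeyPart are illustrative, not CSQL API.

#include <cstdlib>
#include <cstring>
#include <vector>

// Stand-in for one indexed field: where its value lives in the tuple and
// how many bytes it occupies (CSQL derives this from FieldDef entries).
struct KeyPart {
    int offset;   // field offset inside the tuple
    int length;   // field length in bytes
};

// Concatenate the indexed fields of a tuple into one contiguous key buffer,
// in index-definition order. compLength is set to the summed field lengths.
// The caller owns the buffer and must free() it, mirroring the
// malloc()/free() pairing in the patch.
char *packCompositeKey(const void *tuple, const std::vector<KeyPart> &parts,
                       int &compLength)
{
    compLength = 0;
    for (const KeyPart &p : parts) compLength += p.length;

    char *key = (char *) std::malloc(compLength);
    if (key == NULL) return NULL;

    char *cursor = key;
    for (const KeyPart &p : parts) {
        std::memcpy(cursor, (const char *) tuple + p.offset, p.length);
        cursor += p.length;
    }
    return key;
}

Note that the patch packs from bound values (def.bindVal_) in update() and from predicate values in TupleIterator::open(); tuple offsets are used here only to keep the sketch self-contained.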
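
Once the key is packed, bucket selection reuses the shift-and-fold scheme of hashString(), but bounded by an explicit length so embedded NUL bytes and non-string fields are handled; that is what the new hashBinary()/typeComposite path does. Below is a minimal standalone sketch of the same pattern; hashCompositeKey and bucketFor are illustrative names, and bit-for-bit agreement with CSQL's hashBinary() is not claimed.

#include <cstdio>
#include <cstring>

// Length-bounded shift-and-fold hash over an opaque byte buffer.
unsigned int hashCompositeKey(const char *key, int length)
{
    unsigned int hval = 0, g;
    for (int i = 0; i < length; i++)
    {
        hval <<= 4;
        hval += (unsigned char) key[i];
        g = hval & ((unsigned int) 0xf << (32 - 4));
        if (g != 0)
        {
            hval ^= g >> (32 - 8);
            hval ^= g;
        }
    }
    return hval;
}

// Bucket selection mirrors computeHashBucket(): hash modulo bucket count.
int bucketFor(const char *key, int length, int noOfBuckets)
{
    return (int) (hashCompositeKey(key, length) % (unsigned int) noOfBuckets);
}

int main()
{
    // Toy composite key: an int field followed by a CHAR(8) field.
    char key[12];
    std::memset(key, 0, sizeof(key));
    int id = 42;
    std::memcpy(key, &id, sizeof(id));
    std::memcpy(key + sizeof(id), "alice", 5);
    std::printf("bucket = %d\n", bucketFor(key, (int) sizeof(key), 1024));
    return 0;
}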
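
The DataType.cxx change routes typeComposite through the same branch as typeBinary, so equality on a composite key (for example the unique-key check in HashIndex::insert) reduces to a length-bounded byte comparison over compLength bytes. A minimal sketch, assuming plain memcmp semantics are an acceptable stand-in for AllDataType::compareBinaryVal:

#include <cstring>

// Compare the packed composite keys of two tuples: both start at the same
// field offset and span compLength bytes. Sketch only; CSQL goes through
// AllDataType::compareVal with OpEquals and typeComposite.
bool compositeKeysEqual(const void *tupleA, const void *tupleB,
                        int fldOffset, int compLength)
{
    return std::memcmp((const char *) tupleA + fldOffset,
                       (const char *) tupleB + fldOffset,
                       compLength) == 0;
}

This byte-wise view also shows why insert() and remove() can hash compLength bytes starting at the first field's offset: that range lines up with the key packed by update() and TupleIterator::open() only when the indexed fields sit adjacently in the tuple, which the patch appears to assume.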
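
Finally, the TableImpl::createPlan() hunk encodes the planner rule for composite indexes: a hash index is chosen only when the predicate supplies a point (equality) lookup for every field in the index's field list; one uncovered field rules the index out, and if no index qualifies the scan falls back to a full table scan. A compact sketch of that selection rule, with hypothetical Predicate and HashIndexDesc stand-ins for CSQL's PredicateImpl and HashIndexInfo:

#include <set>
#include <string>
#include <vector>

// Hypothetical predicate: just the set of fields it point-looks-up.
struct Predicate {
    std::set<std::string> pointLookupFields;
    bool pointLookupInvolved(const std::string &fld) const {
        return pointLookupFields.count(fld) != 0;
    }
};

// Hypothetical index descriptor: its fields in definition order.
struct HashIndexDesc {
    std::vector<std::string> fields;
};

// Return the position of the first index whose fields are all covered by
// point lookups, or -1 to signal a full table scan.
int chooseHashIndex(const Predicate &pred,
                    const std::vector<HashIndexDesc> &indexes)
{
    for (size_t i = 0; i < indexes.size(); i++) {
        bool allCovered = true;
        for (const std::string &fld : indexes[i].fields) {
            if (!pred.pointLookupInvolved(fld)) { allCovered = false; break; }
        }
        if (allCovered) return (int) i;
    }
    return -1;   // corresponds to scanType_ = fullTableScan in the patch
}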