From 98afb4425cb9c68fa23d6b7b0d4bc6bfc10e1348 Mon Sep 17 00:00:00 2001
From: kishoramballi
Date: Wed, 7 Jan 2009 11:27:01 +0000
Subject: [PATCH] Fix for Bug # 2483638

---
 include/Field.h               |   2 +
 include/Info.h                |   4 +-
 src/sql/UpdStatement.cxx      |   4 ++
 src/storage/CatalogTables.cxx |   4 +-
 src/storage/FieldList.cxx     |   7 +-
 src/storage/HashIndex.cxx     | 152 ++++++++++++++++++++++++++++--------------
 src/storage/TableDef.cxx      |   1 +
 src/storage/TupleIterator.cxx |  10 +--
 8 files changed, 125 insertions(+), 59 deletions(-)

diff --git a/include/Field.h b/include/Field.h
index 7436fad2..72239507 100644
--- a/include/Field.h
+++ b/include/Field.h
@@ -38,6 +38,7 @@ class FieldDef
     void init() {
         type_ = typeUnknown;
         length_ = 0;
+        offset_ = 0;
         bindVal_ = NULL;
         isDefault_ = false;
         isNull_ = false;
@@ -48,6 +49,7 @@ class FieldDef
     char fldName_[IDENTIFIER_LENGTH];
     DataType type_;
     size_t length_;
+    size_t offset_;
     //currently default value is supported for string and binary
     //less than length 32 bytes
     char defaultValueBuf_[DEFAULT_VALUE_BUF_LENGTH];
diff --git a/include/Info.h b/include/Info.h
index 7d11a661..266a909f 100644
--- a/include/Info.h
+++ b/include/Info.h
@@ -71,7 +71,6 @@ class TableDef
   private:
     FieldList fldList;
     int fldCount;
-
   public:
     TableDef() { fldCount = 0; }
     ~TableDef();
@@ -118,11 +117,12 @@
     char fldName[IDENTIFIER_LENGTH];
     DataType type;
     size_t length;
-    int offset;
+    size_t offset;
     char defaultValueBuf[DEFAULT_VALUE_BUF_LENGTH];
     bool isNull;
     bool isPrimary;
     bool isDefault;
+    bool isUnique;
 };


diff --git a/src/sql/UpdStatement.cxx b/src/sql/UpdStatement.cxx
index 1afd5d0d..e36b28c4 100644
--- a/src/sql/UpdStatement.cxx
+++ b/src/sql/UpdStatement.cxx
@@ -453,6 +453,10 @@ DbRetVal UpdStatement::resolveForAssignment()
                    value->fldName);
         return ErrSyntaxError;
     }
+    if (fInfo->isUnique) {
+        printError(ErrUnique, "Unique field %s cannot be updated", value->fldName);
+        return ErrUnique;
+    }
     value->type = fInfo->type;
     value->length = fInfo->length;
     value->isNullable = fInfo->isNull;
diff --git a/src/storage/CatalogTables.cxx b/src/storage/CatalogTables.cxx
index 12e497b0..3a761c3c 100644
--- a/src/storage/CatalogTables.cxx
+++ b/src/storage/CatalogTables.cxx
@@ -127,7 +127,7 @@ DbRetVal CatalogTableFIELD::insert(FieldIterator &iter, int tblID, void *tptr)
     fldInfo->tblPtr_ = tptr;
     fldInfo->type_ = fDef.type_;
     fldInfo->length_ = fDef.length_;
-    fldInfo->offset_ = 0; //TODO
+    fldInfo->offset_ = fDef.offset_;
     os::memcpy(fldInfo->defaultValueBuf_, fDef.defaultValueBuf_,
                                        DEFAULT_VALUE_BUF_LENGTH);
     fldInfo->isNull_ = fDef.isNull_;
@@ -175,6 +175,7 @@ void CatalogTableFIELD::getFieldInfo(void* tptr, FieldList &list)
         fldDef.fldName_[IDENTIFIER_LENGTH] = '\0';
         fldDef.type_ = fTuple->type_;
         fldDef.length_ = fTuple->length_;
+        fldDef.offset_ = fTuple->offset_;
         fldDef.isDefault_ = fTuple->isDefault_;
         os::memcpy(fldDef.defaultValueBuf_, fTuple->defaultValueBuf_,
                                        DEFAULT_VALUE_BUF_LENGTH);
@@ -536,6 +537,7 @@ DbRetVal CatalogTableINDEXFIELD::getFieldInfo(void *index, FieldList &list)
         fldDef.fldName_[IDENTIFIER_LENGTH] = '\0';
         fldDef.type_ = fTuple->type_;
         fldDef.length_ = fTuple->length_;
+        fldDef.offset_ = fTuple->offset_;
         fldDef.isDefault_ = fTuple->isDefault_;
         os::memcpy(fldDef.defaultValueBuf_, fTuple->defaultValueBuf_,
                                        DEFAULT_VALUE_BUF_LENGTH);
diff --git a/src/storage/FieldList.cxx b/src/storage/FieldList.cxx
index 8c4b3ab0..c2021244 100644
--- a/src/storage/FieldList.cxx
+++ b/src/storage/FieldList.cxx
@@ -138,11 +138,12 @@ DbRetVal FieldList::getFieldInfo(const char *fldName, FieldInfo *&info)
             strcpy(info->fldName , iter->fldDef.fldName_);
             info->length = iter->fldDef.length_;
             info->type = iter->fldDef.type_;
-            info->offset = getFieldOffset(fldName);
+            info->offset = iter->fldDef.offset_;
             info->isDefault = iter->fldDef.isDefault_;
             strcpy(info->defaultValueBuf, iter->fldDef.defaultValueBuf_);
             info->isNull = iter->fldDef.isNull_;
             info->isPrimary = iter->fldDef.isPrimary_;
+            info->isUnique = iter->fldDef.isUnique_;
             return OK;
         }
         iter = iter ->next;
@@ -210,8 +211,8 @@ int FieldList::getTupleSize()
     {
         offset = offset + os::align(iter->fldDef.length_);
         iter = iter ->next;
-    }
-    return offset;
+    }
+    return offset;
 }


diff --git a/src/storage/HashIndex.cxx b/src/storage/HashIndex.cxx
index 8237e2c6..36b59d78 100644
--- a/src/storage/HashIndex.cxx
+++ b/src/storage/HashIndex.cxx
@@ -37,13 +37,14 @@ unsigned int hashString(char *strVal)
         hval += (unsigned int) *str++;
         g = hval & ((unsigned int) 0xf << (32 - 4));
         if (g != 0)
-        {
-            hval ^= g >> (32 - 8);
-            hval ^= g;
-        }
+        {
+            hval ^= g >> (32 - 8);
+            hval ^= g;
+        }
     }
     return hval;
 }
+
 unsigned int hashBinary(char *strVal, int length)
 {
     unsigned int hval, g;
@@ -56,10 +57,10 @@ unsigned int hashBinary(char *strVal, int length)
         hval += (unsigned int) *str++;
         g = hval & ((unsigned int) 0xf << (32 - 4));
         if (g != 0)
-        {
-            hval ^= g >> (32 - 8);
-            hval ^= g;
-        }
+        {
+            hval ^= g >> (32 - 8);
+            hval ^= g;
+        }
         iter++;
     }
     return hval;
 }
@@ -165,13 +166,26 @@ DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde
     int noOfBuckets = info->noOfBuckets;
     int offset = info->fldOffset;
     DataType type = info->type;
+    char *keyBuffer = (char*) malloc(info->compLength);
+    void *keyStartBuffer = keyBuffer, *keyPtr;
+    FieldIterator iter = info->idxFldList.getIterator();
+    while(iter.hasElement())
+    {
+        FieldDef def = iter.nextElement();
+        keyPtr = (char *)tuple + def.offset_;
+        AllDataType::copyVal(keyBuffer, keyPtr, def.type_, def.length_);
+        keyBuffer = keyBuffer + AllDataType::size(def.type_, def.length_);
+    }
+
     printDebug(DM_HashIndex, "Inserting hash index node for %s", iptr->indName_);
     ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
     Bucket* buckets = (Bucket*)citer.nextElement();
-    void *keyPtr =(void*)((char*)tuple + offset);
-
-    int bucketNo = computeHashBucket(type,
-                        keyPtr, noOfBuckets, info->compLength);
+    keyPtr =(void*)((char*)tuple + offset);
+    int bucketNo = 0;
+    if (type == typeComposite)
+        bucketNo = computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
+    else
+        bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
     printDebug(DM_HashIndex, "HashIndex insert bucketno %d", bucketNo);

     Bucket *bucket = &(buckets[bucketNo]);
@@ -189,11 +203,22 @@ DbRetVal HashIndex::insert(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde
         HashIndexNode *node;
         void *bucketTuple;
         printDebug(DM_HashIndex, "HashIndex insert Checking for unique");
+        bool res = false;
+
         while((node = iter.next()) != NULL)
         {
             bucketTuple = node->ptrToTuple_;
-            if (AllDataType::compareVal((void*)((char*)bucketTuple +offset),
-                        (void*)((char*)tuple +offset), OpEquals,type, info->compLength))
+            if (type == typeComposite) {
+                FieldIterator fldIter = info->idxFldList.getIterator();
+                int i = 0;
+                while (fldIter.hasElement()) {
+                    FieldDef def = fldIter.nextElement();
+                    res = AllDataType::compareVal((char *)bucketTuple + def.offset_, (char *)tuple + def.offset_, OpEquals, def.type_, def.length_);
+                    if (!res) break;
+                }
+            }
+            else res = AllDataType::compareVal((void*)((char*)bucketTuple +offset),
+                         (void*)((char*)tuple +offset), OpEquals,type, info->compLength);
+            if (res)
             {
                 printError(ErrUnique, "Unique key violation");
                 bucket->mutex_.releaseLock(tbl->db_->procSlot);
@@ -257,9 +282,23 @@ DbRetVal HashIndex::remove(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde
     ChunkIterator citer = CatalogTableINDEX::getIterator(indexPtr);
     Bucket* buckets = (Bucket*)citer.nextElement();
-    void *keyPtr =(void*)((char*)tuple + offset);
-    int bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
+    char *keyBuffer = (char*) malloc(info->compLength);
+    void *keyStartBuffer = keyBuffer, *keyPtr;
+    FieldIterator iter = info->idxFldList.getIterator();
+    while(iter.hasElement())
+    {
+        FieldDef def = iter.nextElement();
+        keyPtr = (char *)tuple + def.offset_;
+        AllDataType::copyVal(keyBuffer, keyPtr, def.type_, def.length_);
+        keyBuffer = keyBuffer + AllDataType::size(def.type_, def.length_);
+    }
+
+    keyPtr =(void*)((char*)tuple + offset);
+    int bucket = 0;
+    if (type == typeComposite)
+        bucket = HashIndex::computeHashBucket(type, keyStartBuffer, noOfBuckets, info->compLength);
+    else bucket = HashIndex::computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);

     Bucket *bucket1 = &buckets[bucket];
@@ -311,52 +350,65 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde
     //if it is not updated return from here
     void *keyPtr =(void*)((char*)tuple + offset);
     char *kPtr= (char*)keyPtr;
+
+    //creating old key value buffer for composite primary keys
+    char *oldKeyBuffer = (char*) malloc(info->compLength);
+    void *oldKeyStartBuffer = oldKeyBuffer;
+    FieldIterator iter = info->idxFldList.getIterator();
+    while(iter.hasElement()) {
+        FieldDef def = iter.nextElement();
+        keyPtr = (char *)tuple + def.offset_;
+        AllDataType::copyVal(oldKeyBuffer, keyPtr, def.type_, def.length_);
+        oldKeyBuffer = oldKeyBuffer + AllDataType::size(def.type_, def.length_);
+    }
+
+    keyPtr = (void *) kPtr;
     //Iterate through the bind list and check
     FieldIterator idxFldIter = info->idxFldList.getIterator();
     char *keyBindBuffer ;
-    if(type==typeBinary)
-        keyBindBuffer = (char*) malloc(2 * info->compLength);
-    else
-        keyBindBuffer = (char*) malloc(info->compLength);
+    if(type==typeBinary) keyBindBuffer = (char*) malloc(2 * info->compLength);
+    else keyBindBuffer = (char*) malloc(info->compLength);
     void *keyStartBuffer = (void*) keyBindBuffer;
     bool keyUpdated = false;
-    while (idxFldIter.hasElement())
-    {
-        FieldDef idef = idxFldIter.nextElement();
-        FieldIterator fldIter = tbl->fldList_.getIterator();
-        while (fldIter.hasElement())
-        {
-            FieldDef def = fldIter.nextElement();
-            if (0 == strcmp(def.fldName_, idef.fldName_))
-            {
-                if (NULL != def.bindVal_) {
-                    if(type==typeBinary){
-                        AllDataType::copyVal(keyBindBuffer, def.bindVal_,
-                                             def.type_, 2*def.length_);
-                        keyStartBuffer=calloc(1,info->compLength);
-                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
-                        free(keyBindBuffer);
-                    }else
-                    {
-                        AllDataType::copyVal(keyBindBuffer, def.bindVal_,
-                                             def.type_, def.length_);
-                        keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_,
-                                                        def.length_);
-                    }
+
+    while (idxFldIter.hasElement()) {
+        FieldDef idef = idxFldIter.nextElement();
+        FieldIterator fldIter = tbl->fldList_.getIterator();
+        while (fldIter.hasElement()) {
+            FieldDef def = fldIter.nextElement();
+            if (0 == strcmp(def.fldName_, idef.fldName_)) {
+                if (NULL != def.bindVal_) {
+                    if(type==typeBinary) {
+                        AllDataType::copyVal(keyBindBuffer, def.bindVal_,
+                                             def.type_, 2*def.length_);
+                        keyStartBuffer=calloc(1,info->compLength);
+                        AllDataType::convertToBinary(keyStartBuffer, keyBindBuffer, typeString, info->compLength);
+                        free(keyBindBuffer);
+                    } else {
+                        AllDataType::copyVal(keyBindBuffer, def.bindVal_,
+                                             def.type_, def.length_);
+                        keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_, def.length_);
+                    }
+                } else {
+                    AllDataType::copyVal(keyBindBuffer, (char *) tuple + def.offset_, def.type_, def.length_);
+                    keyBindBuffer = keyBindBuffer + AllDataType::size(def.type_, def.length_);
+                }
                 keyUpdated = true;
                 break;
             }
         }
-        }
     }
-    if (!keyUpdated)
-    {
+    if (!keyUpdated) {
         //printf("PRABA::key not updated\n");
         free(keyStartBuffer);
         return OK;
     }
     //printf("PRABA::it is wrong coming here\n");
-    bool result = AllDataType::compareVal(kPtr, keyStartBuffer,
+    bool result = false;
+    if (type == typeComposite)
+        result = AllDataType::compareVal(oldKeyStartBuffer, keyStartBuffer,
+                                         OpEquals, info->type, info->compLength);
+    else result = AllDataType::compareVal(keyPtr, keyStartBuffer,
                                  OpEquals, info->type, info->compLength);
     if (result) return OK;
     printDebug(DM_HashIndex, "Updating hash index node: Key value is updated");
@@ -366,8 +418,10 @@ DbRetVal HashIndex::update(TableImpl *tbl, Transaction *tr, void *indexPtr, Inde
     Bucket* buckets = (Bucket*)citer.nextElement();
     //remove the node whose key is updated
-    int bucketNo = computeHashBucket(type,
-                        keyPtr, noOfBuckets, info->compLength);
+    int bucketNo = 0;
+    if (type == typeComposite)
+        bucketNo = computeHashBucket(type, oldKeyStartBuffer, noOfBuckets, info->compLength);
+    else bucketNo = computeHashBucket(type, keyPtr, noOfBuckets, info->compLength);
     printDebug(DM_HashIndex, "Updating hash index node: Bucket for old value is %d", bucketNo);
     Bucket *bucket = &buckets[bucketNo];
diff --git a/src/storage/TableDef.cxx b/src/storage/TableDef.cxx
index e8f9847f..be730d61 100644
--- a/src/storage/TableDef.cxx
+++ b/src/storage/TableDef.cxx
@@ -82,6 +82,7 @@ int TableDef::addField(const char *name, DataType type, size_t length,
         fldDef.length_ = AllDataType::size(type);
         break;
     }
+    fldDef.offset_ = fldList.getTupleSize();
     int ret = fldList.append(fldDef);
     if (0 == ret) fldCount++;
     return ret;
diff --git a/src/storage/TupleIterator.cxx b/src/storage/TupleIterator.cxx
index 8fb0b767..ba8e87ad 100644
--- a/src/storage/TupleIterator.cxx
+++ b/src/storage/TupleIterator.cxx
@@ -34,6 +34,7 @@ DbRetVal TupleIterator::open()
     bool isPtr = false;
     FieldIterator iter = hIdxInfo->idxFldList.getIterator();
     char *keyBuffer;
+    int offset = hIdxInfo->fldOffset;
     keyBuffer = (char*) malloc(hIdxInfo->compLength);
     void *keyStartBuffer = (void*) keyBuffer, *keyPtr;
     while(iter.hasElement())
@@ -43,8 +44,11 @@
         AllDataType::copyVal(keyBuffer, keyPtr, def.type_, def.length_);
         keyBuffer = keyBuffer + AllDataType::size(def.type_, def.length_);
     }
-
-    int bucketNo = HashIndex::computeHashBucket(hIdxInfo->type,
+    int bucketNo = 0;
+    if (hIdxInfo->type == typeComposite)
+        bucketNo = HashIndex::computeHashBucket(hIdxInfo->type,
+              (char *)keyStartBuffer, hIdxInfo->noOfBuckets, hIdxInfo->compLength);
+    else bucketNo = HashIndex::computeHashBucket(hIdxInfo->type,
                   keyStartBuffer, hIdxInfo->noOfBuckets, hIdxInfo->compLength);
     free(keyStartBuffer);
     Bucket *bucket = &(hIdxInfo->buckets[bucketNo]);
@@ -84,8 +88,6 @@ DbRetVal TupleIterator::open()
     tIter->setFldOffset(hIdxInfo->fldOffset);
     tIter->setTypeLength(hIdxInfo->type, hIdxInfo->compLength);
     }
-
-
     return OK;
 }
--
2.11.4.GIT