diff --git a/dalmdl/corefdb/bucket.go b/dalmdl/corefdb/bucket.go
deleted file mode 100644
index ec30d4911825af9b2256a7c5ca7ce055d33e8e6e..0000000000000000000000000000000000000000
--- a/dalmdl/corefdb/bucket.go
+++ /dev/null
@@ -1,2460 +0,0 @@
-package corefdb
-
-import (
-	"errors"
-	"io"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/lazywriter"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl/filepack"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
-
-	"github.com/tidwall/sjson"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
-
-	"github.com/tidwall/buntdb"
-	"github.com/tidwall/gjson"
-)
-
-const (
-	lineBreak = "\r\n"
-
-	// BucketTypeSimple -
-	BucketTypeSimple = "Normal"
-	// BucketTypePack -
-	BucketTypePack = "Pack"
-	// BucketTypeAppend -
-	BucketTypeAppend = "Append"
-	// BucketTypeMixed -
-	BucketTypeMixed = "Mixed"
-	// BucketTypeAsset -
-	BucketTypeAsset = "Asset"
-	// IndexKeyValSeperator -
-	IndexKeyValSeperator = "="
-	// FileType - represents key for type of file. Used whenever we need to set file type field in json
-	FileType = "fileType"
-	// FileTypeAsset - represents media file type
-	FileTypeAsset = "Asset"
-	// FileTypeFDBIndex -
-	FileTypeFDBIndex = "FDBIndex"
-
-	MigrationTypeUpdate     = "MigrationTypeUpdate"
-	MigrationTypeReplace    = "MigrationTypeReplace"
-	MigrationTypeKeyword    = "migrationType"
-	MigrationConfigFilename = "migrationConfig"
-)
-
-// ErrNoDataFound - returned when the requested data is not found (it may have been deleted). Callers may safely ignore this error.
-var ErrNoDataFound = errors.New("No data found")
-var bucketLock = sync.Mutex{}
-
-// Bucket - describes a bucket configuration in the file database
-type Bucket struct {
-	BucketID        string                          `json:"bucketId"`
-	IsDynamicName   bool                            `json:"isDynamicName"`
-	BucketNameQuery string                          `json:"bucketNameQuery"`
-	Indices         []string                        `json:"indices"`
-	BucketPath      string                          `json:"bucketPath"`
-	InFileIndexMap  map[string]filepack.InFileIndex `json:"inFileIndexMap"`
-	BucketType      string                          `json:"bucketType"`
-	EnableLazy      bool                            `json:"enableLazy"`
-}
-
-// GetNewIndex - creates a new index for the given name query and registers it with the FDB instance
-func (fdb *FDB) GetNewIndex(indexNameQuery string, IsDynamicName bool) (*Index, error) {
-	fdb.indexMux.Lock()
-	defer fdb.indexMux.Unlock()
-	if _, ok := fdb.indices[indexNameQuery]; ok {
-		return nil, errormdl.Wrap("Index name already found")
-	}
-	idx, err := NewIndex(indexNameQuery, indexNameQuery, IsDynamicName)
-	if err != nil {
-		return nil, err
-	}
-
-	fdb.indices[idx.IndexID] = &idx
-	return &idx, nil
-}
-
-// RegisterNewIndex - registers the provided index with the FDB instance and returns the stored index
-func (fdb *FDB) RegisterNewIndex(idx *Index) (*Index, error) {
-	fdb.indexMux.Lock()
-	defer fdb.indexMux.Unlock()
-	if _, ok := fdb.indices[idx.IndexID]; ok {
-		return nil, errormdl.Wrap("Index ID already found")
-	}
-
-	index, err := NewIndex(idx.IndexID, idx.IndexNameQuery, idx.IsDynamicName)
-	if err != nil {
-		return nil, err
-	}
-	index.SetFields(idx.IndexFields...)
-	fdb.indices[idx.IndexID] = &index
-	return &index, nil
-}
-
-// GetIndexDB - opens an in-memory buntdb instance for the given index
-func (fdb *FDB) GetIndexDB(index *Index) (*buntdb.DB, error) {
-	// dbPath := filepath.Join(fdb.DBPath, INDEXFOLDER)
-	// filemdl.CreateDirectoryRecursive(dbPath)
-	// dbPath = filepath.Join(dbPath, index.IndexID)
-	db, err := buntdb.Open(":memory:")
-	if err != nil {
-		return nil, err
-	}
-	return db, nil
-}
-
-// GetNewBucket - creates a Bucket with a new GUID under the parent bucket and registers it
-func (fdb *FDB) GetNewBucket(BucketNameQuery string, IsDynamicName bool, parentBucket *Bucket) *Bucket {
-	bucket := &Bucket{
-		BucketID:        guidmdl.GetGUID(),
-		BucketNameQuery: BucketNameQuery,
-		IsDynamicName:   IsDynamicName,
-	}
-	if IsDynamicName {
-		BucketNameQuery = "$$" + BucketNameQuery
-	}
-	bucket.BucketPath = filepath.Join(parentBucket.BucketPath, BucketNameQuery)
-	fdb.buckets[bucket.BucketID] = bucket
-	return bucket
-}
-
-// NewBucket - registers the given bucket under the parent bucket and returns it
-func (fdb *FDB) NewBucket(bucket, parentBucket *Bucket) *Bucket {
-	fdb.bLocker.Lock()
-	defer fdb.bLocker.Unlock()
-
-	bucketNameQuery := bucket.BucketNameQuery
-	if bucket.IsDynamicName {
-		bucketNameQuery = "$$" + bucket.BucketNameQuery
-	}
-	bucket.BucketPath = filepath.Join(parentBucket.BucketPath, bucketNameQuery)
-	fdb.buckets[bucket.BucketID] = bucket
-	return bucket
-}
-
-// SetBucketType -
-func (bucket *Bucket) SetBucketType(bucketType string) {
-	bucket.BucketType = bucketType
-}
-
-// EnableLazyWrite - enables or disables lazy write on the bucket
-func (bucket *Bucket) EnableLazyWrite(isEnable bool) {
-	bucket.EnableLazy = isEnable
-}
-
-// SetInFileIndex - registers an in-file index against its file type
-func (bucket *Bucket) SetInFileIndex(inFileIndex filepack.InFileIndex) {
-	if bucket.InFileIndexMap == nil {
-		bucket.InFileIndexMap = make(map[string]filepack.InFileIndex)
-	}
-	bucket.InFileIndexMap[inFileIndex.FileType] = inFileIndex
-}
-
-// SetInFileIndexes - registers multiple in-file indexes against their file types
-func (bucket *Bucket) SetInFileIndexes(inFileIndexes []filepack.InFileIndex) {
-	if bucket.InFileIndexMap == nil {
-		bucket.InFileIndexMap = make(map[string]filepack.InFileIndex, len(inFileIndexes))
-	}
-	for _, inFileIndex := range inFileIndexes {
-		bucket.InFileIndexMap[inFileIndex.FileType] = inFileIndex
-	}
-}
-
-// bucketNameResolver - returns bucket name
-func (fdb *FDB) bucketNameResolver(bucketID string, rs *gjson.Result) (string, error) {
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return "", errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-	name := bucket.BucketNameQuery
-	if bucket.IsDynamicName {
-		name = rs.Get(name).String()
-	}
-	if name == "" {
-		return name, errormdl.Wrap("Bucket name should not be empty: " + bucket.BucketNameQuery)
-	}
-	return name, nil
-}
-
-// resolveIndex - returns the bucket path by resolving the index against the record
-func (fdb *FDB) resolveIndex(index *Index, rs *gjson.Result) (string, error) {
-	path := ""
-	for _, bucketID := range index.BucketSequence {
-		bucketName, err := fdb.bucketNameResolver(bucketID, rs)
-		if err != nil {
-			return path, err
-		}
-		path = filepath.Join(path, bucketName)
-	}
-	indexName := index.IndexNameQuery
-	if index.IsDynamicName {
-		indexName = rs.Get(index.IndexNameQuery).String()
-	}
-	if indexName == "" {
-		return path, errormdl.Wrap("required attribute not provided:" + index.IndexNameQuery)
-	}
-	path = filepath.Join(path, indexName)
-	return path, nil
-}
-
-// createIndexJSON - create JSON with index field data
-func createIndexJSON(index *Index, rs *gjson.Result) (string, error) {
-	json := `{}`
-	for _, indexField := range index.IndexFields {
-		if rs.Get(indexField.Query).Value() == nil {
-			return json, errormdl.Wrap("please provide value for index field: " + indexField.Query)
-		}
-		json, _ = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
-	}
-	return json, nil
-}
-
-// updateIndexJSON - update JSON with index field data
-func updateIndexJSON(index *Index, existingData string, rs *gjson.Result) (string, error) {
-	json := existingData
-	var err error
-	for _, indexField := range index.IndexFields {
-		if rs.Get(indexField.Query).Value() == nil {
-			continue
-		}
-		json, err = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
-		// loggermdl.LogError("Error - ", err)
-	}
-	return json, err
-}
-
-// UpdateIndexLazyObjectInCache - updates index data in the lazy writer cache
-func UpdateIndexLazyObjectInCache(indexID string, indexData *Index) error {
-	// lazy cache must be present for provided indexID
-	lazyObj, ok := IndexLazyObjHolder.Get(indexID)
-	if !ok {
-		loggermdl.LogError("index not found in lazy writer cache")
-		return errormdl.Wrap("index not found in lazy writer cache")
-	}
-
-	idxLazyData, ok := lazyObj.(lazywriter.LazyCacheObject)
-	if !ok {
-		loggermdl.LogError("interface type is not lazywriter.LazyCacheObject")
-		return errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
-	}
-
-	// idxLazyData.GJSONData = indexData
-	idxLazyData.InterfaceData = indexData
-	if ok := IndexMaster.SaveOrUpdateDataInCache(idxLazyData); !ok {
-		loggermdl.LogError("failed to update index data in lazy cache")
-		return errormdl.Wrap("failed to update index data in lazy cache")
-	}
-
-	IndexLazyObjHolder.SetNoExpiration(indexID, idxLazyData)
-	return nil
-}
-
-// AppendDataInLazyObjectInCache - appends data to the bucket's object in the lazy writer cache
-func AppendDataInLazyObjectInCache(bucketID string, data gjson.Result, filePath string) error {
-	// lazy cache must be present for provided bucketID
-	bucketLock.Lock()
-	defer bucketLock.Unlock()
-	lazyObj, ok := AppendLazyObjHolder.Get(bucketID)
-	if !ok {
-		loggermdl.LogError(AppendLazyObjHolder.GetItems())
-		return errormdl.Wrap("lazyObject not found in lazywriter cache")
-	}
-
-	bucketLazyData, ok := lazyObj.(lazywriter.LazyCacheObject)
-	if !ok {
-		loggermdl.LogError("interface type is not lazywriter.LazyCacheObject")
-		return errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
-	}
-
-	bucketLazyData.FileName = filePath
-	updatedData := ""
-	var err error
-	dataBytes := []byte(data.String())
-	if isSecurityEnabled {
-		_, fileName := filepath.Split(filePath)
-		dataBytes, err = encryptData(dataBytes, fileName)
-		if err != nil {
-			loggermdl.LogError(err)
-			return err
-		}
-	}
-	if bucketLazyData.InterfaceData == nil {
-		updatedData = string(dataBytes)
-	} else {
-		previousData, ok := bucketLazyData.InterfaceData.(string)
-		if !ok {
-			return errormdl.Wrap("previous lazy data is not of type string")
-		}
-		updatedData = previousData + string(lineBreak) + string(dataBytes)
-	}
-
-	// bucketLazyData.GJSONData = indexData
-	bucketLazyData.InterfaceData = updatedData
-	if ok := AppendMaster.SaveOrUpdateDataInCache(bucketLazyData); !ok {
-		loggermdl.LogError("failed to update append data in lazy cache")
-		return errormdl.Wrap("failed to update append data in lazy cache")
-	}
-
-	AppendLazyObjHolder.SetNoExpiration(bucketID, bucketLazyData)
-	return nil
-}
-
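-// readNormalFileUsingFp - reads the whole file through the given file pointer and, when security is enabled, decrypts (and optionally decompresses) the content using a key derived from the file name and record.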
-func readNormalFileUsingFp(fp *os.File, rs *gjson.Result, secParams securitymdl.FDBSecParams) ([]byte, error) {
-	ba, err := filemdl.ReadFileUsingFp(fp)
-	if err != nil {
-		loggermdl.LogError(err)
-		return nil, err
-	}
-
-	if len(ba) == 0 {
-		return ba, nil
-	}
-
-	if secParams.EnableSecurity {
-		// _, fileName := filepath.Split(fp.Name())
-		key, err := securitymdl.GenSecKeyBytes(fp.Name(), rs)
-		if err != nil {
-			loggermdl.LogError("failed to generate security key: ", err)
-			return nil, err
-		}
-		ba, err = decryptwithDecompression(ba, secParams.EnableCompression, key)
-		if err != nil {
-			loggermdl.LogError(err)
-			return nil, err
-		}
-	}
-	return ba, nil
-}
-
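-// readNormalFilesUsingFp - reads each file (from the lazy cache when lazy write is enabled, otherwise from disk) and returns the combined content as a JSON array.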
-func readNormalFilesUsingFp(fps []*os.File, bucket *Bucket, rs *gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, error) {
-
-	sb := strings.Builder{}
-	sb.WriteString("[")
-	for _, fp := range fps {
-		if filemdl.FileAvailabilityCheck(fp.Name()) {
-			if bucket.EnableLazy {
-				data, err := readDataFromLazyBucket(bucket, fp, rs, secParams)
-				if err != nil {
-					loggermdl.LogError(err)
-					return nil, err
-				}
-				_, err = sb.WriteString(data)
-				if err != nil {
-					loggermdl.LogError(err)
-					return nil, err
-				}
-			} else {
-				ba, err := readNormalFileUsingFp(fp, rs, secParams)
-				if err != nil {
-					loggermdl.LogError(err)
-					return nil, err
-				}
-				_, err = sb.Write(ba)
-				if err != nil {
-					loggermdl.LogError(err)
-					return nil, err
-				}
-			}
-			sb.WriteString(",")
-		}
-	}
-	sb.WriteString("]")
-	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
-	res := gjson.Parse(finalResult)
-	return &res, nil
-}
-
-func prepareLazyDataForNormalBucket(prevData string, data *gjson.Result) (string, error) {
-	return data.String(), nil
-}
-
-type lazySec struct {
-	securitymdl.FDBSecParams
-}
-
-func (ls lazySec) SaveFnForNormalBucket(bucketId string, data *lazywriter.LazyCacheObject) {
-	cachedData, ok := data.InterfaceData.(string)
-	if !ok {
-		loggermdl.LogError("lazy write: no data found in lazy cache for bucket id ", bucketId)
-		return
-	}
-
-	var (
-		cachedDataRS = gjson.Parse(cachedData)
-		bucketType   = cachedDataRS.Get("bucketType").String()
-		dataRS       = cachedDataRS.Get("data")
-		dataBytes    []byte
-		err          error
-	)
-
-	if bucketType != BucketTypeSimple {
-		loggermdl.LogError("lazy write: expected Normal bucket, got ", bucketType)
-		return
-	}
-
-	dataBytes = []byte(dataRS.String())
-
-	if ls.EnableSecurity {
-		key, err := securitymdl.GenSecKeyBytes(data.FileName, &dataRS)
-		if err != nil {
-			loggermdl.LogError("lazy write: failed to generate security key for normal bucket, ", err)
-			return
-		}
-
-		dataBytes, err = encryptWithCompression(dataBytes, ls.EnableCompression, key)
-		if err != nil {
-			loggermdl.LogError("lazy write: failed to encrypt data for normal bucket, ", err)
-			return
-		}
-	}
-
-	if err = filemdl.WriteFile(data.FileName, dataBytes, true, true); err != nil {
-		loggermdl.LogError("lazy write: failed to save data from normal bucket to disk, ", err)
-	}
-}
-
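-// lazyCallBackFn - lazy writer callback that flushes cached data to disk; append-bucket data is appended to the file and the cached entry is cleared.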
-var lazyCallBackFn lazywriter.SaveDataFn = func(bucketId string, data *lazywriter.LazyCacheObject) {
-	dataInLazyMemory, ok := data.InterfaceData.(string)
-	if !ok {
-		loggermdl.LogError("data not found to write")
-		return
-	}
-	dataObj := gjson.Parse(dataInLazyMemory)
-	bucketType := dataObj.Get("bucketType").String()
-	dataByte := []byte(dataObj.Get("data").String())
-	var err error
-	switch bucketType {
-	// case BucketTypeSimple:
-	// 	err = filemdl.WriteFile(data.FileName, dataByte, true, true)
-	case BucketTypeAppend:
-		_, _, err = filemdl.AppendDataToFile(data.FileName, dataByte, true)
-		if err != nil {
-			loggermdl.LogError(err)
-		}
-		err = LazyWriteObjMaster.ClearLazyObjInterfaceData(data.FileName)
-		data.InterfaceData = ""
-		LazyWriteObjHolder.Set(data.FileName, data)
-
-	default:
-		loggermdl.LogError("operation not allowed on this bucket type: ", bucketType)
-	}
-	if err != nil {
-		loggermdl.LogError(err)
-	}
-}
-
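-// saveDatInLazyBucket - stores data in the lazy writer cache for Normal and Append buckets; Normal data replaces the cached value while Append data is concatenated to it.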
-func saveDatInLazyBucket(bucket *Bucket, fp *os.File, data *gjson.Result, secParams securitymdl.FDBSecParams) error {
-	if !(bucket.BucketType == BucketTypeSimple || bucket.BucketType == BucketTypeAppend) {
-		return errormdl.Wrap("save to lazy cache operation not allowed on this bucket type: " + bucket.BucketType)
-	}
-
-	var (
-		lazyDataObj lazywriter.LazyCacheObject
-		prevDataStr = ""
-	)
-
-	lazyObj, ok := LazyWriteObjHolder.Get(fp.Name())
-	if ok {
-		lazyDataObj, ok = lazyObj.(lazywriter.LazyCacheObject)
-		if !ok {
-			return errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
-		}
-		prevDataStr, ok = lazyDataObj.InterfaceData.(string)
-		if !ok {
-			return errormdl.Wrap("previous lazy data is not of type string")
-		}
-	} else {
-		lazyDataObj = lazywriter.LazyCacheObject{
-			FileName:      fp.Name(),
-			Identifier:    fp.Name(),
-			InterfaceData: "",
-			SaveFn: func() lazywriter.SaveDataFn {
-				if bucket.BucketType == BucketTypeAppend {
-					return lazyCallBackFn
-				}
-
-				return (lazySec{secParams}).SaveFnForNormalBucket
-			}(),
-		}
-	}
-
-	updatedData := ""
-
-	// prevDataObj.Get("data")
-	switch bucket.BucketType {
-	case BucketTypeSimple:
-		updatedData = data.String()
-	case BucketTypeAppend:
-		prevDataObj := gjson.Parse(prevDataStr)
-		prevData := prevDataObj.Get("data").String()
-		dataToSave := data.String()
-		// // currently the security is disabled for append bucket. Don't have any way to read encrypted data if encrypted by field query.
-		// rs := gjson.Parse(dataToSave)
-		// if secParams.EnableSecurity {
-		// 	// _, fileName := filepath.Split(f.Name())
-		// 	key, err := securitymdl.GenSecKeyBytes(fp.Name(), &rs)
-		// 	if err != nil {
-		// 		loggermdl.LogError("failed to generate security key for append bucket: ", err)
-		// 		return err
-		// 	}
-
-		// 	dataBytes, err := encryptWithCompression([]byte(dataToSave), secParams.EnableCompression, key)
-		// 	if err != nil {
-		// 		loggermdl.LogError("failed to encrypt data for append bucket: ", err)
-		// 		return err
-		// 	}
-
-		// 	dataToSave = string(dataBytes)
-		// }
-		updatedData = prevData + dataToSave + string(lineBreak)
-	default:
-		return errormdl.Wrap("operation not allowed on this bucket type: " + bucket.BucketType)
-	}
-
-	updatedDataObj, _ := sjson.Set("", "bucketType", bucket.BucketType)
-	updatedDataObj, _ = sjson.Set(updatedDataObj, "data", updatedData)
-
-	// bucketLazyData.GJSONData = indexData
-	lazyDataObj.InterfaceData = updatedDataObj
-	if ok := LazyWriteObjMaster.SaveOrUpdateDataInCache(lazyDataObj); !ok {
-		loggermdl.LogError("failed to update data in lazy cache")
-		return errormdl.Wrap("failed to update data in lazy cache")
-	}
-
-	LazyWriteObjHolder.SetNoExpiration(fp.Name(), lazyDataObj)
-	return nil
-}
-
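-// updateDataInLazyBucket - merges the given fields into the cached copy of a Normal bucket file (reading the file from disk if it is not cached yet) and returns the updated data.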
-func updateDataInLazyBucket(bucket *Bucket, fp *os.File, data *gjson.Result, secParam securitymdl.FDBSecParams) (*gjson.Result, error) {
-	if bucket.BucketType != BucketTypeSimple {
-		return data, errormdl.Wrap("update data in lazy cache operation not allowed on this bucket type: " + bucket.BucketType)
-	}
-	var lazyDataObj lazywriter.LazyCacheObject
-	lazyObj, foundLazyObj := LazyWriteObjHolder.Get(fp.Name())
-	updatedData := ""
-	prevDataStr := ""
-	ok := false
-	if foundLazyObj {
-		lazyDataObj, ok = lazyObj.(lazywriter.LazyCacheObject)
-		if !ok {
-			return nil, errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
-		}
-		prevDataStr, ok = lazyDataObj.InterfaceData.(string)
-		if !ok {
-			return nil, errormdl.Wrap("previous lazy data is not of type string")
-		}
-		updatedData = gjson.Parse(prevDataStr).Get("data").String()
-	} else {
-		lazyDataObj = lazywriter.LazyCacheObject{
-			FileName:   fp.Name(),
-			Identifier: fp.Name(),
-			SaveFn:     (lazySec{secParam}).SaveFnForNormalBucket,
-		}
-		dataByte, err := readNormalFileUsingFp(fp, data, secParam)
-
-		if err != nil {
-			return data, err
-		}
-		updatedData = string(dataByte)
-	}
-
-	data.ForEach(func(key, val gjson.Result) bool {
-		updatedData, _ = sjson.Set(updatedData, key.String(), val.Value())
-		return true
-	})
-
-	updatedDataStr, _ := sjson.Set("", "bucketType", bucket.BucketType)
-	updatedDataStr, _ = sjson.Set(updatedDataStr, "data", updatedData)
-
-	// bucketLazyData.GJSONData = indexData
-	lazyDataObj.InterfaceData = updatedDataStr
-	if ok := LazyWriteObjMaster.SaveOrUpdateDataInCache(lazyDataObj); !ok {
-		loggermdl.LogError("failed to update data in lazy cache")
-		return nil, errormdl.Wrap("failed to update data in lazy cache")
-	}
-
-	LazyWriteObjHolder.SetNoExpiration(fp.Name(), lazyDataObj)
-	updatedDataObj := gjson.Parse(updatedData)
-	return &updatedDataObj, nil
-}
-
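-// readDataFromLazyBucket - returns the data for a Normal bucket file from the lazy writer cache, loading and caching the file content on a cache miss.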
-func readDataFromLazyBucket(bucket *Bucket, fp *os.File, rs *gjson.Result, secParam securitymdl.FDBSecParams) (data string, err error) {
-	if bucket.BucketType != BucketTypeSimple {
-		return data, errormdl.Wrap("read from lazy cache operation not allowed on this bucket type: " + bucket.BucketType)
-	}
-	var lazyDataObj lazywriter.LazyCacheObject
-	lazyObj, foundLazyObj := LazyWriteObjHolder.Get(fp.Name())
-	prevDataStr := ""
-	ok := false
-	if foundLazyObj {
-		lazyDataObj, ok = lazyObj.(lazywriter.LazyCacheObject)
-		if !ok {
-			return data, errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
-		}
-		prevDataStr, ok = lazyDataObj.InterfaceData.(string)
-		if !ok {
-			return data, errormdl.Wrap("previous lazy data is not of type string")
-		}
-		prevData := gjson.Parse(prevDataStr)
-		data = prevData.Get("data").String()
-
-		return
-	}
-	dataByte, err := readNormalFileUsingFp(fp, rs, secParam)
-	if err != nil {
-		return data, err
-	}
-	data = string(dataByte)
-	lazyDataObj = lazywriter.LazyCacheObject{
-		FileName:   fp.Name(),
-		Identifier: fp.Name(),
-		SaveFn:     (lazySec{secParam}).SaveFnForNormalBucket,
-	}
-
-	updatedDataObj, _ := sjson.Set("", "bucketType", bucket.BucketType)
-	updatedDataObj, _ = sjson.Set(updatedDataObj, "data", data)
-
-	// bucketLazyData.GJSONData = indexData
-	lazyDataObj.InterfaceData = updatedDataObj
-	if ok := LazyWriteObjMaster.SaveOrUpdateDataInCache(lazyDataObj); !ok {
-		loggermdl.LogError("failed to update data in lazy cache")
-		return data, errormdl.Wrap("failed to update data in lazy cache")
-	}
-	LazyWriteObjHolder.SetNoExpiration(fp.Name(), lazyDataObj)
-
-	return data, nil
-}
-
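-// deleteDataFromLazyBucket - removes a Normal bucket file's entry from the lazy writer cache.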
-func deleteDataFromLazyBucket(bucket *Bucket, fp *os.File) error {
-	if bucket.BucketType != BucketTypeSimple {
-		return errormdl.Wrap("operation not allowed on this bucket type: " + bucket.BucketType)
-	}
-
-	LazyWriteObjMaster.ClearLazyObjInterfaceData(fp.Name())
-	LazyWriteObjHolder.Delete(fp.Name())
-
-	return nil
-}
-
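-// saveDataInNormalBucketUsingFp - writes data for a Normal bucket either to the lazy writer cache or directly to the file, encrypting (and optionally compressing) it when security is enabled.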
-func saveDataInNormalBucketUsingFp(f *os.File, bucket *Bucket, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
-	dataBytes := []byte(rs.String())
-	var err error
-	if bucket.EnableLazy {
-		if secParams.EnableSecurity {
-			// do not add data to lazy cache if security is enabled and got empty result for field query
-			if _, err := securitymdl.CheckDataForFieldQuery(f.Name(), rs); err != nil {
-				loggermdl.LogError("failed to save data in lazy writer cache: ", err)
-				return err
-			}
-		}
-
-		return saveDatInLazyBucket(bucket, f, rs, secParams)
-	}
-	if secParams.EnableSecurity {
-		// _, fileName := filepath.Split(f.Name())
-		key, err := securitymdl.GenSecKeyBytes(f.Name(), rs)
-		if err != nil {
-			loggermdl.LogError("failed to generate security key for normal bucket: ", err)
-			return err
-		}
-
-		dataBytes, err = encryptWithCompression(dataBytes, secParams.EnableCompression, key)
-		if err != nil {
-			loggermdl.LogError("failed to encrypt data for normal bucket: ", err)
-			return err
-		}
-	}
-	err = filemdl.WriteFileUsingFp(f, dataBytes, true, false)
-	if errormdl.CheckErr(err) != nil {
-		loggermdl.LogError(err)
-		return errormdl.CheckErr(err)
-	}
-	return nil
-}
-
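-// saveDataInPackBucketUsingFp - adds the data as a record in the pack file using its in-file index and, for a new or empty file, also adds the FDB index entry.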
-func saveDataInPackBucketUsingFp(fdb *FDB, f *os.File, index *Index, infileIndexData, rs *gjson.Result) (*gjson.Result, error) {
-	// workflow:
-	// - if the file is locked, wait; otherwise set the file status to fileStatusUpdatingData
-	// - compress and encrypt the data
-	// - calculate the file hash
-	// - save the data in the file and record its offset and length
-	// - update the footer address
-	// - set the file status to fileStatusUpdatingIndex
-	// - update the index
-	// - set the file status to fileStatusReady
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket := fdb.buckets[bucketID]
-	fileType := rs.Get("fileType").String()
-	if len(fileType) == 0 {
-		return infileIndexData, errormdl.Wrap("please specify fileType")
-	}
-	isFilePresent := filemdl.FileAvailabilityCheck(f.Name())
-	info, err := f.Stat()
-	if err != nil {
-		loggermdl.LogError(err)
-		return infileIndexData, errormdl.Wrap("failed to save data: " + err.Error())
-	}
-
-	infileIndex, ok := bucket.InFileIndexMap[fileType]
-	if !ok {
-		loggermdl.LogError("infileIndex for specified fileType not found: ", fileType)
-		return infileIndexData, errormdl.Wrap("infileIndex for specified fileType not found: " + fileType)
-	}
-
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	infileIndexData, err = filepack.AddPackFileUsingFp(f, infileIndex.IndexFields, infileIndexData, rs, secParams)
-	if err != nil {
-		loggermdl.LogError("fail to add data in pack file: ", err)
-		return infileIndexData, errormdl.Wrap("fail to write data in file")
-	}
-	// for adding fdb index data
-	if !isFilePresent || info.Size() == 0 {
-		err = addFDBIndexEntryFileUsingFp(f, bucket, index.IndexFields, rs, secParams)
-		if err != nil {
-			loggermdl.LogError("fail to add fdb index entry in file: ", err)
-			return infileIndexData, errormdl.Wrap("fail to write data in file")
-		}
-	}
-	return infileIndexData, err
-}
-
-// SaveMediaInFDB - saves media bytes in the FDB and returns the record path
-func SaveMediaInFDB(dbName string, indexID string, mediaData []byte, rs *gjson.Result) (string, error) {
-	recordPath := ""
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return recordPath, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return recordPath, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	path, err := fdb.resolveIndex(index, rs)
-	if errormdl.CheckErr(err) != nil {
-		loggermdl.LogError(err)
-		return recordPath, errormdl.CheckErr(err)
-	}
-	rsStr, _ := sjson.Set(rs.String(), "fileType", FileTypeAsset)
-	data := gjson.Parse(rsStr)
-
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return recordPath, errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-
-	if bucket.BucketType != BucketTypeMixed && bucket.BucketType != BucketTypeAsset {
-		loggermdl.LogError("operation not valid on bucket type: ", bucket.BucketType)
-		return recordPath, errormdl.Wrap("operation not valid on this type of bucket: " + bucket.BucketType)
-	}
-	prevFDBIndexVal, err := index.GetEntryByPath(path)
-	if err != nil {
-		loggermdl.LogError("fail to get fdb index entry: ", err)
-		return recordPath, errormdl.Wrap("fail to add media: " + err.Error())
-	}
-
-	filePath := filepath.Join(fdb.DBPath, path)
-	var f *os.File
-	var inFileIndexData *gjson.Result
-	rfile, err := fileFpCache.Get(filePath)
-	if err != nil {
-		f, err = openFile(filePath)
-		if err != nil {
-			loggermdl.LogError(err)
-			return recordPath, err
-		}
-		rfile = fileFpCache.Set(f, nil)
-	} else {
-		f = rfile.file
-		inFileIndexData = rfile.InfileIndex
-	}
-	rfile.lock.Lock()
-	defer rfile.lock.Unlock()
-	recordID, updatedInfileIndex, err := filepack.AddMediaInPackFileUsingFp(f, inFileIndexData, mediaData, []filepack.InFileIndexField{}, &data)
-	if err != nil {
-		loggermdl.LogError("fail to add media in pack file: ", err)
-		return recordPath, errormdl.Wrap("fail to add media in pack file")
-	}
-	fileFpCache.Set(f, updatedInfileIndex)
-	rowID, err := GenRowID(path)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-
-	prevFDBIndexVal, _ = sjson.Set(prevFDBIndexVal, "rowID", rowID)
-	updatedJSON, err := updateIndexJSON(index, prevFDBIndexVal, rs)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-	updatedJSONObj := gjson.Parse(updatedJSON)
-	err = index.AddEntry(path, &updatedJSONObj)
-	if err != nil {
-		loggermdl.LogError("failed to update index data - ", err)
-		return "", errormdl.Wrap("failed to update index data")
-	}
-
-	if isLazyWriterEnabled {
-		err = UpdateIndexLazyObjectInCache(indexID, index)
-		if err != nil {
-			loggermdl.LogError("failed to update index data in lazy writer cache - ", err)
-			return "", errormdl.Wrap("failed to update index data in lazy writer cache")
-		}
-	}
-
-	if err != nil {
-		loggermdl.LogError("fail to set fdb index: ", err)
-		return recordPath, errormdl.Wrap("fail to set fdb index: " + err.Error())
-	}
-	recordPath = dbName + "/" + indexID + "/" + rowID + "/" + recordID
-	return recordPath, nil
-}
-
-// UpdateMediaInFDB - updates existing media identified by recordID and returns the record path
-func UpdateMediaInFDB(dbName string, indexID, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
-	recordID = strings.TrimSpace(recordID)
-	if recordID == "" {
-		return "", errors.New("please provide recordID for Update operation")
-	}
-	recordPath := ""
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return recordPath, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return recordPath, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	path, err := fdb.resolveIndex(index, rs)
-	if errormdl.CheckErr(err) != nil {
-		loggermdl.LogError(err)
-		return recordPath, errormdl.CheckErr(err)
-	}
-	rsStr, _ := sjson.Set(rs.String(), "fileType", FileTypeAsset)
-	data := gjson.Parse(rsStr)
-
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return recordPath, errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-
-	if bucket.BucketType != BucketTypeMixed && bucket.BucketType != BucketTypeAsset {
-		loggermdl.LogError("operation not valid on bucket type: ", bucket.BucketType)
-		return recordPath, errormdl.Wrap("operation not valid on this type of bucket: " + bucket.BucketType)
-	}
-
-	rowID, err := GenRowID(path)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-	if rowID == "" {
-		loggermdl.LogError("please provide value for index field: ", index.IndexNameQuery)
-		return recordPath, errormdl.Wrap("please provide value for index field: " + index.IndexNameQuery)
-	}
-	query := []string{`#[rowID=` + rowID + `]`}
-
-	entry, found, err := index.GetEntryByQueries(query)
-	if err != nil {
-		loggermdl.LogError("failed to get data: ", err)
-		return recordPath, errormdl.Wrap("data not found: " + err.Error())
-	}
-
-	if !found {
-		loggermdl.LogError("no data found to update")
-		return recordPath, errormdl.Wrap("no data found to update")
-	}
-
-	filePath := entry.Key
-	prevIndexVal := entry.Value
-	filePath = filepath.Join(fdb.DBPath, filePath)
-
-	var f *os.File
-	var inFileIndexData *gjson.Result
-	rfile, err := fileFpCache.Get(filePath)
-	if err != nil {
-		f, err = openFile(filePath)
-		if err != nil {
-			loggermdl.LogError(err)
-			return recordPath, err
-		}
-		rfile = fileFpCache.Set(f, nil)
-	} else {
-		f = rfile.file
-		inFileIndexData = rfile.InfileIndex
-	}
-	rfile.lock.Lock()
-	defer rfile.lock.Unlock()
-	var updatedInfileIndex *gjson.Result
-	recordID, updatedInfileIndex, err = filepack.UpdateMediaInPackFileUsingFp(f, inFileIndexData, recordID, mediaData, []filepack.InFileIndexField{}, &data)
-	if err != nil {
-		loggermdl.LogError("fail to update media: ", err)
-		return recordPath, err
-	}
-	fileFpCache.Set(f, updatedInfileIndex)
-	prevIndexVal, _ = sjson.Set(prevIndexVal, "rowID", rowID)
-	updatedJSON, err := updateIndexJSON(index, prevIndexVal, rs)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-	updatedJSONObj := gjson.Parse(updatedJSON)
-	err = index.AddEntry(path, &updatedJSONObj)
-
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-
-	if isLazyWriterEnabled {
-		err = UpdateIndexLazyObjectInCache(indexID, index)
-		if err != nil {
-			loggermdl.LogError("failed to update index data in lazy writer cache - ", err)
-			return "", errormdl.Wrap("failed to update index data in lazy writer cache")
-		}
-	}
-
-	if err != nil {
-		loggermdl.LogError("fail to set fdb index: ", err)
-		return recordPath, errormdl.Wrap("fail to set fdb index: " + err.Error())
-	}
-	recordPath = dbName + "/" + indexID + "/" + rowID + "/" + recordID
-	return recordPath, nil
-}
-
-// UpsertMediaInFDB - inserts or updates media for the given recordID and returns the record path
-func UpsertMediaInFDB(dbName string, indexID, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
-	recordID = strings.TrimSpace(recordID)
-	if recordID == "" {
-		return "", errors.New("please provide recordID for Upsert operation")
-	}
-	recordPath := ""
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return recordPath, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return recordPath, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	path, err := fdb.resolveIndex(index, rs)
-	if errormdl.CheckErr(err) != nil {
-		loggermdl.LogError(err)
-		return recordPath, errormdl.CheckErr(err)
-	}
-	rsStr, _ := sjson.Set(rs.String(), "fileType", FileTypeAsset)
-	data := gjson.Parse(rsStr)
-
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return recordPath, errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-
-	if bucket.BucketType != BucketTypeMixed && bucket.BucketType != BucketTypeAsset {
-		loggermdl.LogError("operation not valid on bucket type: ", bucket.BucketType)
-		return recordPath, errormdl.Wrap("operation not valid on this type of bucket: " + bucket.BucketType)
-	}
-
-	rowID, err := GenRowID(path)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-
-	query := []string{`#[rowID=` + rowID + `]`}
-	// rs := gjson.Result{}
-
-	entry, _, err := index.GetEntryByQueries(query)
-	if err != nil {
-		loggermdl.LogError("fail to upsert media: ", err)
-		return recordPath, errormdl.Wrap("fail to upsert media: " + err.Error())
-	}
-
-	prevIndexVal := entry.Value
-	filePath := filepath.Join(fdb.DBPath, path)
-	var f *os.File
-	var inFileIndexData *gjson.Result
-	rfile, err := fileFpCache.Get(filePath)
-	if err != nil {
-		f, err = openFile(filePath)
-		if err != nil {
-			loggermdl.LogError(err)
-			return recordPath, err
-		}
-		rfile = fileFpCache.Set(f, nil)
-	} else {
-		f = rfile.file
-		inFileIndexData = rfile.InfileIndex
-	}
-
-	rfile.lock.Lock()
-	defer rfile.lock.Unlock()
-	var updatedInfileIndex *gjson.Result
-
-	recordID, updatedInfileIndex, err = filepack.UpsertMediaInPackFileUsingFp(f, inFileIndexData, recordID, mediaData, []filepack.InFileIndexField{}, &data)
-
-	if err != nil {
-		loggermdl.LogError("fail to upsert media: ", err)
-		return recordPath, errormdl.Wrap("fail to upsert media: " + err.Error())
-	}
-	fileFpCache.Set(f, updatedInfileIndex)
-	prevIndexVal, _ = sjson.Set(prevIndexVal, "rowID", rowID)
-
-	updatedJSON, err := updateIndexJSON(index, prevIndexVal, rs)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-	updatedJSONObj := gjson.Parse(updatedJSON)
-
-	err = index.AddEntry(path, &updatedJSONObj)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordPath, err
-	}
-
-	if err != nil {
-		loggermdl.LogError("failed to upsert index data - ", err)
-		return "", errormdl.Wrap("failed to upsert index data")
-	}
-
-	if isLazyWriterEnabled {
-		err = UpdateIndexLazyObjectInCache(indexID, index)
-		if err != nil {
-			loggermdl.LogError("failed to upsert media: ", err)
-			return "", errormdl.Wrap("failed to upsert media: fail to update lazy writer cache")
-		}
-	}
-
-	if err != nil {
-		loggermdl.LogError("failed to upsert media: fail to set fdb index: ", err)
-		return recordPath, errormdl.Wrap("fail to set fdb index: " + err.Error())
-	}
-
-	recordPath = dbName + "/" + indexID + "/" + rowID + "/" + recordID
-	return recordPath, nil
-}
-
-// GetMediaFromFDB - returns the media bytes and associated metadata for the given rowID and recordID
-func GetMediaFromFDB(dbName, indexID, rowID, recordID string) ([]byte, gjson.Result, error) {
-	dataByte := []byte{}
-	fileMeta := gjson.Result{}
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return dataByte, fileMeta, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return dataByte, fileMeta, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	queries := []string{`#[rowID=` + rowID + `]`}
-	// rs := gjson.Result{}
-	entry, found, err := index.GetEntryByQueries(queries)
-	if err != nil {
-		loggermdl.LogError("failed to get data: ", err)
-		return dataByte, fileMeta, errormdl.Wrap("data not found: " + err.Error())
-	}
-	if !found {
-		loggermdl.LogError("data not found")
-		return dataByte, fileMeta, errormdl.Wrap("data not found")
-	}
-
-	var f *os.File
-	var fpInfileIndex *gjson.Result
-	filePath := filepath.Join(fdb.DBPath, entry.Key)
-	rfile, err := fileFpCache.Get(filePath)
-	if err != nil {
-		f, err = openFile(filePath)
-		if err != nil {
-			return dataByte, fileMeta, errormdl.Wrap("fail to get media: " + err.Error())
-		}
-		rfile = fileFpCache.Set(f, nil)
-
-		fpInfileIndex = nil
-	} else {
-		fpInfileIndex = rfile.InfileIndex
-		f = rfile.file
-	}
-
-	rfile.lock.Lock()
-	defer rfile.lock.Unlock()
-	var metaData *gjson.Result
-	dataByte, metaData, err = filepack.GetMediaFromPackFileUsingFp(f, fpInfileIndex, recordID)
-	if err != nil {
-		loggermdl.LogError(err)
-		return dataByte, fileMeta, err
-	}
-	updatedInfileIndex := metaData.Get("infileIndex").String()
-	ui := gjson.Parse(updatedInfileIndex)
-	fileFpCache.Set(f, &ui)
-	fileMeta = metaData.Get("requiredData")
-	return dataByte, gjson.Parse(fileMeta.String()), nil
-}
-
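-// saveDataInAppendBucketUsingFp - appends data to an Append bucket file (through the lazy writer cache when enabled), encrypting it first when security is enabled.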
-func saveDataInAppendBucketUsingFp(f *os.File, bucket *Bucket, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
-	// loggermdl.LogError("isLazyWriterEnabled", isLazyWriterEnabled)
-	if bucket.EnableLazy {
-		return saveDatInLazyBucket(bucket, f, rs, secParams)
-	}
-	//  else
-
-	var (
-		dataBytes   = []byte(rs.String())
-		_, fileName = filepath.Split(f.Name())
-		err         error
-	)
-
-	if secParams.EnableSecurity {
-		key, err := securitymdl.GenSecKeyBytes(fileName, rs)
-		if err != nil {
-			loggermdl.LogError("failed to generate security key for append bucket: ", err)
-			return err
-		}
-
-		dataBytes, err = encryptWithCompression(dataBytes, secParams.EnableCompression, key)
-		if err != nil {
-			loggermdl.LogError("failed to encrypt data for append bucket: ", err)
-			return err
-		}
-	}
-
-	dataBytes = []byte(string(dataBytes) + lineBreak)
-	_, _, err = filemdl.AppendDataToFile(f.Name(), dataBytes, true)
-	return err
-}
-
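-// addFDBIndexEntryFileUsingFp - builds an FDBIndex record from the index fields and stores it in the pack file as an in-file index entry.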
-func addFDBIndexEntryFileUsingFp(f *os.File, bucket *Bucket, indexFields []IndexField, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
-	infileIndexFields := []filepack.InFileIndexField{}
-	for _, field := range indexFields {
-		infileIndexField := filepack.InFileIndexField{}
-		infileIndexField.FieldName = field.FieldName
-		infileIndexField.Query = field.Query
-		infileIndexFields = append(infileIndexFields, infileIndexField)
-	}
-	// adding FDBIndex as infileIndex
-	FDBIndexInFileIndexMap := filepack.InFileIndex{
-		FileType:    FileTypeFDBIndex,
-		IndexFields: infileIndexFields,
-	}
-	bucket.SetInFileIndex(FDBIndexInFileIndexMap)
-	// create infile index data
-	newData, err := filepack.CreateIndexJSON(infileIndexFields, rs)
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	newData, _ = sjson.Set(newData, "fileType", FileTypeFDBIndex)
-
-	fieldQuery := securitymdl.GetFDBSecOptions().FieldQuery
-	if fieldQuery != "" {
-		newData, _ = sjson.Set(newData, fieldQuery, rs.Get(fieldQuery).Value()) // copy the field referenced by the user-defined query onto the new data; required for encryption
-	}
-	newDataObj := gjson.Parse(newData)
-	return filepack.AddFileInPackFileUsingFp(f, infileIndexFields, &newDataObj, secParams)
-}
-
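-// updateDataInNormalBucketUsingFp - updates each given Normal bucket file (via the lazy cache when enabled) and returns the updated records, the files that were updated and any per-file errors.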
-func updateDataInNormalBucketUsingFp(fdb *FDB, bucket *Bucket, fps []*os.File, rs *gjson.Result) (*gjson.Result, []*os.File, []error) {
-	resultStr := "[]"
-	errList := []error{}
-	updatedfps := make([]*os.File, 0)
-	var data *gjson.Result
-	var err error
-	lazyEnable := bucket.EnableLazy
-	for _, fp := range fps {
-		if lazyEnable {
-			data, err = updateDataInLazyBucket(bucket, fp, rs, securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression})
-			if err != nil {
-				// loggermdl.LogError("failed to update data in file: " + path + " : " + err.Error())
-				errList = append(errList, errormdl.Wrap("failed to update data in file: "+fp.Name()+" : "+err.Error()))
-				continue
-			}
-		} else {
-			data, err = updateNormalFileDataUsingFp(fp, bucket, rs, securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression})
-			if err != nil {
-				// loggermdl.LogError("failed to update data in file: " + path + " : " + err.Error())
-				errList = append(errList, errormdl.Wrap("failed to update data in file: "+fp.Name()+" : "+err.Error()))
-				continue
-			}
-		}
-		updatedfps = append(updatedfps, fp)
-		resultStr, _ = sjson.Set(resultStr, "-1", data.Value())
-	}
-	result := gjson.Parse(resultStr)
-	return &result, updatedfps, errList
-}
-
-func updateNormalFileDataUsingFp(fp *os.File, bucket *Bucket, rs *gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, error) {
-	data, err := readNormalFileUsingFp(fp, rs, secParams)
-	if err != nil {
-		return nil, err
-	}
-	existingDataStr := string(data)
-	rs.ForEach(func(key, val gjson.Result) bool {
-		// updating existing data
-		existingDataStr, _ = sjson.Set(existingDataStr, key.String(), val.Value())
-		return true
-	})
-
-	updatedData := gjson.Parse(existingDataStr)
-	err = saveDataInNormalBucketUsingFp(fp, bucket, &updatedData, secParams)
-	return &updatedData, err
-}
-
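-// updateDataInFileIndexBucketUsingFp - updates matching records inside each pack file using the in-file index queries and returns the updated records along with the refreshed in-file indexes.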
-func updateDataInFileIndexBucketUsingFp(fdb *FDB, bucket *Bucket, fpInfileIndexMap map[*os.File]*gjson.Result, rs *gjson.Result, infileIndexQuery []string) (*gjson.Result, map[*os.File]*gjson.Result, []error) {
-	// if bucket.BucketType == BucketTypePack {
-	requestedFileType := rs.Get("fileType").String()
-	if len(requestedFileType) == 0 {
-		loggermdl.LogError("please specify fileType")
-		return nil, fpInfileIndexMap, []error{errormdl.Wrap("please specify fileType")}
-	}
-	infileIndexQuery = append(infileIndexQuery, `#[fileType=="`+requestedFileType+`"]`)
-	// }
-	finalResultArray := []gjson.Result{}
-	errList := []error{}
-	updatedfpInfileIndexMap := make(map[*os.File]*gjson.Result)
-
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-	for fp, infileIndex := range fpInfileIndexMap {
-		// filePath := filepath.Join(fdb.DBPath, path)
-		resultArray, updatedInfileIndex, err := filepack.UpdateFileInPackFileUsingFp(fp, infileIndexQuery, infileIndex, rs, secParams)
-		if err != nil {
-			loggermdl.LogError(err)
-			errList = append(errList, err)
-			continue
-		}
-		updatedfpInfileIndexMap[fp] = updatedInfileIndex
-		finalResultArray = append(finalResultArray, resultArray.Array()...)
-	}
-
-	resultListStr := "[]"
-	for _, resultObj := range finalResultArray {
-		resultListStr, _ = sjson.Set(resultListStr, "-1", resultObj.Value())
-	}
-	result := gjson.Parse(resultListStr)
-	return &result, updatedfpInfileIndexMap, errList
-}
-
-// ReindexOnSpecialBucket - rebuilds the FDB index for the given indexID by walking the bucket files and reading their FDBIndex entries
-func ReindexOnSpecialBucket(fdbName string, indexID string, rs gjson.Result) error {
-	fdb, err := GetFDBInstance(fdbName)
-	if err != nil {
-		loggermdl.LogError("fdb instance not found: ", fdbName)
-		return errormdl.Wrap(("fdb instance not found: " + fdbName))
-	}
-
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("index not found: ", indexID)
-		return errormdl.Wrap("index not found: " + indexID)
-	}
-	// find path to start file walk
-	pathToStartWalk := fdb.DBPath
-	for _, bucketID := range index.BucketSequence {
-		bucket := fdb.buckets[bucketID]
-		if bucket.IsDynamicName {
-			break
-		}
-		pathToStartWalk = filepath.Join(pathToStartWalk, bucket.BucketNameQuery)
-	}
-	// bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	// bucket := fdb.buckets[bucketID]
-	indexDataMap := make(map[string]string)
-	if !filemdl.FileAvailabilityCheck(pathToStartWalk) {
-		return errormdl.Wrap("invalid path: " + pathToStartWalk)
-	}
-
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	ferr := filemdl.Walk(pathToStartWalk, func(filePath string, info os.FileInfo, err error) error {
-		if err != nil {
-			loggermdl.LogError(err)
-			return nil
-		}
-
-		if !info.IsDir() {
-			result, err := filepack.GetDataFromPackFile(filePath, []string{`#[fileType==` + FileTypeFDBIndex + `]`}, &rs, secParams)
-			if err != nil {
-				loggermdl.LogError(err)
-				return nil
-			}
-			data := gjson.Parse(result)
-			data = data.Get("0")
-			pathToSave := strings.TrimPrefix(filePath, fdb.DBPath+string(filepath.Separator))
-			valString, _ := sjson.Delete(data.String(), "fileType")
-			// val= gjson.Parse(valString)
-			indexDataMap[pathToSave] = strings.TrimSpace(valString)
-		}
-
-		return nil
-	})
-	if ferr != nil {
-		return ferr
-	}
-	return reCreateFDBIndexFile(fdb, index, indexDataMap)
-}
-
-func reCreateFDBIndexFile(fdb *FDB, index *Index, indexDataMap map[string]string) error {
-	// create or replace index
-	err := index.CloseStore()
-	if err != nil {
-		return err
-	}
-	indexFilePath := filepath.Join(fdb.DBPath, INDEXFOLDER, index.IndexID)
-	if filemdl.FileAvailabilityCheck(indexFilePath) {
-		err := filemdl.DeleteFile(indexFilePath)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = index.ReplaceIndex()
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	// update index file by reading all data and updating index file
-	return index.AddEntries(indexDataMap)
-}
-
-// AddIndexEntriesInFile - writes the given index entries to the index file, encrypting them when data is present
-func AddIndexEntriesInFile(indexFilePath string, entries map[string]string) error {
-	// dbPath := filepath.Join(fdbPath, INDEXFOLDER)
-	// loggermdl.LogDebug("in log fdb index")
-	dataToStore := ""
-	for key, val := range entries {
-		dataToStore = dataToStore + key + IndexKeyValSeperator + val + lineBreak
-	}
-	dataByteToWriteRes := []byte{}
-	var hashError error
-	if len(dataToStore) > 0 {
-		_, fileName := filepath.Split(indexFilePath)
-		dataByteToWriteRes, hashError = encryptData([]byte(dataToStore), fileName)
-		if errormdl.CheckErr1(hashError) != nil {
-			return errormdl.CheckErr1(hashError)
-		}
-	}
-	return filemdl.WriteFile(indexFilePath, dataByteToWriteRes, true, false)
-}
-
-// GetKeyWithFileNameAndDefaultKey generates a key using the file name combined with the default security key
-func GetKeyWithFileNameAndDefaultKey(filePath string) ([]byte, error) {
-	fileName := filepath.Base(filePath)
-	fileNameBytes := []byte(fileName)
-	fileNameBytes = append(fileNameBytes, defaultSecurityKey...)
-	keyBytes, getHashError := hashmdl.Get128BitHash(fileNameBytes)
-	if errormdl.CheckErr(getHashError) != nil {
-		return nil, errormdl.CheckErr(getHashError)
-	}
-	return keyBytes[:], nil
-}
-
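-// deleteDataFromNormalBucketUsingFp - truncates each Normal bucket file (and clears its lazy cache entry when lazy write is enabled) and reports the deleted paths.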
-func deleteDataFromNormalBucketUsingFp(index *Index, bucket *Bucket, fps []*os.File) (recordsDeleted int, dataDeletedFromPaths []string, errList []error) {
-	lazyEnable := bucket.EnableLazy
-	for _, fp := range fps {
-		if lazyEnable {
-			err := deleteDataFromLazyBucket(bucket, fp)
-			if err != nil {
-				errList = append(errList, errormdl.Wrap("unable to delete file from lazy cache : "+err.Error()))
-			}
-		}
-		// delete file
-		if filemdl.FileAvailabilityCheck(fp.Name()) {
-			err := fp.Truncate(0)
-			if err != nil {
-				errList = append(errList, errormdl.Wrap("unable to delete file : "+err.Error()))
-				continue
-			}
-
-			if _, err := fp.Seek(0, io.SeekStart); err != nil {
-				errList = append(errList, errormdl.Wrap("unable to delete file : "+err.Error()))
-				continue
-			}
-			// err = filemdl.DeleteFile(fp.Name())
-			// // err = deleteNormalFile(filePath)
-			// if err != nil {
-			// 	errList = append(errList, errormdl.Wrap("unable to delete file : "+err.Error()))
-			// 	continue
-			// }
-			dataDeletedFromPaths = append(dataDeletedFromPaths, fp.Name())
-			recordsDeleted++
-		}
-	}
-
-	return
-}
-
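-// deleteDataFromPackBucketUsingFp - deletes matching records from each pack file using the in-file index queries; returns ErrNoDataFound when no file contained matching data.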
-func deleteDataFromPackBucketUsingFp(bucket *Bucket, fpInfileIndexMap map[*os.File]*gjson.Result, rs *gjson.Result, infileIndexQueries []string) (recordsDeleted int, updatedFpInfileIndexMap map[*os.File]*gjson.Result, errList []error) {
-	// updatedFpInfileIndexMap := make(map[*os.File]*gjson.Result)
-	updatedFpInfileIndexMap = make(map[*os.File]*gjson.Result)
-	fileType := rs.Get("fileType").String()
-	if len(fileType) == 0 {
-		loggermdl.LogError("fileType value not provided")
-		return recordsDeleted, updatedFpInfileIndexMap, []error{errormdl.Wrap("please specify fileType")}
-	}
-
-	_, ok := bucket.InFileIndexMap[fileType]
-	if !ok {
-		loggermdl.LogError("infileIndex for specified fileType not found")
-		return recordsDeleted, updatedFpInfileIndexMap, []error{errormdl.Wrap("infileIndex for specified fileType not found")}
-	}
-
-	infileIndexQueries = append(infileIndexQueries, `#[fileType=="`+fileType+`"]`)
-	noDataFoundCnt := 0
-	for fp, infileIndex := range fpInfileIndexMap {
-		// path := filepath.Join(fdbPath, filePath)
-		recordsDeletedCnt, updatedInfileIndex, err := filepack.DeletDataFromPackFileUsingFp(fp, infileIndex, infileIndexQueries)
-		if err != nil && (err.Error() == ErrNoDataFound.Error()) {
-			noDataFoundCnt++
-			continue
-		}
-		if err != nil {
-			errList = append(errList, err)
-			continue
-		}
-		// dataDeletedFromPath = append(dataDeletedFromPath, filePath)
-		recordsDeleted += recordsDeletedCnt
-		updatedFpInfileIndexMap[fp] = updatedInfileIndex
-	}
-
-	if noDataFoundCnt == len(fpInfileIndexMap) {
-		errList = []error{ErrNoDataFound}
-	}
-	return
-}
-
-// GetBucketByIndexID - returns the bucket associated with the specified indexID
-func GetBucketByIndexID(dbName, indexID string) (Bucket, error) {
-	fdb, err := GetFDBInstance(dbName)
-	bucket := Bucket{}
-	if err != nil {
-		loggermdl.LogError("fdb instance not found for: ", dbName)
-		return bucket, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("index not found: " + indexID)
-		return bucket, errormdl.Wrap("index not found: " + indexID)
-	}
-
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucketPtr, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return bucket, errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-	return *bucketPtr, nil
-}
-
-func compressData(data []byte) ([]byte, error) {
-	var hashError error
-	dataByteToWriteRes, hashError := filemdl.ZipBytes(data)
-	if errormdl.CheckErr2(hashError) != nil {
-		return data, errormdl.CheckErr2(hashError)
-	}
-	return dataByteToWriteRes, nil
-}
-func decompressData(data []byte) ([]byte, error) {
-	var hashError error
-	dataByteToWriteRes, hashError := filemdl.UnZipBytes(data)
-	if errormdl.CheckErr2(hashError) != nil {
-		return data, errormdl.CheckErr2(hashError)
-	}
-	return dataByteToWriteRes, nil
-}
-
-func encryptData(data []byte, fileName string) (dataOut []byte, err error) {
-	key, err := getSecurityKey(fileName)
-	if err != nil {
-		return dataOut, err
-	}
-	dataOut, err = compressData(data)
-	if errormdl.CheckErr1(err) != nil {
-		return
-	}
-	dataOut, err = securitymdl.AESEncrypt(dataOut, key)
-	if errormdl.CheckErr1(err) != nil {
-		return
-	}
-	return
-}
-
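-// encryptWithCompression - optionally compresses the data and then AES-encrypts it with the provided key.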
-func encryptWithCompression(data []byte, compress bool, encKey []byte) (res []byte, err error) {
-	// when compression is disabled, encrypt the raw data as-is
-	res = data
-	if compress {
-		res, err = compressData(data)
-		if err != nil {
-			return
-		}
-	}
-
-	return securitymdl.AESEncrypt(res, encKey)
-}
-
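-// decryptwithDecompression - AES-decrypts the data with the provided key and optionally decompresses the result.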
-func decryptwithDecompression(data []byte, deCompress bool, encKey []byte) (res []byte, err error) {
-	res, err = securitymdl.AESDecrypt(data, encKey)
-	if err != nil {
-		return
-	}
-
-	if deCompress {
-		return decompressData(res)
-	}
-
-	return
-}
-
-func decryptData(data []byte, fileName string) (dataOut []byte, err error) {
-
-	key, err := getSecurityKey(fileName)
-	if err != nil {
-		return dataOut, err
-	}
-
-	dataOut, err = securitymdl.AESDecrypt(data, key)
-	if errormdl.CheckErr1(err) != nil {
-		return
-	}
-
-	dataOut, err = decompressData(dataOut)
-	if errormdl.CheckErr1(err) != nil {
-		return
-	}
-	return
-}
-
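-// getSecurityKey - returns the encryption key for a file, using the registered key generator function if set, otherwise the file name combined with the default key.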
-func getSecurityKey(fileName string) ([]byte, error) {
-	var key []byte
-	var err error
-	securityKeyGenFunc := securitymdl.GetSecurityKeyGeneratorFunc()
-	if securityKeyGenFunc != nil {
-		key, err = securityKeyGenFunc(fileName)
-	} else {
-		key, err = GetKeyWithFileNameAndDefaultKey(fileName)
-	}
-	return key, err
-}
-
-// SaveDataInFDB - saves data in the FDB according to the resolved index path and bucket type, and updates the index entry
-func SaveDataInFDB(dbName, indexID string, rs *gjson.Result) error {
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	path, err := fdb.resolveIndex(index, rs)
-	if errormdl.CheckErr(err) != nil {
-		loggermdl.LogError(err)
-		return errormdl.CheckErr(err)
-	}
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-	prevVal, err := index.GetEntryByPath(path)
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	filePath := filepath.Join(fdb.DBPath, path)
-	var fp *os.File
-	var inFileIndexData *gjson.Result
-	rfile, err := fileFpCache.Get(filePath)
-	if err != nil {
-		fp, err = openFile(filePath)
-		if err != nil {
-			loggermdl.LogError(err)
-			return err
-		}
-		rfile = fileFpCache.Set(fp, nil)
-	} else {
-		fp = rfile.file
-		inFileIndexData = rfile.InfileIndex
-	}
-	rfile.lock.Lock()
-	defer rfile.lock.Unlock()
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-	switch bucket.BucketType {
-	case BucketTypeSimple:
-		// err = saveDataInNormalBucketUsingFp(fp, rs)
-		err = saveDataInNormalBucketUsingFp(fp, bucket, rs, secParams)
-	case BucketTypePack, BucketTypeMixed:
-		inFileIndexData, err = saveDataInPackBucketUsingFp(fdb, fp, index, inFileIndexData, rs)
-		fileFpCache.Set(fp, inFileIndexData)
-	case BucketTypeAppend:
-		err := saveDataInAppendBucketUsingFp(fp, bucket, rs, secParams)
-		if errormdl.CheckErr(err) != nil {
-			loggermdl.LogError(err)
-			return errormdl.CheckErr(err)
-		}
-	default:
-		return errormdl.Wrap("Please provide valid bucket type")
-	}
-
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-
-	rowID, err := GenRowID(path)
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-
-	prevVal, _ = sjson.Set(prevVal, "rowID", rowID)
-	path = strings.TrimPrefix(fp.Name(), fdb.DBPath+string(filepath.Separator))
-	updatedJSON, err := updateIndexJSON(index, prevVal, rs)
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	updatedJSONObj := gjson.Parse(updatedJSON)
-	err = index.AddEntry(path, &updatedJSONObj)
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	// TODO: Currently index data is overwritten by new data.
-	if isLazyWriterEnabled {
-		err = UpdateIndexLazyObjectInCache(indexID, index)
-		if err != nil {
-			loggermdl.LogError("failed to update index data in lazy writer cache - ", err)
-			return errormdl.Wrap("failed to update index data in lazy writer cache")
-		}
-	}
-	return nil
-}
-
-// UpdateDataInFDB - updates records matching the given queries and returns the updated data
-func UpdateDataInFDB(dbName, indexID string, rs *gjson.Result, query []string, infileIndexQuery []string) (*gjson.Result, []error) {
-	var errList []error
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return nil, []error{err}
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return nil, []error{errormdl.Wrap("INDEX not found: " + indexID)}
-	}
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return nil, []error{errormdl.Wrap("Bucket not found: " + bucketID)}
-	}
-
-	indexKeyValMap, err := index.GetEntriesByQueries(query)
-	if err != nil {
-		return nil, []error{err}
-	}
-	// resultToReturn := gjson.Result{}
-
-	if len(indexKeyValMap) == 0 {
-		loggermdl.LogError("files not found")
-		return nil, []error{ErrNoDataFound}
-	}
-	var fps []*os.File
-	fpInfileIndexMap := make(map[*os.File]*gjson.Result)
-
-	var rFiles []*File
-	for filePath := range indexKeyValMap {
-		filePath = filepath.Join(fdb.DBPath, filePath)
-		rfile, err := fileFpCache.Get(filePath)
-		if err != nil {
-			fp, err := openFile(filePath)
-			if err != nil {
-				continue
-			}
-			// fileFpCache.Set(fp, nil)
-			fps = append(fps, fp)
-			fpInfileIndexMap[fp] = nil
-			rfile = fileFpCache.Set(fp, nil)
-		} else {
-			fpInfileIndexMap[rfile.file] = rfile.InfileIndex
-			fps = append(fps, rfile.file)
-		}
-		rfile.lock.Lock()
-		rFiles = append(rFiles, rfile)
-	}
-
-	defer func() {
-		for i := range rFiles {
-			rFiles[i].lock.Unlock()
-		}
-	}()
-	var result *gjson.Result
-	// var dataUpdatedAtFilePaths []string
-	var updatedFpInfileIndexMap map[*os.File]*gjson.Result
-	var updatedFps []*os.File
-	switch bucket.BucketType {
-	case BucketTypeSimple:
-		result, updatedFps, errList = updateDataInNormalBucketUsingFp(fdb, bucket, fps, rs)
-		// optimization : if data not changed then dont update
-		for _, fp := range updatedFps {
-			path := strings.TrimPrefix(fp.Name(), fdb.DBPath+string(filepath.Separator))
-
-			json := indexKeyValMap[path]
-			rowID, err := GenRowID(path)
-			if err != nil {
-				errList = append(errList, err)
-			}
-			json, _ = sjson.Set(json, "rowID", rowID)
-			updatedJSON, err := updateIndexJSON(index, json, rs)
-			if err != nil {
-				errList = append(errList, err)
-			}
-
-			updatedJSONObj := gjson.Parse(updatedJSON)
-			err = index.AddEntry(path, &updatedJSONObj)
-			if err != nil {
-				errList = append(errList, err)
-			}
-		}
-
-	case BucketTypePack, BucketTypeMixed:
-		result, updatedFpInfileIndexMap, errList = updateDataInFileIndexBucketUsingFp(fdb, bucket, fpInfileIndexMap, rs, infileIndexQuery)
-		for fp, infileIndex := range updatedFpInfileIndexMap {
-			fileFpCache.Set(fp, infileIndex)
-			path := strings.TrimPrefix(fp.Name(), fdb.DBPath+string(filepath.Separator))
-			json := indexKeyValMap[path]
-			rowID, err := GenRowID(path)
-			if err != nil {
-				errList = append(errList, err)
-			}
-			json, _ = sjson.Set(json, "rowID", rowID)
-			updatedJSON, err := updateIndexJSON(index, json, rs)
-			if err != nil {
-				loggermdl.LogError(err)
-				errList = append(errList, err)
-			}
-			updatedJSONObj := gjson.Parse(updatedJSON)
-			err = index.AddEntry(path, &updatedJSONObj)
-			if err != nil {
-				loggermdl.LogError(err)
-				errList = append(errList, err)
-			}
-		}
-	default:
-		loggermdl.LogError("invalid bucket type")
-		return nil, []error{errormdl.Wrap("invalid bucket type - " + bucket.BucketType)}
-	}
-
-	// TODO: Currently index data is overwritten by new data.
-	if isLazyWriterEnabled {
-		err = UpdateIndexLazyObjectInCache(indexID, index)
-		if err != nil {
-			loggermdl.LogError("failed to update index data in lazy writer cache - ", err)
-			return nil, []error{errormdl.Wrap("failed to update index data in lazy writer cache")}
-		}
-	}
-
-	return result, errList
-}
-
-// DeleteDataFromFDB -
-func DeleteDataFromFDB(dbName string, indexID string, rs *gjson.Result, queries []string, infileIndexQueries []string) (recordsDeletedCnt int, errList []error) {
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		loggermdl.LogError("fdb instance not found for: ", dbName)
-		return recordsDeletedCnt, []error{err}
-	}
-
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("index not found: " + indexID)
-		return recordsDeletedCnt, []error{errormdl.Wrap("index not found: " + indexID)}
-	}
-	indexKeyValMap, err := index.GetEntriesByQueries(queries)
-	if err != nil {
-		loggermdl.LogError(err)
-		return recordsDeletedCnt, []error{err}
-	}
-	if len(indexKeyValMap) == 0 {
-		loggermdl.LogError("no data found to delete")
-		return recordsDeletedCnt, []error{ErrNoDataFound}
-	}
-	fps := make([]*os.File, 0)
-	fpInfileIndexMap := make(map[*os.File]*gjson.Result)
-	var rFiles []*File
-	for filePath := range indexKeyValMap {
-		filePath = filepath.Join(fdb.DBPath, filePath)
-		rfile, err := fileFpCache.Get(filePath)
-		if err != nil {
-			fp, err := openFile(filePath)
-			if err != nil {
-				continue
-			}
-			fps = append(fps, fp)
-			fpInfileIndexMap[fp] = nil
-			rfile = fileFpCache.Set(fp, nil)
-		} else {
-			fpInfileIndexMap[rfile.file] = rfile.InfileIndex
-			fps = append(fps, rfile.file)
-		}
-
-		rfile.lock.Lock()
-		rFiles = append(rFiles, rfile)
-	}
-
-	defer func() {
-		for i := range rFiles {
-			rFiles[i].lock.Unlock()
-		}
-	}()
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return recordsDeletedCnt, []error{errormdl.Wrap("Bucket not found: " + bucketID)}
-	}
-	dataDeletedFromPaths := []string{}
-	var updatedfpInfileIndexMap map[*os.File]*gjson.Result
-	if bucket.BucketType == BucketTypeSimple {
-		recordsDeletedCnt, dataDeletedFromPaths, errList = deleteDataFromNormalBucketUsingFp(index, bucket, fps)
-		for _, path := range dataDeletedFromPaths {
-			fileFpCache.Delete(path)
-			path = strings.TrimPrefix(path, fdb.DBPath+string(filepath.Separator))
-			err = index.Delete(path)
-			if err != nil {
-				loggermdl.LogError(err)
-				errList = append(errList, err)
-			}
-		}
-
-	} else if bucket.BucketType == BucketTypePack || bucket.BucketType == BucketTypeMixed {
-		recordsDeletedCnt, updatedfpInfileIndexMap, errList = deleteDataFromPackBucketUsingFp(bucket, fpInfileIndexMap, rs, infileIndexQueries)
-		for fp, infileIndex := range updatedfpInfileIndexMap {
-			fileFpCache.Set(fp, infileIndex)
-		}
-	} else {
-		return recordsDeletedCnt, []error{errormdl.Wrap("Operation not allowed on bucket type: " + bucket.BucketType)}
-	}
-	// loggermdl.LogError("dataDeletedFromPaths", dataDeletedFromPaths)
-
-	if isLazyWriterEnabled {
-
-		err := UpdateIndexLazyObjectInCache(index.IndexID, index)
-		if err != nil {
-			loggermdl.LogError("failed to update index data in lazy writer cache - ", err)
-			return 0, []error{errormdl.Wrap("failed to update index data in lazy writer cache")}
-		}
-	}
-	return recordsDeletedCnt, errList
-}
-
-// ReadDataFromFDB - return records with matching query
-func ReadDataFromFDB(dbName, indexID string, rs *gjson.Result, queries []string, infileIndexQuery []string) (*gjson.Result, error) {
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return nil, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return nil, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return nil, errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
-	if err != nil {
-		loggermdl.LogError(err)
-		return nil, err
-	}
-	resultToReturn := gjson.Parse("[]")
-	if len(indexKeyValueMap) == 0 {
-		loggermdl.LogError("files not found")
-		return &resultToReturn, nil
-	}
-	fps := make([]*os.File, 0)
-	fpInfileIndexMap := make(map[*os.File]*gjson.Result)
-	var rFiles []*File
-
-	for filePath := range indexKeyValueMap {
-		filePath = filepath.Join(fdb.DBPath, filePath)
-		rfile, err := fileFpCache.Get(filePath)
-		if err != nil {
-			fp, err := openFile(filePath)
-			if err != nil {
-				continue
-			}
-			fps = append(fps, fp)
-			fpInfileIndexMap[fp] = nil
-			rfile = fileFpCache.Set(fp, nil)
-		} else {
-			fpInfileIndexMap[rfile.file] = rfile.InfileIndex
-			fps = append(fps, rfile.file)
-		}
-		rfile.lock.Lock()
-		rFiles = append(rFiles, rfile)
-	}
-
-	defer func() {
-		for i := range rFiles {
-			rFiles[i].lock.Unlock()
-		}
-	}()
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	switch bucket.BucketType {
-	case BucketTypeSimple:
-		return readNormalFilesUsingFp(fps, bucket, rs, secParams)
-	case BucketTypePack, BucketTypeMixed:
-		requestedFileType := rs.Get("fileType").String()
-		if len(requestedFileType) == 0 {
-			return &resultToReturn, errormdl.Wrap("please specify fileType")
-		}
-		infileIndexQuery = append(infileIndexQuery, `#[fileType==`+requestedFileType+`]`)
-		resultArray, updatedFpInfileIndexMap, err := readDataFromPackFiles(fpInfileIndexMap, infileIndexQuery, rs, secParams)
-		if err != nil {
-			loggermdl.LogError(err)
-			return &resultToReturn, err
-		}
-		for fp, infileIndex := range updatedFpInfileIndexMap {
-			fileFpCache.Set(fp, infileIndex)
-		}
-		resultToReturn = gjson.Parse(resultArray)
-		return &resultToReturn, nil
-	}
-	return nil, errormdl.Wrap("Operation not available on bucket type: " + bucket.BucketType)
-	// loggermdl.LogError("resultToReturn - ", resultToReturn)
-}
-
-func readDataFromPackFiles(filepathInfileIndexMap map[*os.File]*gjson.Result, infileIndexQuery []string, rs *gjson.Result, secParams securitymdl.FDBSecParams) (string, map[*os.File]*gjson.Result, error) {
-	resultArray := "[]"
-	for f, infileIndexData := range filepathInfileIndexMap {
-		result, updatedInfileIndexData, err := filepack.GetDataFromPackFileUsingFp(f, infileIndexData, infileIndexQuery, rs, secParams)
-		if err != nil {
-			return resultArray, filepathInfileIndexMap, err
-		}
-		// loggermdl.LogDebug("result", result)
-		for _, val := range gjson.Parse(result).Array() {
-			resultArray, _ = sjson.Set(resultArray, "-1", val.Value())
-		}
-		filepathInfileIndexMap[f] = updatedInfileIndexData
-	}
-	return resultArray, filepathInfileIndexMap, nil
-}
-
-func openFile(filePath string) (*os.File, error) {
-	dir, _ := filepath.Split(filePath)
-	if dir != "" {
-		createError := filemdl.CreateDirectoryRecursive(dir)
-		if errormdl.CheckErr(createError) != nil {
-			return nil, errormdl.CheckErr(createError)
-		}
-	}
-	return os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
-}
-
-func addMigrationReplaceConfig(targetBasePath string, secParams securitymdl.FDBSecParams) error {
-	configfilePath := filepath.Join(targetBasePath, MigrationConfigFilename)
-	fp, err := openFile(configfilePath)
-	defer func() {
-		fp.Close()
-	}()
-	if err != nil {
-		return err
-	}
-	migartionConfigStr, _ := sjson.Set("", MigrationTypeKeyword, MigrationTypeReplace)
-	rs := gjson.Parse(migartionConfigStr)
-	bucket := Bucket{}
-	return saveDataInNormalBucketUsingFp(fp, &bucket, &rs, secParams)
-}
-
-func addMigrationUpdateConfig(targetBasePath string, secParams securitymdl.FDBSecParams) error {
-	configfilePath := filepath.Join(targetBasePath, MigrationConfigFilename)
-	fp, err := openFile(configfilePath)
-	defer func() {
-		fp.Close()
-	}()
-	if err != nil {
-		return err
-	}
-	migartionConfigStr, _ := sjson.Set("", MigrationTypeKeyword, MigrationTypeUpdate)
-	rs := gjson.Parse(migartionConfigStr)
-	bucket := Bucket{}
-	return saveDataInNormalBucketUsingFp(fp, &bucket, &rs, secParams)
-}
-
-// DataImporter is the interface that wraps the basic DataImport method.
-type DataImporter interface {
-	DataImport()
-}
-
-// DataExporter is the interface that wraps the basic DataExport method.
-type DataExporter interface {
-	DataExport()
-}
-
-// ZipImporter is a DataImporter
-// allow to import fdb data from zip
-type ZipImporter struct {
-	FdbName    string
-	IndexID    string
-	SourcePath string
-	Data       *gjson.Result
-}
-
-// ZipExporter is a DataExporter
-// allow to export fdb data as zip
-type ZipExporter struct {
-	FdbName       string
-	IndexID       string
-	Queries       []string
-	DestPath      string
-	MigrationType string
-}
-
-// DataExport exports fdb data as zip
-func (z ZipExporter) DataExport() (err error) {
-	fdb, err := GetFDBInstance(z.FdbName)
-	if err != nil {
-		loggermdl.LogError("fdb instance not found: ", z.FdbName)
-		return errormdl.Wrap("fdb instance not found: " + z.FdbName)
-	}
-	index, ok := fdb.GetFDBIndex(z.IndexID)
-	if !ok {
-		return errormdl.Wrap("INDEX not found: " + z.IndexID)
-	}
-	sourcePath := ""
-	timeStamp := time.Now().Nanosecond()
-	targetBasePath := filepath.Join(filemdl.TempDir, strconv.Itoa(timeStamp))
-	filteredKeyValMap, err := index.GetEntriesByQueries(z.Queries)
-	if err != nil {
-		return err
-	}
-	if len(filteredKeyValMap) == 0 {
-		return errormdl.Wrap("no data found to export")
-	}
-	defer func() {
-		// removes created zip
-		filemdl.DeleteDirectory(targetBasePath)
-	}()
-	// copy data files
-	for path := range filteredKeyValMap {
-		sourcePath = filepath.Join(fdb.DBPath, path)
-		targetPath := filepath.Join(targetBasePath, path)
-		_, err := filemdl.CopyFile(sourcePath, targetPath, true)
-		if err != nil {
-			return err
-		}
-	}
-
-	// copy index file
-	targetPath := filepath.Join(targetBasePath, INDEXFOLDER, z.IndexID)
-	err = AddIndexEntriesInFile(targetPath, filteredKeyValMap)
-	if err != nil {
-		return err
-	}
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	switch z.MigrationType {
-	case MigrationTypeUpdate:
-		err = addMigrationUpdateConfig(targetBasePath, secParams)
-	case MigrationTypeReplace:
-		err = addMigrationReplaceConfig(targetBasePath, secParams)
-	default:
-		return errormdl.Wrap("fail to export data: export operation not allowed on migration type - " + z.MigrationType)
-	}
-
-	if err != nil {
-		loggermdl.LogError("fail to export data: ", err)
-		return errormdl.Wrap("fail to export data: " + err.Error())
-	}
-	// make zip of copied data to destination folder
-	// zip will have name of indexId
-	destinationPath := filepath.Join(z.DestPath, z.IndexID)
-	return filemdl.Zip(targetBasePath, destinationPath)
-}
-
-// DataImport imports data from zip
-func (z ZipImporter) DataImport() (err error) {
-	fdb, err := GetFDBInstance(z.FdbName)
-	if err != nil {
-		loggermdl.LogError("fdb instance not found: ", z.FdbName)
-		return errormdl.Wrap("fdb instance not found: " + z.FdbName)
-	}
-	index, ok := fdb.GetFDBIndex(z.IndexID)
-	if !ok {
-		loggermdl.LogError("index not found: ", z.IndexID)
-		return errormdl.Wrap("index not found: " + z.IndexID)
-	}
-	archivePath := z.SourcePath
-	if !filemdl.FileAvailabilityCheck(archivePath) {
-		loggermdl.LogError("archive file not found at specified location: ", archivePath)
-		return errormdl.Wrap("archive file not found at location: " + archivePath)
-	}
-	timeStamp := time.Now().Nanosecond()
-	pathToExtractZip := filepath.Join(filemdl.TempDir, strconv.Itoa(timeStamp))
-
-	err = filemdl.Unzip(archivePath, pathToExtractZip)
-	if err != nil {
-		loggermdl.LogError("failed to import data: ", err)
-		return errormdl.Wrap("invalid archived file")
-	}
-	defer func() {
-		// removes extracted files
-		filemdl.DeleteDirectory(pathToExtractZip)
-	}()
-	childDirs, err := filemdl.ListDirectory(pathToExtractZip)
-	if err != nil {
-		loggermdl.LogError("failed to import data: ", err)
-		return errormdl.Wrap("invalid archived file")
-	}
-	if len(childDirs) == 0 {
-		loggermdl.LogError("no data found to import")
-		return errormdl.Wrap("no data found to import")
-	}
-	if !childDirs[0].IsDir() {
-		loggermdl.LogError("invalid archive file")
-		return errormdl.Wrap("invalid archive file")
-	}
-	sourcePath := filepath.Join(pathToExtractZip, childDirs[0].Name())
-	fdbBasePath := fdb.DBPath
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	// loggermdl.LogDebug(sourcePath)
-	migrationConfig, err := getMigrationConfig(sourcePath, secParams, z.Data)
-	if err != nil {
-		loggermdl.LogError("fail to get migration config", err)
-		return errormdl.Wrap("invalid archived file")
-	}
-	migrationType := migrationConfig.Get(MigrationTypeKeyword).String()
-	if migrationType != MigrationTypeReplace && migrationType != MigrationTypeUpdate {
-		return errormdl.Wrap("import operation not allowed on migration type - " + migrationType)
-	}
-	indexFilePath := filepath.Join(fdb.DBPath, INDEXFOLDER, z.IndexID)
-	err = filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			loggermdl.LogError("err", err)
-			return err
-		}
-		if info.IsDir() {
-			return nil
-		}
-		//  ignore config file from copying
-		if strings.Contains(path, MigrationConfigFilename) {
-			return nil
-		}
-
-		foundAtIndex := strings.LastIndex(path, sourcePath)
-		if foundAtIndex == -1 {
-			return errormdl.Wrap("invalid archived file")
-		}
-		// loggermdl.LogDebug(path)
-
-		// if migartion type is MigrationTypeUpdate then copy index entries from index files else replace index files
-		if migrationType == MigrationTypeUpdate && strings.Contains(path, INDEXFOLDER) {
-			// load index entries from
-			err := ImportIndexEntries(path, fdb, z.IndexID)
-			if err != nil {
-				loggermdl.LogError("fail to load indexes from data", err)
-				return errormdl.Wrap("fail to load indexes")
-			}
-			err = LogFDBIndexFile(indexFilePath, index)
-			if err != nil {
-				loggermdl.LogError("fail to add indexes: ", err)
-				return errormdl.Wrap("fail to add indexes")
-			}
-			return nil
-		}
-		destPath := filepath.Join(fdbBasePath, path[foundAtIndex+len(sourcePath):])
-		if !filemdl.FileAvailabilityCheck(destPath) {
-			dir, _ := filepath.Split(destPath)
-			err = filemdl.CreateDirectoryRecursive(dir)
-			if err != nil {
-				return err
-			}
-		}
-		fileFpCache.Delete(destPath)
-		return filemdl.AtomicReplaceFile(path, destPath)
-	})
-	if err != nil {
-		loggermdl.LogError("fail to import data: ", err)
-		return errormdl.Wrap("fail to import data: " + err.Error())
-	}
-
-	err = LoadFDBIndexFromFile(indexFilePath, fdb, z.IndexID)
-	if err != nil {
-		loggermdl.LogError("fail to add indexes", err)
-		return errormdl.Wrap("fail to add indexes")
-	}
-
-	return nil
-}
-
-func getMigrationConfig(sourcePath string, secParams securitymdl.FDBSecParams, rs *gjson.Result) (*gjson.Result, error) {
-	configPath := filepath.Join(sourcePath, MigrationConfigFilename)
-	if !filemdl.FileAvailabilityCheck(configPath) {
-		return nil, errormdl.Wrap("file not found")
-	}
-	fp, err := filemdl.Open(configPath)
-	defer func() {
-		fp.Close()
-	}()
-	dataByte, err := readNormalFileUsingFp(fp, rs, secParams)
-	if err != nil {
-		return nil, err
-	}
-	migrationConfig := gjson.ParseBytes(dataByte)
-	return &migrationConfig, nil
-}
-
-// ImportIndexEntries -
-func ImportIndexEntries(indexFilePath string, fdb *FDB, indexID string) error {
-	index, found := fdb.GetFDBIndex(indexID)
-	if !found {
-		return errormdl.Wrap("index not found")
-	}
-	if !filemdl.FileAvailabilityCheck(indexFilePath) {
-		return nil
-	}
-	fileData, err := filemdl.FastReadFile(indexFilePath)
-	if err != nil {
-		loggermdl.LogError("failed to load FDB index from: ", indexFilePath)
-		return err
-	}
-
-	_, fileName := filepath.Split(indexFilePath)
-	fileData, err = decryptData(fileData, fileName)
-	if err != nil {
-		loggermdl.LogError("failed to decrypt FDB index data: ", err)
-		return errormdl.Wrap("failed to decrypt FDB index data: " + err.Error())
-	}
-	data := string(fileData)
-	indexRecords := strings.Split(data, lineBreak)
-	indexDataMap := make(map[string]string)
-	for _, indexRecord := range indexRecords {
-		indexValues := strings.Split(indexRecord, IndexKeyValSeperator)
-		if len(indexValues) == 2 {
-			indexDataMap[indexValues[0]] = indexValues[1]
-		}
-	}
-	var fns []func(a, b string) bool
-	for _, idx := range index.IndexFields {
-		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
-	}
-
-	// update index file by reading all data and updating index file
-	return index.AddEntries(indexDataMap)
-}
-
-// FileImporter is a DataImporter
-// allow to import fdb data from exported folder
-type FileImporter struct {
-	FdbName    string
-	IndexID    string
-	SourcePath string
-	Data       *gjson.Result
-}
-
-// FileExporter is a DataExporter
-// allow to export fdb data in a folder
-type FileExporter struct {
-	FdbName       string
-	IndexID       string
-	Queries       []string
-	DestPath      string
-	MigrationType string
-}
-
-// DataExport exports fdb data in a folder
-func (f FileExporter) DataExport() (err error) {
-	fdb, err := GetFDBInstance(f.FdbName)
-	if err != nil {
-		loggermdl.LogError("fdb instance not found: ", f.FdbName)
-		return errormdl.Wrap("fdb instance not found: " + f.FdbName)
-	}
-	index, ok := fdb.GetFDBIndex(f.IndexID)
-	if !ok {
-		return errormdl.Wrap("INDEX not found: " + f.IndexID)
-	}
-	sourcePath := ""
-	targetBasePath := filepath.Join(f.DestPath, f.IndexID)
-	filteredKeyValMap, err := index.GetEntriesByQueries(f.Queries)
-	if err != nil {
-		return err
-	}
-
-	if len(filteredKeyValMap) == 0 {
-		return errormdl.Wrap("no data found to export")
-	}
-
-	for path := range filteredKeyValMap {
-		sourcePath = filepath.Join(fdb.DBPath, path)
-		targetPath := filepath.Join(targetBasePath, path)
-		_, err := filemdl.CopyFile(sourcePath, targetPath, true)
-		if err != nil {
-			return err
-		}
-	}
-
-	// copy index file
-	targetPath := filepath.Join(targetBasePath, INDEXFOLDER, f.IndexID)
-	err = AddIndexEntriesInFile(targetPath, filteredKeyValMap)
-	if err != nil {
-		return err
-	}
-
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	switch f.MigrationType {
-	case MigrationTypeUpdate:
-		err = addMigrationUpdateConfig(targetBasePath, secParams)
-	case MigrationTypeReplace:
-		err = addMigrationReplaceConfig(targetBasePath, secParams)
-	default:
-		return errormdl.Wrap("export operation not allowed on migration type - " + f.MigrationType)
-	}
-
-	return err
-}
-
-// DataImport imports data from exported folder
-func (f FileImporter) DataImport() (err error) {
-	fdb, err := GetFDBInstance(f.FdbName)
-	if err != nil {
-		loggermdl.LogError("fdb instance not found: ", f.FdbName)
-		return errormdl.Wrap("fdb instance not found: " + f.FdbName)
-	}
-	index, ok := fdb.GetFDBIndex(f.IndexID)
-	if !ok {
-		loggermdl.LogError("index not found: ", f.IndexID)
-		return errormdl.Wrap("index not found: " + f.IndexID)
-	}
-	if !filemdl.FileAvailabilityCheck(f.SourcePath) {
-		loggermdl.LogError("archive file not found at specified location: ", f.SourcePath)
-		return errormdl.Wrap("archive file not found at location: " + f.SourcePath)
-	}
-
-	timeStamp := time.Now().Nanosecond()
-	tempDir := filepath.Join(filemdl.TempDir, strconv.Itoa(timeStamp))
-	err = filemdl.CopyDir(f.SourcePath, tempDir)
-	if err != nil {
-		loggermdl.LogError("failed to import data: ", err)
-		return errormdl.Wrap("fail to copy data")
-	}
-	defer func() {
-		filemdl.DeleteDirectory(tempDir)
-	}()
-
-	childDirs, err := filemdl.ListDirectory(tempDir)
-	if err != nil {
-		loggermdl.LogError("failed to import data: ", err)
-		return errormdl.Wrap("invalid archived file")
-	}
-	if len(childDirs) == 0 {
-		loggermdl.LogError("no data found to import")
-		return errormdl.Wrap("no data found to import")
-	}
-	fdbBasePath := fdb.DBPath
-	secParams := securitymdl.FDBSecParams{EnableSecurity: fdb.EnableSecurity, EnableCompression: fdb.EnableCompression}
-
-	// loggermdl.LogDebug(f.SourcePath)
-	migrationConfig, err := getMigrationConfig(tempDir, secParams, f.Data)
-	if err != nil {
-		loggermdl.LogError("fail to get migration config", err)
-		return errormdl.Wrap("invalid archived file")
-	}
-	migrationType := migrationConfig.Get(MigrationTypeKeyword).String()
-	if migrationType != MigrationTypeReplace && migrationType != MigrationTypeUpdate {
-		return errormdl.Wrap("import operation not allowed on migration type - " + migrationType)
-	}
-	indexFilePath := filepath.Join(fdb.DBPath, INDEXFOLDER, f.IndexID)
-	err = filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			loggermdl.LogError("err", err)
-			return err
-		}
-		// loggermdl.LogError(path)
-		if info.IsDir() {
-			return nil
-		}
-		//  ignore config file from copying
-		if strings.Contains(path, MigrationConfigFilename) {
-			return nil
-		}
-
-		foundAtIndex := strings.LastIndex(path, tempDir)
-		if foundAtIndex == -1 {
-			return errormdl.Wrap("invalid archived file")
-		}
-		// if file is index file then copy index entries from index files
-		if strings.Contains(path, INDEXFOLDER) {
-			// load index entries from file
-			err := ImportIndexEntries(path, fdb, f.IndexID)
-			if err != nil {
-				loggermdl.LogError("fail to import indexes", err)
-				return errormdl.Wrap("fail to import indexes")
-			}
-			err = LogFDBIndexFile(indexFilePath, index)
-			if err != nil {
-				loggermdl.LogError("fail to import indexes: ", err)
-				return errormdl.Wrap("fail to import indexes")
-			}
-			return nil
-		}
-
-		destPath := filepath.Join(fdbBasePath, path[foundAtIndex+len(tempDir):])
-		if !filemdl.FileAvailabilityCheck(destPath) {
-			dir, _ := filepath.Split(destPath)
-			err = filemdl.CreateDirectoryRecursive(dir)
-			if err != nil {
-				return err
-			}
-		}
-		// removing filepointer from memory
-		fileFpCache.Delete(destPath)
-		return filemdl.AtomicReplaceFile(path, destPath)
-	})
-
-	if err != nil {
-		loggermdl.LogError("fail to import data: ", err)
-		return errormdl.Wrap("fail to import data: " + err.Error())
-	}
-
-	return nil
-}
diff --git a/dalmdl/corefdb/bucket/appendbucket.go b/dalmdl/corefdb/bucket/appendbucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ce755ad895905a2e1dc397a1e7fbd6f2954fad3
--- /dev/null
+++ b/dalmdl/corefdb/bucket/appendbucket.go
@@ -0,0 +1,52 @@
+package bucket
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+)
+
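+// AppendBucket - bucket that only appends records to a file; find, update and
+// delete are not supported for this bucket type.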
+type AppendBucket struct {
+	Bucket
+}
+
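+// NewAppendBucket - returns a new AppendBucket; bucketNameQuery is mandatory and
+// is treated as a gjson query on the record when isDynamicName is true.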
+func NewAppendBucket(bucketNameQuery string, isDynamicName bool, isLazyEnable bool, bucketPath string) (*AppendBucket, error) {
+	if bucketNameQuery == "" {
+		return nil, errormdl.Wrap("please provide value of bucketNameQuery")
+	}
+
+	b := Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: bucketNameQuery,
+		IsDynamicName:   isDynamicName,
+		BucketPath:      bucketPath,
+	}
+	bucket := AppendBucket{}
+	bucket.Bucket = b
+	return &bucket, nil
+}
+
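+// Insert - opens (or creates) the append file at filePath and appends data to it.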
+func (ab *AppendBucket) Insert(filePath string, data *gjson.Result) error {
+	locker := locker.NewLocker(filePath)
+	appendFile, err := filetype.NewAppendFile(filePath, ab.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return err
+	}
+	defer appendFile.Close()
+
+	return appendFile.Write(data)
+}
+
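+// Find - not supported on an append bucket.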
+func (ab *AppendBucket) Find(filePaths []string, queries []string, data *gjson.Result) (string, error) {
+	return "", errormdl.Wrap("operation not allowed")
+}
+
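+// Update - not supported on an append bucket.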
+func (ab *AppendBucket) Update(filePaths []string, queries []string, data *gjson.Result) (*gjson.Result, []error) {
+	return nil, []error{errormdl.Wrap("operation not allowed")}
+}
+
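+// Delete - not supported on an append bucket.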
+func (ab *AppendBucket) Delete(filePaths, queries []string, data *gjson.Result) (recordsDeletedCnt int, errList []error) {
+	return 0, []error{errormdl.Wrap("operation not allowed")}
+}
diff --git a/dalmdl/corefdb/bucket/bucket.go b/dalmdl/corefdb/bucket/bucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..6559cbe81841d4a90b636e362bab4febd55ff001
--- /dev/null
+++ b/dalmdl/corefdb/bucket/bucket.go
@@ -0,0 +1,85 @@
+package bucket
+
+import (
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/tidwall/gjson"
+)
+
+const (
+	PathSeperator     = "/"
+	DynamicPathPrefix = "$$"
+)
+
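+// PathProvider - resolves the storage path of a record from its JSON content.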
+type PathProvider interface {
+	GetPath(rs *gjson.Result) (string, error)
+}
+
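+// Securable - accepts a SecurityProvider used to secure file contents.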
+type Securable interface {
+	Secure(securityprovider.SecurityProvider)
+}
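+
+// Store - CRUD operations implemented by every bucket type.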
+type Store interface {
+	Insert(string, *gjson.Result) error
+	Find([]string, []string, *gjson.Result) (string, error)
+	Update([]string, []string, *gjson.Result) (*gjson.Result, []error)
+	Delete([]string, []string, *gjson.Result) (int, []error)
+}
+
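+// MediaStore - operations for storing and retrieving media (binary) records.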
+type MediaStore interface {
+	WriteMedia(filePath string, mediaData []byte, rs *gjson.Result) (string, error)
+	ReadMedia(filePath string, recordID string) ([]byte, *gjson.Result, error)
+	UpdateMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (err error)
+	UpsertMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (string, error)
+}
+
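+// Bucket - fields common to all bucket types.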
+type Bucket struct {
+	BucketID        string `json:"bucketId"`
+	IsDynamicName   bool   `json:"isDynamicName"`
+	BucketNameQuery string `json:"bucketNameQuery"`
+	// TODO: rename json tag "indices" to match Indexes
+	Indexes          []string `json:"indices"`
+	BucketPath       string   `json:"bucketPath"`
+	SecurityProvider securityprovider.SecurityProvider
+}
+
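+// AddIndex - registers the index with the bucket and appends the bucket to the
+// index's bucket sequence.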
+func (bucket *Bucket) AddIndex(index *index.Index) error {
+	if index == nil {
+		return errormdl.Wrap("index value is nil")
+	}
+	bucket.Indexes = append(bucket.Indexes, index.IndexID)
+	index.BucketSequence = append(index.BucketSequence, bucket.BucketID)
+	return nil
+}
+
+// GetPath - resolves the bucket path for a record: path chunks prefixed with
+// DynamicPathPrefix ("$$") are replaced by the matching field values from rs, and
+// the resolved bucket name is appended as the final path element.
+func (bucket *Bucket) GetPath(rs *gjson.Result) (string, error) {
+	path := ""
+	pathChunks := strings.Split(bucket.BucketPath, PathSeperator)
+	for i := range pathChunks {
+		pathVal := pathChunks[i]
+		if strings.HasPrefix(pathChunks[i], DynamicPathPrefix) {
+			dynamicField := strings.TrimSpace(strings.TrimPrefix(pathChunks[i], DynamicPathPrefix))
+			pathVal = strings.TrimSpace(rs.Get(dynamicField).String())
+			if pathVal == "" {
+				return "", errormdl.Wrap("please provide value for bucket name: " + dynamicField)
+			}
+		}
+		path = path + PathSeperator + pathVal
+	}
+	name := bucket.BucketNameQuery
+	if bucket.IsDynamicName {
+		name = rs.Get(name).String()
+	}
+	if name == "" {
+		return name, errormdl.Wrap("please provide value for bucket name: " + bucket.BucketNameQuery)
+	}
+	path = strings.TrimPrefix(path+PathSeperator+name, PathSeperator)
+	return path, nil
+}
+
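+// Secure - sets the SecurityProvider used by the bucket.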
+func (bucket *Bucket) Secure(securityprovider securityprovider.SecurityProvider) {
+	bucket.SecurityProvider = securityprovider
+}
diff --git a/dalmdl/corefdb/bucket/packbucket.go b/dalmdl/corefdb/bucket/packbucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..aaf56806a69c24e205f1951238c5eadd103f6f99
--- /dev/null
+++ b/dalmdl/corefdb/bucket/packbucket.go
@@ -0,0 +1,239 @@
+package bucket
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+)
+
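+// PackBucket - bucket that stores multiple records of different file types in a
+// single pack file, using in-file indexes for lookups.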
+type PackBucket struct {
+	Bucket
+	InFileIndexSchemaMap map[string]filetype.InFileIndex `json:"inFileIndexMap"`
+	// TODO: filepointer cache
+	packFiles map[string]filetype.PackFile
+}
+
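+// NewPackBucket - returns a new PackBucket configured with one in-file index
+// schema per file type, e.g. (illustrative values only)
+//
+//	pb, err := NewPackBucket("stdId", true, "", inFileIndexSchemaMap)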
+func NewPackBucket(bucketNameQuery string, isDynamicName bool, bucketPath string, inFileIndexSchemaMap map[string]filetype.InFileIndex) (*PackBucket, error) {
+	if bucketNameQuery == "" {
+		return nil, errormdl.Wrap("please provide value of bucketNameQuery")
+	}
+
+	bucket := Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: bucketNameQuery,
+		IsDynamicName:   isDynamicName,
+		BucketPath:      bucketPath,
+	}
+	packBucket := PackBucket{}
+	packBucket.Bucket = bucket
+	if inFileIndexSchemaMap != nil {
+		packBucket.InFileIndexSchemaMap = inFileIndexSchemaMap
+	} else {
+		packBucket.InFileIndexSchemaMap = make(map[string]filetype.InFileIndex)
+	}
+	return &packBucket, nil
+}
+
+// TODO: add fdb index data call
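+// Insert - writes data into the pack file at filePath; data must carry a fileType
+// that is present in the in-file index schema map.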
+func (pb *PackBucket) Insert(filePath string, data *gjson.Result) error {
+	requestedFileType := data.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		return errormdl.Wrap("please specify fileType")
+	}
+	_, ok := pb.InFileIndexSchemaMap[requestedFileType]
+	if !ok {
+		return errormdl.Wrap("filetype not found: " + requestedFileType)
+	}
+	locker := locker.NewLocker(filePath)
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return err
+	}
+	defer packFile.Close()
+	return packFile.Write(data)
+}
+
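+// Find - returns records matching queries from the given pack files as a JSON
+// array string; data must specify the fileType to search.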
+func (pb *PackBucket) Find(filePaths []string, queries []string, data *gjson.Result) (string, error) {
+	requestedFileType := data.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		return "", errormdl.Wrap("please specify fileType")
+	}
+	_, ok := pb.InFileIndexSchemaMap[requestedFileType]
+	if !ok {
+		return "", errormdl.Wrap("filetype not found: " + requestedFileType)
+	}
+	queries = append(queries, `#[fileType==`+requestedFileType+`]`)
+	resultArray := "[]"
+	loggermdl.LogDebug("len", len(filePaths))
+	for i := range filePaths {
+		loggermdl.LogDebug("path", filePaths[i])
+		locker := locker.NewLocker(filePaths[i])
+		packFile, err := filetype.NewPackFile(filePaths[i], pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			return "", err
+		}
+		defer packFile.Close()
+		result, err := packFile.Read(queries, data)
+		if err != nil {
+			return resultArray, err
+		}
+		for _, val := range gjson.Parse(result).Array() {
+			resultArray, _ = sjson.Set(resultArray, "-1", val.Value())
+		}
+	}
+	return resultArray, nil
+}
+
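+// Update - applies data to records matching queries in the given pack files and
+// returns the updated records.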
+func (pb *PackBucket) Update(filePaths []string, queries []string, data *gjson.Result) (*gjson.Result, []error) {
+	requestedFileType := data.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		loggermdl.LogError("please specify fileType")
+		return nil, []error{errormdl.Wrap("please specify fileType")}
+	}
+	_, ok := pb.InFileIndexSchemaMap[requestedFileType]
+	if !ok {
+		return nil, []error{errormdl.Wrap("filetype not found: " + requestedFileType)}
+	}
+	queries = append(queries, `#[fileType=="`+requestedFileType+`"]`)
+
+	finalResultArray := []gjson.Result{}
+	errList := []error{}
+
+	for i := range filePaths {
+		locker := locker.NewLocker(filePaths[i])
+		loggermdl.LogDebug("filePaths[i]", filePaths[i])
+		packFile, err := filetype.NewPackFile(filePaths[i], pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		defer packFile.Close()
+		resultArray, err := packFile.Update(queries, data)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		finalResultArray = append(finalResultArray, resultArray.Array()...)
+	}
+
+	resultListStr := "[]"
+	for _, resultObj := range finalResultArray {
+		resultListStr, _ = sjson.Set(resultListStr, "-1", resultObj.Value())
+	}
+	result := gjson.Parse(resultListStr)
+	return &result, errList
+}
+
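+// Delete - removes records matching queries from the given pack files and returns
+// the number of records deleted.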
+func (pb *PackBucket) Delete(filePaths []string, queries []string, data *gjson.Result) (recordsDeletedCnt int, errList []error) {
+
+	fileType := data.Get("fileType").String()
+	if len(fileType) == 0 {
+		loggermdl.LogError("fileType value not provided")
+		return recordsDeletedCnt, []error{errormdl.Wrap("please specify fileType")}
+	}
+
+	_, ok := pb.InFileIndexSchemaMap[fileType]
+	if !ok {
+		loggermdl.LogError("infileIndex for specified fileType not found")
+		return recordsDeletedCnt, []error{errormdl.Wrap("infileIndex for specified fileType not found")}
+	}
+
+	queries = append(queries, `#[fileType=="`+fileType+`"]`)
+	noDataFoundCnt := 0
+
+	for i := range filePaths {
+		locker := locker.NewLocker(filePaths[i])
+
+		packFile, err := filetype.NewPackFile(filePaths[i], pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		defer packFile.Close()
+		deletedRecordsCnt, err := packFile.Remove(queries)
+		if err != nil {
+			if err.Error() == "not found" {
+				noDataFoundCnt++
+				continue
+			}
+			errList = append(errList, err)
+			continue
+		}
+		recordsDeletedCnt += deletedRecordsCnt
+	}
+
+	if noDataFoundCnt == len(filePaths) {
+		errList = []error{errormdl.Wrap("data not found")}
+	}
+	return
+}
+
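+// WriteMedia - stores mediaData in the pack file at filePath and returns the
+// record ID assigned to it.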
+func (pb *PackBucket) WriteMedia(filePath string, mediaData []byte, rs *gjson.Result) (recordID string, err error) {
+	locker := locker.NewLocker(filePath)
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return "", err
+	}
+	defer packFile.Close()
+	return packFile.WriteMedia(mediaData, rs)
+}
+
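+// ReadMedia - returns the media bytes and associated metadata for recordID.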
+func (pb *PackBucket) ReadMedia(filePath string, recordID string) ([]byte, *gjson.Result, error) {
+	locker := locker.NewLocker(filePath)
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer packFile.Close()
+	return packFile.ReadMedia(recordID)
+}
+
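+// UpdateMedia - overwrites the media stored against recordID in the pack file.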
+func (pb *PackBucket) UpdateMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (err error) {
+	locker := locker.NewLocker(filePath)
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return err
+	}
+	defer packFile.Close()
+
+	return packFile.UpdateMedia(recordID, mediaData, rs)
+}
+
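+// UpsertMedia - updates the media for recordID, creating the record if it does
+// not exist, and returns the record ID.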
+func (pb *PackBucket) UpsertMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	locker := locker.NewLocker(filePath)
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return recordID, err
+	}
+	defer packFile.Close()
+	return packFile.UpsertMedia(recordID, mediaData, rs)
+}
+
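+// DeleteMedia - media deletion is not implemented yet.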
+func (pb *PackBucket) DeleteMedia(filePath string, recordID string) error {
+	// TODO: implement media delete
+	return nil
+}
+
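+// Reorg - reorganizes each of the given pack files.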
+func (pb *PackBucket) Reorg(filePaths []string) (errList []error) {
+	for i := range filePaths {
+		locker := locker.NewLocker(filePaths[i])
+		packFile, err := filetype.NewPackFile(filePaths[i], pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		err = packFile.Reorg()
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+	}
+	return errList
+}
diff --git a/dalmdl/corefdb/bucket/simplebucket.go b/dalmdl/corefdb/bucket/simplebucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6ab251e726d9512ddeac41cd49350693cfd5654
--- /dev/null
+++ b/dalmdl/corefdb/bucket/simplebucket.go
@@ -0,0 +1,124 @@
+package bucket
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
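+// SimpleBucket - bucket that stores one record per file.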
+type SimpleBucket struct {
+	Bucket
+	// TODO: implement lazy
+	EnableLazy       bool
+	securityProvider securityprovider.SecurityProvider
+	Locker           locker.Locker
+}
+
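+// NewSimpleBucket - returns a new SimpleBucket; lazy writing is recorded via
+// EnableLazy but is not implemented yet.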
+func NewSimpleBucket(bucketNameQuery string, isDynamicName bool, isLazyEnable bool, bucketPath string) (*SimpleBucket, error) {
+	if bucketNameQuery == "" {
+		return nil, errormdl.Wrap("please provide value of bucketNameQuery")
+	}
+
+	b := Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: bucketNameQuery,
+		IsDynamicName:   isDynamicName,
+		BucketPath:      bucketPath,
+	}
+
+	bucket := SimpleBucket{
+		EnableLazy: isLazyEnable,
+	}
+	bucket.Bucket = b
+	return &bucket, nil
+}
+
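+// Insert - writes data to the file at filePath.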
+func (sb *SimpleBucket) Insert(filePath string, data *gjson.Result) error {
+	locker := locker.NewLocker(filePath)
+	simpleFile, err := filetype.NewSimpleFile(filePath, sb.Bucket.SecurityProvider, locker)
+	if err != nil {
+		return err
+	}
+	defer simpleFile.Close()
+	return simpleFile.Write(data)
+}
+
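+// Find - reads every file in filePaths and returns the records as a JSON array string.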
+func (sb *SimpleBucket) Find(filePaths []string, queries []string, data *gjson.Result) (string, error) {
+	resultArray := "[]"
+
+	for i := range filePaths {
+		locker := locker.NewLocker(filePaths[i])
+
+		simpleFile, err := filetype.NewSimpleFile(filePaths[i], sb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			return "", err
+		}
+		defer simpleFile.Close()
+
+		result, err := simpleFile.Read(data)
+		if err != nil {
+			return resultArray, err
+		}
+		resultArray, _ = sjson.Set(resultArray, "-1", gjson.ParseBytes(result).Value())
+	}
+	return resultArray, nil
+}
+
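+// Update - merges data into each file in filePaths and returns the updated records.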
+func (sb *SimpleBucket) Update(filePaths []string, queries []string, data *gjson.Result) (*gjson.Result, []error) {
+	errList := []error{}
+	resultListStr := "[]"
+
+	for i := range filePaths {
+		locker := locker.NewLocker(filePaths[i])
+		simpleFile, err := filetype.NewSimpleFile(filePaths[i], sb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		defer simpleFile.Close()
+
+		updatedData, err := simpleFile.Update(data)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		resultListStr, _ = sjson.Set(resultListStr, "-1", updatedData.Value())
+	}
+	result := gjson.Parse(resultListStr)
+	return &result, errList
+}
+
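+// Delete - removes the record stored in each of the given files and returns the
+// count of records deleted.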
+func (sb *SimpleBucket) Delete(filePaths, queries []string, data *gjson.Result) (recordsDeletedCnt int, errList []error) {
+	noDataFoundCnt := 0
+	for i := range filePaths {
+		locker := locker.NewLocker(filePaths[i])
+		simpleFile, err := filetype.NewSimpleFile(filePaths[i], sb.Bucket.SecurityProvider, locker)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		defer simpleFile.Close()
+
+		err = simpleFile.Remove()
+		if err != nil {
+			if err.Error() == "not found" {
+				noDataFoundCnt++
+				continue
+			}
+			errList = append(errList, err)
+			continue
+		}
+		recordsDeletedCnt++
+	}
+
+	if noDataFoundCnt == len(filePaths) {
+		errList = []error{errormdl.Wrap("no data found")}
+	}
+	return
+}
diff --git a/dalmdl/corefdb/bucket_test.go b/dalmdl/corefdb/bucket_test.go
deleted file mode 100644
index cf79f6e9b5df89ffc286bb029d1faa8435f9b841..0000000000000000000000000000000000000000
--- a/dalmdl/corefdb/bucket_test.go
+++ /dev/null
@@ -1,1575 +0,0 @@
-package corefdb
-
-import (
-	"log"
-	"path/filepath"
-	"strings"
-	"testing"
-	"time"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/lazywriter"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl/filepack"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
-	"github.com/tidwall/gjson"
-	"github.com/tidwall/sjson"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
-)
-
-var dbInstance *FDB
-var bucketInstance *Bucket
-var indexInstance *Index
-
-func init() {
-	Init(false, false, false, "")
-
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-
-	dbInstance = db
-	// step 1:  create bucket
-	bucket := db.GetNewBucket("Candidates", false, &Bucket{})
-	bucketInstance = bucket
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(bucket)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-		IndexField{
-			FieldName: "class",
-			Query:     "class",
-		},
-	}
-	i.SetFields(fields...)
-	indexInstance = i
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestSaveDataInNormalBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "name", "ajay")
-	data, _ = sjson.Set(data, "studentId", 1235)
-	data, _ = sjson.Set(data, "examName", "unit2")
-	data, _ = sjson.Set(data, "totalQuestion", 50)
-	data, _ = sjson.Set(data, "marks", 26)
-	data, _ = sjson.Set(data, "examId", "MATH001")
-	data, _ = sjson.Set(data, "fileType", "Exam")
-
-	studentObj := gjson.Parse(data)
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	err = SaveDataInFDB("myfdb", i.IndexID, &studentObj)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestGetDataFromNormalBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	//   create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// queries := []string{`#[name==ajay]`}
-
-	data, _ := sjson.Set("", "fileType", "Exam")
-	inFileIndexQueries := []string{`#[examId=="MATH001"]`}
-
-	// data, _ := sjson.Set("", "fileType", "Profile")
-	// inFileIndexQueries := []string{`#[class==TY_MCA]`}
-
-	queries := []string{`#[name==ajay]`}
-	// inFileIndexQueries := []string{`#[class==SY_MCA]`}
-
-	studentObj := gjson.Parse(data)
-	// step 3: save data
-	result, err := ReadDataFromFDB("myfdb", i.IndexID, &studentObj, queries, inFileIndexQueries)
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug("result", result.String())
-}
-
-func TestUpdateDataInNormalBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "abc", 10)
-	data, _ = sjson.Set(data, "marks", 30)
-	// data, _ = sjson.Set(data, "fileType", "Exam")
-
-	queries := []string{`#[name=="sanjay"]`}
-	infileIndexQueries := []string{}
-	studentObj := gjson.Parse(data)
-	// step 3: save data
-	updatedData, errList := UpdateDataInFDB("myfdb", i.IndexID, &studentObj, queries, infileIndexQueries)
-	if len(errList) > 0 {
-		loggermdl.LogError(errList)
-	}
-	loggermdl.LogDebug("updatedData", updatedData)
-}
-
-func TestDeleteDataFromNormalBucket(t *testing.T) {
-	loggermdl.LogError("in TestDeleteDataFromNormalBucket")
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	//   create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	queries := []string{`#[name==sanjay]`}
-	infileIndexQueries := []string{}
-
-	// data, _ := sjson.Set("", "fileType", "Exam")
-	// data, _ := sjson.Set(data, "studentId", 1234)
-
-	studentObj := gjson.Result{}
-
-	recordsDeletedCnt, errList := DeleteDataFromFDB("myfdb", i.IndexID, &studentObj, queries, infileIndexQueries)
-	if len(errList) > 0 {
-		loggermdl.LogError("errList", errList)
-	}
-	loggermdl.LogDebug("recordsDeletedCnt", recordsDeletedCnt)
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-}
-
-func TestSaveDataInPackBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "examId",
-				Query:     "examId",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	i, err := db.GetNewIndex("stdId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// data, _ := sjson.Set("", "name", "ajay")
-	// data, _ = sjson.Set(data, "studentId", 10013)
-	// data, _ = sjson.Set(data, "class", "TY_MCA")
-	// data, _ = sjson.Set(data, "age", 24)
-	// data, _ = sjson.Set(data, "fileType", "Profile")
-
-	data, _ := sjson.Set("", "name", "vijay")
-	data, _ = sjson.Set(data, "stdId", 1239)
-	data, _ = sjson.Set(data, "examName", "unit2")
-	data, _ = sjson.Set(data, "totalQuestion", 50)
-	data, _ = sjson.Set(data, "marks", 26)
-	data, _ = sjson.Set(data, "examId", "MATH002")
-	data, _ = sjson.Set(data, "fileType", "Exam")
-
-	////// ------------vijay
-
-	// studentList := []gjson.Result{}
-	// for index := 0; index < 5; index++ {
-	// 	data, _ = sjson.Set(data, "studentId", index+1)
-	// 	studentList = append(studentList, gjson.Parse(data))
-	// }
-	// for _, studentObj := range studentList {
-	// 	err = SaveDataInFDB("myfdb", i.IndexID, &studentObj)
-	// 	if err != nil {
-	// 		log.Fatal(err)
-	// 	}
-	// }
-	studentObj := gjson.Parse(data)
-
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	err = SaveDataInFDB("myfdb", i.IndexID, &studentObj)
-	if err != nil {
-		log.Fatal(err)
-	}
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestGetDataFromPackBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "examId",
-				Query:     "examId",
-			},
-		},
-	}
-	inFileFDBIndex := filepack.InFileIndex{
-		FileType: "FDBIndex",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "name",
-				Query:     "name",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	b.SetInFileIndex(inFileFDBIndex)
-	i, err := db.GetNewIndex("stdId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	//   create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// queries := []string{`#[name==ajay]`}
-
-	data, _ := sjson.Set("", "fileType", "Exam")
-	inFileIndexQueries := []string{`#[examId=="MATH001"]`}
-
-	// data, _ := sjson.Set("", "fileType", "Profile")
-	// inFileIndexQueries := []string{`#[class==TY_MCA]`}
-
-	queries := []string{`#[name=="sanjay"]`}
-	// inFileIndexQueries := []string{`#[class==SY_MCA]`}
-
-	studentObj := gjson.Parse(data)
-	// step 3: save data
-	result, err := ReadDataFromFDB("myfdb", i.IndexID, &studentObj, queries, inFileIndexQueries)
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug("result", result)
-}
-
-func TestUpdateDataInPackBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "marks",
-				Query:     "marks",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	i, err := db.GetNewIndex("stdId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// data, _ := sjson.Set("", "name", "ajay")
-	// data, _ = sjson.Set(data, "studentId", 10013)
-	// data, _ = sjson.Set(data, "class", "TY_MCA")
-	// data, _ = sjson.Set(data, "age", 24)
-	// data, _ = sjson.Set(data, "fileType", "Profile")
-
-	// data, _ := sjson.Set("", "studentId", 10013)
-	// data, _ = sjson.Set(data, "name", "ajay")
-	// data, _ = sjson.Set(data, "examName", "Unit1")
-	// data, _ = sjson.Set(data, "totalQuestion", 50)
-	// data, _ = sjson.Set(data, "marks", 30)
-	// data, _ = sjson.Set(data, "fileType", "Exam")
-
-	//////// ------------ajay
-
-	// data, _ := sjson.Set("", "name", "vijay")
-	// data, _ = sjson.Set(data, "studentId", 10014)
-	// data, _ = sjson.Set(data, "class", "SY_MCA")
-	// data, _ = sjson.Set(data, "age", 23)
-	// data, _ = sjson.Set(data, "fileType", "Profile")
-
-	// data, _ := sjson.Set("", "studentId", 10014)
-	// data, _ = sjson.Set(data, "name", "vijay")
-	// data, _ = sjson.Set(data, "examName", "t")
-	data, _ := sjson.Set("", "abc", 150)
-	data, _ = sjson.Set(data, "marks", 32)
-	data, _ = sjson.Set(data, "fileType", "Exam")
-
-	queries := []string{`#[name=="ajay"]`}
-	infileIndexQueries := []string{`#[examId=="MATH001"]`}
-	studentObj := gjson.Parse(data)
-	// step 3: save data
-	updatedData, errList := UpdateDataInFDB("myfdb", i.IndexID, &studentObj, queries, infileIndexQueries)
-	if len(errList) > 0 {
-		loggermdl.LogError(errList)
-	}
-	loggermdl.LogDebug("updatedData", updatedData)
-}
-
-func TestDeleteDataFromPackBucket(t *testing.T) {
-	// TestSaveDataInFDB(t)
-
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "examId",
-				Query:     "examId",
-			},
-		},
-	}
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	i, err := db.GetNewIndex("stdId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	data, _ := sjson.Set("", "fileType", "Exam")
-
-	// data, _ = sjson.Set(data, "studentId", 1234)
-
-	// studentObj := gjson.Result{}
-
-	// err = SaveDataInFDB("myfdb", i.IndexID, &studentObj)
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-
-	queries := []string{`#[name=="ajay"]`}
-	infileIndexQueries := []string{`#[examId=="MATH001"]`}
-	studentObj := gjson.Parse(data)
-	recordsDeleted, errList := DeleteDataFromFDB("myfdb", i.IndexID, &studentObj, queries, infileIndexQueries)
-	if len(errList) > 0 {
-		loggermdl.LogError(errList)
-	}
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug("recordsDeleted", recordsDeleted)
-}
-
-func TestReindexOnPackBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexFDB := filepack.InFileIndex{
-		FileType: "FDBIndex",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "name",
-				Query:     "name",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexFDB)
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = ReindexOnSpecialBucket("myfdb", i.IndexID, gjson.Result{})
-	if err != nil {
-		log.Fatal(err)
-	}
-
-}
-
-func TestReindexOnSimpleBucket(t *testing.T) {
-
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Candidates", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-		IndexField{
-			FieldName: "class",
-			Query:     "class",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "name", "vivek")
-	data, _ = sjson.Set(data, "studentId", 1000)
-
-	data, _ = sjson.Set(data, "class", "TY_MCA")
-	data, _ = sjson.Set(data, "age", 26)
-
-	studentObj := gjson.Parse(data)
-	// step 3: save data
-	err = SaveDataInFDB("myfdb", i.IndexID, &studentObj)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// step 4: fetch data
-	result, err := ReadDataFromFDB("myfdb", i.IndexID, &studentObj, []string{`#[name=="vivek"]`}, []string{""})
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug("before reindex", result)
-
-	// step 5: delete index file
-	err = i.CloseStore()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	err = filemdl.DeleteFile("/home/vivekn/fdb_data/myfdb/index/studentId")
-	if err != nil {
-		loggermdl.LogError(err)
-		log.Fatal(err)
-	}
-
-	// step 6: reindex
-	err = db.ReIndex(i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// step 7: fetch data
-	data2, err := ReadDataFromFDB("myfdb", i.IndexID, &studentObj, []string{`#[name=="vivek"]`}, []string{""})
-
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug("after reindex", data2)
-}
-
-func TestLogFDBIndexFile(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "marks",
-				Query:     "marks",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "name", "ajay")
-	data, _ = sjson.Set(data, "examName", "unit2")
-	data, _ = sjson.Set(data, "totalQuestion", 50)
-	data, _ = sjson.Set(data, "marks", 26)
-	data, _ = sjson.Set(data, "fileType", "Exam")
-
-	studentList := []gjson.Result{}
-	for index := 0; index < 1; index++ {
-		data, _ = sjson.Set(data, "studentId", index+1)
-		studentList = append(studentList, gjson.Parse(data))
-	}
-	// step 3: save data
-	for _, studentObj := range studentList {
-		// data, _ = sjson.Set(data, "studentId", v+1)
-		// studentObj := gjson.Parse(data)
-		err = SaveDataInFDB("myfdb", i.IndexID, &studentObj)
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-}
-
-func TestLoadFDBIndexFromFile(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackType", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "marks",
-				Query:     "marks",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "fileType", "Exam")
-	inFileIndexQueries := []string{`#[marks==26]`}
-
-	// data, _ := sjson.Set("", "fileType", "Profile")
-	// inFileIndexQueries := []string{`#[class==TY_MCA]`}
-
-	queries := []string{`#[name=="ajay"]`}
-	// inFileIndexQueries := []string{`#[class==SY_MCA]`}
-
-	studentObj := gjson.Parse(data)
-	// step 3: save data
-	result, err := ReadDataFromFDB("myfdb", i.IndexID, &studentObj, queries, inFileIndexQueries)
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug("result", result)
-}
-func TestLazyAppend(t *testing.T) {
-	// Init(false, false, false, "")
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("appendBucket", false, &Bucket{})
-	b.SetBucketType(BucketTypeAppend)
-	b.EnableLazyWrite(true)
-	// loggermdl.LogDebug(infileIndexExam)
-	i, err := db.GetNewIndex("studentProfId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexsaveFn, err := GetLazyCallBackFunc(LazyCallBackFnSaveIndex)
-	if err != nil {
-		log.Fatal(err)
-
-	}
-	idxFP := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexID)
-	indexLazyObj := lazywriter.LazyCacheObject{
-		FileName:      idxFP,
-		Identifier:    i.IndexID,
-		InterfaceData: i,
-		SaveFn:        indexsaveFn,
-	}
-
-	IndexLazyObjHolder.SetNoExpiration(i.IndexID, indexLazyObj)
-
-	// saveFn, err := GetLazyCallBackFunc(LazyCallBackFnAppendBucket)
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-	// appendBucketPath := filepath.Join(db.DBPath, b.BucketPath)
-	// lazyObj := lazywriter.LazyCacheObject{
-	// 	FileName:      appendBucketPath,
-	// 	Identifier:    b.BucketID,
-	// 	InterfaceData: nil,
-	// 	SaveFn:        saveFn,
-	// }
-	// AppendLazyObjHolder.SetNoExpiration(b.BucketID, lazyObj)
-
-	data, _ := sjson.Set("", "name", "vijay")
-	data, _ = sjson.Set(data, "studentProfId", "10013")
-	data, _ = sjson.Set(data, "class", "TY_MCA")
-	data, _ = sjson.Set(data, "age", 24)
-	data, _ = sjson.Set(data, "fileType", "Profile")
-	studentProfObj := gjson.Parse(data)
-
-	err = SaveDataInFDB("myfdb", i.IndexID, &studentProfObj)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	time.Sleep(time.Second * 15)
-
-}
-func TestSaveDataInAppendBucket(t *testing.T) {
-	Init(false, false, false, "")
-	// for encryption
-	// Init(true, false, false, "")
-
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("appendBucket", false, &Bucket{})
-	b.SetBucketType(BucketTypeAppend)
-
-	// loggermdl.LogDebug(infileIndexExam)
-	i, err := db.GetNewIndex("studentProfId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "name", "vijay")
-	data, _ = sjson.Set(data, "studentProfId", "10013")
-	data, _ = sjson.Set(data, "class", "TY_MCA")
-	data, _ = sjson.Set(data, "age", 24)
-	data, _ = sjson.Set(data, "fileType", "Profile")
-	studentProfObj := gjson.Parse(data)
-
-	// data2, _ := sjson.Set("", "name", "ajay")
-	// data2, _ = sjson.Set(data2, "studentId", 10013)
-	// data2, _ = sjson.Set(data2, "examName", "unit2")
-	// data2, _ = sjson.Set(data2, "totalQuestion", 50)
-	// data2, _ = sjson.Set(data2, "marks", 26)
-	// data2, _ = sjson.Set(data2, "fileType", "Exam")
-	// studentExamObj := gjson.Parse(data2)
-	err = SaveDataInFDB("myfdb", i.IndexID, &studentProfObj)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// err = SaveDataInAppendBucket("myfdb", i.IndexID, &studentExamObj)
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-	// indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	// err = LogFDBIndexFile(indexFilePath, i)
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-}
-
-func TestLoadFDBEncryptedData(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("appendBucket", false, &Bucket{})
-	b.SetBucketType(BucketTypePack)
-
-	infileIndexProfile := filepack.InFileIndex{
-		FileType: "Profile",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "class",
-				Query:     "class",
-			},
-		},
-	}
-	infileIndexExam := filepack.InFileIndex{
-		FileType: "Exam",
-		IndexFields: []filepack.InFileIndexField{
-			filepack.InFileIndexField{
-				FieldName: "marks",
-				Query:     "marks",
-			},
-		},
-	}
-	// loggermdl.LogDebug(infileIndexExam)
-	b.SetInFileIndex(infileIndexProfile)
-	b.SetInFileIndex(infileIndexExam)
-	i, err := db.GetNewIndex("studentProfId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	keyValMap, err := i.GetAllEntries()
-
-	if err != nil {
-		log.Fatal(err)
-	}
-	for key, value := range keyValMap {
-		loggermdl.LogDebug("key ", key, "value", value)
-	}
-}
-func TestSaveMediaInFDB(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackMixed", false, &Bucket{})
-	b.SetBucketType(BucketTypeMixed)
-
-	// infileIndexProfile := filepack.InFileIndex{}
-	i, err := db.GetNewIndex("studentMedia", false)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "contentType", "image/jpg")
-	data, _ = sjson.Set(data, "name", "abc")
-	data, _ = sjson.Set(data, "fileType", "Asset")
-	rs := gjson.Parse(data)
-
-	filePath := "/home/vivekn/Downloads/image001.jpg"
-
-	dataByte, err := filemdl.ReadFile(filePath)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// dataByte = []byte("hello")
-
-	path, err := SaveMediaInFDB("myfdb", i.IndexID, dataByte, &rs)
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogError("path: ", path)
-	loggermdl.LogError("recordID: ", filepath.Base(path))
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestGetMediaFromFDB(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackMixed", false, &Bucket{})
-	b.SetBucketType(BucketTypeMixed)
-
-	// infileIndexProfile := filepack.InFileIndex{}
-	i, err := db.GetNewIndex("studentMedia", false)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// dataByte = []byte("hello")
-	rowID := "-502807381009242"
-	// please provide record id
-	recordID := "1YTa5X04ti0l90LjrJxdehHzVTvcv"
-	data, fileMeta, err := GetMediaFromFDB("myfdb", i.IndexID, rowID, recordID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogDebug(len(data))
-	loggermdl.LogDebug(fileMeta)
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestUpdateMediaInFDB(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackMixed", false, &Bucket{})
-	b.SetBucketType(BucketTypeMixed)
-
-	// infileIndexProfile := filepack.InFileIndex{}
-	i, err := db.GetNewIndex("studentMedia", false)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "contentType", "image/jpg")
-	data, _ = sjson.Set(data, "name", "abc")
-	data, _ = sjson.Set(data, "fileType", "Asset")
-	rs := gjson.Parse(data)
-
-	filePath := "/home/vivekn/Downloads/Screenshot from 2019-11-20 12-50-36.png"
-
-	dataByte, err := filemdl.ReadFile(filePath)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// dataByte = []byte("hello")
-	recordID := "1YTa5X04ti0l90LjrJxdehHzVTv"
-	path, err := UpdateMediaInFDB("myfdb", i.IndexID, recordID, dataByte, &rs)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	loggermdl.LogError(path)
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestUpsertMediaInFDB(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("bucketPackMixed", false, &Bucket{})
-	b.SetBucketType(BucketTypeMixed)
-
-	// infileIndexProfile := filepack.InFileIndex{}
-	i, err := db.GetNewIndex("studentMedia", false)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data, _ := sjson.Set("", "contentType", "image/png")
-	data, _ = sjson.Set(data, "name", "abc")
-	data, _ = sjson.Set(data, "fileType", "Asset")
-	rs := gjson.Parse(data)
-
-	filePath := "/home/vivekn/Downloads/gocert_1569587829495.jpg"
-
-	dataByte, err := filemdl.ReadFile(filePath)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// dataByte = []byte("hello")
-	recordID := "1YTa5X04ti0l90LjrJxdehHzVTvcv"
-	path, err := UpsertMediaInFDB("myfdb", i.IndexID, recordID, dataByte, &rs)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	loggermdl.LogError(path)
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestLazySaveInNormalBucket(t *testing.T) {
-	Init(false, false, true, "")
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("normalLazy", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-	b.EnableLazyWrite(true)
-	// loggermdl.LogDebug(infileIndexExam)
-	i, err := db.GetNewIndex("studentProfId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexsaveFn, err := GetLazyCallBackFunc(LazyCallBackFnSaveIndex)
-	if err != nil {
-		log.Fatal(err)
-
-	}
-	idxFP := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexID)
-	indexLazyObj := lazywriter.LazyCacheObject{
-		FileName:      idxFP,
-		Identifier:    i.IndexID,
-		InterfaceData: i,
-		SaveFn:        indexsaveFn,
-	}
-
-	IndexLazyObjHolder.SetNoExpiration(i.IndexID, indexLazyObj)
-
-	// saveFn, err := GetLazyCallBackFunc(LazyCallBackFnAppendBucket)
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-	// appendBucketPath := filepath.Join(db.DBPath, b.BucketPath)
-	// lazyObj := lazywriter.LazyCacheObject{
-	// 	FileName:      appendBucketPath,
-	// 	Identifier:    b.BucketID,
-	// 	InterfaceData: nil,
-	// 	SaveFn:        saveFn,
-	// }
-	// AppendLazyObjHolder.SetNoExpiration(b.BucketID, lazyObj)
-
-	data, _ := sjson.Set("", "name", "ajay")
-	data, _ = sjson.Set(data, "studentProfId", "10013")
-	data, _ = sjson.Set(data, "class", "TY_MCA")
-	data, _ = sjson.Set(data, "age", 24)
-	data, _ = sjson.Set(data, "fileType", "Profile")
-	studentProfObj := gjson.Parse(data)
-
-	err = SaveDataInFDB("myfdb", i.IndexID, &studentProfObj)
-	if err != nil {
-		log.Fatal(err)
-	}
-	data, _ = sjson.Set(data, "studentProfId", "10014")
-
-	nesStd := gjson.Parse(data)
-	err = SaveDataInFDB("myfdb", i.IndexID, &nesStd)
-	if err != nil {
-		log.Fatal(err)
-	}
-	queries := []string{`#[name=="ajay"]`}
-	res, err := ReadDataFromFDB("myfdb", i.IndexID, nil, queries, []string{})
-	if err != nil {
-		log.Fatal(err)
-	}
-	loggermdl.LogError("res", res)
-	datatoUpdateStr, _ := sjson.Set("", "newField", "val")
-	datatoUpdate := gjson.Parse(datatoUpdateStr)
-	updatedRes, errList := UpdateDataInFDB("myfdb", i.IndexID, &datatoUpdate, queries, []string{})
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-	loggermdl.LogError("updatedRes", updatedRes)
-	loggermdl.LogError("errList", errList)
-	// delRes, errList := DeleteDataFromFDB("myfdb", i.IndexID, &datatoUpdate, queries, []string{})
-	// loggermdl.LogError("delRes", delRes)
-	// loggermdl.LogError("del errList", errList)
-	time.Sleep(time.Second * 10)
-}
-
-func TestExportDataFromNormalBucket(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	zipExporter := ZipExporter{
-		DestPath:      "home/vivekn/fdb_data/dest",
-		IndexID:       i.IndexID,
-		MigrationType: MigrationTypeUpdate,
-		Queries:       []string{},
-		FdbName:       db.DBName,
-	}
-
-	err = zipExporter.DataExport()
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-func TestImportDataIntoFDB(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	zipImporter := ZipImporter{
-		Data:       nil,
-		SourcePath: "home/vivekn/fdb_data/dest/studentId",
-		FdbName:    db.DBName,
-		IndexID:    i.IndexID,
-	}
-	err = zipImporter.DataImport()
-	// err := ImportZip("myfdb", "home/vivekn/fdb_data/dest/stdId")
-	if err != nil {
-		loggermdl.LogError(err)
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestExportDataAsFiles(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-	err = LoadFDBIndexFromFile(indexFilePath, db, i.IndexID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	exporter := FileExporter{
-		DestPath:      "/home/vivekn/fdb_data/dest",
-		IndexID:       i.IndexID,
-		MigrationType: MigrationTypeUpdate,
-		Queries:       []string{},
-		FdbName:       db.DBName,
-	}
-
-	err = exporter.DataExport()
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-func TestImportDataFromFile(t *testing.T) {
-	db, err := CreateFDBInstance("/home/vivekn/fdb_data/myfdb", "myfdb", false)
-	if err != nil {
-		log.Fatal("CreateFDBInstance = ", err)
-	}
-	// step 1:  create bucket
-	b := db.GetNewBucket("Simple", false, &Bucket{})
-	b.SetBucketType(BucketTypeSimple)
-
-	i, err := db.GetNewIndex("studentId", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-	}
-	i.SetFields(fields...)
-	// step 2:  create index
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-	fileImporter := FileImporter{
-		Data:       nil,
-		SourcePath: "/home/vivekn/fdb_data/dest/studentId",
-		FdbName:    db.DBName,
-		IndexID:    i.IndexID,
-	}
-	err = fileImporter.DataImport()
-	// err := ImportZip("myfdb", "home/vivekn/fdb_data/dest/stdId")
-	if err != nil {
-		loggermdl.LogError(err)
-		log.Fatal(err)
-	}
-	indexFilePath := filepath.Join(db.DBPath, INDEXFOLDER, i.IndexNameQuery)
-
-	err = LogFDBIndexFile(indexFilePath, i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func TestPath(t *testing.T) {
-
-	path1 := filepath.Join("\\abc", "pqr")
-	loggermdl.LogError(path1)
-	loggermdl.LogError(strings.ReplaceAll(path1, "\\", "/"))
-}
diff --git a/dalmdl/corefdb/buntdbmdl.go b/dalmdl/corefdb/buntdbmdl.go
deleted file mode 100644
index bbb237580e302534d4c4e07ecb93eb00cd0ee835..0000000000000000000000000000000000000000
--- a/dalmdl/corefdb/buntdbmdl.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package corefdb
-
-import (
-	"os"
-	"strings"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl/filepack"
-
-	"github.com/tidwall/gjson"
-	"github.com/tidwall/sjson"
-
-	"path/filepath"
-	"sync"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/lazywriter"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
-
-	"github.com/tidwall/buntdb"
-)
-
-const (
-	// INDEXFOLDER -INDEXFOLDER
-	INDEXFOLDER = "index"
-	// LazyCallBackFnAppendBucket - LazyCallBackFnAppendBucket
-	LazyCallBackFnAppendBucket = "LazyWriterAppendBucketCallBackFn"
-	// LazyCallBackFnSaveIndex - LazyCallBackFnSaveIndex
-	LazyCallBackFnSaveIndex = "LazyWriterCallBackFnAppendBucketSaveIndex"
-)
-
-var databases cachemdl.FastCacheHelper
-var defaultDB string
-
-var isLazyWriterEnabled = false
-var defaultSecurityKey = []byte{}
-var isSecurityEnabled = false
-var fileFpCache Cache
-
-func init() {
-	databases.Setup(1, 1000, 1000)
-	fpCache, _ := NewCache()
-	fileFpCache = *fpCache
-}
-
-// Init - initializes buntdbmdl
-func Init(isSecurityRequired, isCompressionRequired, isLazyWriterEnable bool, securityKey string) {
-	filepack.Init(isSecurityRequired, isCompressionRequired, securityKey)
-	defaultSecurityKey = []byte(securityKey)
-	isLazyWriterEnabled = isLazyWriterEnable
-	isSecurityEnabled = isSecurityRequired
-}
-
-// FDB - FDB
-type FDB struct {
-	DBName              string
-	DBPath              string `json:"dbPath"`
-	EnableSecurity      bool   `json:"enableSec"` // if enabled, fdb files will be encrypted
-	EnableCompression   bool   `json:"enableCmp"` // if enabled, fdb files will be compressed and then encrypted
-	indices             map[string]*Index
-	indexMux            sync.Mutex
-	buckets             map[string]*Bucket
-	bLocker             sync.Mutex
-	restoreFileFromPack bool
-}
-
-// CreateFDBInstance - creates fdb instance
-func CreateFDBInstance(dbPath, dbName string, isDefault bool) (*FDB, error) {
-	fdb := &FDB{
-		DBPath:              dbPath,
-		indices:             make(map[string]*Index),
-		indexMux:            sync.Mutex{},
-		buckets:             make(map[string]*Bucket),
-		bLocker:             sync.Mutex{},
-		restoreFileFromPack: true,
-		DBName:              dbName,
-	}
-	if isDefault {
-		defaultDB = dbName
-	}
-	databases.SetNoExpiration(dbName, fdb)
-	return fdb, nil
-}
-
-// EnableFDBSecurity enables security. Files will be encrypted.
-func (fdb *FDB) EnableFDBSecurity(sec bool) {
-	if !sec {
-		return
-	}
-	fdb.EnableSecurity = sec
-}
-
-// EnableFDBCompression enables compression. Files will be compressed and then encrypted.
-func (fdb *FDB) EnableFDBCompression(cmp bool) {
-	if !cmp {
-		return
-	}
-	fdb.EnableCompression = cmp
-}
-
-// GetBucketByName - returns bucket with specified bucket name
-func (f *FDB) GetBucketByName(bucketName string) *Bucket {
-	for _, val := range f.buckets {
-		if val.BucketNameQuery == bucketName {
-			return val
-		}
-	}
-	return nil
-}
-
-// SetFileRestoreFormPackFlag - set whether to restore file from pack while performing Get operation
-func (f *FDB) SetFileRestoreFormPackFlag(restoreFile bool) {
-	f.restoreFileFromPack = restoreFile
-}
-
-// GetFDBInstance - returns fdb instance
-func GetFDBInstance(dbName string) (*FDB, error) {
-	if dbName == "" {
-		dbName = defaultDB
-	}
-
-	rawDB, ok := databases.Get(dbName)
-	if !ok {
-		loggermdl.LogError("Database instance not found")
-		return nil, errormdl.Wrap("Database instance not found")
-	}
-	fdb, ok := rawDB.(*FDB)
-	if !ok {
-		loggermdl.LogError("Can not cast object into *FDB")
-		return nil, errormdl.Wrap("Can not cast object into *FDB")
-	}
-	return fdb, nil
-}
-
-// GetFDBIndex - returns index
-func (f *FDB) GetFDBIndex(indexName string) (*Index, bool) {
-	index, ok := f.indices[indexName]
-	return index, ok
-}
-
-// CreateIndex - Creates index
-func (f *FDB) CreateIndex(index *Index) error {
-	return index.CreateIndex()
-}
-
-// GetBucketIndexes - returns list of indexes of bucket
-func (f *FDB) GetBucketIndexes(bucketName string) ([]*Index, error) {
-
-	var bucket *Bucket
-	var foundBucket = false
-	for _, bucketObj := range f.buckets {
-		if bucketObj.BucketNameQuery == bucketName {
-			foundBucket = true
-			bucket = bucketObj
-			break
-		}
-
-	}
-
-	if !foundBucket {
-		return nil, errormdl.Wrap("Bucket not found")
-	}
-
-	indexList := make([]*Index, 0)
-	for _, indexID := range bucket.Indices {
-		index, ok := f.indices[indexID]
-
-		if !ok {
-			return nil, errormdl.Wrap("index not found")
-		}
-		indexList = append(indexList, index)
-	}
-	return indexList, nil
-}
-
-// ReIndex - performs reindexing
-func (f *FDB) ReIndex(indexID string) error {
-
-	index, found := f.GetFDBIndex(indexID)
-	if !found {
-		return errormdl.Wrap("index not found")
-	}
-	// find path to start file walk
-	pathToStartWalk := f.DBPath
-	for _, bucketID := range index.BucketSequence {
-		bucket := f.buckets[bucketID]
-		if bucket.IsDynamicName {
-			break
-		}
-		pathToStartWalk = filepath.Join(pathToStartWalk, bucket.BucketNameQuery)
-	}
-
-	// get required data for index file
-	indexDataMap, err := getFDBIndexData(pathToStartWalk, index, f.DBPath)
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	// create or replace index
-	var fns []func(a, b string) bool
-	for _, idx := range index.IndexFields {
-		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
-	}
-	err = index.CloseStore()
-	if err != nil {
-		return err
-	}
-	indexFilePath := filepath.Join(f.DBPath, INDEXFOLDER, index.IndexID)
-	if filemdl.FileAvailabilityCheck(indexFilePath) {
-		err = filemdl.DeleteFile(indexFilePath)
-		if err != nil {
-			return err
-		}
-	}
-	err = index.ReplaceIndex()
-	if err != nil {
-		loggermdl.LogError(err)
-		return err
-	}
-	// update index file by reading all data and updating index file
-	return index.AddEntries(indexDataMap)
-}
-
-// getFDBIndexData - returns index data with index fields recursively from specified directory
-func getFDBIndexData(path string, index *Index, dbPath string) (map[string]string, error) {
-
-	indexDataMap := make(map[string]string)
-	if !filemdl.FileAvailabilityCheck(path) {
-		return nil, errormdl.Wrap("invalid path: " + path)
-	}
-
-	dbPath = filepath.Join(dbPath) + string(filepath.Separator)
-	ferr := filemdl.Walk(path, func(filePath string, info os.FileInfo, err error) error {
-		if err != nil {
-			loggermdl.LogError(err)
-			return nil
-		}
-
-		if !info.IsDir() {
-			fileData, err := filemdl.ReadFile(filePath)
-			if err != nil {
-				loggermdl.LogError(err)
-				return nil
-			}
-
-			dataObj := gjson.Parse(string(fileData))
-			indexDataObj := ""
-			for _, indexField := range index.IndexFields {
-				indexDataObj, _ = sjson.Set(indexDataObj, indexField.FieldName, dataObj.Get(indexField.Query).String())
-			}
-
-			if indexDataObj != "" {
-				pathToSave := strings.TrimPrefix(filePath, dbPath)
-				indexDataMap[pathToSave] = indexDataObj
-			}
-		}
-
-		return nil
-	})
-	if ferr != nil {
-		return indexDataMap, ferr
-	}
-	return indexDataMap, nil
-}
-
-// GetLazyCallBackFunc - return callback functions for lazywriter
-func GetLazyCallBackFunc(funcName string) (lazywriter.SaveDataFn, error) {
-	if funcName == LazyCallBackFnAppendBucket {
-		return lazyCallBackFnAppendBucket, nil
-	} else if funcName == LazyCallBackFnSaveIndex {
-		return lazyCallBackFnSaveIndex, nil
-	} else {
-		return nil, errormdl.Wrap("func not found for: " + funcName)
-	}
-}
-
-var lazyCallBackFnAppendBucket lazywriter.SaveDataFn = func(bucketId string, data *lazywriter.LazyCacheObject) {
-	dataInLazyMemory, ok := data.InterfaceData.(string)
-	if !ok {
-		return
-	}
-	_, _, err := filemdl.AppendDataToFile(data.FileName, []byte(dataInLazyMemory+"\r\n"), true)
-	if err != nil {
-		loggermdl.LogError(err)
-		return
-	}
-	data.InterfaceData = nil
-	err = AppendMaster.ClearLazyObjInterfaceData(data.Identifier)
-	if err != nil {
-		loggermdl.LogError(err)
-		return
-	}
-	AppendLazyObjHolder.Set(data.Identifier, *data)
-}
-
-var lazyCallBackFnSaveIndex lazywriter.SaveDataFn = func(indexID string, data *lazywriter.LazyCacheObject) {
-	indexData, ok := data.InterfaceData.(*Index)
-	if !ok {
-		return
-	}
-
-	err := LogFDBIndexFile(data.FileName, indexData)
-	if err != nil {
-		loggermdl.LogError(err)
-		return
-	}
-}
diff --git a/dalmdl/corefdb/corefdb.go b/dalmdl/corefdb/corefdb.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a5ed6d0f25f21f045121e5566dc25850f9ac431
--- /dev/null
+++ b/dalmdl/corefdb/corefdb.go
@@ -0,0 +1,1269 @@
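+// Package corefdb implements a file-based data store (FDB) that persists JSON
+// records in buckets and maintains index entries for fast lookup.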
+package corefdb
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/bucket"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+	"github.com/tidwall/buntdb"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	// INDEXFOLDER -INDEXFOLDER
+	INDEXFOLDER = "index"
+	// LazyCallBackFnAppendBucket - LazyCallBackFnAppendBucket
+	LazyCallBackFnAppendBucket = "LazyWriterAppendBucketCallBackFn"
+	// LazyCallBackFnSaveIndex - LazyCallBackFnSaveIndex
+	LazyCallBackFnSaveIndex = "LazyWriterCallBackFnAppendBucketSaveIndex"
+	lineBreak               = "\r\n"
+	IndexKeyValSeperator    = "="
+	FileType                = "fileType"
+	MigrationTypeUpdate     = "MigrationTypeUpdate"
+	MigrationTypeReplace    = "MigrationTypeReplace"
+	MigrationTypeKeyword    = "migrationType"
+	MigrationConfigFilename = "migrationConfig"
+	PathSeperator           = "/"
+)
+
+// ErrNoDataFound - returned when the required data is not found; it may have been deleted. Callers may safely ignore this error.
+var ErrNoDataFound = errors.New("data not found")
+
+var databases cachemdl.FastCacheHelper
+var defaultDB string
+
+func init() {
+	databases.Setup(1, 1000, 1000)
+}
+
+// FDB - FDB
+type FDB struct {
+	DBName            string
+	DBPath            string `json:"dbPath"`
+	EnableSecurity    bool   `json:"enableSec"` // if enabled, fdb files will be encrypted
+	EnableCompression bool   `json:"enableCmp"` // if enabled, fdb files will be compressed and then encrypted
+	indexes           map[string]*index.Index
+	indexMux          sync.Mutex
+	buckets           map[string]bucket.Store
+	bLocker           sync.Mutex
+	securityProvider  securityprovider.SecurityProvider
+}
+
+// CreateFDBInstance - creates fdb instance
+func CreateFDBInstance(dbPath, dbName string, isDefault bool) (*FDB, error) {
+	fdb := &FDB{
+		DBPath:   dbPath,
+		indexes:  make(map[string]*index.Index),
+		indexMux: sync.Mutex{},
+		buckets:  make(map[string]bucket.Store),
+		bLocker:  sync.Mutex{},
+		DBName:   dbName,
+	}
+
+	if isDefault {
+		defaultDB = dbName
+	}
+	databases.SetNoExpiration(dbName, fdb)
+	return fdb, nil
+}
+
+// GetFDBInstance - returns fdb instance
+func GetFDBInstance(dbName string) (*FDB, error) {
+	if dbName == "" {
+		dbName = defaultDB
+	}
+
+	rawDB, ok := databases.Get(dbName)
+	if !ok {
+		loggermdl.LogError("Database instance not found")
+		return nil, errormdl.Wrap("Database instance not found")
+	}
+	fdb, ok := rawDB.(*FDB)
+	if !ok {
+		loggermdl.LogError("Can not cast object into *FDB")
+		return nil, errormdl.Wrap("Can not cast object into *FDB")
+	}
+	return fdb, nil
+}
+
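+// SetSecurityProvider - sets the security provider for the FDB instance and applies it to all registered buckets that support securing.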
+func (fdb *FDB) SetSecurityProvider(securityProvider securityprovider.SecurityProvider) error {
+	if securityProvider == nil {
+		return errormdl.Wrap("please provide security provider")
+	}
+	for key := range fdb.buckets {
+		val := fdb.buckets[key]
+		if bucketObj, ok := val.(bucket.Securable); ok {
+			bucketObj.Secure(securityProvider)
+			bucketStore, _ := bucketObj.(bucket.Store)
+			loggermdl.LogError("bucketStore type: ", bucketStore)
+			fdb.buckets[key] = bucketStore
+		}
+	}
+	fdb.securityProvider = securityProvider
+	return nil
+}
+
+// EnableFDBSecurity enables security. Files will be encrypted.
+func (fdb *FDB) EnableFDBSecurity(sec bool) {
+	if !sec {
+		return
+	}
+	fdb.EnableSecurity = sec
+}
+
+// RegisterNewIndex - registers a new index with the given fields, creates its index store and returns it
+func (fdb *FDB) RegisterNewIndex(indexID, indexNameQuery string, isDynamicName bool, indexFields []index.IndexField) (*index.Index, error) {
+	fdb.indexMux.Lock()
+	defer fdb.indexMux.Unlock()
+
+	indexFilePath, err := filepath.Abs(filepath.Join(fdb.DBPath, INDEXFOLDER, indexID))
+	if err != nil {
+		return nil, err
+	}
+	index, err := index.NewIndex(indexID, indexNameQuery, isDynamicName, indexFilePath)
+	if err != nil {
+		return nil, err
+	}
+	index.SetFields(indexFields...)
+	if _, ok := fdb.indexes[indexID]; ok {
+		return nil, errormdl.Wrap("Index ID already found")
+	}
+	err = index.CreateIndex()
+	if err != nil {
+		return nil, err
+	}
+	fdb.indexes[indexID] = index
+	return index, nil
+}
+
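+// AddBucket - registers a bucket store against the given bucket ID; if a security provider is configured, it is applied to the bucket first.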
+func (fdb *FDB) AddBucket(bucketID string, bucketObj bucket.Store) error {
+
+	if bucketObj == nil {
+		return errormdl.Wrap("bucket is nil")
+	}
+	if _, ok := fdb.buckets[bucketID]; ok {
+		return errormdl.Wrap("bucket already present: " + bucketID)
+	}
+	if fdb.securityProvider != nil {
+		if securable, ok := bucketObj.(bucket.Securable); ok {
+			securable.Secure(fdb.securityProvider)
+			bucketObj, _ = securable.(bucket.Store)
+		}
+
+	}
+	fdb.buckets[bucketID] = bucketObj
+
+	return nil
+}
+
+// GetFDBIndex - returns index
+func (f *FDB) GetFDBIndex(indexID string) (*index.Index, bool) {
+	index, ok := f.indexes[indexID]
+	return index, ok
+}
+
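+// SaveDataInFDB - resolves the target file path from the index, inserts the data into the bucket and updates the index entry for that path.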
+func SaveDataInFDB(dbName string, indexID string, data *gjson.Result) error {
+	loggermdl.LogError("indexID", indexID, data.String())
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	// get index from fdb index map
+	index, ok := fdb.indexes[indexID]
+	if !ok {
+		loggermdl.LogError("index not found: ", indexID)
+		return errormdl.Wrap("index not found: " + indexID)
+	}
+	//  get bucket id from index
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	//  get bucket from fdb map
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("bucket not found: ", bucketID)
+		return errormdl.Wrap("bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, data)
+	if err != nil {
+		loggermdl.LogError("could not resolve filepath: ", err)
+		return errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	loggermdl.LogError("resolvedpath", path)
+	prevVal, err := index.GetEntryByPath(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	filePath, err := filepath.Abs(filepath.Join(fdb.DBPath, path))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	//  call save on bucket
+	err = bucketObj.Insert(filePath, data)
+	if err != nil {
+		loggermdl.LogError("failed to insert data: ", err)
+		return errormdl.Wrap("failed to insert data: " + err.Error())
+	}
+	// save index record in index store
+	// basepath, err := filepath.Abs(fdb.DBPath)
+	// if err != nil {
+	// 	loggermdl.LogError(err)
+	// 	return err
+	// }
+	// basepath = basepath + string(filepath.Separator)
+	// path := strings.TrimPrefix(filePath, basepath)
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	prevVal, _ = sjson.Set(prevVal, "rowID", rowID)
+	updatedJSON, err := updateIndexJSON(index, prevVal, data)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	updatedJSONObj := gjson.Parse(updatedJSON)
+	err = index.AddEntry(path, &updatedJSONObj)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	return nil
+}
+
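+// ReadDataFromFDB - finds file paths matching the index queries and returns the matching records from the bucket as a JSON array.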
+func ReadDataFromFDB(dbName, indexID string, data *gjson.Result, queries []string, infileIndexQueries []string) (*gjson.Result, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return nil, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	//  get index Id from index map
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, errormdl.Wrap("INDEX not found: " + indexID)
+	}
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	bucket, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return nil, errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	resultToReturn := gjson.Parse("[]")
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return &resultToReturn, nil
+	}
+	filePaths := make([]string, 0)
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	loggermdl.LogError("filePaths", filePaths)
+	resultArray, err := bucket.Find(filePaths, infileIndexQueries, data)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	resultToReturn = gjson.Parse(resultArray)
+	return &resultToReturn, nil
+}
+
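+// UpdateDataInFDB - updates records in all files matching the index queries and refreshes the corresponding index entries.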
+func UpdateDataInFDB(dbName, indexID string, data *gjson.Result, queries []string, infileIndexQueries []string) (*gjson.Result, []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return nil, []error{errormdl.Wrap("fdb instance not found " + dbName)}
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, []error{errormdl.Wrap("INDEX not found: " + indexID)}
+	}
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	bucket, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return nil, []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+	}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, []error{err}
+	}
+	resultToReturn := gjson.Parse("[]")
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return &resultToReturn, []error{ErrNoDataFound}
+	}
+	filePaths := make([]string, 0)
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, []error{err}
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	resultArray, errList := bucket.Update(filePaths, infileIndexQueries, data)
+	if len(errList) > 1 {
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+	}
+	for filePath, json := range indexKeyValueMap {
+		rowID, err := GenRowID(filePath)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		json, _ = sjson.Set(json, "rowID", rowID)
+		updatedJSON, err := updateIndexJSON(index, json, data)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+
+		updatedJSONObj := gjson.Parse(updatedJSON)
+		err = index.AddEntry(filePath, &updatedJSONObj)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+	}
+	return resultArray, errList
+}
+
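+// DeleteDataFromFDB - deletes records from all files matching the index queries and, for simple buckets, removes the index entries as well.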
+func DeleteDataFromFDB(dbName, indexID string, rs *gjson.Result, queries []string, infileIndexQueries []string) (recordsDeletedCnt int, errList []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return recordsDeletedCnt, []error{errormdl.Wrap("fdb instance not found " + dbName)}
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return recordsDeletedCnt, []error{errormdl.Wrap("INDEX not found: " + indexID)}
+	}
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return recordsDeletedCnt, []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+	}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordsDeletedCnt, []error{err}
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return recordsDeletedCnt, []error{ErrNoDataFound}
+	}
+	filePaths := make([]string, 0)
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordsDeletedCnt, []error{err}
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	cnt, errList := bucketObj.Delete(filePaths, infileIndexQueries, rs)
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+		return recordsDeletedCnt, errList
+	}
+	if _, ok := bucketObj.(*bucket.SimpleBucket); ok {
+		for path := range indexKeyValueMap {
+			loggermdl.LogError("deleting path", path)
+			if err := index.Delete(path); err != nil {
+				errList = append(errList, err)
+			}
+		}
+	}
+	return cnt, errList
+}
+
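+// SaveMediaInFDB - stores the media bytes in the bucket resolved from the index and returns the record path in the form dbName/indexID/rowID/recordID.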
+func SaveMediaInFDB(dbName, indexID string, mediaData []byte, data *gjson.Result) (recordPath string, err error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return recordPath, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	// get index from fdb index map
+	index, ok := fdb.indexes[indexID]
+	if !ok {
+		loggermdl.LogError("index not found: ", indexID)
+		return recordPath, errormdl.Wrap("index not found: " + indexID)
+	}
+	//  get bucket id from index
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	//  get bucket from fdb map
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("bucket not found: ", bucketID)
+		return recordPath, errormdl.Wrap("bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, data)
+	if err != nil {
+		loggermdl.LogError("could not resolve filepath: ", err)
+		return recordPath, errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	prevVal, err := index.GetEntryByPath(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	filePath, err := filepath.Abs(filepath.Join(fdb.DBPath, path))
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+
+	mediaStore, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot write media data on this bucket: ", bucketID)
+		return recordPath, errormdl.Wrap("cannot write media data on this bucket: " + bucketID)
+	}
+	//  call save on bucket
+	recordID, err := mediaStore.WriteMedia(filePath, mediaData, data)
+	if err != nil {
+		loggermdl.LogError("failed to insert data: ", err)
+		return recordPath, errormdl.Wrap("failed to insert data: " + err.Error())
+	}
+
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	prevVal, _ = sjson.Set(prevVal, "rowID", rowID)
+	updatedJSON, err := updateIndexJSON(index, prevVal, data)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	loggermdl.LogError("path after", path)
+	updatedJSONObj := gjson.Parse(updatedJSON)
+	err = index.AddEntry(path, &updatedJSONObj)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	recordPath = fdb.DBName + PathSeperator + indexID + PathSeperator + rowID + PathSeperator + recordID
+	return recordPath, nil
+}
+
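+// GetMediaFromFDB - reads the media bytes and associated file metadata for the given rowID and recordID.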
+func GetMediaFromFDB(dbName, indexID string, rowID, recordID string) (dataByte []byte, fileMeta gjson.Result, err error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return dataByte, fileMeta, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return dataByte, fileMeta, errormdl.Wrap("INDEX not found: " + indexID)
+	}
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return dataByte, fileMeta, errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	mediaReader, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot read media data from this bucket: ", bucketID)
+		return dataByte, fileMeta, errormdl.Wrap("cannot read media data from this bucket: " + bucketID)
+	}
+	queries := []string{`#[rowID=` + rowID + `]`}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, fileMeta, err
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return dataByte, fileMeta, nil
+	}
+	filePath := ""
+	for path := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, path))
+		if err != nil {
+			loggermdl.LogError(err)
+			return dataByte, fileMeta, err
+		}
+		// find only one file
+		break
+	}
+	dataByte, metaData, err := mediaReader.ReadMedia(filePath, recordID)
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, fileMeta, err
+	}
+	fileMeta = metaData.Get("requiredData")
+	return dataByte, gjson.Parse(fileMeta.String()), nil
+}
+
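+// UpdateMediaInFDB - replaces the media stored under the given recordID, updates the index entry and returns the record path.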
+func UpdateMediaInFDB(dbName, indexID, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return "", errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	recordID = strings.TrimSpace(recordID)
+	if recordID == "" {
+		return "", errormdl.Wrap("please provide recordID")
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("index not found: " + indexID)
+		return "", errormdl.Wrap("index not found: " + indexID)
+	}
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return "", errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, rs)
+	if err != nil {
+		loggermdl.LogError("could not find filepath: ", err)
+		return "", errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	queries := []string{`#[rowID=` + rowID + `]`}
+
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return "", errormdl.Wrap("no data found to update")
+	}
+	filePath := ""
+	for path := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, path))
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", err
+		}
+	}
+	mediaStore, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot write media data on this bucket: ", bucketID)
+		return "", errormdl.Wrap("cannot write media data on this bucket: " + bucketID)
+	}
+	err = mediaStore.UpdateMedia(filePath, recordID, mediaData, rs)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	for path, val := range indexKeyValueMap {
+		json, _ := sjson.Set(val, "rowID", rowID)
+		updatedJSON, err := updateIndexJSON(index, json, rs)
+		if err != nil {
+			return "", err
+		}
+
+		updatedJSONObj := gjson.Parse(updatedJSON)
+		err = index.AddEntry(path, &updatedJSONObj)
+		if err != nil {
+			return "", err
+		}
+	}
+	recordPath := fdb.DBName + PathSeperator + indexID + PathSeperator + rowID + PathSeperator + recordID
+	return recordPath, nil
+}
+
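+// UpsertMediaInFDB - updates the media stored under the given recordID or inserts it if not present, and returns the record path.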
+func UpsertMediaInFDB(dbName, indexID, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return "", errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	recordID = strings.TrimSpace(recordID)
+	if recordID == "" {
+		return "", errormdl.Wrap("please provide recordID")
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("index not found: " + indexID)
+		return "", errormdl.Wrap("index not found: " + indexID)
+	}
+	bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return "", errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, rs)
+	if err != nil {
+		loggermdl.LogError("could not find filepath: ", err)
+		return "", errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	queries := []string{`#[rowID=` + rowID + `]`}
+
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return "", errormdl.Wrap("no data found to update")
+	}
+	filePath := ""
+	for path := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, path))
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", err
+		}
+	}
+	mediaStore, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot write media data on this bucket: ", bucketID)
+		return "", errormdl.Wrap("cannot write media data on this bucket: " + bucketID)
+	}
+	recordID, err = mediaStore.UpsertMedia(filePath, recordID, mediaData, rs)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	for path, val := range indexKeyValueMap {
+		json, _ := sjson.Set(val, "rowID", rowID)
+		updatedJSON, err := updateIndexJSON(index, json, rs)
+		if err != nil {
+			return "", err
+		}
+
+		updatedJSONObj := gjson.Parse(updatedJSON)
+		err = index.AddEntry(path, &updatedJSONObj)
+		if err != nil {
+			return "", err
+		}
+	}
+	recordPath := fdb.DBName + PathSeperator + indexID + PathSeperator + rowID + PathSeperator + recordID
+	return recordPath, nil
+}
+
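+// ReorganizeFiles runs the pack bucket's Reorg operation on every file referenced by each index
+// of the given FDB and collects any errors encountered along the way.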
+func ReorganizeFiles(dbName string) (errList []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("Error occured while fetching DB instance", err)
+		return []error{errormdl.Wrap("Error occured while fetching DB instance")}
+	}
+	for _, index := range fdb.indexes {
+		bucketID := index.BucketSequence[len(index.BucketSequence)-1]
+		bucketObj, ok := fdb.buckets[bucketID]
+		if !ok {
+			loggermdl.LogError("Bucket not found: " + bucketID)
+			return []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+		}
+
+		if packBucketObj, ok := bucketObj.(*bucket.PackBucket); ok {
+
+			indexKeyValMap, err := index.GetAllEntries()
+			if err != nil {
+				loggermdl.LogError("index data not found", err)
+				return []error{errormdl.Wrap("index data not found")}
+			}
+			if len(indexKeyValMap) == 0 {
+				loggermdl.LogError("no data found to reorganize")
+				return []error{}
+			}
+			filePaths := make([]string, len(indexKeyValMap))
+			i := 0
+			for filePath := range indexKeyValMap {
+				sourceFile, err := filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+				if err != nil {
+					errList = append(errList, errormdl.Wrap("Error occurred during reorg of file data"))
+					continue
+				}
+				filePaths[i] = sourceFile
+				i++
+			}
+			reorgErrs := packBucketObj.Reorg(filePaths[:i])
+			if len(reorgErrs) > 0 {
+				errList = append(errList, reorgErrs...)
+			}
+		}
+	}
+	return errList
+}
+
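+// ResolvePath builds the relative file path for a record by joining the paths of all buckets in
+// the index's bucket sequence with the resolved index name.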
+func (fdb *FDB) ResolvePath(index *index.Index, rs *gjson.Result) (string, error) {
+	path := ""
+	for _, bucketID := range index.BucketSequence {
+		bucketObj, ok := fdb.buckets[bucketID]
+		if !ok {
+			loggermdl.LogError("bucket not found: " + bucketID)
+			return "", errormdl.Wrap("bucket not found: " + bucketID)
+		}
+
+		pathResolver, ok := bucketObj.(bucket.PathProvider)
+		if !ok {
+			return "", errormdl.Wrap("cant not find bucket path")
+		}
+		bucketPath, err := pathResolver.GetPath(rs)
+		if err != nil {
+			return "", err
+		}
+		path = filepath.Join(path, bucketPath)
+	}
+	indexName := index.IndexNameQuery
+	if index.IsDynamicName {
+		indexName = rs.Get(index.IndexNameQuery).String()
+	}
+	if indexName == "" {
+		return "", errormdl.Wrap("required attribute not provided:" + index.IndexNameQuery)
+	}
+	path = filepath.Join(path, indexName)
+	return path, nil
+}
+
+// updateIndexJSON - updates the index JSON with the index field values extracted from rs
+func updateIndexJSON(index *index.Index, existingData string, rs *gjson.Result) (string, error) {
+	json := existingData
+	var err error
+	for _, indexField := range index.IndexFields {
+		if rs.Get(indexField.Query).Value() == nil {
+			continue
+		}
+		json, err = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
+		// loggermdl.LogError("Error - ", err)
+	}
+	return json, err
+}
+
+// GenRowID generates a hash for the given file name, truncated to at most 16 characters
+func GenRowID(name string) (string, error) {
+	name = strings.ReplaceAll(filepath.Clean(name), string(filepath.Separator), "")
+	rowID, err := securitymdl.GetHash(name)
+	if err != nil {
+		return "", err
+	}
+
+	if len(rowID) > 16 {
+		return rowID[:16], nil
+	}
+
+	return rowID, nil
+}
+
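+// addMigrationReplaceConfig writes a migration config file with migration type MigrationTypeReplace at targetBasePath.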
+func addMigrationReplaceConfig(targetBasePath string, securityProvider securityprovider.SecurityProvider) error {
+	configfilePath, err := filepath.Abs(filepath.Join(targetBasePath, MigrationConfigFilename))
+	if err != nil {
+		return err
+	}
+	migrationConfigStr, _ := sjson.Set("", MigrationTypeKeyword, MigrationTypeReplace)
+	rs := gjson.Parse(migrationConfigStr)
+	lockerObj := locker.NewLocker(configfilePath)
+	simpleFile, err := filetype.NewSimpleFile(configfilePath, securityProvider, lockerObj)
+	if err != nil {
+		return errormdl.Wrap("fail to add migration config: " + err.Error())
+	}
+	defer simpleFile.Close()
+	return simpleFile.Write(&rs)
+}
+
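+// addMigrationUpdateConfig writes a migration config file with migration type MigrationTypeUpdate at targetBasePath.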
+func addMigrationUpdateConfig(targetBasePath string, securityProvider securityprovider.SecurityProvider) error {
+	configfilePath, err := filepath.Abs(filepath.Join(targetBasePath, MigrationConfigFilename))
+	if err != nil {
+		return err
+	}
+
+	migrationConfigStr, _ := sjson.Set("", MigrationTypeKeyword, MigrationTypeUpdate)
+	rs := gjson.Parse(migrationConfigStr)
+	lockerObj := locker.NewLocker(configfilePath)
+	simpleFile, err := filetype.NewSimpleFile(configfilePath, securityProvider, lockerObj)
+	if err != nil {
+		return errormdl.Wrap("fail to add migration config: " + err.Error())
+	}
+	defer simpleFile.Close()
+	return simpleFile.Write(&rs)
+}
+
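+// getMigrationConfig reads and parses the migration config file stored at sourcePath.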
+func getMigrationConfig(sourcePath string, rs *gjson.Result, securityProvider securityprovider.SecurityProvider) (*gjson.Result, error) {
+	configfilePath, err := filepath.Abs(filepath.Join(sourcePath, MigrationConfigFilename))
+	if err != nil {
+		return nil, err
+	}
+	if !filemdl.FileAvailabilityCheck(configfilePath) {
+		return nil, errormdl.Wrap("file not found")
+	}
+	lockerObj := locker.NewLocker(configfilePath)
+	simpleFile, err := filetype.NewSimpleFile(configfilePath, securityProvider, lockerObj)
+	if err != nil {
+		return nil, err
+	}
+	defer simpleFile.Close()
+	dataByte, err := simpleFile.Read(rs)
+	if err != nil {
+		return nil, err
+	}
+	migrationConfig := gjson.ParseBytes(dataByte)
+	return &migrationConfig, nil
+}
+
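+// ZipImporter is a DataImporter that
+// allows importing fdb data from a zip archive.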
+type ZipImporter struct {
+	FdbName    string
+	IndexID    string
+	SourcePath string
+	Data       *gjson.Result
+}
+
+// ZipExporter is a DataExporter that
+// allows exporting fdb data as a zip archive.
+type ZipExporter struct {
+	FdbName       string
+	IndexID       string
+	Queries       []string
+	DestPath      string
+	MigrationType string
+}
+
+// DataExport exports fdb data as zip
+func (z ZipExporter) DataExport() (err error) {
+	fdb, err := GetFDBInstance(z.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", z.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + z.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(z.IndexID)
+	if !ok {
+		return errormdl.Wrap("INDEX not found: " + z.IndexID)
+	}
+	sourcePath := ""
+	timeStamp := time.Now().Nanosecond()
+	targetBasePath := filepath.Join(filemdl.TempDir, strconv.Itoa(timeStamp))
+	filteredKeyValMap, err := index.GetEntriesByQueries(z.Queries)
+	if err != nil {
+		return err
+	}
+	if len(filteredKeyValMap) == 0 {
+		return errormdl.Wrap("no data found to export")
+	}
+	defer func() {
+		// remove the temporary folder used to stage data for the zip
+		filemdl.DeleteDirectory(targetBasePath)
+	}()
+	// copy data files
+	for path := range filteredKeyValMap {
+		sourcePath = filepath.Join(fdb.DBPath, path)
+		targetPath := filepath.Join(targetBasePath, path)
+		_, err := filemdl.CopyFile(sourcePath, targetPath, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// copy index file
+	targetPath := filepath.Join(targetBasePath, INDEXFOLDER, z.IndexID)
+	err = AddIndexEntriesInFile(targetPath, filteredKeyValMap, index.SecurityProvider)
+	if err != nil {
+		return err
+	}
+
+	switch z.MigrationType {
+	case MigrationTypeUpdate:
+		err = addMigrationUpdateConfig(targetBasePath, index.SecurityProvider)
+	case MigrationTypeReplace:
+		err = addMigrationReplaceConfig(targetBasePath, index.SecurityProvider)
+	default:
+		return errormdl.Wrap("fail to export data: export operation not allowed on migration type - " + z.MigrationType)
+	}
+
+	if err != nil {
+		loggermdl.LogError("fail to export data: ", err)
+		return errormdl.Wrap("fail to export data: " + err.Error())
+	}
+	// make zip of copied data to destination folder
+	// zip will have name of indexId
+	destinationPath := filepath.Join(z.DestPath, z.IndexID)
+	return filemdl.Zip(targetBasePath, destinationPath)
+}
+
+// DataImport imports data from zip
+func (z ZipImporter) DataImport() (err error) {
+	fdb, err := GetFDBInstance(z.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", z.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + z.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(z.IndexID)
+	if !ok {
+		loggermdl.LogError("index not found: ", z.IndexID)
+		return errormdl.Wrap("index not found: " + z.IndexID)
+	}
+	archivePath := z.SourcePath
+	if !filemdl.FileAvailabilityCheck(archivePath) {
+		loggermdl.LogError("archive file not found at specified location: ", archivePath)
+		return errormdl.Wrap("archive file not found at location: " + archivePath)
+	}
+	timeStamp := time.Now().Nanosecond()
+	pathToExtractZip := filepath.Join(filemdl.TempDir, strconv.Itoa(timeStamp))
+
+	err = filemdl.Unzip(archivePath, pathToExtractZip)
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	defer func() {
+		// removes extracted files
+		filemdl.DeleteDirectory(pathToExtractZip)
+	}()
+	childDirs, err := filemdl.ListDirectory(pathToExtractZip)
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	if len(childDirs) == 0 {
+		loggermdl.LogError("no data found to import")
+		return errormdl.Wrap("no data found to import")
+	}
+	if !childDirs[0].IsDir() {
+		loggermdl.LogError("invalid archive file")
+		return errormdl.Wrap("invalid archive file")
+	}
+	sourcePath := filepath.Join(pathToExtractZip, childDirs[0].Name())
+	fdbBasePath := fdb.DBPath
+
+	// loggermdl.LogDebug(sourcePath)
+	migrationConfig, err := getMigrationConfig(sourcePath, z.Data, index.SecurityProvider)
+	if err != nil {
+		loggermdl.LogError("fail to get migration config", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	migrationType := migrationConfig.Get(MigrationTypeKeyword).String()
+	if migrationType != MigrationTypeReplace && migrationType != MigrationTypeUpdate {
+		return errormdl.Wrap("import operation not allowed on migration type - " + migrationType)
+	}
+	err = filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			loggermdl.LogError("err", err)
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		}
+		//  ignore config file from copying
+		if strings.Contains(path, MigrationConfigFilename) {
+			return nil
+		}
+
+		foundAtIndex := strings.LastIndex(path, sourcePath)
+		if foundAtIndex == -1 {
+			return errormdl.Wrap("invalid archived file")
+		}
+		// loggermdl.LogDebug(path)
+
+		// if migration type is MigrationTypeUpdate then copy index entries from the index file, else replace the index files
+		if migrationType == MigrationTypeUpdate && strings.Contains(path, INDEXFOLDER) {
+			// load index entries from the exported index file
+			err := ImportIndexEntries(path, fdb, z.IndexID)
+			if err != nil {
+				loggermdl.LogError("fail to load indexes from data", err)
+				return errormdl.Wrap("fail to load indexes")
+			}
+			err = index.WriteIndexEntriesInFile()
+			if err != nil {
+				loggermdl.LogError("fail to add indexes: ", err)
+				return errormdl.Wrap("fail to add indexes")
+			}
+			return nil
+		}
+		destPath := filepath.Join(fdbBasePath, path[foundAtIndex+len(sourcePath):])
+		if !filemdl.FileAvailabilityCheck(destPath) {
+			dir, _ := filepath.Split(destPath)
+			err = filemdl.CreateDirectoryRecursive(dir)
+			if err != nil {
+				return err
+			}
+		}
+		return filemdl.AtomicReplaceFile(path, destPath)
+	})
+	if err != nil {
+		loggermdl.LogError("fail to import data: ", err)
+		return errormdl.Wrap("fail to import data: " + err.Error())
+	}
+
+	err = index.LoadIndexEntriesFromFile()
+	if err != nil {
+		loggermdl.LogError("fail to add indexes", err)
+		return errormdl.Wrap("fail to add indexes")
+	}
+
+	return nil
+}
+
+// ImportIndexEntries loads index entries from the given index file and adds them to the specified FDB index.
+func ImportIndexEntries(indexFilePath string, fdb *FDB, indexID string) error {
+	index, found := fdb.GetFDBIndex(indexID)
+	if !found {
+		return errormdl.Wrap("index not found")
+	}
+	if !filemdl.FileAvailabilityCheck(indexFilePath) {
+		return nil
+	}
+	fileData, err := filemdl.FastReadFile(indexFilePath)
+	if err != nil {
+		loggermdl.LogError("failed to load FDB index from: ", indexFilePath)
+		return err
+	}
+	fileData, err = index.SecurityProvider.Decrypt(fileData, indexFilePath, nil)
+	if err != nil {
+		loggermdl.LogError("failed to decrypt FDB index data: ", err)
+		return errormdl.Wrap("failed to decrypt FDB index data: " + err.Error())
+	}
+	data := string(fileData)
+	indexRecords := strings.Split(data, lineBreak)
+	indexDataMap := make(map[string]string)
+	for _, indexRecord := range indexRecords {
+		indexValues := strings.Split(indexRecord, IndexKeyValSeperator)
+		if len(indexValues) == 2 {
+			indexDataMap[indexValues[0]] = indexValues[1]
+		}
+	}
+	var fns []func(a, b string) bool
+	for _, idx := range index.IndexFields {
+		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
+	}
+
+	// add the loaded entries to the index
+	return index.AddEntries(indexDataMap)
+}
+
+// FileImporter is a DataImporter that
+// allows importing fdb data from an exported folder.
+type FileImporter struct {
+	FdbName    string
+	IndexID    string
+	SourcePath string
+	Data       *gjson.Result
+}
+
+// FileExporter is a DataExporter that
+// allows exporting fdb data to a folder.
+type FileExporter struct {
+	FdbName       string
+	IndexID       string
+	Queries       []string
+	DestPath      string
+	MigrationType string
+}
+
+// DataExport exports fdb data in a folder
+func (f FileExporter) DataExport() (err error) {
+	fdb, err := GetFDBInstance(f.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", f.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + f.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(f.IndexID)
+	if !ok {
+		return errormdl.Wrap("INDEX not found: " + f.IndexID)
+	}
+	sourcePath := ""
+	targetBasePath := filepath.Join(f.DestPath, f.IndexID)
+	filteredKeyValMap, err := index.GetEntriesByQueries(f.Queries)
+	if err != nil {
+		return err
+	}
+
+	if len(filteredKeyValMap) == 0 {
+		return errormdl.Wrap("no data found to export")
+	}
+
+	for path := range filteredKeyValMap {
+		sourcePath = filepath.Join(fdb.DBPath, path)
+		targetPath := filepath.Join(targetBasePath, path)
+		_, err := filemdl.CopyFile(sourcePath, targetPath, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// copy index file
+	targetPath := filepath.Join(targetBasePath, INDEXFOLDER, f.IndexID)
+	err = AddIndexEntriesInFile(targetPath, filteredKeyValMap, index.SecurityProvider)
+	if err != nil {
+		return err
+	}
+
+	switch f.MigrationType {
+	case MigrationTypeUpdate:
+		err = addMigrationUpdateConfig(targetBasePath, index.SecurityProvider)
+	case MigrationTypeReplace:
+		err = addMigrationReplaceConfig(targetBasePath, index.SecurityProvider)
+	default:
+		return errormdl.Wrap("export operation not allowed on migration type - " + f.MigrationType)
+	}
+
+	return err
+}
+
+// DataImport imports data from exported folder
+func (f FileImporter) DataImport() (err error) {
+	fdb, err := GetFDBInstance(f.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", f.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + f.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(f.IndexID)
+	if !ok {
+		loggermdl.LogError("index not found: ", f.IndexID)
+		return errormdl.Wrap("index not found: " + f.IndexID)
+	}
+	if !filemdl.FileAvailabilityCheck(f.SourcePath) {
+		loggermdl.LogError("archive file not found at specified location: ", f.SourcePath)
+		return errormdl.Wrap("archive file not found at location: " + f.SourcePath)
+	}
+
+	timeStamp := time.Now().Nanosecond()
+	tempDir := filepath.Join(filemdl.TempDir, strconv.Itoa(timeStamp))
+	err = filemdl.CopyDir(f.SourcePath, tempDir)
+	defer func() {
+		filemdl.DeleteDirectory(tempDir)
+	}()
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("fail to copy data")
+	}
+
+	childDirs, err := filemdl.ListDirectory(tempDir)
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	if len(childDirs) == 0 {
+		loggermdl.LogError("no data found to import")
+		return errormdl.Wrap("no data found to import")
+	}
+	fdbBasePath := fdb.DBPath
+
+	// loggermdl.LogDebug(f.SourcePath)
+	migrationConfig, err := getMigrationConfig(tempDir, f.Data, index.SecurityProvider)
+	if err != nil {
+		loggermdl.LogError("fail to get migration config", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	migrationType := migrationConfig.Get(MigrationTypeKeyword).String()
+	if migrationType != MigrationTypeReplace && migrationType != MigrationTypeUpdate {
+		return errormdl.Wrap("import operation not allowed on migration type - " + migrationType)
+	}
+	err = filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			loggermdl.LogError("err", err)
+			return err
+		}
+		// loggermdl.LogError(path)
+		if info.IsDir() {
+			return nil
+		}
+		//  ignore config file from copying
+		if strings.Contains(path, MigrationConfigFilename) {
+			return nil
+		}
+
+		foundAtIndex := strings.LastIndex(path, tempDir)
+		if foundAtIndex == -1 {
+			return errormdl.Wrap("invalid archived file")
+		}
+		// if file is index file then copy index entries from index files
+		if strings.Contains(path, INDEXFOLDER) {
+			// load index entries from file
+			err := ImportIndexEntries(path, fdb, f.IndexID)
+			if err != nil {
+				loggermdl.LogError("fail to import indexes", err)
+				return errormdl.Wrap("fail to import indexes")
+			}
+			err = index.WriteIndexEntriesInFile()
+			if err != nil {
+				loggermdl.LogError("fail to import indexes: ", err)
+				return errormdl.Wrap("fail to import indexes")
+			}
+			return nil
+		}
+
+		destPath := filepath.Join(fdbBasePath, path[foundAtIndex+len(tempDir):])
+		if !filemdl.FileAvailabilityCheck(destPath) {
+			dir, _ := filepath.Split(destPath)
+			err = filemdl.CreateDirectoryRecursive(dir)
+			if err != nil {
+				return err
+			}
+		}
+		return filemdl.AtomicReplaceFile(path, destPath)
+	})
+
+	if err != nil {
+		loggermdl.LogError("fail to import data: ", err)
+		return errormdl.Wrap("fail to import data: " + err.Error())
+	}
+
+	return nil
+}
+
+// AddIndexEntriesInFile writes the given index entries to the index file at indexFilePath, encrypting the data via the supplied security provider when entries are present.
+func AddIndexEntriesInFile(indexFilePath string, entries map[string]string, securityProvider securityprovider.SecurityProvider) error {
+	// dbPath := filepath.Join(fdbPath, INDEXFOLDER)
+	// loggermdl.LogDebug("in log fdb index")
+	dataToStore := ""
+	for key, val := range entries {
+		dataToStore = dataToStore + key + IndexKeyValSeperator + val + lineBreak
+	}
+	dataByteToWriteRes := []byte{}
+	var hashError error
+	if len(dataToStore) > 0 {
+		_, fileName := filepath.Split(indexFilePath)
+		dataByteToWriteRes, hashError = securityProvider.Encrypt([]byte(dataToStore), fileName, nil)
+		if errormdl.CheckErr1(hashError) != nil {
+			return errormdl.CheckErr1(hashError)
+		}
+	}
+	return filemdl.WriteFile(indexFilePath, dataByteToWriteRes, true, false)
+}
diff --git a/dalmdl/corefdb/corefdb_test.go b/dalmdl/corefdb/corefdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f89a54d4de7e8308cfe263166c375aada2324edd
--- /dev/null
+++ b/dalmdl/corefdb/corefdb_test.go
@@ -0,0 +1,757 @@
+package corefdb
+
+import (
+	"log"
+	"testing"
+	"time"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/bucket"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+var (
+	dbPath              = "D:\\exe\\myfdb"
+	dbName              = "myfdb"
+	CheckLazyIndexWrite = false
+	lazyWriteInterval   = 3
+)
+
+func init() {
+
+	fdb, err := CreateFDBInstance(dbPath, dbName, true)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// enable security on fdb
+	{
+
+		fdbSecurityProvider := securityprovider.New(securityprovider.SecurityConfig{
+			EncKey:         "myenckey",
+			UserDefinedKey: "mkcl",
+			FieldQuery:     "instituteId",
+		})
+
+		err = fdb.SetSecurityProvider(fdbSecurityProvider)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	//  creating simple bucket
+	{
+		simpleBucket, err := bucket.NewSimpleBucket("Simple", false, false, "")
+		if err != nil {
+			log.Fatal(err)
+		}
+		fields := []index.IndexField{
+			index.IndexField{
+				FieldName: "name",
+				Query:     "name",
+			},
+		}
+		i, err := fdb.RegisterNewIndex("stdId", "studentId", true, fields)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = simpleBucket.AddIndex(i)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = fdb.AddBucket(simpleBucket.BucketID, simpleBucket)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	//  creating pack bucket
+	{
+
+		inFileIndexSchemaMap := map[string]filetype.InFileIndex{
+			"Exam": filetype.InFileIndex{
+				FileType: "Exam",
+				IndexFields: []filetype.InFileIndexField{
+					filetype.InFileIndexField{
+						FieldName: "examId",
+						Query:     "examId",
+					},
+				},
+			},
+			"Profile": filetype.InFileIndex{
+				FileType: "Profile",
+				IndexFields: []filetype.InFileIndexField{
+					filetype.InFileIndexField{
+						FieldName: "class",
+						Query:     "class",
+					},
+				},
+			},
+		}
+
+		packBucket, err := bucket.NewPackBucket("PackBucket", false, "", inFileIndexSchemaMap)
+		if err != nil {
+			log.Fatal(err)
+		}
+		packIndexfields := []index.IndexField{
+			index.IndexField{
+				FieldName: "name",
+				Query:     "name",
+			},
+		}
+		packbucketIndex, err := fdb.RegisterNewIndex("studentPack", "stdId", true, packIndexfields)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = packBucket.AddIndex(packbucketIndex)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = fdb.AddBucket(packBucket.BucketID, packBucket)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	//  creating append bucket
+	{
+		appendBucket, err := bucket.NewAppendBucket("Append", false, false, "")
+		if err != nil {
+			log.Fatal(err)
+		}
+		fields := []index.IndexField{
+			index.IndexField{
+				FieldName: "name",
+				Query:     "name",
+			},
+		}
+		i, err := fdb.RegisterNewIndex("stdResponse", "studentId", true, fields)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = appendBucket.AddIndex(i)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = fdb.AddBucket(appendBucket.BucketID, appendBucket)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	// load index record from index files
+	// {
+	// 	indexIds := []string{"stdId", "studentPack", "stdResponse"}
+	// 	for _, indexID := range indexIds {
+	// 		indexFilePath := filepath.Join(fdb.DBPath, INDEXFOLDER, indexID)
+	// 		err = LoadFDBIndexFromFile(indexFilePath, fdb, indexID)
+	// 		if err != nil {
+	// 			log.Fatal(err)
+	// 		}
+	// 	}
+
+	// }
+}
+func TestSaveDataInSimpleBucket(t *testing.T) {
+	tests := []struct {
+		data           string
+		ShouldGetError bool
+	}{
+		{
+			data:           `{"instituteId":"geca" ,"name": "ajay","fileType": "EXAM","studentId": 1234,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+		{
+			data:           `{"instituteId":"geca" ,"name": "ajay","fileType": "EXAM","studentId": 1235,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+		{
+			data:           `{"instituteId":"geca" ,"name": "ajay","fileType": "EXAM","studentId": 1236,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+	}
+
+	indexID := "stdId"
+
+	for i, test := range tests {
+		studentObj := gjson.Parse(test.data)
+		t.Logf("\t Test: %d", i)
+		now := time.Now()
+		err := SaveDataInFDB(dbName, indexID, &studentObj)
+		if err != nil {
+			log.Fatal(err)
+		}
+		timeElapsed := time.Since(now)
+		// loggermdl.LogError("timeElapsed", timeElapsed)
+		t.Logf("\t %s\t should be able to save data %s", "succeed", timeElapsed)
+	}
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestGetDataFromSimpleBucket(t *testing.T) {
+	indexID := "stdId"
+
+	data, _ := sjson.Set("", "fileType", "Exam")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	studentObj := gjson.Parse(data)
+
+	queries := []string{`#[name==ajay]`}
+	inFileIndexQueries := []string{}
+
+	result, err := ReadDataFromFDB(dbName, indexID, &studentObj, queries, inFileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogDebug("result", result.String())
+}
+func TestUpdateDataInNormalBucket(t *testing.T) {
+	indexID := "stdId"
+	data, _ := sjson.Set("", "abc", 10)
+	data, _ = sjson.Set(data, "marks", 30)
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	queries := []string{`#[name=="ajay"]`}
+	infileIndexQueries := []string{}
+	studentObj := gjson.Parse(data)
+
+	updatedData, errList := UpdateDataInFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+	}
+	loggermdl.LogDebug("updatedData", updatedData)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestDeleteDataFromNormalBucket(t *testing.T) {
+	indexID := "stdId"
+
+	queries := []string{`#[name==ajay]`}
+	infileIndexQueries := []string{}
+
+	// data, _ := sjson.Set("", "fileType", "Exam")
+	// data, _ := sjson.Set(data, "studentId", 1234)
+
+	studentObj := gjson.Result{}
+
+	recordsDeletedCnt, errList := DeleteDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		loggermdl.LogError("errList", errList)
+	}
+	loggermdl.LogDebug("recordsDeletedCnt", recordsDeletedCnt)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestSaveDataInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+	// get fdb obj
+
+	data, _ := sjson.Set("", "name", "ajay")
+	data, _ = sjson.Set(data, "stdId", 1238)
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "examName", "unit2")
+	data, _ = sjson.Set(data, "totalQuestion", 50)
+	data, _ = sjson.Set(data, "marks", 26)
+	data, _ = sjson.Set(data, "examId", "MATH001")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+
+	err := SaveDataInFDB(dbName, indexID, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+func TestGetDataFromPackBucket(t *testing.T) {
+	indexID := "studentPack"
+	data, _ := sjson.Set("", "name", "ajay")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+
+	queries := []string{`#[name=ajay]`}
+	infileIndexQueries := []string{`#[examId=MATH001]`}
+
+	result, err := ReadDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("result", result)
+}
+
+func TestUpdateDataInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "abc", "123")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+	queries := []string{`#[name=ajay]`}
+	infileIndexQueries := []string{`#[examId=MATH002]`}
+	result, errList := UpdateDataInFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	loggermdl.LogError("result", result)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestDeleteDataFromPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "abc", "123")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+
+	queries := []string{`#[name=vijay]`}
+	infileIndexQueries := []string{`#[examId=MATH002]`}
+	cnt, errList := DeleteDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	loggermdl.LogError("delete cnt", cnt)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestSaveMediaInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "stdId", 1239)
+
+	studentObj := gjson.Parse(data)
+
+	filePath := "C:\\Users\\vivekn\\Pictures\\gopher.png"
+
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	recordID, err := SaveMediaInFDB(dbName, indexID, dataByte, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("recordId", recordID)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestReadMediaFromPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	rowID := "-568385317811827"
+	recordID := "1ZtdPpUYLuKmcHJpTn2LXQ4XABM"
+	_, fileMeta, err := GetMediaFromFDB(dbName, indexID, rowID, recordID)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("fileMeta", fileMeta)
+}
+
+func TestUpdateMediaInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1239)
+
+	studentObj := gjson.Parse(data)
+
+	recordID := "1ZsY055dnvgL6qutjy5sbnsubS8"
+	filePath := "C:\\Users\\vivekn\\Pictures\\gopher.png"
+
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	recordPath, err := UpdateMediaInFDB(dbName, indexID, recordID, dataByte, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("recordPath", recordPath)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestUpsertMediaInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "stdId", 1239)
+
+	studentObj := gjson.Parse(data)
+
+	recordID := "dfsdg123243"
+	filePath := "C:\\Users\\vivekn\\Pictures\\ghg.jpg"
+
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	recordPath, err := UpsertMediaInFDB(dbName, indexID, recordID, dataByte, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("recordPath", recordPath)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestSaveDataInAppendBucket(t *testing.T) {
+	tests := []struct {
+		data           string
+		ShouldGetError bool
+	}{
+		{
+			data:           `{"name": "ajay","instituteId": "geca", "fileType": "EXAM","studentId": 1234,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+		{
+			data:           `{"name": "sanjay","instituteId": "geca","fileType": "EXAM","studentId": 1234,"examId":"exam002","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+	}
+
+	indexID := "stdResponse"
+
+	for i, test := range tests {
+		studentObj := gjson.Parse(test.data)
+		t.Logf("\t Test: %d", i)
+		err := SaveDataInFDB(dbName, indexID, &studentObj)
+		if err != nil {
+			log.Fatal(err)
+		}
+		t.Logf("\t %s\t should be able to save data", "succeed")
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestExportDataAsZip(t *testing.T) {
+	indexID := "studentPack"
+
+	zipExporter := ZipExporter{
+		DestPath:      "D:\\exe\\backup",
+		IndexID:       indexID,
+		MigrationType: MigrationTypeUpdate,
+		Queries:       []string{},
+		FdbName:       dbName,
+	}
+
+	err := zipExporter.DataExport()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+func TestImportDataFromZip(t *testing.T) {
+	indexID := "studentPack"
+	fdbObj, err := GetFDBInstance(dbName)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	zipImporter := ZipImporter{
+		Data:       nil,
+		SourcePath: "D:\\exe\\backup\\" + indexID,
+		FdbName:    dbName,
+		IndexID:    indexID,
+	}
+	err = zipImporter.DataImport()
+	// err := ImportZip("myfdb", "home/vivekn/fdb_data/dest/stdId")
+	if err != nil {
+		loggermdl.LogError(err)
+		log.Fatal(err)
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+}
+
+func TestExportDataAsFiles(t *testing.T) {
+	indexID := "stdId"
+	exporter := FileExporter{
+		DestPath:      "D:\\exe\\backup",
+		IndexID:       indexID,
+		MigrationType: MigrationTypeUpdate,
+		Queries:       []string{},
+		FdbName:       dbName,
+	}
+
+	err := exporter.DataExport()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+func TestImportDataFromFile(t *testing.T) {
+	indexID := "stdId"
+	fdbObj, err := GetFDBInstance(dbName)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fileImporter := FileImporter{
+		Data:       nil,
+		SourcePath: "D:\\exe\\backup\\" + indexID,
+		FdbName:    dbName,
+		IndexID:    indexID,
+	}
+	err = fileImporter.DataImport()
+	// err := ImportZip("myfdb", "home/vivekn/fdb_data/dest/stdId")
+	if err != nil {
+		loggermdl.LogError(err)
+		log.Fatal(err)
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestReorg(t *testing.T) {
+	indexID := "studentPack"
+	fdbObj, err := GetFDBInstance(dbName)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ := sjson.Set("", "name", "ajay")
+	data, _ = sjson.Set(data, "stdId", 1237)
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "examName", "unit2")
+	data, _ = sjson.Set(data, "totalQuestion", 50)
+	data, _ = sjson.Set(data, "marks", 26)
+	data, _ = sjson.Set(data, "examId", "MATH001")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+	studentObj := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ = sjson.Set(data, "examId", "MATH003")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1238)
+	std2 := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &std2)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ = sjson.Set(data, "examId", "MATH001")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1237)
+	std3 := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &std3)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ = sjson.Set(data, "examId", "MATH002")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1238)
+	std4 := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &std4)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogDebug("saved")
+	queries := []string{`#[name=="ajay"]`}
+	infileIndexQueries := []string{`#[examId=="MATH001"]`}
+	recordsDeleted, errList := DeleteDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+	}
+	index, ok := fdbObj.GetFDBIndex(indexID)
+	if !ok {
+		log.Fatal("index not found")
+	}
+	loggermdl.LogDebug("deletd cnt", recordsDeleted)
+	err = index.WriteIndexEntriesInFile()
+	if err != nil {
+		log.Fatal(err)
+	}
+	now := time.Now()
+
+	errList = ReorganizeFiles("myfdb")
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	elapsed := time.Since(now)
+
+	loggermdl.LogError("elapsed", elapsed)
+
+}
diff --git a/dalmdl/corefdb/filepointercache.go b/dalmdl/corefdb/filepointercache.go
deleted file mode 100644
index d930ba9a5b0bbbd23301b9c2589e6b41609eb6b0..0000000000000000000000000000000000000000
--- a/dalmdl/corefdb/filepointercache.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package corefdb
-
-import (
-	"os"
-	"sync"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
-
-	"github.com/dgraph-io/ristretto"
-	"github.com/tidwall/gjson"
-)
-
-var (
-	// CacheConfig - used for creating new cache instance
-	CacheConfig = ristretto.Config{
-		MaxCost:     1000,
-		NumCounters: 10000,
-		BufferItems: 64,
-		OnEvict:     onFilepointerEvict,
-	}
-)
-
-// File -
-type File struct {
-	file        *os.File
-	lock        *sync.Mutex
-	InfileIndex *gjson.Result
-}
-
-// Cache -
-type Cache struct {
-	cache ristretto.Cache
-	cfg   *ristretto.Config
-}
-
-// Get -
-func (c Cache) Get(key string) (*File, error) {
-	val, found := c.cache.Get(key)
-	if !found {
-		return nil, errormdl.Wrap("not found")
-	}
-	fileInstance, ok := val.(*File)
-	if !ok {
-		return nil, errormdl.Wrap("invalid cache val")
-	}
-	return fileInstance, nil
-}
-
-// Set - sets filepointer and infile index data in cache
-func (c Cache) Set(fp *os.File, infileIndexData *gjson.Result) *File {
-	rfile := File{
-		file:        fp,
-		InfileIndex: infileIndexData,
-		lock:        &sync.Mutex{},
-	}
-	file, err := c.Get(fp.Name())
-	if err == nil {
-		rfile.lock = file.lock
-	}
-	c.cache.Set(fp.Name(), &rfile, 1)
-	return &rfile
-}
-
-// Delete -
-func (c Cache) Delete(key string) {
-	f, err := c.Get(key)
-	if err != nil {
-		return
-	}
-	c.cache.Del(key)
-	f.file.Close()
-}
-
-// DeleteAll -
-func (c Cache) DeleteAll() {
-	c.cache.Clear()
-}
-
-// NewCache - creates new Cache
-func NewCache() (*Cache, error) {
-	c, err := ristretto.NewCache(&CacheConfig)
-	if err != nil {
-		return nil, err
-	}
-
-	cacheInstance := Cache{
-		cache: *c,
-		cfg:   &CacheConfig,
-	}
-	return &cacheInstance, nil
-}
-
-func onFilepointerEvict(hashes, hash2 uint64, val interface{}, cost int64) {
-	fileInstance, ok := val.(*File)
-	if !ok {
-		return
-	}
-	fileInstance.lock.Lock()
-	defer func() {
-		fileInstance.lock.Unlock()
-	}()
-	fileInstance.file.Close()
-	// loggermdl.LogError("evicted - ", fileInstance.file.Name())
-}
diff --git a/dalmdl/corefdb/filetype/append.go b/dalmdl/corefdb/filetype/append.go
new file mode 100644
index 0000000000000000000000000000000000000000..33a8b42fd7231820648c4f57a93ff42321f24d50
--- /dev/null
+++ b/dalmdl/corefdb/filetype/append.go
@@ -0,0 +1,76 @@
+package filetype
+
+import (
+	"os"
+	"path/filepath"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+)
+
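+// AppendFile represents an append-only data file; every Write appends one record terminated by a line break.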
+type AppendFile struct {
+	FilePath         string
+	Fp               *os.File
+	IsLazyEnable     bool
+	securityProvider securityprovider.SecurityProvider
+	Locker           locker.Locker
+}
+
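+// NewAppendFile opens (or creates) an append file at filePath, creating parent directories as needed.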
+func NewAppendFile(filePath string, securityProvider securityprovider.SecurityProvider, locker locker.Locker) (*AppendFile, error) {
+	if filePath == "" {
+		return nil, errormdl.Wrap("please provide valid filepath")
+	}
+	if locker == nil {
+		return nil, errormdl.Wrap("please provide locker")
+	}
+	path, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+	dir, _ := filepath.Split(path)
+	if dir != "" {
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to open file: " + err.Error())
+	}
+	file := AppendFile{
+		FilePath:         filePath,
+		Fp:               f,
+		securityProvider: securityProvider,
+		Locker:           locker,
+	}
+	return &file, nil
+}
+
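+// Write appends the JSON record to the file, encrypting it first when a security provider is configured.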
+func (af *AppendFile) Write(rs *gjson.Result) (err error) {
+	dataBytes := []byte(rs.String())
+	if af.securityProvider != nil {
+		dataBytes, err = af.securityProvider.Encrypt(dataBytes, af.Fp.Name(), rs)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+	dataBytes = append(dataBytes, []byte(lineBreak)...)
+	af.Locker.Lock()
+	defer func() {
+		af.Locker.Unlock()
+	}()
+	_, _, err = filemdl.AppendDataToFile(af.Fp.Name(), dataBytes, true)
+	return err
+}
+
+func (af *AppendFile) Close() error {
+	return af.Fp.Close()
+}
diff --git a/dalmdl/corefdb/filetype/filetype.go b/dalmdl/corefdb/filetype/filetype.go
new file mode 100644
index 0000000000000000000000000000000000000000..36291ca7f05ee2159588dc7b8c647e36b632caf0
--- /dev/null
+++ b/dalmdl/corefdb/filetype/filetype.go
@@ -0,0 +1,5 @@
+package filetype
+
+const (
+	lineBreak = "\r\n"
+)
diff --git a/dalmdl/corefdb/filetype/pack.go b/dalmdl/corefdb/filetype/pack.go
new file mode 100644
index 0000000000000000000000000000000000000000..32c3fed2f0423cbb38028ca25040d4ef857395e2
--- /dev/null
+++ b/dalmdl/corefdb/filetype/pack.go
@@ -0,0 +1,1222 @@
+package filetype
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	fileStatusReady                     = 0
+	fileStatusUpdatingData              = 1
+	fileStatusUpdatingIndex             = 2
+	fileStatusOffsetInFile              = 0
+	isReadyForUploadOffsetInFile        = 1
+	isUpdatedAndNotCommitedOffsetInFile = 2
+	isReorgRequiredOffsetInFile         = 3
+	isReindexRequiredOffsetInFile       = 4
+	footerOffsetInFile                  = 5
+	footerOffsetReservedSize            = 15
+	footerSizeOffset                    = 20
+	filehashOffest                      = 35
+	lastUpdatedOffset                   = 43
+	dataStartOffset                     = 53
+	sizeReservedForHeaders              = 53
+
+	// IndexKeyValSeperator -
+	IndexKeyValSeperator = "="
+	// FileType - represents key for type of file. Used whenever we need to set file type field in json
+	FileType = "fileType"
+)
+
+// ErrNoDataFound - ErrNoDataFound
+var ErrNoDataFound = errors.New("No data found")
+
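+// PackFile represents a pack file that stores multiple records in a single file along with an in-file index kept in the file footer.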
+type PackFile struct {
+	FilePath             string
+	InfileIndexRows      *gjson.Result
+	Fp                   *os.File
+	SecurityProvider     securityprovider.SecurityProvider
+	infileIndexSchemaMap map[string]InFileIndex
+	Locker               locker.Locker
+}
+
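+// InFileIndex describes the in-file index schema for one fileType.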
+type InFileIndex struct {
+	FileType    string             `json:"fileType"`
+	IndexFields []InFileIndexField `json:"indexFields"`
+}
+
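+// InFileIndexField maps an index field name to the gjson query used to extract its value from a record.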
+type InFileIndexField struct {
+	FieldName string `json:"fieldName"`
+	Query     string `json:"query"`
+}
+
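+// NewPackFile opens (or creates) a pack file at filePath with the given in-file index schema, security provider and locker, and initializes the file headers.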
+func NewPackFile(filePath string, infileIndexSchemaMap map[string]InFileIndex, securityProvider securityprovider.SecurityProvider, locker locker.Locker) (*PackFile, error) {
+	if filePath == "" {
+		return nil, errormdl.Wrap("please provide valid filepath")
+	}
+	if locker == nil {
+		return nil, errormdl.Wrap("please provide locker")
+	}
+	path, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+	dir, _ := filepath.Split(path)
+	if dir != "" {
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to open file: " + err.Error())
+	}
+	packFile := PackFile{
+		FilePath:         filePath,
+		Fp:               f,
+		SecurityProvider: securityProvider,
+		Locker:           locker,
+	}
+	if infileIndexSchemaMap == nil {
+		packFile.infileIndexSchemaMap = make(map[string]InFileIndex)
+	} else {
+		packFile.infileIndexSchemaMap = infileIndexSchemaMap
+	}
+	err = initializeFile(f)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to create pack file: " + err.Error())
+	}
+	return &packFile, nil
+}
+
+func (p *PackFile) Close() error {
+	return p.Fp.Close()
+}
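+
+// Write appends the record to the pack file and adds a corresponding row to the in-file index for its fileType.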
+func (p *PackFile) Write(rs *gjson.Result) (err error) {
+	// filePath := p.Fp.Name()
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return errormdl.Wrap("please specify fileType")
+	}
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+	f := p.Fp
+	infileIndexSchema, ok := p.infileIndexSchemaMap[fileType]
+	if !ok {
+		return errormdl.Wrap("infileIndex schema for specified fileType not found: " + fileType)
+	}
+
+	indexRowJSON, err := CreateIndexJSON(infileIndexSchema.IndexFields, rs)
+	if err != nil {
+		return err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+	fileHash, err := securitymdl.GetHash(rs.String())
+	if err != nil {
+		return err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+	previousIndexData := "[]"
+	if p.InfileIndexRows == nil {
+		previousIndexData, err = getInFileIndexData(f)
+		if err != nil {
+			return err
+		}
+	} else {
+		previousIndexData = p.InfileIndexRows.String()
+	}
+	// loggermdl.LogDebug("previous index data", previousIndexData)
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+	dataString := rs.String()
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		return err
+	}
+	// write data
+	dataSize, err := addFileDataInFile(f, footerStartOffset, dataString, true, rs, p.SecurityProvider)
+	if err != nil {
+		return err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	// append new entry in infile index
+	parsedindexRowJSON := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+	// updating infile index
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return err
+	}
+
+	footerNewOffset := footerStartOffset + dataSize
+	if err = setFooterOffset(f, footerNewOffset); err != nil {
+		return err
+	}
+	if err = setFooterSize(f, int64(len(updatedIndexData))); err != nil {
+		return err
+	}
+	if err = setIndexDataInFile(f, footerNewOffset, updatedIndexData); err != nil {
+		return err
+	}
+	if err = setFileStatusFlag(f, fileStatusReady); err != nil {
+		return err
+	}
+	err = f.Sync()
+	if err != nil {
+		return err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	p.InfileIndexRows = &updatedIndexDataObj
+	return nil
+}
+
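+// Read returns a JSON array of all records whose in-file index rows match the given queries.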
+func (p *PackFile) Read(queries []string, data *gjson.Result) (string, error) {
+	filePath := p.Fp.Name()
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return "", errormdl.Wrap("file not found at:" + filePath)
+	}
+
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+	indexDataString := ""
+	var err error
+	if p.InfileIndexRows == nil {
+		indexDataString, err = getInFileIndexData(p.Fp)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", filePath, err)
+			return "", err
+		}
+	} else {
+		indexDataString = p.InfileIndexRows.String()
+	}
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	for i := 0; i < len(queries); i++ {
+		indexRows = indexRows.Get(queries[i] + "#")
+	}
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	indexRows.ForEach(func(key, indexRow gjson.Result) bool {
+		// read files
+		startOffSet := indexRow.Get("startOffset").Int()
+		dataSize := indexRow.Get("dataSize").Int()
+		if startOffSet == 0 || dataSize == 0 {
+			return true
+		}
+		dataByte := []byte{'{', '}'}
+		var err error
+		// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
+		dataByte, err = getFileDataFromPack(p.Fp, startOffSet, dataSize, data, p.SecurityProvider)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		_, err = sb.Write(dataByte)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		sb.WriteString(",")
+
+		return true // keep iterating
+	})
+	sb.WriteString("]")
+	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
+	return finalResult, nil
+}
+
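+// Update applies rs to every record matching the queries and returns the updated records as a JSON array.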
+func (p *PackFile) Update(queries []string, rs *gjson.Result) (gjson.Result, error) {
+	// check fileType index availability
+	// check is data present
+	// if data present
+	// then calculate size of updated data
+	// if size is less than or equal to previous data size
+	// then write at the same location
+	// else if size of updated data is more than existing data then append it to end of data
+	// update startOffset and data size of file in index row
+	// update footer offset and footer size
+	updatedData := gjson.Result{}
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return updatedData, errormdl.Wrap("please specify fileType")
+	}
+	_, ok := p.infileIndexSchemaMap[fileType]
+	if !ok {
+		return updatedData, errormdl.Wrap("infileIndex schema for specified fileType not found: " + fileType)
+	}
+	if !filemdl.FileAvailabilityCheck(p.Fp.Name()) {
+		return updatedData, errormdl.Wrap("file not found: " + p.Fp.Name())
+	}
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+
+	indexDataString := "[]"
+	var err error
+	if p.InfileIndexRows == nil {
+		indexDataString, err = getInFileIndexData(p.Fp)
+		if err != nil {
+			loggermdl.LogError(err)
+			return updatedData, err
+		}
+	} else {
+		indexDataString = p.InfileIndexRows.String()
+	}
+	indexRows := gjson.Parse(indexDataString)
+	indexRecordsToUpdate := indexRows
+	for _, query := range queries {
+		indexRecordsToUpdate = indexRecordsToUpdate.Get(query + "#")
+	}
+
+	indexRecordsToUpdateObjs := indexRecordsToUpdate.Array()
+	if len(indexRecordsToUpdateObjs) == 0 {
+		return updatedData, errormdl.Wrap("no data found")
+	}
+	resultArrayStr := "[]"
+	var updatedInfileIndex *gjson.Result
+	var result *gjson.Result
+	for _, recordToUpdateIndexRow := range indexRecordsToUpdateObjs {
+		result, updatedInfileIndex, err = updateSingleRecordInPackFileUsingFp(p.Fp, recordToUpdateIndexRow, updatedInfileIndex, rs, p.SecurityProvider)
+		if err != nil {
+			return updatedData, errormdl.Wrap("fail to update data" + err.Error())
+		}
+		resultArrayStr, _ = sjson.Set(resultArrayStr, "-1", result.Value())
+	}
+	resultData := gjson.Parse(resultArrayStr)
+	return resultData, nil
+}
+
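+// Remove deletes the matching in-file index rows and returns the number of records removed; the record data itself is not rewritten.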
+func (p *PackFile) Remove(queries []string) (recordsDeletedCnt int, err error) {
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+
+	indexDataStr := "[]"
+	if p.InfileIndexRows == nil {
+		indexDataStr, err = getInFileIndexData(p.Fp)
+		if err != nil {
+			return recordsDeletedCnt, err
+		}
+	} else {
+		indexDataStr = p.InfileIndexRows.String()
+	}
+
+	indexData := gjson.Parse(indexDataStr)
+	indexRecordsToDelete := indexData
+	// loggermdl.LogDebug("indexRecordsToDelete file type", indexRecordsToDelete)
+	for _, query := range queries {
+		indexRecordsToDelete = indexRecordsToDelete.Get(query + "#")
+	}
+	indexRowsToDelete := indexRecordsToDelete.Array()
+	if len(indexRowsToDelete) == 0 {
+		loggermdl.LogError("ErrNoDataFound")
+		return recordsDeletedCnt, errormdl.Wrap("not found")
+	}
+	updatedIndexRecords := indexData
+	for _, indexRowToRemove := range indexRowsToDelete {
+		updatedIndexRecords, err = removeIndexRow(updatedIndexRecords, indexRowToRemove.String())
+		if err != nil {
+			loggermdl.LogError("fail to delete record:", err)
+			return recordsDeletedCnt, errormdl.Wrap("fail to delete record:" + err.Error())
+		}
+		recordsDeletedCnt++
+	}
+
+	footerOffset := getFooterOffset(p.Fp)
+	if footerOffset == -1 {
+		return recordsDeletedCnt, errormdl.Wrap("fail to fetch infile index offset")
+	}
+	newIndexDataSize := len(updatedIndexRecords.String())
+	err = setFileStatusFlag(p.Fp, fileStatusUpdatingIndex)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+	err = setIndexDataInFile(p.Fp, footerOffset, updatedIndexRecords.String())
+	if err != nil {
+		loggermdl.LogError("fail to update infile index data :", err)
+		return recordsDeletedCnt, err
+	}
+	err = setFileStatusFlag(p.Fp, fileStatusReady)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+	p.InfileIndexRows = &updatedIndexRecords
+	return recordsDeletedCnt, setFooterSize(p.Fp, int64(newIndexDataSize))
+}
+
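+// WriteMedia appends raw media bytes to the pack file, records an index row of fileType "Media" and returns the generated recordID.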
+func (p *PackFile) WriteMedia(mediaData []byte, rs *gjson.Result) (recordID string, err error) {
+
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+
+	fileHash, err := securitymdl.GetHash(rs.String())
+	if err != nil {
+		loggermdl.LogError("error writing to bucket: ", err)
+		return recordID, err
+	}
+	previousIndexData := "[]"
+	if p.InfileIndexRows == nil {
+		previousIndexData, err = getInFileIndexData(p.Fp)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+	} else {
+		previousIndexData = p.InfileIndexRows.String()
+	}
+	// loggermdl.LogDebug("previous index data", previousIndexData)
+	footerStartOffset := getFooterOffset(p.Fp)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, errormdl.Wrap("fail to fetch infile index data")
+	}
+	// write data
+	dataSize, err := addByteDataInFile(p.Fp, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+
+	recordID = guidmdl.GetGUID()
+	indexRowJSON := ""
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", "Media")
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	// append new entry in infile index
+	parsedindexRowJSON := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+
+	footerNewOffset := footerStartOffset + dataSize
+	if err = setFooterOffset(p.Fp, footerNewOffset); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	if err = setFooterSize(p.Fp, int64(len(updatedIndexData))); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	if err = setIndexDataInFile(p.Fp, footerNewOffset, updatedIndexData); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	if err = setFileStatusFlag(p.Fp, fileStatusReady); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	err = p.Fp.Sync()
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	p.InfileIndexRows = &updatedIndexDataObj
+	return recordID, nil
+
+}
+
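+// UpdateMedia - writes the new media bytes for an existing recordID and updates its infile-index row; returns an error if the record is not found.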
+func (p *PackFile) UpdateMedia(recordID string, mediaData []byte, rs *gjson.Result) (err error) {
+	isValid, err := isValidPackFile(p.Fp)
+	if err != nil {
+		return err
+	}
+	if !isValid {
+		return errormdl.Wrap("file not found at:" + p.Fp.Name())
+	}
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+	indexDataString := "[]"
+
+	if p.InfileIndexRows == nil {
+
+		indexDataString, err = getInFileIndexData(p.Fp)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", p.Fp.Name(), err)
+			return err
+		}
+	} else {
+		indexDataString = p.InfileIndexRows.String()
+	}
+
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	for index, indexRow := range indexData.Array() {
+		r := indexRow.Get("recordID").String()
+		if r != "" && r == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+	if foundAtIndex == -1 {
+		loggermdl.LogError("no data found to update: ", recordID)
+		return errormdl.Wrap("no data found to update: " + recordID)
+	}
+
+	footerStartOffset := getFooterOffset(p.Fp)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+
+	// write data
+	dataSize, err := addByteDataInFile(p.Fp, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	indexRowJSON := foundIndexRow.String()
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	updatedIndexData, _ := sjson.Set(indexDataString, strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+
+	footerNewOffset := footerStartOffset + dataSize
+	if err = setFooterOffset(p.Fp, footerNewOffset); err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	if err = setFooterSize(p.Fp, int64(len(updatedIndexData))); err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	if err = setIndexDataInFile(p.Fp, footerNewOffset, updatedIndexData); err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	if err = setFileStatusFlag(p.Fp, fileStatusReady); err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = p.Fp.Sync()
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	p.InfileIndexRows = &updatedIndexDataObj
+	return err
+}
+
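+// UpsertMedia - updates the media for recordID if an index row already exists, otherwise appends it as a new entry; the pack file is initialized with headers when empty.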
+func (p *PackFile) UpsertMedia(recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	isValid, err := isValidPackFile(p.Fp)
+	if err != nil {
+		return recordID, err
+	}
+	if !isValid {
+		err := InitializeWithHeaderUsingFp(p.Fp)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+	}
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	p.Locker.Lock()
+
+	defer func() {
+		p.Locker.Unlock()
+	}()
+	f := p.Fp
+	indexDataString := "[]"
+
+	if p.InfileIndexRows == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", f.Name(), err)
+			return recordID, err
+		}
+	} else {
+		indexDataString = p.InfileIndexRows.String()
+	}
+
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	for index, indexRow := range indexData.Array() {
+		if indexRow.Get("recordID").String() == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, errormdl.Wrap("fail to fetch infile index data")
+	}
+	// TODO: write at previous location
+	// write data
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	indexRowJSON := foundIndexRow.String()
+
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", "Media")
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	updatedIndexData, _ := sjson.Set(indexDataString, strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+
+	footerNewOffset := footerStartOffset + dataSize
+	if err = setFooterOffset(f, footerNewOffset); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	if err = setFooterSize(f, int64(len(updatedIndexData))); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	if err = setIndexDataInFile(f, footerNewOffset, updatedIndexData); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	if err = setFileStatusFlag(f, fileStatusReady); err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	err = f.Sync()
+	if err != nil {
+		return recordID, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	p.InfileIndexRows = &updatedIndexDataObj
+	return recordID, nil
+}
+
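+// ReadMedia - returns the media bytes stored for recordID along with its metadata (requiredData and the current infile index).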
+func (p *PackFile) ReadMedia(recordID string) ([]byte, *gjson.Result, error) {
+	dataByte := []byte{}
+	var metaData *gjson.Result
+	isValid, err := isValidPackFile(p.Fp)
+	if err != nil {
+		return dataByte, metaData, err
+	}
+	f := p.Fp
+	if !isValid {
+		return dataByte, metaData, errormdl.Wrap("file not found at:" + f.Name())
+	}
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+
+	f = p.Fp
+	indexDataString := "[]"
+	if p.InfileIndexRows == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", f.Name(), err)
+			return dataByte, metaData, err
+		}
+	} else {
+		indexDataString = p.InfileIndexRows.String()
+	}
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	if indexRows.String() == "" {
+		loggermdl.LogError("data not found for recordId: ", recordID)
+		return dataByte, metaData, errormdl.Wrap("data not found")
+	}
+	indexRow := indexRows.Get("0")
+	startOffSet := indexRow.Get("startOffset").Int()
+	dataSize := indexRow.Get("dataSize").Int()
+	if startOffSet == 0 || dataSize == 0 {
+		return dataByte, metaData, errormdl.Wrap("data not found")
+	}
+	dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, nil, nil)
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, metaData, err
+	}
+
+	data, _ := sjson.Set("", "requiredData", indexRow.Get("requiredData").String())
+	data, _ = sjson.Set(data, "infileIndex", indexData.String())
+	metaDataObj := gjson.Parse(data)
+	return dataByte, &metaDataObj, nil
+}
+
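+// RemoveMedia - removes the infile-index entry for the given recordID.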
+func (p *PackFile) RemoveMedia(recordID string) error {
+	queries := []string{`#[recordID=` + recordID + `]`}
+	_, err := p.Remove(queries)
+	return err
+}
+
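+// Reorg - copies all live records into a temporary file and atomically replaces the original pack file, reclaiming space left by deleted or superseded data.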
+func (p *PackFile) Reorg() error {
+
+	f := p.Fp
+	p.Locker.Lock()
+	defer func() {
+		p.Locker.Unlock()
+	}()
+	loggermdl.LogDebug("reorg FilePath", p.FilePath)
+	_, sourceFileName := filepath.Split(p.FilePath)
+	desFileName := sourceFileName + "_" + strconv.FormatInt(time.Now().Unix(), 10)
+	tempFilepath, err := filepath.Abs(filepath.Join(filemdl.TempDir, desFileName))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	dir, _ := filepath.Split(tempFilepath)
+	if dir != "" {
+		createError := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(createError) != nil {
+			return errormdl.CheckErr(createError)
+		}
+	}
+	fpTemp, err := os.OpenFile(tempFilepath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		fpTemp.Close()
+	}()
+
+	err = InitializeWithHeaderUsingFp(fpTemp)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	infileIndexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	infileIndexRows := gjson.Parse(infileIndexData)
+	// if len(infileIndexRows.Array()) == 0 {
+	// 	return nil
+	// }
+	tempFileFooterStartOffset := getFooterOffset(fpTemp)
+	if tempFileFooterStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+	updatedIndexRowStr := "[]"
+	for _, infileIndex := range infileIndexRows.Array() {
+		startOffset, err := strconv.Atoi(infileIndex.Get("startOffset").String())
+		if err != nil {
+			loggermdl.LogError("Error occurred while fetching startOffset", err)
+			return err
+		}
+		dataSize, err := strconv.Atoi(infileIndex.Get("dataSize").String())
+		if err != nil {
+			loggermdl.LogError("Error occurred while fetching dataSize", err)
+			return err
+		}
+
+		byteArr, err := getFileDataFromPack(f, int64(startOffset), int64(dataSize), nil, nil)
+		if err != nil {
+			loggermdl.LogError("Error occurred while reading file data from offset", err)
+			return err
+		}
+		byteCnt, err := addByteDataInFile(fpTemp, tempFileFooterStartOffset, byteArr, false)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		indexRowJSON, _ := sjson.Set(infileIndex.String(), "startOffset", tempFileFooterStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", byteCnt)
+		indexRowJSONObj := gjson.Parse(indexRowJSON)
+		updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "-1", indexRowJSONObj.Value())
+		tempFileFooterStartOffset = tempFileFooterStartOffset + byteCnt
+	}
+
+	err = setFooterOffset(fpTemp, tempFileFooterStartOffset)
+	if err != nil {
+		return err
+	}
+	err = setFooterSize(fpTemp, int64(len(updatedIndexRowStr)))
+	if err != nil {
+		return err
+	}
+	err = setIndexDataInFile(fpTemp, tempFileFooterStartOffset, updatedIndexRowStr)
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Sync()
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Close()
+	if err != nil {
+		return err
+	}
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	return filemdl.AtomicReplaceFile(tempFilepath, p.FilePath)
+}
+
+// CreateIndexJSON - builds a JSON object of infile-index field values extracted from the given record.
+func CreateIndexJSON(indexFields []InFileIndexField, rs *gjson.Result) (string, error) {
+	json := `{}`
+	for _, indexField := range indexFields {
+		val := rs.Get(indexField.Query).Value()
+		// validation
+		if val == nil {
+			return "", errormdl.Wrap("please provide value for index field: " + indexField.Query)
+		}
+		json, _ = sjson.Set(json, indexField.FieldName, val)
+	}
+	return json, nil
+}
+func updateIndexRow(indexRows *gjson.Result, previousIndexRow gjson.Result, updatedRow gjson.Result) (*gjson.Result, error) {
+	indexRowObjs := indexRows.Array()
+	if len(indexRowObjs) == 0 {
+		return nil, errormdl.Wrap("no data found to update")
+	}
+	// loggermdl.LogDebug("indexRows", indexRows)
+
+	prevIndexRowString := previousIndexRow.String()
+	foundRowIndex := -1
+
+	for index, indexRowObj := range indexRowObjs {
+		if indexRowObj.String() != "" && prevIndexRowString != "" && indexRowObj.String() == prevIndexRowString {
+			foundRowIndex = index
+			break
+		}
+	}
+
+	if foundRowIndex == -1 {
+		return nil, errormdl.Wrap("no record found to update")
+	}
+	var err error
+	updatedIndexDataString := indexRows.String()
+	// for _, foundRowIndex := range foundRowIndexes {
+	updatedIndexDataString, err = sjson.Set(updatedIndexDataString, strconv.Itoa(foundRowIndex), updatedRow.Value())
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, errormdl.Wrap("failed to update index rows")
+	}
+	// }
+	updatedIndexData := gjson.Parse(updatedIndexDataString)
+	return &updatedIndexData, nil
+}
+func removeIndexRow(indexRows gjson.Result, indexRowToDelete string) (gjson.Result, error) {
+
+	indexRowObjs := indexRows.Array()
+	if len(indexRowObjs) == 0 {
+		return indexRows, errormdl.Wrap("no data found to update")
+	}
+	// loggermdl.LogDebug("indexRows", indexRows)
+
+	foundIndexToDelete := -1
+	for index, indexRowObj := range indexRowObjs {
+		if indexRowObj.String() != "" && indexRowToDelete != "" && indexRowObj.String() == indexRowToDelete {
+			foundIndexToDelete = index
+			break
+		}
+	}
+
+	if foundIndexToDelete == -1 {
+		return indexRows, errormdl.Wrap("no record found to delete")
+	}
+	var err error
+	updatedIndexDataString, err := sjson.Delete(indexRows.String(), strconv.Itoa(foundIndexToDelete))
+	if err != nil {
+		loggermdl.LogError(err)
+	}
+	return gjson.Parse(updatedIndexDataString), nil
+}
+
+func isValidPackFile(f *os.File) (bool, error) {
+	if f == nil {
+		return false, errormdl.Wrap("file pointer not valid")
+	}
+	isFilePresent := filemdl.FileAvailabilityCheck(f.Name())
+	if !isFilePresent {
+		loggermdl.LogDebug(isFilePresent)
+		return false, nil
+	}
+	info, err := f.Stat()
+	if err != nil {
+		loggermdl.LogDebug(err)
+		return false, err
+	}
+	if info.Size() == 0 {
+		return false, nil
+	}
+	return true, nil
+}
+
+func updateSingleRecordInPackFileUsingFp(f *os.File, recordToUpdateIndexRow gjson.Result, infileIndex, rs *gjson.Result, securityProvider securityprovider.SecurityProvider) (*gjson.Result, *gjson.Result, error) {
+	fileStartOffset := recordToUpdateIndexRow.Get("startOffset").Int()
+	dataSize := recordToUpdateIndexRow.Get("dataSize").Int()
+	if fileStartOffset == 0 || dataSize == 0 {
+		loggermdl.LogError("index row details incorrect - start offset :", fileStartOffset, " data size :", dataSize)
+		return nil, nil, errormdl.Wrap("index row details incorrect")
+	}
+
+	existingData, err := getFileDataFromPack(f, fileStartOffset, dataSize, rs, securityProvider)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	updatedDataStr := strings.TrimSpace(string(existingData))
+	// updating existing data
+	rs.ForEach(func(key, val gjson.Result) bool {
+		updatedDataStr, _ = sjson.Set(updatedDataStr, key.String(), val.Value())
+		return true
+	})
+	newDataSize := int64(len(updatedDataStr))
+	footerStartOffset := getFooterOffset(f)
+	updatedFooterOffset := footerStartOffset
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		return nil, nil, err
+	}
+	indexDataString := "[]"
+	if infileIndex == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, nil, err
+		}
+	} else {
+		indexDataString = infileIndex.String()
+	}
+	existingIndexRows := gjson.Parse(indexDataString)
+	if len(strings.TrimSpace(updatedDataStr)) <= len(strings.TrimSpace(string(existingData))) {
+		newDataSize, err = addFileDataInFile(f, fileStartOffset, updatedDataStr, false, rs, securityProvider)
+		if err != nil {
+			return nil, nil, err
+		}
+	} else {
+		newDataSize, err = addFileDataInFile(f, footerStartOffset, updatedDataStr, true, rs, securityProvider)
+		if err != nil {
+			return nil, nil, err
+		}
+		updatedFooterOffset = footerStartOffset + newDataSize
+		fileStartOffset = footerStartOffset
+	}
+
+	updatedIndexRowStr := recordToUpdateIndexRow.String()
+
+	recordToUpdateIndexRow.ForEach(func(key, value gjson.Result) bool {
+		indexFieldKey := key.String()
+		if rs.Get(indexFieldKey).Exists() {
+			updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, indexFieldKey, rs.Get(indexFieldKey).Value())
+		}
+		return true
+	})
+	fileHash, err := securitymdl.GetHash(updatedDataStr)
+	if err != nil {
+		return nil, nil, err
+	}
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "startOffset", fileStartOffset)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "dataSize", newDataSize)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "fileHash", fileHash)
+	updatedIndexRows, err := updateIndexRow(&existingIndexRows, recordToUpdateIndexRow, gjson.Parse(updatedIndexRowStr))
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setIndexDataInFile(f, updatedFooterOffset, updatedIndexRows.String())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFooterOffset(f, updatedFooterOffset)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFooterSize(f, int64(len(updatedIndexRows.String())))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = f.Sync()
+	if err != nil {
+		return nil, nil, err
+	}
+	updatedData := gjson.Parse(updatedDataStr)
+	return &updatedData, updatedIndexRows, nil
+}
+
+func initializeFile(fp *os.File) (err error) {
+	filePath := fp.Name()
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	info, err := fp.Stat()
+	if err != nil {
+		return
+	}
+	if !isFilePresent || info.Size() == 0 {
+		dir, _ := filepath.Split(filePath)
+		err = filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			err = errormdl.CheckErr(err)
+			return
+		}
+
+		err = InitializeWithHeaderUsingFp(fp)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+	return
+}
+
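+// InitializeWithHeaderUsingFp - writes the initial header block (status flags, footer offset, footer size, file hash and last-updated timestamp) to a fresh pack file.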
+func InitializeWithHeaderUsingFp(f *os.File) error {
+	_, err := f.WriteAt([]byte(strconv.Itoa(fileStatusReady)), fileStatusOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// isFile ready for upload  =0
+	_, err = f.WriteAt([]byte("0"), isReadyForUploadOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isUpdatedAndNotCommitedOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReorgRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReindexRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// _, err = f.WriteAt([]byte(appendPaddingToNumber(sizeReservedForHeaders, 15)), footerOffsetInFile)
+	err = setFooterOffset(f, sizeReservedForHeaders+int64(len(lineBreak)))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = setFooterSize(f, 0)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("filehash"), filehashOffest)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
+	_, err = f.WriteAt([]byte(timestamp), lastUpdatedOffset)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("\r\n"), sizeReservedForHeaders)
+	return err
+}
+
+func getInFileIndexData(f *os.File) (string, error) {
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return "[]", errormdl.Wrap("fail to fetch infile index data")
+	}
+	footerSize, err := getFooterSize(f)
+	if err != nil {
+		return "[]", err
+	}
+	if footerSize == 0 {
+		return "[]", nil
+	}
+	dataByte, err := filemdl.ReadFileFromOffset(f, footerStartOffset, footerSize)
+	if err != nil {
+		if err.Error() == "EOF" {
+			loggermdl.LogError("EOF")
+			return "[]", nil
+		}
+		loggermdl.LogError("error while fetching index data", err)
+		return "[]", err
+	}
+	return string(dataByte), nil
+}
+
+func addFileDataInFile(f *os.File, offset int64, data string, breakLine bool, rs *gjson.Result, encrypter securityprovider.SecurityProvider) (int64, error) {
+	dataBytes := []byte(data)
+	var err error
+	if encrypter != nil {
+		dataBytes, err = encrypter.Encrypt(dataBytes, f.Name(), rs)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	if breakLine {
+		dataBytes = append(dataBytes, []byte(lineBreak)...)
+	}
+
+	return filemdl.WriteFileAtOffset(f, offset, dataBytes)
+}
+
+func addByteDataInFile(f *os.File, offset int64, dataBytes []byte, breakLine bool) (int64, error) {
+	var err error
+
+	if breakLine {
+		dataBytes = append(dataBytes, []byte(lineBreak)...)
+	}
+	dataSize, err := filemdl.WriteFileAtOffset(f, offset, dataBytes)
+	return dataSize, err
+}
+
+func getFileDataFromPack(f *os.File, startOffset, dataSize int64, rs *gjson.Result, decrypter securityprovider.SecurityProvider) ([]byte, error) {
+
+	ba, err := filemdl.ReadFileFromOffset(f, startOffset, dataSize)
+
+	if decrypter != nil {
+		ba, err = decrypter.Decrypt(ba, f.Name(), rs)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ba, err
+}
+
+func appendPaddingPadValue(value int64, padNumber int) string {
+	no := strconv.Itoa(padNumber)
+	return fmt.Sprintf("%0"+no+"d", value)
+}
+
+func getFileStatus(f *os.File) (int, error) {
+
+	data, err := filemdl.ReadFileFromOffset(f, fileStatusOffsetInFile, 1)
+	if err != nil {
+		loggermdl.LogError(err)
+		return -1, err
+	}
+	status, err := strconv.Atoi(string(data))
+	return status, err
+}
+
+func getFooterOffset(f *os.File) int64 {
+	data, err := filemdl.ReadFileFromOffset(f, footerOffsetInFile, footerOffsetReservedSize)
+	if err != nil {
+		loggermdl.LogError(err)
+		return -1
+	}
+	footerOffset, err := strconv.Atoi(string(data))
+	if err != nil {
+		loggermdl.LogError("err", err)
+		return -1
+	}
+
+	return int64(footerOffset)
+}
+
+func setFileStatusFlag(f *os.File, fileStatus int) error {
+	status := strconv.Itoa(fileStatus)
+	_, err := filemdl.WriteFileAtOffset(f, fileStatusOffsetInFile, []byte(status))
+	return err
+}
+
+func setFileReadyForUploadFlag(f *os.File, isReadyToUpload bool) error {
+	flagVal := strconv.FormatBool(isReadyToUpload)
+	_, err := filemdl.WriteFileAtOffset(f, isReadyForUploadOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFileUpdatedAndNotCommitedFlag(f *os.File, isUpdatedAndNotCommited bool) error {
+	flagVal := strconv.FormatBool(isUpdatedAndNotCommited)
+	_, err := filemdl.WriteFileAtOffset(f, isUpdatedAndNotCommitedOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFileReorgRequiredFlag(f *os.File, isReorgRequired bool) error {
+	flagVal := strconv.FormatBool(isReorgRequired)
+	_, err := filemdl.WriteFileAtOffset(f, isReorgRequiredOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFileReindexRequiredFlag(f *os.File, isReindexRequired bool) error {
+	flagVal := strconv.FormatBool(isReindexRequired)
+	_, err := filemdl.WriteFileAtOffset(f, isReindexRequiredOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFooterOffset(f *os.File, footerOffset int64) error {
+	footerOffestInString := appendPaddingPadValue(footerOffset, 15)
+	_, err := filemdl.WriteFileAtOffset(f, footerOffsetInFile, []byte(footerOffestInString))
+	return err
+}
+
+func setFooterSize(f *os.File, footerSize int64) error {
+	footerSizeInString := appendPaddingPadValue(footerSize, 15)
+	_, err := filemdl.WriteFileAtOffset(f, footerSizeOffset, []byte(footerSizeInString))
+	return err
+}
+
+func getFooterSize(f *os.File) (int64, error) {
+	data, err := filemdl.ReadFileFromOffset(f, footerSizeOffset, 15)
+	if err != nil {
+		return -1, err
+	}
+	footerSize, err := strconv.Atoi(string(data))
+	if err != nil {
+		loggermdl.LogError("err", err)
+		return -1, err
+	}
+
+	return int64(footerSize), nil
+}
+
+func setIndexDataInFile(f *os.File, footerOffset int64, indexData string) error {
+	_, err := filemdl.WriteFileAtOffset(f, footerOffset, []byte(indexData))
+	return err
+}
diff --git a/dalmdl/corefdb/filetype/simple.go b/dalmdl/corefdb/filetype/simple.go
new file mode 100644
index 0000000000000000000000000000000000000000..dc9439434159de14c6fb8718e0e1e9f3a77bf136
--- /dev/null
+++ b/dalmdl/corefdb/filetype/simple.go
@@ -0,0 +1,141 @@
+package filetype
+
+import (
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+type SimpleFile struct {
+	FilePath         string
+	Fp               *os.File
+	IsLazyEnable     bool
+	securityProvider securityprovider.SecurityProvider
+	Locker           locker.Locker
+}
+
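+// NewSimpleFile - opens (creating if required) the file at filePath and returns a SimpleFile guarded by the provided locker.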
+func NewSimpleFile(filePath string, securityProvider securityprovider.SecurityProvider, locker locker.Locker) (*SimpleFile, error) {
+	if filePath == "" {
+		return nil, errormdl.Wrap("please provide valid filepath")
+	}
+	if locker == nil {
+		return nil, errormdl.Wrap("please provide locker")
+	}
+	path, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+	dir, _ := filepath.Split(path)
+	if dir != "" {
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to open file: " + err.Error())
+	}
+	file := SimpleFile{
+		FilePath:         filePath,
+		Fp:               f,
+		securityProvider: securityProvider,
+		Locker:           locker,
+	}
+	return &file, nil
+}
+
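+// Write - encrypts the record when a security provider is configured and overwrites the file contents.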
+func (s *SimpleFile) Write(rs *gjson.Result) (err error) {
+	dataBytes := []byte(rs.String())
+	if s.securityProvider != nil {
+		dataBytes, err = s.securityProvider.Encrypt(dataBytes, s.Fp.Name(), rs)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+	s.Locker.Lock()
+	defer func() {
+		s.Locker.Unlock()
+	}()
+	err = filemdl.WriteFileUsingFp(s.Fp, dataBytes, true, false)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	return nil
+}
+
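+// Read - returns the file contents, decrypted when a security provider is configured.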
+func (s *SimpleFile) Read(data *gjson.Result) ([]byte, error) {
+	s.Locker.Lock()
+	defer func() {
+		s.Locker.Unlock()
+	}()
+	ba, err := filemdl.ReadFileUsingFp(s.Fp)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+
+	if len(ba) == 0 {
+		return ba, nil
+	}
+
+	if s.securityProvider != nil {
+		ba, err = s.securityProvider.Decrypt(ba, s.Fp.Name(), data)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+	}
+	return ba, nil
+}
+
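+// Update - merges the provided fields into the existing JSON content and writes the result back to the file.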
+func (s *SimpleFile) Update(rs *gjson.Result) (gjson.Result, error) {
+	data, err := s.Read(rs)
+	if err != nil {
+		return gjson.Result{}, err
+	}
+	existingDataStr := string(data)
+	rs.ForEach(func(key, val gjson.Result) bool {
+		existingDataStr, _ = sjson.Set(existingDataStr, key.String(), val.Value())
+		return true
+	})
+
+	updatedData := gjson.Parse(existingDataStr)
+	err = s.Write(&updatedData)
+	return updatedData, err
+}
+
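+// Remove - truncates the file contents; returns an error if the file does not exist.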
+func (s *SimpleFile) Remove() error {
+	s.Locker.Lock()
+	defer func() {
+		s.Locker.Unlock()
+	}()
+	if filemdl.FileAvailabilityCheck(s.Fp.Name()) {
+		err := s.Fp.Truncate(0)
+		if err != nil {
+			return err
+		}
+
+		if _, err := s.Fp.Seek(0, io.SeekStart); err != nil {
+			return err
+		}
+		return nil
+	}
+	return errors.New("not found")
+}
+
+func (s *SimpleFile) Close() error {
+	return s.Fp.Close()
+}
diff --git a/dalmdl/corefdb/index.go b/dalmdl/corefdb/index/index.go
similarity index 53%
rename from dalmdl/corefdb/index.go
rename to dalmdl/corefdb/index/index.go
index 278af27306d401064df77c5516ddf4b841b05a91..6bbbeb0f3e1e7d7398ff61487b47b6d960a5b182 100644
--- a/dalmdl/corefdb/index.go
+++ b/dalmdl/corefdb/index/index.go
@@ -1,9 +1,15 @@
-package corefdb
+package index
 
 import (
 	"path/filepath"
 	"strings"
 
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/lazycache"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/lazywriter"
+
 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
@@ -12,14 +18,21 @@ import (
 	"github.com/tidwall/sjson"
 )
 
+const (
+	LineBreak            = "\r\n"
+	IndexKeyValSeperator = "="
+)
+
 // Index - Index
 type Index struct {
-	indexStore     indexStore
-	IndexID        string       `json:"indexId"`
-	IndexNameQuery string       `json:"indexNameQuery"`
-	BucketSequence []string     `json:"bucketSequence"`
-	IndexFields    []IndexField `json:"indexFields"`
-	IsDynamicName  bool         `json:"isDynamicName"`
+	indexStore       IndexStore
+	IndexID          string       `json:"indexId"`
+	IndexNameQuery   string       `json:"indexNameQuery"`
+	BucketSequence   []string     `json:"bucketSequence"`
+	IndexFields      []IndexField `json:"indexFields"`
+	IsDynamicName    bool         `json:"isDynamicName"`
+	IndexFilePath    string
+	SecurityProvider securityprovider.SecurityProvider
 }
 
 // IndexField - IndexField
@@ -28,27 +41,33 @@ type IndexField struct {
 	Query     string `json:"query"`
 }
 
-func NewIndex(indexID, indexNameQuery string, IsDynamicName bool) (Index, error) {
+func NewIndex(indexID, indexNameQuery string, IsDynamicName bool, indexFilePath string) (*Index, error) {
 	idx := Index{
-		IndexID:        indexID,
-		IndexNameQuery: indexNameQuery,
-		IsDynamicName:  IsDynamicName,
+		IndexID:          indexID,
+		IndexNameQuery:   indexNameQuery,
+		IsDynamicName:    IsDynamicName,
+		IndexFilePath:    indexFilePath,
+		SecurityProvider: securityprovider.New(securityprovider.SecurityConfig{}),
 	}
+
 	var err error
 	idx.indexStore, err = NewStore()
 	if err != nil {
-		return idx, err
+		return nil, err
 	}
-	return idx, nil
-}
-
-// SetBucket - set bucket in index
-func (i *Index) SetBucket(bucket *Bucket) *Index {
-	if bucket != nil {
-		i.BucketSequence = append(i.BucketSequence, bucket.BucketID)
-		bucket.Indices = append(bucket.Indices, i.IndexID)
+	err = idx.LoadIndexEntriesFromFile()
+	if err != nil {
+		return nil, err
 	}
-	return i
+	lazyObj := lazywriter.LazyCacheObject{
+		FileName:      indexFilePath,
+		Identifier:    indexID,
+		InterfaceData: &idx,
+		SaveFn:        lazyCallBackFnSaveIndex,
+	}
+
+	lazycache.IndexLazyObjHolder.SetNoExpiration(indexID, lazyObj)
+	return &idx, nil
 }
 
 // SetFields - SetFields
@@ -68,6 +87,7 @@ func (i *Index) CreateIndex() error {
 	}
 	return nil
 }
+
 func (i *Index) ReplaceIndex() error {
 	var fns []func(a, b string) bool
 	for _, idx := range i.IndexFields {
@@ -104,15 +124,18 @@ func (i *Index) AddEntry(path string, rs *gjson.Result) error {
 		}
 		json, _ = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
 	}
+	UpdateLazyCache(i)
 	path = strings.Trim(path, string(filepath.Separator))
 	return i.indexStore.Set(path, json)
 }
 
 func (i *Index) AddEntries(keyValMap map[string]string) error {
+	UpdateLazyCache(i)
 	return i.indexStore.AddMany(keyValMap)
 }
 
 func (i *Index) Delete(path string) error {
+	UpdateLazyCache(i)
 	return i.indexStore.Delete(path)
 }
 
@@ -123,6 +146,7 @@ func (i *Index) DeleteMany(paths []string) error {
 			return err
 		}
 	}
+	UpdateLazyCache(i)
 	return nil
 }
 
@@ -130,32 +154,27 @@ func (i *Index) CloseStore() error {
 	return i.indexStore.Close()
 }
 
-// LoadFDBIndexFromFile -
-func LoadFDBIndexFromFile(indexFilePath string, fdb *FDB, indexID string) error {
-	loggermdl.LogError("LoadFDBIndexFromFile", indexFilePath)
-	index, found := fdb.GetFDBIndex(indexID)
-	if !found {
-		return errormdl.Wrap("index not found")
-	}
-	if !filemdl.FileAvailabilityCheck(indexFilePath) {
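+// LoadIndexEntriesFromFile - loads previously persisted index entries from the index file into the in-memory store.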
+func (index *Index) LoadIndexEntriesFromFile() error {
+
+	if !filemdl.FileAvailabilityCheck(index.IndexFilePath) {
 		return nil
 	}
-	fileData, err := filemdl.FastReadFile(indexFilePath)
+	fileData, err := filemdl.FastReadFile(index.IndexFilePath)
 	if err != nil {
-		loggermdl.LogError("failed to load FDB index from: ", indexFilePath)
+		loggermdl.LogError("failed to load FDB index from: ", index.IndexFilePath)
 		return err
 	}
 	if len(fileData) == 0 {
 		return nil
 	}
-	_, fileName := filepath.Split(indexFilePath)
-	fileData, err = decryptData(fileData, fileName)
+	_, fileName := filepath.Split(index.IndexFilePath)
+	fileData, err = index.SecurityProvider.Decrypt(fileData, fileName, nil)
 	if err != nil {
 		loggermdl.LogError("failed to decrypt FDB index data: ", err)
 		return errormdl.Wrap("failed to decrypt FDB index data: " + err.Error())
 	}
 	data := string(fileData)
-	indexRecords := strings.Split(data, lineBreak)
+	indexRecords := strings.Split(data, LineBreak)
 	indexDataMap := make(map[string]string)
 	for _, indexRecord := range indexRecords {
 		indexValues := strings.Split(indexRecord, IndexKeyValSeperator)
@@ -167,15 +186,12 @@ func LoadFDBIndexFromFile(indexFilePath string, fdb *FDB, indexID string) error
 	for _, idx := range index.IndexFields {
 		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
 	}
-	loggermdl.LogError("indexDataMap", indexDataMap)
 	// update index file by reading all data and updating index file
 	return index.AddEntries(indexDataMap)
 }
 
-// LogFDBIndexFile -LogFDBIndexFile
+// WriteIndexEntriesInFile - persists all in-memory index entries to the index file.
-func LogFDBIndexFile(indexFilePath string, index *Index) error {
-	// dbPath := filepath.Join(fdbPath, INDEXFOLDER)
-	// loggermdl.LogDebug("in log fdb index")
+func (index *Index) WriteIndexEntriesInFile() error {
 	dataToStore := ``
 	indeKeyValMap, err := index.GetAllEntries()
 	if err != nil {
@@ -183,17 +199,55 @@ func LogFDBIndexFile(indexFilePath string, index *Index) error {
 		return err
 	}
 	for key, value := range indeKeyValMap {
-		dataToStore = dataToStore + key + IndexKeyValSeperator + value + lineBreak
+		dataToStore = dataToStore + key + IndexKeyValSeperator + value + LineBreak
 	}
-	_, fileName := filepath.Split(indexFilePath)
+	_, fileName := filepath.Split(index.IndexFilePath)
 	var dataByteToWriteRes = []byte{}
 	var hashError error
 	if len(indeKeyValMap) > 0 {
-		dataByteToWriteRes, hashError = encryptData([]byte(dataToStore), fileName)
+		dataByteToWriteRes, hashError = index.SecurityProvider.Encrypt([]byte(dataToStore), fileName, nil)
 		if errormdl.CheckErr1(hashError) != nil {
 			return errormdl.CheckErr1(hashError)
 		}
 	}
-	// dataByteToWriteRes := []byte(dataToStore)
-	return filemdl.WriteFile(indexFilePath, dataByteToWriteRes, true, false)
+	return filemdl.WriteFile(index.IndexFilePath, dataByteToWriteRes, true, false)
+}
+
+var lazyCallBackFnSaveIndex lazywriter.SaveDataFn = func(indexID string, data *lazywriter.LazyCacheObject) {
+	index, ok := data.InterfaceData.(*Index)
+	if !ok {
+		return
+	}
+
+	err := index.WriteIndexEntriesInFile()
+	if err != nil {
+		loggermdl.LogError(err)
+		return
+	}
+}
+
+// UpdateLazyCache - updates index data in the lazy writer cache
+func UpdateLazyCache(index *Index) error {
+	// lazy cache must be present for provided indexID
+	lazyObj, ok := lazycache.IndexLazyObjHolder.Get(index.IndexID)
+	if !ok {
+		loggermdl.LogError("index not found in lazy writer cache")
+		return errormdl.Wrap("index not found in lazy writer cache")
+	}
+
+	idxLazyData, ok := lazyObj.(lazywriter.LazyCacheObject)
+	if !ok {
+		loggermdl.LogError("interface type is not lazywriter.LazyCacheObject")
+		return errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
+	}
+
+	// idxLazyData.GJSONData = index
+	idxLazyData.InterfaceData = index
+	if ok := lazycache.IndexMaster.SaveOrUpdateDataInCache(idxLazyData); !ok {
+		loggermdl.LogError("failed to update index data in lazy cache")
+		return errormdl.Wrap("failed to update index data in lazy cache")
+	}
+
+	lazycache.IndexLazyObjHolder.SetNoExpiration(index.IndexID, idxLazyData)
+	return nil
 }
diff --git a/dalmdl/corefdb/indexStore.go b/dalmdl/corefdb/index/indexstore.go
similarity index 74%
rename from dalmdl/corefdb/indexStore.go
rename to dalmdl/corefdb/index/indexstore.go
index a5ba615e06274d62b72b6a9a232b948c210cf021..500a0f59e9f5b031025b47e9b96b1c5af6a05c46 100644
--- a/dalmdl/corefdb/indexStore.go
+++ b/dalmdl/corefdb/index/indexstore.go
@@ -1,16 +1,17 @@
-package corefdb
+package index
 
 import (
 	"path/filepath"
 	"strings"
 
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
 
 	"github.com/tidwall/buntdb"
 	"github.com/tidwall/gjson"
 )
 
-type indexStore struct {
+type IndexStore struct {
 	store *buntdb.DB
 }
 
@@ -19,23 +20,25 @@ type Entry struct {
 	Value string
 }
 
-func NewStore() (indexStore, error) {
+// NewStore - returns new store object
+func NewStore() (IndexStore, error) {
 	db, err := buntdb.Open(":memory:")
 	if err != nil {
-		return indexStore{}, err
+		return IndexStore{}, err
 	}
-	store := indexStore{
+	store := IndexStore{
 		store: db,
 	}
 	return store, nil
 }
 
-func (i indexStore) Close() error {
+func (i IndexStore) Close() error {
 	return i.store.Close()
 }
-func (i *indexStore) Set(key, value string) (err error) {
+func (i *IndexStore) Set(key, value string) (err error) {
 	err = i.store.Update(func(tx *buntdb.Tx) error {
 		key = strings.ReplaceAll(key, "\\", "/")
+		loggermdl.LogError("set path", key)
 		_, _, err := tx.Set(key, value, nil)
 		if err != nil {
 			return err
@@ -44,10 +47,12 @@ func (i *indexStore) Set(key, value string) (err error) {
 	})
 	return
 }
-func (i *indexStore) AddMany(keyValMap map[string]string) (err error) {
+
+func (i *IndexStore) AddMany(keyValMap map[string]string) (err error) {
 	err = i.store.Update(func(tx *buntdb.Tx) error {
 		for key, val := range keyValMap {
 			key = strings.ReplaceAll(key, "\\", "/")
+			loggermdl.LogError("set many", key)
 			_, _, err := tx.Set(key, val, nil)
 			if err != nil {
 				return err
@@ -58,8 +63,10 @@ func (i *indexStore) AddMany(keyValMap map[string]string) (err error) {
 	return
 }
 
-func (i *indexStore) Get(path string) (val string, err error) {
+func (i *IndexStore) Get(path string) (val string, err error) {
 	path = strings.ReplaceAll(path, "\\", "/")
+	loggermdl.LogError("Get path", path)
+
 	found := false
 	err = i.store.View(func(tx *buntdb.Tx) error {
 		return tx.Ascend("", func(key, value string) bool {
@@ -74,7 +81,7 @@ func (i *indexStore) Get(path string) (val string, err error) {
 	return
 }
 
-func (i *indexStore) GetOneByQuery(queries []string) (entry Entry, found bool, err error) {
+func (i *IndexStore) GetOneByQuery(queries []string) (entry Entry, found bool, err error) {
 	err = i.store.View(func(tx *buntdb.Tx) error {
 		return tx.Ascend("", func(key, value string) bool {
 			rsJSON := gjson.Parse("[" + value + "]")
@@ -87,6 +94,8 @@ func (i *indexStore) GetOneByQuery(queries []string) (entry Entry, found bool, e
 					Key:   filepath.Join(key),
 					Value: value,
 				}
+				loggermdl.LogError("get one", entry)
+
 				return true
 			}
 			return true
@@ -95,7 +104,7 @@ func (i *indexStore) GetOneByQuery(queries []string) (entry Entry, found bool, e
 	return
 }
 
-func (i *indexStore) GetManyByQuery(queries []string) (map[string]string, error) {
+func (i *IndexStore) GetManyByQuery(queries []string) (map[string]string, error) {
 	entryMap := make(map[string]string, 0)
 	err := i.store.View(func(tx *buntdb.Tx) error {
 		return tx.Ascend("", func(key, value string) bool {
@@ -110,10 +119,11 @@ func (i *indexStore) GetManyByQuery(queries []string) (map[string]string, error)
 			return true
 		})
 	})
+	loggermdl.LogError("Get GetManyByQuery", entryMap)
 	return entryMap, err
 }
 
-func (i *indexStore) GetMany() (map[string]string, error) {
+func (i *IndexStore) GetMany() (map[string]string, error) {
 	entryMap := make(map[string]string, 0)
 	err := i.store.View(func(tx *buntdb.Tx) error {
 		return tx.Ascend("", func(key, value string) bool {
@@ -122,12 +132,15 @@ func (i *indexStore) GetMany() (map[string]string, error) {
 			return true
 		})
 	})
+	loggermdl.LogError("Get GetMany", entryMap)
+
 	return entryMap, err
 }
 
-func (i *indexStore) Delete(key string) error {
+func (i *IndexStore) Delete(key string) error {
 	key = strings.ReplaceAll(key, "\\", "/")
 	err := i.store.Update(func(tx *buntdb.Tx) error {
+		loggermdl.LogError("delete", key)
 		_, err := tx.Delete(key)
 		if err != nil {
 			return err
diff --git a/dalmdl/corefdb/lazyIndex.go b/dalmdl/corefdb/lazycache/lazycache.go
similarity index 99%
rename from dalmdl/corefdb/lazyIndex.go
rename to dalmdl/corefdb/lazycache/lazycache.go
index 31405d844749ae5762e9115aadddd6311b1f2865..b9df3fab654799f06832dfc8a51061e80a7a56a1 100644
--- a/dalmdl/corefdb/lazyIndex.go
+++ b/dalmdl/corefdb/lazycache/lazycache.go
@@ -1,4 +1,4 @@
-package corefdb
+package lazycache
 
 import (
 	"time"
diff --git a/dalmdl/corefdb/locker/locker.go b/dalmdl/corefdb/locker/locker.go
new file mode 100644
index 0000000000000000000000000000000000000000..d47dcf91d1d2260d559320d66d3ce6ad28e96b68
--- /dev/null
+++ b/dalmdl/corefdb/locker/locker.go
@@ -0,0 +1,36 @@
+package locker
+
+import "sync"
+
+var mutexMap = map[string]*sync.Mutex{}
+var getMapSyncMutex = &sync.Mutex{}
+
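+// NewLocker - returns a FileLocker backed by a per-file mutex, so all operations on the same file path are serialized.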
+func NewLocker(filePath string) *FileLocker {
+	getMapSyncMutex.Lock()
+	defer getMapSyncMutex.Unlock()
+	m, found := mutexMap[filePath]
+	if !found {
+		m = &sync.Mutex{}
+		mutexMap[filePath] = m
+	}
+	locker := FileLocker{
+		m,
+	}
+	return &locker
+}
+
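+// Locker - minimal locking contract used by corefdb file types.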
+type Locker interface {
+	Lock()
+	Unlock()
+}
+
+type FileLocker struct {
+	Locker
+}
+
+func (l *FileLocker) Lock() {
+	l.Locker.Lock()
+}
+func (l *FileLocker) Unlock() {
+	l.Locker.Unlock()
+}
diff --git a/dalmdl/corefdb/securityprovider/securityprovider.go b/dalmdl/corefdb/securityprovider/securityprovider.go
new file mode 100644
index 0000000000000000000000000000000000000000..103728b3278afd6fb2321ee48b4ad236a73878b3
--- /dev/null
+++ b/dalmdl/corefdb/securityprovider/securityprovider.go
@@ -0,0 +1,111 @@
+package securityprovider
+
+import (
+	"errors"
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+	"github.com/tidwall/gjson"
+)
+
+const (
+	// SharedPrefix - prefix marking a shared file, i.e. the encryption key is derived without the dynamic field value (fdbSec.fieldQuery)
+	SharedPrefix = "ss_"
+	// EmptySTR represents empty string
+	EmptySTR = ""
+)
+
+type SecurityProvider interface {
+	Encrypter
+	Decrypter
+}
+
+type Encrypter interface {
+	Encrypt([]byte, string, *gjson.Result) ([]byte, error)
+}
+
+type Decrypter interface {
+	Decrypt([]byte, string, *gjson.Result) ([]byte, error)
+}
+
+type FdbSecurityProvider struct {
+	encKey         string // the global encryption key used in the project. This key will be applicable in all cases.
+	userDefinedKey string // the user defined key in the project. This key will be applicable in all cases.
+	fieldQuery     string // query to get dynamic field. Ex. Each student data can be encrypted with studentID. Applicable only for the shared bucket.
+}
+
+type SecurityConfig struct {
+	EncKey         string // the global encryption key used in the project. This key will be applicable in all cases.
+	UserDefinedKey string // the user defined key in the project. This key will be applicable in all cases.
+	FieldQuery     string // query to get dynamic field. Ex. Each student data can be encrypted with studentID. Applicable only for the shared bucket.
+}
+
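+// New - creates an FdbSecurityProvider from the given configuration.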
+func New(config SecurityConfig) FdbSecurityProvider {
+
+	fdbSecurity := FdbSecurityProvider{
+		encKey:         config.EncKey,
+		userDefinedKey: config.UserDefinedKey,
+		fieldQuery:     config.FieldQuery,
+	}
+
+	return fdbSecurity
+}
+
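+// GenerateSecurityKey - derives a 128-bit key from the configured keys and the file name; for non-shared files the configured dynamic field value is prepended.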
+func (fs FdbSecurityProvider) GenerateSecurityKey(fileName string, data *gjson.Result) (key []byte, err error) {
+	_, fileName = filepath.Split(fileName)
+	if fileName == EmptySTR {
+		return key, errors.New("GenerateSecurityKey: fileName must not be empty")
+	}
+
+	// Warning: The order of string concatenation must be preserved as specified.
+	skey := fs.encKey + fs.userDefinedKey + fileName
+	if !strings.HasPrefix(fileName, SharedPrefix) && fs.fieldQuery != "" {
+		// not a shared file and a field query is configured, so the dynamic field value must be present in the data.
+		if data == nil || data.Get(fs.fieldQuery).String() == EmptySTR {
+			return key, errormdl.Wrap("please provide value of field: " + fs.fieldQuery)
+		}
+		skey = data.Get(fs.fieldQuery).String() + skey
+	}
+
+	// loggermdl.LogDebug("key", string(skey))
+	hash, err := hashmdl.Get128BitHash([]byte(skey))
+	if err != nil {
+		return key, errors.New("GenerateSecurityKey: " + err.Error())
+	}
+	key = hash[:]
+	return
+}
+
+// Encrypt - zips and encrypts the provided data
+func (fs FdbSecurityProvider) Encrypt(dataByte []byte, fileName string, data *gjson.Result) (res []byte, err error) {
+	res, err = filemdl.ZipBytes(dataByte)
+	if err != nil {
+		return
+	}
+	securityKey, gerr := fs.GenerateSecurityKey(fileName, data)
+	if gerr != nil {
+		err = gerr
+		return
+	}
+	return securitymdl.AESEncrypt(res, securityKey)
+}
+
+// Decrypt - decrypts and unzips the provided data
+func (fs FdbSecurityProvider) Decrypt(dataByte []byte, fileName string, data *gjson.Result) (res []byte, err error) {
+	res = dataByte
+	securityKey, gerr := fs.GenerateSecurityKey(fileName, data)
+	if gerr != nil {
+		err = gerr
+		return
+	}
+	res, err = securitymdl.AESDecrypt(res, securityKey)
+	if err != nil {
+		return
+	}
+
+	return filemdl.UnZipBytes(res)
+}
diff --git a/dalmdl/dgraph/dgraph.go b/dalmdl/dgraph/dgraph.go
index 0bb755dc2645e35dd648d5290283bcdd8e1c8fe0..540400b5c228a3e7468451ab804137831b76e378 100644
--- a/dalmdl/dgraph/dgraph.go
+++ b/dalmdl/dgraph/dgraph.go
@@ -3,219 +3,270 @@ package dgraph
 import (
 	"context"
 	"errors"
+	"strconv"
+	"strings"
+	"time"
 
-	"github.com/tidwall/sjson"
-	"google.golang.org/grpc"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
 	"github.com/dgraph-io/dgo"
 	"github.com/dgraph-io/dgo/protos/api"
-	"github.com/tidwall/gjson"
+	"google.golang.org/grpc"
 )
 
-type DGraph interface {
-	ConnInit()
-	Create()
-	Drop()
-	Get()
-	Mutate()
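+// Host represents the connection configuration of a single dgraph server.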
+type Host struct {
+	Name       string `json:"hostName"`
+	Server     string `json:"server"`
+	Port       int    `json:"port"`
+	IsDefault  bool   `json:"isDefault"`
+	IsDisabled bool   `json:"IsDisabled"`
+
+	// UserName  string
+	// Password  string
 }
 
-/** ------------------------------------------ **/
-//
-// DGraphHost Parameters
-//
-// ==========================================
-//
-//* Server = Database Host IP Address
-//
-//* Port = Database Port
-//
-//* Username = Database Username
-//
-//* Password = Database Password
-//
-//* MaxConnectionPools = Connection pool to store. Default is 10.
-type DGraphHost struct {
-	Server             string `json:"server"`
-	Port               string `json:"port"`
-	Username           string `json:"username"`
-	Password           string `json:"password"`
-	MaxConnectionPools int    `json:"maxConnPools"`
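+// Instance holds a connected dgraph client along with its host configuration.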
+type Instance struct {
+	client *dgo.Dgraph
+	host   Host
 }
 
-/** ------------------------------------------ **/
-//
-// DGraphObj Parameters
-//
-// ==========================================
-//
-//* Dbg = Dgraph Connection Object
-//
-//* Ctx = Background Context
-//
-//* Txn = Transaction Object
-type DGraphObj struct {
-	Dbg *dgo.Dgraph
-	Ctx context.Context
-	Txn *dgo.Txn
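+// DGraphDAO provides data access methods; all operations run against the named, preconfigured host.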
+type DGraphDAO struct {
+	HostName string
 }
 
-/** ------------------------------------------ **/
-//
-// Mutate Operation Parameters
-//
-// ==========================================
-//
-//* Dbg = Dgraph Connection Object
-//
-//* Ctx = Background Context
-//
-//* Txn = Transaction Object
-//
-//* EnableTxn = Hold transaction for multiple write ops and commit once everything is done
-//
-//* Query = Data object
-type MutateOperation struct {
-	EnableTxn bool
-	JSONData  string
+var (
+	instances   map[string]*Instance
+	defaultHost string
+	configured  bool
+)
+
+// NewClient returns a new dgraph client for the provided configuration.
+func NewClient(h Host) (*dgo.Dgraph, error) {
+
+	if strings.TrimSpace(h.Server) == "" {
+		return nil, errors.New("host address can not be empty")
+	}
+
+	address := bindDgraphServerWithPort(h.Server, h.Port)
+
+	// Dial a gRPC connection. The address to dial to can be configured when
+	// setting up the dgraph cluster.
+	dialOpts := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithBlock(), // block till we connect to the server
+		// grpc.WithTimeout(time.Second * 5),
+		grpc.WithDefaultCallOptions(
+			// grpc.UseCompressor(gzip.Name),
+			grpc.WaitForReady(true),
+		),
+	}
+
+	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5) // wait for 5 seconds to connect to grpc server. Exit if the deadline exceeds.
+	defer cancel()
+	d, err := grpc.DialContext(ctx, address, dialOpts...)
+	if err == context.DeadlineExceeded {
+		return nil, errors.New("graphdb connect error, connection timed out for host " + address)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	client := dgo.NewDgraphClient(api.NewDgraphClient(d))
+
+	// Note: Supported in Enterprise version only
+	// if h.UserName != "" {
+	// 	if err = client.Login(context.TODO(), h.UserName, h.Password); err != nil {
+	// 		return nil, err
+	// 	}
+	// }
+
+	return client, nil
 }
 
-/** ------------------------------------------ **/
-//
-// DROP Operation Parameters
-//
-// ==========================================
-//
-//* Operation = 1 || 2 || 3 || 4
-//
-//** "schema" = Drop complete Schema with data
-//
-//** "data" = Drop complete data but maintain the schema
-//
-//** "attr" = Drop specific Attribute completely from data
-//
-//* AttributeName = REQUIRED IF OPERATION TYPE = 3
-type DropParams struct {
-	Operation     string
-	AttributeName string
-}
-
-// Drop Operation Method
-func (d *DGraphObj) Drop(q *DropParams) error {
-	switch q.Operation {
-	// Drop complete Schema with data
-	case "schema":
-		err := d.Dbg.Alter(d.Ctx, &api.Operation{DropOp: api.Operation_ALL})
-		if err != nil {
-			loggermdl.LogError(err)
-			return err
+// NewInstance creates a new instance of the dgraph client. The instance can be cached against the host name for further operations.
+func NewInstance(client *dgo.Dgraph, host Host) *Instance {
+	return &Instance{
+		client: client,
+		host:   host,
+	}
+}
+
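+// InitInstances creates and caches dgraph clients for all enabled hosts. Once configured, subsequent calls are no-ops.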
+func InitInstances(configs []Host) error {
+
+	if configured {
+		return nil
+	}
+
+	instances = make(map[string]*Instance, len(configs))
+
+	for _, host := range configs {
+		if host.IsDisabled {
+			continue
 		}
-		break
 
-	// Drop complete data but maintain the schema
-	case "data":
-		err := d.Dbg.Alter(d.Ctx, &api.Operation{DropOp: api.Operation_DATA})
+		client, err := NewClient(host)
 		if err != nil {
-			loggermdl.LogError(err)
 			return err
 		}
-		break
 
-	// Drop specific Attribute completely from data
-	case "attr":
-		err := d.Dbg.Alter(d.Ctx, &api.Operation{DropOp: api.Operation_ATTR, DropValue: q.AttributeName})
-		if err != nil {
-			loggermdl.LogError(err)
-			return err
+		instances[host.Name] = &Instance{
+			client: client,
+			host:   host,
 		}
-		break
 
-	default:
-		return errors.New("Invalid drop operation: " + q.Operation)
+		if host.IsDefault {
+			defaultHost = host.Name
+		}
 	}
 
+	configured = true
+
 	return nil
 }
 
-// Connect to DB via Pools
-// Return connection object
-func (d *DGraphHost) ConnInit() (*dgo.Dgraph, error) {
-	serverAddr := d.Server + ":" + d.Port
-	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
+// GetInstance returns a preconfigured dgraph instance from cache. If not present, returns an error.
+func GetInstance(hostName string) (*Instance, error) {
+	if hostName == "" {
+		hostName = defaultHost
+	}
+
+	i, ok := instances[hostName]
+
+	if !ok {
+		return nil, errors.New("instance not found")
+	}
+
+	return i, nil
+}
+
+// GetDAO returns a dao instance to access and manipulate graph data and schema.
+func GetDAO(hostName string) *DGraphDAO {
+	return &DGraphDAO{HostName: hostName}
+}
+
+// CreateSchema sets the provided schema for the nodes data.
+func (dg *DGraphDAO) CreateSchema(ctx context.Context, schema string) error {
+	instance, err := GetInstance(dg.HostName)
 	if err != nil {
-		loggermdl.LogError(err)
-		return nil, err
+		return err
 	}
 
-	return dgo.NewDgraphClient(
-		api.NewDgraphClient(conn),
-	), nil
+	return instance.client.Alter(ctx, &api.Operation{Schema: schema})
 }
 
-// Create new schema in database with input DQuery json
-func (q *DGraphObj) Create(data string) error {
-	err := q.Dbg.Alter(q.Ctx, &api.Operation{
-		Schema: data,
+// SetData sets the provided data as a node. Can be used to create or update a node.
+//
+// For update, the data must contain `uid` field.
+//
+// Set `commitNow` to true to commit or discard the changes immediately.
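+//
+// Example (illustrative, assumes an initialized default host):
+//	err := GetDAO("").SetData(ctx, []byte(`{"name":"alice","age":30}`), true)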
+func (dg *DGraphDAO) SetData(ctx context.Context, data []byte, commitNow bool) error {
+	return dg.mutate(ctx, data, &api.Mutation{
+		SetJson:   data,
+		CommitNow: commitNow,
 	})
+}
 
+// DeleteData deletes the node or provided node attribute.
+//
+// Set `commitNow` to true to commit or discard the changes immediately.
+func (dg *DGraphDAO) DeleteData(ctx context.Context, data []byte, commitNow bool) error {
+	return dg.mutate(ctx, data, &api.Mutation{
+		DeleteJson: data,
+		CommitNow:  commitNow,
+	})
+}
+
+// mutate creates or updates the node data.
+func (dg *DGraphDAO) mutate(ctx context.Context, data []byte, mtn *api.Mutation) error {
+	instance, err := GetInstance(dg.HostName)
 	if err != nil {
-		loggermdl.LogError(err)
 		return err
 	}
 
-	return nil
+	txn := instance.client.NewTxn()
+
+	_, err = txn.Mutate(ctx, mtn)
+
+	return err
 }
 
-// Execute Query with DB Obj, Txn Obj and Query string
-// Returns gjson result string
-func (q *DGraphObj) Get(query string) (*gjson.Result, error) {
+// GetData returns the nodes matching the provided query.
+//
+// query variables can be provided in `vars` param. Safe to provide `nil` if no variables required.
+//
+// The result is against the provided key in the query.
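+//
+// Example (illustrative, assumes an initialized "DGraphHost" instance):
+//	dao := GetDAO("DGraphHost")
+//	res, err := dao.GetData(ctx, `query q($name: string) { nodes(func: eq(name, $name)) { uid name } }`, map[string]string{"$name": "alice"})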
+func (dg *DGraphDAO) GetData(ctx context.Context, query string, vars map[string]string) ([]byte, error) {
+	instance, err := GetInstance(dg.HostName)
+	if err != nil {
+		return nil, err
+	}
 
-	txn := q.Dbg.NewReadOnlyTxn().BestEffort()
-	defer txn.Discard(q.Ctx)
+	txn := instance.client.NewReadOnlyTxn().BestEffort()
+	var res *api.Response
+	if len(vars) == 0 {
+		res, err = txn.Query(ctx, query)
+	} else {
+		res, err = txn.QueryWithVars(ctx, query, vars)
+	}
 
-	resp, err := txn.Query(q.Ctx, query)
 	if err != nil {
-		loggermdl.LogError(err)
 		return nil, err
 	}
 
-	rs := gjson.ParseBytes(resp.Json)
-	return &rs, nil
+	return res.GetJson(), nil
 }
 
-// Mutates query to database
-// Returns error
-func (d *DGraphObj) Mutate(m *MutateOperation) error {
+// DropSchema deletes the current schema along with the data.
+func (dg *DGraphDAO) DropSchema(ctx context.Context) error {
+	instance, err := GetInstance(dg.HostName)
+	if err != nil {
+		return err
+	}
 
-	commitNow := false
+	return instance.client.Alter(ctx, &api.Operation{DropOp: api.Operation_ALL})
+}
 
-	// If transaction object input is nil, return error
-	if m.EnableTxn && d.Txn == nil {
-		loggermdl.LogError("Transaction object is nil")
-		return errors.New("Transaction object is nil")
+// DropData deletes complete data but maintains the schema.
+func (dg *DGraphDAO) DropData(ctx context.Context) error {
+	instance, err := GetInstance(dg.HostName)
+	if err != nil {
+		return err
 	}
 
-	// If commit is True, discard the transaction immediately after commit
-	if !m.EnableTxn {
-		d.Txn = d.Dbg.NewTxn()
-		commitNow = true // Commit the transaction immediately
-		defer d.Txn.Discard(d.Ctx)
-	}
+	return instance.client.Alter(ctx, &api.Operation{DropOp: api.Operation_DATA})
+}
 
-	// Parse gjson and set the query
-	res := gjson.Parse(m.JSONData)
-	obj, err := sjson.Set("", "set", res.Value())
+// DropAttr deletes a specific attribute completely from data and the schema.
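+//
+// A minimal usage sketch (the attribute name is illustrative):
+//
+//	err := dg.DropAttr(ctx, "age")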
+func (dg *DGraphDAO) DropAttr(ctx context.Context, attr string) error {
+	instance, err := GetInstance(dg.HostName)
 	if err != nil {
-		loggermdl.LogError(err)
 		return err
 	}
 
-	_, err = d.Txn.Mutate(d.Ctx, &api.Mutation{SetJson: []byte(obj), CommitNow: commitNow})
+	return instance.client.Alter(ctx, &api.Operation{DropOp: api.Operation_ATTR, DropValue: attr})
+}
+
+// DropEdge deletes the given edges (predicates) of the specified node. A predicate is the name of the
+// relationship between two nodes.
+//
+// Ex. Person1 `follows` Person2.
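+//
+// A minimal usage sketch (uid and predicate are illustrative):
+//
+//	err := dg.DropEdge(ctx, "0x754f", "follows")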
+func (dg *DGraphDAO) DropEdge(ctx context.Context, uid string, predicates ...string) error {
+	instance, err := GetInstance(dg.HostName)
 	if err != nil {
-		loggermdl.LogError(err)
 		return err
 	}
 
-	return nil
+	mu := &api.Mutation{}
+	dgo.DeleteEdges(mu, uid, predicates...)
+
+	mu.CommitNow = true
+	_, err = instance.client.NewTxn().Mutate(ctx, mu)
+	return err
+}
+
+func bindDgraphServerWithPort(server string, port int) string {
+	// If no valid port is provided, fall back to the default gRPC port 9080.
+	if port <= 0 {
+		return server + ":9080"
+	}
+
+	return server + ":" + strconv.Itoa(port)
 }
diff --git a/dalmdl/dgraph/dgraph_test.go b/dalmdl/dgraph/dgraph_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4afb0837b4b909fb256e58d1c0e7fad9b4b471d3
--- /dev/null
+++ b/dalmdl/dgraph/dgraph_test.go
@@ -0,0 +1,391 @@
+package dgraph
+
+import (
+	"context"
+	"testing"
+)
+
+var dgraphHost = &Host{
+	Name:   "DGraphHost",
+	Server: "localhost",
+	Port:   9080,
+}
+
+func Test_NewClient(t *testing.T) {
+	type args struct {
+		h Host
+	}
+	tests := []struct {
+		name string
+		args args
+		// want    *dgo.Dgraph
+		wantErr bool
+	}{
+		{
+			name:    "success on valid connection",
+			args:    args{h: Host{Name: "graphDBHost", Server: "10.1.20.14", Port: 9080}},
+			wantErr: false,
+		},
+		{
+			name:    "fail on connection fail",
+			args:    args{h: Host{Name: "graphDBHost", Server: "10.1.20.14", Port: 8080}},
+			wantErr: true,
+		},
+		{
+			name:    "success on default port used",
+			args:    args{h: Host{Name: "graphDBHost", Server: "10.1.20.14"}},
+			wantErr: false,
+		},
+		{
+			name:    "fail on blank address",
+			args:    args{h: Host{Name: "graphDBHost", Server: ""}},
+			wantErr: true,
+		},
+		{
+			name:    "fail on invalid address",
+			args:    args{h: Host{Name: "graphDBHost", Server: "10.1.0"}},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := NewClient(tt.args.h)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("NewClient() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func Test_CreateSchema(t *testing.T) {
+	type args struct {
+		c      context.Context
+		schema string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "create schema successfully",
+			args: args{c: context.Background(), schema: `
+			 name: string @index(exact) .
+			 age: int .
+			`},
+			wantErr: false,
+		},
+		{
+			name: "pass invalid schema",
+			args: args{c: context.Background(), schema: `
+			name string @index(exact) .
+			age int .
+			 `},
+			wantErr: true,
+		},
+		{
+			name:    "pass blank schema",
+			args:    args{c: context.Background(), schema: ``},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := InitInstances([]Host{*dgraphHost})
+			if err != nil {
+				t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+			}
+
+			dg := GetDAO(dgraphHost.Name)
+			err = dg.CreateSchema(tt.args.c, tt.args.schema)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("CreateSchema() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func Test_SetData(t *testing.T) {
+	type args struct {
+		c      context.Context
+		data   []byte
+		commit bool
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "success on correct data",
+			args: args{c: context.Background(), data: []byte(`{
+				"name": "Person 1",
+				"age": 29,
+				"follows": {
+					"name": "Person 2",
+					"age": 18,
+					"follows": {
+						"name": "Person 3",
+						"age": 37
+					}
+				}
+			}`), commit: true},
+			wantErr: false,
+		},
+		{
+			name:    "failure on incorrect data",
+			args:    args{c: context.Background(), data: []byte(``), commit: true},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		err = dg.SetData(tt.args.c, tt.args.data, tt.args.commit)
+		if err != nil && !tt.wantErr {
+			t.Errorf("SetData() error = %v", err)
+		}
+	}
+}
+
+func Test_DropAttr(t *testing.T) {
+	type args struct {
+		c    context.Context
+		attr string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "success case for delete known attribute",
+			args:    args{c: context.Background(), attr: "age"},
+			wantErr: false,
+		},
+		{
+			name:    "fail case for deleting absent attribute",
+			args:    args{c: context.Background(), attr: "height"},
+			wantErr: true,
+		},
+		{
+			name:    "fail case for blank attribute",
+			args:    args{c: context.Background(), attr: ""},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		err = dg.DropAttr(tt.args.c, tt.args.attr)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropAttr() error = %v", err)
+		}
+	}
+}
+
+func Test_GetData(t *testing.T) {
+	type args struct {
+		c     context.Context
+		query string
+		vars  map[string]string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "success query to fetch data without params",
+			args: args{c: context.Background(), query: `
+			{
+				people(func: has(name)) {
+				  name
+				  age,
+				  follows
+				}
+			  }
+			`},
+			wantErr: false,
+		},
+		{
+			name:    "failure on blank query",
+			args:    args{c: context.Background(), query: ""},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		_, err = dg.GetData(tt.args.c, tt.args.query, tt.args.vars)
+		if err != nil && !tt.wantErr {
+			t.Errorf("GetData() error = %v", err)
+		}
+	}
+}
+
+func Test_DropEdge(t *testing.T) {
+	type args struct {
+		c          context.Context
+		uid        string
+		predicates []string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "success case to delete an edge",
+			args:    args{c: context.Background(), uid: "0x754f", predicates: []string{"follows"}},
+			wantErr: false,
+		},
+		{
+			name:    "fail case to delete blank edge",
+			args:    args{c: context.Background(), uid: "0x7551", predicates: []string{""}},
+			wantErr: true,
+		},
+		{
+			name:    "fail case to delete blank UID",
+			args:    args{c: context.Background(), uid: "", predicates: []string{""}},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		err = dg.DropEdge(tt.args.c, tt.args.uid, tt.args.predicates...)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropEdge() error = %v", err)
+		}
+	}
+}
+
+func Test_DeleteData(t *testing.T) {
+	type args struct {
+		c      context.Context
+		data   []byte
+		commit bool
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "success on delete correct data",
+			args: args{c: context.Background(), data: []byte(`{
+				"uid": "0x754f"
+			}`), commit: true},
+			wantErr: false,
+		},
+		{
+			name:    "failure on incorrect delete data",
+			args:    args{c: context.Background(), data: []byte(``), commit: true},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		err = dg.DeleteData(tt.args.c, tt.args.data, tt.args.commit)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DeleteData() error = %v", err)
+		}
+	}
+}
+func Test_DropData(t *testing.T) {
+	type args struct {
+		c context.Context
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "Drop data case",
+			args:    args{c: context.Background()},
+			wantErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		err = dg.DropData(tt.args.c)
+		if err != nil {
+			t.Errorf("DropData() error = %v, wantErr %v", err, tt.wantErr)
+		}
+	}
+}
+
+func Test_DropSchema(t *testing.T) {
+	type args struct {
+		c context.Context
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "Drop schema case",
+			args:    args{c: context.Background()},
+			wantErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg := GetDAO(dgraphHost.Name)
+		err = dg.DropSchema(tt.args.c)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropSchema() error = %v", err)
+		}
+	}
+}
diff --git a/filemdl/filemdl_darwin.go b/filemdl/filemdl_darwin.go
index 824438f0018c14ee0ad7759c1540a506806aee68..acd187767d9c8ebcafabb998afd29d37eca8e74f 100644
--- a/filemdl/filemdl_darwin.go
+++ b/filemdl/filemdl_darwin.go
@@ -1,51 +1,15 @@
+// TODO: The build tag needs to be passed when building the executable.
+// +build !windows
+
 package filemdl
 
 import (
 	"os"
-	"syscall"
-	"unsafe"
-)
-
-const (
-	moveFileReplacExisting = 0x1
-	moveFileWriteThrough   = 0x8
-)
-
-var (
-	modkernel32     = syscall.NewLazyDLL("kernel32.dll")
-	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
 )
 
-//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW
-
 // AtomicReplaceFile atomically replaces the destination file or directory with the
 // source.  It is guaranteed to either replace the target file entirely, or not
 // change either file.
 func AtomicReplaceFile(source, destination string) error {
-	src, err := syscall.UTF16PtrFromString(source)
-	if err != nil {
-		return &os.LinkError{"replace", source, destination, err}
-	}
-	dest, err := syscall.UTF16PtrFromString(destination)
-	if err != nil {
-		return &os.LinkError{"replace", source, destination, err}
-	}
-
-	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
-	if err := moveFileEx(src, dest, moveFileReplacExisting|moveFileWriteThrough); err != nil {
-		return &os.LinkError{"replace", source, destination, err}
-	}
-	return nil
-}
-
-func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = error(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
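+	// Assumption: rename(2) is atomic on POSIX systems as long as source and
+	// destination reside on the same filesystem.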
+	return os.Rename(source, destination)
 }
diff --git a/filemdl/filepack/packFile.go b/filemdl/filepack/packFile.go
index 5203a184b5b2ab624a8322b5edd44484c4472e9c..f76f23b1f5a2dd6f852f2c8878c9b9c2e12d2d2b 100644
--- a/filemdl/filepack/packFile.go
+++ b/filemdl/filepack/packFile.go
@@ -281,7 +281,7 @@ func addFileDataInFile(f *os.File, offset int64, data string, breakLine bool, rs
 	return filemdl.WriteFileAtOffset(f, offset, dataBytes)
 }
 
-func addMediaDataInFile(f *os.File, offset int64, dataBytes []byte, breakLine bool) (int64, error) {
+func addByteDataInFile(f *os.File, offset int64, dataBytes []byte, breakLine bool) (int64, error) {
 	var err error
 
 	if breakLine {
@@ -320,22 +320,6 @@ func getFileDataFromPack(f *os.File, startOffset, dataSize int64, rs *gjson.Resu
 
 	ba, err := filemdl.ReadFileFromOffset(f, startOffset, dataSize)
 
-	// if isSecurityEnabled {
-	// 	dataByte, err = decryptData(dataByte, f.Name())
-	// 	if err != nil {
-	// 		loggermdl.LogError("decryptData", err)
-	// 		return dataByte, err
-	// 	}
-	// 	return dataByte, nil
-	// }
-	// if isCompressionEnabled {
-	// 	dataByte, err = decompressData(dataByte)
-	// 	if err != nil {
-	// 		return dataByte, err
-	// 	}
-	// 	return dataByte, nil
-	// }
-
 	if secParams.EnableSecurity {
 		// _, fileName := filepath.Split(fp.Name())
 		key, err := securitymdl.GenSecKeyBytes(f.Name(), rs)
@@ -351,9 +335,6 @@ func getFileDataFromPack(f *os.File, startOffset, dataSize int64, rs *gjson.Resu
 	}
 	return ba, err
 }
-func getMediaDataFromPack(f *os.File, startOffset, dataSize int64) ([]byte, error) {
-	return filemdl.ReadFileFromOffset(f, startOffset, dataSize)
-}
 
 // GetKeyWithFileNameAndDefaultKey generates key using file name + Default key
 func GetKeyWithFileNameAndDefaultKey(filePath string) ([]byte, error) {
@@ -644,7 +625,7 @@ func addMediaInPackFile(filePath string, mediaData []byte, infileIndexFields []I
 			return recordID, err
 		}
 		// write data
-		dataSize, err := addMediaDataInFile(f, footerStartOffset, mediaData, true)
+		dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
 		if err != nil {
 			loggermdl.LogError(err)
 			return recordID, err
@@ -747,7 +728,7 @@ func addMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, mediaData
 		return recordID, infileIndex, err
 	}
 	// write data
-	dataSize, err := addMediaDataInFile(f, footerStartOffset, mediaData, true)
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
 	if err != nil {
 		loggermdl.LogError(err)
 		return recordID, infileIndex, err
@@ -1004,7 +985,7 @@ func GetMediaFromPackFile(filePath string, recordID string) ([]byte, gjson.Resul
 	// dataByte := []byte{'{', '}'}
 	// var err error
 	// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
-	dataByte, err = getMediaDataFromPack(f, startOffSet, dataSize)
+	dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, nil, securitymdl.FDBSecParams{EnableSecurity: false})
 	if err != nil {
 		loggermdl.LogError(err)
 		return dataByte, indexRow, err
@@ -1069,7 +1050,7 @@ func GetMediaFromPackFileUsingFp(f *os.File, infileIndex *gjson.Result, recordID
 	// dataByte := []byte{'{', '}'}
 	// var err error
 	// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
-	dataByte, err = getMediaDataFromPack(f, startOffSet, dataSize)
+	dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, nil, securitymdl.FDBSecParams{EnableSecurity: false})
 	if err != nil {
 		loggermdl.LogError(err)
 		return dataByte, metaData, err
@@ -1136,7 +1117,7 @@ func UpdateMediaInPackFile(filePath string, recordID string, mediaData []byte, i
 	}
 
 	// write data
-	dataSize, err := addMediaDataInFile(f, footerStartOffset, mediaData, true)
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
 	if err != nil {
 		loggermdl.LogError(err)
 		return recordID, err
@@ -1238,7 +1219,7 @@ func UpdateMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, recordI
 	}
 
 	// write data
-	dataSize, err := addMediaDataInFile(f, footerStartOffset, mediaData, true)
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
 	if err != nil {
 		loggermdl.LogError(err)
 		return recordID, infileIndex, err
@@ -1359,7 +1340,7 @@ func UpsertMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, recordI
 	}
 
 	// write data
-	dataSize, err := addMediaDataInFile(f, footerStartOffset, mediaData, true)
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
 	if err != nil {
 		loggermdl.LogError(err)
 		return recordID, infileIndex, err
@@ -2221,8 +2202,10 @@ func DeletDataFromPackFileUsingFp(f *os.File, infileIndex *gjson.Result, infileI
 		if err != nil {
 			return recordsDeletedCnt, infileIndex, err
 		}
+		loggermdl.LogError("infile index in file", indexDataStr)
 	} else {
 		indexDataStr = infileIndex.String()
+		loggermdl.LogError("infile index in cache", indexDataStr)
 	}
 
 	indexData := gjson.Parse(indexDataStr)
@@ -2236,6 +2219,7 @@ func DeletDataFromPackFileUsingFp(f *os.File, infileIndex *gjson.Result, infileI
 		loggermdl.LogError("ErrNoDataFound")
 		return recordsDeletedCnt, infileIndex, ErrNoDataFound
 	}
+	loggermdl.LogError("before delete ", indexData)
 	updatedIndexRecords := indexData
 	for _, indexRowToRemove := range indexRowsToDelete {
 		updatedIndexRecords, err = removeIndexRow(updatedIndexRecords, indexRowToRemove.String())
@@ -2245,7 +2229,7 @@ func DeletDataFromPackFileUsingFp(f *os.File, infileIndex *gjson.Result, infileI
 		}
 		recordsDeletedCnt++
 	}
-
+	loggermdl.LogError("updatedIndexRecords after delete ", updatedIndexRecords, f.Name())
 	footerOffset := getFooterOffset(f)
 	if footerOffset == -1 {
 		return recordsDeletedCnt, infileIndex, errormdl.Wrap("fail to fetch infile index offset")
@@ -2290,3 +2274,127 @@ func decryptwithDecompression(data []byte, deCompress bool, encKey []byte) (res
 
 	return
 }
+
+// ReorgPackFile reorganises (compacts) a pack file: it copies only the records still
+// referenced by the in-file index into a temporary file, rebuilds the index with the
+// new offsets, and atomically replaces the original file with the reorganised one.
+func ReorgPackFile(filePath string) error {
+
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	if !isFilePresent {
+		return errormdl.Wrap("file not found")
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+	if err != nil {
+		loggermdl.LogError("Error occured during reOrg of file data", err)
+		return errormdl.Wrap("Error occured during reOrg of file data")
+	}
+	_, sourceFileName := filepath.Split(filePath)
+	desFileName := sourceFileName + "_" + strconv.FormatInt(time.Now().Unix(), 10)
+	tempFilepath, err := filepath.Abs(filepath.Join(filemdl.TempDir, desFileName))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	tempFileDir, _ := filepath.Split(tempFilepath)
+	err = filemdl.CreateDirectoryRecursive(tempFileDir)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	err = initializeWithHeader(tempFilepath)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	dir, _ := filepath.Split(tempFilepath)
+	if dir != "" {
+		createError := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(createError) != nil {
+			return errormdl.CheckErr(createError)
+		}
+	}
+	fpTemp, err := os.OpenFile(tempFilepath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		fpTemp.Close()
+	}()
+	infileIndexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	infileIndexRows := gjson.Parse(infileIndexData)
+	if len(infileIndexRows.Array()) == 0 {
+		return nil
+	}
+	tempFileFooterStartOffset := getFooterOffset(fpTemp)
+	if tempFileFooterStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+	updatedIndexRowStr := "[]"
+	for _, infileIndex := range infileIndexRows.Array() {
+		startOffset, err := strconv.Atoi(infileIndex.Get("startOffset").String())
+		if err != nil {
+			loggermdl.LogError("Error occured while fetching startOffset", err)
+			return err
+		}
+		dataSize, err := strconv.Atoi(infileIndex.Get("dataSize").String())
+		if err != nil {
+			loggermdl.LogError("Error occured while fetching dataSize", err)
+			return err
+		}
+
+		byteArr, err := getFileDataFromPack(f, int64(startOffset), int64(dataSize), nil, securitymdl.FDBSecParams{EnableSecurity: false})
+		if err != nil {
+			loggermdl.LogError("Error occured while reading file data from offset", err)
+			return err
+		}
+		byteCnt, err := addByteDataInFile(fpTemp, tempFileFooterStartOffset, byteArr, false)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		indexRowJSON, _ := sjson.Set(infileIndex.String(), "startOffset", tempFileFooterStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", byteCnt)
+		indexRowJSONObj := gjson.Parse(indexRowJSON)
+		updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "-1", indexRowJSONObj.Value())
+		tempFileFooterStartOffset = tempFileFooterStartOffset + byteCnt
+	}
+
+	err = setFooterOffset(fpTemp, tempFileFooterStartOffset)
+	if err != nil {
+		return err
+	}
+	err = setFooterSize(fpTemp, int64(len(updatedIndexRowStr)))
+	if err != nil {
+		return err
+	}
+	err = setIndexDataInFile(fpTemp, tempFileFooterStartOffset, updatedIndexRowStr)
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Sync()
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Close()
+	if err != nil {
+		return err
+	}
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	return filemdl.AtomicReplaceFile(tempFilepath, filePath)
+}
diff --git a/grpcclientmdl/grpcclientmdl.go b/grpcclientmdl/grpcclientmdl.go
index 268ee035d3bbbfbabd310861754fa9b1b26eb083..75b3565c09f091176da8569ca3b8e860bcf3c42b 100644
--- a/grpcclientmdl/grpcclientmdl.go
+++ b/grpcclientmdl/grpcclientmdl.go
@@ -49,6 +49,7 @@ func ByteHandler(req *grpcbuildermdl.GRPCRequest, grpcServerURL string) ([]byte,
 			loggermdl.LogError("Failed to create gRPC pool: %v", err)
 			return nil, 0, "", err
 		}
+		defer conn.Close()
 		client := grpcbuildermdl.NewGRPCServiceClient(conn.ClientConn)
 		res, err := client.GRPCHandler(context.Background(), req)
 		if err != nil {