diff --git a/authmdl/jwtmdl/jwtmdl.go b/authmdl/jwtmdl/jwtmdl.go
index 3934b49e0482b3c3afb2ea57629dbe6c99048d85..1b55c6d38e8d38793842e6a7f0c4f65003faca7e 100644
--- a/authmdl/jwtmdl/jwtmdl.go
+++ b/authmdl/jwtmdl/jwtmdl.go
@@ -1,6 +1,8 @@
 package jwtmdl
 
 import (
+	"fmt"
+	"strings"
 	"time"
 
 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
@@ -103,3 +105,33 @@ func GenerateTokenWithJWTKey(loginID string, groups []string, clientIP string, m
 	}
 	return t, nil
 }
+
+// GeneratePricipleObjUsingToken - parses the JWT from an Authorization header value and returns its claims
+func GeneratePricipleObjUsingToken(tokenReq string, jwtKey string) (jwt.MapClaims, error) {
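+	// expect an Authorization header value of the form "Bearer <token>"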
+	tokenArray := strings.Split(tokenReq, "Bearer")
+	if len(tokenArray) <= 1 {
+		return nil, errormdl.Wrap("Provided JWT token is nil or invalid ")
+	}
+	tokenFromRequest := strings.Trim(tokenArray[1], " ")
+	// get data i.e.Claims from token
+	token, err := jwt.Parse(tokenFromRequest, func(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		_, ok := token.Method.(*jwt.SigningMethodHMAC)
+		if !ok {
+			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+		}
+		return []byte(jwtKey), nil
+	})
+
+	if err != nil {
+		loggermdl.LogError("Error while parsing JWT Token: ", err)
+		return nil, err
+	}
+
+	claims, ok := token.Claims.(jwt.MapClaims)
+	if !errormdl.CheckBool1(ok) {
+		loggermdl.LogError("Error while parsing claims to MapClaims")
+		return nil, errormdl.Wrap("Error while getting claims")
+	}
+	return claims, nil
+}
diff --git a/dalmdl/corefdb/bucket.go b/dalmdl/corefdb/bucket.go
index acbcb98cf928a047651d7b75fc336e1331a35976..da36b5bd030da898c88573fc72f33cad6301b8f7 100644
--- a/dalmdl/corefdb/bucket.go
+++ b/dalmdl/corefdb/bucket.go
@@ -1,292 +1,432 @@
-package corefdb
-
-import (
-	"path/filepath"
-	"strings"
-
-	"github.com/tidwall/sjson"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
-	"github.com/tidwall/buntdb"
-	"github.com/tidwall/gjson"
-)
-
-// Bucket - Bucket
-type Bucket struct {
-	BucketID        string `json:"bucketId"`
-	IsDynamicName   bool   `json:"isDynamicName"`
-	BucketNameQuery string `json:"bucketNameQuery"`
-}
-
-// Index - Index
-type Index struct {
-	indexObj       *buntdb.DB
-	IndexID        string       `json:"indexId"`
-	IndexNameQuery string       `json:"indexNameQuery"`
-	BucketSequence []string     `json:"bucketSequence"`
-	IndexFields    []IndexField `json:"indexFields"`
-	IsDynamicName  bool         `json:"isDynamicName"`
-}
-
-// IndexField - IndexField
-type IndexField struct {
-	FieldName string `json:"fieldName"`
-	Query     string `json:"query"`
-}
-
-// GetNewIndex - GetNewIndex returns new index
-func (fdb *FDB) GetNewIndex(indexNameQuery string, IsDynamicName bool) (*Index, error) {
-	fdb.indexMux.Lock()
-	defer fdb.indexMux.Unlock()
-	if _, ok := fdb.indices[indexNameQuery]; ok {
-		return nil, errormdl.Wrap("Index name already found")
-	}
-	idx := &Index{
-		IndexNameQuery: indexNameQuery,
-		IndexID:        indexNameQuery,
-		IsDynamicName:  IsDynamicName,
-	}
-	db, err := fdb.GetIndexDB(idx)
-	if err != nil {
-		return nil, err
-	}
-	idx.indexObj = db
-
-	fdb.indices[idx.IndexID] = idx
-	return idx, nil
-}
-
-// RegisterNewIndex - RegisterNewIndex returns new index
-func (fdb *FDB) RegisterNewIndex(idx *Index) (*Index, error) {
-	fdb.indexMux.Lock()
-	defer fdb.indexMux.Unlock()
-	if _, ok := fdb.indices[idx.IndexID]; ok {
-		return nil, errormdl.Wrap("Index ID already found")
-	}
-
-	db, err := fdb.GetIndexDB(idx)
-	if err != nil {
-		return nil, err
-	}
-	idx.indexObj = db
-
-	fdb.indices[idx.IndexID] = idx
-	return idx, nil
-}
-
-// GetIndexDB -  GetIndexDB
-func (fdb *FDB) GetIndexDB(index *Index) (*buntdb.DB, error) {
-	dbPath := filepath.Join(fdb.DBPath, INDEXFOLDER)
-	filemdl.CreateDirectoryRecursive(dbPath)
-	dbPath = filepath.Join(dbPath, index.IndexID+".db")
-	db, err := buntdb.Open(dbPath)
-	if err != nil {
-		return nil, err
-	}
-	return db, nil
-}
-
-// GetNewBucket - return Bucket Obj
-func (fdb *FDB) GetNewBucket(BucketNameQuery string, IsDynamicName bool) *Bucket {
-	fdb.bLocker.Lock()
-	defer fdb.bLocker.Unlock()
-	bucket := &Bucket{
-		BucketID:        guidmdl.GetGUID(),
-		BucketNameQuery: BucketNameQuery,
-		IsDynamicName:   IsDynamicName,
-	}
-	fdb.buckets[bucket.BucketID] = bucket
-	return bucket
-}
-
-// SetBucket - set bucket in index
-func (index *Index) SetBucket(bucket *Bucket) *Index {
-	if bucket != nil {
-		index.BucketSequence = append(index.BucketSequence, bucket.BucketID)
-
-	}
-	return index
-}
-
-// SetFields - SetFields
-func (index *Index) SetFields(indexField ...IndexField) *Index {
-	index.IndexFields = indexField
-	return index
-}
-
-func (fdb *FDB) bucketNameResolver(bucketID string, rs *gjson.Result) (string, error) {
-	fdb.bLocker.Lock()
-	defer fdb.bLocker.Unlock()
-	bucket, ok := fdb.buckets[bucketID]
-	if !ok {
-		loggermdl.LogError("Bucket not found: " + bucketID)
-		return "", errormdl.Wrap("Bucket not found: " + bucketID)
-	}
-	name := bucket.BucketNameQuery
-	if bucket.IsDynamicName {
-		name = rs.Get(name).String()
-	}
-	if name == "" {
-		return name, errormdl.Wrap("Bucket name should not be empty: " + bucketID)
-	}
-	return name, nil
-}
-
-func (fdb *FDB) resolveIndex(index *Index, rs *gjson.Result) (string, error) {
-
-	path := ""
-	for _, bucketID := range index.BucketSequence {
-		bucketName, err := fdb.bucketNameResolver(bucketID, rs)
-		if err != nil {
-			return path, err
-		}
-		path = filepath.Join(path, bucketName)
-	}
-	indexName := index.IndexNameQuery
-	if index.IsDynamicName {
-		indexName = rs.Get(index.IndexNameQuery).String()
-	}
-	if indexName == "" {
-		return path, errormdl.Wrap("Index resolver name is empty:" + index.IndexID)
-	}
-	path = filepath.Join(path, indexName)
-	return path, nil
-}
-
-func createIndexJSON(index *Index, rs *gjson.Result) string {
-	json := `{}`
-	for _, indexField := range index.IndexFields {
-		json, _ = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
-	}
-	return json
-}
-
-// SaveData - SaveData
-func SaveData(dbName, indexID string, rs *gjson.Result) error {
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	path, err := fdb.resolveIndex(index, rs)
-	if errormdl.CheckErr(err) != nil {
-		loggermdl.LogError(err)
-		return errormdl.CheckErr(err)
-	}
-	json := createIndexJSON(index, rs)
-	path = path + ".json"
-	err = index.indexObj.Update(func(tx *buntdb.Tx) error {
-		_, _, err := tx.Set(path, json, nil)
-		if err != nil {
-			loggermdl.LogError(err)
-			return err
-		}
-		path = filepath.Join(fdb.DBPath, path)
-		err = filemdl.WriteFile(path, []byte(rs.String()), true, false)
-		if errormdl.CheckErr(err) != nil {
-			loggermdl.LogError(err)
-			return errormdl.CheckErr(err)
-		}
-		return nil
-	})
-	return err
-}
-
-// GetData - GetData
-func GetData(dbName, indexID string, query ...string) (*gjson.Result, error) {
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return nil, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return nil, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-	path := []string{}
-	index.indexObj.View(func(tx *buntdb.Tx) error {
-		return tx.Ascend(indexID, func(key, value string) bool {
-			rsJSON := gjson.Parse("[" + value + "]")
-			for i := 0; i < len(query); i++ {
-				rsJSON = rsJSON.Get(query[i] + "#")
-			}
-			if rsJSON.Value() != nil {
-				path = append(path, key)
-			}
-			return true
-		})
-	})
-	return fdb.readDataFile(path)
-}
-
-func (fdb *FDB) readDataFile(path []string) (*gjson.Result, error) {
-	sb := strings.Builder{}
-	sb.WriteString("[")
-	for i := 0; i < len(path); i++ {
-		path[i] = filepath.Join(fdb.DBPath, path[i])
-		ba := []byte{'{', '}'}
-		var err error
-		if filemdl.FileAvailabilityCheck(path[i]) {
-			ba, err = filemdl.ReadFile(path[i])
-			if err != nil {
-				loggermdl.LogError(err)
-				return nil, err
-			}
-		}
-		_, err = sb.Write(ba)
-		if err != nil {
-			loggermdl.LogError(err)
-			return nil, err
-		}
-		sb.WriteString(",")
-	}
-	sb.WriteString("]")
-	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
-	rs := gjson.Parse(finalResult)
-	return &rs, nil
-}
-
-// GetDataByConcat - GetDataByConcat
-func GetDataByConcat(dbName, indexID string, query ...string) (*gjson.Result, error) {
-	fdb, err := GetFDBInstance(dbName)
-	if err != nil {
-		return nil, err
-	}
-	index, ok := fdb.GetFDBIndex(indexID)
-	if !ok {
-		loggermdl.LogError("INDEX not found: " + indexID)
-		return nil, errormdl.Wrap("INDEX not found: " + indexID)
-	}
-
-	// path := []string{}
-	sb := strings.Builder{}
-	sb.WriteString("[")
-	index.indexObj.View(func(tx *buntdb.Tx) error {
-
-		tx.Ascend(indexID, func(key, value string) bool {
-			sb.WriteString(value)
-			sb.WriteString(",")
-			return true
-		})
-
-		return nil
-	})
-	sb.WriteString("]")
-	finalData := strings.Replace(sb.String(), ",]", "]", 1)
-	rsJSON := gjson.Parse(finalData)
-	for i := 0; i < len(query); i++ {
-		rsJSON = rsJSON.Get(query[i] + "#")
-		if rsJSON.Value() == nil {
-			break
-		}
-	}
-
-	return nil, nil
-}
+package corefdb
+
+import (
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl/filepack"
+
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+
+	"github.com/tidwall/buntdb"
+	"github.com/tidwall/gjson"
+)
+
+// Bucket - represents one directory level in the fdb storage hierarchy
+type Bucket struct {
+	BucketID        string   `json:"bucketId"`
+	IsDynamicName   bool     `json:"isDynamicName"`
+	BucketNameQuery string   `json:"bucketNameQuery"`
+	Indices         []string `json:"indices"`
+	BucketPath      string   `json:"bucketPath"`
+}
+
+// Index - Index
+type Index struct {
+	indexObj       *buntdb.DB
+	IndexID        string       `json:"indexId"`
+	IndexNameQuery string       `json:"indexNameQuery"`
+	BucketSequence []string     `json:"bucketSequence"`
+	IndexFields    []IndexField `json:"indexFields"`
+	IsDynamicName  bool         `json:"isDynamicName"`
+}
+
+// IndexField - IndexField
+type IndexField struct {
+	FieldName string `json:"fieldName"`
+	Query     string `json:"query"`
+}
+
+// GetNewIndex - GetNewIndex returns new index
+func (fdb *FDB) GetNewIndex(indexNameQuery string, IsDynamicName bool) (*Index, error) {
+	fdb.indexMux.Lock()
+	defer fdb.indexMux.Unlock()
+	if _, ok := fdb.indices[indexNameQuery]; ok {
+		return nil, errormdl.Wrap("Index name already found")
+	}
+	idx := &Index{
+		IndexNameQuery: indexNameQuery,
+		IndexID:        indexNameQuery,
+		IsDynamicName:  IsDynamicName,
+	}
+	db, err := fdb.GetIndexDB(idx)
+	if err != nil {
+		return nil, err
+	}
+	idx.indexObj = db
+
+	fdb.indices[idx.IndexID] = idx
+	return idx, nil
+}
+
+// RegisterNewIndex - RegisterNewIndex returns new index
+func (fdb *FDB) RegisterNewIndex(idx *Index) (*Index, error) {
+	fdb.indexMux.Lock()
+	defer fdb.indexMux.Unlock()
+	if _, ok := fdb.indices[idx.IndexID]; ok {
+		return nil, errormdl.Wrap("Index ID already found")
+	}
+
+	db, err := fdb.GetIndexDB(idx)
+	if err != nil {
+		return nil, err
+	}
+	idx.indexObj = db
+
+	fdb.indices[idx.IndexID] = idx
+	return idx, nil
+}
+
+// GetIndexDB -  GetIndexDB
+func (fdb *FDB) GetIndexDB(index *Index) (*buntdb.DB, error) {
+	dbPath := filepath.Join(fdb.DBPath, INDEXFOLDER)
+	filemdl.CreateDirectoryRecursive(dbPath)
+	dbPath = filepath.Join(dbPath, index.IndexID)
+	db, err := buntdb.Open(dbPath)
+	if err != nil {
+		return nil, err
+	}
+	return db, nil
+}
+
+// GetNewBucket - return Bucket Obj
+func (fdb *FDB) GetNewBucket(BucketNameQuery string, IsDynamicName bool, parentBucket *Bucket) *Bucket {
+	fdb.bLocker.Lock()
+	defer fdb.bLocker.Unlock()
+	bucket := &Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: BucketNameQuery,
+		IsDynamicName:   IsDynamicName,
+	}
+	if IsDynamicName {
+		// a "$$" prefix marks the path segment as dynamic, i.e. resolved per record
+		BucketNameQuery = "$$" + BucketNameQuery
+	}
+	bucket.BucketPath = filepath.Join(parentBucket.BucketPath, BucketNameQuery)
+	fdb.buckets[bucket.BucketID] = bucket
+	return bucket
+}
+
+// SetBucket - set bucket in index
+func (index *Index) SetBucket(bucket *Bucket) *Index {
+	if bucket != nil {
+		index.BucketSequence = append(index.BucketSequence, bucket.BucketID)
+		bucket.Indices = append(bucket.Indices, index.IndexID)
+	}
+	return index
+}
+
+// SetFields - SetFields
+func (index *Index) SetFields(indexField ...IndexField) *Index {
+	index.IndexFields = indexField
+	return index
+}
+
+// bucketNameResolver - returns bucket name
+func (fdb *FDB) bucketNameResolver(bucketID string, rs *gjson.Result) (string, error) {
+	fdb.bLocker.Lock()
+	defer fdb.bLocker.Unlock()
+	bucket, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return "", errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	name := bucket.BucketNameQuery
+	if bucket.IsDynamicName {
+		name = rs.Get(name).String()
+	}
+	if name == "" {
+		return name, errormdl.Wrap("Bucket name should not be empty: " + bucketID)
+	}
+	return name, nil
+}
+
+// resolveIndex - returns the record's file path by resolving the index's bucket sequence and name
+func (fdb *FDB) resolveIndex(index *Index, rs *gjson.Result) (string, error) {
+
+	path := ""
+	for _, bucketID := range index.BucketSequence {
+		bucketName, err := fdb.bucketNameResolver(bucketID, rs)
+		if err != nil {
+			return path, err
+		}
+		path = filepath.Join(path, bucketName)
+	}
+	indexName := index.IndexNameQuery
+	if index.IsDynamicName {
+		indexName = rs.Get(index.IndexNameQuery).String()
+	}
+	if indexName == "" {
+		return path, errormdl.Wrap("Index resolver name is empty:" + index.IndexID)
+	}
+	path = filepath.Join(path, indexName)
+	return path, nil
+}
+
+// createIndexJSON - create JSON with index field data
+func createIndexJSON(index *Index, rs *gjson.Result) string {
+	json := `{}`
+	for _, indexField := range index.IndexFields {
+		json, _ = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
+	}
+	return json
+}
+
+// SaveData - saves record in fdb
+func SaveData(dbName, indexID string, rs *gjson.Result) error {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		return err
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return errormdl.Wrap("INDEX not found: " + indexID)
+	}
+	path, err := fdb.resolveIndex(index, rs)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	json := createIndexJSON(index, rs)
+	// path = path + ".json"
+	err = index.indexObj.Update(func(tx *buntdb.Tx) error {
+		_, _, err := tx.Set(path, json, nil)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+
+		path = filepath.Join(fdb.DBPath, path)
+		err = filemdl.WriteFile(path, []byte(rs.String()), true, false)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return errormdl.CheckErr(err)
+		}
+		return nil
+	})
+	return err
+}
+
+// GetData - returns all records matching the given queries
+func GetData(dbName, indexID string, query ...string) (*gjson.Result, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		return nil, err
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, errormdl.Wrap("INDEX not found: " + indexID)
+	}
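+	// collect the index keys (relative file paths) whose index entries match
+	// every supplied query filter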
+	paths := []string{}
+	index.indexObj.View(func(tx *buntdb.Tx) error {
+		return tx.Ascend(indexID, func(key, value string) bool {
+			rsJSON := gjson.Parse("[" + value + "]")
+			for i := 0; i < len(query); i++ {
+				rsJSON = rsJSON.Get(query[i] + "#")
+			}
+			if rsJSON.Value() != nil {
+				paths = append(paths, key)
+			}
+			return true
+		})
+	})
+	data, err := fdb.readDataFile(paths)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	if len(paths) > 0 && len(data.Array()) == 0 {
+		data, err = searchDataInPack(paths, fdb.DBPath, fdb.restoreFileFromPack)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+	}
+	return data, nil
+}
+
+// searchDataInPack - locates the pack file covering the given paths and returns the matching file data
+func searchDataInPack(paths []string, searchTillPath string, restoreFileFromPack bool) (*gjson.Result, error) {
+	dirPath, _ := filepath.Split(paths[0])
+	packPath, err := filepack.SearchPackFilePath(dirPath, searchTillPath)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	if len(packPath) == 0 {
+		return &gjson.Result{}, nil
+	}
+	fileDataMap, err := filepack.GetFileDataListFromPack(packPath, paths)
+
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	dataString := "[]"
+	for fileName, dataByte := range fileDataMap {
+		if len(dataByte) > 0 {
+			dataString, _ = sjson.Set(dataString, "-1", gjson.Parse(string(dataByte)).Value())
+			if restoreFileFromPack {
+				restoreFilesFromFilePack(fileName, dataByte)
+			}
+		}
+	}
+	data := gjson.Parse(dataString)
+
+	return &data, nil
+}
+
+// restoreFilesFromFilePack - writes data found in a pack back to its standalone file on disk
+func restoreFilesFromFilePack(fileName string, fileData []byte) error {
+	if len(fileData) > 0 {
+		err := filemdl.WriteFile(fileName, fileData, true, false)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return errormdl.CheckErr(err)
+		}
+	}
+	return nil
+}
+
+// readDataFile - reads file and returns file data
+func (fdb *FDB) readDataFile(path []string) (*gjson.Result, error) {
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	for i := 0; i < len(path); i++ {
+		path[i] = filepath.Join(fdb.DBPath, path[i])
+		ba := []byte{'{', '}'}
+		var err error
+		if filemdl.FileAvailabilityCheck(path[i]) {
+			ba, err = filemdl.ReadFile(path[i])
+			if err != nil {
+				loggermdl.LogError(err)
+				return nil, err
+			}
+			_, err = sb.Write(ba)
+			if err != nil {
+				loggermdl.LogError(err)
+				return nil, err
+			}
+			sb.WriteString(",")
+		}
+	}
+	sb.WriteString("]")
+	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
+	rs := gjson.Parse(finalResult)
+	return &rs, nil
+}
+
+// GetDataByConcat - returns records matching the query by concatenating all index values
+func GetDataByConcat(dbName, indexID string, query ...string) (*gjson.Result, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		return nil, err
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, errormdl.Wrap("INDEX not found: " + indexID)
+	}
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	index.indexObj.View(func(tx *buntdb.Tx) error {
+
+		tx.Ascend(indexID, func(key, value string) bool {
+			sb.WriteString(value + ",")
+			return true
+		})
+
+		return nil
+	})
+	sb.WriteString("]")
+	finalData := strings.Replace(sb.String(), ",]", "]", 1)
+	rsJSON := gjson.Parse(finalData)
+	for i := 0; i < len(query); i++ {
+		rsJSON = rsJSON.Get(query[i] + "#")
+		if rsJSON.Value() == nil {
+			break
+		}
+	}
+
+	return &rsJSON, nil
+}
+
+// DeleteData - deletes record from fdb
+func DeleteData(dbName, indexID string, query ...string) (*gjson.Result, []error) {
+	errList := make([]error, 0)
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		return nil, []error{err}
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, []error{errormdl.Wrap("INDEX not found: " + indexID)}
+	}
+
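+	// first collect the index keys (relative file paths) of every matching record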
+	paths := make([]string, 0)
+
+	index.indexObj.View(func(tx *buntdb.Tx) error {
+		return tx.Ascend(indexID, func(key, value string) bool {
+			rsJSON := gjson.Parse("[" + value + "]")
+			for i := 0; i < len(query); i++ {
+				rsJSON = rsJSON.Get(query[i] + "#")
+			}
+			if rsJSON.Value() != nil {
+				paths = append(paths, key)
+			}
+			return true
+		})
+	})
+	if len(paths) == 0 {
+		return nil, []error{errormdl.Wrap("No Records to delete")}
+	}
+	var indexKeys = make([]string, len(paths))
+	copy(indexKeys, paths)
+
+	result, err := fdb.readDataFile(paths)
+	if err != nil {
+		return nil, []error{err}
+	}
+	for _, path := range indexKeys {
+
+		index.indexObj.Update(func(tx *buntdb.Tx) error {
+			// delete index entry
+			_, err := tx.Delete(path)
+			if err != nil {
+				errList = append(errList, errormdl.Wrap("unable to delete index :"+path+err.Error()))
+				return nil
+			}
+			path = filepath.Join(fdb.DBPath, path)
+
+			dirPath, _ := filepath.Split(path)
+			// search filepack
+			packPath, err := filepack.SearchPackFilePath(dirPath, fdb.DBPath)
+			if err != nil && err.Error() != filepack.PackNotFound {
+				loggermdl.LogError("err", err)
+				return nil
+			}
+			// delete the file from the pack, if one exists
+			if len(packPath) > 0 {
+				err = filepack.DeleteFileFromPack(packPath, path)
+				if err != nil {
+					errList = append(errList, errormdl.Wrap("unable to delete file from Pack :"+path))
+					return nil
+				}
+			}
+			// delete file
+			err = filemdl.DeleteFile(path)
+			if err != nil {
+				errList = append(errList, errormdl.Wrap("unable to delete file : "+path+err.Error()))
+			}
+			return nil
+		})
+	}
+
+	return result, errList
+}
diff --git a/dalmdl/corefdb/bucket_test.go b/dalmdl/corefdb/bucket_test.go
index 942f1ab222e6b2b6237f2797adeec6c27297adde..ace735983acfbadc0e2063c716f6beb51b8822d4 100644
--- a/dalmdl/corefdb/bucket_test.go
+++ b/dalmdl/corefdb/bucket_test.go
@@ -1,48 +1,270 @@
-package corefdb
-
-import (
-	"log"
-	"testing"
-)
-
-func init() {
-	db, err := CreateFDBInstance("./myCorefdb", "myFirstFDB")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	b := db.GetNewBucket("Candidates", false)
-	i, err := db.GetNewIndex("id", true)
-	if err != nil {
-		log.Fatal(err)
-	}
-	i.SetBucket(b)
-	fields := []IndexField{
-		IndexField{
-			FieldName: "name",
-			Query:     "name",
-		},
-		IndexField{
-			FieldName: "age",
-			Query:     "age",
-		},
-	}
-	i.SetFields(fields...)
-	err = db.CreateIndex(i)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func BenchmarkGetData(b *testing.B) {
-	for index := 0; index < b.N; index++ {
-		GetData("myFirstFDB", "id", `#[name=="a"]`, `#[age==25]#`)
-	}
-}
-
-func BenchmarkGetDataByConcat(b *testing.B) {
-	for index := 0; index < b.N; index++ {
-		GetDataByConcat("myFirstFDB", "id", `#[name=="a"]`, `#[age==25]#`)
-
-	}
-}
+package corefdb
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+var dbInstance *FDB
+var bucketInstance *Bucket
+var indexInstance *Index
+
+func init() {
+
+	db, err := CreateFDBInstance("/home/vivekn/fdb_data", "myfdb", false)
+	if err != nil {
+		log.Fatal("CreateFDBInstance = ", err)
+	}
+	dbInstance = db
+	// step 1:  create bucket
+	bucket := db.GetNewBucket("Candidates", false, &Bucket{})
+	bucketInstance = bucket
+	i, err := db.GetNewIndex("studentId", true)
+	if err != nil {
+		log.Fatal(err)
+	}
+	i.SetBucket(bucket)
+	fields := []IndexField{
+		IndexField{
+			FieldName: "name",
+			Query:     "name",
+		},
+		IndexField{
+			FieldName: "class",
+			Query:     "class",
+		},
+	}
+	i.SetFields(fields...)
+	indexInstance = i
+	// step 2:  create index
+	err = db.CreateIndex(i)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestDir(t *testing.T) {
+	files, err := ioutil.ReadDir("")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, file := range files {
+		fmt.Println(file.Name())
+	}
+}
+func TestSaveData(t *testing.T) {
+	db, err := CreateFDBInstance("/home/vivekn/fdb_data", "myfdb", false)
+	if err != nil {
+		log.Fatal("CreateFDBInstance = ", err)
+	}
+	// step 1:  create bucket
+	b := db.GetNewBucket("Candidates", false, &Bucket{})
+
+	i, err := db.GetNewIndex("studentId", true)
+	if err != nil {
+		log.Fatal(err)
+	}
+	i.SetBucket(b)
+	fields := []IndexField{
+		IndexField{
+			FieldName: "name",
+			Query:     "name",
+		},
+		IndexField{
+			FieldName: "class",
+			Query:     "class",
+		},
+	}
+	i.SetFields(fields...)
+	// step 2:  create index
+	err = db.CreateIndex(i)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	data, _ := sjson.Set("", "name", "vivek")
+	data, _ = sjson.Set(data, "studentId", 10008)
+	data, _ = sjson.Set(data, "class", "SY_MCA")
+	data, _ = sjson.Set(data, "age", 26)
+
+	studentObj := gjson.Parse(data)
+	// step 3: save data
+	err = SaveData("myfdb", i.IndexID, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestGetData(t *testing.T) {
+	db, err := CreateFDBInstance("/home/vivekn/fdb_data", "myfdb", false)
+	if err != nil {
+		log.Fatal("CreateFDBInstance = ", err)
+	}
+	b := db.GetNewBucket("Candidates", false, &Bucket{})
+
+	i, err := db.GetNewIndex("studentId", true)
+	if err != nil {
+		log.Fatal(err)
+	}
+	i.SetBucket(b)
+	fields := []IndexField{
+		IndexField{
+			FieldName: "name",
+			Query:     "name",
+		},
+		IndexField{
+			FieldName: "class",
+			Query:     "class",
+		},
+	}
+	i.SetFields(fields...)
+	err = db.CreateIndex(i)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, err := GetData("myfdb", i.IndexID, `#[name=="vivek"]`)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// loggermdl.LogDebug("IsArray", data.IsArray(), len(data.Array()))
+	loggermdl.LogDebug("data", data)
+}
+func TestDeleteData(t *testing.T) {
+	db, err := CreateFDBInstance("/home/vivekn/fdb_data", "myfdb", false)
+	if err != nil {
+		log.Fatal("CreateFDBInstance = ", err)
+	}
+	b := db.GetNewBucket("Candidates", false, &Bucket{})
+
+	i, err := db.GetNewIndex("studentId", true)
+	if err != nil {
+		log.Fatal(err)
+	}
+	i.SetBucket(b)
+	fields := []IndexField{
+		IndexField{
+			FieldName: "name",
+			Query:     "name",
+		},
+		IndexField{
+			FieldName: "class",
+			Query:     "class",
+		},
+	}
+	i.SetFields(fields...)
+	err = db.CreateIndex(i)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	_, errList := DeleteData("myfdb", i.IndexID, `#[name=="vivek"]`)
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+		// log.Fatal(err)
+	}
+}
+
+func TestReindex(t *testing.T) {
+
+	db, err := CreateFDBInstance("/home/vivekn/fdb_data", "myfdb", false)
+	if err != nil {
+		log.Fatal("CreateFDBInstance = ", err)
+	}
+	// step 1:  create bucket
+	b := db.GetNewBucket("Candidates", false, &Bucket{})
+
+	i, err := db.GetNewIndex("studentId", true)
+	if err != nil {
+		log.Fatal(err)
+	}
+	i.SetBucket(b)
+	fields := []IndexField{
+		IndexField{
+			FieldName: "name",
+			Query:     "name",
+		},
+		IndexField{
+			FieldName: "class",
+			Query:     "class",
+		},
+	}
+	i.SetFields(fields...)
+	// step 2:  create index
+	err = db.CreateIndex(i)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	data, _ := sjson.Set("", "name", "vivek")
+	data, _ = sjson.Set(data, "studentId", 1000)
+
+	data, _ = sjson.Set(data, "class", "TY_MCA")
+	data, _ = sjson.Set(data, "age", 26)
+
+	studentObj := gjson.Parse(data)
+	// step 3: save data
+	err = SaveData("myfdb", i.IndexID, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// step 4: fetch data
+	result, err := GetData("myfdb", i.IndexID, `#[name=="vivek"]`)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogDebug("before reindex", result)
+
+	// step 5: delete index file
+	err = i.indexObj.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	err = filemdl.DeleteFile("/home/vivekn/fdb_data/index/studentId")
+	if err != nil {
+		loggermdl.LogError(err)
+		log.Fatal(err)
+	}
+
+	// step 6: reindex
+	err = db.ReIndex(i.IndexID)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// step 7: fetch data
+	data2, err := GetData("myfdb", i.IndexID, `#[name=="vivek"]`)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogDebug("after reindex", data2)
+}
+
+func BenchmarkGetData(b *testing.B) {
+	for index := 0; index < b.N; index++ {
+		// loggermdl.LogDebug("dbInstance.DBName", dbInstance.DBName)
+		GetData(dbInstance.DBName, indexInstance.IndexID, `#[name=="vivek"]`)
+	}
+}
+
+func BenchmarkReindex(b *testing.B) {
+
+	for i := 0; i < 300; i++ {
+		data, _ := sjson.Set(`{"age":26,"class":"SY_MCA","studentId":10008,"name":"vivek"}`, "studentId", 1000+i)
+		obj := gjson.Parse(data)
+		SaveData(dbInstance.DBName, indexInstance.IndexID, &obj)
+	}
+	for index := 0; index < b.N; index++ {
+		dbInstance.ReIndex(indexInstance.IndexID)
+	}
+}
diff --git a/dalmdl/corefdb/buntdbmdl.go b/dalmdl/corefdb/buntdbmdl.go
index aa86c27ad459a255d3aaff32435021deadf2bcba..a2070f1db57474b6c118e792668738f36a016741 100644
--- a/dalmdl/corefdb/buntdbmdl.go
+++ b/dalmdl/corefdb/buntdbmdl.go
@@ -1,90 +1,250 @@
-package corefdb
-
-import (
-	"sync"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
-
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
-	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
-
-	"github.com/tidwall/buntdb"
-)
-
-const (
-	// INDEXFOLDER -INDEXFOLDER
-	INDEXFOLDER = "index"
-)
-
-var databases cachemdl.FastCacheHelper
-var defaultDB string
-
-func init() {
-	databases.Setup(1, 1000, 1000)
-}
-
-// FDB - FDB
-type FDB struct {
-	DBName   string
-	DBPath   string `json:"dbPath"`
-	indices  map[string]*Index
-	indexMux sync.Mutex
-	buckets  map[string]*Bucket
-	bLocker  sync.Mutex
-}
-
-// CreateFDBInstance - CreateFDBInstance
-func CreateFDBInstance(dbPath, dbName string, isDefault bool) (*FDB, error) {
-	fdb := &FDB{
-		DBPath:   dbPath,
-		indices:  make(map[string]*Index),
-		indexMux: sync.Mutex{},
-		buckets:  make(map[string]*Bucket),
-		bLocker:  sync.Mutex{},
-	}
-	if isDefault {
-		defaultDB = dbName
-	}
-	databases.SetNoExpiration(dbName, fdb)
-	return fdb, nil
-}
-
-// GetFDBInstance - GetFDBInstance
-func GetFDBInstance(dbName string) (*FDB, error) {
-	if dbName == "" {
-		dbName = defaultDB
-	}
-
-	rawDB, ok := databases.Get(dbName)
-	if !ok {
-		loggermdl.LogError("Database instance not found")
-		return nil, errormdl.Wrap("Database instance not found")
-	}
-	fdb, ok := rawDB.(*FDB)
-	if !ok {
-		loggermdl.LogError("Can not cast object into *FDB")
-		return nil, errormdl.Wrap("Can not cast object into *FDB")
-	}
-	return fdb, nil
-}
-
-// GetFDBIndex - GetFDBIndex
-func (f *FDB) GetFDBIndex(indexName string) (*Index, bool) {
-	f.indexMux.Lock()
-	defer f.indexMux.Unlock()
-	index, ok := f.indices[indexName]
-	return index, ok
-}
-
-// CreateIndex - CreateIndex
-func (f *FDB) CreateIndex(index *Index) error {
-	var fns []func(a, b string) bool
-	for _, idx := range index.IndexFields {
-		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
-	}
-	err := index.indexObj.CreateIndex(index.IndexID, "*", fns...)
-	if err != nil {
-		return err
-	}
-	return nil
-}
+package corefdb
+
+import (
+	"os"
+	"strings"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"path/filepath"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/tidwall/buntdb"
+)
+
+const (
+	// INDEXFOLDER - name of the folder that stores fdb index files
+	INDEXFOLDER = "index"
+)
+
+var databases cachemdl.FastCacheHelper
+var defaultDB string
+
+func init() {
+	databases.Setup(1, 1000, 1000)
+}
+
+// FDB - a file-backed database instance
+type FDB struct {
+	DBName              string
+	DBPath              string `json:"dbPath"`
+	indices             map[string]*Index
+	indexMux            sync.Mutex
+	buckets             map[string]*Bucket
+	bLocker             sync.Mutex
+	restoreFileFromPack bool
+}
+
+// CreateFDBInstance - creates fdb instance
+func CreateFDBInstance(dbPath, dbName string, isDefault bool) (*FDB, error) {
+	fdb := &FDB{
+		DBPath:              dbPath,
+		indices:             make(map[string]*Index),
+		indexMux:            sync.Mutex{},
+		buckets:             make(map[string]*Bucket),
+		bLocker:             sync.Mutex{},
+		restoreFileFromPack: true,
+	}
+	if isDefault {
+		defaultDB = dbName
+	}
+	databases.SetNoExpiration(dbName, fdb)
+	return fdb, nil
+}
+
+// GetBucketByName - returns the bucket with the specified name
+func (f *FDB) GetBucketByName(bucketName string) *Bucket {
+	for _, val := range f.buckets {
+		if val.BucketNameQuery == bucketName {
+			return val
+		}
+	}
+	return nil
+}
+
+// SetFileRestoreFormPackFlag - sets whether Get operations restore files found in a pack back to disk
+func (f *FDB) SetFileRestoreFormPackFlag(restoreFile bool) {
+	f.restoreFileFromPack = restoreFile
+}
+
+// GetFDBInstance - returns fdb instance
+func GetFDBInstance(dbName string) (*FDB, error) {
+	if dbName == "" {
+		dbName = defaultDB
+	}
+
+	rawDB, ok := databases.Get(dbName)
+	if !ok {
+		loggermdl.LogError("Database instance not found")
+		return nil, errormdl.Wrap("Database instance not found")
+	}
+	fdb, ok := rawDB.(*FDB)
+	if !ok {
+		loggermdl.LogError("Can not cast object into *FDB")
+		return nil, errormdl.Wrap("Can not cast object into *FDB")
+	}
+	return fdb, nil
+}
+
+// GetFDBIndex - returns index
+func (f *FDB) GetFDBIndex(indexName string) (*Index, bool) {
+	f.indexMux.Lock()
+	defer f.indexMux.Unlock()
+	index, ok := f.indices[indexName]
+	return index, ok
+}
+
+// CreateIndex - Creates index
+func (f *FDB) CreateIndex(index *Index) error {
+	var fns []func(a, b string) bool
+	for _, idx := range index.IndexFields {
+		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
+	}
+	err := index.indexObj.CreateIndex(index.IndexID, "*", fns...)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetBucketIndexes - returns the list of indexes registered on the bucket
+func (f *FDB) GetBucketIndexes(bucketName string) ([]*Index, error) {
+
+	var bucket *Bucket
+	var foundBucket = false
+	for _, bucketObj := range f.buckets {
+		if bucketObj.BucketNameQuery == bucketName {
+			foundBucket = true
+			bucket = bucketObj
+			break
+		}
+
+	}
+
+	if !foundBucket {
+		return nil, errormdl.Wrap("Bucket not found")
+	}
+
+	indexList := make([]*Index, 0)
+	for _, indexID := range bucket.Indices {
+		index, ok := f.indices[indexID]
+
+		if !ok {
+			return nil, errormdl.Wrap("index not found")
+		}
+		indexList = append(indexList, index)
+	}
+	return indexList, nil
+}
+
+// ReIndex - performs reindexing
+func (f *FDB) ReIndex(indexID string) error {
+
+	index, found := f.GetFDBIndex(indexID)
+	if !found {
+		return errormdl.Wrap("index not found")
+	}
+	// find path to start file walk
+	pathToStartWalk := f.DBPath
+	for _, bucketID := range index.BucketSequence {
+		bucket := f.buckets[bucketID]
+		if bucket.IsDynamicName {
+			break
+		}
+		pathToStartWalk = filepath.Join(pathToStartWalk, bucket.BucketNameQuery)
+	}
+
+	// get required data for index file
+	indexDataMap, err := getFDBIndexData(pathToStartWalk, index, f.DBPath)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// create or replace index
+	var fns []func(a, b string) bool
+	for _, idx := range index.IndexFields {
+		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
+	}
+	index.indexObj.Close()
+	indexFilePath := filepath.Join(f.DBPath, INDEXFOLDER, index.IndexID)
+	if filemdl.FileAvailabilityCheck(indexFilePath) {
+		err = filemdl.DeleteFile(indexFilePath)
+		if err != nil {
+			return err
+		}
+	}
+	db, err := f.GetIndexDB(index)
+	if err != nil {
+		return err
+	}
+	index.indexObj = db
+	err = index.indexObj.ReplaceIndex(index.IndexID, "*", fns...)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// update index file by reading all data and updating index file
+	err = index.indexObj.Update(func(tx *buntdb.Tx) error {
+
+		for indexKey, indexData := range indexDataMap {
+			_, _, err := tx.Set(indexKey, indexData, nil)
+			if err != nil {
+				loggermdl.LogError(err)
+				return err
+			}
+		}
+
+		return nil
+	})
+	return err
+}
+
+// getFDBIndexData - walks the specified directory recursively and returns index data for each file
+func getFDBIndexData(path string, index *Index, dbPath string) (map[string]string, error) {
+
+	indexDataMap := make(map[string]string)
+	if !filemdl.FileAvailabilityCheck(path) {
+		return nil, errormdl.Wrap("invalid path: " + path)
+	}
+
+	dbPath = filepath.Join(dbPath) + string(filepath.Separator)
+	ferr := filemdl.Walk(path, func(filePath string, info os.FileInfo, err error) error {
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil
+		}
+
+		if !info.IsDir() {
+			fileData, err := filemdl.ReadFile(filePath)
+			if err != nil {
+				loggermdl.LogError(err)
+				return nil
+			}
+
+			dataObj := gjson.Parse(string(fileData))
+			indexDataObj := ""
+			for _, indexField := range index.IndexFields {
+				indexDataObj, _ = sjson.Set(indexDataObj, indexField.FieldName, dataObj.Get(indexField.Query).String())
+			}
+
+			if indexDataObj != "" {
+				pathToSave := strings.TrimPrefix(filePath, dbPath)
+				indexDataMap[pathToSave] = indexDataObj
+			}
+		}
+
+		return nil
+	})
+	if ferr != nil {
+		return indexDataMap, ferr
+	}
+	return indexDataMap, nil
+
+}
diff --git a/filemdl/filemdl.go b/filemdl/filemdl.go
old mode 100755
new mode 100644
index edadb37a9af36ee922ac61282fa801c9058d3f7b..d0d160581604badbb4506bc459479bcc4eb9f778
--- a/filemdl/filemdl.go
+++ b/filemdl/filemdl.go
@@ -493,3 +493,191 @@ func CloseFilePointer(filePath string) error {
 	filePtrs.Delete(filePath)
 	return nil
 }
+
+// WalkFunc - WalkFunc
+type WalkFunc func(path string, info os.FileInfo, err error) error
+
+// Walk - walks the file tree rooted at root, calling walkFunc for each file or directory
+func Walk(root string, walkFunc WalkFunc) error {
+	info, err := FileInfo(root)
+	if err != nil {
+		err = walkFunc(root, nil, err)
+	} else {
+		err = walk(root, info, walkFunc)
+	}
+	return err
+}
+
+// walk - walks specified path
+func walk(path string, fileInfo os.FileInfo, walkFunc WalkFunc) error {
+	// if not dir
+	if !fileInfo.IsDir() {
+		return walkFunc(path, fileInfo, nil)
+	}
+	// if dir
+	err := walkFunc(path, fileInfo, nil)
+	if err != nil {
+		return err
+	}
+	fileInfos, err := ListDirectory(path)
+
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range fileInfos {
+		filePath := path + string(filepath.Separator) + fileInfo.Name()
+		_, err := FileInfo(filePath)
+		if err != nil {
+			if err = walkFunc(filePath, fileInfo, err); err != nil {
+				return err
+			}
+		} else {
+			err = walk(filePath, fileInfo, walkFunc)
+			if err != nil {
+				if !fileInfo.IsDir() {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// ListFileRecursively - returns array of filepath recursively from specified path
+func ListFileRecursively(path string) ([]string, error) {
+	paths := make([]string, 0)
+	err := Walk(path, func(filePath string, info os.FileInfo, err error) error {
+		// propagate walk errors; info is nil when err is non-nil
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			paths = append(paths, filePath)
+		}
+		return nil
+	})
+	if err != nil {
+		return paths, err
+	}
+	return paths, nil
+}
+
+// OpenFile - opens file with specified mode
+func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Open - opens file with read mode
+func Open(name string) (*os.File, error) {
+	return OpenFile(name, os.O_RDONLY, 0)
+}
+
+// FastReadFile - reads contents from provided file path with fast read method
+func FastReadFile(filePath string) ([]byte, error) {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	file, err := Open(filePath)
+	if errormdl.CheckErr(err) != nil {
+		return nil, errormdl.CheckErr(err)
+	}
+	defer file.Close()
+	fileStat, err := file.Stat()
+	if errormdl.CheckErr(err) != nil {
+		return nil, errormdl.CheckErr(err)
+	}
+	fileBytes := make([]byte, fileStat.Size())
+	bytesRead, err := file.Read(fileBytes)
+	if errormdl.CheckErr(err) == nil && bytesRead < len(fileBytes) {
+		err = errormdl.Wrap("short read")
+	}
+	return fileBytes, err
+}
+
+// FastWriteFile - writes provided bytes to file with fast write method
+func FastWriteFile(filePath string, data []byte, makeDir bool, createBackup bool, safeMode bool) error {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	if makeDir {
+		createError := createRecursiveDirectoryForFile(filePath)
+		if errormdl.CheckErr(createError) != nil {
+			loggermdl.LogError(createError)
+			return errormdl.CheckErr(createError)
+		}
+	}
+	if createBackup {
+		backupErr := createFileBackup(filePath)
+		if backupErr != nil {
+			loggermdl.LogError(backupErr)
+		}
+	}
+
+	if safeMode {
+		return writeFileSafely(filePath, data, 0644)
+	}
+
+	return writeFile(filePath, data, 0644)
+}
+
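+// writeFileSafely writes data to a temp file, syncs it and atomically renames
+// it over filePath so readers never observe a partially written file.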
+func writeFileSafely(filePath string, data []byte, perm os.FileMode) error {
+	dir, name := filepath.Split(filePath)
+	// create the temp file in the target directory so the final rename stays
+	// on one filesystem (os.Rename cannot cross devices)
+	tmpFile, err := ioutil.TempFile(dir, name)
+	if err != nil {
+		return err
+	}
+
+	defer tmpFile.Close()
+
+	n, err := tmpFile.Write(data)
+	if err != nil {
+		return errormdl.Wrap("cannot create temp file:" + err.Error())
+	}
+
+	if n < len(data) {
+		return errormdl.Wrap("cannot create temp file: short write")
+	}
+	err = tmpFile.Sync()
+	if err != nil {
+		return err
+	}
+	tmpFileName := tmpFile.Name()
+
+	info, err := os.Stat(filePath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	// get the file mode from the original file and use that for the replacement
+	// file, too.
+	if err == nil {
+		if err := os.Chmod(tmpFileName, info.Mode()); err != nil {
+			return errormdl.Wrap("can't set filemode on tempfile: " + tmpFileName + ", error: " + err.Error())
+		}
+	}
+
+	if err := AtomicReplaceFile(tmpFileName, filePath); err != nil {
+		return errormdl.Wrap("cannot replace " + filePath + " with " + tmpFileName)
+	}
+	return nil
+}
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	n, err := f.Write(data)
+	if err != nil {
+		return err
+	}
+	if n < len(data) {
+		return errormdl.Wrap("short write")
+	}
+	return f.Sync()
+}
diff --git a/filemdl/filemdl_linux.go b/filemdl/filemdl_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..acd187767d9c8ebcafabb998afd29d37eca8e74f
--- /dev/null
+++ b/filemdl/filemdl_linux.go
@@ -0,0 +1,15 @@
+// The build constraint below restricts this file to non-Windows platforms.
+// +build !windows
+
+package filemdl
+
+import (
+	"os"
+)
+
+// AtomicReplaceFile atomically replaces the destination file or directory with the
+// source.  It is guaranteed to either replace the target file entirely, or not
+// change either file.
+func AtomicReplaceFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
diff --git a/filemdl/filemdl_windows.go b/filemdl/filemdl_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..824438f0018c14ee0ad7759c1540a506806aee68
--- /dev/null
+++ b/filemdl/filemdl_windows.go
@@ -0,0 +1,51 @@
+package filemdl
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	moveFileReplacExisting = 0x1
+	moveFileWriteThrough   = 0x8
+)
+
+var (
+	modkernel32     = syscall.NewLazyDLL("kernel32.dll")
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW
+
+// AtomicReplaceFile atomically replaces the destination file or directory with the
+// source.  It is guaranteed to either replace the target file entirely, or not
+// change either file.
+func AtomicReplaceFile(source, destination string) error {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, moveFileReplacExisting|moveFileWriteThrough); err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	return nil
+}
+
+func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/filemdl/filepack/filePack.go b/filemdl/filepack/filePack.go
new file mode 100644
index 0000000000000000000000000000000000000000..835cdb309499c3062ff8bf831ffcd1a16fc60524
--- /dev/null
+++ b/filemdl/filepack/filePack.go
@@ -0,0 +1,560 @@
+package filepack
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+
+	"github.com/tidwall/gjson"
+
+	"github.com/tidwall/sjson"
+
+	"strconv"
+)
+
+const (
+	// PackNotFound - sentinel error message returned when no pack file is found
+	PackNotFound = "packNotFound"
+)
+
+var (
+	encryptionKey = "3Kc7$3cu31tyCFDB"
+	// packMux serializes pack mutations; a function-local mutex would provide
+	// no synchronization across goroutines
+	packMux = sync.Mutex{}
+)
+
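+// Pack file layout (as written by CreatePackFile and wrapUpSession):
+//   bytes 0-14  : zero-padded decimal start offset of the index table
+//   bytes 15-29 : zero-padded decimal byte size of the encrypted index table
+//   bytes 30+   : concatenated file contents
+//   tail        : AES-encrypted JSON array of index records of the form
+//                 {fileName, allocation: {startOffset, byteSize, hashText}}
+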
+// Init - overrides the default AES key used to encrypt pack index tables
+func Init(key string) {
+	encryptionKey = key
+}
+
+// CreatePackFile - creates a new/initial pack file containing the given files
+func CreatePackFile(filePathArray []string, packCreationPath string) error {
+	// open pack file for writing
+	f, err := filemdl.OpenFile(packCreationPath,
+		os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// reserve the 30-byte header with placeholder bytes; wrapUpSession
+	// overwrites it with the index table offset and size
+	f.WriteAt([]byte("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"), 0)
+	indexTableStartOffset := int64(30)
+
+	indexTableRecords := "[]"
+	for index := 0; index < len(filePathArray); index++ {
+		// read data of given file
+		newFileData, readErr := filemdl.FastReadFile(filePathArray[index])
+		if readErr != nil {
+			return readErr
+		}
+
+		newFileSize := int64(len(newFileData))
+
+		// create hash for file content
+		hashText, err := getHashOfFile(newFileData)
+		if err != nil {
+			loggermdl.LogError("failed to create pack: ", err)
+			return err
+		}
+
+		// write content to file
+		f.WriteAt((newFileData), indexTableStartOffset)
+
+		// append new record to index table
+		indexTableRecords = createNewIndexRecord(indexTableRecords, filePathArray[index], indexTableStartOffset, newFileSize, hashText)
+
+		// increment startOffset with file size
+		indexTableStartOffset += newFileSize
+	}
+
+	// write final updated index table to pack file
+	return wrapUpSession(indexTableStartOffset, []byte(indexTableRecords), packCreationPath)
+}
+
+// CreateUpdatedPack - creates a new pack containing only the files whose content hash differs from the existing pack
+func CreateUpdatedPack(filePathArray []string, packFilePath string, destinationPackFilePath string) error {
+	// fetch indexTableRecords and it's startOffset from pack file
+	indexTableRecords, _, err := fetchIndexTable(packFilePath)
+	if err != nil {
+		return err
+	}
+
+	listOfUpdatedFiles := []string{}
+
+	for index := 0; index < len(filePathArray); index++ {
+		// read data of given file
+		newFileData, readErr := filemdl.FastReadFile(filePathArray[index])
+		if readErr != nil {
+			loggermdl.LogError("failed to create pack: ", readErr)
+			return readErr
+		}
+
+		// calculate hash of source file content
+		newHashText, err := getHashOfFile(newFileData)
+		if err != nil {
+			loggermdl.LogError("failed to create pack: ", err)
+			return err
+		}
+
+		// fetch hashText of given file in pack file
+		existingFileHashText := getFileHashTextFromPack(indexTableRecords, filePathArray[index])
+
+		// compare the new content hash with the existing record in the pack
+		if newHashText != existingFileHashText {
+			listOfUpdatedFiles = append(listOfUpdatedFiles, filePathArray[index])
+		}
+	}
+
+	if len(listOfUpdatedFiles) == 0 {
+		// no changes to create pack
+		return nil
+	}
+
+	// create new pack with only changed content
+	return CreatePackFile(listOfUpdatedFiles, destinationPackFilePath)
+}
+
+// MergePack - merges the contents of the source pack into the destination pack
+func MergePack(sourcePackFilePath string, destinationPackFilePath string) error {
+	// open destination pack file for writing
+	f, err := filemdl.OpenFile(destinationPackFilePath,
+		os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// fetch records from sourcePackFilePath
+	sourcePackIndexTableRecords, _, err := fetchIndexTable(sourcePackFilePath)
+	if err != nil {
+		return err
+	}
+
+	// fetch all file names from the source pack
+	fileListFromSourcePack := gjson.Get(sourcePackIndexTableRecords, "#.fileName").Array()
+
+	// fetch indexTableRecords and it's startOffset from pack file
+	destinationPackIndexTableRecords, destinationPackIndexTableStartOffset, err := fetchIndexTable(destinationPackFilePath)
+	if err != nil {
+		return err
+	}
+
+	for index := 0; index < len(fileListFromSourcePack); index++ {
+		// read data of given file
+		newFileData, err := GetFileDataFromPack(sourcePackFilePath, fileListFromSourcePack[index].String())
+		if err != nil {
+			loggermdl.LogError("failed to merge pack: ", err)
+			return err
+		}
+		// calculate length of source file content
+		newFileSize := int64(len(newFileData))
+
+		// create hash for file content
+		hashText, err := getHashOfFile(newFileData)
+		if err != nil {
+			loggermdl.LogError("failed to merge pack: ", err)
+			return err
+		}
+		// fetch existing file size from pack file
+		existingFileSize := getFileSizeFromPack(destinationPackIndexTableRecords, fileListFromSourcePack[index].String())
+
+		// fetch startOffset of given file in pack file if exist (If not then it is new file)
+		existingFileStartOffset := getFileStartOffsetFromPack(destinationPackIndexTableRecords, fileListFromSourcePack[index].String())
+
+		// check whether the new content fits in the file's existing slot
+		if newFileSize <= existingFileSize {
+			// write changed content to file
+			f.WriteAt(newFileData, existingFileStartOffset)
+
+			// update the index table record in place
+			destinationPackIndexTableRecords = updateJSON(destinationPackIndexTableRecords, fileListFromSourcePack[index].String(), existingFileStartOffset, newFileSize, hashText)
+		} else {
+			// write changed content to file
+			f.WriteAt(newFileData, destinationPackIndexTableStartOffset)
+
+			// check it is existing file or new file
+			if existingFileStartOffset == 0 {
+				// append new record to index table
+				destinationPackIndexTableRecords = createNewIndexRecord(destinationPackIndexTableRecords, fileListFromSourcePack[index].String(), destinationPackIndexTableStartOffset, newFileSize, hashText)
+			} else {
+				// update index table with the new offset and size
+				destinationPackIndexTableRecords = updateJSON(destinationPackIndexTableRecords, fileListFromSourcePack[index].String(), destinationPackIndexTableStartOffset, newFileSize, hashText)
+			}
+			// increment startOffset for moving index table ahead
+			destinationPackIndexTableStartOffset += newFileSize
+		}
+	}
+
+	// write final updated index table to pack file
+	return wrapUpSession(destinationPackIndexTableStartOffset, []byte(destinationPackIndexTableRecords), destinationPackFilePath)
+}
+
+// InsertFiles - inserts file data in pack
+func InsertFiles(packPath string, filePathArray []string) error {
+	packMux.Lock()
+	defer packMux.Unlock()
+	f, err := filemdl.OpenFile(packPath, os.O_RDWR, 0777)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	defer f.Close()
+	indexTableRecords, indexStartOffset, err := fetchIndexTableMeta(f)
+	if err != nil {
+		loggermdl.LogError("failed to insert file in pack: ", err)
+		return err
+	}
+	if len(indexTableRecords) == 0 {
+		indexTableRecords = "[]"
+	}
+
+	for _, filePath := range filePathArray {
+
+		// read data of given file
+		newFileData, readErr := filemdl.FastReadFile(filePath)
+		if readErr != nil {
+			return readErr
+		}
+
+		newFileSize := int64(len(newFileData))
+
+		// create hash for file content
+		hashText, err := getHashOfFile(newFileData)
+		if err != nil {
+			loggermdl.LogError("failed to insert file in pack: ", err)
+			return err
+		}
+		// write content to file
+		byteWritten, err := f.WriteAt((newFileData), indexStartOffset)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		if byteWritten < len(newFileData) {
+			loggermdl.LogError("short write: ", err)
+			return err
+		}
+
+		// append new record to index table
+		indexTableRecords = createNewIndexRecord(indexTableRecords, filePath, indexStartOffset, newFileSize, hashText)
+
+		// increment startOffset with file size
+		indexStartOffset += newFileSize
+	}
+	err = f.Sync()
+	if err != nil {
+		loggermdl.LogError("failed to insert file in pack: ", err)
+		return errormdl.Wrap("failed to insert file in pack" + err.Error())
+	}
+	err = f.Close()
+	if err != nil {
+		loggermdl.LogError(err)
+		return errormdl.Wrap("failed to insert file in pack" + err.Error())
+	}
+	err = wrapUpSession(indexStartOffset, []byte(indexTableRecords), packPath)
+	if err != nil {
+		loggermdl.LogError("failed to insert file in pack: ", err)
+		return errormdl.Wrap("failed to insert file in pack" + err.Error())
+	}
+
+	return nil
+}
+
+// GetFileDataListFromPack - returns a map of file name to file contents for the given files in the pack
+func GetFileDataListFromPack(packPath string, fileNameList []string) (map[string][]byte, error) {
+	mapToReturn := make(map[string][]byte)
+	f, err := filemdl.Open(packPath)
+	if err != nil {
+		return mapToReturn, err
+	}
+	defer f.Close()
+	indexData, _, err := fetchIndexTableMeta(f)
+	if err != nil {
+		return mapToReturn, err
+	}
+
+	for ind := 0; ind < len(fileNameList); ind++ {
+		sizeOfFile := gjson.Get(indexData, `#[fileName=`+fileNameList[ind]+`].allocation.byteSize`)
+		startRead := gjson.Get(indexData, `#[fileName=`+fileNameList[ind]+`].allocation.startOffset`)
+		_, err = f.Seek(startRead.Int(), 0)
+		if err != nil {
+			continue
+		}
+		bytesOfFile := make([]byte, sizeOfFile.Int())
+		_, err = f.Read(bytesOfFile)
+		if err != nil {
+			continue
+		}
+		mapToReturn[fileNameList[ind]] = bytesOfFile
+	}
+
+	return mapToReturn, nil
+}
+
+// GetFileDataFromPack - returns the contents of a single file stored in the pack
+func GetFileDataFromPack(packPath string, fileName string) ([]byte, error) {
+	f, err := filemdl.Open(packPath)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer f.Close()
+	indexData, _, err := fetchIndexTableMeta(f)
+	if err != nil {
+		return []byte{}, err
+	}
+	// read data from pack
+	sizeOfFile := gjson.Get(indexData, `#[fileName=`+fileName+`].allocation.byteSize`)
+	startRead := gjson.Get(indexData, `#[fileName=`+fileName+`].allocation.startOffset`)
+	_, err = f.Seek(startRead.Int(), 0)
+	if err != nil {
+		return []byte{}, err
+	}
+	bytesOfFile := make([]byte, sizeOfFile.Int())
+	_, err = f.Read(bytesOfFile)
+	if err != nil {
+		return []byte{}, err
+	}
+	return bytesOfFile, nil
+}
+
+// DeleteFileFromPack - deletes file from pack
+func DeleteFileFromPack(packPath string, fileName string) error {
+	packMux.Lock()
+	defer packMux.Unlock()
+	f, err := filemdl.Open(packPath)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	defer f.Close()
+	indexTableRecords, indexStartOffset, err := fetchIndexTableMeta(f)
+	if err != nil {
+		loggermdl.LogError("failed to delete file from pack: ", err)
+		return err
+	}
+	indexRecords := gjson.Parse(indexTableRecords)
+	if !indexRecords.IsArray() {
+		loggermdl.LogError("no file found to delete")
+		return errormdl.Wrap("no file found to delete")
+	}
+
+	indexToDelete := -1
+	for index, record := range indexRecords.Array() {
+		if fileName == record.Get("fileName").String() {
+			indexToDelete = index
+			break
+		}
+	}
+	if indexToDelete == -1 {
+		loggermdl.LogError("no file found to delete")
+		return errormdl.Wrap("no file found to delete")
+	}
+
+	updatedIndexTableRecords, err := sjson.Delete(indexTableRecords, strconv.Itoa(indexToDelete))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	err = wrapUpSession(indexStartOffset, []byte(updatedIndexTableRecords), packPath)
+	if err != nil {
+		loggermdl.LogError("failed to delete file from pack: ", err)
+		return errormdl.Wrap("failed to delete file from pack" + err.Error())
+	}
+
+	return nil
+}
+
+// SearchPackFilePath - walks up from dirPath toward searchTillPath looking for a "<dirName>.pack" file and returns its path
+func SearchPackFilePath(dirPath, searchTillPath string) (string, error) {
+	path := ""
+	dirPath = filepath.Join(dirPath)
+	pathChunkList := strings.Split(strings.Trim(dirPath, string(filepath.Separator)), string(filepath.Separator))
+	lenPathChunkList := len(pathChunkList)
+	trimmedBucket := ""
+	for i := 1; true; i++ {
+		fileInfoList, err := filemdl.ListDirectory(dirPath)
+		if err != nil {
+			loggermdl.LogError("failed to find pack path: ", err)
+			return "", errormdl.Wrap("failed to find pack path: " + err.Error())
+		}
+
+		for _, fileInfo := range fileInfoList {
+			if trimmedBucket+".pack" == fileInfo.Name() {
+				path = filepath.Join(dirPath, fileInfo.Name())
+				break
+			}
+		}
+		if len(dirPath) == 0 || dirPath == searchTillPath {
+			break
+		}
+
+		trimmedBucket = pathChunkList[lenPathChunkList-i]
+		dirPath = strings.TrimSuffix(dirPath, string(filepath.Separator)+trimmedBucket)
+
+	}
+	if len(path) == 0 {
+		return "", errormdl.Wrap(PackNotFound)
+	}
+	return path, nil
+}
+
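+// Pack file layout, as implied by fetchIndexTableMeta and wrapUpSession below:
+//
+//	bytes [0, 15)  - zero-padded decimal start offset of the index table
+//	bytes [15, 30) - zero-padded decimal length of the encrypted index table
+//	bytes [30, N)  - packed file contents
+//	at the offset  - AES-encrypted JSON array of index records
+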
+func fetchIndexTableMeta(file *os.File) (string, int64, error) {
+	// the first 15 bytes hold the zero-padded start offset of the index table
+	_, err := file.Seek(0, 0)
+	if err != nil {
+		return "", -1, err
+	}
+	indexOffsetHeader := make([]byte, 15)
+	_, err = file.Read(indexOffsetHeader)
+	if err != nil {
+		return "", -1, err
+	}
+	// the next 15 bytes hold the zero-padded length of the index table
+	_, err = file.Seek(15, 0)
+	if err != nil {
+		return "", -1, err
+	}
+	indexSizeHeader := make([]byte, 15)
+	_, err = file.Read(indexSizeHeader)
+	if err != nil {
+		return "", -1, err
+	}
+	startOffset, err := strconv.ParseInt(string(indexOffsetHeader), 10, 64)
+	if err != nil {
+		return "", -1, err
+	}
+	size, err := strconv.ParseInt(string(indexSizeHeader), 10, 64)
+	if err != nil {
+		return "", -1, err
+	}
+	_, err = file.Seek(startOffset, 0)
+	if err != nil {
+		return "", -1, err
+	}
+	metaByte := make([]byte, size)
+	_, err = file.Read(metaByte)
+	if err != nil {
+		return "", -1, err
+	}
+
+	decryptedData, err := securitymdl.AESDecrypt(metaByte, []byte(encryptionKey))
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", -1, err
+	}
+	return string(decryptedData), startOffset, nil
+}
+
+func fetchIndexTable(packFilePath string) (string, int64, error) {
+	f, err := filemdl.Open(packFilePath)
+	if err != nil {
+		return "", 0, errormdl.Wrap("no pack found: " + packFilePath)
+	}
+	defer f.Close()
+	return fetchIndexTableMeta(f)
+}
+
+func getFileSizeFromPack(data string, name string) int64 {
+	sizeOfFile := gjson.Get(data, `#[fileName=`+name+`].allocation.byteSize`)
+	return sizeOfFile.Int()
+}
+
+func getFileStartOffsetFromPack(data string, name string) int64 {
+	startOffset := gjson.Get(data, `#[fileName=`+name+`].allocation.startOffset`)
+	return startOffset.Int()
+}
+
+func getFileHashTextFromPack(data string, name string) string {
+	hashText := gjson.Get(data, `#[fileName=`+name+`].allocation.hashText`)
+	return hashText.String()
+}
+
+// appendPaddingToNumber renders value as a fixed-width, zero-padded 15-character decimal string
+func appendPaddingToNumber(value int64) string {
+	return fmt.Sprintf("%015d", value)
+}
+
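+// wrapUpSession encrypts the index table, writes it at endOffset, and
+// refreshes the two fixed-width headers recording its offset and length.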
+func wrapUpSession(endOffset int64, indexingData []byte, packCreationPath string) error {
+	f, err := filemdl.OpenFile(packCreationPath,
+		os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	encryptedIndexData, err := securitymdl.AESEncrypt(indexingData, []byte(encryptionKey))
+	if err != nil {
+		return err
+	}
+	if _, err = f.WriteAt(encryptedIndexData, endOffset); err != nil {
+		return err
+	}
+	if _, err = f.WriteAt([]byte(appendPaddingToNumber(endOffset)), 0); err != nil {
+		return err
+	}
+	if _, err = f.WriteAt([]byte(appendPaddingToNumber(int64(len(encryptedIndexData)))), 15); err != nil {
+		return err
+	}
+	return nil
+}
+
+func updateJSON(jsonData string, name string, startOffset int64, size int64, hashText string) string {
+	res := gjson.Parse(jsonData)
+	var updatedRes []interface{}
+
+	res.ForEach(func(key, value gjson.Result) bool {
+		if value.Get("fileName").String() == name {
+			// Update byte size
+			updateValue, err := sjson.Set(value.String(), "allocation.byteSize", size)
+			if err != nil {
+				return false
+			}
+			// Update startOffset
+			updateValue, err = sjson.Set(updateValue, "allocation.startOffset", startOffset)
+			if err != nil {
+				return false
+			}
+			// Update hashText
+			updateValue, err = sjson.Set(updateValue, "allocation.hashText", hashText)
+			if err != nil {
+				return false
+			}
+			updatedRes = append(updatedRes, gjson.Parse(updateValue).Value())
+		} else {
+			updatedRes = append(updatedRes, value.Value())
+		}
+		return true // keep iterating
+	})
+	temp := `{}`
+	updateValue, _ := sjson.Set(temp, "writeData", updatedRes)
+	fileData := gjson.Parse(updateValue)
+
+	return fileData.Get("writeData").String()
+}
+
+func getHashOfFile(data []byte) (string, error) {
+	// create uint64 hash for file content
+	hashOfSourceData, err := hashmdl.GetHashChecksumOfByteArray(data)
+	if err != nil {
+		return "", err
+	}
+	// convert uint64 hash to string
+	hashText := strconv.FormatUint(hashOfSourceData, 10)
+	return hashText, nil
+}
+
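+// createNewIndexRecord appends a record of the shape (fields taken from the sjson calls below):
+//
+//	{"fileName":"<name>","allocation":{"startOffset":N,"byteSize":N,"hashText":"<checksum>"}}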
+func createNewIndexRecord(indexTableRecords string, fileName string, startOffset int64, byteSize int64, hashText string) string {
+	newRecord := `{}`
+	newRecord, _ = sjson.Set(newRecord, "fileName", fileName)
+	newRecord, _ = sjson.Set(newRecord, "allocation.startOffset", startOffset)
+	newRecord, _ = sjson.Set(newRecord, "allocation.byteSize", byteSize)
+	newRecord, _ = sjson.Set(newRecord, "allocation.hashText", hashText)
+	parsedRecord := gjson.Parse(newRecord)
+	indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedRecord.Value())
+
+	return indexTableRecords
+}
diff --git a/filemdl/filepack/filepack_test.go b/filemdl/filepack/filepack_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba637e82b635585fc91c81056812f8c132ce31d9
--- /dev/null
+++ b/filemdl/filepack/filepack_test.go
@@ -0,0 +1,95 @@
+package filepack
+
+import (
+	"log"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+)
+
+func TestCreatePack(t *testing.T) {
+	// get file list
+	pathList, err := filemdl.ListFileRecursively("/home/vivekn/fdb_data/Candidates")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// create pack
+	err = CreatePackFile(pathList, "/home/vivekn/fdb_data/Candidates.pack")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestInsertFiles(t *testing.T) {
+	// get file list
+	fileList := []string{"/home/vivekn/cmd"}
+	err := InsertFiles("/home/vivekn/fdb_data/Candidates.pack", fileList)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+func TestGetFileData(t *testing.T) {
+	// data, err := GetFileDataFromPack("/home/vivekn/fdb_data/Candidates.pack", "/home/vivekn/fdb_data/Candidates/10008")
+	data, err := GetFileDataFromPack("/home/vivekn/fdb_data/Candidates.pack", "/home/vivekn/cmd")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	loggermdl.LogDebug("data", string(data))
+}
+func TestDeleteFile(t *testing.T) {
+	// create pack
+	err := DeleteFileFromPack("/home/vivekn/fdb_data/Candidates.pack", "/home/vivekn/fdb_data/Candidates/10008")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func BenchmarkCreatePack(b *testing.B) {
+	// get file list
+	pathList, err := filemdl.ListFileRecursively("/home/vivekn/fdb_data/Candidates")
+	if err != nil {
+		b.Fatal(err)
+	}
+	for index := 0; index < b.N; index++ {
+		// create pack
+		err = CreatePackFile(pathList, "/home/vivekn/fdb_data/Candidates.pack")
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+
+}
+
+func BenchmarkGetFileDataListFromPack(b *testing.B) {
+	pathList := []string{"/home/vivekn/fdb_data/Candidates/file200", "/home/vivekn/fdb_data/Candidates/file100", "/home/vivekn/fdb_data/Candidates/file202"}
+	for index := 0; index < b.N; index++ {
+		_, err := GetFileDataListFromPack("/home/vivekn/fdb_data/Candidates.pack", pathList)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+
+}
+
+func BenchmarkGetFileDataFromPack(b *testing.B) {
+	// pathList := []string{"/home/vivekn/fdb_data/Candidates/file200", "/home/vivekn/fdb_data/Candidates/file100", "/home/vivekn/fdb_data/Candidates/file202"}
+	for index := 0; index < b.N; index++ {
+		_, err := GetFileDataFromPack("/home/vivekn/fdb_data/Candidates.pack", "/home/vivekn/fdb_data/Candidates/file1")
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+
+}
+
+func TestFastWriteFile(t *testing.T) {
+	err := filemdl.FastWriteFile("C:/Users/shridharp/Desktop/Test/testF.txt", []byte("how are you"), true, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/generate.sh b/generate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2830f60b7acebcabd513a0820587ad5f2e2de99d
--- /dev/null
+++ b/generate.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
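+# requires protoc and the protoc-gen-go plugin to be installed and on PATH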
+protoc grpcbuilder/grpcbuildermdl.proto --go_out=plugins=grpc:.
\ No newline at end of file
diff --git a/grpcbuildermdl/grpcbuildermdl.pb.go b/grpcbuildermdl/grpcbuildermdl.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..527d7adc78dc3f03a8245092beac2054852decbb
--- /dev/null
+++ b/grpcbuildermdl/grpcbuildermdl.pb.go
@@ -0,0 +1,414 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpcbuildermdl.proto
+
+package grpcbuildermdl
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type GRPCMessage struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	IsRestricted         bool     `protobuf:"varint,3,opt,name=isRestricted,proto3" json:"isRestricted,omitempty"`
+	IsRoleBased          bool     `protobuf:"varint,4,opt,name=isRoleBased,proto3" json:"isRoleBased,omitempty"`
+	Token                string   `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GRPCMessage) Reset()         { *m = GRPCMessage{} }
+func (m *GRPCMessage) String() string { return proto.CompactTextString(m) }
+func (*GRPCMessage) ProtoMessage()    {}
+func (*GRPCMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d80590c423530f2, []int{0}
+}
+
+func (m *GRPCMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCMessage.Unmarshal(m, b)
+}
+func (m *GRPCMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCMessage.Marshal(b, m, deterministic)
+}
+func (m *GRPCMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCMessage.Merge(m, src)
+}
+func (m *GRPCMessage) XXX_Size() int {
+	return xxx_messageInfo_GRPCMessage.Size(m)
+}
+func (m *GRPCMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCMessage proto.InternalMessageInfo
+
+func (m *GRPCMessage) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *GRPCMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *GRPCMessage) GetIsRestricted() bool {
+	if m != nil {
+		return m.IsRestricted
+	}
+	return false
+}
+
+func (m *GRPCMessage) GetIsRoleBased() bool {
+	if m != nil {
+		return m.IsRoleBased
+	}
+	return false
+}
+
+func (m *GRPCMessage) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
+type GRPCRequest struct {
+	GrpcMessage          *GRPCMessage `protobuf:"bytes,1,opt,name=grpcMessage,proto3" json:"grpcMessage,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *GRPCRequest) Reset()         { *m = GRPCRequest{} }
+func (m *GRPCRequest) String() string { return proto.CompactTextString(m) }
+func (*GRPCRequest) ProtoMessage()    {}
+func (*GRPCRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d80590c423530f2, []int{1}
+}
+
+func (m *GRPCRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCRequest.Unmarshal(m, b)
+}
+func (m *GRPCRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCRequest.Marshal(b, m, deterministic)
+}
+func (m *GRPCRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCRequest.Merge(m, src)
+}
+func (m *GRPCRequest) XXX_Size() int {
+	return xxx_messageInfo_GRPCRequest.Size(m)
+}
+func (m *GRPCRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCRequest proto.InternalMessageInfo
+
+func (m *GRPCRequest) GetGrpcMessage() *GRPCMessage {
+	if m != nil {
+		return m.GrpcMessage
+	}
+	return nil
+}
+
+type GRPCResponse struct {
+	Data                 string   `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GRPCResponse) Reset()         { *m = GRPCResponse{} }
+func (m *GRPCResponse) String() string { return proto.CompactTextString(m) }
+func (*GRPCResponse) ProtoMessage()    {}
+func (*GRPCResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d80590c423530f2, []int{2}
+}
+
+func (m *GRPCResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCResponse.Unmarshal(m, b)
+}
+func (m *GRPCResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCResponse.Marshal(b, m, deterministic)
+}
+func (m *GRPCResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCResponse.Merge(m, src)
+}
+func (m *GRPCResponse) XXX_Size() int {
+	return xxx_messageInfo_GRPCResponse.Size(m)
+}
+func (m *GRPCResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCResponse proto.InternalMessageInfo
+
+func (m *GRPCResponse) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+type GRPCByteResponse struct {
+	Data                 []byte   `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	ErrorCode            int32    `protobuf:"varint,2,opt,name=errorCode,proto3" json:"errorCode,omitempty"`
+	Error                string   `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GRPCByteResponse) Reset()         { *m = GRPCByteResponse{} }
+func (m *GRPCByteResponse) String() string { return proto.CompactTextString(m) }
+func (*GRPCByteResponse) ProtoMessage()    {}
+func (*GRPCByteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d80590c423530f2, []int{3}
+}
+
+func (m *GRPCByteResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCByteResponse.Unmarshal(m, b)
+}
+func (m *GRPCByteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCByteResponse.Marshal(b, m, deterministic)
+}
+func (m *GRPCByteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCByteResponse.Merge(m, src)
+}
+func (m *GRPCByteResponse) XXX_Size() int {
+	return xxx_messageInfo_GRPCByteResponse.Size(m)
+}
+func (m *GRPCByteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCByteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCByteResponse proto.InternalMessageInfo
+
+func (m *GRPCByteResponse) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *GRPCByteResponse) GetErrorCode() int32 {
+	if m != nil {
+		return m.ErrorCode
+	}
+	return 0
+}
+
+func (m *GRPCByteResponse) GetError() string {
+	if m != nil {
+		return m.Error
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*GRPCMessage)(nil), "grpcbuildermdl.GRPCMessage")
+	proto.RegisterType((*GRPCRequest)(nil), "grpcbuildermdl.GRPCRequest")
+	proto.RegisterType((*GRPCResponse)(nil), "grpcbuildermdl.GRPCResponse")
+	proto.RegisterType((*GRPCByteResponse)(nil), "grpcbuildermdl.GRPCByteResponse")
+}
+
+func init() { proto.RegisterFile("grpcbuildermdl.proto", fileDescriptor_8d80590c423530f2) }
+
+var fileDescriptor_8d80590c423530f2 = []byte{
+	// 308 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xb1, 0x4e, 0xf3, 0x30,
+	0x14, 0x85, 0xff, 0xfc, 0xb4, 0x88, 0xdc, 0x54, 0xa8, 0xb2, 0x3a, 0x44, 0xd0, 0x21, 0xf2, 0x94,
+	0xa9, 0x43, 0x98, 0x59, 0x92, 0x81, 0x0e, 0x80, 0x90, 0x11, 0x4b, 0x25, 0x06, 0x37, 0xbe, 0x2a,
+	0x51, 0xd3, 0x38, 0xd8, 0x2e, 0x12, 0x8f, 0xc1, 0x1b, 0x23, 0xdb, 0xa9, 0x9a, 0x56, 0x11, 0xdb,
+	0x3d, 0xc7, 0x47, 0xd7, 0xe7, 0xb3, 0x0c, 0xb3, 0x8d, 0x6a, 0xcb, 0xf5, 0xbe, 0xaa, 0x05, 0xaa,
+	0x9d, 0xa8, 0x17, 0xad, 0x92, 0x46, 0x92, 0xeb, 0x53, 0x97, 0xfe, 0x04, 0x10, 0x3d, 0xb0, 0x97,
+	0xe2, 0x09, 0xb5, 0xe6, 0x1b, 0x24, 0x04, 0x46, 0x0d, 0xdf, 0x61, 0x1c, 0x24, 0x41, 0x1a, 0x32,
+	0x37, 0x5b, 0x4f, 0x70, 0xc3, 0xe3, 0xff, 0x49, 0x90, 0x4e, 0x98, 0x9b, 0x09, 0x85, 0x49, 0xa5,
+	0x19, 0x6a, 0xa3, 0xaa, 0xd2, 0xa0, 0x88, 0x2f, 0x92, 0x20, 0xbd, 0x62, 0x27, 0x1e, 0x49, 0x20,
+	0xaa, 0x34, 0x93, 0x35, 0xe6, 0x5c, 0xa3, 0x88, 0x47, 0x2e, 0xd2, 0xb7, 0xc8, 0x0c, 0xc6, 0x46,
+	0x6e, 0xb1, 0x89, 0xc7, 0xee, 0x3a, 0x2f, 0xe8, 0xa3, 0xaf, 0xc4, 0xf0, 0x73, 0x8f, 0xda, 0x90,
+	0x7b, 0x88, 0x6c, 0xe9, 0xae, 0xa1, 0x6b, 0x16, 0x65, 0xb7, 0x8b, 0x33, 0xbc, 0x1e, 0x04, 0xeb,
+	0xe7, 0x29, 0x85, 0x89, 0xdf, 0xa6, 0x5b, 0xd9, 0xe8, 0x23, 0x4d, 0x47, 0x68, 0x67, 0xba, 0x82,
+	0xa9, 0xcd, 0xe4, 0xdf, 0x06, 0x07, 0x73, 0x07, 0xea, 0x39, 0x84, 0xa8, 0x94, 0x54, 0x85, 0x14,
+	0xe8, 0x9e, 0x63, 0xcc, 0x8e, 0x86, 0xa5, 0x71, 0xc2, 0x3d, 0x46, 0xc8, 0xbc, 0xc8, 0xde, 0x3d,
+	0xcd, 0x2b, 0xaa, 0xaf, 0xaa, 0x44, 0xf2, 0xec, 0xe5, 0x92, 0x37, 0xa2, 0x46, 0x45, 0x06, 0x39,
+	0x3a, 0xf2, 0x9b, 0x64, 0xe8, 0xb0, 0x5f, 0x92, 0xfe, 0xcb, 0xde, 0x20, 0xb4, 0x6e, 0xf1, 0x81,
+	0xe5, 0x96, 0x2c, 0xfb, 0xe2, 0xcf, 0xd5, 0xf3, 0xe1, 0xc3, 0xc3, 0xda, 0x7c, 0xba, 0x3a, 0xfb,
+	0x29, 0xeb, 0x4b, 0xf7, 0x81, 0xee, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xce, 0x75, 0x30, 0xf3,
+	0x58, 0x02, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// GRPCServiceClient is the client API for GRPCService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GRPCServiceClient interface {
+	GRPCHandler(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCByteResponse, error)
+}
+
+type gRPCServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewGRPCServiceClient(cc *grpc.ClientConn) GRPCServiceClient {
+	return &gRPCServiceClient{cc}
+}
+
+func (c *gRPCServiceClient) GRPCHandler(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCByteResponse, error) {
+	out := new(GRPCByteResponse)
+	err := c.cc.Invoke(ctx, "/grpcbuildermdl.GRPCService/GRPCHandler", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// GRPCServiceServer is the server API for GRPCService service.
+type GRPCServiceServer interface {
+	GRPCHandler(context.Context, *GRPCRequest) (*GRPCByteResponse, error)
+}
+
+// UnimplementedGRPCServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedGRPCServiceServer struct {
+}
+
+func (*UnimplementedGRPCServiceServer) GRPCHandler(ctx context.Context, req *GRPCRequest) (*GRPCByteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GRPCHandler not implemented")
+}
+
+func RegisterGRPCServiceServer(s *grpc.Server, srv GRPCServiceServer) {
+	s.RegisterService(&_GRPCService_serviceDesc, srv)
+}
+
+func _GRPCService_GRPCHandler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GRPCRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(GRPCServiceServer).GRPCHandler(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/grpcbuildermdl.GRPCService/GRPCHandler",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(GRPCServiceServer).GRPCHandler(ctx, req.(*GRPCRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _GRPCService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpcbuildermdl.GRPCService",
+	HandlerType: (*GRPCServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GRPCHandler",
+			Handler:    _GRPCService_GRPCHandler_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "grpcbuildermdl.proto",
+}
+
+// GRPCCheckClient is the client API for GRPCCheck service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GRPCCheckClient interface {
+	GRPCCheck(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCResponse, error)
+}
+
+type gRPCCheckClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewGRPCCheckClient(cc *grpc.ClientConn) GRPCCheckClient {
+	return &gRPCCheckClient{cc}
+}
+
+func (c *gRPCCheckClient) GRPCCheck(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCResponse, error) {
+	out := new(GRPCResponse)
+	err := c.cc.Invoke(ctx, "/grpcbuildermdl.GRPCCheck/GRPCCheck", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// GRPCCheckServer is the server API for GRPCCheck service.
+type GRPCCheckServer interface {
+	GRPCCheck(context.Context, *GRPCRequest) (*GRPCResponse, error)
+}
+
+// UnimplementedGRPCCheckServer can be embedded to have forward compatible implementations.
+type UnimplementedGRPCCheckServer struct {
+}
+
+func (*UnimplementedGRPCCheckServer) GRPCCheck(ctx context.Context, req *GRPCRequest) (*GRPCResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GRPCCheck not implemented")
+}
+
+func RegisterGRPCCheckServer(s *grpc.Server, srv GRPCCheckServer) {
+	s.RegisterService(&_GRPCCheck_serviceDesc, srv)
+}
+
+func _GRPCCheck_GRPCCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GRPCRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(GRPCCheckServer).GRPCCheck(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/grpcbuildermdl.GRPCCheck/GRPCCheck",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(GRPCCheckServer).GRPCCheck(ctx, req.(*GRPCRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _GRPCCheck_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpcbuildermdl.GRPCCheck",
+	HandlerType: (*GRPCCheckServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GRPCCheck",
+			Handler:    _GRPCCheck_GRPCCheck_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "grpcbuildermdl.proto",
+}
diff --git a/grpcbuildermdl/grpcbuildermdl.proto b/grpcbuildermdl/grpcbuildermdl.proto
new file mode 100644
index 0000000000000000000000000000000000000000..657ee52bc3e692325ea33bf35cf5c0d4ec258ae1
--- /dev/null
+++ b/grpcbuildermdl/grpcbuildermdl.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+package grpcbuildermdl;
+option go_package = "grpcbuildermdl";
+
+message GRPCMessage {
+    string name = 1;
+    bytes data = 2;
+    bool isRestricted = 3;
+    bool isRoleBased = 4;
+    string token = 5;
+}
+
+message GRPCRequest {
+    GRPCMessage grpcMessage = 1;
+}
+
+message GRPCResponse {
+    string data = 1;
+}
+
+message GRPCByteResponse {
+    bytes data = 1;
+    int32 errorCode = 2;
+    string error = 3;
+}
+
+service GRPCService {
+    rpc GRPCHandler(GRPCRequest) returns (GRPCByteResponse) {}
+}
+
+service GRPCCheck {
+    rpc GRPCCheck(GRPCRequest) returns (GRPCResponse) {}
+}
\ No newline at end of file
diff --git a/grpcclientmdl/grpcclientmdl.go b/grpcclientmdl/grpcclientmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd3f5344d7f2aa56fd18e35b1d973925d1e534db
--- /dev/null
+++ b/grpcclientmdl/grpcclientmdl.go
@@ -0,0 +1,80 @@
+package grpcclientmdl
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/grpcbuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	grpcpool "github.com/processout/grpc-go-pool"
+	"google.golang.org/grpc"
+)
+
+// TotalCheck - runs a connectivity check against the GRPCCheck service
+func TotalCheck() (string, error) {
+	// TODO: review
+	conn, err := grpc.Dial("0.0.0.0:50051", grpc.WithInsecure())
+	if err != nil {
+		loggermdl.LogError("Could not connect: ", err)
+		return "", err
+	}
+	defer conn.Close()
+	c := grpcbuildermdl.NewGRPCCheckClient(conn)
+
+	req := &grpcbuildermdl.GRPCRequest{}
+	res, err := c.GRPCCheck(context.Background(), req)
+	if err != nil {
+		return "", err
+	}
+	return res.GetData(), nil
+}
+
+var instances map[string]*grpcpool.Pool
+
+// Init - creates one connection pool per gRPC server URL, keyed by the matching client name
+func Init(grpcServerURLList []string, grpcClients []string) {
+	instances = make(map[string]*grpcpool.Pool)
+	if len(grpcServerURLList) != len(grpcClients) {
+		loggermdl.LogError("Init: grpcServerURLList and grpcClients must have the same length")
+		return
+	}
+	loggermdl.LogInfo("Length of grpcServerURLList", len(grpcServerURLList))
+
+	for index := 0; index < len(grpcServerURLList); index++ {
+		CreateConnection(grpcServerURLList[index], grpcClients[index])
+	}
+	loggermdl.LogInfo("instances", instances)
+}
+
+// ByteHandler - runs a unary GRPCHandler call on a pooled connection; grpcServerURL
+// must match the client name the pool was registered under in Init
+func ByteHandler(req *grpcbuildermdl.GRPCRequest, grpcServerURL string) ([]byte, int32, string, error) {
+	if instances[grpcServerURL] != nil {
+		conn, err := instances[grpcServerURL].Get(context.Background())
+		if err != nil {
+			loggermdl.LogError("failed to get connection from gRPC pool: ", err)
+			return nil, 0, "", err
+		}
+		// Close returns the connection to the pool rather than tearing it down
+		defer conn.Close()
+		client := grpcbuildermdl.NewGRPCServiceClient(conn.ClientConn)
+		loggermdl.LogDebug("request data sent", req)
+		res, err := client.GRPCHandler(context.Background(), req)
+		if err != nil {
+			loggermdl.LogError("GRPCHandler err: ", err)
+			return res.GetData(), res.GetErrorCode(), res.GetError(), err
+		}
+		loggermdl.LogDebug("response data received", res)
+		return res.GetData(), res.GetErrorCode(), res.GetError(), nil
+	}
+	return nil, 0, "", errors.New("no grpc connection found")
+}
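+
+// Typical wiring (server address and client name are illustrative):
+//
+//	Init([]string{"127.0.0.1:50051"}, []string{"myservice"})
+//	data, errCode, errMsg, err := ByteHandler(req, "myservice")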
+
+// CreateConnection - builds a pool of connections to serverURL and registers it under grpcClientName
+func CreateConnection(serverURL string, grpcClientName string) {
+	factory := grpcpool.Factory(func() (*grpc.ClientConn, error) {
+		conn, err := grpc.Dial(serverURL, grpc.WithInsecure())
+		if err != nil {
+			loggermdl.LogError("Failed to start gRPC connection: ", err)
+		}
+		return conn, err
+	})
+	pool, err := grpcpool.New(factory, 5, 5, time.Second)
+	if err != nil {
+		loggermdl.LogError("Failed to create gRPC pool: ", err)
+		return
+	}
+	instances[grpcClientName] = pool
+}
diff --git a/routebuildermdl/grpcservermdl.go b/routebuildermdl/grpcservermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae4228b5da99c520ea67582fa7a471d780b3d232
--- /dev/null
+++ b/routebuildermdl/grpcservermdl.go
@@ -0,0 +1,121 @@
+package routebuildermdl
+
+import (
+	"context"
+	"encoding/json"
+	"net"
+
+	"google.golang.org/grpc"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/roleenforcemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/grpcbuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+)
+
+// Server - implements the generated GRPCService and GRPCCheck server interfaces
+type Server struct{}
+
+// GRPCInit - registers the gRPC services and starts serving on the supplied listener
+func GRPCInit(GRPCPort net.Listener) {
+	loggermdl.LogInfo("In GRPCInit")
+	s := grpc.NewServer()
+	grpcbuildermdl.RegisterGRPCCheckServer(s, &Server{})
+	grpcbuildermdl.RegisterGRPCServiceServer(s, &Server{})
+	// log before Serve: Serve blocks until the listener is closed
+	loggermdl.LogInfo("GRPC server starting on:", GRPCPort.Addr().String())
+	if err := s.Serve(GRPCPort); err != nil {
+		loggermdl.LogError("Unable to start GRPC server: ", err)
+	}
+}
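+
+// A typical caller (listen address is illustrative):
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		loggermdl.LogError(err)
+//		return
+//	}
+//	go GRPCInit(lis)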
+
+// GRPCHandler - resolves the principal (for restricted calls) and executes the requested service
+func (*Server) GRPCHandler(ctx context.Context, req *grpcbuildermdl.GRPCRequest) (*grpcbuildermdl.GRPCByteResponse, error) {
+	loggermdl.LogDebug("GRPC Handler invoked")
+	principal := servicebuildermdl.Principal{}
+	errExecutingActivity := ""
+	if req.GetGrpcMessage().GetIsRestricted() {
+		claim, claimErr := jwtmdl.GeneratePricipleObjUsingToken(req.GetGrpcMessage().GetToken(), jwtmdl.GlobalJWTKey)
+		if claimErr != nil {
+			loggermdl.LogError("failed to parse JWT token: ", claimErr)
+			return nil, claimErr
+		}
+		groups, grperr := roleenforcemdl.GetGroupNames(claim, "groups")
+		if errormdl.CheckErr(grperr) != nil {
+			loggermdl.LogError("Error accessing group", grperr)
+			return nil, errormdl.CheckErr(grperr)
+		}
+		userID, ok := claim["userId"].(string)
+		if !ok || len(userID) < 2 {
+			loggermdl.LogError("Unable to parse UserID from JWT Token")
+			return nil, errormdl.Wrap("Unable to parse UserID from JWT Token")
+		}
+		rawMetadata, ok := claim["metadata"]
+		if ok {
+			loggermdl.LogDebug(rawMetadata)
+			metadata, ok := rawMetadata.(string)
+			if !ok {
+				loggermdl.LogError("Unable to parse metadata from JWT Token")
+				return nil, errormdl.Wrap("Unable to parse metadata from JWT Token")
+			}
+			principal.Metadata = metadata
+		}
+		principal.Groups = groups
+		principal.UserID = userID
+		principal.Token = req.GetGrpcMessage().GetToken()
+	}
+
+	loggermdl.LogError("Request Data send to activity", req)
+	result, nextDynamicPage, ab, isCompressed, errorCode, err := executeService(
+		req.GetGrpcMessage().GetName(),
+		req.GetGrpcMessage().GetData(), req.GetGrpcMessage().GetIsRestricted(),
+		req.GetGrpcMessage().GetIsRoleBased(), false, principal)
+	loggermdl.LogError("Response after execution of activity", result, nextDynamicPage, ab, isCompressed, errorCode, err)
+	e, marshalErr := json.Marshal(result)
+	if marshalErr != nil {
+		loggermdl.LogError("failed to marshal service result: ", marshalErr)
+		return nil, marshalErr
+	}
+	if err != nil {
+		errExecutingActivity = err.Error()
+	}
+	res := &grpcbuildermdl.GRPCByteResponse{
+		Data:      e,
+		ErrorCode: int32(errorCode),
+		Error:     errExecutingActivity,
+	}
+	return res, nil
+}
+
+// GRPCCheck - verifies the token, builds the principal, and executes the requested service
+func (*Server) GRPCCheck(ctx context.Context, req *grpcbuildermdl.GRPCRequest) (*grpcbuildermdl.GRPCResponse, error) {
+	loggermdl.LogDebug("GRPC Check invoked")
+	claim, claimErr := jwtmdl.GeneratePricipleObjUsingToken(req.GetGrpcMessage().GetToken(), jwtmdl.GlobalJWTKey)
+	if claimErr != nil {
+		loggermdl.LogError("failed to parse JWT token: ", claimErr)
+		return nil, claimErr
+	}
+	principal := servicebuildermdl.Principal{}
+	groups, grperr := roleenforcemdl.GetGroupNames(claim, "groups")
+	if errormdl.CheckErr(grperr) != nil {
+		loggermdl.LogError(grperr)
+		return nil, errormdl.CheckErr(grperr)
+	}
+	userID, ok := claim["userId"].(string)
+	if !ok || len(userID) < 2 {
+		loggermdl.LogError("Unable to parse UserID from JWT Token")
+		return nil, errormdl.Wrap("Unable to parse UserID from JWT Token")
+	}
+	rawMetadata, ok := claim["metadata"]
+	if ok {
+		loggermdl.LogDebug(rawMetadata)
+		metadata, ok := rawMetadata.(string)
+		if !ok {
+			loggermdl.LogError("Unable to parse metadata from JWT Token")
+			return nil, errormdl.Wrap("Unable to parse metadata from JWT Token")
+		}
+		principal.Metadata = metadata
+	}
+	principal.Groups = groups
+	principal.UserID = userID
+	principal.Token = req.GetGrpcMessage().GetToken()
+	result, nextDynamicPage, ab, isCompressed, errorCode, err := executeService(
+		req.GetGrpcMessage().GetName(),
+		req.GetGrpcMessage().GetData(),
+		req.GetGrpcMessage().GetIsRestricted(),
+		req.GetGrpcMessage().GetIsRoleBased(), false, principal)
+	res := &grpcbuildermdl.GRPCResponse{
+		Data: "Response from GRPC Check service",
+	}
+	loggermdl.LogDebug(result, nextDynamicPage, ab, isCompressed, errorCode, err)
+	return res, err
+}