diff --git a/v2/authmdl/aclmdl/aclmdl.go b/v2/authmdl/aclmdl/aclmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..50856c6b1cc0907de87434b4e79b7c3d7eb11c72
--- /dev/null
+++ b/v2/authmdl/aclmdl/aclmdl.go
@@ -0,0 +1,21 @@
+package aclmdl
+
+import (
+	"net/http"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/roleenforcemdl"
+	"github.com/gin-gonic/gin"
+)
+
+// ACLMiddleware returns a gin middleware that loads the role configuration
+// once and enforces group-based access on every request.
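+//
+// Illustrative usage (a sketch; the file path and keys are assumptions):
+//
+//	r := gin.Default()
+//	r.Use(aclmdl.ACLMiddleware("./roles.csv", "my-jwt-key", "groups"))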
+func ACLMiddleware(configFilePath, JWTKey, groupKey string) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		URL := c.Request.Header.Get("Service-Header")
+		roleenforcemdl.LoadRoleConfiguration(configFilePath)
+		if roleenforcemdl.Enforce(c.Request, URL, JWTKey, groupKey) {
+			c.Next()
+		} else {
+			c.AbortWithStatus(http.StatusUnauthorized)
+		}
+	}
+}
diff --git a/v2/authmdl/jwtmdl/jwtmdl.go b/v2/authmdl/jwtmdl/jwtmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..240bc5a8d7edaf80c07d10318d4fdaf48981234e
--- /dev/null
+++ b/v2/authmdl/jwtmdl/jwtmdl.go
@@ -0,0 +1,130 @@
+package jwtmdl
+
+import (
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/sessionmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	jwt "github.com/dgrijalva/jwt-go"
+	"github.com/tidwall/gjson"
+)
+
+// GlobalJWTKey - key to decode and encode token
+var GlobalJWTKey string
+
+var keyFunc = func(key string) jwt.Keyfunc {
+	return func(*jwt.Token) (interface{}, error) {
+		return []byte(key), nil
+	}
+}
+
+type jwtCustomClaim struct {
+	UserID    string   `json:"userId"`
+	SessionId string   `json:"sessionId,omitempty"`
+	Groups    []string `json:"groups"`
+	ClientIP  string   `json:"clientIP"`
+	HitsCount int      `json:"hitsCount"`
+	Token     string   `json:"token"`
+	Metadata  string   `json:"metadata"`
+	jwt.StandardClaims
+}
+
+func generate(claims jwtCustomClaim, key string) (string, error) {
+	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(key))
+}
+
+// extract returns the bearer token from an Authorization header value.
+func extract(tokenReq string) (string, error) {
+	tokenArray := strings.Split(tokenReq, "Bearer")
+	if len(tokenArray) <= 1 {
+		return "", errormdl.Wrap("provided JWT token is nil or invalid")
+	}
+
+	return strings.Trim(tokenArray[1], " "), nil
+}
+
+// decode accepts a parsed token and error from parse operation.
+func decode(token *jwt.Token, err error) (jwt.MapClaims, error) {
+	if err != nil {
+		// loggermdl.LogError("Error while parsing JWT Token: ", err)
+		return nil, err
+	}
+
+	claims, ok := token.Claims.(jwt.MapClaims)
+	if !ok {
+		// loggermdl.LogError("Error while parsing claims to MapClaims")
+		return nil, errormdl.Wrap("Error while getting claims")
+	}
+
+	// validate user session from session id present in token
+	if err := sessionmdl.ValidateSessionFromToken(claims); err != nil {
+		// loggermdl.LogError("session validation failed with err:", err)
+		return nil, sessionmdl.ErrSessionValidationFailed
+	}
+
+	return claims, nil
+}
+
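+// GenerateTokenWithOptions generates an HS256-signed JWT from the provided
+// options. GlobalJWTKey is used unless overridden via WithKey; if a session is
+// supplied via WithSession, it is also registered in the session store.
+//
+// Illustrative usage (a sketch; the user id, key and expiry are assumptions):
+//
+//	token, err := jwtmdl.GenerateTokenWithOptions(
+//		jwtmdl.WithUserID("tom@company.org"),
+//		jwtmdl.WithKey("my-secret"),
+//		jwtmdl.WithExpiration(15*time.Minute),
+//	)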
+func GenerateTokenWithOptions(args ...Option) (string, error) {
+	options := new(Options)
+
+	options.Key = GlobalJWTKey
+
+	for i := range args {
+		args[i](options)
+	}
+
+	claims := jwtCustomClaim{
+		ClientIP:  options.ClientIP,
+		Groups:    options.Groups,
+		Metadata:  options.Metadata,
+		SessionId: options.Session.SessionId,
+		UserID:    options.UserID,
+		StandardClaims: jwt.StandardClaims{
+			ExpiresAt: options.ExpiresAt,
+		},
+	}
+
+	t, err := generate(claims, options.Key)
+	if err != nil {
+		return "", err
+	}
+
+	if len(options.Session.SessionId) > 0 {
+		sessionmdl.Set(options.UserID, options.Session)
+	}
+
+	return t, nil
+}
+
+// GenerateToken generates a JWT for the given login details using GlobalJWTKey.
+func GenerateToken(loginID string, groups []string, clientIP string, metadata gjson.Result, expirationTime time.Duration) (string, error) {
+	return GenerateTokenWithJWTKey(loginID, groups, clientIP, metadata, expirationTime, GlobalJWTKey)
+}
+
+// GenerateTokenWithJWTKey generates a JWT for the given login details, signed with the provided key.
+func GenerateTokenWithJWTKey(loginID string, groups []string, clientIP string, metadata gjson.Result, expirationTime time.Duration, JWTKey string) (string, error) {
+	claims := jwtCustomClaim{
+		UserID:   loginID,
+		Groups:   groups,
+		ClientIP: clientIP,
+		Metadata: metadata.String(),
+		StandardClaims: jwt.StandardClaims{
+			ExpiresAt: time.Now().Add(expirationTime).Unix(),
+		},
+	}
+
+	return generate(claims, JWTKey)
+}
+
+// GeneratePricipleObjUsingToken extracts the bearer token from the given header value and returns its decoded claims.
+func GeneratePricipleObjUsingToken(tokenReq string, jwtKey string) (jwt.MapClaims, error) {
+
+	token, err := extract(tokenReq)
+	if err != nil {
+		return nil, err
+	}
+
+	return decode(jwt.Parse(token, keyFunc(jwtKey)))
+}
diff --git a/v2/authmdl/jwtmdl/jwtmdl_fasthttp.go b/v2/authmdl/jwtmdl/jwtmdl_fasthttp.go
new file mode 100644
index 0000000000000000000000000000000000000000..10ff39b107c1bb3c888f3110f6607ced4988d84d
--- /dev/null
+++ b/v2/authmdl/jwtmdl/jwtmdl_fasthttp.go
@@ -0,0 +1,24 @@
+// +build fasthttp
+
+package jwtmdl
+
+import (
+	jwt "github.com/dgrijalva/jwt-go"
+	"github.com/valyala/fasthttp"
+)
+
+// DecodeTokenWithJWTKey decodes the JWT from the request's Authorization header using the provided key.
+func DecodeTokenWithJWTKey(req *fasthttp.Request, jwtKey string) (jwt.MapClaims, error) {
+
+	tokenFromRequest, err := extract(string(req.Header.Peek("Authorization")))
+	if err != nil {
+		return nil, err
+	}
+
+	return decode(jwt.Parse(tokenFromRequest, keyFunc(jwtKey)))
+}
+
+// DecodeToken decodes the JWT from the request's Authorization header using GlobalJWTKey.
+func DecodeToken(req *fasthttp.Request) (jwt.MapClaims, error) {
+	return DecodeTokenWithJWTKey(req, GlobalJWTKey)
+}
diff --git a/v2/authmdl/jwtmdl/jwtmdl_http.go b/v2/authmdl/jwtmdl/jwtmdl_http.go
new file mode 100644
index 0000000000000000000000000000000000000000..0e2da98a9e793b89da5a550da8cb1a7274154eb5
--- /dev/null
+++ b/v2/authmdl/jwtmdl/jwtmdl_http.go
@@ -0,0 +1,21 @@
+// +build !fasthttp
+
+package jwtmdl
+
+import (
+	"net/http"
+
+	"github.com/dgrijalva/jwt-go/request"
+
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+// DecodeTokenWithJWTKey decodes the JWT from the request (via the OAuth2 extractor) using the provided key.
+func DecodeTokenWithJWTKey(req *http.Request, jwtKey string) (jwt.MapClaims, error) {
+	return decode(request.ParseFromRequest(req, request.OAuth2Extractor, keyFunc(jwtKey)))
+}
+
+// DecodeToken decodes the JWT from the request using GlobalJWTKey.
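+//
+// Illustrative usage inside an http.Handler (a sketch):
+//
+//	claims, err := jwtmdl.DecodeToken(r)
+//	if err != nil {
+//		http.Error(w, "unauthorized", http.StatusUnauthorized)
+//		return
+//	}
+//	userID, _ := claims["userId"].(string)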
+func DecodeToken(req *http.Request) (jwt.MapClaims, error) {
+	return DecodeTokenWithJWTKey(req, GlobalJWTKey)
+}
diff --git a/v2/authmdl/jwtmdl/jwtmdl_test.go b/v2/authmdl/jwtmdl/jwtmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..62565948974effb67b15fef92ea1fca193a33767
--- /dev/null
+++ b/v2/authmdl/jwtmdl/jwtmdl_test.go
@@ -0,0 +1,191 @@
+package jwtmdl
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/sessionmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	jwt "github.com/dgrijalva/jwt-go"
+	"github.com/gin-gonic/gin"
+	"github.com/stretchr/testify/assert"
+	"github.com/tidwall/gjson"
+)
+
+const (
+	TestKey = "vJUufKHyu2xiMYmDj1TmojHR11ciUaq3"
+)
+
+func checkToken(token string) error {
+	claims, err := decode(jwt.Parse(token, keyFunc(TestKey)))
+	if err != nil {
+		return err
+	}
+
+	return sessionmdl.ValidateSessionFromToken(claims)
+}
+
+func server() {
+	g := gin.Default()
+	g.GET("/status", func(c *gin.Context) {
+		fmt.Println("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ")
+
+		c.JSON(200, "Hello")
+	})
+	g.Run("localhost:8081")
+}
+func TestDecodeTokenSuccess(t *testing.T) {
+	// go server()
+	// fmt.Println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>calling rq ")
+	// resp, err := http.NewRequest("GET", "http://localhost:8081/status", nil)
+	// fmt.Println("resp  ", resp)
+	// fmt.Println("err  ", err)
+
+	//time.Sleep(30 * time.Second)
+
+	// got, _ := DecodeToken("Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwcyI6ImFkbWluIn0.qmA-brbKScqBEFr3wJIuBmcBsERYoNPLnOAHBe4HNh8", "3212mysupersecretpassword")
+	// assert.Equal(t, "sandeepss", got["Id"], "it is equal")
+
+}
+
+func TestDecodeTokenvalid(t *testing.T) {
+	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+	req.Header.Set("Authorization", "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwIjpbImFkbWluIl19.dGSKQZMGSEBjgPrjTHJwylcxIxxMKylahuFn7DHR4pc")
+	_, derror := DecodeTokenWithJWTKey(req, "mysupersecretpassword")
+	assert.NoError(t, derror, "No error occurred")
+
+}
+
+// func TestDecodeTokenInvalid(t *testing.T) {
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+// 	req.Header.Set("Authorization", "Bearer yJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwcyI6ImFkbWluIn0.qmA-brbKScqBEFr3wJIuBmcBsERYoNPLnOAHBe4HNh8")
+// 	_, derror := DecodeToken(req, "mysupersecretpassword")
+// 	assert.Error(t, derror, "error occured")
+
+// }
+// func TestDecodeTokenEmpty(t *testing.T) {
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+// 	req.Header.Set("Authorization", "")
+// 	_, derror := DecodeToken(req, "mysupersecretpassword")
+// 	assert.Error(t, derror, "error occured")
+
+// }
+
+// func TestDecodeTokenWrongAlo(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+// 	req.Header.Set("Authorization", "Bearer yJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwcyI6ImFkbWluIn0.qmA-brbKScqBEFr3wJIuBmcBsERYoNPLnOAHBe4HNh8")
+// 	_, derror := DecodeToken(req, "mysupersecretpassword")
+// 	assert.Error(t, derror, "error occured")
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// }
+// func TestDecodeTokenWrongTypeConversion(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+// 	req.Header.Set("Authorization", "Bearer yJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwcyI6ImFkbWluIn0.qmA-brbKScqBEFr3wJIuBmcBsERYoNPLnOAHBe4HNh8")
+// 	_, derror := DecodeToken(req, "mysupersecretpassword")
+// 	assert.Error(t, derror, "error occured")
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+// }
+
+func TestGenerateTokenWithOptions(t *testing.T) {
+	sessionmdl.InitUserSessionCache(cachemdl.TypeFastCache)
+
+	type args struct {
+		args []Option
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name: "Token without session",
+			args: args{
+				args: []Option{
+					WithUserID("tom@company.org"),
+					WithExpiration(0),
+					WithKey(TestKey),
+					WithMetaData(`{"name":"tom"}`),
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name: "Token with session",
+			args: args{
+				args: []Option{
+					WithUserID("tom@company.org"),
+					WithExpiration(0),
+					WithKey(TestKey),
+					WithMetaData(`{"name":"tom"}`),
+					WithSession(guidmdl.GetGUID(), "me"),
+				},
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := GenerateTokenWithOptions(tt.args.args...)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GenerateTokenWithOptions() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			err = checkToken(got)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GenerateTokenWithOptions() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func TestGenerateTokenWithJWTKey(t *testing.T) {
+	type args struct {
+		loginID        string
+		groups         []string
+		clientIP       string
+		metadata       gjson.Result
+		expirationTime time.Duration
+		JWTKey         string
+	}
+	tests := []struct {
+		name string
+		args args
+		// want    string
+		wantErr bool
+	}{
+		{
+			name: "Test generate token",
+			args: args{
+				JWTKey:         TestKey,
+				expirationTime: time.Minute * 5,
+				groups:         []string{"admin"},
+				loginID:        "tom@company.org",
+				metadata:       gjson.Parse(`{"name":"tom"}`),
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := GenerateTokenWithJWTKey(tt.args.loginID, tt.args.groups, tt.args.clientIP, tt.args.metadata, tt.args.expirationTime, tt.args.JWTKey)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GenerateTokenWithJWTKey() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			err = checkToken(got)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GenerateTokenWithJWTKey() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
diff --git a/v2/authmdl/jwtmdl/options.go b/v2/authmdl/jwtmdl/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0db9986c0e24d749df21fb361c35250c77a0c21
--- /dev/null
+++ b/v2/authmdl/jwtmdl/options.go
@@ -0,0 +1,73 @@
+package jwtmdl
+
+import (
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/sessionmdl"
+)
+
+type Options struct {
+	Key       string
+	UserID    string
+	ClientIP  string
+	Metadata  string
+	Groups    []string
+	ExpiresAt int64
+	Session   sessionmdl.Session
+}
+
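+// Option mutates Options; pass any number of Option values to
+// GenerateTokenWithOptions to override the defaults (functional options).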
+type Option func(*Options)
+
+// WithKey uses provided jwt key for token generation
+func WithKey(k string) Option {
+	return func(args *Options) {
+		args.Key = k
+	}
+}
+
+func WithUserID(uid string) Option {
+	return func(args *Options) {
+		args.UserID = uid
+	}
+}
+
+// WithSession enables session validation on jwt decode. Required fields must not be empty.
+func WithSession(sid, sessionFor string) Option {
+	return func(args *Options) {
+		args.Session = sessionmdl.Session{
+			SessionId:  sid,
+			SessionFor: sessionFor,
+		}
+	}
+}
+
+func WithClientIP(ip string) Option {
+	return func(args *Options) {
+		args.ClientIP = ip
+	}
+}
+
+// WithMetaData embeds the provided data in the token under the `metadata` key. The data must be valid JSON.
+func WithMetaData(data string) Option {
+	return func(args *Options) {
+		args.Metadata = data
+	}
+}
+
+func WithGroups(gs []string) Option {
+	return func(args *Options) {
+		args.Groups = gs
+	}
+}
+
+// WithExpiration adds the provided expiration to the JWT. Use `0` or omit this option to generate a token without expiry.
+func WithExpiration(e time.Duration) Option {
+	return func(args *Options) {
+		if e == 0 {
+			args.ExpiresAt = 0
+			return
+		}
+
+		args.ExpiresAt = time.Now().Add(e).Unix()
+	}
+}
diff --git a/v2/authmdl/ldapmdl/ldapmdl.go b/v2/authmdl/ldapmdl/ldapmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..3a5b4b378398b7792b15aece5dfa8264605aa983
--- /dev/null
+++ b/v2/authmdl/ldapmdl/ldapmdl.go
@@ -0,0 +1,154 @@
+package ldapmdl
+
+import (
+	"crypto/tls"
+	"errors"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/configmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	ldap "github.com/go-ldap/ldap/v3"
+)
+
+/*
+Author : RahulS
+LDAP authentication module - Authenticates the user with the given LDAP server
+Requires 	: loginID (string), password (string)
+Returns		: 0 - If any error occurs; error - the respective error object. Occurs under the following conditions:
+					a. Blank loginID
+					b. Blank password
+					c. Error connecting to LDAP server
+					d. Error reconnecting to LDAP using TLS
+					e. Error binding admin username and password with LDAP
+					f. Error searching user on LDAP server
+			  1 - If user does not exist on LDAP server; error - nil
+			  2 - If multiple entries found against loginID; error - nil
+			  3 - If user enters wrong password; error - nil
+			  4 - Successful authentication; error - nil
+*/
+
+// LDAPConfig holds the LDAP server configuration
+type LDAPConfig struct {
+	BaseDN               string
+	LDAPServerIPWithPort string
+	FilterDN             string
+	LDAPUsername         string
+	LDAPPassword         string
+}
+
+var ldapConfig LDAPConfig
+
+// InitLDAP loads the LDAP server configuration from the given config file
+func InitLDAP(configFilePath string) error {
+	_, configError := configmdl.InitConfig(configFilePath, &ldapConfig)
+	if errormdl.CheckErr(configError) != nil {
+		return configError
+	}
+	return nil
+}
+
+// AuthenticateOnLDAP authenticates the user against the configured LDAP server and returns one of the status codes documented above.
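+//
+// Illustrative usage (a sketch; the credentials are placeholders):
+//
+//	status, err := ldapmdl.AuthenticateOnLDAP("jdoe", "secret")
+//	if err != nil {
+//		// configuration, connection, bind or search failure (status 0)
+//	}
+//	switch status {
+//	case 1: // user does not exist
+//	case 2: // multiple entries found for loginID
+//	case 3: // wrong password
+//	case 4: // authenticated
+//	}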
+func AuthenticateOnLDAP(loginID, password string) (int, error) {
+
+	if strings.TrimSpace(loginID) == "" {
+
+		loggermdl.LogError("ldapmdl : loginID required")
+		return 0, errors.New("ldapmdl : loginID required")
+
+	} else if strings.TrimSpace(password) == "" {
+
+		loggermdl.LogError("ldapmdl : password required")
+		return 0, errors.New("ldapmdl : password required")
+
+	}
+
+	//Check if LDAP configuration is properly set through config file. (Call InitLDAP() to set configuration)
+	ldapInitConfigError := CheckLDAPConfig()
+	if ldapInitConfigError != nil {
+		return 0, ldapInitConfigError
+	}
+
+	//Create connection to LDAP server
+	ldapConnection, ldapConnectionError := ldap.Dial("tcp", ldapConfig.LDAPServerIPWithPort)
+	if errormdl.CheckErr(ldapConnectionError) != nil {
+		loggermdl.LogError("ldapmdl connectionError : ", ldapConnectionError)
+		return 0, ldapConnectionError
+	}
+
+	defer ldapConnection.Close()
+
+	//Reconnect with TLS(Transport Layer Security Protocol)
+	startTLSError := ldapConnection.StartTLS(&tls.Config{InsecureSkipVerify: true})
+	if errormdl.CheckErr1(startTLSError) != nil {
+		loggermdl.LogError("ldapmdl startTLSError : ", startTLSError)
+		return 0, startTLSError
+	}
+
+	//Bind with an administrator user who has credentials for operations like 'search'
+	ldapBindError := ldapConnection.Bind(ldapConfig.LDAPUsername, ldapConfig.LDAPPassword)
+	if errormdl.CheckErr2(ldapBindError) != nil {
+		loggermdl.LogInfo("ldapmdl ldapBindError: ", ldapBindError)
+		return 0, ldapBindError
+	}
+
+	//Search for required username which is to be authenticated
+	result, searchError := ldapConnection.Search(ldap.NewSearchRequest(
+		ldapConfig.BaseDN,
+		ldap.ScopeWholeSubtree,
+		ldap.NeverDerefAliases,
+		0,
+		0,
+		false,
+		filter(loginID, ldapConfig.FilterDN),
+		[]string{"dn"},
+		nil,
+	))
+
+	//Handle search error
+	if errormdl.CheckErr3(searchError) != nil {
+		loggermdl.LogError("ldapmdl searchError : ", searchError)
+		return 0, searchError
+	}
+
+	//Return 1 if user does not exist on LDAP server
+	if len(result.Entries) < 1 {
+		return 1, nil
+	}
+
+	//Return 2 if multiple entries found against one userId
+	if errormdl.CheckBool(len(result.Entries) > 1) {
+		return 2, nil
+	}
+
+	//Bind the password with the given userId; if an error occurs while binding, the user entered a wrong password
+	if userCredentialsBindError := ldapConnection.Bind(result.Entries[0].DN, password); userCredentialsBindError != nil {
+		return 3, nil
+	}
+
+	//return 4 if authentication is successful
+	loggermdl.LogDebug("ldapmdl : Authentication successful")
+	return 4, nil
+}
+
+func filter(needle string, filterDN string) string {
+	res := strings.Replace(filterDN, "{username}", needle, -1)
+	return res
+}
+
+// CheckLDAPConfig checks whether the LDAP configuration is initialized
+func CheckLDAPConfig() error {
+
+	if strings.TrimSpace(ldapConfig.BaseDN) == "" {
+		return errors.New("LDAP baseDN not found")
+	} else if strings.TrimSpace(ldapConfig.FilterDN) == "" {
+		return errors.New("LDAP filterDN not found")
+	} else if strings.TrimSpace(ldapConfig.LDAPPassword) == "" {
+		return errors.New("LDAP password not found")
+	} else if strings.TrimSpace(ldapConfig.LDAPServerIPWithPort) == "" {
+		return errors.New("LDAP IP address not found; IP address should include the port")
+	} else if strings.TrimSpace(ldapConfig.LDAPUsername) == "" {
+		return errors.New("LDAP username not found")
+	}
+	return nil
+}
diff --git a/v2/authmdl/ldapmdl/ldapmdl_test.go b/v2/authmdl/ldapmdl/ldapmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f1ed626b308cd50912d5b74c34c40c1f999e6b6
--- /dev/null
+++ b/v2/authmdl/ldapmdl/ldapmdl_test.go
@@ -0,0 +1,107 @@
+package ldapmdl
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+)
+
+func TestLDAPAuthentication(t *testing.T) {
+
+	//LDAP init config error
+	errormdl.IsTestingNegetiveCaseOn = true
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+	errormdl.IsTestingNegetiveCaseOn = false
+
+	//LDAP BaseDSN not set
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+	ldapConfig.BaseDN = ""
+	authStatusCode, _ := AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//LDAP FilterDN not set
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+	ldapConfig.FilterDN = ""
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//LDAP LDAPPassword not set
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+	ldapConfig.LDAPPassword = ""
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//LDAP LDAPServerIPWithPort not set
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+	ldapConfig.LDAPServerIPWithPort = ""
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//LDAP LDAPUsername not set
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+	ldapConfig.LDAPUsername = ""
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	InitLDAP("../../testingdata/testData/ldapmdl/config.toml")
+
+	//LDAP connection error
+	errormdl.IsTestingNegetiveCaseOn = true
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//TLS connection establishment error
+	errormdl.IsTestingNegetiveCaseOn = false
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//Administrative user bind error
+	errormdl.IsTestingNegetiveCaseOn = false
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 0, authStatusCode)
+
+	//AD user search error
+	errormdl.IsTestingNegetiveCaseOn = false
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	errormdl.IsTestingNegetiveCaseOn3 = true
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls123", "dfgh")
+	assert.Equal(t, 0, authStatusCode)
+
+	//Blank username
+	authStatusCode, _ = AuthenticateOnLDAP("", "dfgh")
+	assert.Equal(t, 0, authStatusCode)
+
+	//Blank password
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "")
+	assert.Equal(t, 0, authStatusCode)
+
+	//User does not exist test
+	errormdl.IsTestingNegetiveCaseOn = false
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	errormdl.IsTestingNegetiveCaseOn3 = false
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls123", "dfgh")
+	assert.Equal(t, 1, authStatusCode)
+
+	//Multiple user entries on AD server
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 2, authStatusCode)
+
+	//Wrong password
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "fghjkl")
+	assert.Equal(t, 3, authStatusCode)
+
+	//Successful authentication
+	//TODO: Enter correct username and password
+	authStatusCode, _ = AuthenticateOnLDAP("rahuls", "aug#1234")
+	assert.Equal(t, 4, authStatusCode)
+
+}
diff --git a/v2/authmdl/roleenforcemdl/roleenforcement.go b/v2/authmdl/roleenforcemdl/roleenforcement.go
new file mode 100644
index 0000000000000000000000000000000000000000..d686cbb59672de3fffccb4bd5c3832ccb361e082
--- /dev/null
+++ b/v2/authmdl/roleenforcemdl/roleenforcement.go
@@ -0,0 +1,148 @@
+package roleenforcemdl
+
+import (
+	"encoding/csv"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+var roleManagementConfig map[string]URLGroupMapping
+var once sync.Once
+
+// URLGroupMapping holds per-URL access rules: an allow-all flag plus explicitly allowed and disallowed groups.
+type URLGroupMapping struct {
+	All        bool
+	Allowed    map[string]bool
+	NotAllowed map[string]bool
+}
+
+func init() {
+	roleManagementConfig = make(map[string]URLGroupMapping)
+}
+
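+// LoadRoleConfiguration reads the role-mapping CSV once and caches it in
+// memory. Each row is read as: group, URL, method (currently unused),
+// allowed (bool). A sketch of the expected layout (inferred from the column
+// indexes used below):
+//
+//	admin, login, GET, true
+//	*, checkstatus, POST, false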
+func LoadRoleConfiguration(csvFilePath string) {
+	// Open CSV file
+	once.Do(func() {
+		f, err := os.Open(csvFilePath)
+		if err != nil {
+			panic(err)
+		}
+		defer f.Close()
+
+		// Read File into a Variable
+		lines, err := csv.NewReader(f).ReadAll()
+		if err != nil {
+			panic(err)
+		}
+
+		for _, line := range lines {
+			group := strings.Trim(line[0], " ")
+			URL := strings.Trim(line[1], " ")
+			// method := strings.Trim(line[2], " ")
+
+			boolValue := strings.Trim(line[3], " ")
+			isAllowed, parseError := strconv.ParseBool(boolValue)
+			if parseError != nil {
+				loggermdl.LogError("error occurred while parsing string to boolean")
+			}
+			registerUser(group, URL, isAllowed)
+		}
+	})
+}
+
+// RegisterUser registers a group-to-URL access rule.
+func RegisterUser(group, URL string, isAllowed bool) {
+	registerUser(group, URL, isAllowed)
+}
+
+func registerUser(group, URL string, isAllowed bool) {
+	roleObject, ok := roleManagementConfig[URL]
+	if ok {
+		roleObject = assignURL(group, isAllowed, roleObject)
+	} else {
+		roleObject = URLGroupMapping{
+			Allowed:    make(map[string]bool),
+			NotAllowed: make(map[string]bool),
+		}
+		roleObject = assignURL(group, isAllowed, roleObject)
+	}
+	roleManagementConfig[URL] = roleObject
+}
+
+func assignURL(group string, isAllowed bool, roleObj URLGroupMapping) URLGroupMapping {
+	if group == "*" {
+		roleObj.All = isAllowed
+	} else {
+		if isAllowed {
+			roleObj.Allowed[group] = true
+		} else {
+			roleObj.NotAllowed[group] = true
+		}
+	}
+	return roleObj
+}
+
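+// checkAccess reports whether any group in groupList may access URL. An
+// allow-all ("*") rule admits every group except those explicitly marked not
+// allowed; otherwise access requires an explicit allowed entry for the group.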
+func checkAccess(URL string, groupList []string) bool {
+	if roleObj, ok := roleManagementConfig[URL]; ok {
+		if roleObj.All {
+			for _, group := range groupList {
+				if _, ok := roleObj.NotAllowed[group]; ok {
+					return false
+				}
+			}
+			return true
+		}
+		for _, group := range groupList {
+			if _, ok := roleObj.Allowed[group]; ok {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// GetGroupNames extracts the group names from the decoded token claims under the provided group key.
+func GetGroupNames(decodedToken jwt.MapClaims, groupKey string) ([]string, error) {
+
+	if decodedToken == nil {
+		loggermdl.LogError("decodedToken is empty")
+		return nil, errormdl.Wrap("Error while getting claims")
+	}
+	groups, ok := decodedToken[groupKey]
+
+	if !ok {
+		loggermdl.LogError("either group key is invalid or token does not contain the group data")
+		return nil, errormdl.Wrap("either group key is invalid or token does not contain the group data")
+	}
+
+	eachGroup, ok := groups.([]interface{})
+	if !ok {
+		return nil, nil
+	}
+	stringArray := []string{}
+	for _, str := range eachGroup {
+		group, ok1 := str.(string)
+		if !ok1 {
+			return nil, errormdl.Wrap("group could not be cast to string")
+		}
+		stringArray = append(stringArray, group)
+	}
+
+	return stringArray, nil
+}
diff --git a/v2/authmdl/roleenforcemdl/roleenforcement_fasthttp.go b/v2/authmdl/roleenforcemdl/roleenforcement_fasthttp.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c6225a3ce266e6f80c2288b06e113f6d55bc88f
--- /dev/null
+++ b/v2/authmdl/roleenforcemdl/roleenforcement_fasthttp.go
@@ -0,0 +1,38 @@
+// +build fasthttp
+
+package roleenforcemdl
+
+import (
+	"github.com/valyala/fasthttp"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+// Enforce reports whether any group in the request's token may access the given URL.
+func Enforce(req *fasthttp.Request, URL, jwtKey, groupKey string) bool {
+	groupList, geterr := GetGroupFromToken(req, jwtKey, groupKey)
+	if errormdl.CheckErr(geterr) != nil {
+		loggermdl.LogError("error occurred while fetching groups from token : ", errormdl.CheckErr(geterr))
+		return false
+	}
+
+	return checkAccess(URL, groupList)
+}
+
+// GetGroupFromToken decodes the JWT from the request and returns its group claims.
+func GetGroupFromToken(req *fasthttp.Request, jwtKey, groupKey string) ([]string, error) {
+	claims, decodeError := jwtmdl.DecodeTokenWithJWTKey(req, jwtKey)
+	if errormdl.CheckErr(decodeError) != nil {
+		loggermdl.LogError("error occurred while decoding jwt token : ", errormdl.CheckErr(decodeError))
+		return nil, errormdl.CheckErr(decodeError)
+	}
+	groups, groupFetchError := GetGroupNames(claims, groupKey)
+	if errormdl.CheckErr(groupFetchError) != nil {
+		loggermdl.LogError("error occurred while fetching groups from token : ", errormdl.CheckErr(groupFetchError))
+		return nil, errormdl.CheckErr(groupFetchError)
+	}
+	return groups, nil
+}
diff --git a/v2/authmdl/roleenforcemdl/roleenforcement_gin.go b/v2/authmdl/roleenforcemdl/roleenforcement_gin.go
new file mode 100644
index 0000000000000000000000000000000000000000..485aafc472d5d928b55be048dea6b0d6a568781d
--- /dev/null
+++ b/v2/authmdl/roleenforcemdl/roleenforcement_gin.go
@@ -0,0 +1,37 @@
+// +build !fasthttp
+
+package roleenforcemdl
+
+import (
+	"net/http"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+// Enforce reports whether any group in the request's token may access the given URL.
+func Enforce(req *http.Request, URL, jwtKey, groupKey string) bool {
+	groupList, geterr := GetGroupFromToken(req, jwtKey, groupKey)
+	if errormdl.CheckErr(geterr) != nil {
+		loggermdl.LogError("error occurred while fetching groups from token : ", errormdl.CheckErr(geterr))
+		return false
+	}
+
+	return checkAccess(URL, groupList)
+}
+
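+// GetGroupFromToken decodes the JWT from the request and returns its group claims.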
+func GetGroupFromToken(req *http.Request, jwtKey, groupKey string) ([]string, error) {
+	claims, decodeError := jwtmdl.DecodeTokenWithJWTKey(req, jwtKey)
+	if errormdl.CheckErr(decodeError) != nil {
+		loggermdl.LogError("error occurred while decoding jwt token : ", errormdl.CheckErr(decodeError))
+		return nil, errormdl.CheckErr(decodeError)
+	}
+	groups, groupFetchError := GetGroupNames(claims, groupKey)
+	if errormdl.CheckErr(groupFetchError) != nil {
+		loggermdl.LogError("error occurred while fetching groups from token : ", errormdl.CheckErr(groupFetchError))
+		return nil, errormdl.CheckErr(groupFetchError)
+	}
+	return groups, nil
+}
diff --git a/v2/authmdl/roleenforcemdl/roleenforcement_test.go b/v2/authmdl/roleenforcemdl/roleenforcement_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d720650a742e7ad22fd04397a830ae6a537b1157
--- /dev/null
+++ b/v2/authmdl/roleenforcemdl/roleenforcement_test.go
@@ -0,0 +1,59 @@
+package roleenforcemdl
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestLoadRoleConfiguration(t *testing.T) {
+	LoadRoleConfiguration("../../testingdata/roleenforcer/rolemapping.csv")
+	fmt.Println(roleManagementConfig)
+}
+
+// func TestEnforce(t *testing.T) {
+// 	LoadRoleConfiguration("../../testingdata/roleenforcer/rolemapping.csv")
+// 	boolVal := Enforce("checkstatus", "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwcyI6WyJhZG1pbiIsInRlYWNoZXIiXX0.Bx0KWD7Q5joxoFcODy5xRdkO7NuHGD0tJFH4PUBlOjA", "mysupersecretpassword", "groups")
+// 	fmt.Println("boolVal : ", boolVal)
+// }
+
+func Test_checkAccess(t *testing.T) {
+	LoadRoleConfiguration("../../testingdata/roleenforcer/rolemapping.csv")
+	groupList := []string{"admin"}
+	rs := checkAccess("login", groupList)
+	assert.True(t, rs)
+
+	groupList = []string{"admin"}
+	rs = checkAccess("something", groupList)
+	assert.True(t, rs)
+
+	groupList = []string{"teacher"}
+	rs = checkAccess("checkstatus", groupList)
+	assert.False(t, rs)
+
+	groupList = []string{"admin"}
+	rs = checkAccess("checkstatus", groupList)
+	assert.True(t, rs)
+
+	groupList = []string{"teacher"}
+	rs = checkAccess("something", groupList)
+	assert.False(t, rs)
+
+	groupList = []string{"teacher"}
+	rs = checkAccess("login", groupList)
+	assert.False(t, rs)
+}
+
+func TestDecodeTokenvalid(t *testing.T) {
+	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+	req.Header.Set("Authorization", "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJZCI6InNhbmRlZXBzcyIsImdyb3VwIjpbImFkbWluIl19.dGSKQZMGSEBjgPrjTHJwylcxIxxMKylahuFn7DHR4pc")
+	claims, derror := jwtmdl.DecodeTokenWithJWTKey(req, "mysupersecretpassword")
+	fmt.Println(claims)
+	GetGroupNames(claims, "group")
+	assert.NoError(t, derror, "No error occurred")
+
+}
diff --git a/v2/authmdl/sessionmdl/session.go b/v2/authmdl/sessionmdl/session.go
new file mode 100644
index 0000000000000000000000000000000000000000..10bfc22fa525274b9ccbdaded87c32d55e405cb8
--- /dev/null
+++ b/v2/authmdl/sessionmdl/session.go
@@ -0,0 +1,165 @@
+// Package sessionmdl provides APIs to add, validate and delete user sessions. These APIs must be used along with JWT auth.
+// To use this functionality, a JWT token must contain the `userId` and `sessionId` fields.
+// A user can have multiple active sessions for different use cases. To check if a user has an active session for a particular use case, use `CheckForSessionAvailability()`.
+// To check the user session on each request, use `ValidateSessionFromToken()`.
+//
+// An in-memory cache is used to store sessions. It automatically falls back to the redis cache if -gridmode=1 is set.
+//
+// The expiration of a session must be the same as the token expiration.
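+//
+// Illustrative usage (a sketch mirroring the package tests):
+//
+//	sessionmdl.Init(cachemdl.SetupFastCache(cachemdl.FCWithMaxEntries(10000)))
+//	sessionmdl.Set("tom@company.org", sessionmdl.Session{SessionFor: "web", SessionId: "789"})
+//	err := sessionmdl.ValidateSessionFromToken(claims) // claims from a jwtmdl decode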
+package sessionmdl
+
+import (
+	"errors"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+)
+
+type Session struct {
+	SessionFor string
+	SessionId  string
+}
+
+// store is used to store sessions in memory; it falls back to the redis cache in grid mode.
+var store cachemdl.Cacher
+
+var (
+	ErrUserNotFound            = errors.New("user not found")
+	ErrSessionNotFound         = errors.New("session not found")
+	ErrInvalidSessionInstance  = errors.New("got invalid session instance id")
+	ErrSessionValidationFailed = errors.New("session validation failed")
+)
+
+// Init initializes sessions with provided cache. Subsequent calls will not have any effect after first initialization.
+func Init(cache cachemdl.Cacher) {
+	if store != nil {
+		return
+	}
+
+	store = cache
+}
+
+// Set stores the sessions for provided userId. Session is appended to the list. It does not check if the same session exists or not.
+func Set(userId string, s ...Session) {
+	i, ok := store.Get(userId)
+	if !ok || i == nil {
+		set(userId, s)
+		return
+	}
+
+	sessions, ok := i.([]Session)
+	if !ok {
+		set(userId, s)
+		return
+	}
+
+	set(userId, append(sessions, s...))
+}
+
+func set(key string, val interface{}) {
+	// Set the user sessions with no expiry as each session can have different expiry depending on the JWT token expiry.
+	store.SetNoExpiration(key, val)
+}
+
+// Get returns all the available sessions for the user. This may contain expired but not deleted sessions.
+func Get(userId string) ([]Session, error) {
+	var (
+		s  []Session
+		i  interface{}
+		ok bool
+	)
+
+	i, ok = store.Get(userId)
+	if !ok {
+		return s, ErrUserNotFound
+	}
+
+	s, _ = i.([]Session)
+	// if !ok {
+	// 	return s, errors.New("failed to retrieve previous sessions")
+	// }
+
+	return s, nil
+}
+
+// Delete removes all the sessions associated with the user.
+func Delete(userId string) {
+	store.Delete(userId)
+}
+
+// DeleteSession removes a particular session for user, if present.
+func DeleteSession(userId, sessionFor string) {
+
+	sessions, err := Get(userId)
+	if err != nil {
+		return
+	}
+
+	for i := 0; i < len(sessions); i++ {
+		if sessions[i].SessionFor == sessionFor {
+			// swap-remove; recheck index i, which now holds the former last element
+			sessions[i] = sessions[len(sessions)-1]
+			sessions = sessions[:len(sessions)-1]
+			i--
+		}
+	}
+
+	if len(sessions) == 0 {
+		store.Delete(userId)
+		return
+	}
+
+	set(userId, sessions)
+}
+
+// ValidateSessionFromToken checks for session id in claims against available sessions.
+// Validation runs only when a nonempty `sessionId` claim is present; the claims must then also contain a `userId` field.
+func ValidateSessionFromToken(claims map[string]interface{}) error {
+
+	// check for sessionId field, if not present then it is ignored at the time of token generation.
+	// This means user doesn't want to validate session.
+	i, ok := claims["sessionId"]
+	if !ok || i == nil {
+		return nil
+	}
+
+	sessionId, _ := i.(string)
+	if len(sessionId) == 0 {
+		return errors.New("\"sessionId\" field is empty")
+	}
+
+	i, ok = claims["userId"]
+	if !ok {
+		return errors.New("\"userId\" field not found in token")
+	}
+
+	userId, _ := i.(string)
+	if len(userId) == 0 {
+		return errors.New("\"userId\" field is empty")
+	}
+
+	sessions, err := Get(userId)
+	if err != nil {
+		return err
+	}
+
+	for i := range sessions {
+		if sessions[i].SessionId == sessionId {
+			return nil
+		}
+	}
+
+	return ErrSessionNotFound
+}
+
+// CheckForSessionAvailability checks if the user has active session for provided `sessionFor`. Returns true if session is available.
+func CheckForSessionAvailability(userId, sessionFor string) bool {
+
+	sessions, _ := Get(userId)
+
+	for i := range sessions {
+		if sessions[i].SessionFor == sessionFor {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/v2/authmdl/sessionmdl/session_test.go b/v2/authmdl/sessionmdl/session_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb27c67a2d8f2bd801f772bfcc0c651379121e53
--- /dev/null
+++ b/v2/authmdl/sessionmdl/session_test.go
@@ -0,0 +1,147 @@
+package sessionmdl
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+)
+
+func init() {
+	Init(cachemdl.SetupFastCache(cachemdl.FCWithMaxEntries(10000)))
+}
+
+func TestSet(t *testing.T) {
+	Set("tom@company.org", Session{SessionFor: "xyz", SessionId: "789"})
+
+	type args struct {
+		userId string
+		s      []Session
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{
+			name: "User present",
+			args: args{
+				s:      []Session{Session{SessionFor: "abc", SessionId: "123"}},
+				userId: "tom@company.org",
+			},
+		},
+		{
+			name: "User not present",
+			args: args{
+				s:      []Session{Session{SessionFor: "abc", SessionId: "123"}},
+				userId: "ronny@company.org",
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			Set(tt.args.userId, tt.args.s...)
+		})
+	}
+
+	all := store.GetItemsCount()
+	if all != len(tests) {
+		t.Errorf("expected %d users got %d", len(tests), all)
+	}
+}
+
+func TestValidateSessionFromToken(t *testing.T) {
+	Set("tom@company.org", Session{SessionFor: "xyz", SessionId: "789"})
+	type args struct {
+		claims map[string]interface{}
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "Session present",
+			args:    args{claims: map[string]interface{}{"userId": "tom@company.org", "sessionId": "789"}},
+			wantErr: false,
+		},
+		{
+			name:    "Session not present",
+			args:    args{claims: map[string]interface{}{"userId": "invalid@company.org", "sessionId": "123"}},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := ValidateSessionFromToken(tt.args.claims); (err != nil) != tt.wantErr {
+				t.Errorf("ValidateSessionFromToken() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestCheckForSessionAvailability(t *testing.T) {
+	Set("tom@company.org", Session{SessionFor: "xyz", SessionId: "789"})
+	type args struct {
+		userId     string
+		sessionFor string
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "Session present",
+			args: args{userId: "tom@company.org", sessionFor: "xyz"},
+			want: true,
+		},
+		{
+			name: "Session not present",
+			args: args{userId: "tom@company.org", sessionFor: "someRandomID"},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if res := CheckForSessionAvailability(tt.args.userId, tt.args.sessionFor); res != tt.want {
+				t.Errorf("CheckForSessionAvailability() got = %v, want %v", res, tt.want)
+			}
+		})
+	}
+}
+
+func TestDeleteSession(t *testing.T) {
+	Set("TestDeleteSession", Session{SessionFor: "xyz", SessionId: "789"})
+	Set("TestDeleteSession", Session{SessionFor: "abc", SessionId: "123"})
+	type args struct {
+		userId     string
+		sessionFor string
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "Delete existing session",
+			args: args{userId: "TestDeleteSession", sessionFor: "xyz"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			DeleteSession(tt.args.userId, tt.args.sessionFor)
+			i, _ := store.Get(tt.args.userId)
+			// if !ok {
+			// 	t.Error("expected", tt.args.userId, "not to be deleted")
+			// }
+
+			sessions := i.([]Session)
+
+			for _, s := range sessions {
+				if s.SessionFor == tt.args.sessionFor {
+					t.Error("expected", tt.args.sessionFor, "to be deleted")
+				}
+			}
+		})
+	}
+
+}
diff --git a/v2/cachemdl/cache.go b/v2/cachemdl/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..01452d9ec7266b7a53349a6430c784d6b44958f8
--- /dev/null
+++ b/v2/cachemdl/cache.go
@@ -0,0 +1,38 @@
+package cachemdl
+
+import (
+	"time"
+)
+
+const (
+	// TypeFastCache indicates fast cache as cache storage
+	TypeFastCache = iota + 1
+	// TypeRedisCache indicates redis server as cache storage. Use this in grid mode.
+	TypeRedisCache
+)
+
+// Cacher provides access to the underlying cache; make sure all caches implement these methods.
+//
+// The returned data types can differ. E.g. the redis cache returns a `string`; the caller needs to handle this with the help of the Type() method:
+//
+//	if cache.Type() == TypeRedisCache {
+//		// value is a marshalled string; unmarshal as needed
+//	}
+type Cacher interface {
+	// SET
+	Set(key string, val interface{})
+	SetWithExpiration(key string, val interface{}, exp time.Duration)
+	SetNoExpiration(key string, val interface{})
+
+	// GET
+	Get(key string) (interface{}, bool)
+	GetAll() map[string]interface{}
+
+	// DELETE
+	Delete(key string)
+	Purge()
+
+	// GetItemsCount
+	GetItemsCount() int
+
+	Type() int
+}
diff --git a/v2/cachemdl/cache_redis.go b/v2/cachemdl/cache_redis.go
new file mode 100644
index 0000000000000000000000000000000000000000..416b900a5de0afc16e8d382cb39b4b169594575e
--- /dev/null
+++ b/v2/cachemdl/cache_redis.go
@@ -0,0 +1,313 @@
+package cachemdl
+
+/*
+
+Provides cache access and manipulation methods for redis server
+Implements cacher interface.
+
+Official docs -
+1. redis client - https://github.com/go-redis/redis
+2. redis server - https://redis.io/
+
+
+Note - corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl must be initialized before use
+
+*/
+
+import (
+	"encoding/json"
+	"errors"
+	"log"
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/go-redis/redis/v7"
+)
+
+const (
+	noExp time.Duration = 0
+
+	keySplitter = ":"
+)
+
+// RedisCache represents a Redis client with provided configuration. Do not change configuration at runtime.
+type RedisCache struct {
+	cli       *redis.Client  // represents redis client
+	opt       *redis.Options //
+	keyStr    string         // "<Prefix>:"
+	addPrefix bool           //
+	connected bool           // will be enabled if redis client connects to server
+
+	Addr       string        // redis server address, default "127.0.0.1:6379"
+	DB         int           // redis DB on provided server, default 0
+	Password   string        //
+	Expiration time.Duration // this duration will be used for Set() method
+	Prefix     string        // this will be used for storing keys for provided project
+}
+
+type configRedis struct {
+	addr       string        // redis server address, default "127.0.0.1:6379"
+	db         int           // redis DB on provided server, default 0
+	password   string        //
+	expiration time.Duration // this duration will be used for Set() method
+	prefix     string        // this will be used for storing keys for provided project
+}
+
+type redisOption func(*configRedis)
+
+func RedisWithAddr(addr string) redisOption {
+	return func(cfg *configRedis) {
+		cfg.addr = addr
+	}
+}
+func RedisWithDB(db int) redisOption {
+	return func(cfg *configRedis) {
+		cfg.db = db
+	}
+}
+func RedisWithPrefix(pfx string) redisOption {
+	return func(cfg *configRedis) {
+		cfg.prefix = pfx
+	}
+}
+func RedisWithPassword(p string) redisOption {
+	return func(cfg *configRedis) {
+		cfg.password = p
+	}
+}
+func RedisWithExpiration(exp time.Duration) redisOption {
+	return func(cfg *configRedis) {
+		cfg.expiration = exp
+	}
+}
+
+// Setup initializes redis cache for application. Must be called only once.
+func (rc *RedisCache) Setup(addr, password, prefix string, db int, exp time.Duration) {
+
+	if rc == nil {
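+		// guard against a nil receiver; note that the caller's nil pointer itself is not updated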
+		rc = new(RedisCache)
+	}
+
+	rc.Addr = addr
+	rc.Password = password
+	rc.DB = db
+	rc.Expiration = exp
+	rc.Prefix = prefix
+	opt := redis.Options{
+		Addr:     addr,
+		Password: password,
+		DB:       db,
+	}
+
+	rc.opt = &opt
+	rc.cli = redis.NewClient(&opt)
+
+	if _, err := rc.cli.Ping().Result(); err != nil {
+		// exit if connection to redis server fails
+		loggermdl.LogError("connection to redis server failed: ", err)
+		log.Fatal("connection to redis server failed: ", err)
+	}
+
+	rc.connected = true
+
+	if prefix != "" {
+		rc.keyStr = concat(rc.Prefix, keySplitter)
+		rc.addPrefix = true
+	}
+
+}
+
+// SetupRedisCache initializes redis cache for application and returns it. Must be called only once.
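+//
+// Illustrative usage (a sketch; the address and prefix are assumptions):
+//
+//	rc, err := cachemdl.SetupRedisCache(
+//		cachemdl.RedisWithAddr("127.0.0.1:6379"),
+//		cachemdl.RedisWithPrefix("myapp"),
+//		cachemdl.RedisWithExpiration(5*time.Minute),
+//	)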
+func SetupRedisCache(opts ...redisOption) (*RedisCache, error) {
+
+	rc := new(RedisCache)
+
+	cfg := new(configRedis)
+
+	for i := range opts {
+		opts[i](cfg)
+	}
+
+	rc.Addr = cfg.addr
+	rc.Password = cfg.password
+	rc.DB = cfg.db
+	rc.Expiration = cfg.expiration
+	rc.Prefix = cfg.prefix
+
+	rc.opt = &redis.Options{
+		Addr:     cfg.addr,
+		Password: cfg.password,
+		DB:       cfg.db,
+	}
+
+	rc.cli = redis.NewClient(rc.opt)
+
+	if _, err := rc.cli.Ping().Result(); err != nil {
+
+		return nil, errors.New("connection to redis server failed: " + err.Error())
+	}
+
+	rc.connected = true
+
+	if cfg.prefix != "" {
+		rc.keyStr = concat(rc.Prefix, keySplitter)
+		rc.addPrefix = true
+	}
+
+	return rc, nil
+}
+
+// Set marshals the provided value and stores it against the provided key. Errors will be logged to the initialized logger.
+func (rc *RedisCache) Set(key string, val interface{}) {
+	ba, err := marshalWithTypeCheck(val)
+	if err != nil {
+		loggermdl.LogError("error setting key ", key, " error: ", err)
+		return
+	}
+
+	rc.cli.Set(rc.key(key), ba, rc.Expiration)
+}
+
+// SetWithExpiration marshals the provided value and stores it against the provided key for the given duration. Errors will be logged to the initialized logger.
+func (rc *RedisCache) SetWithExpiration(key string, val interface{}, exp time.Duration) {
+	ba, err := marshalWithTypeCheck(val)
+	if err != nil {
+		loggermdl.LogError("error setting key ", key, " error: ", err)
+		return
+	}
+
+	rc.cli.Set(rc.key(key), ba, exp)
+}
+
+// SetNoExpiration marshals the provided value and stores it against the provided key with no expiry.
+// Errors will be logged to the initialized logger.
+func (rc *RedisCache) SetNoExpiration(key string, val interface{}) {
+	ba, err := marshalWithTypeCheck(val)
+	if err != nil {
+		loggermdl.LogError("error setting key ", key, " error: ", err)
+		return
+	}
+
+	rc.cli.Set(rc.key(key), ba, noExp)
+}
+
+// Get returns data against provided key. Returns false if not present.
+func (rc *RedisCache) Get(key string) (interface{}, bool) {
+
+	// Get returns error if key is not present.
+	val, err := rc.cli.Get(rc.key(key)).Result()
+	if err != nil {
+		loggermdl.LogError("error getting key", key, "from redis cache with error:", err)
+		return nil, false
+	}
+
+	return val, true
+}
+
+// Delete removes the provided key from the redis cache.
+func (rc *RedisCache) Delete(key string) {
+	rc.cli.Del(rc.key(key)).Result()
+}
+
+// GetItemsCount returns the number of keys matching the configured prefix.
+func (rc *RedisCache) GetItemsCount() int {
+	// pattern := rc.Prefix + "*"
+	// keys, err := rc.cli.Keys(pattern).Result()
+	// if err != nil {
+	// 	loggermdl.LogError("error getting item count for ", pattern, " error: ", err)
+	// 	return 0
+	// }
+	return len(rc.keys())
+}
+
+func (rc *RedisCache) flushDB() (string, error) {
+	return rc.cli.FlushDB().Result()
+}
+
+// Purge deletes all keys in the current redis DB.
+func (rc *RedisCache) Purge() {
+	_, err := rc.flushDB()
+	if err != nil {
+		loggermdl.LogError("error purging redis cache for db ", rc.Addr, "/", rc.DB, " error: ", err)
+	}
+}
+
+func marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// marshalWithTypeCheck checks the type before marshalling. This saves allocations and time significantly when the data is already a string or []byte.
+func marshalWithTypeCheck(v interface{}) ([]byte, error) {
+	switch d := v.(type) {
+	default:
+		return json.Marshal(v)
+	case string:
+		return []byte(d), nil
+	case []byte:
+		return d, nil
+	}
+}
+
+func concat(s ...string) string {
+	sb := strings.Builder{}
+	for i := range s {
+		sb.WriteString(s[i])
+	}
+
+	return sb.String()
+}
+
+func (rc *RedisCache) key(key string) string {
+	// prepare in format "<Prefix>:<key>"
+	if rc.addPrefix {
+		return concat(rc.keyStr, key)
+	}
+	return key
+}
+
+func (rc *RedisCache) actualKey(key string) string {
+	if rc.addPrefix {
+		return strings.TrimPrefix(key, rc.keyStr)
+	}
+	return key
+}
+
+func (rc *RedisCache) Type() int {
+	return TypeRedisCache
+}
+
+// GetAll returns all keys with values present in the redis server, excluding keys that do not have the specified prefix. If the prefix is empty, all keys are returned.
+//
+// **This is not intended for production use. May hamper performance**
+func (rc *RedisCache) GetAll() map[string]interface{} {
+	keys := rc.keys()
+
+	result := make(map[string]interface{}, len(keys))
+
+	for i := range keys {
+		ba, err := rc.cli.Get(keys[i]).Bytes()
+		if err != nil {
+			loggermdl.LogError("error getting key", keys[i], "from redis cache with error:", err)
+			continue
+		}
+
+		var val interface{}
+		_ = json.Unmarshal(ba, &val)
+
+		result[rc.actualKey(keys[i])] = val
+	}
+
+	return result
+}
+
+// keys returns all keys in the current DB matching the "<Prefix>*" pattern.
+func (rc *RedisCache) keys() []string {
+	pattern := rc.Prefix + "*"
+	keys, err := rc.cli.Keys(pattern).Result()
+	if err != nil {
+		loggermdl.LogError("error getting item count for ", pattern, " error: ", err)
+	}
+	return keys
+}
diff --git a/v2/cachemdl/cache_redis_test.go b/v2/cachemdl/cache_redis_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5be7d5bb37f0b54c6d953adf28e518fd84fd8ebf
--- /dev/null
+++ b/v2/cachemdl/cache_redis_test.go
@@ -0,0 +1,455 @@
+package cachemdl
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/tidwall/gjson"
+)
+
+var (
+	res         = "TestVal"
+	failMarshal = func() {}
+)
+
+func setup(rc *RedisCache) {
+	rc.Setup("127.0.0.1:6379", "", "", 0, time.Second*3)
+	rc.flushDB()
+}
+
+func TestRedisCache_Set(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "SET"
+		key1      = "test_set"
+		val1      = "test_set_val"
+		testName2 = "SET_MarshalErr"
+		key2      = "test_set_err"
+		val2      = failMarshal
+	)
+
+	setup(rc)
+	type args struct {
+		key string
+		val interface{}
+	}
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		args args
+	}{
+		{
+			name: testName1,
+			rc:   rc,
+			args: args{key: key1, val: val1},
+		},
+		{
+			name: testName2,
+			rc:   rc,
+			args: args{key: key2, val: val2},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.rc.Set(tt.args.key, tt.args.val)
+		})
+	}
+
+	res, ok := rc.Get(key1)
+	if !ok {
+		t.Error("failed to get value for ", key1)
+		return
+	}
+
+	if v, ok := res.(string); !ok || v != val1 {
+		t.Error("invalid value for ", key1)
+	}
+}
+
+func TestRedisCache_SetWithExpiration(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "SET_EXP"
+		key1      = "test_set_exp"
+		val1      = "test_set_exp_val"
+		testName2 = "SET_EXP_MarshalErr"
+		key2      = "test_set_exp_err"
+		val2      = failMarshal
+	)
+
+	setup(rc)
+	type args struct {
+		key string
+		val interface{}
+		exp time.Duration
+	}
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		args args
+	}{
+		{
+			name: testName1,
+			rc:   rc,
+			args: args{key: key1, val: val1, exp: time.Second * 2},
+		},
+		{
+			name: testName2,
+			rc:   rc,
+			args: args{key: key2, val: val2, exp: time.Second * 2},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.rc.SetWithExpiration(tt.args.key, tt.args.val, tt.args.exp)
+		})
+	}
+
+	time.Sleep(time.Second * 3)
+	val, ok := rc.Get(key1)
+	if !ok {
+		return
+	}
+
+	if v, ok := val.(string); ok || v == val1 {
+		t.Error("value not expired for", key1)
+	}
+}
+
+func TestRedisCache_SetNoExpiration(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "SET_NOEXP"
+		key1      = "test_set_noexp"
+		val1      = "test_set_noexp_val"
+		testName2 = "SET_NOEXP_MarshalErr"
+		key2      = "test_set_noexp_err"
+		val2      = failMarshal
+	)
+
+	setup(rc)
+	type args struct {
+		key string
+		val interface{}
+	}
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		args args
+	}{
+		{
+			name: testName1,
+			rc:   rc,
+			args: args{key: key1, val: val1},
+		},
+		{
+			name: testName2,
+			rc:   rc,
+			args: args{key: key2, val: val2},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.rc.SetNoExpiration(tt.args.key, tt.args.val)
+		})
+	}
+
+	time.Sleep(time.Second * 3)
+	_, ok := rc.Get(key1)
+	if !ok {
+		t.Error("value expired for", key1)
+	}
+}
+
+func TestRedisCache_Get(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "GET_NOT_PRESENT"
+		testName2 = "GET_PRESENT"
+		key1      = "test_get_not_present"
+		key2      = "test_get_present"
+		val2      = "test_get_present_val"
+	)
+
+	setup(rc)
+	rc.Set(key2, val2)
+	type args struct {
+		key string
+	}
+	tests := []struct {
+		name  string
+		rc    *RedisCache
+		args  args
+		want  interface{}
+		want1 bool
+	}{
+		{
+			name:  testName1,
+			rc:    rc,
+			args:  args{key: key1},
+			want:  nil,
+			want1: false,
+		},
+		{
+			name:  testName2,
+			rc:    rc,
+			args:  args{key: key2},
+			want:  val2,
+			want1: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1 := tt.rc.Get(tt.args.key)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("RedisCache.Get() got = %v, want %v", got, tt.want)
+			}
+			if got1 != tt.want1 {
+				t.Errorf("RedisCache.Get() got1 = %v, want %v", got1, tt.want1)
+			}
+		})
+	}
+}
+
+func TestRedisCache_Delete(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "DEL"
+		key1      = "test_del"
+		val1      = "test_del_val"
+	)
+
+	setup(rc)
+	rc.Set(key1, val1)
+	type args struct {
+		key string
+	}
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		args args
+	}{
+		{
+			name: testName1,
+			rc:   rc,
+			args: args{key: key1},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.rc.Delete(tt.args.key)
+		})
+	}
+
+	if _, ok := rc.Get(key1); ok {
+		t.Error("value not deleted for", key1)
+	}
+}
+
+func TestRedisCache_GetItemsCount(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "GET_CNT"
+		key1      = "cnt_1"
+		val1      = 1
+		key2      = "cnt_2"
+		val2      = 2
+	)
+
+	setup(rc)
+	rc.Set(key1, val1)
+	rc.Set(key2, val2)
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		want int
+	}{
+		{
+			name: testName1,
+			rc:   rc,
+			want: 2,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := tt.rc.GetItemsCount(); got != tt.want {
+				t.Errorf("RedisCache.GetItemsCount() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestRedisCache_Purge(t *testing.T) {
+	var (
+		rc        = &RedisCache{}
+		testName1 = "PURGE"
+		key1      = "test_purge"
+		val1      = "test_purge_val"
+	)
+
+	setup(rc)
+	rc.Set(key1, val1)
+	tests := []struct {
+		name string
+		rc   *RedisCache
+	}{
+		// TODO: Add test cases.
+		{
+			name: testName1,
+			rc:   rc,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.rc.Purge()
+		})
+	}
+
+	if _, ok := rc.Get(key1); ok {
+		t.Error("value not deleted for", key1)
+	}
+}
+
+func TestRedisCache_Setup(t *testing.T) {
+	var (
+		rc        *RedisCache
+		testName1 = "SETUP"
+	)
+	type args struct {
+		addr     string
+		password string
+		db       int
+		expr     time.Duration
+		prefix   string
+	}
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		args args
+	}{
+		{
+			name: testName1,
+			rc:   rc,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.rc.Setup(tt.args.addr, tt.args.password, tt.args.prefix, tt.args.db, time.Second*3)
+		})
+	}
+}
+
+// BENCHMARKS >>
+
+func BenchmarkMarshalString(b *testing.B) {
+	s := `some string`
+	for i := 0; i < b.N; i++ {
+		_, _ = marshal(s)
+	}
+}
+
+func BenchmarkMarshalWithTypeCheckString(b *testing.B) {
+	s := `some string`
+	for i := 0; i < b.N; i++ {
+		_, _ = marshalWithTypeCheck(s)
+	}
+}
+
+func BenchmarkMarshalBytes(b *testing.B) {
+	s := []byte(`some string`)
+	for i := 0; i < b.N; i++ {
+		_, _ = marshal(s)
+	}
+}
+
+func BenchmarkMarshalWithTypeCheckBytes(b *testing.B) {
+	s := []byte(`some string`)
+	for i := 0; i < b.N; i++ {
+		_, _ = marshalWithTypeCheck(s)
+	}
+}
+
+func BenchmarkMarshalGjsonVal(b *testing.B) {
+	s := gjson.Parse(`{"name":"testcase"}`).Value()
+	for i := 0; i < b.N; i++ {
+		_, _ = marshal(s)
+	}
+}
+
+func BenchmarkMarshalWithTypeCheckGjsonVal(b *testing.B) {
+	s := gjson.Parse(`{"name":"testcase"}`).Value()
+	for i := 0; i < b.N; i++ {
+		_, _ = marshalWithTypeCheck(s)
+	}
+}
+
+func BenchmarkMarshalStruct(b *testing.B) {
+	type Struct struct {
+		Name string `json:"name"`
+	}
+
+	s := Struct{"test"}
+	for i := 0; i < b.N; i++ {
+		_, _ = marshal(s)
+	}
+}
+
+func BenchmarkMarshalWithTypeCheckStruct(b *testing.B) {
+	type Struct struct {
+		Name string `json:"name"`
+	}
+
+	s := Struct{"test"}
+	for i := 0; i < b.N; i++ {
+		_, _ = marshalWithTypeCheck(s)
+	}
+}
+
+func TestRedisCache_GetAll(t *testing.T) {
+	tests := []struct {
+		name string
+		rc   *RedisCache
+		want map[string]interface{}
+		init func(rc *RedisCache)
+	}{
+		{
+			name: "Get All Items",
+			rc:   &RedisCache{},
+			want: map[string]interface{}{
+				"a": 1.24,
+				"b": 1.25,
+			},
+			init: func(rc *RedisCache) {
+				rc.Setup("127.0.0.1:6379", "", "tests", 0, time.Second*60)
+				rc.flushDB()
+
+				rc.Set("a", 1.24)
+				rc.Set("b", 1.25)
+			},
+		},
+		{
+			name: "Get All Items without prefix",
+			rc:   &RedisCache{},
+			want: map[string]interface{}{
+				"a": 5.24,
+				"b": 5.25,
+			},
+			init: func(rc *RedisCache) {
+				rc.Setup("127.0.0.1:6379", "", "", 0, time.Second*60)
+				rc.flushDB()
+
+				rc.Set("a", 5.24)
+				rc.Set("b", 5.25)
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.init(tt.rc)
+			if got := tt.rc.GetAll(); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("RedisCache.GetAll() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/v2/cachemdl/cachemdl.go b/v2/cachemdl/cachemdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..6d7b7e5e65c36bbd066e876969d7fabf2f776840
--- /dev/null
+++ b/v2/cachemdl/cachemdl.go
@@ -0,0 +1,117 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:12:07 GMT+0530 (IST)
+
+// Package cachemdl helps cache objects in memory. It wraps an in-memory key/value store with per-entry expiration.
+package cachemdl
+
+import (
+	"time"
+
+	"github.com/patrickmn/go-cache"
+)
+
+// FastCacheHelper holds the in-memory cache instance along with its configuration.
+type FastCacheHelper struct {
+	FastCache   *cache.Cache
+	Expiration  time.Duration
+	CleanupTime time.Duration
+
+	MaxEntries int
+}
+
+type fastCacheOption func(*FastCacheHelper)
+
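+// FCWithMaxEntries sets the maximum number of entries for the cache.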
+func FCWithMaxEntries(i int) fastCacheOption {
+	return func(cfg *FastCacheHelper) {
+		cfg.MaxEntries = i
+	}
+}
+
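+// FCWithExpiration sets the default expiration for cache entries.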
+func FCWithExpiration(exp time.Duration) fastCacheOption {
+	return func(cfg *FastCacheHelper) {
+		cfg.Expiration = exp
+	}
+}
+
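+// FCWithCleanupInterval sets how often expired entries are removed from the cache.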
+func FCWithCleanupInterval(ivl time.Duration) fastCacheOption {
+	return func(cfg *FastCacheHelper) {
+		cfg.CleanupTime = ivl
+	}
+}
+
+// Setup initializes fastcache cache for application. Must be called only once.
+func (fastCacheHelper *FastCacheHelper) Setup(maxEntries int, expiration time.Duration, cleanupTime time.Duration) {
+	fastCacheHelper.MaxEntries = maxEntries
+	fastCacheHelper.Expiration = expiration
+	fastCacheHelper.CleanupTime = cleanupTime
+	fastCacheHelper.FastCache = cache.New(fastCacheHelper.Expiration, fastCacheHelper.CleanupTime)
+}
+
+// SetupFastCache initializes fastcache cache for application and returns its instance.
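+//
+// Example (illustrative):
+//
+//	fc := SetupFastCache(
+//		FCWithMaxEntries(1000),
+//		FCWithExpiration(5*time.Minute),
+//		FCWithCleanupInterval(10*time.Minute),
+//	)
+//	fc.Set("answer", 42)
+//	if v, ok := fc.Get("answer"); ok {
+//		_ = v // value is present until it expires
+//	}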
+func SetupFastCache(opts ...fastCacheOption) *FastCacheHelper {
+	fc := new(FastCacheHelper)
+
+	for i := range opts {
+		opts[i](fc)
+	}
+
+	fc.FastCache = cache.New(fc.Expiration, fc.CleanupTime)
+	return fc
+}
+
+// Get returns the value stored against the key, and whether it was found.
+func (fastCacheHelper *FastCacheHelper) Get(key string) (interface{}, bool) {
+	return fastCacheHelper.FastCache.Get(key)
+}
+
+// GetItems returns all cache items, including their expiration metadata.
+func (fastCacheHelper *FastCacheHelper) GetItems() map[string]cache.Item {
+	return fastCacheHelper.FastCache.Items()
+}
+
+// SetNoExpiration stores the value against the key without any expiration.
+func (fastCacheHelper *FastCacheHelper) SetNoExpiration(key string, object interface{}) {
+	fastCacheHelper.FastCache.Set(key, object, cache.NoExpiration)
+}
+
+// Set stores the value against the key with the default expiration.
+func (fastCacheHelper *FastCacheHelper) Set(key string, object interface{}) {
+	fastCacheHelper.FastCache.Set(key, object, cache.DefaultExpiration)
+}
+
+// SetWithExpiration stores the value against the key with the given expiration.
+func (fastCacheHelper *FastCacheHelper) SetWithExpiration(key string, object interface{}, duration time.Duration) {
+	fastCacheHelper.FastCache.Set(key, object, duration)
+}
+
+// Purge removes all items from the cache.
+func (fastCacheHelper *FastCacheHelper) Purge() {
+	fastCacheHelper.FastCache.Flush()
+}
+
+// Delete removes the item stored against the key.
+func (fastCacheHelper *FastCacheHelper) Delete(key string) {
+	fastCacheHelper.FastCache.Delete(key)
+}
+
+// GetItemsCount returns the number of items in the cache.
+func (fastCacheHelper *FastCacheHelper) GetItemsCount() int {
+	return fastCacheHelper.FastCache.ItemCount()
+}
+
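+// Type returns the cache type identifier for this helper.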
+func (fastCacheHelper *FastCacheHelper) Type() int {
+	return TypeFastCache
+}
+
+// GetAll returns all keys with values present in memory. **This is not intended for production use. May hamper performance**
+func (fastCacheHelper *FastCacheHelper) GetAll() map[string]interface{} {
+	items := fastCacheHelper.FastCache.Items()
+
+	result := make(map[string]interface{}, len(items))
+	for k, v := range items {
+		result[k] = v.Object
+	}
+
+	return result
+}
diff --git a/v2/cachemdl/cachemdl_test.go b/v2/cachemdl/cachemdl_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..de9d36173da7e3e663106b93e2edd0b9942a892e
--- /dev/null
+++ b/v2/cachemdl/cachemdl_test.go
@@ -0,0 +1,87 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:11:54 GMT+0530 (IST)
+
+package cachemdl
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestFastCacheHelper_GetAll(t *testing.T) {
+	tests := []struct {
+		name            string
+		fastCacheHelper *FastCacheHelper
+		want            map[string]interface{}
+		init            func(fs *FastCacheHelper)
+	}{
+		{
+			name:            "Get all items Success",
+			fastCacheHelper: &FastCacheHelper{},
+			want: map[string]interface{}{
+				"a": 1,
+				"b": 2,
+			},
+			init: func(fs *FastCacheHelper) {
+				fs.Setup(2, 0, 0)
+				fs.Set("a", 1)
+				fs.Set("b", 2)
+			},
+		},
+		{
+			name:            "Get all items Empty",
+			fastCacheHelper: &FastCacheHelper{},
+			want:            map[string]interface{}{},
+			init: func(fs *FastCacheHelper) {
+				fs.Setup(2, 0, 0)
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.init(tt.fastCacheHelper)
+			if got := tt.fastCacheHelper.GetAll(); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("FastCacheHelper.GetAll() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/v2/configmdl/configmdl.go b/v2/configmdl/configmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..49869a3cfb3e2834208caf217877a9e09090b7b7
--- /dev/null
+++ b/v2/configmdl/configmdl.go
@@ -0,0 +1,71 @@
+package configmdl
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+	"github.com/BurntSushi/toml"
+	pellgotoml "github.com/pelletier/go-toml"
+)
+
+// InitConfig decodes the TOML file at fpath into config.
+func InitConfig(fpath string, config interface{}) (toml.MetaData, error) {
+	return toml.DecodeFile(fpath, config)
+}
+
+// InitConfigData decodes TOML data from a byte array into config.
+func InitConfigData(data []byte, config interface{}) (toml.MetaData, error) {
+	return toml.Decode(string(data), config)
+}
+
+// Example:
+//
+//	var config tomlConfig
+//	InitConfig("../testingdata/testData/config/config.toml", &config)
+//	fmt.Println(config.Name)
+
+//******* SECURE *******//
+
+// InitConfigSecure reads the AES-encrypted TOML file at fpath, decrypts it with key and decodes it into config.
+func InitConfigSecure(fpath string, config interface{}, key []byte) (toml.MetaData, error) {
+	fileBytes, fileReadErr := filemdl.ReadFile(fpath)
+	if fileReadErr != nil {
+		return toml.MetaData{}, fileReadErr
+	}
+	fileContent, decryptErr := securitymdl.AESDecrypt(fileBytes, key)
+	if errormdl.CheckErr2(decryptErr) != nil {
+		return toml.MetaData{}, errormdl.CheckErr2(decryptErr)
+	}
+	return toml.Decode(string(fileContent), config)
+}
+
+// SaveConfig marshals config to TOML and writes it to fpath.
+func SaveConfig(fpath string, config interface{}) error {
+	bytes, tomlMarshalError := pellgotoml.Marshal(config)
+	if errormdl.CheckErr(tomlMarshalError) != nil {
+		return errormdl.CheckErr(tomlMarshalError)
+	}
+	writeError := filemdl.WriteFile(fpath, bytes, false, false)
+	if errormdl.CheckErr2(writeError) != nil {
+		return errormdl.CheckErr2(writeError)
+	}
+	return nil
+}
+
+// SaveConfigSecure marshals config to TOML, encrypts it with key using AES and writes it to fpath.
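+//
+// Example (illustrative; cfg is any struct marshalable to TOML):
+//
+//	key := []byte("1234567891234567") // 16-byte AES key
+//	if err := SaveConfigSecure("config.enc.toml", cfg, key); err != nil {
+//		// handle write/encrypt failure
+//	}
+//	if _, err := InitConfigSecure("config.enc.toml", &cfg, key); err != nil {
+//		// handle read/decrypt failure
+//	}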
+func SaveConfigSecure(fpath string, config interface{}, key []byte) error {
+	configBytes, tomlMarshalError := pellgotoml.Marshal(config)
+	if errormdl.CheckErr(tomlMarshalError) != nil {
+		return errormdl.CheckErr(tomlMarshalError)
+	}
+
+	encryptedBytes, encryptError := securitymdl.AESEncrypt(configBytes, key)
+
+	if errormdl.CheckErr1(encryptError) != nil {
+		return errormdl.CheckErr1(encryptError)
+	}
+
+	writeError := filemdl.WriteFile(fpath, encryptedBytes, false, false)
+	if errormdl.CheckErr2(writeError) != nil {
+		return errormdl.CheckErr2(writeError)
+	}
+	return nil
+}
diff --git a/v2/configmdl/configmdl_test.go b/v2/configmdl/configmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8d1e6478622c39d78d4a56ec611370862139784e
--- /dev/null
+++ b/v2/configmdl/configmdl_test.go
@@ -0,0 +1,126 @@
+package configmdl
+
+import (
+	"io/ioutil"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type tomlConfig struct {
+	Title           string
+	Name            string
+	AdditionalField string
+	Version         string
+}
+
+func TestInitConfig(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	assert.True(t, config.Name == "github.com/OneOfOne/xxhash", "config.Name should match the value from config.toml")
+}
+
+func TestInitConfigData(t *testing.T) {
+	var config tomlConfig
+	ba, _ := ioutil.ReadFile("../testingdata/testData/config/config.toml")
+	InitConfigData(ba, &config)
+	assert.True(t, config.Name == "github.com/OneOfOne/xxhash", "config.Name should match the value from config.toml")
+}
+
+func TestSaveConfig(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	err := SaveConfig("../testingdata/testData/config/config-save.toml", config)
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1SaveConfig(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := SaveConfig("../testingdata/testData/config/config-save.toml", config)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2SaveConfig(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	err := SaveConfig("../testingdata/testData/config/config-save.toml", config)
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func TestSaveConfigSecure(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	err := SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1SaveConfigSecure(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2SaveConfigSecure(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	err := SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test3SaveConfigSecure(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	err := SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func TestInitConfigSecure(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+
+	_, err := InitConfigSecure("../testingdata/testData/config/config-save-secure.toml", &config, []byte(key))
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1InitConfigSecure(t *testing.T) {
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+
+	_, err := InitConfigSecure("../testingdata/testData/config/config-save-secure.error", &config, []byte(key))
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2InitConfigSecure(t *testing.T) {
+	// errormdl.IsTestingNegetiveCaseOn = false
+	var config tomlConfig
+	InitConfig("../testingdata/testData/config/config.toml", &config)
+	key := "1234567891234567"
+	SaveConfigSecure("../testingdata/testData/config/config-save-secure.toml", config, []byte(key))
+
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	_, err := InitConfigSecure("../testingdata/testData/config/config-save-secure.toml", &config, []byte(key))
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should throw error")
+}
diff --git a/v2/constantmdl/constantmdl.go b/v2/constantmdl/constantmdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..b0d79a3ce23bfaf464c73f9fd8e0276540fdba56
--- /dev/null
+++ b/v2/constantmdl/constantmdl.go
@@ -0,0 +1,81 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:13:18 GMT+0530 (IST)
+
+// Package constantmdl holds constants shared across packages
+package constantmdl
+
+import "time"
+
+// ZERO gives 0
+const ZERO = 0
+
+// MINUS_ONE is constant for -1
+const MINUS_ONE = -1
+
+// STAR gives *
+const STAR = "*"
+
+// HTTP CLIENT DEFAULT Setting : START
+
+// MAXIDLECONNS - max idle connections
+const MAXIDLECONNS = 100
+
+// MAXIDLECONNSPERHOST - max connections per host
+const MAXIDLECONNSPERHOST = 100
+
+// IDLECONNTIMEOUT - Idle time out
+const IDLECONNTIMEOUT = time.Second * 90
+
+// HTTP CLIENT DEFAULT Setting : END
+
+// TASKCOUNT is used as default task count in filepipe
+const TASKCOUNT = 5
+
+// Constants used by the CreateSecurityKey function in securitymdl.
+const (
+	MAX_RANDOM_STRING_LENGTH = 256
+	RANDOM_STRING_LENGTH     = 16
+	NUMBERS_PERCENT          = 10
+	SMALL_CHARS_PERCENT      = 40
+	CAP_CHARS_PERCENT        = 40
+	SPECIAL_CHARS_PERCENT    = 10
+	CharSet                  = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&'()*+,-./:;<=>?@[]^_`{|}~0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&'()*+,-./:;<=>?@[]^_`{|}~0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&'()*@"
+	NUMBERS_SET              = "0123456789"
+	SMALL_CHARS_SET          = "abcdefghijklmnopqrstuvwxyz"
+	CAP_CHARS_SET            = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	SPECIAL_CHARS_SET        = "!#$%&'()*+,-./:;<=>?@[]^_`{|}~"
+)
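+
+// For example, with RANDOM_STRING_LENGTH = 16 the percentages above split a
+// generated key into roughly 2 digits, 6 lowercase, 6 uppercase and 2 special
+// characters (10/40/40/10 percent).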
+
+// HTTP400ERROR is used to check 400 status
+const HTTP400ERROR = 400
+
+// MySQL Default Parameters
+const (
+	// MAX_IDLE_CONNECTIONS - MaxIdleConns
+	MAX_IDLE_CONNECTIONS = 100
+	// MAX_OPEN_CONNECTIONS - MaxOpenConns
+	MAX_OPEN_CONNECTIONS = 5000
+	// CONNECTION_MAX_LIFETIME - ConnMaxLifetime
+	CONNECTION_MAX_LIFETIME = 3 * 24 * time.Hour
+
+	MIME        = "MIME-version: 1.0;\nContent-Type: text/html; charset=\"UTF-8\";\n\n"
+	COUNTRYCODE = "91"
+
+	MQLRequestData = "MQLRequestData"
+
+	ResponseSizeThreshold = 50000 // bytes
+	// ServiceTypes
+
+	HEAVYDATA = "HEAVYDATA"
+
+	// SEQUENTIAL - SEQUENTIAL
+	SEQUENTIAL = "SEQUENTIAL"
+
+	// CONDITIONAL - CONDITIONAL
+	CONDITIONAL = "CONDITIONAL"
+
+	// Branch_Separator separates entity name and branch name, e.g. <entityName>_<branchName>
+	Branch_Separator = "_"
+	// Branch_Main represents default "main" branch
+	Branch_Main = "main"
+)
diff --git a/v2/dalmdl/boltdb/boltdb.go b/v2/dalmdl/boltdb/boltdb.go
new file mode 100644
index 0000000000000000000000000000000000000000..8bdb14c31dc216d81056d89f1fdfca2c2bf6a72c
--- /dev/null
+++ b/v2/dalmdl/boltdb/boltdb.go
@@ -0,0 +1,130 @@
+package boltdb
+
+import (
+	"encoding/json"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/boltdb/bolt"
+)
+
+// InitDB opens (or creates) the bolt database at boltDatabasePath and ensures the bucket exists.
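+//
+// Example (illustrative):
+//
+//	db, err := InitDB("/tmp/app.db", "settings")
+//	if err != nil {
+//		// handle open/bucket failure
+//	}
+//	_ = AddKey(db, "settings", "lang", "en")
+//	val, _ := GetKey(db, "settings", "lang") // val == []byte("en")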
+func InitDB(boltDatabasePath, bucketName string) (*bolt.DB, error) {
+	db, err := bolt.Open(boltDatabasePath, 0777, nil)
+	if errormdl.CheckErr(err) != nil {
+		return nil, errormdl.CheckErr(err)
+	}
+	err = db.Update(func(tx *bolt.Tx) error {
+		_, bktErr := tx.CreateBucketIfNotExists([]byte(bucketName))
+		if errormdl.CheckErr1(bktErr) != nil {
+			return errormdl.CheckErr1(bktErr)
+		}
+		return nil
+	})
+	if errormdl.CheckErr2(err) != nil {
+		return nil, errormdl.Wrap("ERROR: Could not set up bucket " + bucketName)
+	}
+	return db, nil
+}
+
+// AddKey stores a key/value pair in the bucket, creating the bucket if needed.
+func AddKey(db *bolt.DB, bucket string, key string, val string) error {
+	if db != nil {
+		return db.Update(func(tx *bolt.Tx) error {
+			_, bktErr := tx.CreateBucketIfNotExists([]byte(bucket))
+			if errormdl.CheckErr(bktErr) != nil {
+				return errormdl.CheckErr(bktErr)
+			}
+			return tx.Bucket([]byte(bucket)).Put([]byte(key), []byte(val))
+		})
+	}
+	return errormdl.Wrap("ERROR: Could not Add key in nil db instance")
+}
+
+// RemoveKey deletes the key from the bucket.
+func RemoveKey(db *bolt.DB, bucket, key string) error {
+	if db != nil {
+		return db.Update(func(tx *bolt.Tx) error {
+			bkt := tx.Bucket([]byte(bucket))
+			if bkt == nil {
+				return errormdl.Wrap("Bucket not found: " + bucket)
+			}
+			return bkt.Delete([]byte(key))
+		})
+	}
+	return errormdl.Wrap("ERROR: Could not Delete key in nil db instance")
+}
+
+// GetKey returns the value stored against the key in the bucket.
+func GetKey(db *bolt.DB, bucket, key string) ([]byte, error) {
+	if db != nil {
+		var byteData []byte
+		err := db.View(func(tx *bolt.Tx) error {
+			bkt := tx.Bucket([]byte(bucket))
+			if bkt == nil {
+				return errormdl.Wrap("Bucket not found: " + bucket)
+			}
+			byteData = bkt.Get([]byte(key))
+			return nil
+		})
+		if errormdl.CheckErr(err) != nil {
+			return nil, errormdl.CheckErr(err)
+		}
+		return byteData, nil
+	}
+	return nil, errormdl.Wrap("ERROR: Could not Get key in nil db instance")
+}
+
+// AddRecord marshals data to JSON and stores it against the key, inserting or updating as needed.
+func AddRecord(db *bolt.DB, bucket string, key string, data interface{}) error {
+	if db != nil {
+		entryBytes, err := json.Marshal(data)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.Wrap("Could not marshal entry json")
+		}
+		err = db.Update(func(tx *bolt.Tx) error {
+			_, bktErr := tx.CreateBucketIfNotExists([]byte(bucket))
+			if errormdl.CheckErr1(bktErr) != nil {
+				return errormdl.CheckErr1(bktErr)
+			}
+			putErr := tx.Bucket([]byte(bucket)).Put([]byte(key), entryBytes)
+			if errormdl.CheckErr2(putErr) != nil {
+				return errormdl.CheckErr2(putErr)
+			}
+			return nil
+		})
+		return errormdl.CheckErr3(err)
+	}
+	return errormdl.Wrap("ERROR: Could not AddRecord in nil db instance")
+}
+
+// GetRecord returns all records in the bucket as a JSON array.
+func GetRecord(db *bolt.DB, bucket string) ([]byte, error) {
+	if db != nil {
+		var bucketData []interface{}
+		err := db.View(func(tx *bolt.Tx) error {
+			bkt := tx.Bucket([]byte(bucket))
+			if bkt == nil {
+				return errormdl.Wrap("Bucket not found: " + bucket)
+			}
+			cursor := bkt.Cursor()
+			for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
+				var interfaceObj interface{}
+				unmarshallErr := json.Unmarshal(v, &interfaceObj)
+				if errormdl.CheckErr(unmarshallErr) != nil {
+					return errormdl.CheckErr(unmarshallErr)
+				}
+				bucketData = append(bucketData, interfaceObj)
+			}
+			return nil
+		})
+		if errormdl.CheckErr1(err) != nil {
+			return nil, errormdl.CheckErr1(err)
+		}
+		byteData, marshallErr := json.Marshal(bucketData)
+		if errormdl.CheckErr2(marshallErr) != nil {
+			return nil, errormdl.CheckErr2(marshallErr)
+		}
+		return byteData, nil
+	}
+	return nil, errormdl.Wrap("ERROR: Could not AddRecord in nil db instance")
+}
diff --git a/v2/dalmdl/boltdb/boltdb_test.go b/v2/dalmdl/boltdb/boltdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..034c2daeb138f475ff2ce994ae180f36b3a58f77
--- /dev/null
+++ b/v2/dalmdl/boltdb/boltdb_test.go
@@ -0,0 +1,187 @@
+package boltdb
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/boltdb/bolt"
+	"github.com/stretchr/testify/assert"
+)
+
+const (
+	bucketName  = "test"
+	dbPathWrong = "C:/test/test.db"
+)
+
+func TestInitDB(t *testing.T) {
+	_, err := InitDB("C:/test.db", bucketName)
+	assert.NoError(t, err, "This should not return error")
+}
+func TestInitDBOpenDbError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err := InitDB(dbPathWrong, bucketName)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+func TestInitDBBktError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	_, err := InitDB("C:/test1.db", bucketName)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestInitDBBktError2(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	_, err := InitDB("C:/test111.db", bucketName)
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestAddKey(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test2.db", bucketName)
+	err2 := AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	assert.NoError(t, err2, "This should not return error")
+}
+
+func TestAddKeyDbnil(t *testing.T) {
+	var dbIntsance *bolt.DB
+	err2 := AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	assert.Error(t, err2, "This should return error")
+}
+func TestAddKeyError(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test22.db", bucketName)
+	errormdl.IsTestingNegetiveCaseOn = true
+	err2 := AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err2, "This should return error")
+}
+
+func TestRemoveKey(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test3.db", bucketName)
+	AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	err3 := RemoveKey(dbIntsance, bucketName, "testKey")
+	assert.NoError(t, err3, "This should not return error")
+}
+func TestRemoveKeyDbNil(t *testing.T) {
+	var dbIntsance *bolt.DB
+	AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	err3 := RemoveKey(dbIntsance, "bktnil", "testKey")
+	assert.Error(t, err3, "This should return error")
+}
+func TestRemoveKeyBktNil(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test31.db", bucketName)
+	AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	err3 := RemoveKey(dbIntsance, "bktnil", "testKey")
+	assert.Error(t, err3, "This should return error")
+}
+func TestGetKey(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test4.db", bucketName)
+	AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	_, err := GetKey(dbIntsance, bucketName, "testKey")
+	assert.NoError(t, err, "This should not return error")
+}
+func TestGetKeyDbNil(t *testing.T) {
+	var dbIntsance *bolt.DB
+	AddKey(dbIntsance, bucketName, "testKey", "testValue")
+	_, err := GetKey(dbIntsance, bucketName, "testKey")
+	assert.Error(t, err, "This should return error")
+}
+
+func TestGetKeyBktErr(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test5.db", bucketName)
+	_, err := GetKey(dbIntsance, "mdasda", "testKey")
+	assert.Error(t, err, "This should return error")
+}
+
+type BucketData struct {
+	Name string `json:"name"`
+}
+
+func TestAddRecord(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test6.db", bucketName)
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	err := AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	assert.NoError(t, err, "This should not return error")
+}
+func TestAddRecordDbNil(t *testing.T) {
+	var dbIntsance *bolt.DB
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	err := AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	assert.Error(t, err, "This should return error")
+}
+
+func TestAddRecordError(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test7.db", bucketName)
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+func TestAddRecordError1(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test8.db", bucketName)
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	err := AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestAddRecordError2(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test81.db", bucketName)
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	err := AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestGetRecord(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test9.db", bucketName)
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	_, err2 := GetRecord(dbIntsance, bucketName)
+	assert.NoError(t, err2, "This should not return error")
+}
+func TestGetRecordDbNil(t *testing.T) {
+	var dbIntsance *bolt.DB
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	AddRecord(dbIntsance, bucketName, "myname", bucketData)
+	_, err2 := GetRecord(dbIntsance, bucketName)
+	assert.Error(t, err2, "This should return error")
+}
+func TestGetRecordBktNil(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test91.db", bucketName)
+	bucketData := BucketData{}
+	bucketData.Name = "test"
+	AddRecord(dbIntsance, "nilBkt", "myname", bucketData)
+	_, err2 := GetRecord(dbIntsance, "bktNil")
+	assert.Error(t, err2, "This should return error")
+}
+
+func TestGetRecordError(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test10.db", bucketName)
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err2 := GetRecord(dbIntsance, bucketName)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.NoError(t, err2, "This should not return error")
+}
+
+func TestGetRecordError1(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test11.db", bucketName)
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	_, err2 := GetRecord(dbIntsance, bucketName)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err2, "This should return error")
+}
+
+func TestGetRecordError2(t *testing.T) {
+	dbIntsance, _ := InitDB("C:/test12.db", bucketName)
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	_, err2 := GetRecord(dbIntsance, bucketName)
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err2, "This should return error")
+}
diff --git a/v2/dalmdl/corefdb/bucket/appendbucket.go b/v2/dalmdl/corefdb/bucket/appendbucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..df9c3a7fe6cfcafd98dab302a47be10c1a4a7647
--- /dev/null
+++ b/v2/dalmdl/corefdb/bucket/appendbucket.go
@@ -0,0 +1,57 @@
+package bucket
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+)
+
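+// AppendBucket is an append-only bucket; records can be inserted but not found, updated or deleted.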
+type AppendBucket struct {
+	Bucket
+}
+
+func NewAppendBucket(bucketNameQuery string, isDynamicName bool, isLazyEnable bool, bucketPath string) (*AppendBucket, error) {
+	if bucketNameQuery == "" {
+		return nil, errormdl.Wrap("please provide value of bucketNameQuery")
+	}
+
+	b := Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: bucketNameQuery,
+		IsDynamicName:   isDynamicName,
+		BucketPath:      bucketPath,
+	}
+	bucket := AppendBucket{}
+	bucket.Bucket = b
+	return &bucket, nil
+}
+
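+// Insert appends data to the append file at filePath.
+//
+// Example (illustrative):
+//
+//	ab, _ := NewAppendBucket("logs", false, false, "applogs")
+//	rec := gjson.Parse(`{"event":"login","user":"S1001"}`)
+//	if err := ab.Insert("/data/applogs/logs", &rec); err != nil {
+//		// handle write failure
+//	}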
+func (ab *AppendBucket) Insert(filePath string, data *gjson.Result) error {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	appendFile, err := filetype.NewAppendFile(filePath, ab.Bucket.SecurityProvider)
+	defer func() {
+		appendFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return err
+	}
+
+	err = appendFile.Write(data)
+	return err
+}
+
+func (ab *AppendBucket) Find(filePaths []string, queries []string, data *gjson.Result) (string, error) {
+	return "", errormdl.Wrap("operation not allowed")
+}
+
+func (ab *AppendBucket) Update(filePaths []string, queries []string, data *gjson.Result) (*gjson.Result, []error) {
+	return nil, []error{errormdl.Wrap("operation not allowed")}
+}
+
+func (ab *AppendBucket) Delete(filePaths, queries []string, data *gjson.Result) (recordsDeletedCnt int, errList []error) {
+	return 0, []error{errormdl.Wrap("operation not allowed")}
+}
diff --git a/v2/dalmdl/corefdb/bucket/bucket.go b/v2/dalmdl/corefdb/bucket/bucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..6559cbe81841d4a90b636e362bab4febd55ff001
--- /dev/null
+++ b/v2/dalmdl/corefdb/bucket/bucket.go
@@ -0,0 +1,85 @@
+package bucket
+
+import (
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/tidwall/gjson"
+)
+
+const (
+	PathSeperator     = "/"
+	DynamicPathPrefix = "$$"
+)
+
+type PathProvider interface {
+	GetPath(rs *gjson.Result) (string, error)
+}
+
+type Securable interface {
+	Secure(securityprovider.SecurityProvider)
+}
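+
+// Store is the common CRUD contract implemented by every bucket type.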
+type Store interface {
+	Insert(string, *gjson.Result) error
+	Find([]string, []string, *gjson.Result) (string, error)
+	Update([]string, []string, *gjson.Result) (*gjson.Result, []error)
+	Delete([]string, []string, *gjson.Result) (int, []error)
+}
+
+type MediaStore interface {
+	WriteMedia(filePath string, mediaData []byte, rs *gjson.Result) (string, error)
+	ReadMedia(filePath string, recordID string) ([]byte, *gjson.Result, error)
+	UpdateMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (err error)
+	UpsertMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (string, error)
+}
+
+type Bucket struct {
+	BucketID        string `json:"bucketId"`
+	IsDynamicName   bool   `json:"isDynamicName"`
+	BucketNameQuery string `json:"bucketNameQuery"`
+	// TODO: rename the json tag "indices" to "indexes"
+	Indexes          []string `json:"indices"`
+	BucketPath       string   `json:"bucketPath"`
+	SecurityProvider securityprovider.SecurityProvider
+}
+
+func (bucket *Bucket) AddIndex(index *index.Index) error {
+	if index == nil {
+		return errormdl.Wrap("index value is nil")
+	}
+	bucket.Indexes = append(bucket.Indexes, index.IndexID)
+	index.BucketSequence = append(index.BucketSequence, bucket.BucketID)
+	return nil
+}
+
+// GetPath resolves and returns the relative file path for a record, expanding any dynamic path segments.
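+//
+// Example (illustrative): with BucketPath "candidates/$$examId",
+// BucketNameQuery "studentId" and IsDynamicName true, the record
+// {"examId":"EX01","studentId":"S1001"} resolves to "candidates/EX01/S1001".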
+func (bucket *Bucket) GetPath(rs *gjson.Result) (string, error) {
+	path := ""
+	pathChunks := strings.Split(bucket.BucketPath, PathSeperator)
+	for i := range pathChunks {
+		pathVal := pathChunks[i]
+		if strings.HasPrefix(pathChunks[i], DynamicPathPrefix) {
+			dynamicField := strings.TrimSpace(strings.TrimPrefix(pathChunks[i], DynamicPathPrefix))
+			pathVal = strings.TrimSpace(rs.Get(dynamicField).String())
+			if pathVal == "" {
+				return "", errormdl.Wrap("please provide value for bucket name: " + dynamicField)
+			}
+		}
+		path = path + PathSeperator + pathVal
+	}
+	name := bucket.BucketNameQuery
+	if bucket.IsDynamicName {
+		name = rs.Get(name).String()
+	}
+	if name == "" {
+		return name, errormdl.Wrap("please provide value for bucket name: " + bucket.BucketNameQuery)
+	}
+	path = strings.TrimPrefix(path+PathSeperator+name, PathSeperator)
+	return path, nil
+}
+
+func (bucket *Bucket) Secure(securityprovider securityprovider.SecurityProvider) {
+	bucket.SecurityProvider = securityprovider
+}
diff --git a/v2/dalmdl/corefdb/bucket/packbucket.go b/v2/dalmdl/corefdb/bucket/packbucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..3832d03eca11a39c3810f3fb5aa9d27c8abcda81
--- /dev/null
+++ b/v2/dalmdl/corefdb/bucket/packbucket.go
@@ -0,0 +1,292 @@
+package bucket
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+)
+
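+// PackBucket stores records of multiple file types in pack files and queries them through per-fileType in-file indexes.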
+type PackBucket struct {
+	Bucket
+	InFileIndexSchemaMap map[string]filetype.InFileIndex `json:"inFileIndexMap"`
+	// TODO: filepointer cache
+	packFiles map[string]filetype.PackFile
+}
+
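+// NewPackBucket returns a pack bucket configured with an in-file index schema per fileType.
+//
+// Example (illustrative; the schema fields are defined in the filetype package):
+//
+//	schema := map[string]filetype.InFileIndex{
+//		"profile": {}, // in-file index definition for records with fileType "profile"
+//	}
+//	pb, err := NewPackBucket("studentId", true, "students", schema)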
+func NewPackBucket(bucketNameQuery string, isDynamicName bool, bucketPath string, inFileIndexSchemaMap map[string]filetype.InFileIndex) (*PackBucket, error) {
+	if bucketNameQuery == "" {
+		return nil, errormdl.Wrap("please provide value of bucketNameQuery")
+	}
+
+	bucket := Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: bucketNameQuery,
+		IsDynamicName:   isDynamicName,
+		BucketPath:      bucketPath,
+	}
+	packBucket := PackBucket{}
+	packBucket.Bucket = bucket
+	if inFileIndexSchemaMap != nil {
+		packBucket.InFileIndexSchemaMap = inFileIndexSchemaMap
+	} else {
+		packBucket.InFileIndexSchemaMap = make(map[string]filetype.InFileIndex)
+	}
+	return &packBucket, nil
+}
+
+// TODO: add fdb index data call
+func (pb *PackBucket) Insert(filePath string, data *gjson.Result) error {
+	requestedFileType := data.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		return errormdl.Wrap("please specify fileType")
+	}
+	_, ok := pb.InFileIndexSchemaMap[requestedFileType]
+	if !ok {
+		return errormdl.Wrap("filetype not found: " + requestedFileType)
+	}
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return err
+	}
+	err = packFile.Write(data)
+	return err
+}
+
+func (pb *PackBucket) findOne(filePath string, queries []string, data *gjson.Result) (string, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return "", err
+	}
+	result, err := packFile.Read(queries, data)
+	return result, err
+}
+
+func (pb *PackBucket) Find(filePaths []string, queries []string, data *gjson.Result) (string, error) {
+	requestedFileType := data.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		return "", errormdl.Wrap("please specify fileType")
+	}
+	_, ok := pb.InFileIndexSchemaMap[requestedFileType]
+	if !ok {
+		return "", errormdl.Wrap("filetype not found: " + requestedFileType)
+	}
+	queries = append(queries, `#[fileType=="`+requestedFileType+`"]`)
+	resultArray := "[]"
+	for i := range filePaths {
+		result, err := pb.findOne(filePaths[i], queries, data)
+		if err != nil {
+			return resultArray, err
+		}
+		for _, val := range gjson.Parse(result).Array() {
+			resultArray, _ = sjson.Set(resultArray, "-1", val.Value())
+		}
+	}
+	return resultArray, nil
+}
+
+func (pb *PackBucket) updateOne(filePath string, queries []string, data *gjson.Result) (gjson.Result, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return gjson.Result{}, err
+	}
+	result, err := packFile.Update(queries, data)
+	return result, err
+}
+
+func (pb *PackBucket) Update(filePaths []string, queries []string, data *gjson.Result) (*gjson.Result, []error) {
+	requestedFileType := data.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		loggermdl.LogError("please specify fileType")
+		return nil, []error{errormdl.Wrap("please specify fileType")}
+	}
+	_, ok := pb.InFileIndexSchemaMap[requestedFileType]
+	if !ok {
+		return nil, []error{errormdl.Wrap("filetype not found: " + requestedFileType)}
+	}
+	queries = append(queries, `#[fileType=="`+requestedFileType+`"]`)
+
+	finalResultArray := []gjson.Result{}
+	errList := []error{}
+
+	for i := range filePaths {
+		resultArray, err := pb.updateOne(filePaths[i], queries, data)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		finalResultArray = append(finalResultArray, resultArray.Array()...)
+	}
+
+	resultListStr := "[]"
+	for _, resultObj := range finalResultArray {
+		resultListStr, _ = sjson.Set(resultListStr, "-1", resultObj.Value())
+	}
+	result := gjson.Parse(resultListStr)
+	return &result, errList
+}
+
+func (pb *PackBucket) deleteOne(filePath string, queries []string, data *gjson.Result) (int, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return 0, err
+	}
+	recordsDeletedCnt, err := packFile.Remove(queries)
+	return recordsDeletedCnt, err
+}
+
+func (pb *PackBucket) Delete(filePaths []string, queries []string, data *gjson.Result) (int, []error) {
+	recordsDeletedCnt := 0
+	fileType := data.Get("fileType").String()
+	if len(fileType) == 0 {
+		loggermdl.LogError("fileType value not provided")
+		return recordsDeletedCnt, []error{errormdl.Wrap("please specify fileType")}
+	}
+
+	_, ok := pb.InFileIndexSchemaMap[fileType]
+	if !ok {
+		loggermdl.LogError("infileIndex for specified fileType not found")
+		return recordsDeletedCnt, []error{errormdl.Wrap("infileIndex for specified fileType not found")}
+	}
+
+	queries = append(queries, `#[fileType=="`+fileType+`"]`)
+	noDataFoundCnt := 0
+	errList := []error{}
+	for i := range filePaths {
+		deletedRecordsCnt, err := pb.deleteOne(filePaths[i], queries, data)
+		if err != nil {
+			if err.Error() == "not found" {
+				noDataFoundCnt++
+				continue
+			}
+			errList = append(errList, err)
+			continue
+		}
+		recordsDeletedCnt += deletedRecordsCnt
+	}
+
+	if noDataFoundCnt == len(filePaths) {
+		errList = []error{errormdl.Wrap("data not found")}
+	}
+	return recordsDeletedCnt, errList
+}
+
+func (pb *PackBucket) WriteMedia(filePath string, mediaData []byte, rs *gjson.Result) (string, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return "", err
+	}
+	recordID, err := packFile.WriteMedia(mediaData, rs)
+	return recordID, err
+}
+
+func (pb *PackBucket) ReadMedia(filePath string, recordID string) ([]byte, *gjson.Result, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+
+	if err != nil {
+		return nil, nil, err
+	}
+	dataByte, result, err := packFile.ReadMedia(recordID)
+	return dataByte, result, err
+}
+
+func (pb *PackBucket) UpdateMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) error {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return err
+	}
+
+	err = packFile.UpdateMedia(recordID, mediaData, rs)
+	return err
+}
+
+func (pb *PackBucket) UpsertMedia(filePath string, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return recordID, err
+	}
+	result, err := packFile.UpsertMedia(recordID, mediaData, rs)
+	return result, err
+}
+
+func (pb *PackBucket) DeleteMedia(filePath string, recordID string) error {
+	// TODO: implement media delete
+	return nil
+}
+
+func (pb *PackBucket) ReorgFile(filePath string) error {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	packFile, err := filetype.NewPackFile(filePath, pb.InFileIndexSchemaMap, pb.Bucket.SecurityProvider)
+	defer func() {
+		packFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return err
+	}
+	err = packFile.Reorg()
+	return err
+}
+
+func (pb *PackBucket) Reorg(filePaths []string) (errList []error) {
+	for i := range filePaths {
+		err := pb.ReorgFile(filePaths[i])
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+	}
+	return
+}
diff --git a/v2/dalmdl/corefdb/bucket/simplebucket.go b/v2/dalmdl/corefdb/bucket/simplebucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2891e4423db2a51b0703f896ea0bb26600364b2
--- /dev/null
+++ b/v2/dalmdl/corefdb/bucket/simplebucket.go
@@ -0,0 +1,153 @@
+package bucket
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
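+// SimpleBucket stores one record per file; find, update and delete operate on whole files.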
+type SimpleBucket struct {
+	Bucket
+	// TODO: implement lazy
+	EnableLazy       bool
+	securityProvider securityprovider.SecurityProvider
+}
+
+func NewSimpleBucket(bucketNameQuery string, isDynamicName bool, isLazyEnable bool, bucketPath string) (*SimpleBucket, error) {
+	if bucketNameQuery == "" {
+		return nil, errormdl.Wrap("please provide value of bucketNameQuery")
+	}
+
+	b := Bucket{
+		BucketID:        guidmdl.GetGUID(),
+		BucketNameQuery: bucketNameQuery,
+		IsDynamicName:   isDynamicName,
+		BucketPath:      bucketPath,
+	}
+
+	bucket := SimpleBucket{
+		EnableLazy: isLazyEnable,
+	}
+	bucket.Bucket = b
+	return &bucket, nil
+}
+
+func (sb *SimpleBucket) Insert(filePath string, data *gjson.Result) error {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	simpleFile, err := filetype.NewSimpleFile(filePath, sb.Bucket.SecurityProvider)
+	defer func() {
+		simpleFile.Close()
+		locker.Unlock()
+	}()
+	if err != nil {
+		return err
+	}
+	err = simpleFile.Write(data)
+	return err
+}
+
+func (sb *SimpleBucket) findOne(filePath string, data *gjson.Result) ([]byte, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	simpleFile, err := filetype.NewSimpleFile(filePath, sb.Bucket.SecurityProvider)
+	defer func() {
+		simpleFile.Close()
+		locker.Unlock()
+	}()
+
+	if err != nil {
+		return nil, err
+	}
+
+	dataByte, err := simpleFile.Read(data)
+	return dataByte, err
+}
+
+func (sb *SimpleBucket) Find(filePaths []string, queries []string, data *gjson.Result) (string, error) {
+	resultArray := "[]"
+
+	for i := range filePaths {
+
+		result, err := sb.findOne(filePaths[i], data)
+		if err != nil {
+			return resultArray, err
+		}
+		resultArray, _ = sjson.Set(resultArray, "-1", gjson.ParseBytes(result).Value())
+	}
+	return resultArray, nil
+}
+
+func (sb *SimpleBucket) updateOne(filePath string, queries []string, data *gjson.Result) (gjson.Result, error) {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	simpleFile, err := filetype.NewSimpleFile(filePath, sb.Bucket.SecurityProvider)
+	defer func() {
+		simpleFile.Close()
+		locker.Unlock()
+	}()
+
+	if err != nil {
+		return gjson.Result{}, err
+	}
+	result, err := simpleFile.Update(data)
+	return result, err
+}
+
+func (sb *SimpleBucket) Update(filePaths []string, queries []string, data *gjson.Result) (*gjson.Result, []error) {
+	errList := []error{}
+	resultListStr := "[]"
+
+	for i := range filePaths {
+		updatedData, err := sb.updateOne(filePaths[i], queries, data)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		resultListStr, _ = sjson.Set(resultListStr, "-1", updatedData.Value())
+	}
+	result := gjson.Parse(resultListStr)
+	return &result, errList
+}
+
+func (sb *SimpleBucket) deleteOne(filePath string, queries []string, data *gjson.Result) error {
+	locker := locker.NewLocker(filePath)
+	locker.Lock()
+	simpleFile, err := filetype.NewSimpleFile(filePath, sb.Bucket.SecurityProvider)
+	defer func() {
+		simpleFile.Close()
+		locker.Unlock()
+	}()
+
+	if err != nil {
+		return err
+	}
+	err = simpleFile.Remove()
+	return err
+}
+
+func (sb *SimpleBucket) Delete(filePaths, queries []string, data *gjson.Result) (recordsDeletedCnt int, errList []error) {
+	noDataFoundCnt := 0
+	for i := range filePaths {
+		err := sb.deleteOne(filePaths[i], queries, data)
+		if err != nil {
+			if err.Error() == "not found" {
+				noDataFoundCnt++
+				continue
+			}
+			errList = append(errList, err)
+			continue
+		}
+		recordsDeletedCnt++
+	}
+
+	if noDataFoundCnt == len(filePaths) {
+		errList = []error{errormdl.Wrap("no data found")}
+	}
+	return
+}
diff --git a/v2/dalmdl/corefdb/corefdb.go b/v2/dalmdl/corefdb/corefdb.go
new file mode 100644
index 0000000000000000000000000000000000000000..eca4468b96942f9e023d88f2b5524a747e59c169
--- /dev/null
+++ b/v2/dalmdl/corefdb/corefdb.go
@@ -0,0 +1,1542 @@
+package corefdb
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/bucket"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/locker"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+	"github.com/tidwall/buntdb"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	// INDEXFOLDER -INDEXFOLDER
+	INDEXFOLDER = "index"
+	// LazyCallBackFnAppendBucket - LazyCallBackFnAppendBucket
+	LazyCallBackFnAppendBucket = "LazyWriterAppendBucketCallBackFn"
+	// LazyCallBackFnSaveIndex - LazyCallBackFnSaveIndex
+	LazyCallBackFnSaveIndex = "LazyWriterCallBackFnAppendBucketSaveIndex"
+	lineBreak               = "\r\n"
+	IndexKeyValSeperator    = "="
+	FileType                = "fileType"
+	MigrationTypeUpdate     = "MigrationTypeUpdate"
+	MigrationTypeReplace    = "MigrationTypeReplace"
+	MigrationTypeKeyword    = "migrationType"
+	MigrationConfigFilename = "migrationConfig"
+	PathSeperator           = "/"
+)
+
+// ErrNoDataFound indicates that the requested data was not found; it may have been deleted. Callers may treat this as a non-fatal condition.
+var ErrNoDataFound = errors.New("data not found")
+
+var databases cachemdl.FastCacheHelper
+var defaultDB string
+var IsDebugModeOn bool
+
+func init() {
+	databases.Setup(1, 1000, 1000)
+	if os.Getenv("FDBMODE") == "debug" {
+		fmt.Println("fdb debug mode is on")
+		IsDebugModeOn = true
+	}
+}
+
+// FDB - FDB
+type FDB struct {
+	DBName            string
+	DBPath            string `json:"dbPath"`
+	EnableSecurity    bool   `json:"enableSec"` // if enabled, fdb files will be encrypted
+	EnableCompression bool   `json:"enableCmp"` // if enabled, fdb files will be compressed and then encrypted
+	indexes           map[string]*index.Index
+	indexMux          sync.Mutex
+	buckets           map[string]bucket.Store
+	bLocker           sync.Mutex
+	securityProvider  securityprovider.SecurityProvider
+}
+
+// CreateFDBInstance - creates fdb instance
+func CreateFDBInstance(dbPath, dbName string, isDefault bool) (*FDB, error) {
+	fdb := &FDB{
+		DBPath:   dbPath,
+		indexes:  make(map[string]*index.Index),
+		indexMux: sync.Mutex{},
+		buckets:  make(map[string]bucket.Store),
+		bLocker:  sync.Mutex{},
+		DBName:   dbName,
+	}
+
+	if isDefault {
+		defaultDB = dbName
+	}
+	databases.SetNoExpiration(dbName, fdb)
+	return fdb, nil
+}
+
+// GetFDBInstance - returns fdb instance
+func GetFDBInstance(dbName string) (*FDB, error) {
+	if dbName == "" {
+		dbName = defaultDB
+	}
+
+	rawDB, ok := databases.Get(dbName)
+	if !ok {
+		loggermdl.LogError("Database instance not found")
+		return nil, errormdl.Wrap("Database instance not found")
+	}
+	fdb, ok := rawDB.(*FDB)
+	if !ok {
+		loggermdl.LogError("Can not cast object into *FDB")
+		return nil, errormdl.Wrap("Can not cast object into *FDB")
+	}
+	return fdb, nil
+}
+
+func (fdb *FDB) SetSecurityProvider(securityProvider securityprovider.SecurityProvider) error {
+	if securityProvider == nil {
+		return errormdl.Wrap("please provide security provider")
+	}
+	for key := range fdb.buckets {
+		val := fdb.buckets[key]
+		if bucketObj, ok := val.(bucket.Securable); ok {
+			bucketObj.Secure(securityProvider)
+			bucketStore, _ := bucketObj.(bucket.Store)
+			loggermdl.LogError("bucketStore typof", bucketStore)
+			fdb.buckets[key] = bucketStore
+		}
+	}
+	fdb.securityProvider = securityProvider
+	return nil
+}
+
+// EnableFDBSecurity enables security. Files will be encrypted.
+func (fdb *FDB) EnableFDBSecurity(sec bool) {
+	if !sec {
+		return
+	}
+	fdb.EnableSecurity = sec
+}
+
+// RegisterNewIndex creates a new index, registers it with the FDB instance and returns it.
+func (fdb *FDB) RegisterNewIndex(indexID, indexNameQuery string, isDynamicName bool, indexFields []index.IndexField) (*index.Index, error) {
+	fdb.indexMux.Lock()
+	defer fdb.indexMux.Unlock()
+
+	indexFilePath, err := filepath.Abs(filepath.Join(fdb.DBPath, INDEXFOLDER, indexID))
+	if err != nil {
+		return nil, err
+	}
+	index, err := index.NewIndex(indexID, indexNameQuery, isDynamicName, indexFilePath)
+	if err != nil {
+		return nil, err
+	}
+	index.SetFields(indexFields...)
+	if _, ok := fdb.indexes[indexID]; ok {
+		return nil, errormdl.Wrap("Index ID already found")
+	}
+	err = index.CreateIndex()
+	if err != nil {
+		return nil, err
+	}
+	fdb.indexes[indexID] = index
+	return index, nil
+}
+
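+// AddBucket registers the given bucket store against the bucket ID.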
+func (fdb *FDB) AddBucket(bucketID string, bucketObj bucket.Store) error {
+	if bucketObj == nil {
+		return errormdl.Wrap("bucket is nil")
+	}
+	if _, ok := fdb.buckets[bucketID]; ok {
+		return errormdl.Wrap("bucket already present: " + bucketID)
+	}
+	if fdb.securityProvider != nil {
+		if securable, ok := bucketObj.(bucket.Securable); ok {
+			securable.Secure(fdb.securityProvider)
+			bucketObj, _ = securable.(bucket.Store)
+		}
+	}
+	fdb.buckets[bucketID] = bucketObj
+
+	return nil
+}
+
+// GetFDBIndex - returns index
+func (f *FDB) GetFDBIndex(indexID string) (*index.Index, bool) {
+	index, ok := f.indexes[indexID]
+	return index, ok
+}
+
+// GetFDBBucketStore - returns bucket
+func (f *FDB) GetFDBBucketStore(bucketID string) (bucket.Store, bool) {
+	store, ok := f.buckets[bucketID]
+	return store, ok
+}
+
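+// SaveDataInFDB inserts data into the bucket resolved from the given index and
+// then updates the index entry for the stored record.
+//
+// Example (illustrative):
+//
+//	rec := gjson.Parse(`{"fileType":"profile","studentId":"S1001","name":"A"}`)
+//	if err := SaveDataInFDB("mydb", "studentIndex", &rec); err != nil {
+//		// handle failure
+//	}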
+func SaveDataInFDB(dbName string, indexID string, data *gjson.Result) error {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	// get index from fdb index map
+	index, ok := fdb.indexes[indexID]
+	if !ok {
+		loggermdl.LogError("index not found: ", indexID)
+		return errormdl.Wrap("index not found: " + indexID)
+	}
+	//  get bucket id from index
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return errormdl.Wrap("no buckets available")
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	//  get bucket from fdb map
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("bucket not found: ", bucketID)
+		return errormdl.Wrap("bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, data)
+	if err != nil {
+		loggermdl.LogError("could not resolve filepath: ", err)
+		return errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	prevVal, err := index.GetEntryByPath(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	filePath, err := filepath.Abs(filepath.Join(fdb.DBPath, path))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	//  call save on bucket
+	err = bucketObj.Insert(filePath, data)
+	if err != nil {
+		loggermdl.LogError("fail to insert data: ", err)
+		return errormdl.Wrap("fail to insert data:: " + err.Error())
+	}
+	if IsDebugModeOn {
+		// operate on a copy of the bucket so that disabling security for the
+		// debug file does not affect the original bucket
+		var bucketStoreCpy bucket.Store
+		switch bucketIns := bucketObj.(type) {
+		case *bucket.PackBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		case *bucket.SimpleBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		case *bucket.AppendBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		}
+		if bucketStoreCpy != nil {
+			// debug copies are written without encryption
+			if securable, ok := bucketStoreCpy.(bucket.Securable); ok {
+				securable.Secure(nil)
+			}
+			if err := bucketStoreCpy.Insert(filePath+"_debug", data); err != nil {
+				loggermdl.LogError("debug error: fail to insert data: ", err)
+			}
+		}
+	}
+
+	// save index record in index store
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	prevVal, _ = sjson.Set(prevVal, "rowID", rowID)
+	updatedJSON, err := updateIndexJSON(index, prevVal, data)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	updatedJSONObj := gjson.Parse(updatedJSON)
+	err = index.AddEntry(path, &updatedJSONObj)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	return nil
+}
+
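+// GetFilePaths returns the absolute paths of the files whose index entries match the given queries.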
+func GetFilePaths(dbName, indexID string, queries []string) ([]string, error) {
+	filePaths := make([]string, 0)
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return filePaths, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	//  get index Id from index map
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return filePaths, errormdl.Wrap("INDEX not found: " + indexID)
+	}
+
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return filePaths, err
+	}
+	if len(indexKeyValueMap) == 0 {
+		return filePaths, nil
+	}
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return filePaths, err
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	return filePaths, nil
+}
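+
+// ReadDataFromFDB returns records matching the given index queries and optional in-file index queries from the bucket mapped to the index.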
+func ReadDataFromFDB(dbName, indexID string, data *gjson.Result, queries []string, infileIndexQueries []string) (*gjson.Result, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return nil, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	//  get index Id from index map
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, errormdl.Wrap("INDEX not found: " + indexID)
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return nil, errormdl.Wrap("no buckets available")
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return nil, errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	resultToReturn := gjson.Parse("[]")
+	if len(indexKeyValueMap) == 0 {
+		return &resultToReturn, nil
+	}
+	filePaths := make([]string, 0)
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	resultArray, err := bucketObj.Find(filePaths, infileIndexQueries, data)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+
+	resultToReturn = gjson.Parse(resultArray)
+	return &resultToReturn, nil
+}
+
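+// UpdateDataInFDB updates records matching the given queries with the given data and refreshes the affected index entries.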
+func UpdateDataInFDB(dbName, indexID string, data *gjson.Result, queries []string, infileIndexQueries []string) (*gjson.Result, []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return nil, []error{errormdl.Wrap("fdb instance not found " + dbName)}
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return nil, []error{errormdl.Wrap("INDEX not found: " + indexID)}
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return nil, []error{errormdl.Wrap("no buckets available")}
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return nil, []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+	}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, []error{err}
+	}
+	resultToReturn := gjson.Parse("[]")
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return &resultToReturn, []error{ErrNoDataFound}
+	}
+	filePaths := make([]string, 0)
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, []error{err}
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	resultArray, errList := bucketObj.Update(filePaths, infileIndexQueries, data)
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+		return nil, errList
+	}
+	if IsDebugModeOn {
+		debugFilePath := make([]string, len(filePaths))
+		for i, fpath := range filePaths {
+			debugFilePath[i] = fpath + "_debug"
+		}
+		// operate on a copy of the bucket so that disabling security for the
+		// debug files does not affect the original bucket
+		var bucketStoreCpy bucket.Store
+		switch bucketIns := bucketObj.(type) {
+		case *bucket.PackBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		case *bucket.SimpleBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		case *bucket.AppendBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		}
+		if bucketStoreCpy != nil {
+			if securable, ok := bucketStoreCpy.(bucket.Securable); ok {
+				securable.Secure(nil)
+			}
+			if _, errs := bucketStoreCpy.Update(debugFilePath, infileIndexQueries, data); len(errs) > 0 {
+				loggermdl.LogError("debug error: ", errs)
+			}
+		}
+	}
+
+	for filePath, json := range indexKeyValueMap {
+		rowID, err := GenRowID(filePath)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		json, _ = sjson.Set(json, "rowID", rowID)
+		updatedJSON, err := updateIndexJSON(index, json, data)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+
+		updatedJSONObj := gjson.Parse(updatedJSON)
+		err = index.AddEntry(filePath, &updatedJSONObj)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+	}
+	return resultArray, errList
+}
+
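+// DeleteDataFromFDB deletes records matching the given queries and returns the number of records deleted along with any errors.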
+func DeleteDataFromFDB(dbName, indexID string, rs *gjson.Result, queries []string, infileIndexQueries []string) (recordsDeletedCnt int, errList []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return recordsDeletedCnt, []error{errormdl.Wrap("fdb instance not found " + dbName)}
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return recordsDeletedCnt, []error{errormdl.Wrap("INDEX not found: " + indexID)}
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return 0, []error{errormdl.Wrap("no buckets available")}
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return recordsDeletedCnt, []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+	}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordsDeletedCnt, []error{err}
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return recordsDeletedCnt, []error{ErrNoDataFound}
+	}
+	filePaths := make([]string, 0)
+	for filePath := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordsDeletedCnt, []error{err}
+		}
+		filePaths = append(filePaths, filePath)
+	}
+	cnt, errList := bucketObj.Delete(filePaths, infileIndexQueries, rs)
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+		return cnt, errList
+	}
+
+	if IsDebugModeOn {
+		debugFilePaths := make([]string, len(filePaths))
+		for i, fpath := range filePaths {
+			debugFilePaths[i] = fpath + "_debug"
+		}
+		// operate on a copy of the bucket so that disabling security for the
+		// debug files does not affect the original bucket
+		var bucketStoreCpy bucket.Store
+		switch bucketIns := bucketObj.(type) {
+		case *bucket.PackBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		case *bucket.SimpleBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		case *bucket.AppendBucket:
+			cpy := *bucketIns
+			bucketStoreCpy = &cpy
+		}
+		if bucketStoreCpy != nil {
+			if securable, ok := bucketStoreCpy.(bucket.Securable); ok {
+				securable.Secure(nil)
+			}
+			if _, errs := bucketStoreCpy.Delete(debugFilePaths, infileIndexQueries, rs); len(errs) > 0 {
+				loggermdl.LogError("debug error: ", errs)
+			}
+		}
+	}
+
+	if _, ok := bucketObj.(*bucket.SimpleBucket); ok {
+		for path := range indexKeyValueMap {
+			err := index.Delete(path)
+			if err != nil {
+				errList = append(errList, err)
+			}
+		}
+	}
+	return cnt, errList
+}
+
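+// SaveMediaInFDB stores the given media bytes with their metadata and returns the record path of the stored media.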
+func SaveMediaInFDB(dbName, indexID string, mediaData []byte, data *gjson.Result) (recordPath string, err error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return recordPath, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	// get index from fdb index map
+	index, ok := fdb.indexes[indexID]
+	if !ok {
+		loggermdl.LogError("index not found: ", indexID)
+		return recordPath, errormdl.Wrap("index not found: " + indexID)
+	}
+	//  get bucket id from index
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return recordPath, errormdl.Wrap("no buckets available")
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	//  get bucket from fdb map
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("bucket not found: ", bucketID)
+		return recordPath, errormdl.Wrap("bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, data)
+	if err != nil {
+		loggermdl.LogError("could not resolve filepath: ", err)
+		return recordPath, errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	prevVal, err := index.GetEntryByPath(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	filePath, err := filepath.Abs(filepath.Join(fdb.DBPath, path))
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+
+	mediaStore, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot write media data on this bucket: ", bucketID)
+		return recordPath, errormdl.Wrap("cannot write media data on this bucket: " + bucketID)
+	}
+	//  call save on bucket
+	recordID, err := mediaStore.WriteMedia(filePath, mediaData, data)
+	if err != nil {
+		loggermdl.LogError("fail to insert data: ", err)
+		return recordPath, errormdl.Wrap("fail to insert data:: " + err.Error())
+	}
+	if IsDebugModeOn {
+		_, err := mediaStore.WriteMedia(filePath+"_debug", mediaData, data)
+		if err != nil {
+			loggermdl.LogError("debug error : fail to insert data: ", err)
+		}
+	}
+
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	prevVal, _ = sjson.Set(prevVal, "rowID", rowID)
+	updatedJSON, err := updateIndexJSON(index, prevVal, data)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	updatedJSONObj := gjson.Parse(updatedJSON)
+	err = index.AddEntry(path, &updatedJSONObj)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordPath, err
+	}
+	recordPath = fdb.DBName + PathSeperator + indexID + PathSeperator + rowID + PathSeperator + recordID
+	return recordPath, nil
+}
+
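+// GetMediaFromFDB returns the media bytes and file metadata stored under the given rowID and recordID.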
+func GetMediaFromFDB(dbName, indexID string, rowID, recordID string) (dataByte []byte, fileMeta gjson.Result, err error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return dataByte, fileMeta, errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("INDEX not found: " + indexID)
+		return dataByte, fileMeta, errormdl.Wrap("INDEX not found: " + indexID)
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return dataByte, fileMeta, errormdl.Wrap("no buckets available")
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return dataByte, fileMeta, errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	mediaReader, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot read media data from this bucket: ", bucketID)
+		return dataByte, fileMeta, errormdl.Wrap("cannot read media data from this bucket: " + bucketID)
+	}
+	queries := []string{`#[rowID=` + rowID + `]`}
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, fileMeta, err
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("media not found for rowID: ", rowID)
+		return dataByte, fileMeta, errormdl.Wrap("media not found for rowID: " + rowID)
+	}
+	filePath := ""
+	for path := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, path))
+		if err != nil {
+			loggermdl.LogError(err)
+			return dataByte, fileMeta, err
+		}
+		// find only one file
+		break
+	}
+	dataByte, metaData, err := mediaReader.ReadMedia(filePath, recordID)
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, fileMeta, err
+	}
+	fileMeta = metaData.Get("requiredData")
+	return dataByte, gjson.Parse(fileMeta.String()), nil
+}
+
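+// UpdateMediaInFDB replaces the media stored under the given recordID and returns the record path.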
+func UpdateMediaInFDB(dbName, indexID, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return "", errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	recordID = strings.TrimSpace(recordID)
+	if recordID == "" {
+		return "", errormdl.Wrap("please provide recordID")
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("index not found: " + indexID)
+		return "", errormdl.Wrap("index not found: " + indexID)
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return "", errormdl.Wrap("no buckets available")
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return "", errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, rs)
+	if err != nil {
+		loggermdl.LogError("could not resolve filepath: ", err)
+		return "", errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	queries := []string{`#[rowID=` + rowID + `]`}
+
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return "", errormdl.Wrap("no data found to update")
+	}
+	filePath := ""
+	for path := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, path))
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", err
+		}
+	}
+	mediaStore, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot write media data on this bucket: ", bucketID)
+		return "", errormdl.Wrap("cannot write media data on this bucket: " + bucketID)
+	}
+
+	err = mediaStore.UpdateMedia(filePath, recordID, mediaData, rs)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+
+	if IsDebugModeOn {
+		err = mediaStore.UpdateMedia(filePath+"_debug", recordID, mediaData, rs)
+		if err != nil {
+			loggermdl.LogError("debug error:", err)
+		}
+	}
+	for path, val := range indexKeyValueMap {
+		json, _ := sjson.Set(val, "rowID", rowID)
+		updatedJSON, err := updateIndexJSON(index, json, rs)
+		if err != nil {
+			return "", err
+		}
+
+		updatedJSONObj := gjson.Parse(updatedJSON)
+		err = index.AddEntry(path, &updatedJSONObj)
+		if err != nil {
+			return "", err
+		}
+	}
+	recordPath := fdb.DBName + PathSeperator + indexID + PathSeperator + rowID + PathSeperator + recordID
+	return recordPath, nil
+}
+
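+// UpsertMediaInFDB inserts or updates the media stored under the given recordID and returns the record path.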
+func UpsertMediaInFDB(dbName, indexID, recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", dbName)
+		return "", errormdl.Wrap("fdb instance not found " + dbName)
+	}
+	recordID = strings.TrimSpace(recordID)
+	if recordID == "" {
+		return "", errormdl.Wrap("please provide recordID")
+	}
+	index, ok := fdb.GetFDBIndex(indexID)
+	if !ok {
+		loggermdl.LogError("index not found: " + indexID)
+		return "", errormdl.Wrap("index not found: " + indexID)
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return "", errormdl.Wrap("no buckets available")
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return "", errormdl.Wrap("Bucket not found: " + bucketID)
+	}
+	path, err := fdb.ResolvePath(index, rs)
+	if err != nil {
+		loggermdl.LogError("could not resolve filepath: ", err)
+		return "", errormdl.Wrap("could not resolve filepath: " + err.Error())
+	}
+	rowID, err := GenRowID(path)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	queries := []string{`#[rowID=` + rowID + `]`}
+
+	indexKeyValueMap, err := index.GetEntriesByQueries(queries)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	if len(indexKeyValueMap) == 0 {
+		loggermdl.LogError("files not found")
+		return "", errormdl.Wrap("no data found to update")
+	}
+	filePath := ""
+	for path := range indexKeyValueMap {
+		filePath, err = filepath.Abs(filepath.Join(fdb.DBPath, path))
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", err
+		}
+	}
+	mediaStore, ok := bucketObj.(bucket.MediaStore)
+	if !ok {
+		loggermdl.LogError("cannot write media data on this bucket: ", bucketID)
+		return "", errormdl.Wrap("cannot write media data on this bucket: " + bucketID)
+	}
+	recordID, err = mediaStore.UpsertMedia(filePath, recordID, mediaData, rs)
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", err
+	}
+	if IsDebugModeOn {
+		_, err = mediaStore.UpsertMedia(filePath+"_debug", recordID, mediaData, rs)
+		if err != nil {
+			loggermdl.LogError("debug error:", err)
+		}
+	}
+	for path, val := range indexKeyValueMap {
+		json, _ := sjson.Set(val, "rowID", rowID)
+		updatedJSON, err := updateIndexJSON(index, json, rs)
+		if err != nil {
+			return "", err
+		}
+
+		updatedJSONObj := gjson.Parse(updatedJSON)
+		err = index.AddEntry(path, &updatedJSONObj)
+		if err != nil {
+			return "", err
+		}
+	}
+	recordPath := fdb.DBName + PathSeperator + indexID + PathSeperator + rowID + PathSeperator + recordID
+	return recordPath, nil
+}
+
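+// ReorganizeFiles compacts the data files of every pack bucket registered on the given FDB instance.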
+func ReorganizeFiles(dbName string) (errList []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("Error occured while fetching DB instance", err)
+		return []error{errormdl.Wrap("Error occured while fetching DB instance")}
+	}
+	for _, index := range fdb.indexes {
+		bktCnt := len(index.BucketSequence)
+		if bktCnt == 0 {
+			loggermdl.LogError("no buckets available")
+			return []error{errormdl.Wrap("no buckets available")}
+		}
+		bucketID := index.BucketSequence[bktCnt-1]
+		bucketObj, ok := fdb.buckets[bucketID]
+		if !ok {
+			loggermdl.LogError("Bucket not found: " + bucketID)
+			return []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+		}
+
+		if packBucketObj, ok := bucketObj.(*bucket.PackBucket); ok {
+
+			indexKeyValMap, err := index.GetAllEntries()
+			if err != nil {
+				loggermdl.LogError("index data not found", err)
+				return []error{errormdl.Wrap("index data not found")}
+			}
+			if len(indexKeyValMap) == 0 {
+				loggermdl.LogError("no data found to reorganize")
+				return []error{}
+			}
+			filePaths := make([]string, len(indexKeyValMap))
+			i := 0
+			for filePath := range indexKeyValMap {
+				sourceFile, err := filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+				if err != nil {
+					errList = append(errList, errormdl.Wrap("Error occured during reOrg of file data"))
+					continue
+				}
+				filePaths[i] = sourceFile
+				i++
+			}
+			reorgErrs := packBucketObj.Reorg(filePaths[:i])
+			if len(reorgErrs) > 0 {
+				errList = append(errList, reorgErrs...)
+			}
+		}
+	}
+	return errList
+}
+
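+// ReorganizeFDBBucketData compacts pack-bucket data files whose index entries match the given queries; when no query is given, all files of the index are compacted.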
+func ReorganizeFDBBucketData(dbName, indexId string, queries []string) (errList []error) {
+	fdb, err := GetFDBInstance(dbName)
+	if err != nil {
+		loggermdl.LogError("Error occured while fetching DB instance", err)
+		return []error{errormdl.Wrap("Error occured while fetching DB instance")}
+	}
+	index, found := fdb.GetFDBIndex(indexId)
+	if !found {
+		loggermdl.LogError("index not found")
+		return []error{errormdl.Wrap("index not found")}
+	}
+	bktCnt := len(index.BucketSequence)
+	if bktCnt == 0 {
+		loggermdl.LogError("no buckets available")
+		return []error{errormdl.Wrap("no buckets available")}
+	}
+	bucketID := index.BucketSequence[bktCnt-1]
+	bucketObj, ok := fdb.buckets[bucketID]
+	if !ok {
+		loggermdl.LogError("Bucket not found: " + bucketID)
+		return []error{errormdl.Wrap("Bucket not found: " + bucketID)}
+	}
+
+	if packBucketObj, ok := bucketObj.(*bucket.PackBucket); ok {
+		var indexKeyValMap map[string]string
+		var err error
+		if len(queries) > 0 {
+			indexKeyValMap, err = index.GetEntriesByQueries(queries)
+		} else {
+			indexKeyValMap, err = index.GetAllEntries()
+		}
+		if err != nil {
+			loggermdl.LogError("index data not found", err)
+			return []error{errormdl.Wrap("index data not found")}
+		}
+		if len(indexKeyValMap) == 0 {
+			loggermdl.LogError("no data found to reorganize")
+			return []error{}
+		}
+		filePaths := make([]string, len(indexKeyValMap))
+		i := 0
+		for filePath := range indexKeyValMap {
+			sourceFile, err := filepath.Abs(filepath.Join(fdb.DBPath, filePath))
+			if err != nil {
+				errList = append(errList, errormdl.Wrap("Error occured during reOrg of file data"))
+				continue
+			}
+			filePaths[i] = sourceFile
+			i++
+		}
+		reorgErrs := packBucketObj.Reorg(filePaths[:i])
+		if len(reorgErrs) > 0 {
+			errList = append(errList, reorgErrs...)
+		}
+		if IsDebugModeOn {
+			var debugFilePaths []string
+			for _, fPath := range filePaths[:i] {
+				debugFilePaths = append(debugFilePaths, fPath+"_debug")
+			}
+			reorgErrs := packBucketObj.Reorg(debugFilePaths)
+			if len(reorgErrs) > 0 {
+				loggermdl.LogError("debug error", reorgErrs)
+			}
+		}
+	}
+
+	return errList
+}
+
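+// ResolvePath builds the relative data-file path for a record by walking the index's bucket sequence and appending the resolved index name.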
+func (fdb *FDB) ResolvePath(index *index.Index, rs *gjson.Result) (string, error) {
+	path := ""
+	for _, bucketID := range index.BucketSequence {
+		bucketObj, ok := fdb.buckets[bucketID]
+		if !ok {
+			loggermdl.LogError("bucket not found: " + bucketID)
+			return "", errormdl.Wrap("bucket not found: " + bucketID)
+		}
+
+		pathResolver, ok := bucketObj.(bucket.PathProvider)
+		if !ok {
+			return "", errormdl.Wrap("cannot resolve bucket path for bucket: " + bucketID)
+		}
+		bucketPath, err := pathResolver.GetPath(rs)
+		if err != nil {
+			return "", err
+		}
+		path = filepath.Join(path, bucketPath)
+	}
+	indexName := index.IndexNameQuery
+	if index.IsDynamicName {
+		indexName = rs.Get(index.IndexNameQuery).String()
+	}
+	if indexName == "" {
+		return "", errormdl.Wrap("required attribute not provided:" + index.IndexNameQuery)
+	}
+	path = filepath.Join(path, indexName)
+	return path, nil
+}
+
+// updateIndexJSON updates the index record JSON with the current values of the index fields.
+func updateIndexJSON(index *index.Index, existingData string, rs *gjson.Result) (string, error) {
+	json := existingData
+	var err error
+	for _, indexField := range index.IndexFields {
+		if rs.Get(indexField.Query).Value() == nil {
+			continue
+		}
+		json, err = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
+		if err != nil {
+			return existingData, err
+		}
+	}
+	return json, nil
+}
+
+// GenRowID generates a hash for the given file name; the hash is truncated to 16 characters.
+func GenRowID(name string) (string, error) {
+	name = strings.ReplaceAll(filepath.Clean(name), string(filepath.Separator), "")
+	rowID, err := securitymdl.GetHash(name)
+	if err != nil {
+		return "", err
+	}
+
+	if len(rowID) > 16 {
+		return rowID[:16], nil
+	}
+
+	return rowID, nil
+}
+
+func addMigrationReplaceConfig(targetBasePath string, securityProvider securityprovider.SecurityProvider) error {
+	configfilePath, err := filepath.Abs(filepath.Join(targetBasePath, MigrationConfigFilename))
+	if err != nil {
+		return err
+	}
+	migrationConfigStr, _ := sjson.Set("", MigrationTypeKeyword, MigrationTypeReplace)
+	rs := gjson.Parse(migrationConfigStr)
+	lockerObj := locker.NewLocker(configfilePath)
+	lockerObj.Lock()
+	simpleFile, err := filetype.NewSimpleFile(configfilePath, securityProvider)
+	if err != nil {
+		// avoid deferring Close on a nil file when creation fails
+		lockerObj.Unlock()
+		return errormdl.Wrap("fail to add migration config: " + err.Error())
+	}
+	defer func() {
+		simpleFile.Close()
+		lockerObj.Unlock()
+	}()
+	return simpleFile.Write(&rs)
+}
+
+func addMigrationUpdateConfig(targetBasePath string, securityProvider securityprovider.SecurityProvider) error {
+	configfilePath, err := filepath.Abs(filepath.Join(targetBasePath, MigrationConfigFilename))
+	if err != nil {
+		return err
+	}
+	migrationConfigStr, _ := sjson.Set("", MigrationTypeKeyword, MigrationTypeUpdate)
+	rs := gjson.Parse(migrationConfigStr)
+	lockerObj := locker.NewLocker(configfilePath)
+	lockerObj.Lock()
+	simpleFile, err := filetype.NewSimpleFile(configfilePath, securityProvider)
+	if err != nil {
+		// avoid deferring Close on a nil file when creation fails
+		lockerObj.Unlock()
+		return errormdl.Wrap("fail to add migration config: " + err.Error())
+	}
+	defer func() {
+		simpleFile.Close()
+		lockerObj.Unlock()
+	}()
+	return simpleFile.Write(&rs)
+}
+
+func getMigrationConfig(sourcePath string, rs *gjson.Result, securityProvider securityprovider.SecurityProvider) (*gjson.Result, error) {
+	configfilePath, err := filepath.Abs(filepath.Join(sourcePath, MigrationConfigFilename))
+	if err != nil {
+		return nil, err
+	}
+	if !filemdl.FileAvailabilityCheck(configfilePath) {
+		return nil, errormdl.Wrap("migration config file not found")
+	}
+	lockerObj := locker.NewLocker(configfilePath)
+	lockerObj.Lock()
+	simpleFile, err := filetype.NewSimpleFile(configfilePath, securityProvider)
+	if err != nil {
+		// avoid deferring Close on a nil file when opening fails
+		lockerObj.Unlock()
+		return nil, err
+	}
+	defer func() {
+		simpleFile.Close()
+		lockerObj.Unlock()
+	}()
+	dataByte, err := simpleFile.Read(rs)
+	if err != nil {
+		return nil, err
+	}
+	migrationConfig := gjson.ParseBytes(dataByte)
+	return &migrationConfig, nil
+}
+
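+// ZipImporter is a DataImporter that imports fdb data from a zip archive created by ZipExporter.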
+type ZipImporter struct {
+	FdbName    string
+	IndexID    string
+	SourcePath string
+	Data       *gjson.Result
+}
+
+// ZipExporter is a DataExporter that exports fdb data as a zip archive.
+type ZipExporter struct {
+	FdbName       string
+	IndexID       string
+	Queries       []string
+	DestPath      string
+	MigrationType string
+}
+
+// DataExport exports fdb data as zip
+func (z ZipExporter) DataExport() (err error) {
+	fdb, err := GetFDBInstance(z.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", z.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + z.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(z.IndexID)
+	if !ok {
+		return errormdl.Wrap("INDEX not found: " + z.IndexID)
+	}
+	sourcePath := ""
+	// full UnixNano timestamp avoids collisions between concurrent exports
+	timeStamp := time.Now().UnixNano()
+	targetBasePath := filepath.Join(filemdl.TempDir, strconv.FormatInt(timeStamp, 10))
+	filteredKeyValMap, err := index.GetEntriesByQueries(z.Queries)
+	if err != nil {
+		return err
+	}
+	if len(filteredKeyValMap) == 0 {
+		return errormdl.Wrap("no data found to export")
+	}
+	defer func() {
+		// removes created zip
+		filemdl.DeleteDirectory(targetBasePath)
+	}()
+	// copy data files
+	for path := range filteredKeyValMap {
+		sourcePath = filepath.Join(fdb.DBPath, path)
+		targetPath := filepath.Join(targetBasePath, path)
+		_, err := filemdl.CopyFile(sourcePath, targetPath, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// copy index file
+	targetPath := filepath.Join(targetBasePath, INDEXFOLDER, z.IndexID)
+	err = AddIndexEntriesInFile(targetPath, filteredKeyValMap, index.SecurityProvider)
+	if err != nil {
+		return err
+	}
+
+	switch z.MigrationType {
+	case MigrationTypeUpdate:
+		err = addMigrationUpdateConfig(targetBasePath, index.SecurityProvider)
+	case MigrationTypeReplace:
+		err = addMigrationReplaceConfig(targetBasePath, index.SecurityProvider)
+	default:
+		return errormdl.Wrap("fail to export data: export operation not allowed on migration type - " + z.MigrationType)
+	}
+
+	if err != nil {
+		loggermdl.LogError("fail to export data: ", err)
+		return errormdl.Wrap("fail to export data: " + err.Error())
+	}
+	// make zip of copied data to destination folder
+	// zip will have name of indexId
+	destinationPath := filepath.Join(z.DestPath, z.IndexID)
+	return filemdl.Zip(targetBasePath, destinationPath)
+}
+
+// DataImport imports data from zip
+func (z ZipImporter) DataImport() (err error) {
+	fdb, err := GetFDBInstance(z.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", z.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + z.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(z.IndexID)
+	if !ok {
+		loggermdl.LogError("index not found: ", z.IndexID)
+		return errormdl.Wrap("index not found: " + z.IndexID)
+	}
+	archivePath := z.SourcePath
+	if !filemdl.FileAvailabilityCheck(archivePath) {
+		loggermdl.LogError("archive file not found at specified location: ", archivePath)
+		return errormdl.Wrap("archive file not found at location: " + archivePath)
+	}
+	// full UnixNano timestamp avoids collisions between concurrent imports
+	timeStamp := time.Now().UnixNano()
+	pathToExtractZip := filepath.Join(filemdl.TempDir, strconv.FormatInt(timeStamp, 10))
+
+	err = filemdl.Unzip(archivePath, pathToExtractZip)
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	defer func() {
+		// removes extracted files
+		filemdl.DeleteDirectory(pathToExtractZip)
+	}()
+	childDirs, err := filemdl.ListDirectory(pathToExtractZip)
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	if len(childDirs) == 0 {
+		loggermdl.LogError("no data found to import")
+		return errormdl.Wrap("no data found to import")
+	}
+	if !childDirs[0].IsDir() {
+		loggermdl.LogError("invalid archive file")
+		return errormdl.Wrap("invalid archive file")
+	}
+	sourcePath := filepath.Join(pathToExtractZip, childDirs[0].Name())
+	fdbBasePath := fdb.DBPath
+
+	// loggermdl.LogDebug(sourcePath)
+	migrationConfig, err := getMigrationConfig(sourcePath, z.Data, index.SecurityProvider)
+	if err != nil {
+		loggermdl.LogError("fail to get migration config", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	migrationType := migrationConfig.Get(MigrationTypeKeyword).String()
+	if migrationType != MigrationTypeReplace && migrationType != MigrationTypeUpdate {
+		return errormdl.Wrap("import operation not allowed on migration type - " + migrationType)
+	}
+	err = filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			loggermdl.LogError("err", err)
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		}
+		//  ignore config file from copying
+		if strings.Contains(path, MigrationConfigFilename) {
+			return nil
+		}
+
+		foundAtIndex := strings.LastIndex(path, sourcePath)
+		if foundAtIndex == -1 {
+			return errormdl.Wrap("invalid archived file")
+		}
+		// loggermdl.LogDebug(path)
+
+		// if migration type is MigrationTypeUpdate, merge index entries from the exported index file; otherwise the index file is replaced like any other file
+		if migrationType == MigrationTypeUpdate && strings.Contains(path, INDEXFOLDER) {
+			// load index entries from the exported index file
+			err := ImportIndexEntries(path, fdb, z.IndexID)
+			if err != nil {
+				loggermdl.LogError("fail to load indexes from data", err)
+				return errormdl.Wrap("fail to load indexes")
+			}
+			err = index.WriteIndexEntriesInFile()
+			if err != nil {
+				loggermdl.LogError("fail to add indexes: ", err)
+				return errormdl.Wrap("fail to add indexes")
+			}
+			return nil
+		}
+		destPath := filepath.Join(fdbBasePath, path[foundAtIndex+len(sourcePath):])
+		if !filemdl.FileAvailabilityCheck(destPath) {
+			dir, _ := filepath.Split(destPath)
+			err = filemdl.CreateDirectoryRecursive(dir)
+			if err != nil {
+				return err
+			}
+		}
+		return filemdl.AtomicReplaceFile(path, destPath)
+	})
+	if err != nil {
+		loggermdl.LogError("fail to import data: ", err)
+		return errormdl.Wrap("fail to import data: " + err.Error())
+	}
+
+	err = index.LoadIndexEntriesFromFile()
+	if err != nil {
+		loggermdl.LogError("fail to add indexes", err)
+		return errormdl.Wrap("fail to add indexes")
+	}
+
+	return nil
+}
+
+// ImportIndexEntries loads index entries from the given index file and merges them into the index registered under indexID.
+func ImportIndexEntries(indexFilePath string, fdb *FDB, indexID string) error {
+	index, found := fdb.GetFDBIndex(indexID)
+	if !found {
+		return errormdl.Wrap("index not found")
+	}
+	if !filemdl.FileAvailabilityCheck(indexFilePath) {
+		return nil
+	}
+	fileData, err := filemdl.FastReadFile(indexFilePath)
+	if err != nil {
+		loggermdl.LogError("failed to load FDB index from: ", indexFilePath)
+		return err
+	}
+	fileData, err = index.SecurityProvider.Decrypt(fileData, indexFilePath, nil)
+	if err != nil {
+		loggermdl.LogError("failed to decrypt FDB index data: ", err)
+		return errormdl.Wrap("failed to decrypt FDB index data: " + err.Error())
+	}
+	data := string(fileData)
+	indexRecords := strings.Split(data, lineBreak)
+	indexDataMap := make(map[string]string)
+	for _, indexRecord := range indexRecords {
+		indexValues := strings.Split(indexRecord, IndexKeyValSeperator)
+		if len(indexValues) == 2 {
+			indexDataMap[indexValues[0]] = indexValues[1]
+		}
+	}
+	var fns []func(a, b string) bool
+	for _, idx := range index.IndexFields {
+		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
+	}
+
+	// merge the imported entries into the in-memory index
+	return index.AddEntries(indexDataMap)
+}
+
+// FileImporter is a DataImporter that imports fdb data from a folder created by FileExporter.
+type FileImporter struct {
+	FdbName    string
+	IndexID    string
+	SourcePath string
+	Data       *gjson.Result
+}
+
+// FileExporter is a DataExporter that exports fdb data into a folder.
+type FileExporter struct {
+	FdbName       string
+	IndexID       string
+	Queries       []string
+	DestPath      string
+	MigrationType string
+}
+
+// DataExport exports fdb data in a folder
+func (f FileExporter) DataExport() (err error) {
+	fdb, err := GetFDBInstance(f.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", f.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + f.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(f.IndexID)
+	if !ok {
+		return errormdl.Wrap("INDEX not found: " + f.IndexID)
+	}
+	sourcePath := ""
+	targetBasePath := filepath.Join(f.DestPath, f.IndexID)
+	filteredKeyValMap, err := index.GetEntriesByQueries(f.Queries)
+	if err != nil {
+		return err
+	}
+
+	if len(filteredKeyValMap) == 0 {
+		return errormdl.Wrap("no data found to export")
+	}
+
+	for path := range filteredKeyValMap {
+		sourcePath = filepath.Join(fdb.DBPath, path)
+		targetPath := filepath.Join(targetBasePath, path)
+		_, err := filemdl.CopyFile(sourcePath, targetPath, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// copy index file
+	targetPath := filepath.Join(targetBasePath, INDEXFOLDER, f.IndexID)
+	err = AddIndexEntriesInFile(targetPath, filteredKeyValMap, index.SecurityProvider)
+	if err != nil {
+		return err
+	}
+
+	switch f.MigrationType {
+	case MigrationTypeUpdate:
+		err = addMigrationUpdateConfig(targetBasePath, index.SecurityProvider)
+	case MigrationTypeReplace:
+		err = addMigrationReplaceConfig(targetBasePath, index.SecurityProvider)
+	default:
+		return errormdl.Wrap("export operation not allowed on migration type - " + f.MigrationType)
+	}
+
+	return err
+}
+
+// DataImport imports data from exported folder
+func (f FileImporter) DataImport() (err error) {
+	fdb, err := GetFDBInstance(f.FdbName)
+	if err != nil {
+		loggermdl.LogError("fdb instance not found: ", f.FdbName)
+		return errormdl.Wrap("fdb instance not found: " + f.FdbName)
+	}
+	index, ok := fdb.GetFDBIndex(f.IndexID)
+	if !ok {
+		loggermdl.LogError("index not found: ", f.IndexID)
+		return errormdl.Wrap("index not found: " + f.IndexID)
+	}
+	if !filemdl.FileAvailabilityCheck(f.SourcePath) {
+		loggermdl.LogError("archive file not found at specified location: ", f.SourcePath)
+		return errormdl.Wrap("archive file not found at location: " + f.SourcePath)
+	}
+
+	// full UnixNano timestamp avoids collisions between concurrent imports
+	timeStamp := time.Now().UnixNano()
+	tempDir := filepath.Join(filemdl.TempDir, strconv.FormatInt(timeStamp, 10))
+	err = filemdl.CopyDir(f.SourcePath, tempDir)
+	defer func() {
+		filemdl.DeleteDirectory(tempDir)
+	}()
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("fail to copy data")
+	}
+
+	childDirs, err := filemdl.ListDirectory(tempDir)
+	if err != nil {
+		loggermdl.LogError("failed to import data: ", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	if len(childDirs) == 0 {
+		loggermdl.LogError("no data found to import")
+		return errormdl.Wrap("no data found to import")
+	}
+	fdbBasePath := fdb.DBPath
+
+	// loggermdl.LogDebug(f.SourcePath)
+	migrationConfig, err := getMigrationConfig(tempDir, f.Data, index.SecurityProvider)
+	if err != nil {
+		loggermdl.LogError("fail to get migration config", err)
+		return errormdl.Wrap("invalid archived file")
+	}
+	migrationType := migrationConfig.Get(MigrationTypeKeyword).String()
+	if migrationType != MigrationTypeReplace && migrationType != MigrationTypeUpdate {
+		return errormdl.Wrap("import operation not allowed on migration type - " + migrationType)
+	}
+	err = filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			loggermdl.LogError("err", err)
+			return err
+		}
+		// loggermdl.LogError(path)
+		if info.IsDir() {
+			return nil
+		}
+		//  ignore config file from copying
+		if strings.Contains(path, MigrationConfigFilename) {
+			return nil
+		}
+
+		foundAtIndex := strings.LastIndex(path, tempDir)
+		if foundAtIndex == -1 {
+			return errormdl.Wrap("invalid archived file")
+		}
+		// if file is index file then copy index entries from index files
+		if strings.Contains(path, INDEXFOLDER) {
+			// load index entries from file
+			err := ImportIndexEntries(path, fdb, f.IndexID)
+			if err != nil {
+				loggermdl.LogError("fail to import indexes", err)
+				return errormdl.Wrap("fail to import indexes")
+			}
+			err = index.WriteIndexEntriesInFile()
+			if err != nil {
+				loggermdl.LogError("fail to import indexes: ", err)
+				return errormdl.Wrap("fail to import indexes")
+			}
+			return nil
+		}
+
+		destPath := filepath.Join(fdbBasePath, path[foundAtIndex+len(tempDir):])
+		if !filemdl.FileAvailabilityCheck(destPath) {
+			dir, _ := filepath.Split(destPath)
+			err = filemdl.CreateDirectoryRecursive(dir)
+			if err != nil {
+				return err
+			}
+		}
+		return filemdl.AtomicReplaceFile(path, destPath)
+	})
+
+	if err != nil {
+		loggermdl.LogError("fail to import data: ", err)
+		return errormdl.Wrap("fail to import data: " + err.Error())
+	}
+
+	return nil
+}
+
+// AddIndexEntriesInFile writes the given index entries, encrypted by the given security provider, to the index file at indexFilePath.
+func AddIndexEntriesInFile(indexFilePath string, entries map[string]string, securityProvider securityprovider.SecurityProvider) error {
+	dataToStore := ""
+	for key, val := range entries {
+		dataToStore = dataToStore + key + IndexKeyValSeperator + val + lineBreak
+	}
+	dataByteToWriteRes := []byte{}
+	var hashError error
+	if len(dataToStore) > 0 {
+		_, fileName := filepath.Split(indexFilePath)
+		dataByteToWriteRes, hashError = securityProvider.Encrypt([]byte(dataToStore), fileName, nil)
+		if errormdl.CheckErr1(hashError) != nil {
+			return errormdl.CheckErr1(hashError)
+		}
+	}
+	return filemdl.WriteFile(indexFilePath, dataByteToWriteRes, true, false)
+}
diff --git a/v2/dalmdl/corefdb/corefdb_test.go b/v2/dalmdl/corefdb/corefdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..9793bd0f79ccd11294fae63712a20fedf2f42ec3
--- /dev/null
+++ b/v2/dalmdl/corefdb/corefdb_test.go
@@ -0,0 +1,772 @@
+package corefdb
+
+import (
+	"log"
+	"testing"
+	"time"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/bucket"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/filetype"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+var (
+	dbPath              = "D:\\exe\\myfdb"
+	dbName              = "myfdb"
+	CheckLazyIndexWrite = false
+	lazyWriteInterval   = 3
+)
+
+func init() {
+
+	fdb, err := CreateFDBInstance(dbPath, dbName, true)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// enable security on fdb
+	{
+
+		fdbSecurityProvider := securityprovider.New(securityprovider.SecurityConfig{
+			EncKey:         "myenckey",
+			UserDefinedKey: "mkcl",
+			FieldQuery:     "instituteId",
+		})
+
+		err = fdb.SetSecurityProvider(fdbSecurityProvider)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	//  creating simple bucket
+	{
+		simpleBucket, err := bucket.NewSimpleBucket("Simple", false, false, "")
+		if err != nil {
+			log.Fatal(err)
+		}
+		fields := []index.IndexField{
+			index.IndexField{
+				FieldName: "name",
+				Query:     "name",
+			},
+		}
+		i, err := fdb.RegisterNewIndex("stdId", "studentId", true, fields)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = simpleBucket.AddIndex(i)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = fdb.AddBucket(simpleBucket.BucketID, simpleBucket)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	//  creating pack bucket
+	{
+
+		inFileIndexSchemaMap := map[string]filetype.InFileIndex{
+			"Exam": filetype.InFileIndex{
+				FileType: "Exam",
+				IndexFields: []filetype.InFileIndexField{
+					filetype.InFileIndexField{
+						FieldName: "examId",
+						Query:     "examId",
+					},
+				},
+			},
+			"Profile": filetype.InFileIndex{
+				FileType: "Profile",
+				IndexFields: []filetype.InFileIndexField{
+					filetype.InFileIndexField{
+						FieldName: "class",
+						Query:     "class",
+					},
+				},
+			},
+		}
+
+		packBucket, err := bucket.NewPackBucket("PackBucket", false, "", inFileIndexSchemaMap)
+		if err != nil {
+			log.Fatal(err)
+		}
+		packIndexfields := []index.IndexField{
+			index.IndexField{
+				FieldName: "name",
+				Query:     "name",
+			},
+		}
+		packbucketIndex, err := fdb.RegisterNewIndex("studentPack", "stdId", true, packIndexfields)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = packBucket.AddIndex(packbucketIndex)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = fdb.AddBucket(packBucket.BucketID, packBucket)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	//  creating append bucket
+	{
+		appendBucket, err := bucket.NewAppendBucket("Append", false, false, "")
+		if err != nil {
+			log.Fatal(err)
+		}
+		fields := []index.IndexField{
+			index.IndexField{
+				FieldName: "name",
+				Query:     "name",
+			},
+		}
+		i, err := fdb.RegisterNewIndex("stdResponse", "studentId", true, fields)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = appendBucket.AddIndex(i)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = fdb.AddBucket(appendBucket.BucketID, appendBucket)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+func TestSaveDataInSimpleBucket(t *testing.T) {
+	tests := []struct {
+		data           string
+		ShouldGetError bool
+	}{
+		{
+			data:           `{"instituteId":"geca" ,"name": "ajay","fileType": "EXAM","studentId": 1234,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+		{
+			data:           `{"instituteId":"geca" ,"name": "ajay","fileType": "EXAM","studentId": 1235,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+		{
+			data:           `{"instituteId":"geca" ,"name": "ajay","fileType": "EXAM","studentId": 1236,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+	}
+
+	indexID := "stdId"
+
+	for i, test := range tests {
+		studentObj := gjson.Parse(test.data)
+		t.Logf("\t Test: %d", i)
+		now := time.Now()
+		err := SaveDataInFDB(dbName, indexID, &studentObj)
+		if err != nil {
+			log.Fatal(err)
+		}
+		timeElapsed := time.Since(now)
+		// loggermdl.LogError("timeElapsed", timeElapsed)
+		t.Logf("\t %s\t should be able to save data %s", "succeed", timeElapsed)
+	}
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestGetDataFromSimpleBucket(t *testing.T) {
+	indexID := "stdId"
+
+	data, _ := sjson.Set("", "fileType", "Exam")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	studentObj := gjson.Parse(data)
+
+	queries := []string{`#[name==ajay]`}
+	inFileIndexQueries := []string{}
+
+	result, err := ReadDataFromFDB(dbName, indexID, &studentObj, queries, inFileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogDebug("result", result.String())
+}
+func TestUpdateDataInNormalBucket(t *testing.T) {
+	indexID := "stdId"
+	data, _ := sjson.Set("", "abc", 10)
+	data, _ = sjson.Set(data, "marks", 30)
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	queries := []string{`#[name=="ajay"]`}
+	infileIndexQueries := []string{}
+	studentObj := gjson.Parse(data)
+
+	updatedData, errList := UpdateDataInFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+	}
+	loggermdl.LogDebug("updatedData", updatedData)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestDeleteDataFromNormalBucket(t *testing.T) {
+	indexID := "stdId"
+
+	queries := []string{`#[name==ajay]`}
+	infileIndexQueries := []string{}
+
+	// data, _ := sjson.Set("", "fileType", "Exam")
+	// data, _ := sjson.Set(data, "studentId", 1234)
+
+	studentObj := gjson.Result{}
+
+	recordsDeletedCnt, errList := DeleteDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		loggermdl.LogError("errList", errList)
+	}
+	loggermdl.LogDebug("recordsDeletedCnt", recordsDeletedCnt)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestSaveDataInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+	// get fdb obj
+
+	data, _ := sjson.Set("", "name", "ajay")
+	data, _ = sjson.Set(data, "stdId", 1238)
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "examName", "unit2")
+	data, _ = sjson.Set(data, "totalQuestion", 50)
+	data, _ = sjson.Set(data, "marks", 26)
+	data, _ = sjson.Set(data, "examId", "MATH001")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+
+	err := SaveDataInFDB(dbName, indexID, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+func TestGetDataFromPackBucket(t *testing.T) {
+	indexID := "studentPack"
+	data, _ := sjson.Set("", "name", "ajay")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+
+	queries := []string{`#[name=ajay]`}
+	infileIndexQueries := []string{`#[examId=MATH001]`}
+
+	result, err := ReadDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("result", result)
+}
+
+func TestUpdateDataInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "abc", "123")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+	queries := []string{`#[name=ajay]`}
+	infileIndexQueries := []string{`#[examId=MATH002]`}
+	result, errList := UpdateDataInFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	loggermdl.LogError("result", result)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestDeleteDataFromPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "abc", "123")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+
+	studentObj := gjson.Parse(data)
+
+	queries := []string{`#[name=vijay]`}
+	infileIndexQueries := []string{`#[examId=MATH002]`}
+	cnt, errList := DeleteDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	loggermdl.LogError("delete cnt", cnt)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestSaveMediaInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "stdId", 1239)
+
+	studentObj := gjson.Parse(data)
+
+	filePath := "C:\\Users\\vivekn\\Pictures\\gopher.png"
+
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	recordID, err := SaveMediaInFDB(dbName, indexID, dataByte, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("recordId", recordID)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestReadMediaFromPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	rowID := "-568385317811827"
+	recordID := "1ZtdPpUYLuKmcHJpTn2LXQ4XABM"
+	_, fileMeta, err := GetMediaFromFDB(dbName, indexID, rowID, recordID)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("fileMeta", fileMeta)
+}
+
+func TestUpdateMediaInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1239)
+
+	studentObj := gjson.Parse(data)
+
+	recordID := "1ZsY055dnvgL6qutjy5sbnsubS8"
+	filePath := "C:\\Users\\vivekn\\Pictures\\gopher.png"
+
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	recordPath, err := UpdateMediaInFDB(dbName, indexID, recordID, dataByte, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("recordPath", recordPath)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestUpsertMediaInPackBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "stdId", 1239)
+
+	studentObj := gjson.Parse(data)
+
+	recordID := "dfsdg123243"
+	filePath := "C:\\Users\\vivekn\\Pictures\\ghg.jpg"
+
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	recordPath, err := UpsertMediaInFDB(dbName, indexID, recordID, dataByte, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError("recordPath", recordPath)
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestSaveDataInAppendBucket(t *testing.T) {
+	tests := []struct {
+		data           string
+		ShouldGetError bool
+	}{
+		{
+			data:           `{"name": "ajay","instituteId": "geca", "fileType": "EXAM","studentId": 1234,"examId":"exam001","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+		{
+			data:           `{"name": "sanjay","instituteId": "geca","fileType": "EXAM","studentId": 1234,"examId":"exam002","examName": "test102","totalQuestions":50,"marks": 38}`,
+			ShouldGetError: false,
+		},
+	}
+
+	indexID := "stdResponse"
+
+	for i, test := range tests {
+		studentObj := gjson.Parse(test.data)
+		t.Logf("\t Test: %d", i)
+		err := SaveDataInFDB(dbName, indexID, &studentObj)
+		if err != nil {
+			log.Fatal(err)
+		}
+		t.Logf("\t %s\t should be able to save data", "succeed")
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		fdbObj, err := GetFDBInstance(dbName)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestExportDataAsZip(t *testing.T) {
+	indexID := "studentPack"
+
+	zipExporter := ZipExporter{
+		DestPath:      "D:\\exe\\backup",
+		IndexID:       indexID,
+		MigrationType: MigrationTypeUpdate,
+		Queries:       []string{},
+		FdbName:       dbName,
+	}
+
+	err := zipExporter.DataExport()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+func TestImportDataFromZip(t *testing.T) {
+	indexID := "studentPack"
+	fdbObj, err := GetFDBInstance(dbName)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	zipImporter := ZipImporter{
+		Data:       nil,
+		SourcePath: "D:\\exe\\backup\\" + indexID,
+		FdbName:    dbName,
+		IndexID:    indexID,
+	}
+	err = zipImporter.DataImport()
+	// err := ImportZip("myfdb", "home/vivekn/fdb_data/dest/stdId")
+	if err != nil {
+		loggermdl.LogError(err)
+		log.Fatal(err)
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+}
+
+func TestExportDataAsFiles(t *testing.T) {
+	indexID := "stdId"
+	exporter := FileExporter{
+		DestPath:      "D:\\exe\\backup",
+		IndexID:       indexID,
+		MigrationType: MigrationTypeUpdate,
+		Queries:       []string{},
+		FdbName:       dbName,
+	}
+
+	err := exporter.DataExport()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+func TestImportDataFromFile(t *testing.T) {
+	indexID := "stdId"
+	fdbObj, err := GetFDBInstance(dbName)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fileImporter := FileImporter{
+		Data:       nil,
+		SourcePath: "D:\\exe\\backup\\" + indexID,
+		FdbName:    dbName,
+		IndexID:    indexID,
+	}
+	err = fileImporter.DataImport()
+	// err := ImportZip("myfdb", "home/vivekn/fdb_data/dest/stdId")
+	if err != nil {
+		loggermdl.LogError(err)
+		log.Fatal(err)
+	}
+
+	if CheckLazyIndexWrite {
+		time.Sleep(time.Second * time.Duration(5+lazyWriteInterval))
+	} else {
+		index, ok := fdbObj.GetFDBIndex(indexID)
+		if !ok {
+			log.Fatal("index not found")
+		}
+		err = index.WriteIndexEntriesInFile()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func TestReorg(t *testing.T) {
+	indexID := "studentPack"
+	fdbObj, err := GetFDBInstance(dbName)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ := sjson.Set("", "name", "ajay")
+	data, _ = sjson.Set(data, "stdId", 1237)
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "examName", "unit2")
+	data, _ = sjson.Set(data, "totalQuestion", 50)
+	data, _ = sjson.Set(data, "marks", 26)
+	data, _ = sjson.Set(data, "examId", "MATH001")
+	data, _ = sjson.Set(data, "fileType", "Exam")
+	studentObj := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ = sjson.Set(data, "examId", "MATH003")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1238)
+	std2 := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &std2)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ = sjson.Set(data, "examId", "MATH001")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1237)
+	std3 := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &std3)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, _ = sjson.Set(data, "examId", "MATH002")
+	data, _ = sjson.Set(data, "instituteId", "geca")
+	data, _ = sjson.Set(data, "stdId", 1238)
+	std4 := gjson.Parse(data)
+	err = SaveDataInFDB(dbName, indexID, &std4)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogDebug("saved")
+	queries := []string{`#[name=="ajay"]`}
+	infileIndexQueries := []string{`#[examId=="MATH001"]`}
+	recordsDeleted, errList := DeleteDataFromFDB(dbName, indexID, &studentObj, queries, infileIndexQueries)
+	if len(errList) > 0 {
+		loggermdl.LogError(errList)
+	}
+	index, ok := fdbObj.GetFDBIndex(indexID)
+	if !ok {
+		log.Fatal("index not found")
+	}
+	loggermdl.LogDebug("deletd cnt", recordsDeleted)
+	err = index.WriteIndexEntriesInFile()
+	if err != nil {
+		log.Fatal(err)
+	}
+	now := time.Now()
+
+	errList = ReorganizeFiles(dbName)
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	elapsed := time.Since(now)
+
+	loggermdl.LogError("elapsed", elapsed)
+
+}
+
+func TestReorgBucket(t *testing.T) {
+	indexID := "studentPack"
+
+	now := time.Now()
+	requeries := []string{}
+	errList := ReorganizeFDBBucketData(dbName, indexID, requeries)
+	if len(errList) > 0 {
+		log.Fatal(errList)
+	}
+	elapsed := time.Since(now)
+
+	loggermdl.LogError("elapsed", elapsed)
+
+}
diff --git a/v2/dalmdl/corefdb/filetype/append.go b/v2/dalmdl/corefdb/filetype/append.go
new file mode 100644
index 0000000000000000000000000000000000000000..72ee46ca4179664e27b908b616cca87214f82d9a
--- /dev/null
+++ b/v2/dalmdl/corefdb/filetype/append.go
@@ -0,0 +1,66 @@
+package filetype
+
+import (
+	"os"
+	"path/filepath"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+)
+
+type AppendFile struct {
+	FilePath         string
+	Fp               *os.File
+	IsLazyEnable     bool
+	securityProvider securityprovider.SecurityProvider
+}
+
+func NewAppendFile(filePath string, securityProvider securityprovider.SecurityProvider) (*AppendFile, error) {
+	if filePath == "" {
+		return nil, errormdl.Wrap("please provide valid filepath")
+	}
+
+	path, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+	dir, _ := filepath.Split(path)
+	if dir != "" {
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to open file: " + err.Error())
+	}
+	file := AppendFile{
+		FilePath:         filePath,
+		Fp:               f,
+		securityProvider: securityProvider,
+	}
+	return &file, nil
+}
+
+func (af *AppendFile) Write(rs *gjson.Result) (err error) {
+	dataBytes := []byte(rs.String())
+	if af.securityProvider != nil {
+		dataBytes, err = af.securityProvider.Encrypt(dataBytes, af.Fp.Name(), rs)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+	dataBytes = append(dataBytes, []byte(lineBreak)...)
+	_, _, err = filemdl.AppendDataToFile(af.Fp.Name(), dataBytes, true)
+	return err
+}
+
+func (af *AppendFile) Close() error {
+	return af.Fp.Close()
+}
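+
+// Example usage (illustrative sketch; the path and record are placeholders):
+//
+//	af, err := NewAppendFile("logs/events.json", nil) // nil security provider writes plain text
+//	if err != nil {
+//		// handle error
+//	}
+//	defer af.Close()
+//	rec := gjson.Parse(`{"event":"login","userId":"u1"}`)
+//	if err := af.Write(&rec); err != nil {
+//		// handle error
+//	}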
diff --git a/v2/dalmdl/corefdb/filetype/filetype.go b/v2/dalmdl/corefdb/filetype/filetype.go
new file mode 100644
index 0000000000000000000000000000000000000000..36291ca7f05ee2159588dc7b8c647e36b632caf0
--- /dev/null
+++ b/v2/dalmdl/corefdb/filetype/filetype.go
@@ -0,0 +1,5 @@
+package filetype
+
+const (
+	lineBreak = "\r\n"
+)
diff --git a/v2/dalmdl/corefdb/filetype/pack.go b/v2/dalmdl/corefdb/filetype/pack.go
new file mode 100644
index 0000000000000000000000000000000000000000..119e0041f824372f167827bdf9d4b514657fbf0b
--- /dev/null
+++ b/v2/dalmdl/corefdb/filetype/pack.go
@@ -0,0 +1,1180 @@
+package filetype
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	fileStatusReady                     = 0
+	fileStatusUpdatingData              = 1
+	fileStatusUpdatingIndex             = 2
+	fileStatusOffsetInFile              = 0
+	isReadyForUploadOffsetInFile        = 1
+	isUpdatedAndNotCommitedOffsetInFile = 2
+	isReorgRequiredOffsetInFile         = 3
+	isReindexRequiredOffsetInFile       = 4
+	footerOffsetInFile                  = 5
+	footerOffsetReservedSize            = 15
+	footerSizeOffset                    = 20
+	filehashOffset                      = 35
+	lastUpdatedOffset                   = 43
+	dataStartOffset                     = 53
+	sizeReservedForHeaders              = 53
+
+	// IndexKeyValSeperator - separator used between an index key and its value
+	IndexKeyValSeperator = "="
+	// FileType - represents key for type of file. Used whenever we need to set file type field in json
+	FileType = "fileType"
+)
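+
+// The constants above define the fixed-size header that occupies the first
+// sizeReservedForHeaders (53) bytes of every pack file:
+//
+//	offset  0     file status: 0 = ready, 1 = updating data, 2 = updating index
+//	offsets 1-4   single-byte flags: ready-for-upload, updated-and-not-committed,
+//	              reorg-required, reindex-required
+//	offset  5     footer (infile index) start offset, zero-padded to 15 digits
+//	offset 20     footer size, zero-padded to 15 digits
+//	offset 35     file hash
+//	offset 43     last-updated unix timestamp
+//	offset 53     start of the data region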
+
+// ErrNoDataFound - returned when no record matches the given queries.
+var ErrNoDataFound = errors.New("No data found")
+
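+// PackFile - a single file that packs many records together with an in-file
+// index (footer) describing where each record starts and how long it is.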
+type PackFile struct {
+	FilePath             string
+	InfileIndexRows      *gjson.Result
+	Fp                   *os.File
+	SecurityProvider     securityprovider.SecurityProvider
+	infileIndexSchemaMap map[string]InFileIndex
+}
+
+type InFileIndex struct {
+	FileType    string             `json:"fileType"`
+	IndexFields []InFileIndexField `json:"indexFields"`
+}
+
+type InFileIndexField struct {
+	FieldName string `json:"fieldName"`
+	Query     string `json:"query"`
+}
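+
+// An illustrative schema for NewPackFile (file type, field names, and queries
+// are placeholders):
+//
+//	schema := map[string]InFileIndex{
+//		"EXAM": {
+//			FileType: "EXAM",
+//			IndexFields: []InFileIndexField{
+//				{FieldName: "examId", Query: "examId"},
+//				{FieldName: "studentId", Query: "studentId"},
+//			},
+//		},
+//	}
+//	pf, err := NewPackFile("data/students.pack", schema, nil)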
+
+func NewPackFile(filePath string, infileIndexSchemaMap map[string]InFileIndex, securityProvider securityprovider.SecurityProvider) (*PackFile, error) {
+	if filePath == "" {
+		return nil, errormdl.Wrap("please provide valid filepath")
+	}
+
+	path, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+	dir, _ := filepath.Split(path)
+	if dir != "" {
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to open file: " + err.Error())
+	}
+	packFile := PackFile{
+		FilePath:         filePath,
+		Fp:               f,
+		SecurityProvider: securityProvider,
+	}
+	if infileIndexSchemaMap == nil {
+		packFile.infileIndexSchemaMap = make(map[string]InFileIndex)
+	} else {
+		packFile.infileIndexSchemaMap = infileIndexSchemaMap
+	}
+	err = initializeFile(f)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to create pack file: " + err.Error())
+	}
+	return &packFile, nil
+}
+
+func (p *PackFile) Close() error {
+	return p.Fp.Close()
+}
+func (p *PackFile) Write(rs *gjson.Result) (err error) {
+	// filePath := p.Fp.Name()
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return errormdl.Wrap("please specify fileType")
+	}
+
+	f := p.Fp
+	infileIndexSchema, ok := p.infileIndexSchemaMap[fileType]
+	if !ok {
+		return errormdl.Wrap("infileIndex schema for specified fileType not found: " + fileType)
+	}
+
+	indexRowJSON, err := CreateIndexJSON(infileIndexSchema.IndexFields, rs)
+	if err != nil {
+		return err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+	fileHash, err := securitymdl.GetHash(rs.String())
+	if err != nil {
+		return err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+	previousIndexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", f.Name(), err)
+		return err
+	}
+	// loggermdl.LogDebug("previous index data", previousIndexData)
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+	dataString := rs.String()
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		return err
+	}
+	// write data
+	dataSize, err := addFileDataInFile(f, footerStartOffset, dataString, true, rs, p.SecurityProvider)
+	if err != nil {
+		return err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	// append new entry in infile index
+	parsedindexRowJSON := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(previousIndexData.String(), "-1", parsedindexRowJSON.Value())
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+	// updating infile index
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return err
+	}
+
+	footerNewOffset := footerStartOffset + dataSize
+	err = setFooterOffset(f, footerNewOffset)
+	if err != nil {
+		return err
+	}
+	err = setFooterSize(f, int64(len(updatedIndexData)))
+	if err != nil {
+		return err
+	}
+	err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	if err != nil {
+		return err
+	}
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return err
+	}
+	err = f.Sync()
+	if err != nil {
+		return err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	p.InfileIndexRows = &updatedIndexDataObj
+	return nil
+}
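+
+// Write example (illustrative; the record must carry a fileType present in the
+// schema map passed to NewPackFile):
+//
+//	rec := gjson.Parse(`{"fileType":"EXAM","examId":"exam001","studentId":1234,"marks":38}`)
+//	err := pf.Write(&rec)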
+
+func (p *PackFile) Read(queries []string, data *gjson.Result) (string, error) {
+
+	filePath := p.Fp.Name()
+
+	indexRows, err := getInFileIndexData(p.Fp)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", filePath, err)
+		return "", err
+	}
+	for i := 0; i < len(queries); i++ {
+		indexRows = indexRows.Get(queries[i] + "#")
+	}
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	indexRows.ForEach(func(key, indexRow gjson.Result) bool {
+		// read files
+		startOffSet := indexRow.Get("startOffset").Int()
+		dataSize := indexRow.Get("dataSize").Int()
+		if startOffSet == 0 || dataSize == 0 {
+			return true
+		}
+		dataByte := []byte{'{', '}'}
+		var err error
+		// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
+		dataByte, err = getFileDataFromPack(p.Fp, startOffSet, dataSize, data, p.SecurityProvider)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		_, err = sb.Write(dataByte)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		sb.WriteString(",")
+
+		return true // keep iterating
+	})
+	sb.WriteString("]")
+	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
+	return finalResult, nil
+}
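+
+// Read example (illustrative; queries use gjson's #[...] filter syntax, and the
+// data argument may be nil when no security provider is configured):
+//
+//	jsonArr, err := pf.Read([]string{`#[examId=="exam001"]`}, nil)
+//	// jsonArr is a JSON array string containing every matching record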
+
+func (p *PackFile) Update(queries []string, rs *gjson.Result) (gjson.Result, error) {
+	// Update flow: locate the index rows matching the queries; for each match,
+	// overwrite the record in place when the updated payload fits in the
+	// existing slot, otherwise append it before the footer; then refresh the
+	// row's startOffset/dataSize and rewrite the footer offset and size.
+	updatedData := gjson.Result{}
+	indexRows, err := getInFileIndexData(p.Fp)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", p.Fp.Name(), err)
+		return updatedData, err
+	}
+
+	indexRecordsToUpdate := indexRows
+	for _, query := range queries {
+		indexRecordsToUpdate = indexRecordsToUpdate.Get(query + "#")
+	}
+
+	indexRecordsToUpdateObjs := indexRecordsToUpdate.Array()
+	if len(indexRecordsToUpdateObjs) == 0 {
+		return updatedData, errormdl.Wrap("no data found")
+	}
+	resultArrayStr := "[]"
+	var updatedInfileIndex *gjson.Result
+	var result *gjson.Result
+	for _, recordToUpdateIndexRow := range indexRecordsToUpdateObjs {
+		result, updatedInfileIndex, err = updateSingleRecordInPackFileUsingFp(p.Fp, recordToUpdateIndexRow, updatedInfileIndex, rs, p.SecurityProvider)
+		if err != nil {
+			return updatedData, errormdl.Wrap("fail to update data" + err.Error())
+		}
+		resultArrayStr, _ = sjson.Set(resultArrayStr, "-1", result.Value())
+	}
+	resultData := gjson.Parse(resultArrayStr)
+	return resultData, nil
+}
+
+func (p *PackFile) Remove(queries []string) (recordsDeletedCnt int, err error) {
+	indexData, err := getInFileIndexData(p.Fp)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", p.Fp.Name(), err)
+		return recordsDeletedCnt, err
+	}
+	indexRecordsToDelete := indexData
+	// loggermdl.LogDebug("indexRecordsToDelete file type", indexRecordsToDelete)
+	for _, query := range queries {
+		indexRecordsToDelete = indexRecordsToDelete.Get(query + "#")
+	}
+	indexRowsToDelete := indexRecordsToDelete.Array()
+	if len(indexRowsToDelete) == 0 {
+		loggermdl.LogError(ErrNoDataFound)
+		return recordsDeletedCnt, ErrNoDataFound
+	}
+	updatedIndexRecords := indexData
+	for _, indexRowToRemove := range indexRowsToDelete {
+		updatedIndexRecords, err = removeIndexRow(updatedIndexRecords, indexRowToRemove.String())
+		if err != nil {
+			loggermdl.LogError("fail to delete record:", err)
+			return recordsDeletedCnt, errormdl.Wrap("fail to delete record:" + err.Error())
+		}
+		recordsDeletedCnt++
+	}
+
+	footerOffset := getFooterOffset(p.Fp)
+	if footerOffset == -1 {
+		return recordsDeletedCnt, errormdl.Wrap("fail to fetch infile index offset")
+	}
+	newIndexDataSize := len(updatedIndexRecords.String())
+	err = setFileStatusFlag(p.Fp, fileStatusUpdatingIndex)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+	err = setIndexDataInFile(p.Fp, footerOffset, updatedIndexRecords.String())
+	if err != nil {
+		loggermdl.LogError("fail to update infile index data :", err)
+		return recordsDeletedCnt, err
+	}
+	err = setFileStatusFlag(p.Fp, fileStatusReady)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+	p.InfileIndexRows = &updatedIndexRecords
+	return recordsDeletedCnt, setFooterSize(p.Fp, int64(newIndexDataSize))
+}
+
+func (p *PackFile) WriteMedia(mediaData []byte, rs *gjson.Result) (recordID string, err error) {
+
+	fileHash, err := securitymdl.GetHash(rs.String())
+	if err != nil {
+		loggermdl.LogError("error writing to bucket: ", err)
+		return recordID, err
+	}
+	previousIndexData, err := getInFileIndexData(p.Fp)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", p.Fp.Name(), err)
+		return recordID, err
+	}
+	// loggermdl.LogDebug("previous index data", previousIndexData)
+	footerStartOffset := getFooterOffset(p.Fp)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, errormdl.Wrap("fail to fetch infile index data")
+	}
+	// write data
+	dataSize, err := addByteDataInFile(p.Fp, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+
+	recordID = guidmdl.GetGUID()
+	indexRowJSON := ""
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", "Media")
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	// append new entry in infile index
+	parsedindexRowJSON := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(previousIndexData.String(), "-1", parsedindexRowJSON.Value())
+
+	footerNewOffset := footerStartOffset + dataSize
+	err = setFooterOffset(p.Fp, footerNewOffset)
+	if err != nil {
+		return recordID, err
+	}
+	err = setFooterSize(p.Fp, int64(len(updatedIndexData)))
+	if err != nil {
+		return recordID, err
+	}
+	err = setIndexDataInFile(p.Fp, footerNewOffset, updatedIndexData)
+	if err != nil {
+		return recordID, err
+	}
+	err = setFileStatusFlag(p.Fp, fileStatusReady)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	err = p.Fp.Sync()
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	p.InfileIndexRows = &updatedIndexDataObj
+	return recordID, nil
+
+}
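+
+// WriteMedia example (illustrative sketch):
+//
+//	meta := gjson.Parse(`{"fileName":"gopher.png"}`)
+//	recordID, err := pf.WriteMedia(imageBytes, &meta)
+//	// recordID can later be passed to ReadMedia, UpdateMedia, or RemoveMedia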
+
+func (p *PackFile) UpdateMedia(recordID string, mediaData []byte, rs *gjson.Result) (err error) {
+
+	indexData, err := getInFileIndexData(p.Fp)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", p.Fp.Name(), err)
+		return err
+	}
+
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	for index, indexRow := range indexData.Array() {
+		if id := indexRow.Get("recordID").String(); id != "" && id == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+	if foundAtIndex == -1 {
+		loggermdl.LogError("no data found to update: ", recordID)
+		return errormdl.Wrap("no data found to update: " + recordID)
+	}
+
+	footerStartOffset := getFooterOffset(p.Fp)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+
+	// write data
+	dataSize, err := addByteDataInFile(p.Fp, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	indexRowJSON := foundIndexRow.String()
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	updatedIndexData, _ := sjson.Set(indexData.String(), strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+
+	footerNewOffset := footerStartOffset + dataSize
+	err = setFooterOffset(p.Fp, footerNewOffset)
+	if err != nil {
+		return err
+	}
+	err = setFooterSize(p.Fp, int64(len(updatedIndexData)))
+	if err != nil {
+		return err
+	}
+	err = setIndexDataInFile(p.Fp, footerNewOffset, updatedIndexData)
+	if err != nil {
+		return err
+	}
+	err = setFileStatusFlag(p.Fp, fileStatusReady)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = p.Fp.Sync()
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	p.InfileIndexRows = &updatedIndexDataObj
+	return err
+}
+
+func (p *PackFile) UpsertMedia(recordID string, mediaData []byte, rs *gjson.Result) (string, error) {
+
+	f := p.Fp
+	indexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", f.Name(), err)
+		return recordID, err
+	}
+
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	for index, indexRow := range indexData.Array() {
+		if indexRow.Get("recordID").String() == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, errormdl.Wrap("fail to fetch infile index data")
+	}
+	// TODO: write at previous location
+	// write data
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	indexRowJSON := foundIndexRow.String()
+
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", "Media")
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	updatedIndexData, _ := sjson.Set(indexData.String(), strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+
+	footerNewOffset := footerStartOffset + dataSize
+	err = setFooterOffset(f, footerNewOffset)
+	if err != nil {
+		return recordID, err
+	}
+	err = setFooterSize(f, int64(len(updatedIndexData)))
+	if err != nil {
+		return recordID, err
+	}
+	err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	if err != nil {
+		return recordID, err
+	}
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	err = f.Sync()
+	if err != nil {
+		return recordID, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	p.InfileIndexRows = &updatedIndexDataObj
+	return recordID, nil
+}
+
+func (p *PackFile) ReadMedia(recordID string) ([]byte, *gjson.Result, error) {
+	dataByte := []byte{}
+	var metaData *gjson.Result
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	f := p.Fp
+	indexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", f.Name(), err)
+		return dataByte, metaData, err
+	}
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	if indexRows.String() == "" {
+		loggermdl.LogError("data not found for recordId: ", recordID)
+		return dataByte, metaData, errormdl.Wrap("data not found")
+	}
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	indexRow := indexRows.Get("0")
+	startOffSet := indexRow.Get("startOffset").Int()
+	dataSize := indexRow.Get("dataSize").Int()
+	if startOffSet == 0 || dataSize == 0 {
+		return dataByte, metaData, errormdl.Wrap("data not found")
+	}
+	dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, nil, nil)
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, metaData, err
+	}
+
+	data, _ := sjson.Set("", "requiredData", indexRow.Get("requiredData").String())
+	data, _ = sjson.Set(data, "infileIndex", indexData.String())
+	metaDataObj := gjson.Parse(data)
+	return dataByte, &metaDataObj, nil
+}
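+
+// ReadMedia example (illustrative):
+//
+//	blob, meta, err := pf.ReadMedia(recordID)
+//	// blob holds the raw media bytes; meta carries the "requiredData" JSON
+//	// supplied at write time along with the current infile index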
+
+func (p *PackFile) ReadMediaByQuery(inFileIndexQueries []string) (map[string][]byte, map[string]gjson.Result, error) {
+	f := p.Fp
+	indexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", f.Name(), err)
+		return nil, nil, err
+	}
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	if indexRows.String() == "" {
+		loggermdl.LogError("data not found")
+		return nil, nil, errormdl.Wrap("data not found")
+	}
+	dataMap := make(map[string][]byte, 0)
+	metaDataMap := make(map[string]gjson.Result, 0)
+	for _, indexRow := range indexRows.Array() {
+		startOffSet := indexRow.Get("startOffset").Int()
+		dataSize := indexRow.Get("dataSize").Int()
+		if startOffSet == 0 || dataSize == 0 {
+			return nil, nil, errormdl.Wrap("data not found")
+		}
+		dataByte, err := getFileDataFromPack(f, startOffSet, dataSize, nil, nil)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, nil, err
+		}
+		recordID := indexRow.Get("recordID").String()
+		if recordID == "" {
+			return nil, nil, errormdl.Wrap("record id not found")
+		}
+		data, _ := sjson.Set("", "requiredData", indexRow.Get("requiredData").String())
+		// data, _ = sjson.Set(data, "infileIndex", indexData.String())
+		metaDataObj := gjson.Parse(data)
+		dataMap[recordID] = dataByte
+		metaDataMap[recordID] = metaDataObj
+	}
+
+	return dataMap, metaDataMap, nil
+}
+
+func (p *PackFile) RemoveMedia(recordID string) error {
+	queries := []string{`#[recordID=` + recordID + `]`}
+	_, err := p.Remove(queries)
+	return err
+}
+
+func (p *PackFile) Reorg() error {
+	f := p.Fp
+	_, sourceFileName := filepath.Split(p.FilePath)
+	desFileName := sourceFileName + "_" + strconv.FormatInt(time.Now().Unix(), 10)
+	tempFilepath, err := filepath.Abs(filepath.Join(filemdl.TempDir, desFileName))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	dir, _ := filepath.Split(tempFilepath)
+	if dir != "" {
+		createError := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(createError) != nil {
+			return errormdl.CheckErr(createError)
+		}
+	}
+
+	fpTemp, err := os.OpenFile(tempFilepath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		fpTemp.Close()
+	}()
+
+	err = InitializeWithHeaderUsingFp(fpTemp)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	infileIndexRows, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// if len(infileIndexRows.Array()) == 0 {
+	// 	return nil
+	// }
+	tempFileFooterStartOffset := getFooterOffset(fpTemp)
+	if tempFileFooterStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+	updatedIndexRowStr := "[]"
+	for _, infileIndex := range infileIndexRows.Array() {
+		startOffset, err := strconv.Atoi(infileIndex.Get("startOffset").String())
+		if err != nil {
+			loggermdl.LogError("Error occured while fetching startOffset", err)
+			return err
+		}
+		dataSize, err := strconv.Atoi(infileIndex.Get("dataSize").String())
+		if err != nil {
+			loggermdl.LogError("Error occured while fetching dataSize", err)
+			return err
+		}
+
+		byteArr, err := getFileDataFromPack(f, int64(startOffset), int64(dataSize), nil, nil)
+		if err != nil {
+			loggermdl.LogError("Error occured while reading file data from offset", err)
+			return err
+		}
+		byteCnt, err := addByteDataInFile(fpTemp, tempFileFooterStartOffset, byteArr, false)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		indexRowJSON, _ := sjson.Set(infileIndex.String(), "startOffset", tempFileFooterStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", byteCnt)
+		indexRowJSONObj := gjson.Parse(indexRowJSON)
+		updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "-1", indexRowJSONObj.Value())
+		tempFileFooterStartOffset = tempFileFooterStartOffset + byteCnt
+	}
+
+	err = setFooterOffset(fpTemp, tempFileFooterStartOffset)
+	if err != nil {
+		return err
+	}
+	err = setFooterSize(fpTemp, int64(len(updatedIndexRowStr)))
+	if err != nil {
+		return err
+	}
+	err = setIndexDataInFile(fpTemp, tempFileFooterStartOffset, updatedIndexRowStr)
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Sync()
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Close()
+	if err != nil {
+		return err
+	}
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	return filemdl.AtomicReplaceFile(tempFilepath, p.FilePath)
+}
+
+// CreateIndexJSON builds a JSON index row by evaluating each index field's
+// query against the given record.
+func CreateIndexJSON(indexFields []InFileIndexField, rs *gjson.Result) (string, error) {
+	json := `{}`
+	for _, indexField := range indexFields {
+		val := rs.Get(indexField.Query).Value()
+		// validation
+		if val == nil {
+			return "", errormdl.Wrap("please provide value for index field: " + indexField.Query)
+		}
+		json, _ = sjson.Set(json, indexField.FieldName, val)
+	}
+	return json, nil
+}
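+
+// For example (illustrative), with an index field {FieldName: "stdId", Query: "stdId"}
+// and a record {"stdId": 1239, "name": "vijay"}, CreateIndexJSON returns
+// {"stdId":1239}.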
+
+func updateIndexRow(indexRows *gjson.Result, previousIndexRow gjson.Result, updatedRow gjson.Result) (string, error) {
+	indexRowObjs := indexRows.Array()
+	if len(indexRowObjs) == 0 {
+		return "", errormdl.Wrap("no data found to update")
+	}
+	// loggermdl.LogDebug("indexRows", indexRows)
+
+	prevIndexRowString := previousIndexRow.String()
+	foundRowIndex := -1
+
+	for index, indexRowObj := range indexRowObjs {
+		if indexRowObj.String() != "" && indexRowObj.String() == prevIndexRowString {
+			foundRowIndex = index
+			break
+		}
+	}
+
+	if foundRowIndex == -1 {
+		return "", errormdl.Wrap("no record found to update")
+	}
+	var err error
+	updatedIndexDataString := indexRows.String()
+	// for _, foundRowIndex := range foundRowIndexes {
+	updatedIndexDataString, err = sjson.Set(updatedIndexDataString, strconv.Itoa(foundRowIndex), updatedRow.Value())
+	if err != nil {
+		loggermdl.LogError(err)
+		return "", errormdl.Wrap("failed to update index rows")
+	}
+	// }
+	// updatedIndexData := gjson.Parse(updatedIndexDataString)
+	return updatedIndexDataString, nil
+}
+
+func removeIndexRow(indexRows gjson.Result, indexRowToDelete string) (gjson.Result, error) {
+
+	indexRowObjs := indexRows.Array()
+	if len(indexRowObjs) == 0 {
+		return indexRows, errormdl.Wrap("no data found to update")
+	}
+	// loggermdl.LogDebug("indexRows", indexRows)
+
+	foundIndexToDelete := -1
+	for index, indexRowObj := range indexRowObjs {
+		if indexRowObj.String() != "" && indexRowToDelete != "" && indexRowObj.String() == indexRowToDelete {
+			foundIndexToDelete = index
+			break
+		}
+	}
+
+	if foundIndexToDelete == -1 {
+		return indexRows, errormdl.Wrap("no record found to delete")
+	}
+	updatedIndexDataString, err := sjson.Delete(indexRows.String(), strconv.Itoa(foundIndexToDelete))
+	if err != nil {
+		loggermdl.LogError(err)
+		return indexRows, err
+	}
+	return gjson.Parse(updatedIndexDataString), nil
+}
+
+func isValidPackFile(f *os.File) (bool, error) {
+	if f == nil {
+		return false, errormdl.Wrap("file pointer not valid")
+	}
+	isFilePresent := filemdl.FileAvailabilityCheck(f.Name())
+	if !isFilePresent {
+		loggermdl.LogDebug(isFilePresent)
+		return false, nil
+	}
+	info, err := f.Stat()
+	if err != nil {
+		loggermdl.LogDebug(err)
+		return false, err
+	}
+	if info.Size() == 0 {
+		return false, nil
+	}
+	return true, nil
+}
+
+func updateSingleRecordInPackFileUsingFp(f *os.File, recordToUpdateIndexRow gjson.Result, infileIndex, rs *gjson.Result, securityProvider securityprovider.SecurityProvider) (*gjson.Result, *gjson.Result, error) {
+	fileStartOffset := recordToUpdateIndexRow.Get("startOffset").Int()
+	dataSize := recordToUpdateIndexRow.Get("dataSize").Int()
+	if fileStartOffset == 0 || dataSize == 0 {
+		loggermdl.LogError("index row details incorrect - start offset :", fileStartOffset, " data size :", dataSize)
+		return nil, nil, errormdl.Wrap("index row details incorrect")
+	}
+
+	existingData, err := getFileDataFromPack(f, fileStartOffset, dataSize, rs, securityProvider)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	updatedDataStr := string(existingData)
+	// updating existing data
+	rs.ForEach(func(key, val gjson.Result) bool {
+		updatedDataStr, _ = sjson.Set(updatedDataStr, key.String(), val.Value())
+		return true
+	})
+	newDataSize := int64(len(updatedDataStr))
+	footerStartOffset := getFooterOffset(f)
+	updatedFooterOffset := footerStartOffset
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		return nil, nil, err
+	}
+	existingIndexRows := gjson.Parse("[]")
+	if infileIndex == nil {
+		existingIndexRows, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, nil, err
+		}
+	} else {
+		existingIndexRows = *infileIndex
+	}
+	updatedDataSize := len(updatedDataStr)
+	updatedDataBytes := []byte(updatedDataStr)
+	if securityProvider != nil {
+		updatedDataBytes, err = securityProvider.Encrypt(updatedDataBytes, f.Name(), rs)
+		if err != nil {
+			return nil, nil, err
+		}
+		updatedDataSize = len(updatedDataBytes)
+	}
+	if int64(updatedDataSize) <= dataSize {
+		newDataSize, err = addByteDataInFile(f, fileStartOffset, updatedDataBytes, false)
+		if err != nil {
+			return nil, nil, err
+		}
+	} else {
+		newDataSize, err = addByteDataInFile(f, footerStartOffset, updatedDataBytes, true)
+		if err != nil {
+			return nil, nil, err
+		}
+		updatedFooterOffset = footerStartOffset + newDataSize
+		fileStartOffset = footerStartOffset
+	}
+
+	updatedIndexRowStr := recordToUpdateIndexRow.String()
+
+	recordToUpdateIndexRow.ForEach(func(key, value gjson.Result) bool {
+		indexFieldKey := key.String()
+		if rs.Get(indexFieldKey).Exists() {
+			updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, indexFieldKey, rs.Get(indexFieldKey).Value())
+		}
+		return true
+	})
+	fileHash, err := securitymdl.GetHash(updatedDataStr)
+	if err != nil {
+		return nil, nil, err
+	}
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "startOffset", fileStartOffset)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "dataSize", newDataSize)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "fileHash", fileHash)
+	updatedIndexRowListStr, err := updateIndexRow(&existingIndexRows, recordToUpdateIndexRow, gjson.Parse(updatedIndexRowStr))
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setIndexDataInFile(f, updatedFooterOffset, updatedIndexRowListStr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFooterOffset(f, updatedFooterOffset)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFooterSize(f, int64(len(updatedIndexRowListStr)))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = f.Sync()
+	if err != nil {
+		return nil, nil, err
+	}
+	updatedData := gjson.Parse(updatedDataStr)
+	updatedIndexRows := gjson.Parse(updatedIndexRowListStr)
+	return &updatedData, &updatedIndexRows, nil
+}
+
+func initializeFile(fp *os.File) (err error) {
+	filePath := fp.Name()
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	info, err := fp.Stat()
+	if err != nil {
+		return
+	}
+	if !isFilePresent || info.Size() == 0 {
+		dir, _ := filepath.Split(filePath)
+		err = filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return
+		}
+
+		err = InitializeWithHeaderUsingFp(fp)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+	return
+}
+
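+// InitializeWithHeaderUsingFp writes the fixed-size pack file header (status
+// byte, flags, footer offset and size, file hash placeholder, and last-updated
+// timestamp) to a freshly created file.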
+func InitializeWithHeaderUsingFp(f *os.File) error {
+	_, err := f.WriteAt([]byte(strconv.Itoa(fileStatusReady)), fileStatusOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// mark the file as not ready for upload (flag = 0)
+	_, err = f.WriteAt([]byte("0"), isReadyForUploadOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isUpdatedAndNotCommitedOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReorgRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReindexRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// _, err = f.WriteAt([]byte(appendPaddingToNumber(sizeReservedForHeaders, 15)), footerOffsetInFile)
+	err = setFooterOffset(f, sizeReservedForHeaders+int64(len(lineBreak)))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = setFooterSize(f, 0)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("filehash"), filehashOffest)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
+	_, err = f.WriteAt([]byte(timestamp), lastUpdatedOffset)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("\r\n"), sizeReservedForHeaders)
+	return err
+}
+
+func getInFileIndexData(f *os.File) (gjson.Result, error) {
+	infileIndexData := gjson.Parse("[]")
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return infileIndexData, errormdl.Wrap("fail to fetch infile index data")
+	}
+	footerSize, err := getFooterSize(f)
+	if err != nil {
+		return infileIndexData, errormdl.Wrap("fail to fetch infile index size")
+	}
+	if footerSize == 0 {
+		return infileIndexData, nil
+	}
+	dataByte, err := filemdl.ReadFileFromOffset(f, footerStartOffset, footerSize)
+	if err != nil {
+		if err.Error() == "EOF" {
+			loggermdl.LogError("EOF")
+			return infileIndexData, nil
+		}
+		loggermdl.LogError("error while fetching index data", err)
+		return infileIndexData, err
+	}
+	infileIndexData = gjson.ParseBytes(dataByte)
+	if !infileIndexData.IsArray() {
+		loggermdl.LogError("invalid infile index data", infileIndexData.Value())
+		return infileIndexData, errormdl.Wrap("invalid infile index data")
+	}
+	return infileIndexData, nil
+}
+
+func addFileDataInFile(f *os.File, offset int64, data string, breakLine bool, rs *gjson.Result, encrypter securityprovider.SecurityProvider) (int64, error) {
+	dataBytes := []byte(data)
+	var err error
+	if encrypter != nil {
+		dataBytes, err = encrypter.Encrypt(dataBytes, f.Name(), rs)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	if breakLine {
+		dataBytes = append(dataBytes, []byte(lineBreak)...)
+	}
+
+	return filemdl.WriteFileAtOffset(f, offset, dataBytes)
+}
+
+func addByteDataInFile(f *os.File, offset int64, dataBytes []byte, breakLine bool) (int64, error) {
+	var err error
+
+	if breakLine {
+		dataBytes = append(dataBytes, []byte(lineBreak)...)
+	}
+	dataSize, err := filemdl.WriteFileAtOffset(f, offset, dataBytes)
+	return dataSize, err
+}
+
+func getFileDataFromPack(f *os.File, startOffset, dataSize int64, rs *gjson.Result, decrypter securityprovider.SecurityProvider) ([]byte, error) {
+
+	ba, err := filemdl.ReadFileFromOffset(f, startOffset, dataSize)
+
+	if decrypter != nil {
+		ba, err = decrypter.Decrypt(ba, f.Name(), rs)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ba, err
+}
+
+func appendPaddingPadValue(value int64, padNumber int) string {
+	no := strconv.Itoa(padNumber)
+	return fmt.Sprintf("%0"+no+"d", value)
+}
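+
+// For example, appendPaddingPadValue(120, 15) returns "000000000000120".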
+
+func getFileStatus(f *os.File) (int, error) {
+
+	data, err := filemdl.ReadFileFromOffset(f, fileStatusOffsetInFile, 1)
+	if err != nil {
+		loggermdl.LogError(err)
+		return -1, err
+	}
+	status, err := strconv.Atoi(string(data))
+	return status, err
+}
+
+func getFooterOffset(f *os.File) int64 {
+	data, err := filemdl.ReadFileFromOffset(f, footerOffsetInFile, footerOffsetReservedSize)
+	if err != nil {
+		loggermdl.LogError(err)
+		return -1
+	}
+	footerOffset, err := strconv.Atoi(string(data))
+	if err != nil {
+		loggermdl.LogError("err", err)
+		return -1
+	}
+
+	return int64(footerOffset)
+}
+
+func setFileStatusFlag(f *os.File, fileStatus int) error {
+	status := strconv.Itoa(fileStatus)
+	_, err := filemdl.WriteFileAtOffset(f, fileStatusOffsetInFile, []byte(status))
+	return err
+}
+
+// boolToFlagByte encodes a flag as a single byte ("1"/"0") so that writing it
+// never spills into the neighbouring single-byte header flags, which
+// strconv.FormatBool ("true"/"false") would do.
+func boolToFlagByte(flag bool) []byte {
+	if flag {
+		return []byte("1")
+	}
+	return []byte("0")
+}
+
+func setFileReadyForUploadFlag(f *os.File, isReadyToUpload bool) error {
+	_, err := filemdl.WriteFileAtOffset(f, isReadyForUploadOffsetInFile, boolToFlagByte(isReadyToUpload))
+	return err
+}
+
+func setFileUpdatedAndNotCommitedFlag(f *os.File, isUpdatedAndNotCommited bool) error {
+	_, err := filemdl.WriteFileAtOffset(f, isUpdatedAndNotCommitedOffsetInFile, boolToFlagByte(isUpdatedAndNotCommited))
+	return err
+}
+
+func setFileReorgRequiredFlag(f *os.File, isReorgRequired bool) error {
+	_, err := filemdl.WriteFileAtOffset(f, isReorgRequiredOffsetInFile, boolToFlagByte(isReorgRequired))
+	return err
+}
+
+func setFileReindexRequiredFlag(f *os.File, isReindexRequired bool) error {
+	_, err := filemdl.WriteFileAtOffset(f, isReindexRequiredOffsetInFile, boolToFlagByte(isReindexRequired))
+	return err
+}
+
+func setFooterOffset(f *os.File, footerOffset int64) error {
+	footerOffsetInString := appendPaddingPadValue(footerOffset, 15)
+	_, err := filemdl.WriteFileAtOffset(f, footerOffsetInFile, []byte(footerOffsetInString))
+	return err
+}
+
+func setFooterSize(f *os.File, footerSize int64) error {
+	footerSizeInString := appendPaddingPadValue(footerSize, 15)
+	_, err := filemdl.WriteFileAtOffset(f, footerSizeOffset, []byte(footerSizeInString))
+	return err
+}
+
+func getFooterSize(f *os.File) (int64, error) {
+	data, err := filemdl.ReadFileFromOffset(f, footerSizeOffset, 15)
+	if err != nil {
+		return -1, err
+	}
+	footerSize, err := strconv.Atoi(string(data))
+	if err != nil {
+		loggermdl.LogError("err", err)
+		return -1, err
+	}
+
+	return int64(footerSize), nil
+}
+
+func setIndexDataInFile(f *os.File, footerOffset int64, indexData string) error {
+	_, err := filemdl.WriteFileAtOffset(f, footerOffset, []byte(indexData))
+	return err
+}
diff --git a/v2/dalmdl/corefdb/filetype/simple.go b/v2/dalmdl/corefdb/filetype/simple.go
new file mode 100644
index 0000000000000000000000000000000000000000..cab4391286db74ec3fa7959ec88c5817f7372658
--- /dev/null
+++ b/v2/dalmdl/corefdb/filetype/simple.go
@@ -0,0 +1,124 @@
+package filetype
+
+import (
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+type SimpleFile struct {
+	FilePath         string
+	Fp               *os.File
+	IsLazyEnable     bool
+	securityProvider securityprovider.SecurityProvider
+}
+
+func NewSimpleFile(filePath string, securityProvider securityprovider.SecurityProvider) (*SimpleFile, error) {
+	if filePath == "" {
+		return nil, errormdl.Wrap("please provide valid filepath")
+	}
+	path, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+	dir, _ := filepath.Split(path)
+	if dir != "" {
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return nil, errormdl.Wrap("fail to open file: " + err.Error())
+	}
+	file := SimpleFile{
+		FilePath:         filePath,
+		Fp:               f,
+		securityProvider: securityProvider,
+	}
+	return &file, nil
+}
+
+func (s *SimpleFile) Write(rs *gjson.Result) (err error) {
+	dataBytes := []byte(rs.String())
+	if s.securityProvider != nil {
+		dataBytes, err = s.securityProvider.Encrypt(dataBytes, s.Fp.Name(), rs)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+
+	err = filemdl.WriteFileUsingFp(s.Fp, dataBytes, true, false)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	return nil
+}
+
+func (s *SimpleFile) Read(data *gjson.Result) ([]byte, error) {
+	ba, err := filemdl.ReadFileUsingFp(s.Fp)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+
+	if len(ba) == 0 {
+		return ba, nil
+	}
+
+	if s.securityProvider != nil {
+		ba, err = s.securityProvider.Decrypt(ba, s.Fp.Name(), data)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+	}
+	return ba, nil
+}
+
+func (s *SimpleFile) Update(rs *gjson.Result) (gjson.Result, error) {
+	data, err := s.Read(rs)
+	if err != nil {
+		return gjson.Result{}, err
+	}
+	existingDataStr := string(data)
+	rs.ForEach(func(key, val gjson.Result) bool {
+		existingDataStr, _ = sjson.Set(existingDataStr, key.String(), val.Value())
+		return true
+	})
+
+	updatedData := gjson.Parse(existingDataStr)
+	err = s.Write(&updatedData)
+	return updatedData, err
+}
+
+func (s *SimpleFile) Remove() error {
+	if filemdl.FileAvailabilityCheck(s.Fp.Name()) {
+		err := s.Fp.Truncate(0)
+		if err != nil {
+			return err
+		}
+
+		if _, err := s.Fp.Seek(0, io.SeekStart); err != nil {
+			return err
+		}
+		return nil
+	}
+	return errors.New("not found")
+}
+
+func (s *SimpleFile) Close() error {
+	return s.Fp.Close()
+}
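+
+// Example usage (illustrative sketch; the path is a placeholder):
+//
+//	sf, err := NewSimpleFile("data/profile.json", nil) // nil provider stores plain text
+//	if err != nil {
+//		// handle error
+//	}
+//	defer sf.Close()
+//	doc := gjson.Parse(`{"name":"vijay"}`)
+//	_ = sf.Write(&doc)
+//	patch := gjson.Parse(`{"city":"pune"}`)
+//	updated, _ := sf.Update(&patch) // merges patch keys into the stored document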
diff --git a/v2/dalmdl/corefdb/index/index.go b/v2/dalmdl/corefdb/index/index.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bbbeb0f3e1e7d7398ff61487b47b6d960a5b182
--- /dev/null
+++ b/v2/dalmdl/corefdb/index/index.go
@@ -0,0 +1,253 @@
+package index
+
+import (
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/lazycache"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/lazywriter"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/buntdb"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	LineBreak            = "\r\n"
+	IndexKeyValSeperator = "="
+)
+
+// Index - Index
+type Index struct {
+	indexStore       IndexStore
+	IndexID          string       `json:"indexId"`
+	IndexNameQuery   string       `json:"indexNameQuery"`
+	BucketSequence   []string     `json:"bucketSequence"`
+	IndexFields      []IndexField `json:"indexFields"`
+	IsDynamicName    bool         `json:"isDynamicName"`
+	IndexFilePath    string
+	SecurityProvider securityprovider.SecurityProvider
+}
+
+// IndexField - IndexField
+type IndexField struct {
+	FieldName string `json:"fieldName"`
+	Query     string `json:"query"`
+}
+
+func NewIndex(indexID, indexNameQuery string, IsDynamicName bool, indexFilePath string) (*Index, error) {
+	idx := Index{
+		IndexID:          indexID,
+		IndexNameQuery:   indexNameQuery,
+		IsDynamicName:    IsDynamicName,
+		IndexFilePath:    indexFilePath,
+		SecurityProvider: securityprovider.New(securityprovider.SecurityConfig{}),
+	}
+
+	var err error
+	idx.indexStore, err = NewStore()
+	if err != nil {
+		return nil, err
+	}
+	err = idx.LoadIndexEntriesFromFile()
+	if err != nil {
+		return nil, err
+	}
+	lazyObj := lazywriter.LazyCacheObject{
+		FileName:      indexFilePath,
+		Identifier:    indexID,
+		InterfaceData: idx,
+		SaveFn:        lazyCallBackFnSaveIndex,
+	}
+
+	lazycache.IndexLazyObjHolder.SetNoExpiration(indexID, lazyObj)
+	return &idx, nil
+}
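+
+// Illustrative setup (identifiers and paths are placeholders):
+//
+//	idx, err := NewIndex("stdId", "studentId", false, "indexes/stdId.index")
+//	if err != nil {
+//		// handle error
+//	}
+//	idx.SetFields(IndexField{FieldName: "stdId", Query: "stdId"})
+//	if err := idx.CreateIndex(); err != nil {
+//		// handle error
+//	}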
+
+// SetFields - SetFields
+func (i *Index) SetFields(indexField ...IndexField) *Index {
+	i.IndexFields = indexField
+	return i
+}
+
+func (i *Index) CreateIndex() error {
+	var fns []func(a, b string) bool
+	for _, idx := range i.IndexFields {
+		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
+	}
+	err := i.indexStore.store.CreateIndex(i.IndexID, "*", fns...)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (i *Index) ReplaceIndex() error {
+	var fns []func(a, b string) bool
+	for _, idx := range i.IndexFields {
+		fns = append(fns, buntdb.IndexJSON(idx.FieldName))
+	}
+	err := i.indexStore.store.ReplaceIndex(i.IndexID, "*", fns...)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (i *Index) GetAllEntries() (map[string]string, error) {
+	return i.indexStore.GetMany()
+}
+
+func (i *Index) GetEntryByQueries(queries []string) (Entry, bool, error) {
+	return i.indexStore.GetOneByQuery(queries)
+}
+
+func (i *Index) GetEntriesByQueries(queries []string) (map[string]string, error) {
+	return i.indexStore.GetManyByQuery(queries)
+}
+
+func (i *Index) GetEntryByPath(path string) (string, error) {
+	return i.indexStore.Get(path)
+}
+
+func (i *Index) AddEntry(path string, rs *gjson.Result) error {
+	json := rs.String()
+	for _, indexField := range i.IndexFields {
+		if rs.Get(indexField.Query).Value() == nil {
+			return errormdl.Wrap("please provide value for index field: " + indexField.Query)
+		}
+		json, _ = sjson.Set(json, indexField.FieldName, rs.Get(indexField.Query).Value())
+	}
+	UpdateLazyCache(i)
+	path = strings.Trim(path, string(filepath.Separator))
+	return i.indexStore.Set(path, json)
+}
+
+func (i *Index) AddEntries(keyValMap map[string]string) error {
+	UpdateLazyCache(i)
+	return i.indexStore.AddMany(keyValMap)
+}
+
+func (i *Index) Delete(path string) error {
+	UpdateLazyCache(i)
+	return i.indexStore.Delete(path)
+}
+
+func (i *Index) DeleteMany(paths []string) error {
+	for cnt := range paths {
+		err := i.indexStore.Delete(paths[cnt])
+		if err != nil {
+			return err
+		}
+	}
+	UpdateLazyCache(i)
+	return nil
+}
+
+func (i *Index) CloseStore() error {
+	return i.indexStore.Close()
+}
+
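+// LoadIndexEntriesFromFile reads the persisted index file (if any), decrypts
+// it, and loads its key=value entries into the in-memory store.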
+func (index *Index) LoadIndexEntriesFromFile() error {
+
+	if !filemdl.FileAvailabilityCheck(index.IndexFilePath) {
+		return nil
+	}
+	fileData, err := filemdl.FastReadFile(index.IndexFilePath)
+	if err != nil {
+		loggermdl.LogError("failed to load FDB index from: ", index.IndexFilePath)
+		return err
+	}
+	if len(fileData) == 0 {
+		return nil
+	}
+	_, fileName := filepath.Split(index.IndexFilePath)
+	fileData, err = index.SecurityProvider.Decrypt(fileData, fileName, nil)
+	if err != nil {
+		loggermdl.LogError("failed to decrypt FDB index data: ", err)
+		return errormdl.Wrap("failed to decrypt FDB index data: " + err.Error())
+	}
+	data := string(fileData)
+	indexRecords := strings.Split(data, LineBreak)
+	indexDataMap := make(map[string]string)
+	for _, indexRecord := range indexRecords {
+		indexValues := strings.Split(indexRecord, IndexKeyValSeperator)
+		if len(indexValues) == 2 {
+			indexDataMap[indexValues[0]] = indexValues[1]
+		}
+	}
+	// load all persisted entries into the in-memory store
+	return index.AddEntries(indexDataMap)
+}
+
+// WriteIndexEntriesInFile - persists all in-memory index entries to the index file on disk.
+func (index *Index) WriteIndexEntriesInFile() error {
+	dataToStore := ``
+	indexKeyValMap, err := index.GetAllEntries()
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	for key, value := range indexKeyValMap {
+		dataToStore = dataToStore + key + IndexKeyValSeperator + value + LineBreak
+	}
+	_, fileName := filepath.Split(index.IndexFilePath)
+	dataByteToWriteRes := []byte{}
+	var hashError error
+	if len(indexKeyValMap) > 0 {
+		dataByteToWriteRes, hashError = index.SecurityProvider.Encrypt([]byte(dataToStore), fileName, nil)
+		if errormdl.CheckErr1(hashError) != nil {
+			return errormdl.CheckErr1(hashError)
+		}
+	}
+	return filemdl.WriteFile(index.IndexFilePath, dataByteToWriteRes, true, false)
+}
+
+var lazyCallBackFnSaveIndex lazywriter.SaveDataFn = func(indexID string, data *lazywriter.LazyCacheObject) {
+	index, ok := data.InterfaceData.(*Index)
+	if !ok {
+		return
+	}
+
+	err := index.WriteIndexEntriesInFile()
+	if err != nil {
+		loggermdl.LogError(err)
+		return
+	}
+}
+
+// UpdateLazyCache - updates index data in the lazy writer cache
+func UpdateLazyCache(index *Index) error {
+	// lazy cache must be present for provided indexID
+	lazyObj, ok := lazycache.IndexLazyObjHolder.Get(index.IndexID)
+	if !ok {
+		loggermdl.LogError("index not found in lazy writer cache")
+		return errormdl.Wrap("index not found in lazy writer cache")
+	}
+
+	idxLazyData, ok := lazyObj.(lazywriter.LazyCacheObject)
+	if !ok {
+		loggermdl.LogError("interface type is not lazywriter.LazyCacheObject")
+		return errormdl.Wrap("interface type is not lazywriter.LazyCacheObject")
+	}
+
+	// idxLazyData.GJSONData = index
+	idxLazyData.InterfaceData = index
+	if ok := lazycache.IndexMaster.SaveOrUpdateDataInCache(idxLazyData); !ok {
+		loggermdl.LogError("failed to update index data in lazy cache")
+		return errormdl.Wrap("failed to update index data in lazy cache")
+	}
+
+	lazycache.IndexLazyObjHolder.SetNoExpiration(index.IndexID, idxLazyData)
+	return nil
+}
diff --git a/v2/dalmdl/corefdb/index/indexstore.go b/v2/dalmdl/corefdb/index/indexstore.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e8d87446b8f797d60e0a213868ef046f9e108be
--- /dev/null
+++ b/v2/dalmdl/corefdb/index/indexstore.go
@@ -0,0 +1,158 @@
+package index
+
+import (
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+
+	"github.com/tidwall/buntdb"
+	"github.com/tidwall/gjson"
+)
+
+type IndexStore struct {
+	store *buntdb.DB
+}
+
+type Entry struct {
+	Key   string
+	Value string
+}
+
+// NewStore - returns new store object
+func NewStore() (IndexStore, error) {
+	db, err := buntdb.Open(":memory:")
+	if err != nil {
+		return IndexStore{}, err
+	}
+	store := IndexStore{
+		store: db,
+	}
+	return store, nil
+}
+
+func (i IndexStore) Close() error {
+	return i.store.Close()
+}
+func (i *IndexStore) Set(key, value string) (err error) {
+	err = i.store.Update(func(tx *buntdb.Tx) error {
+		key = strings.ReplaceAll(key, "\\", "/")
+		_, _, err := tx.Set(key, value, nil)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return
+}
+
+func (i *IndexStore) AddMany(keyValMap map[string]string) (err error) {
+	err = i.store.Update(func(tx *buntdb.Tx) error {
+		for key, val := range keyValMap {
+			key = strings.ReplaceAll(key, "\\", "/")
+			_, _, err := tx.Set(key, val, nil)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return
+}
+
+func (i *IndexStore) Get(path string) (val string, err error) {
+	path = strings.ReplaceAll(path, "\\", "/")
+
+	err = i.store.View(func(tx *buntdb.Tx) error {
+		return tx.Ascend("", func(key, value string) bool {
+			if key == path {
+				val = value
+				return false // stop iterating once the key is found
+			}
+			return true
+		})
+	})
+	return
+}
+
+func (i *IndexStore) GetOneByQuery(queries []string) (entry Entry, found bool, err error) {
+	err = i.store.View(func(tx *buntdb.Tx) error {
+		return tx.Ascend("", func(key, value string) bool {
+			rsJSON := gjson.Parse("[" + value + "]")
+			for i := 0; i < len(queries); i++ {
+				rsJSON = rsJSON.Get(queries[i] + "#")
+			}
+			if rsJSON.Value() != nil && rsJSON.IsArray() && len(rsJSON.Array()) > 0 {
+				found = true
+				entry = Entry{
+					Key:   filepath.Join(key),
+					Value: value,
+				}
+				return false // stop after the first match
+			}
+			return true
+		})
+	})
+	return
+}
+
+func (i *IndexStore) GetManyByQuery(queries []string) (map[string]string, error) {
+	entryMap := make(map[string]string)
+	err := i.store.View(func(tx *buntdb.Tx) error {
+		return tx.Ascend("", func(key, value string) bool {
+			rsJSON := gjson.Parse("[" + value + "]")
+			for i := 0; i < len(queries); i++ {
+				rsJSON = rsJSON.Get(queries[i] + "#")
+			}
+			if rsJSON.Value() != nil && rsJSON.IsArray() && len(rsJSON.Array()) > 0 {
+				key = filepath.Join(key)
+				entryMap[key] = value
+			}
+			return true
+		})
+	})
+	return entryMap, err
+}
+
+func (i *IndexStore) GetMany() (map[string]string, error) {
+	entryMap := make(map[string]string)
+	err := i.store.View(func(tx *buntdb.Tx) error {
+		return tx.Ascend("", func(key, value string) bool {
+			key = filepath.Join(key)
+			entryMap[key] = value
+			return true
+		})
+	})
+
+	return entryMap, err
+}
+
+func (i *IndexStore) Delete(key string) error {
+	key = strings.ReplaceAll(key, "\\", "/")
+	err := i.store.Update(func(tx *buntdb.Tx) error {
+		_, err := tx.Delete(key)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return err
+}
+
+// GenRowID generates a hash for the given file name. The returned rowID is at most 16 characters long.
+func GenRowID(name string) (string, error) {
+	name = strings.ReplaceAll(filepath.Clean(name), string(filepath.Separator), "")
+	rowID, err := securitymdl.GetHash(name)
+	if err != nil {
+		return "", err
+	}
+
+	if len(rowID) > 16 {
+		return rowID[:16], nil
+	}
+
+	return rowID, nil
+}
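
A hedged usage sketch of the store above; the module import path is taken from this patch, and the query syntax follows GetManyByQuery, which wraps each stored value in a one-element JSON array and appends `#` to the gjson query:

```go
package main

import (
	"fmt"

	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/index"
)

func main() {
	store, err := index.NewStore() // in-memory buntdb instance
	if err != nil {
		panic(err)
	}
	defer store.Close()

	// Backslashes in keys are normalized to forward slashes by Set.
	_ = store.Set(`bucket\file1`, `{"name":"A","age":29}`)
	_ = store.Set(`bucket/file2`, `{"name":"B","age":35}`)

	// gjson-style filter: entries whose age is greater than 30.
	matches, _ := store.GetManyByQuery([]string{`#(age>30)`})
	fmt.Println(matches) // map[bucket/file2:{"name":"B","age":35}]
}
```
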
diff --git a/v2/dalmdl/corefdb/lazycache/lazycache.go b/v2/dalmdl/corefdb/lazycache/lazycache.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9df3fab654799f06832dfc8a51061e80a7a56a1
--- /dev/null
+++ b/v2/dalmdl/corefdb/lazycache/lazycache.go
@@ -0,0 +1,66 @@
+package lazycache
+
+import (
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/lazywriter"
+)
+
+// Create Master lazy object
+
+// IndexMaster - Holds master object for index files
+var IndexMaster lazywriter.LazyFDBHelper
+
+// AppendMaster - Holds master object for Append files
+var AppendMaster lazywriter.LazyFDBHelper
+
+// IndexLazyObjHolder - Holds lazy cache objects for indexes, indexID as key and lazy Object as value
+var IndexLazyObjHolder cachemdl.FastCacheHelper
+
+// AppendLazyObjHolder - Holds lazy cache objects for Append files, filepath as key and lazy Object as value
+var AppendLazyObjHolder cachemdl.FastCacheHelper
+
+// LazyWriteObjMaster - Holds master object for lazy write files
+var LazyWriteObjMaster lazywriter.LazyFDBHelper
+
+// LazyWriteObjHolder - Holds lazy cache objects for lazy write files, filepath as key and lazy Object as value
+var LazyWriteObjHolder cachemdl.FastCacheHelper
+
+const (
+	maxObjectsCnt                 int = 100000
+	maxRetryCnt                   int = 5
+	intervalTimeForLazyIndexWrite int = 10
+	intervalTimeForLazyWrite      int = 20
+	sleepTime                     int = 5
+	// NoExpiration -
+	NoExpiration time.Duration = -1
+
+	lazyIndexProcessName  string = "lazyIndex"
+	lazyAppendProcessName string = "lazyAppend"
+	lazyProcessName       string = "lazyWrite"
+)
+
+func init() {
+	// loggermdl.LogError("init lazyIndex")
+	// start process for lazy index file operations
+	IndexMaster.StartProcess(maxObjectsCnt, lazyIndexProcessName, intervalTimeForLazyIndexWrite, sleepTime, maxRetryCnt, false)
+	// loggermdl.LogError("Lazy writer process started")
+	// initialize a map to hold lazy writer objects for the index file data
+	IndexLazyObjHolder.Setup(10000, NoExpiration, NoExpiration)
+
+	// start process for lazy Append file operations
+	AppendMaster.StartProcess(maxObjectsCnt, lazyAppendProcessName, intervalTimeForLazyWrite, sleepTime, maxRetryCnt, false)
+
+	// initialize a map to hold lazy writer objects for the append file data
+	AppendLazyObjHolder.Setup(10000, NoExpiration, NoExpiration)
+	// loggermdl.LogError("cache setup done")
+
+	// start process for lazy write file operations
+	LazyWriteObjMaster.StartProcess(maxObjectsCnt, lazyProcessName, intervalTimeForLazyWrite, sleepTime, maxRetryCnt, false)
+
+	// initialize a map to hold lazy writer objects for the lazy write file data
+	LazyWriteObjHolder.Setup(10000, NoExpiration, NoExpiration)
+	// loggermdl.LogError("cache setup done")
+
+}
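
How an entry first lands in these holders is left to corefdb; a hedged sketch using only the API visible in this patch (idx and indexID are illustrative names):

```go
// Sketch only: register an index with the lazy writer so that a later
// UpdateLazyCache(idx) call finds it in IndexLazyObjHolder.
var obj lazywriter.LazyCacheObject
obj.InterfaceData = idx // the index object to be persisted lazily
lazycache.IndexMaster.SaveOrUpdateDataInCache(obj)
lazycache.IndexLazyObjHolder.SetNoExpiration(indexID, obj)
```
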
diff --git a/v2/dalmdl/corefdb/locker/locker.go b/v2/dalmdl/corefdb/locker/locker.go
new file mode 100644
index 0000000000000000000000000000000000000000..a0938d4bf3ad1ae26cce48a10f0f42f0fec4988f
--- /dev/null
+++ b/v2/dalmdl/corefdb/locker/locker.go
@@ -0,0 +1,40 @@
+package locker
+
+import (
+	"sync"
+)
+
+var mutexMap = map[string]*sync.Mutex{}
+var getMapSyncMutex = &sync.Mutex{}
+
+// NewLocker returns a FileLocker for the given path. Lockers created for the
+// same path share a single mutex, so they serialize with each other.
+func NewLocker(filePath string) *FileLocker {
+	getMapSyncMutex.Lock()
+	defer getMapSyncMutex.Unlock()
+	m, found := mutexMap[filePath]
+	if !found {
+		m = &sync.Mutex{}
+		mutexMap[filePath] = m
+	}
+	locker := FileLocker{
+		m,
+		filePath,
+	}
+	return &locker
+}
+
+type Locker interface {
+	Lock()
+	Unlock()
+}
+
+type FileLocker struct {
+	Locker
+	LockOn string
+}
+
+func (l *FileLocker) Lock() {
+	l.Locker.Lock()
+}
+func (l *FileLocker) Unlock() {
+	l.Locker.Unlock()
+}
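
Because all lockers built for the same path share one mutex, concurrent read-modify-write cycles on a file serialize. A minimal sketch (the path is illustrative):

```go
// Sketch only: serialize access to one file across goroutines.
l := locker.NewLocker("data/bucket/file1.json")
l.Lock()
// ... read, modify and write the file here ...
l.Unlock()
```
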
diff --git a/v2/dalmdl/corefdb/securityprovider/securityprovider.go b/v2/dalmdl/corefdb/securityprovider/securityprovider.go
new file mode 100644
index 0000000000000000000000000000000000000000..103728b3278afd6fb2321ee48b4ad236a73878b3
--- /dev/null
+++ b/v2/dalmdl/corefdb/securityprovider/securityprovider.go
@@ -0,0 +1,111 @@
+package securityprovider
+
+import (
+	"errors"
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+	"github.com/tidwall/gjson"
+)
+
+const (
+	// SharedPrefix marks a file as shared, i.e. its encryption key does not include the dynamic field value (fdbSec.fieldQuery)
+	SharedPrefix = "ss_"
+	// EmptySTR represents empty string
+	EmptySTR = ""
+)
+
+type SecurityProvider interface {
+	Encrypter
+	Decrypter
+}
+
+type Encrypter interface {
+	Encrypt([]byte, string, *gjson.Result) ([]byte, error)
+}
+
+type Decrypter interface {
+	Decrypt([]byte, string, *gjson.Result) ([]byte, error)
+}
+
+type FdbSecurityProvider struct {
+	encKey         string // the global encryption key used in the project. This key will be applicable in all cases.
+	userDefinedKey string // the user defined key in the project. This key will be applicable in all cases.
+	fieldQuery     string // query to get dynamic field. Ex. Each student data can be encrypted with studentID. Applicable only for the shared bucket.
+}
+
+type SecurityConfig struct {
+	EncKey         string // the global encryption key used in the project. This key will be applicable in all cases.
+	UserDefinedKey string // the user defined key in the project. This key will be applicable in all cases.
+	FieldQuery     string // query to get dynamic field. Ex. Each student data can be encrypted with studentID. Applicable only for the shared bucket.
+}
+
+func New(config SecurityConfig) FdbSecurityProvider {
+
+	fdbSecurity := FdbSecurityProvider{
+		encKey:         config.EncKey,
+		userDefinedKey: config.UserDefinedKey,
+		fieldQuery:     config.FieldQuery,
+	}
+
+	return fdbSecurity
+}
+
+func (fs FdbSecurityProvider) GenerateSecurityKey(fileName string, data *gjson.Result) (key []byte, err error) {
+	_, fileName = filepath.Split(fileName)
+	if fileName == EmptySTR {
+		return key, errors.New("GenerateSecurityKey: fileName must not be empty")
+	}
+
+	// Warning: The order of string concatenation must be preserved as specified.
+	skey := fs.encKey + fs.userDefinedKey + fileName
+	if !strings.HasPrefix(fileName, SharedPrefix) && fs.fieldQuery != "" {
+		// not a shared file and a dynamic field query is configured; the field value must be present in data.
+		if data == nil || data.Get(fs.fieldQuery).String() == EmptySTR {
+			return key, errormdl.Wrap("please provide value of field: " + fs.fieldQuery)
+		}
+		skey = data.Get(fs.fieldQuery).String() + skey
+	}
+
+	// loggermdl.LogDebug("key", string(skey))
+	hash, err := hashmdl.Get128BitHash([]byte(skey))
+	if err != nil {
+		return key, errors.New("GenerateSecurityKey: " + err.Error())
+	}
+	key = hash[:]
+	return
+}
+
+// Encrypt - compresses and encrypts the provided data
+func (fs FdbSecurityProvider) Encrypt(dataByte []byte, fileName string, data *gjson.Result) (res []byte, err error) {
+	res, err = filemdl.ZipBytes(dataByte)
+	if err != nil {
+		return
+	}
+	securityKey, gerr := fs.GenerateSecurityKey(fileName, data)
+	if gerr != nil {
+		err = gerr
+		return
+	}
+	return securitymdl.AESEncrypt(res, securityKey)
+}
+
+// Decrypt - decrypts and decompresses the provided data
+func (fs FdbSecurityProvider) Decrypt(dataByte []byte, fileName string, data *gjson.Result) (res []byte, err error) {
+	res = dataByte
+	securityKey, gerr := fs.GenerateSecurityKey(fileName, data)
+	if gerr != nil {
+		err = gerr
+		return
+	}
+	res, err = securitymdl.AESDecrypt(res, securityKey)
+	if err != nil {
+		return
+	}
+
+	return filemdl.UnZipBytes(res)
+}
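
A hedged round-trip sketch of the provider; all key values are illustrative. The file name carries no `ss_` prefix and a field query is configured, so the `studentId` value must be present in the document for key derivation:

```go
package main

import (
	"fmt"

	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/corefdb/securityprovider"
	"github.com/tidwall/gjson"
)

func main() {
	sp := securityprovider.New(securityprovider.SecurityConfig{
		EncKey:         "project-key",
		UserDefinedKey: "user-key",
		FieldQuery:     "studentId",
	})

	doc := gjson.Parse(`{"studentId":"S-42","name":"A"}`)

	enc, err := sp.Encrypt([]byte(doc.Raw), "students.json", &doc)
	if err != nil {
		panic(err)
	}
	dec, err := sp.Decrypt(enc, "students.json", &doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec)) // prints the original JSON
}
```
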
diff --git a/v2/dalmdl/coremongo/coremongo.go b/v2/dalmdl/coremongo/coremongo.go
new file mode 100644
index 0000000000000000000000000000000000000000..8163d69b266d9e9ea8c7984ab0bfc06530ec87fa
--- /dev/null
+++ b/v2/dalmdl/coremongo/coremongo.go
@@ -0,0 +1,699 @@
+package coremongo
+
+import (
+	"context"
+	"encoding/json"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+
+	"github.com/tidwall/gjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+// MongoHost - connection configuration for a mongo host
+type MongoHost struct {
+	HostName        string        `json:"hostName"`
+	Server          string        `json:"server"`
+	Port            int           `json:"port"`
+	Username        string        `json:"username"`
+	Password        string        `json:"password"`
+	Database        string        `json:"database"`
+	IsDefault       bool          `json:"isDefault"`
+	MaxIdleConns    int           `json:"maxIdleConns" `
+	MaxOpenConns    int           `json:"maxOpenConns"`
+	ConnMaxLifetime time.Duration `json:"connMaxLifetime" `
+	IsDisabled      bool          `json:"isDisabled" `
+}
+
+var instances map[string]*mongo.Client
+var mutex sync.Mutex
+var once sync.Once
+
+var config map[string]MongoHost
+
+var defaultHost string
+
+func init() {
+	config = make(map[string]MongoHost)
+}
+
+// InitUsingJSON initializes Mongo connections for the given host configurations; it runs only once
+func InitUsingJSON(configs []MongoHost) error {
+	var sessionError error
+	once.Do(func() {
+		mutex.Lock()
+		defer mutex.Unlock()
+		config = make(map[string]MongoHost)
+		instances = make(map[string]*mongo.Client)
+		for _, hostDetails := range configs {
+			if hostDetails.IsDisabled {
+				continue
+			}
+			clientOption := options.Client()
+			clientOption.SetHosts([]string{bindMongoServerWithPort(hostDetails.Server, hostDetails.Port)}).
+				SetConnectTimeout(hostDetails.ConnMaxLifetime).
+				SetMaxPoolSize(uint64(hostDetails.MaxOpenConns)).
+				SetReadPreference(readpref.Primary()).
+				SetDirect(true) // important if in cluster, connect to primary only.
+			if hostDetails.Username != "" {
+				cred := options.Credential{}
+				cred.Username = hostDetails.Username
+				cred.Password = hostDetails.Password
+				cred.AuthSource = hostDetails.Database
+				clientOption.SetAuth(cred)
+			}
+			client, err := mongo.NewClient(clientOption)
+			if err != nil {
+				sessionError = err
+				loggermdl.LogError(sessionError)
+				return
+			}
+			err = client.Connect(context.Background())
+			if err != nil {
+				sessionError = err
+				loggermdl.LogError(sessionError)
+				return
+			}
+			err = client.Ping(context.Background(), readpref.Primary())
+			if err != nil {
+				sessionError = err
+				loggermdl.LogError("failed to connect to primary - ", sessionError)
+				return
+			}
+			instances[hostDetails.HostName] = client
+			if hostDetails.IsDefault {
+				defaultHost = hostDetails.HostName
+			}
+			config[hostDetails.HostName] = hostDetails
+		}
+	})
+	return sessionError
+}
+
+// DeleteSession removes the client instance stored for the given host
+func DeleteSession(hostName string) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if _, ok := instances[hostName]; !ok {
+		return errormdl.Wrap("NO_HOST_FOUND")
+	}
+	delete(instances, hostName)
+	return nil
+}
+
+// InitNewSession creates and stores a new client instance for the given host
+func InitNewSession(hostDetails MongoHost) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if instances == nil {
+		instances = make(map[string]*mongo.Client)
+	}
+	if _, ok := instances[hostDetails.HostName]; ok {
+		return errormdl.Wrap("DUPLICATE_HOSTNAME")
+	}
+	clientOption := options.Client()
+	clientOption.SetHosts([]string{bindMongoServerWithPort(hostDetails.Server, hostDetails.Port)}).
+		SetConnectTimeout(hostDetails.ConnMaxLifetime).
+		SetMaxPoolSize(uint64(hostDetails.MaxOpenConns)).
+		SetReadPreference(readpref.Primary()).
+		SetDirect(true) // important if in cluster, connect to primary only.
+	if hostDetails.Username != "" {
+		cred := options.Credential{}
+		cred.Username = hostDetails.Username
+		cred.Password = hostDetails.Password
+		cred.AuthSource = hostDetails.Database
+		clientOption.SetAuth(cred)
+	}
+	client, err := mongo.NewClient(clientOption)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = client.Connect(context.Background())
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	instances[hostDetails.HostName] = client
+	return nil
+}
+
+// GetMongoConnection returns a connected client for the given host; an empty hostName selects the default host
+func GetMongoConnection(hostName string) (*mongo.Client, error) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if instances == nil {
+		return nil, errormdl.Wrap("MONGO_INIT_NOT_DONE")
+	}
+	if hostName == "" {
+		if instance, ok := instances[defaultHost]; ok {
+			statemdl.MongoHits()
+			err := instance.Ping(context.Background(), readpref.Primary())
+			if err != nil {
+				loggermdl.LogError(err)
+				return nil, err
+			}
+			return instance, nil
+		}
+	}
+	if instance, ok := instances[hostName]; ok {
+		statemdl.MongoHits()
+		err := instance.Ping(context.Background(), readpref.Primary())
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+		return instance, nil
+	}
+	return nil, errormdl.Wrap("Session not found for instance: " + hostName)
+}
+
+// MongoDAO mongo DAO struct
+type MongoDAO struct {
+	hostName       string
+	collectionName string
+}
+
+// GetMongoDAOWithHost returns mongo DAO instance for the given host
+func GetMongoDAOWithHost(host, collection string) *MongoDAO {
+	return &MongoDAO{
+		hostName:       host,
+		collectionName: collection,
+	}
+}
+
+// GetMongoDAO returns mongo DAO instance for the default host
+func GetMongoDAO(collection string) *MongoDAO {
+	return &MongoDAO{
+		collectionName: collection,
+	}
+}
+
+// SaveData saves data in mongo db and returns the inserted document id
+func (mg *MongoDAO) SaveData(data interface{}) (string, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return "", errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return "", errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	opts, insertError := collection.InsertOne(context.Background(), data)
+	if errormdl.CheckErr1(insertError) != nil {
+		return "", errormdl.CheckErr1(insertError)
+	}
+	return getInsertedId(opts.InsertedID), nil
+}
+
+// UpdateAll updates all entries matching the selector
+func (mg *MongoDAO) UpdateAll(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+
+	_, updateError := collection.UpdateMany(context.Background(), selector, bson.M{"$set": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// Update will update single entry
+func (mg *MongoDAO) Update(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	_, updateError := collection.UpdateOne(context.Background(), selector, bson.M{"$set": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// GetData returns all documents matching the given selector
+func (mg *MongoDAO) GetData(selector map[string]interface{}) (*gjson.Result, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return nil, errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return nil, errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+
+	cur, err := collection.Find(context.Background(), selector)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	defer cur.Close(context.Background())
+	var results []interface{}
+	for cur.Next(context.Background()) {
+		var result bson.M
+		err := cur.Decode(&result)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+		results = append(results, result)
+	}
+	ba, marshalError := json.Marshal(results)
+	if errormdl.CheckErr2(marshalError) != nil {
+		return nil, errormdl.CheckErr2(marshalError)
+	}
+	rs := gjson.ParseBytes(ba)
+	return &rs, nil
+}
+
+// DeleteData deletes a single document matching the given selector
+func (mg *MongoDAO) DeleteData(selector map[string]interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	_, deleteError := collection.DeleteOne(context.Background(), selector)
+	if errormdl.CheckErr1(deleteError) != nil {
+		return errormdl.CheckErr1(deleteError)
+	}
+	return deleteError
+}
+
+// DeleteAll will delete all the matching data given for selector
+func (mg *MongoDAO) DeleteAll(selector map[string]interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	_, deleteError := collection.DeleteMany(context.Background(), selector)
+	if errormdl.CheckErr1(deleteError) != nil {
+		return errormdl.CheckErr1(deleteError)
+	}
+	return deleteError
+}
+
+// GetProjectedData returns documents matching the given selector, limited to the fields in the projector
+func (mg *MongoDAO) GetProjectedData(selector map[string]interface{}, projector map[string]interface{}) (*gjson.Result, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return nil, errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return nil, errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	ops := &options.FindOptions{}
+	ops.Projection = projector
+	cur, err := collection.Find(context.Background(), selector, ops)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	defer cur.Close(context.Background())
+	var results []interface{}
+	for cur.Next(context.Background()) {
+		var result bson.M
+		err := cur.Decode(&result)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+		results = append(results, result)
+	}
+
+	ba, marshalError := json.Marshal(results)
+	if errormdl.CheckErr2(marshalError) != nil {
+		return nil, errormdl.CheckErr2(marshalError)
+	}
+	rs := gjson.ParseBytes(ba)
+	return &rs, nil
+}
+
+// GetAggregateData - return result using aggregation query
+func (mg *MongoDAO) GetAggregateData(selector interface{}) (*gjson.Result, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return nil, errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return nil, errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	cur, err := collection.Aggregate(context.Background(), selector)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	defer cur.Close(context.Background())
+	var results []interface{}
+	for cur.Next(context.Background()) {
+		var result bson.M
+		err := cur.Decode(&result)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+		results = append(results, result)
+	}
+	ba, marshalError := json.Marshal(results)
+	if errormdl.CheckErr2(marshalError) != nil {
+		return nil, errormdl.CheckErr2(marshalError)
+	}
+	rs := gjson.ParseBytes(ba)
+	return &rs, nil
+}
+
+// Upsert updates a single entry, inserting it if no match exists
+func (mg *MongoDAO) Upsert(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	ops := options.UpdateOptions{}
+	ops.SetUpsert(true)
+	_, updateError := collection.UpdateOne(context.Background(), selector, bson.M{"$set": data}, &ops)
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// PushData - appends values to arrays of all documents matching the selector
+func (mg *MongoDAO) PushData(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	_, updateError := collection.UpdateMany(context.Background(), selector, bson.M{"$push": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+
+	return nil
+}
+
+// CustomUpdate applies the given update document to all documents matching the selector
+func (mg *MongoDAO) CustomUpdate(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	_, updateError := collection.UpdateMany(context.Background(), selector, data)
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// CustomUpdateOne applies the given update document to a single document matching the selector
+func (mg *MongoDAO) CustomUpdateOne(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	_, updateError := collection.UpdateOne(context.Background(), selector, data)
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+/************************* BULK Functionalities ******************************/
+
+// BulkSaveData saves data in mongo db in bulk
+func (mg *MongoDAO) BulkSaveData(data []interface{}) error {
+	if checkBulkInput(data) {
+		return nil
+	}
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	opts := &options.BulkWriteOptions{}
+	opts.SetOrdered(true)
+	var models []mongo.WriteModel
+	for i := 0; i < len(data); i++ {
+		model := mongo.NewInsertOneModel()
+		model.SetDocument(data[i])
+		models = append(models, model)
+	}
+	_, insertError := collection.BulkWrite(context.Background(), models, opts)
+	if errormdl.CheckErr1(insertError) != nil {
+		loggermdl.LogError(insertError)
+		return errormdl.CheckErr1(insertError)
+	}
+
+	return nil
+}
+
+// BulkUpdateData updates data in mongo db in bulk. The data slice must hold
+// filter/update pairs: data[i] is the selector and data[i+1] the update.
+func (mg *MongoDAO) BulkUpdateData(data []interface{}) error {
+	if checkBulkInput(data) {
+		return nil
+	}
+	if len(data)%2 != 0 {
+		return errormdl.Wrap("BulkUpdateData: data must contain filter/update pairs")
+	}
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	opts := &options.BulkWriteOptions{}
+	opts.SetOrdered(true)
+	var models []mongo.WriteModel
+	for i := 0; i < len(data); i++ {
+		model := mongo.NewUpdateOneModel()
+		model.SetFilter(data[i])
+		i++
+		model.SetUpdate(data[i])
+		models = append(models, model)
+	}
+
+	_, insertError := collection.BulkWrite(context.Background(), models, opts)
+	if errormdl.CheckErr1(insertError) != nil {
+		loggermdl.LogError(insertError)
+		return errormdl.CheckErr1(insertError)
+	}
+	return nil
+}
+
+// BulkDeleteData  delete data in mongo db in bulk
+func (mg *MongoDAO) BulkDeleteData(data []interface{}) error {
+	if checkBulkInput(data) {
+		return nil
+	}
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	opts := &options.BulkWriteOptions{}
+	opts.SetOrdered(true)
+	var models []mongo.WriteModel
+	for i := 0; i < len(data); i++ {
+		model := mongo.NewDeleteOneModel()
+		model.SetFilter(data[i])
+		models = append(models, model)
+	}
+	_, insertError := collection.BulkWrite(context.Background(), models, opts)
+	if errormdl.CheckErr1(insertError) != nil {
+		loggermdl.LogError(insertError)
+		return errormdl.CheckErr1(insertError)
+	}
+	return nil
+}
+
+// BulkUpsertData upserts data in mongo db in bulk. The data slice must hold
+// filter/update pairs: data[i] is the selector and data[i+1] the update.
+func (mg *MongoDAO) BulkUpsertData(data []interface{}) error {
+	if checkBulkInput(data) {
+		return nil
+	}
+	if len(data)%2 != 0 {
+		return errormdl.Wrap("BulkUpsertData: data must contain filter/update pairs")
+	}
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.Database(db.Database).Collection(mg.collectionName)
+	opts := &options.BulkWriteOptions{}
+	opts.SetOrdered(true)
+	var models []mongo.WriteModel
+	for i := 0; i < len(data); i++ {
+		model := mongo.NewUpdateOneModel()
+		model.SetUpsert(true)
+		model.SetFilter(data[i])
+		i++
+		model.SetUpdate(data[i])
+		models = append(models, model)
+	}
+
+	_, insertError := collection.BulkWrite(context.Background(), models, opts)
+	if errormdl.CheckErr1(insertError) != nil {
+		loggermdl.LogError(insertError)
+		return errormdl.CheckErr1(insertError)
+	}
+	return nil
+}
+
+func checkBulkInput(d []interface{}) bool {
+	return len(d) == 0
+}
+
+func bindMongoServerWithPort(server string, port int) string {
+	// if port is not set then use default port 27017 & bind to server ip
+	if port <= 0 {
+		return server + ":27017"
+	}
+	return server + ":" + strconv.Itoa(port)
+}
+
+func getInsertedId(id interface{}) string {
+	switch v := id.(type) {
+	case string:
+		return v
+	case primitive.ObjectID:
+		return v.Hex()
+	default:
+		return ""
+	}
+}
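
A hedged end-to-end sketch of the DAO (host values are illustrative and assume a local mongod):

```go
package main

import (
	"fmt"

	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/coremongo"
)

func main() {
	hosts := []coremongo.MongoHost{{
		HostName:  "local",
		Server:    "127.0.0.1",
		Port:      27017,
		Database:  "testdb",
		IsDefault: true,
	}}
	if err := coremongo.InitUsingJSON(hosts); err != nil {
		panic(err)
	}

	dao := coremongo.GetMongoDAO("users") // DAO bound to the default host
	id, err := dao.SaveData(map[string]interface{}{"name": "A", "age": 29})
	if err != nil {
		panic(err)
	}
	fmt.Println("inserted:", id)

	// Selectors are plain bson-compatible maps.
	rs, err := dao.GetData(map[string]interface{}{"age": map[string]interface{}{"$gt": 25}})
	if err != nil {
		panic(err)
	}
	fmt.Println(rs.String())
}
```
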
diff --git a/v2/dalmdl/coremongo/girdfs.go b/v2/dalmdl/coremongo/girdfs.go
new file mode 100644
index 0000000000000000000000000000000000000000..15abd4561347b2ce33cc26bac314a59355b5e44d
--- /dev/null
+++ b/v2/dalmdl/coremongo/girdfs.go
@@ -0,0 +1,135 @@
+package coremongo
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/gridfs"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+//SaveFileToGridFS - Saves file to gridfs
+func SaveFileToGridFS(db *mongo.Database, bucketName, fileName string, source io.Reader) (string, string, error) {
+
+	bucketName = strings.TrimSpace(bucketName)
+	fileName = strings.TrimSpace(fileName)
+
+	//Validations
+	if db == nil {
+		return "", "", errors.New("db Required")
+	} else if bucketName == "" {
+		return "", "", errors.New("bucketName required")
+	} else if source == nil {
+		return "", "", errors.New("invalid source")
+	}
+
+	//Set bucket config
+	bucketOptions := options.BucketOptions{}
+	bucketOptions.Name = &bucketName
+
+	//Get bucket instance
+	dbBucket, bucketError := gridfs.NewBucket(db, &bucketOptions)
+	if bucketError != nil {
+		return "", "", bucketError
+	}
+
+	//Upload incoming file to bucket
+	fileID, fileError := dbBucket.UploadFromStream(fileName, source)
+	if fileError != nil {
+		return "", "", fileError
+	}
+
+	//Return generated fileId and file name
+	return fileID.Hex(), fileName, nil
+}
+
+//GetFileFromGridFS - Gets file from gridfs
+func GetFileFromGridFS(db *mongo.Database, bucketName, fileName string) ([]byte, error) {
+
+	bucketName = strings.TrimSpace(bucketName)
+	fileName = strings.TrimSpace(fileName)
+
+	//Validations
+	if db == nil {
+		return nil, errors.New("db Required")
+	} else if bucketName == "" {
+		return nil, errors.New("bucketName required")
+	} else if fileName == "" {
+		return nil, errors.New("fileName required'")
+	}
+
+	//Set bucket config
+	bucketOptions := options.BucketOptions{}
+	bucketOptions.Name = &bucketName
+
+	//Get bucket instance
+	dbBucket, bucketError := gridfs.NewBucket(db, &bucketOptions)
+	if bucketError != nil {
+		return nil, bucketError
+	}
+
+	//Read file from DB
+	w := bytes.NewBuffer(make([]byte, 0))
+	_, getFileError := dbBucket.DownloadToStreamByName(fileName, w)
+	if getFileError != nil {
+		return nil, getFileError
+	}
+
+	fileBytes := w.Bytes()
+
+	//Return bytes
+	return fileBytes, nil
+
+}
+
+//GetDBInstance - Gets database instance
+func GetDBInstance(serverIPAddress, port, dbName string, timeOutInSeconds int) (*mongo.Database, error) {
+
+	serverIPAddress = strings.TrimSpace(serverIPAddress)
+	dbName = strings.TrimSpace(dbName)
+	port = strings.TrimSpace(port)
+
+	//Validations
+	if serverIPAddress == "" {
+		return nil, errors.New("serverIPAddress required")
+	} else if dbName == "" {
+		return nil, errors.New("dbName required")
+	} else if timeOutInSeconds <= 0 {
+		return nil, errors.New("valid timeOutInSeconds required")
+	}
+
+	ipElements := strings.Split(serverIPAddress, ".")
+	if len(ipElements) != 4 {
+		return nil, errors.New("invalid serverIPAddress")
+	}
+
+	if port == "" {
+		port = "27017"
+	}
+
+	//Connection string
+	connectionString := "mongodb://" + serverIPAddress + ":" + port
+	client, connectionError := mongo.NewClient(options.Client().ApplyURI(connectionString))
+	if connectionError != nil {
+		return nil, connectionError
+	}
+
+	//Context with timeout
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeOutInSeconds)*time.Second)
+	defer cancel()
+	contextError := client.Connect(ctx)
+
+	if contextError != nil {
+		return nil, contextError
+	}
+
+	//Create a db instance
+	db := client.Database(dbName)
+
+	//Return db instance
+	return db, nil
+}
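
A short sketch of the GridFS helpers (server, bucket and file names are illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/coremongo"
)

func main() {
	db, err := coremongo.GetDBInstance("127.0.0.1", "27017", "filestore", 10)
	if err != nil {
		panic(err)
	}

	// Store a small payload under the "reports" bucket.
	_, name, err := coremongo.SaveFileToGridFS(db, "reports", "report.txt", bytes.NewReader([]byte("hello")))
	if err != nil {
		panic(err)
	}

	data, err := coremongo.GetFileFromGridFS(db, "reports", name)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "hello"
}
```
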
diff --git a/v2/dalmdl/dalmdl.go b/v2/dalmdl/dalmdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..a1416c0b0d73e556c8817ceeecb6d22bb37b51a8
--- /dev/null
+++ b/v2/dalmdl/dalmdl.go
@@ -0,0 +1,9 @@
+package dalmdl
+
+const (
+	MONGODB   = "MONGO"
+	MYSQL     = "MYSQL"
+	FDB       = "FDB"
+	SQLSERVER = "SQLSERVER"
+	GraphDB   = "GRAPHDB"
+)
diff --git a/v2/dalmdl/dao/dao.go b/v2/dalmdl/dao/dao.go
new file mode 100755
index 0000000000000000000000000000000000000000..d1c253935d7459066e46454bf01ac92c5427382f
--- /dev/null
+++ b/v2/dalmdl/dao/dao.go
@@ -0,0 +1,75 @@
+//@author  Ajit Jagtap
+//@version Wed Jul 04 2018 20:15:13 GMT+0530 (IST)
+
+// Package dalmdl will help you access data from FDB through a chainable query DAO
+package dalmdl
+
+import (
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/fdb"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+)
+
+// BaseDAO help to fetch data
+type BaseDAO struct {
+	filePath      string
+	query         []string
+	Output        gjson.Result
+	isCacheable   bool
+	cacheDuration time.Duration
+}
+
+// DAOBuilder will help to run gjson queries
+type DAOBuilder struct {
+	BaseDAO
+}
+
+// GetDAO return new instance
+func GetDAO() *DAOBuilder {
+	return &DAOBuilder{}
+}
+
+// Run : Execute Query
+func (db *DAOBuilder) Run() (*gjson.Result, error) {
+	db.Output = gjson.Result{}
+	var err error
+	for _, qry := range db.query {
+		db.Output, err = fdb.GetDataDAO(db.filePath, qry, db.isCacheable, db.cacheDuration, db.Output)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError("Error in executing query: ", qry, err)
+			return nil, errormdl.CheckErr(err)
+		}
+	}
+
+	return &db.Output, nil
+}
+
+// FilePath sets the FDB file to read data from
+func (db *DAOBuilder) FilePath(name string) *DAOBuilder {
+	db.BaseDAO.filePath = name
+	return db
+}
+
+// IsCacheableWithDuration enables caching of the query result for the given duration
+func (db *DAOBuilder) IsCacheableWithDuration(duration time.Duration) *DAOBuilder {
+	if duration != 0 {
+		db.BaseDAO.isCacheable = true
+		db.BaseDAO.cacheDuration = duration
+	}
+	return db
+}
+
+// IsCacheable enables caching of the query result
+func (db *DAOBuilder) IsCacheable() *DAOBuilder {
+	db.BaseDAO.isCacheable = true
+	return db
+}
+
+// Query sets one or more gjson queries; each query runs on the result of the previous one (AND semantics)
+func (db *DAOBuilder) Query(query ...string) *DAOBuilder {
+	db.BaseDAO.query = query
+	return db
+}
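
A hedged sketch of the builder in use; the FDB file path and query are illustrative:

```go
package main

import (
	"fmt"
	"time"

	dao "corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/dao"
)

func main() {
	// Query an FDB JSON file, caching the result for 30 seconds.
	rs, err := dao.GetDAO().
		FilePath("data/users.json").
		Query("users.#(age>25)#").
		IsCacheableWithDuration(30 * time.Second).
		Run()
	if err != nil {
		panic(err)
	}
	fmt.Println(rs.String())
}
```
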
diff --git a/v2/dalmdl/dgraph/dgraph.go b/v2/dalmdl/dgraph/dgraph.go
new file mode 100644
index 0000000000000000000000000000000000000000..952a5b017444c001d08edd549abbc787c72d71f7
--- /dev/null
+++ b/v2/dalmdl/dgraph/dgraph.go
@@ -0,0 +1,329 @@
+package dgraph
+
+import (
+	"context"
+	"errors"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/dgraph-io/dgo"
+	"github.com/dgraph-io/dgo/protos/api"
+	"google.golang.org/grpc"
+)
+
+type Host struct {
+	Name       string `json:"hostName"`
+	Server     string `json:"server"`
+	Port       int    `json:"port"`
+	IsDefault  bool   `json:"isDefault"`
+	IsDisabled bool   `json:"IsDisabled"`
+
+	// UserName  string
+	// Password  string
+}
+
+type Instance struct {
+	client *dgo.Dgraph
+	host   Host
+}
+
+type DGraphDAO struct {
+	HostName string
+
+	instance *Instance
+}
+
+var (
+	instances   map[string]*Instance
+	defaultHost string
+	configured  bool
+)
+
+var (
+	ErrNotConfigured    = errors.New("graph db instances not configured. InitInstances() must be called to configure.")
+	ErrInstanceNotFound = errors.New("graph db instance not found")
+)
+
+// NewClient returns a new dgraph client for provided configuration.
+func NewClient(h Host) (*dgo.Dgraph, error) {
+
+	if strings.TrimSpace(h.Server) == "" {
+		return nil, errors.New("host address can not be empty")
+	}
+
+	address := bindDgraphServerWithPort(h.Server, h.Port)
+
+	// Dial a gRPC connection. The address to dial to can be configured when
+	// setting up the dgraph cluster.
+	dialOpts := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithBlock(), // block till we connect to the server
+		// grpc.WithTimeout(time.Second * 5),
+		grpc.WithDefaultCallOptions(
+			// grpc.UseCompressor(gzip.Name),
+			grpc.WaitForReady(true),
+		),
+	}
+
+	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5) // wait for 5 seconds to connect to grpc server. Exit if the deadline exceeds.
+	defer cancel()
+	d, err := grpc.DialContext(ctx, address, dialOpts...)
+	if err == context.DeadlineExceeded {
+		return nil, errors.New("graphdb connect error, connection timed out for host " + address)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	client := dgo.NewDgraphClient(api.NewDgraphClient(d))
+
+	// Note: Supported in Enterprise version only
+	// if h.UserName != "" {
+	// 	if err = client.Login(context.TODO(), h.UserName, h.Password); err != nil {
+	// 		return nil, err
+	// 	}
+	// }
+
+	return client, nil
+}
+
+// NewInstance creates a new instance of dgraph client. This instance can be saved in cache with host name as identifier for further operations.
+func NewInstance(client *dgo.Dgraph, host Host) *Instance {
+	return &Instance{
+		client: client,
+		host:   host,
+	}
+}
+
+func InitInstances(configs []Host) error {
+
+	if configured {
+		return nil
+	}
+
+	instances = make(map[string]*Instance, len(configs))
+
+	for _, host := range configs {
+		if host.IsDisabled {
+			continue
+		}
+
+		client, err := NewClient(host)
+		if err != nil {
+			return err
+		}
+
+		instances[host.Name] = &Instance{
+			client: client,
+			host:   host,
+		}
+
+		if host.IsDefault {
+			defaultHost = host.Name
+		}
+	}
+
+	configured = true
+
+	return nil
+}
+
+// GetInstance returns a preconfigured dgraph instance from cache. If not present, returns an error.
+func GetInstance(hostName string) (*Instance, error) {
+	if !configured {
+		return nil, ErrNotConfigured
+	}
+
+	if hostName == "" {
+		hostName = defaultHost
+	}
+
+	i, ok := instances[hostName]
+
+	if !ok {
+		return nil, ErrInstanceNotFound
+	}
+
+	return i, nil
+}
+
+// GetDAO returns a dao instance to access and manipulate graph data and schema.
+//
+// If hostname is empty, default host will be used.
+//
+// Otherwise ErrInstanceNotFound error will be returned.
+func GetDAO(hostName string) (*DGraphDAO, error) {
+	ist, err := GetInstance(hostName)
+	if err != nil {
+		return nil, err
+	}
+	return &DGraphDAO{
+		HostName: hostName,
+		instance: ist,
+	}, nil
+}
+
+// GetTransaction returns a new transaction for provided host.
+//
+// If hostname is empty, transaction from default host will be returned.
+//
+// Otherwise ErrInstanceNotFound error will be returned.
+func GetTransaction(hostName string) (*dgo.Txn, error) {
+
+	ist, err := GetInstance(hostName)
+	if err != nil {
+		return nil, err
+	}
+
+	return ist.client.NewTxn(), nil
+}
+
+// GetData returns the nodes matching to the provided query.
+//
+// query variables can be provided in `vars` param. Safe to provide `nil` if no variables required.
+//
+// The result is against the provided key in the query.
+func (dg *DGraphDAO) GetData(ctx context.Context, query string, vars map[string]string) ([]byte, error) {
+
+	txn := dg.instance.client.NewReadOnlyTxn().BestEffort()
+	var (
+		res *api.Response
+		err error
+	)
+	if len(vars) == 0 {
+		res, err = txn.Query(ctx, query)
+	} else {
+		res, err = txn.QueryWithVars(ctx, query, vars)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return res.GetJson(), nil
+}
+
+// SetData sets the provided data as a node. Can be used to create or update a node.
+//
+// For update, the data must contain `uid` field.
+func (dg *DGraphDAO) SetData(ctx context.Context, data []byte) error {
+	return dg.mutate(ctx, &api.Mutation{
+		SetJson:   data,
+		CommitNow: true,
+	})
+}
+
+// SetDataTXN sets the provided data as a node within the given transaction. Can be used to create or update a node.
+//
+// For update, the data must contain `uid` field.
+//
+// The transaction must be committed or discarded by the caller.
+func (dg *DGraphDAO) SetDataTXN(ctx context.Context, txn *dgo.Txn, data []byte) error {
+	return dg.mutateTXN(ctx,
+		&api.Mutation{
+			SetJson: data,
+		},
+		txn,
+	)
+}
+
+// DeleteData deletes the node or provided node attribute.
+func (dg *DGraphDAO) DeleteData(ctx context.Context, data []byte) error {
+	return dg.mutate(ctx, &api.Mutation{
+		DeleteJson: data,
+		CommitNow:  true,
+	})
+}
+
+// DeleteDataTXN deletes the node or provided node attribute within the given transaction.
+//
+// The transaction must be committed or discarded by the caller.
+func (dg *DGraphDAO) DeleteDataTXN(ctx context.Context, txn *dgo.Txn, data []byte) error {
+	return dg.mutateTXN(ctx,
+		&api.Mutation{
+			DeleteJson: data,
+		},
+		txn,
+	)
+}
+
+// DeleteEdge deletes the given edges of a node. A predicate is the name of the relationship between two nodes.
+//
+// Ex. Person1 `follows` Person2.
+func (dg *DGraphDAO) DeleteEdge(ctx context.Context, uid string, predicates ...string) error {
+
+	mu := &api.Mutation{
+		CommitNow: true,
+	}
+
+	dgo.DeleteEdges(mu, uid, predicates...)
+
+	return dg.mutate(ctx, mu)
+}
+
+// DeleteEdgeTXN deletes the given edges of a node within the given transaction. A predicate is the name of the relationship between two nodes.
+//
+// Ex. Person1 `follows` Person2.
+func (dg *DGraphDAO) DeleteEdgeTXN(ctx context.Context, txn *dgo.Txn, uid string, predicates ...string) error {
+
+	mu := &api.Mutation{
+		CommitNow: true,
+	}
+
+	dgo.DeleteEdges(mu, uid, predicates...)
+
+	return dg.mutateTXN(ctx, mu, txn)
+}
+
+// mutate creates or updates or deletes the node data.
+func (dg *DGraphDAO) mutate(ctx context.Context, mtn *api.Mutation) error {
+
+	txn := dg.instance.client.NewTxn()
+
+	_, err := txn.Mutate(ctx, mtn)
+
+	return err
+}
+
+// mutateTXN applies the mutation within the provided transaction.
+func (dg *DGraphDAO) mutateTXN(ctx context.Context, mtn *api.Mutation, txn *dgo.Txn) error {
+
+	_, err := txn.Mutate(ctx, mtn)
+
+	return err
+}
+
+// CreateSchema sets the provided schema for the nodes data.
+func (dg *DGraphDAO) CreateSchema(ctx context.Context, schema string) error {
+
+	return dg.instance.client.Alter(ctx, &api.Operation{Schema: schema})
+}
+
+// DropSchema deletes the current schema along with the data.
+func (dg *DGraphDAO) DropSchema(ctx context.Context) error {
+
+	return dg.instance.client.Alter(ctx, &api.Operation{DropOp: api.Operation_ALL})
+}
+
+// DropData deletes complete data but maintains the schema.
+func (dg *DGraphDAO) DropData(ctx context.Context) error {
+
+	return dg.instance.client.Alter(ctx, &api.Operation{DropOp: api.Operation_DATA})
+}
+
+// DropAttr deletes a specific attribute completely from data and the schema.
+func (dg *DGraphDAO) DropAttr(ctx context.Context, attr string) error {
+
+	return dg.instance.client.Alter(ctx, &api.Operation{DropOp: api.Operation_ATTR, DropValue: attr})
+}
+
+func bindDgraphServerWithPort(server string, port int) string {
+	// if port is empty then use default port 9080(GRPC Port) & bind to server ip
+
+	if port <= 0 || strings.TrimSpace(strconv.Itoa(port)) == "" {
+		return server + ":9080"
+	}
+
+	return server + ":" + strconv.Itoa(port)
+}
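
A hedged sketch tying the pieces together (host values are illustrative and assume a local dgraph alpha):

```go
package main

import (
	"context"
	"fmt"

	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/dgraph"
)

func main() {
	hosts := []dgraph.Host{{Name: "graph1", Server: "localhost", Port: 9080, IsDefault: true}}
	if err := dgraph.InitInstances(hosts); err != nil {
		panic(err)
	}

	dg, err := dgraph.GetDAO("") // empty name selects the default host
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	if err := dg.SetData(ctx, []byte(`{"name":"Person 1","age":29}`)); err != nil {
		panic(err)
	}

	res, err := dg.GetData(ctx, `{ people(func: has(name)) { name age } }`, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(res))
}
```
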
diff --git a/v2/dalmdl/dgraph/dgraph_test.go b/v2/dalmdl/dgraph/dgraph_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0531ab0b72f50a9a84c6b6c7c551d20f53e383a
--- /dev/null
+++ b/v2/dalmdl/dgraph/dgraph_test.go
@@ -0,0 +1,422 @@
+package dgraph
+
+import (
+	"context"
+	"testing"
+)
+
+var dgraphHost = &Host{
+	Name:   "DGraphHost",
+	Server: "localhost",
+	Port:   9080,
+}
+
+func Test_NewClient(t *testing.T) {
+	type args struct {
+		h Host
+	}
+	tests := []struct {
+		name string
+		args args
+		// want    *dgo.Dgraph
+		wantErr bool
+	}{
+		{
+			name:    "success on valid connection",
+			args:    args{h: Host{Name: "graphDBHost", Server: dgraphHost.Server, Port: 9080}},
+			wantErr: false,
+		},
+		{
+			name:    "fail on connection fail",
+			args:    args{h: Host{Name: "graphDBHost", Server: dgraphHost.Server, Port: 8080}},
+			wantErr: true,
+		},
+		{
+			name:    "success on default port used",
+			args:    args{h: Host{Name: "graphDBHost", Server: dgraphHost.Server}},
+			wantErr: false,
+		},
+		{
+			name:    "fail on blank address",
+			args:    args{h: Host{Name: "graphDBHost", Server: ""}},
+			wantErr: true,
+		},
+		{
+			name:    "fail on invalid address",
+			args:    args{h: Host{Name: "graphDBHost", Server: "10.1.0"}},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := NewClient(tt.args.h)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("NewClient() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func Test_CreateSchema(t *testing.T) {
+	type args struct {
+		c      context.Context
+		schema string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "create schema successfully",
+			args: args{c: context.Background(), schema: `
+			 name: string @index(exact) .
+			 age: int .
+			`},
+			wantErr: false,
+		},
+		{
+			name: "pass invalid schema",
+			args: args{c: context.Background(), schema: `
+			name string @index(exact) .
+			age int .
+			 `},
+			wantErr: true,
+		},
+		{
+			name:    "pass blank schema",
+			args:    args{c: context.Background(), schema: ``},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := InitInstances([]Host{*dgraphHost})
+			if err != nil {
+				t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+			}
+
+			dg, err := GetDAO(dgraphHost.Name)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("CreateSchema() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			err = dg.CreateSchema(tt.args.c, tt.args.schema)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("CreateSchema() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func Test_SetData(t *testing.T) {
+	type args struct {
+		c    context.Context
+		data []byte
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "success on correct data",
+			args: args{c: context.Background(), data: []byte(`{
+				"name": "Person 1",
+				"age": 29,
+				"follows": {
+					"name": "Person 2",
+					"age": 18,
+					"follows": {
+						"name": "Person 3",
+						"age": 37
+					}
+				}
+			}`)},
+			wantErr: false,
+		},
+		{
+			name:    "failure on incorrect data",
+			args:    args{c: context.Background(), data: []byte(``)},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil && !tt.wantErr {
+			t.Errorf("SetData() error = %v", err)
+		}
+
+		err = dg.SetData(tt.args.c, tt.args.data)
+		if err != nil && !tt.wantErr {
+			t.Errorf("SetData() error = %v", err)
+		}
+	}
+}
+
+func Test_DropAttr(t *testing.T) {
+	type args struct {
+		c    context.Context
+		attr string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "success case for delete known attribute",
+			args:    args{c: context.Background(), attr: "age"},
+			wantErr: false,
+		},
+		{
+			name:    "fail case for deleting absent attribute",
+			args:    args{c: context.Background(), attr: "height"},
+			wantErr: true,
+		},
+		{
+			name:    "fail case for blank attribute",
+			args:    args{c: context.Background(), attr: ""},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropAttr() error = %v", err)
+		}
+
+		err = dg.DropAttr(tt.args.c, tt.args.attr)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropAttr() error = %v", err)
+		}
+	}
+}
+
+func Test_GetData(t *testing.T) {
+	type args struct {
+		c     context.Context
+		query string
+		vars  map[string]string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "success query to fetch data without params",
+			args: args{c: context.Background(), query: `
+			{
+				people(func: has(name)) {
+				  name
+				  age,
+				  follows
+				}
+			  }
+			`},
+			wantErr: false,
+		},
+		{
+			name:    "failure on blank query",
+			args:    args{c: context.Background(), query: ""},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil && !tt.wantErr {
+			t.Errorf("GetData() error = %v", err)
+		}
+
+		_, err = dg.GetData(tt.args.c, tt.args.query, tt.args.vars)
+		if err != nil && !tt.wantErr {
+			t.Errorf("GetData() error = %v", err)
+		}
+	}
+}
+
+func Test_DeleteEdge(t *testing.T) {
+	type args struct {
+		c          context.Context
+		uid        string
+		predicates []string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "success case to delete an edge",
+			args:    args{c: context.Background(), uid: "0x754f", predicates: []string{"follows"}},
+			wantErr: false,
+		},
+		{
+			name:    "fail case to delete blank edge",
+			args:    args{c: context.Background(), uid: "0x7551", predicates: []string{""}},
+			wantErr: true,
+		},
+		{
+			name:    "fail case to delete blank UID",
+			args:    args{c: context.Background(), uid: "", predicates: []string{""}},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DeleteEdge() error = %v", err)
+		}
+
+		err = dg.DeleteEdge(tt.args.c, tt.args.uid, tt.args.predicates...)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DeleteEdge() error = %v", err)
+		}
+	}
+}
+
+func Test_DeleteData(t *testing.T) {
+	type args struct {
+		c    context.Context
+		data []byte
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "success on delete correct data",
+			args: args{c: context.Background(), data: []byte(`{
+				"uid": "0x754f"
+			}`)},
+			wantErr: false,
+		},
+		{
+			name:    "failure on incorrect delete data",
+			args:    args{c: context.Background(), data: []byte(``)},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DeleteData() error = %v", err)
+		}
+
+		err = dg.DeleteData(tt.args.c, tt.args.data)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DeleteData() error = %v", err)
+		}
+	}
+}
+func Test_DropData(t *testing.T) {
+	type args struct {
+		c context.Context
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "Drop data case",
+			args:    args{c: context.Background()},
+			wantErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil {
+			t.Errorf("DropData() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		err = dg.DropData(tt.args.c)
+		if err != nil {
+			t.Errorf("DropData() error = %v, wantErr %v", err, tt.wantErr)
+		}
+	}
+}
+
+func Test_DropSchema(t *testing.T) {
+	type args struct {
+		c context.Context
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "Drop schema case",
+			args:    args{c: context.Background()},
+			wantErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		err := InitInstances([]Host{*dgraphHost})
+		if err != nil {
+			t.Errorf("InitInstances() error = %v, wantErr %v", err, tt.wantErr)
+		}
+
+		dg, err := GetDAO(dgraphHost.Name)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropSchema() error = %v", err)
+		}
+
+		err = dg.DropSchema(tt.args.c)
+		if err != nil && !tt.wantErr {
+			t.Errorf("DropSchema() error = %v", err)
+		}
+	}
+}
diff --git a/v2/dalmdl/fdb/fdb.go b/v2/dalmdl/fdb/fdb.go
new file mode 100755
index 0000000000000000000000000000000000000000..27d9c285725eb2e4157b36213e5b0a75debcd393
--- /dev/null
+++ b/v2/dalmdl/fdb/fdb.go
@@ -0,0 +1,331 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:14:04 GMT+0530 (IST)
+
+// Package fdb will help you access data from FDB
+package fdb
+
+import (
+	"path/filepath"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+
+	"github.com/pquerna/ffjson/ffjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/tidwall/gjson"
+)
+
+// securityRequired for securing fdb data
+var securityRequired = false
+
+// downloadFromInternet flag enables filedownload from internet
+// var downloadFromInternet = false
+// var hostAddressForDataFiles = ""
+// var splitString = ""
+
+var defaultSecurityKey = []byte{}
+
+var mutex = &sync.Mutex{}
+var (
+	// NumberOfReads Collects Number of Reads
+	NumberOfReads = 0
+	// NumberOfWrites Collects Number of Writes
+	NumberOfWrites = 0
+	// Fastcache Holds Cache for queries
+	Fastcache cachemdl.FastCacheHelper
+	// NumberOfReadsWithoutSecurity Collects Number of Reads without Security
+	NumberOfReadsWithoutSecurity = 0
+	// NumberOfWritesWithoutSecurity Collects Number of Writes without Security
+	NumberOfWritesWithoutSecurity = 0
+)
+
+// GetDataDAO returns the result of a gjson query over the given FDB file
+func GetDataDAO(filePath, query string, isCacheable bool, cacheTime time.Duration, rs gjson.Result) (gjson.Result, error) {
+
+	if rs.Raw != "" {
+		rs = rs.Get(query)
+		return rs, nil
+	}
+	if isCacheable {
+		data, found := Fastcache.Get(filePath + query)
+		go statemdl.UpdateGlobalServiceCacheState(found)
+		if errormdl.CheckBool(found) {
+			val, ok := data.(gjson.Result)
+			if errormdl.CheckBool1(ok) {
+				return val, nil
+			}
+			return gjson.Result{}, nil
+		}
+	}
+	byteData, err := getDataFromFDB(filePath)
+	if errormdl.CheckErr(err) != nil {
+		return gjson.Result{}, errormdl.CheckErr(err)
+	}
+	if query == constantmdl.STAR {
+		rs = gjson.ParseBytes(byteData)
+	} else {
+		rs = gjson.ParseBytes(byteData).Get(query)
+	}
+	if isCachable {
+		// cacheTime is already a time.Duration; pass it through unscaled
+		Fastcache.SetWithExpiration(filePath+query, rs, cacheTime)
+	}
+	return rs, nil
+}
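+
+// Illustrative usage sketch (hypothetical file path and query); cacheTime is a
+// plain time.Duration:
+//
+//	rs, err := GetDataDAO("data/users.json", "users.#.name", true, 5*time.Second, gjson.Result{})
+//	if err != nil {
+//		loggermdl.LogError(err)
+//	}
+//	fmt.Println(rs.String())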
+
+// getDataFromFDB reads raw bytes from an FDB file; when security is enabled,
+// the content is decrypted and then decompressed before being returned.
+func getDataFromFDB(filePath string) ([]byte, error) {
+	// track the global read counter
+	mutex.Lock()
+	NumberOfReads = NumberOfReads + 1
+	mutex.Unlock()
+
+	data, err := filemdl.ReadFile(filePath)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return nil, errormdl.CheckErr(err)
+	}
+	if securityRequired {
+		keyBytes, hashError := GetKeyWithFileNameAndDefaultKey(filePath)
+		if errormdl.CheckErr1(hashError) != nil {
+			return nil, errormdl.CheckErr1(hashError)
+		}
+		decryptedData, decryptionError := securitymdl.AESDecrypt(data, keyBytes)
+		if errormdl.CheckErr2(decryptionError) != nil {
+			return nil, errormdl.CheckErr2(decryptionError)
+		}
+		decompressedData, decompressError := filemdl.UnZipBytes(decryptedData)
+		if errormdl.CheckErr3(decompressError) != nil {
+			return nil, errormdl.CheckErr3(decompressError)
+		}
+		return decompressedData, nil
+	}
+	return data, err
+}
+
+// SaveDataToFDB saves the data in FDB
+func SaveDataToFDB(filePath string, data []byte, makeDir, createBackup bool) error {
+	mutex.Lock()
+	NumberOfWrites = NumberOfWrites + 1
+	mutex.Unlock()
+
+	if securityRequired {
+
+		keyBytes, hashError := GetKeyWithFileNameAndDefaultKey(filePath)
+		if errormdl.CheckErr(hashError) != nil {
+			return errormdl.CheckErr(hashError)
+		}
+		compressedText, compressionError := filemdl.ZipBytes(data)
+		if errormdl.CheckErr2(compressionError) != nil {
+			return errormdl.CheckErr2(compressionError)
+		}
+		encryptedData, encryptionError := securitymdl.AESEncrypt(compressedText, keyBytes)
+		if errormdl.CheckErr1(encryptionError) != nil {
+			return errormdl.CheckErr1(encryptionError)
+		}
+		saveError := filemdl.GetInstance().Save(filePath, encryptedData, makeDir, createBackup)
+		return saveError
+	}
+
+	saveError := filemdl.GetInstance().Save(filePath, data, makeDir, createBackup)
+	if errormdl.CheckErr(saveError) != nil {
+		return errormdl.CheckErr(saveError)
+	}
+	// TODO: delete file-related entries from the cache
+	return nil
+}
+
+// SaveInterfaceDataToFDB marshals interfaceData to JSON and saves it in FDB
+func SaveInterfaceDataToFDB(filePath string, interfaceData interface{}, makeDir, createBackup bool) error {
+	mutex.Lock()
+	NumberOfWrites = NumberOfWrites + 1
+	mutex.Unlock()
+
+	data, marshalError := ffjson.Marshal(interfaceData)
+	if errormdl.CheckErr(marshalError) != nil {
+		loggermdl.LogError(marshalError)
+		return marshalError
+	}
+	if securityRequired {
+
+		keyBytes, hashError := GetKeyWithFileNameAndDefaultKey(filePath)
+		if errormdl.CheckErr(hashError) != nil {
+			return errormdl.CheckErr(hashError)
+		}
+		compressedText, compressionError := filemdl.ZipBytes(data)
+		if errormdl.CheckErr2(compressionError) != nil {
+			return errormdl.CheckErr2(compressionError)
+		}
+		encryptedData, encryptionError := securitymdl.AESEncrypt(compressedText, keyBytes)
+		if errormdl.CheckErr1(encryptionError) != nil {
+			return errormdl.CheckErr1(encryptionError)
+		}
+		saveError := filemdl.GetInstance().Save(filePath, encryptedData, makeDir, createBackup)
+		return saveError
+	}
+
+	saveError := filemdl.GetInstance().Save(filePath, data, makeDir, createBackup)
+	if errormdl.CheckErr(saveError) != nil {
+		return errormdl.CheckErr(saveError)
+	}
+	// TODO: delete file-related entries from the cache
+	return nil
+}
+
+// EnableSecurity enables encryption of FDB data using the given key and
+// initialization vector; subsequent reads and writes compress and encrypt content.
+func EnableSecurity(key []byte, initializationVector string) {
+	securitymdl.SetSecurityConfig(key, initializationVector)
+	defaultSecurityKey = key
+	securityRequired = true
+}
+
+//DisableSecurity disables security
+func DisableSecurity() {
+	securityRequired = false
+}
+
+// GetSecurityStatus reports whether security is enabled
+func GetSecurityStatus() bool {
+	return securityRequired
+}
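+
+// Illustrative security round-trip sketch (hypothetical key and path): with
+// security enabled, SaveDataToFDB compresses and then AES-encrypts the payload,
+// and getDataFromFDB reverses both steps on read.
+//
+//	EnableSecurity([]byte("16-byte-demo-key"), "AAAAAAAAAAAAAAAA")
+//	defer DisableSecurity()
+//	if err := SaveDataToFDB("data/users_enc.json", payload, true, false); err != nil {
+//		loggermdl.LogError(err)
+//	}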
+
+// GetKeyWithFileNameAndDefaultKey derives a 128-bit key by hashing the file name concatenated with the default key
+func GetKeyWithFileNameAndDefaultKey(filePath string) ([]byte, error) {
+	fileName := filepath.Base(filePath)
+	fileNameBytes := []byte(fileName)
+	fileNameBytes = append(fileNameBytes, defaultSecurityKey...)
+	keyBytes, getHashError := hashmdl.Get128BitHash(fileNameBytes)
+	if errormdl.CheckErr(getHashError) != nil {
+		return nil, errormdl.CheckErr(getHashError)
+	}
+	return keyBytes[:], nil
+}
+
+// AppendDataToFDB appends data to FDB
+func AppendDataToFDB(filePath string, data []byte, createBackup bool) error {
+	saveError := filemdl.GetInstance().Save(filePath, data, false, createBackup)
+	return saveError
+}
+
+// SoftDeleteFileFromFDB soft-deletes a file by renaming it with a deletion timestamp suffix, e.g. users.json_deleted_20060102150405
+func SoftDeleteFileFromFDB(filePath string) error {
+	t := time.Now()
+	newFilePath := filePath + "_deleted_" + t.Format("20060102150405")
+	err := filemdl.RenameFile(filePath, newFilePath)
+	return err
+}
+
+// HardDeleteFileFromFDB permanently deletes a file from FDB
+func HardDeleteFileFromFDB(filePath string) error {
+	err := filemdl.DeleteFile(filePath)
+	return err
+}
+
+// GetDataFromFDBWithoutSecurity reads raw bytes from an FDB file, bypassing
+// the decrypt/decompress pipeline even when security is enabled.
+func GetDataFromFDBWithoutSecurity(filePath string) ([]byte, error) {
+	// track the global read counter
+	mutex.Lock()
+	NumberOfReadsWithoutSecurity = NumberOfReadsWithoutSecurity + 1
+	mutex.Unlock()
+	data, err := filemdl.ReadFile(filePath)
+	if errormdl.CheckErr(err) != nil {
+		// if !downloadFromInternet {
+		return nil, errormdl.CheckErr(err)
+		// }
+		// if hostAddressForDataFiles == "" {
+		// 	loggermdl.LogError("Call dalhelper.SetRemoteHostPath(remoteHost) to set remote file path ")
+		// 	return nil, errormdl.CheckErr(err)
+		// }
+		// relativeFilePath := strings.SplitAfter(filePath, splitString)
+		// remotePath := hostAddressForDataFiles + relativeFilePath[1]
+		// resp, httpError := http.Get(remotePath)
+		// if httpError != nil || (resp != nil && resp.StatusCode == 404) {
+		// 	return nil, httpError
+		// }
+		// data, readError := ioutil.ReadAll(resp.Body)
+		// if errormdl.CheckErr1(readError) != nil {
+		// 	loggermdl.LogError("Error while reading data from response body ", readError)
+		// 	return nil, errormdl.CheckErr1(readError)
+		// }
+		// defer resp.Body.Close()
+		// saveError := SaveDataToFDB(filePath, data, true, false)
+		// return data, saveError
+	}
+	return data, err
+}
+
+// SaveDataToFDBWithoutSecurity saves data to FDB, bypassing the security pipeline
+func SaveDataToFDBWithoutSecurity(filePath string, data []byte, makeDir, createBackup bool) error {
+	mutex.Lock()
+	NumberOfWritesWithoutSecurity = NumberOfWritesWithoutSecurity + 1
+	mutex.Unlock()
+	saveError := filemdl.GetInstance().Save(filePath, data, makeDir, createBackup)
+	return saveError
+}
+
+// // EnableRemoteFileDownload Set remote address and enable file download from internet
+// func EnableRemoteFileDownload(remoteHostAddress, pathSplitString string) {
+// 	hostAddressForDataFiles = remoteHostAddress
+// 	splitString = pathSplitString
+// 	downloadFromInternet = true
+// }
+
+// // DisableRemoteFileDownload disables  file download from internet
+// func DisableRemoteFileDownload() {
+// 	hostAddressForDataFiles = ""
+// 	splitString = ""
+// 	downloadFromInternet = false
+// }
+
+// // GetRemoteFileDownloadStatus get status of file download from internet
+// func GetRemoteFileDownloadStatus() bool {
+// 	return downloadFromInternet
+// }
+
+// SaveDataToFDBWithoutQueue saves the data in FDB without file queueing
+func SaveDataToFDBWithoutQueue(filePath string, data []byte, makeDir, createBackup bool) error {
+	mutex.Lock()
+	NumberOfWrites = NumberOfWrites + 1
+	mutex.Unlock()
+
+	if securityRequired {
+		keyBytes, hashError := GetKeyWithFileNameAndDefaultKey(filePath)
+		if errormdl.CheckErr(hashError) != nil {
+			return errormdl.CheckErr(hashError)
+		}
+		compressedText, compressionError := filemdl.ZipBytes(data)
+		if errormdl.CheckErr2(compressionError) != nil {
+			return errormdl.CheckErr2(compressionError)
+		}
+		encryptedData, encryptionError := securitymdl.AESEncrypt(compressedText, keyBytes)
+		if errormdl.CheckErr1(encryptionError) != nil {
+			return errormdl.CheckErr1(encryptionError)
+		}
+		saveError := filemdl.WriteFile(filePath, encryptedData, makeDir, createBackup)
+		return saveError
+	}
+
+	saveError := filemdl.WriteFile(filePath, data, makeDir, createBackup)
+	if errormdl.CheckErr(saveError) != nil {
+		return errormdl.CheckErr(saveError)
+	}
+	// TODO: delete file-related entries from the cache
+	return nil
+}
+
+// AppendDataToFDBWithoutQueue appends data to FDB without file queueing
+func AppendDataToFDBWithoutQueue(filePath string, data []byte, createBackup bool) error {
+	_, saveError := filemdl.AppendFile(filePath, string(data))
+	return saveError
+}
diff --git a/v2/dalmdl/fdb/fdb_test.go b/v2/dalmdl/fdb/fdb_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..c36966e56f085f3d477566138974a0f32730bb2a
--- /dev/null
+++ b/v2/dalmdl/fdb/fdb_test.go
@@ -0,0 +1,337 @@
+// TODO: Commented because of following error while setting up go modules
+// Command - go mod tidy
+// Error -
+// go: corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/fdb tested by
+//         corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/fdb.test imports
+//         corelab.mkcl.org/MKCLOS/coredevelopmentplatform/coreospackage/dalhelper imports
+//         gopkg.in/ahmetb/go-linq.v3: gopkg.in/ahmetb/go-linq.v3@v3.1.0: parsing go.mod:
+//         module declares its path as: github.com/ahmetb/go-linq/v3
+//                 but was required as: gopkg.in/ahmetb/go-linq.v3
+
+// //@author  Ajit Jagtap
+
+// //@version Thu Jul 05 2018 06:13:57 GMT+0530 (IST)
+package fdb
+
+// import (
+// 	"fmt"
+// 	"testing"
+
+// 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/coreospackage/dalhelper"
+
+// 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+// 	"github.com/stretchr/testify/assert"
+// 	"github.com/tidwall/gjson"
+// )
+
+// func TestGetDataDAO(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	result, err := GetDataDAO("../../testingdata/users.json", "*", true, gjson.Result{})
+// 	assert.NoError(t, err, "This should not return error")
+// 	a := result.String()
+// 	assert.NotZero(t, a, "Should give len")
+// }
+
+// func TestGetDataDAO_WithRaw(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	result, err := GetDataDAO("../../testingdata/users.json", "*", true, gjson.Result{})
+// 	//pass same result again
+// 	result, err = GetDataDAO("../../testingdata/users.json", "*", true, result)
+// 	assert.NoError(t, err, "This should not return error")
+// }
+
+// func TestGetDataDAO_WithFileReadError(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	_, err := GetDataDAO("../../testingdata/users.json", "*", false, gjson.Result{})
+// 	assert.Error(t, err, "This should  return error")
+// }
+
+// func TestGetDataDAO_WithFileRead(t *testing.T) {
+// 	//no error and cache flush
+// 	Fastcache.Purge()
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	_, err := GetDataDAO("../../testingdata/users.json", "#", true, gjson.Result{})
+// 	assert.NoError(t, err, "This should not return error")
+// }
+
+// func TestGetDataDAO_WithCacheGetErrAndFileRead(t *testing.T) {
+// 	//no error and cache flush
+// 	Fastcache.Purge()
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	_, err := GetDataDAO("../../testingdata/users.json", "#", true, gjson.Result{})
+// 	assert.NoError(t, err, "This should not return error")
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// }
+
+// func TestSaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := dalhelper.GetDataFromFDB("../../testingdata/users.json")
+// 	err := SaveDataToFDB("../../testingdata/users.json", ba, false, false)
+// 	assert.NoError(t, err, "This should not return error")
+// }
+
+// func Test1SaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	ba, _ := dalhelper.GetDataFromFDB("../../testingdata/users.json")
+// 	err := SaveDataToFDB("../../testingdata/users.json", ba, false, true)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "This should return error")
+// }
+
+// func Test2SaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	err := SaveDataToFDB("../../testingdata/users_enc.json", ba, true, false)
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test3SaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	err := SaveDataToFDB("../../testingdata/users_enc1.json", ba, true, false)
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test4SaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+// 	err := SaveDataToFDB("../../testingdata/users_enc1.json", ba, true, false)
+// 	errormdl.IsTestingNegetiveCaseOn2 = false
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.Error(t, err, "Error Expected")
+// }
+
+// func Test5SaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	err := SaveDataToFDB("../../testingdata/users_enc1.json", ba, true, false)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.Error(t, err, "Error Expected")
+// }
+// func Test6SaveDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	err := SaveDataToFDB("../../testingdata/users_enc1.json", ba, true, false)
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.Error(t, err, "Error Expected")
+// }
+
+// func TestAppendDataToFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	err := AppendDataToFDB("../../testingdata/users_enc.json", ba, false)
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+// func TestSoftDeleteFileFromFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	err := SoftDeleteFileFromFDB("../../testingdata/users_enc.json")
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func TestHardDeleteFileFromFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	err := HardDeleteFileFromFDB("../../testingdata/users_enc1.json")
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func TestGetDataFromFDBWithoutSecurity(t *testing.T) {
+// 	Fastcache.Purge()
+// 	_, err := GetDataFromFDBWithoutSecurity("../../testingdata/users.json")
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test1GetDataFromFDBWithoutSecurity(t *testing.T) {
+// 	Fastcache.Purge()
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	_, err := GetDataFromFDBWithoutSecurity("../../testingdata/users.json")
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "No Error Expected")
+// }
+
+// // func Test2GetDataFromFDBWithoutSecurity(t *testing.T) {
+// // 	Fastcache.Purge()
+// // 	errormdl.IsTestingNegetiveCaseOn = true
+// // 	EnableRemoteFileDownload("", "")
+// // 	_, err := GetDataFromFDBWithoutSecurity("../../testingdata/users.json")
+// // 	errormdl.IsTestingNegetiveCaseOn = false
+// // 	assert.Error(t, err, "No Error Expected")
+// // }
+
+// // func Test3GetDataFromFDBWithoutSecurity(t *testing.T) {
+// // 	Fastcache.Purge()
+// // 	EnableRemoteFileDownload("http://cdpcdn.mkcl.org/", "testingdata")
+// // 	_, err := GetDataFromFDBWithoutSecurity("../../testingdata/92/4d/f1/924df182524b2428080069888694a3a2.mp3")
+// // 	DisableRemoteFileDownload()
+// // 	fmt.Println(GetRemoteFileDownloadStatus())
+// // 	assert.NoError(t, err, "No Error Expected")
+// // }
+
+// // func Test4GetDataFromFDBWithoutSecurity(t *testing.T) {
+// // 	Fastcache.Purge()
+// // 	EnableRemoteFileDownload("http://cdpcdn1.mkcl.org/", "testingdata")
+// // 	_, err := GetDataFromFDBWithoutSecurity("../../testingdata/3b/23/e4/3b23e4f39eb4bd69b53cfefbe0c606fc.mp4")
+// // 	DisableRemoteFileDownload()
+// // 	fmt.Println(GetRemoteFileDownloadStatus())
+// // 	assert.Error(t, err, "Error Expected")
+// // }
+
+// // func Test5GetDataFromFDBWithoutSecurity(t *testing.T) {
+// // 	Fastcache.Purge()
+// // 	errormdl.IsTestingNegetiveCaseOn1 = true
+// // 	EnableRemoteFileDownload("http://cdpcdn.mkcl.org/", "testingdata")
+// // 	_, err := GetDataFromFDBWithoutSecurity("../../testingdata/3b/23/e4/3b23e4f39eb4bd69b53cfefbe0c606fc.mp4")
+// // 	DisableRemoteFileDownload()
+// // 	fmt.Println(GetRemoteFileDownloadStatus())
+// // 	errormdl.IsTestingNegetiveCaseOn1 = false
+// // 	assert.Error(t, err, "No Error Expected")
+// // }
+
+// func TestSaveDataToFDBWithoutSecurity(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	err := SaveDataToFDBWithoutSecurity("../../testingdata/users.json", ba, false, false)
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func TestSaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users.json", ba, false, false)
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+// func Test1SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users.json", ba, false, false)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "No Error Expected")
+// }
+
+// func Test2SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users_enc.json", ba, true, false)
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test3SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users_enc1.json", ba, true, false)
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test4SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users_enc1.json", ba, true, false)
+// 	errormdl.IsTestingNegetiveCaseOn2 = false
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.Error(t, err, "Error Expected")
+// }
+
+// func Test5SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users_enc1.json", ba, true, false)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.Error(t, err, "Error Expected")
+// }
+// func Test6SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users_enc1.json", ba, true, false)
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.Error(t, err, "Error Expected")
+// }
+
+// func TestAppendDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	err := AppendDataToFDBWithoutQueue("../../testingdata/users.json", ba, false)
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test7SaveDataToFDBWithoutQueue(t *testing.T) {
+// 	Fastcache.Purge()
+// 	ba, _ := getDataFromFDB("../../testingdata/users.json")
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	err := SaveDataToFDBWithoutQueue("../../testingdata/users_enc1.json", ba, true, false)
+// 	DisableSecurity()
+// 	fmt.Println(GetSecurityStatus())
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func TestGetDataFromFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	_, err := getDataFromFDB("../../testingdata/users_enc1.json")
+// 	DisableSecurity()
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func Test1GetDataFromFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	_, err := getDataFromFDB("../../testingdata/users_enc1.json")
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	DisableSecurity()
+// 	assert.Error(t, err, "Error is Expected")
+// }
+
+// func Test2GetDataFromFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+// 	_, err := getDataFromFDB("../../testingdata/users_enc1.json")
+// 	errormdl.IsTestingNegetiveCaseOn2 = false
+// 	DisableSecurity()
+// 	assert.Error(t, err, "Error is Expected")
+// }
+
+// func Test3GetDataFromFDB(t *testing.T) {
+// 	Fastcache.Purge()
+// 	EnableSecurity([]byte("MKCLSecurity$#@!"), "AAAAAAAAAAAAAAAA")
+// 	errormdl.IsTestingNegetiveCaseOn3 = true
+// 	_, err := getDataFromFDB("../../testingdata/users_enc1.json")
+// 	errormdl.IsTestingNegetiveCaseOn3 = false
+// 	DisableSecurity()
+// 	assert.Error(t, err, "Error is Expected")
+// }
diff --git a/v2/dalmdl/fdb/fdbdefault.go b/v2/dalmdl/fdb/fdbdefault.go
new file mode 100755
index 0000000000000000000000000000000000000000..453e3f4535654f1f2d9d9a65f788202617914876
--- /dev/null
+++ b/v2/dalmdl/fdb/fdbdefault.go
@@ -0,0 +1,19 @@
+//+build !prod
+
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:16:34 GMT+0530 (IST)
+
+// Package fdb default compilation; this file compiles only for the development environment
+package fdb
+
+import (
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+)
+
+func init() {
+	Fastcache = cachemdl.FastCacheHelper{}
+	// FIXME: the hardcoded values below need proper tuning
+	Fastcache.Setup(5000, time.Minute*500, time.Minute*500)
+}
diff --git a/v2/dalmdl/fdb/fdbprod.go b/v2/dalmdl/fdb/fdbprod.go
new file mode 100755
index 0000000000000000000000000000000000000000..bd4b26a4f7c40ca99ec77dfe97f77e5df9ebcae8
--- /dev/null
+++ b/v2/dalmdl/fdb/fdbprod.go
@@ -0,0 +1,13 @@
+//+build prod
+
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:18:41 GMT+0530 (IST)
+
+// Package fdb prod compilation; this file compiles only for the production environment
+package fdb
+
+// func init() {
+// 	Mastercache = cachemdl.CacheGCHelper{}
+// 	//FIXME: Below hardcoded values need better number
+// 	Mastercache.Setup(5000, time.Second*500)
+// }
diff --git a/v2/dalmdl/lazywriter/lazywriter.go b/v2/dalmdl/lazywriter/lazywriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..41aab31cb989102b5c21a0b182febf93e5743efa
--- /dev/null
+++ b/v2/dalmdl/lazywriter/lazywriter.go
@@ -0,0 +1,548 @@
+package lazywriter
+
+import (
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"github.com/tidwall/gjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/fdb"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/noaway/heartbeat"
+
+	"github.com/pquerna/ffjson/ffjson"
+
+	"github.com/patrickmn/go-cache"
+)
+
+// var PerformanceAnalyser = map[string]LazyCacheObject{}
+
+// var isDebugMode = false
+var lazyMutex = &sync.Mutex{}
+
+// MIN_DATA_LENGTH is the minimum payload size, in bytes, accepted for persistence.
+const MIN_DATA_LENGTH = 10
+
+// LazyFDBHelper helps save and get cache objects,
+// and also persists them to the hard disk
+type LazyFDBHelper struct {
+	gc                     *cache.Cache
+	Heartbeat              *heartbeat.Task
+	IsProcessRunning       bool
+	NumberOfUpdateAttempts int
+	MAX_NUMBER_OF_RETRY    int
+	RETRY_SLEEP_TIME_SEC   time.Duration
+	INTERVAL_TIME_SEC      int
+	MEMORY_WRITE_COUNT     int
+	MEMORY_READ_COUNT      int
+	DISK_READ_COUNT        int
+	DISK_WRITE_COUNT       int
+	CacheExpirationTime    time.Duration
+	CacheCleanUpInterval   time.Duration
+}
+
+// LazyCacheObject is a cache entry whose pending changes are lazily persisted to disk.
+type LazyCacheObject struct {
+	// Identifier will be used to identify the LazyCacheObject
+	Identifier string
+	// This will be used as a Key for GC cache
+	FileName string
+
+	// GJSONData stores the object's parsed JSON; the raw byte form is currently unused.
+	GJSONData gjson.Result
+
+	// InterfaceData holds the object the developer wants to fetch or store.
+	InterfaceData interface{}
+
+	// ChangeCount indicates how many times the data has changed since the last flush.
+	ChangeCount int
+
+	// IsLocked reports whether the object is locked for file saving.
+	IsLocked bool
+
+	MEMORY_WRITE_COUNT int
+	MEMORY_READ_COUNT  int
+	DISK_READ_COUNT    int
+	DISK_WRITE_COUNT   int
+
+	SaveFn SaveDataFn
+}
+
+// SaveDataFn is a user-defined callback executed to persist data. If not provided, the default save function is used.
+type SaveDataFn func(key string, value *LazyCacheObject)
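+
+// Illustrative sketch of a custom SaveFn (hypothetical identifier and path):
+// callers may override the default persistence step per object.
+//
+//	obj := LazyCacheObject{
+//		Identifier: "user-42",
+//		FileName:   "data/user-42.json",
+//		SaveFn: func(key string, value *LazyCacheObject) {
+//			// persist value.GJSONData or value.InterfaceData as needed
+//		},
+//	}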
+
+// StartProcess configures the cache and starts the heartbeat task that periodically flushes changed objects to disk.
+func (lfd *LazyFDBHelper) StartProcess(objectCount int, taskName string,
+	intervalTime int, sleepTime int, maxNumberOfRetry int, isDebugMode bool) {
+	// loggermdl.LogDebug("StartProcess")
+	// defaults; overridden below when valid parameters are provided
+	lfd.INTERVAL_TIME_SEC = 5
+	lfd.RETRY_SLEEP_TIME_SEC = 1 * time.Second
+	// isDebugMode = isDebugMode
+	// check parameter is valid - if not keep default else set new value
+	if intervalTime > 0 {
+		lfd.INTERVAL_TIME_SEC = intervalTime
+	}
+
+	// check parameter is valid - if not keep default else set new value
+
+	if sleepTime > 0 {
+		lfd.RETRY_SLEEP_TIME_SEC = time.Duration(sleepTime) * time.Millisecond
+	}
+
+	if maxNumberOfRetry <= 0 {
+		lfd.MAX_NUMBER_OF_RETRY = 3
+	} else {
+		lfd.MAX_NUMBER_OF_RETRY = maxNumberOfRetry
+	}
+
+	//start Heartbeat event
+	lfd.Heartbeat, _ = heartbeat.NewTast(taskName, lfd.INTERVAL_TIME_SEC)
+
+	// TODO: Use Fast Cache here
+	// Use default ARC algo for store
+	// lfd.gc = gcache.New(objectCount).
+	// 	LFU().
+	// 	Build()
+	lfd.gc = cache.New(lfd.CacheExpirationTime, lfd.CacheCleanUpInterval)
+	//Init timer
+	lazyMutex.Lock()
+	lfd.Heartbeat.Start(func() error {
+
+		// check if process already running
+		if lfd.IsProcessRunning {
+			// if the process is already running, skip this tick
+			return nil
+		}
+
+		// process each object in the cache and save it to disk
+		lfd.saveObjectsToFdb()
+		return nil
+	})
+	lazyMutex.Unlock()
+}
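+
+// Illustrative usage sketch (hypothetical values): configure the helper, start
+// the heartbeat, and queue an object; each tick flushes entries whose
+// ChangeCount is non-zero.
+//
+//	var lazy LazyFDBHelper
+//	lazy.CacheExpirationTime = 10 * time.Minute
+//	lazy.CacheCleanUpInterval = 15 * time.Minute
+//	lazy.StartProcess(1000, "flush-task", 5, 100, 3, false)
+//	lazy.SaveOrUpdateDataInCache(LazyCacheObject{
+//		Identifier: "logs",
+//		FileName:   "data/logs.json",
+//		GJSONData:  gjson.Parse(`{"name":"SampleObject"}`),
+//	})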
+
+// ClearLazyObjInterfaceData removes all entries with the given identifier from the cache.
+func (lfd *LazyFDBHelper) ClearLazyObjInterfaceData(identifier string) error {
+
+	for item := range lfd.gc.Items() {
+		//TODO: catch errors
+		cachedObject, ok := lfd.gc.Get(item)
+		if !ok {
+			return errormdl.Wrap("error occured while getting " + item + " from gcache")
+		}
+		cachedObjectActual, _ := cachedObject.(LazyCacheObject)
+
+		if cachedObjectActual.Identifier == identifier {
+			if cachedObjectActual.ChangeCount > 0 {
+				cachedObjectActual.IsLocked = true
+				// saveDataToFDB(cachedObjectActual.FileName, cachedObjectActual.InterfaceData, cachedObjectActual.GJSONData)
+				cachedObjectActual.ChangeCount = 0
+				cachedObjectActual.IsLocked = false
+				lazyMutex.Lock()
+				lfd.DISK_WRITE_COUNT++
+				lazyMutex.Unlock()
+				// if isDebugMode {
+				// 	lazyCacheObject := PerformanceAnalyser[cachedObjectActual.FileName]
+				// 	lazyCacheObject.DISK_WRITE_COUNT++
+				// 	PerformanceAnalyser[cachedObjectActual.FileName] = lazyCacheObject
+				// }
+			}
+			lazyMutex.Lock()
+			lfd.gc.Delete(cachedObjectActual.FileName)
+			lazyMutex.Unlock()
+
+		}
+	}
+	return nil
+}
+
+// saveObjectsToFdb fetches all objects in the store and saves each changed one
+// to disk, one by one; it currently runs single-threaded.
+func (lfd *LazyFDBHelper) saveObjectsToFdb() {
+	lfd.IsProcessRunning = true
+
+	// Fetch All Rows and then save into db
+	for item := range lfd.gc.Items() {
+
+		//TODO: catch errors
+		cacheObjectraw, _ := lfd.gc.Get(item)
+		cacheObjActual, _ := cacheObjectraw.(LazyCacheObject)
+
+		if cacheObjActual.ChangeCount > 0 {
+
+			cacheObjActual.IsLocked = true
+			// TODO: Catch errors from save function
+			if cacheObjActual.SaveFn == nil {
+				loggermdl.LogError("Executing default function")
+				saveDataToFDB(cacheObjActual.FileName, cacheObjActual.InterfaceData, cacheObjActual.GJSONData)
+			} else {
+				loggermdl.LogError("Executing custom function")
+				cacheObjActual.SaveFn(item, &cacheObjActual)
+			}
+			cacheObjActual.ChangeCount = 0
+			cacheObjActual.IsLocked = false
+			lfd.gc.Set(cacheObjActual.FileName, cacheObjActual, lfd.CacheExpirationTime)
+
+			lazyMutex.Lock()
+			lfd.MEMORY_WRITE_COUNT++
+			lfd.DISK_WRITE_COUNT++
+
+			// if isDebugMode {
+			// 	lazyCacheObject := PerformanceAnalyser[cacheObjActual.FileName]
+			// 	lazyCacheObject.MEMORY_WRITE_COUNT++
+			// 	PerformanceAnalyser[cacheObjActual.FileName] = lazyCacheObject
+			// }
+			lazyMutex.Unlock()
+			loggermdl.LogError("changes saved to disk at ", cacheObjActual.FileName)
+		}
+	}
+	lfd.NumberOfUpdateAttempts = lfd.NumberOfUpdateAttempts + 1
+
+	lfd.IsProcessRunning = false
+}
+
+// SaveOrUpdateDataInCache saves the object in the cache, or updates it if already present.
+func (lfd *LazyFDBHelper) SaveOrUpdateDataInCache(newObject LazyCacheObject) bool {
+
+	jsonString := newObject.GJSONData.String()
+	byteArray := []byte(jsonString)
+	if jsonString == "" {
+		var marshalError error
+		byteArray, marshalError = ffjson.Marshal(newObject.InterfaceData)
+		if marshalError != nil {
+			loggermdl.LogError("error occured while marshaling data ", marshalError)
+			return false
+		}
+	}
+
+	// check data length before saving. If less than assumed data length, return false
+
+	lengthOfData := len(byteArray)
+	if lengthOfData < MIN_DATA_LENGTH {
+		loggermdl.LogError("data size is less than minimun expected data length. Actual data length: ", lengthOfData)
+		loggermdl.LogError("data received: ", string(byteArray))
+
+		return false
+	}
+
+	retryCount := 0
+retrylabel:
+	// Get prev object and then save new one
+	//TODO: catch errors
+	dataFromGC, ok := lfd.gc.Get(newObject.FileName)
+
+	if !ok || dataFromGC == nil {
+		newObject.ChangeCount = 1
+		lfd.gc.Set(newObject.FileName, newObject, lfd.CacheExpirationTime)
+
+	} else {
+
+		oldObject, _ := dataFromGC.(LazyCacheObject)
+		if oldObject.IsLocked && retryCount < lfd.MAX_NUMBER_OF_RETRY {
+			retryCount++
+			// Sleep for few sec so that other thread will release lock
+			time.Sleep(lfd.RETRY_SLEEP_TIME_SEC)
+			goto retrylabel
+		}
+		newObject.ChangeCount = oldObject.ChangeCount + 1
+		lfd.gc.Set(newObject.FileName, newObject, lfd.CacheExpirationTime)
+	}
+	lazyMutex.Lock()
+	lfd.MEMORY_WRITE_COUNT++
+	lazyMutex.Unlock()
+	// if isDebugMode {
+	// 	lazyCacheObject := PerformanceAnalyser[newObject.FileName]
+	// 	lazyCacheObject.MEMORY_WRITE_COUNT++
+	// 	PerformanceAnalyser[newObject.FileName] = lazyCacheObject
+	// }
+	// loggermdl.LogError("data updated in cache")
+	return true
+}
+
+// saveDataToFDB persists data to disk, marshaling with ffjson when no
+// pre-rendered JSON string is available.
+func saveDataToFDB(filePath string, objectData interface{}, GJSONData gjson.Result) bool {
+	//TODO: catch errors
+	stringData := GJSONData.String()
+	byteArray := []byte(stringData)
+	if stringData == "" {
+		var marshalError error
+		byteArray, marshalError = ffjson.Marshal(objectData)
+		if marshalError != nil {
+			loggermdl.LogError("error occured while marshaling data ", marshalError)
+			return false
+		}
+	}
+
+	// check data length before saving. If less than assumed data length, return false
+	lengthOfData := len(byteArray)
+	if lengthOfData < MIN_DATA_LENGTH {
+		loggermdl.LogError("data size is less than minimun expected data length. Actual data length: ", lengthOfData)
+		loggermdl.LogError("data received: ", string(byteArray))
+		return false
+	}
+
+	saveError := fdb.SaveDataToFDBWithoutQueue(filePath, byteArray, true, false)
+	if saveError != nil {
+		loggermdl.LogError("error occured while saving data ", saveError)
+		return false
+	}
+	return true
+}
+
+// use it when req.
+// func GetBytes(key interface{}) ([]byte, error) {
+// 	var buf bytes.Buffer
+// 	enc := gob.NewEncoder(&buf)
+// 	err := enc.Encode(key)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	return buf.Bytes(), nil
+// }
+
+// RemoveDataFromCache flushes pending changes for the given identifier and removes matching entries from the cache.
+func (lfd *LazyFDBHelper) RemoveDataFromCache(identifier string) {
+
+	// Fetch All Rows and then save into db
+
+	// cachedObjectList := lfd.gc.Items()
+	for item := range lfd.gc.Items() {
+
+		//TODO: catch errors
+		cachedObject, ok := lfd.gc.Get(item)
+		if !ok {
+			loggermdl.LogError("error occured while getting ", item, " from gcache")
+		}
+		cachedObjectActual, _ := cachedObject.(LazyCacheObject)
+
+		if cachedObjectActual.Identifier == identifier {
+			if cachedObjectActual.ChangeCount > 0 {
+				cachedObjectActual.IsLocked = true
+				saveDataToFDB(cachedObjectActual.FileName, cachedObjectActual.InterfaceData, cachedObjectActual.GJSONData)
+				cachedObjectActual.ChangeCount = 0
+				cachedObjectActual.IsLocked = false
+				lazyMutex.Lock()
+				lfd.DISK_WRITE_COUNT++
+				lazyMutex.Unlock()
+				// if isDebugMode {
+				// 	lazyCacheObject := PerformanceAnalyser[cachedObjectActual.FileName]
+				// 	lazyCacheObject.DISK_WRITE_COUNT++
+				// 	PerformanceAnalyser[cachedObjectActual.FileName] = lazyCacheObject
+				// }
+			}
+			lazyMutex.Lock()
+			lfd.gc.Delete(cachedObjectActual.FileName)
+			lazyMutex.Unlock()
+
+		}
+	}
+}
+
+//New Add new Key and value
+// func  (lfd *LazyFDBHelper) SetToCache(newObject LazyCacheObject) error {
+// 	lfd.MEMORY_WRITE_COUNT++
+// 	return lfd.gc.Set(newObject.FileName, newObject.InterfaceData)
+// }
+
+// GetFromCache returns the cached object for the given key.
+func (lfd *LazyFDBHelper) GetFromCache(newObject LazyCacheObject) (interface{}, bool) {
+	lazyMutex.Lock()
+	lfd.MEMORY_READ_COUNT++
+	lazyMutex.Unlock()
+	// if isDebugMode {
+	// 	lazyCacheObject := PerformanceAnalyser[newObject.FileName]
+	// 	lazyCacheObject.MEMORY_READ_COUNT++
+	// 	PerformanceAnalyser[newObject.FileName] = lazyCacheObject
+	// }
+	return lfd.gc.Get(newObject.FileName)
+}
+
+// GetAll objects from gc
+// func (lfd *LazyFDBHelper) GetAllFromCache() map[interface{}]interface{} {
+// 	return lfd.gc.Items()
+// }
+
+// GetCacheLength Get Cache Length
+func (lfd *LazyFDBHelper) GetCacheLength() int {
+	return lfd.gc.ItemCount()
+}
+
+// PurgeCache first saves all pending data to FDB and then purges the cache.
+func (lfd *LazyFDBHelper) PurgeCache() {
+
+	// Fetch All Rows and then save into db
+
+	// cachedObjectList := lfd.gc.GetALL()
+	for item := range lfd.gc.Items() {
+
+		//TODO: catch errors
+		cachedObject, ok := lfd.gc.Get(item)
+		if !ok {
+			loggermdl.LogError("error occured while getting ", item, " from gcache")
+		}
+		cachedObjectActual, conversionSuccessful := cachedObject.(LazyCacheObject)
+
+		if conversionSuccessful && cachedObjectActual.ChangeCount > 0 {
+			cachedObjectActual.IsLocked = true
+			saveDataToFDB(cachedObjectActual.FileName, cachedObjectActual.InterfaceData, cachedObjectActual.GJSONData)
+			cachedObjectActual.ChangeCount = 0
+			cachedObjectActual.IsLocked = false
+			lazyMutex.Lock()
+			lfd.DISK_WRITE_COUNT++
+			lazyMutex.Unlock()
+		}
+	}
+	lazyMutex.Lock()
+	lfd.gc.Flush()
+	lazyMutex.Unlock()
+}
+
+// =================================== > Lazywriter for appending data to file < ============================================
+
+// StartProcessForAppend configures the cache and starts the heartbeat task that periodically appends changed objects to their files.
+func (lfd *LazyFDBHelper) StartProcessForAppend(objectCount int, taskName string,
+	intervalTime int, sleepTime int, maxNumberOfRetry int, isDebugMode bool) {
+	// defaults; overridden below when valid parameters are provided
+	lfd.INTERVAL_TIME_SEC = 5
+	lfd.RETRY_SLEEP_TIME_SEC = 1 * time.Second
+	// isDebugMode = isDebugMode
+	// check parameter is valid - if not keep default else set new value
+	if intervalTime > 0 {
+		lfd.INTERVAL_TIME_SEC = intervalTime
+	}
+
+	// check parameter is valid - if not keep default else set new value
+	if sleepTime > 0 {
+		lfd.RETRY_SLEEP_TIME_SEC = time.Duration(sleepTime) * time.Millisecond
+	}
+
+	if maxNumberOfRetry <= 0 {
+		lfd.MAX_NUMBER_OF_RETRY = 3
+	} else {
+		lfd.MAX_NUMBER_OF_RETRY = maxNumberOfRetry
+	}
+
+	//start Heartbeat event
+	lfd.Heartbeat, _ = heartbeat.NewTast(taskName, lfd.INTERVAL_TIME_SEC)
+
+	// Use default ARC algo for store
+	lfd.gc = cache.New(lfd.CacheExpirationTime, lfd.CacheCleanUpInterval)
+
+	//Init timer
+	lazyMutex.Lock()
+	lfd.Heartbeat.Start(func() error {
+
+		// check if process already running
+		if lfd.IsProcessRunning {
+			// if the process is already running, skip this tick
+			return nil
+		}
+
+		// process each object in the cache and append its data to disk
+		lfd.appendObjectsToFdb()
+		return nil
+	})
+	lazyMutex.Unlock()
+}
+
+// appendObjectsToFdb fetches all objects in the store and appends each changed
+// one to its file, one by one; it currently runs single-threaded.
+func (lfd *LazyFDBHelper) appendObjectsToFdb() {
+	lfd.IsProcessRunning = true
+
+	// Fetch All Rows and then save into db
+	for item := range lfd.gc.Items() {
+
+		//TODO: catch errors
+		cacheObjectraw, _ := lfd.gc.Get(item)
+		cacheObjActual, _ := cacheObjectraw.(LazyCacheObject)
+
+		if cacheObjActual.ChangeCount > 0 {
+
+			cacheObjActual.IsLocked = true
+			appendDataToFDB(cacheObjActual.FileName, cacheObjActual.InterfaceData, cacheObjActual.GJSONData)
+			cacheObjActual.ChangeCount = 0
+			cacheObjActual.IsLocked = false
+			lfd.gc.Set(cacheObjActual.FileName, cacheObjActual, lfd.CacheExpirationTime)
+
+			lazyMutex.Lock()
+			lfd.MEMORY_WRITE_COUNT++
+			lfd.DISK_WRITE_COUNT++
+
+			// if isDebugMode {
+			// 	lazyCacheObject := PerformanceAnalyser[cacheObjActual.FileName]
+			// 	lazyCacheObject.MEMORY_WRITE_COUNT++
+			// 	PerformanceAnalyser[cacheObjActual.FileName] = lazyCacheObject
+			// }
+			lazyMutex.Unlock()
+		}
+	}
+	lfd.NumberOfUpdateAttempts = lfd.NumberOfUpdateAttempts + 1
+
+	lfd.IsProcessRunning = false
+}
+
+// appendDataToFDB appends data to a file on disk, marshaling with ffjson when
+// no pre-rendered JSON string is available; '\n' separates consecutive objects.
+func appendDataToFDB(filePath string, objectData interface{}, GJSONData gjson.Result) {
+	//TODO: catch errors
+	jsonString := GJSONData.String()
+	byteArray := []byte(jsonString)
+	if jsonString == "" {
+		var marshalError error
+		byteArray, marshalError = ffjson.Marshal(objectData)
+		if marshalError != nil {
+			loggermdl.LogError("error occurred while marshaling data ", marshalError)
+			return
+		}
+	}
+	byteArray = append(byteArray, []byte("\n")...)
+	saveError := fdb.AppendDataToFDBWithoutQueue(filePath, byteArray, false)
+	if saveError != nil {
+		loggermdl.LogError("error occured while saving data ", saveError)
+	}
+}
+
+// RemoveDataFromCacheForAppend flushes pending appends for the given identifier and removes matching entries from the cache.
+func (lfd *LazyFDBHelper) RemoveDataFromCacheForAppend(identifier string) {
+
+	// Fetch All Rows and then save into db
+
+	cachedObjectList := lfd.gc.Items()
+	for item := range cachedObjectList {
+
+		//TODO: catch errors
+		cachedObject, ok := lfd.gc.Get(item)
+		if !ok {
+			loggermdl.LogError("error occured while getting ", item, " from gcache")
+		}
+		cachedObjectActual, _ := cachedObject.(LazyCacheObject)
+		if cachedObjectActual.Identifier == identifier {
+			if cachedObjectActual.ChangeCount > 0 {
+				cachedObjectActual.IsLocked = true
+				appendDataToFDB(cachedObjectActual.FileName, cachedObjectActual.InterfaceData, cachedObjectActual.GJSONData)
+				cachedObjectActual.ChangeCount = 0
+				cachedObjectActual.IsLocked = false
+				lazyMutex.Lock()
+				lfd.DISK_WRITE_COUNT++
+				lazyMutex.Unlock()
+				// if isDebugMode {
+				// 	lazyCacheObject := PerformanceAnalyser[cachedObjectActual.FileName]
+				// 	lazyCacheObject.DISK_WRITE_COUNT++
+				// 	PerformanceAnalyser[cachedObjectActual.FileName] = lazyCacheObject
+				// }
+			}
+			lazyMutex.Lock()
+			lfd.gc.Delete(cachedObjectActual.FileName)
+			lazyMutex.Unlock()
+
+		}
+	}
+}
diff --git a/v2/dalmdl/lazywriter/lazywriter_test.go b/v2/dalmdl/lazywriter/lazywriter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b5ba3fa5409fa6393acbfab3d7a20666b7187f7e
--- /dev/null
+++ b/v2/dalmdl/lazywriter/lazywriter_test.go
@@ -0,0 +1,42 @@
+package lazywriter
+
+import (
+	"fmt"
+	"strconv"
+	"sync"
+	"testing"
+
+	"github.com/tidwall/gjson"
+
+	"github.com/noaway/heartbeat"
+)
+
+var masterLazzyObj LazyFDBHelper
+
+func init() {
+	masterLazzyObj.StartProcessForAppend(10000, "append", 100, 5, 5, false)
+}
+
+func TestStartProcess(t *testing.T) {
+
+	task, _ := heartbeat.NewTast("Sample", 3)
+	startIndex := 0
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	stopOnce := sync.Once{} // releases the WaitGroup exactly once so the test can finish
+
+	task.Start(func() error {
+		lazyCacheObject := LazyCacheObject{}
+		lazyCacheObject.FileName = "logs.json"
+		lazyCacheObject.Identifier = "logs.json"
+		startIndex += 3
+		tmp := `{"name":"SampleObject", "timeInterval": "` + strconv.Itoa(startIndex) + `"}`
+		lazyCacheObject.GJSONData = gjson.Parse(tmp)
+		if ok := masterLazzyObj.SaveOrUpdateDataInCache(lazyCacheObject); ok {
+			fmt.Println("Object appended")
+		} else {
+			fmt.Println("Object not appended")
+		}
+		if startIndex >= 9 {
+			stopOnce.Do(wg.Done) // stop waiting after a few ticks; otherwise wg.Wait() blocks forever
+		}
+		return nil
+	})
+	wg.Wait()
+}
diff --git a/v2/dalmdl/mongodb/mongodb.go b/v2/dalmdl/mongodb/mongodb.go
new file mode 100644
index 0000000000000000000000000000000000000000..d44842402ee1102b5b2d2ca78c6eec97a110f782
--- /dev/null
+++ b/v2/dalmdl/mongodb/mongodb.go
@@ -0,0 +1,584 @@
+package mongodb
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+
+	"gopkg.in/mgo.v2/bson"
+
+	"github.com/tidwall/gjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/configmdl"
+
+	mgo "gopkg.in/mgo.v2"
+)
+
+// MongoHost holds the connection settings for a single MongoDB host.
+type MongoHost struct {
+	HostName        string        `json:"hostName"`
+	Server          string        `json:"server"`
+	Port            int           `json:"port"`
+	Username        string        `json:"username"`
+	Password        string        `json:"password"`
+	Database        string        `json:"database"`
+	IsDefault       bool          `json:"isDefault"`
+	MaxIdleConns    int           `json:"maxIdleConns" `
+	MaxOpenConns    int           `json:"maxOpenConns"`
+	ConnMaxLifetime time.Duration `json:"connMaxLifetime" `
+	IsDisabled      bool          `json:"isDisabled" `
+}
+
+// TomlConfig maps host names to their MongoHost configurations.
+type TomlConfig struct {
+	MongoHosts map[string]MongoHost
+}
+
+var instances map[string]*mgo.Session
+var mutex sync.Mutex
+var once sync.Once
+var config TomlConfig
+var defaultHost string
+
+// func init() {
+// 	instances = make(map[string]*mgo.Session)
+// }
+
+// Init initializes Mongo connections from the given TOML file
+func Init(tomlFilepath, defaultHostName string) error {
+	var sessionError error
+	once.Do(func() {
+		mutex.Lock()
+		defer mutex.Unlock()
+		instances = make(map[string]*mgo.Session)
+		_, err := configmdl.InitConfig(tomlFilepath, &config)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			sessionError = err
+			return
+		}
+		for hostName, hostDetails := range config.MongoHosts {
+			session, err := mgo.DialWithInfo(&mgo.DialInfo{
+				Addrs:    []string{bindMongoServerWithPort(hostDetails.Server, hostDetails.Port)},
+				Username: hostDetails.Username,
+				Timeout:  time.Second * 3,
+				Password: hostDetails.Password,
+				Database: hostDetails.Database,
+			})
+
+			if err != nil {
+				sessionError = err
+				loggermdl.LogError(sessionError)
+				return
+			}
+			instances[hostName] = session
+
+		}
+		defaultHost = defaultHostName
+	})
+	return sessionError
+}
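+
+// Illustrative sketch of the expected TOML shape (hypothetical host "local";
+// keys mirror the MongoHost fields above):
+//
+//	[MongoHosts.local]
+//	server = "127.0.0.1"
+//	port = 27017
+//	username = "admin"
+//	password = "secret"
+//	database = "mydb"
+//	isDefault = true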
+
+// InitUsingJSON initializes Mongo connections from the given host configurations
+func InitUsingJSON(configs []MongoHost) error {
+	var sessionError error
+	once.Do(func() {
+		mutex.Lock()
+		defer mutex.Unlock()
+		config.MongoHosts = make(map[string]MongoHost)
+		instances = make(map[string]*mgo.Session)
+		for _, hostDetails := range configs {
+			if hostDetails.IsDisabled {
+				continue
+			}
+			session, err := mgo.DialWithInfo(&mgo.DialInfo{
+				Addrs:    []string{bindMongoServerWithPort(hostDetails.Server, hostDetails.Port)},
+				Username: hostDetails.Username,
+				Password: hostDetails.Password,
+				Timeout:  time.Second * 3,
+				Database: hostDetails.Database,
+			})
+
+			if err != nil {
+				sessionError = err
+				loggermdl.LogError(sessionError)
+				return
+			}
+			instances[hostDetails.HostName] = session
+			if hostDetails.IsDefault {
+				defaultHost = hostDetails.HostName
+			}
+			config.MongoHosts[hostDetails.HostName] = hostDetails
+		}
+	})
+	return sessionError
+}
+
+// DeleteSession removes the session registered for the given host name.
+func DeleteSession(hostName string) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if _, ok := instances[hostName]; !ok {
+		return errormdl.Wrap("NO_HOST_FOUND")
+	}
+	delete(instances, hostName)
+	return nil
+}
+
+// InitNewSession dials a new session for the given host and registers it.
+func InitNewSession(hostDetails MongoHost) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if instances == nil {
+		instances = make(map[string]*mgo.Session)
+	}
+	if _, ok := instances[hostDetails.HostName]; ok {
+		return errormdl.Wrap("DUPLICATE_HOSTNAME")
+	}
+
+	session, err := mgo.DialWithInfo(&mgo.DialInfo{
+		Addrs:    []string{bindMongoServerWithPort(hostDetails.Server, hostDetails.Port)},
+		Username: hostDetails.Username,
+		Timeout:  time.Second * 3,
+		Password: hostDetails.Password,
+		Database: hostDetails.Database,
+	})
+
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	instances[hostDetails.HostName] = session
+	return nil
+}
+
+// GetMongoConnection returns a copy of the session for the given host, falling back to the default host when hostName is empty.
+func GetMongoConnection(hostName string) (*mgo.Session, error) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if instances == nil {
+		return nil, errormdl.Wrap("MONGO_INIT_NOT_DONE")
+	}
+	if hostName == "" {
+		if instance, ok := instances[defaultHost]; ok {
+			statemdl.MongoHits()
+			return instance.Copy(), nil
+		}
+	}
+	if instance, ok := instances[hostName]; ok {
+		statemdl.MongoHits()
+		return instance.Copy(), nil
+	}
+	return nil, errormdl.Wrap("Session not found for instance: " + hostName)
+}
+
+// MongoDAO provides collection-level data access operations.
+type MongoDAO struct {
+	hostName       string
+	collectionName string
+}
+
+// GetMongoDAOWithHost returns a DAO bound to the given host and collection
+func GetMongoDAOWithHost(host, collection string) *MongoDAO {
+	return &MongoDAO{
+		hostName:       host,
+		collectionName: collection,
+	}
+}
+
+// GetMongoDAO returns a DAO for the given collection on the default host
+func GetMongoDAO(collection string) *MongoDAO {
+	return &MongoDAO{
+		collectionName: collection,
+	}
+}
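+
+// Illustrative usage sketch (hypothetical host and collection names):
+//
+//	dao := GetMongoDAOWithHost("local", "users")
+//	if err := dao.SaveData(map[string]interface{}{"name": "asha"}); err != nil {
+//		loggermdl.LogError(err)
+//	}
+//	rs, err := dao.GetData(map[string]interface{}{"name": "asha"})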
+
+// SaveData inserts a document into the collection
+func (mg *MongoDAO) SaveData(data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	insertError := collection.Insert(data)
+	if errormdl.CheckErr1(insertError) != nil {
+		return errormdl.CheckErr1(insertError)
+	}
+	return nil
+}
+
+// UpdateAll applies a $set update to all documents matching the selector
+func (mg *MongoDAO) UpdateAll(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	_, updateError := collection.UpdateAll(selector, bson.M{"$set": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// Update applies a $set update to a single document matching the selector
+func (mg *MongoDAO) Update(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	updateError := collection.Update(selector, bson.M{"$set": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// GetData returns all documents matching the selector as a gjson.Result
+func (mg *MongoDAO) GetData(selector map[string]interface{}) (*gjson.Result, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return nil, errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return nil, errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	var result []interface{}
+	collection.Find(selector).All(&result)
+	ba, marshalError := json.Marshal(result)
+	if errormdl.CheckErr2(marshalError) != nil {
+		return nil, errormdl.CheckErr2(marshalError)
+	}
+	rs := gjson.ParseBytes(ba)
+	return &rs, nil
+}
+
+// DeleteData removes a single document matching the selector
+func (mg *MongoDAO) DeleteData(selector map[string]interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	deleteError := collection.Remove(selector)
+	if errormdl.CheckErr1(deleteError) != nil {
+		return errormdl.CheckErr1(deleteError)
+	}
+	return deleteError
+}
+
+// DeleteAll removes all documents matching the selector
+func (mg *MongoDAO) DeleteAll(selector map[string]interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	_, deleteError := collection.RemoveAll(selector)
+	if errormdl.CheckErr1(deleteError) != nil {
+		return errormdl.CheckErr1(deleteError)
+	}
+	return deleteError
+}
+
+// GetProjectedData returns documents matching the selector, restricted to the projected fields
+func (mg *MongoDAO) GetProjectedData(selector map[string]interface{}, projector map[string]interface{}) (*gjson.Result, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return nil, errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return nil, errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	var result []interface{}
+	collection.Find(selector).Select(projector).All(&result)
+	ba, marshalError := json.Marshal(result)
+	if errormdl.CheckErr2(marshalError) != nil {
+		return nil, errormdl.CheckErr2(marshalError)
+	}
+	rs := gjson.ParseBytes(ba)
+	return &rs, nil
+}
+
+// GetAggregateData runs an aggregation pipeline and returns the result
+func (mg *MongoDAO) GetAggregateData(selector interface{}) (*gjson.Result, error) {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return nil, errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return nil, errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	var result []bson.M
+	collection.Pipe(selector).All(&result)
+	ba, marshalError := json.Marshal(result)
+	if errormdl.CheckErr2(marshalError) != nil {
+		return nil, errormdl.CheckErr2(marshalError)
+	}
+	rs := gjson.ParseBytes(ba)
+	return &rs, nil
+}
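+
+// Illustrative aggregation sketch (hypothetical fields): the selector is an
+// ordered slice of pipeline stages, as accepted by mgo's Pipe.
+//
+//	pipeline := []bson.M{
+//		{"$match": bson.M{"status": "active"}},
+//		{"$group": bson.M{"_id": "$city", "count": bson.M{"$sum": 1}}},
+//	}
+//	rs, err := dao.GetAggregateData(pipeline)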
+
+// Upsert applies a $set update to a matching document, inserting it when absent
+func (mg *MongoDAO) Upsert(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	_, updateError := collection.Upsert(selector, bson.M{"$set": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// PushData appends values to array fields ($push) on all matching documents
+func (mg *MongoDAO) PushData(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	_, updateError := collection.UpdateAll(selector, bson.M{"$push": data})
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// CustomUpdate applies a caller-supplied update document to all matching documents
+func (mg *MongoDAO) CustomUpdate(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	_, updateError := collection.UpdateAll(selector, data)
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+// CustomUpdateOne applies the given update document to the first document matching the selector
+func (mg *MongoDAO) CustomUpdateOne(selector map[string]interface{}, data interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if !ok {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	updateError := collection.Update(selector, data)
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
+
+/************************* BULK Functionalities ******************************/
+
+// BulkSaveData saves data in MongoDB in bulk
+func (mg *MongoDAO) BulkSaveData(data []interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	b := collection.Bulk()
+	b.Insert(data...)
+	_, insertError := b.Run()
+	if errormdl.CheckErr1(insertError) != nil {
+		loggermdl.LogError(insertError)
+		return errormdl.CheckErr1(insertError)
+	}
+	return nil
+}
+
+// BulkUpdateData updates data in MongoDB in bulk; data must hold selector/update pairs (see the example below)
+func (mg *MongoDAO) BulkUpdateData(data []interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	b := collection.Bulk()
+	b.UpdateAll(data...)
+	_, updateError := b.Run()
+	if errormdl.CheckErr1(updateError) != nil {
+		return errormdl.CheckErr1(updateError)
+	}
+	return nil
+}
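+
+// Example (illustrative sketch): mgo's Bulk.UpdateAll expects flat
+// selector/update pairs, so data must be laid out as below.
+//
+//	pairs := []interface{}{
+//		bson.M{"name": "a"}, bson.M{"$set": bson.M{"age": 30}},
+//		bson.M{"name": "b"}, bson.M{"$set": bson.M{"age": 40}},
+//	}
+//	err := GetMongoDAO("host1", "persons").BulkUpdateData(pairs)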
+
+// BulkDeleteData deletes data from MongoDB in bulk; data must hold the selectors of the documents to remove
+func (mg *MongoDAO) BulkDeleteData(data []interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	b := collection.Bulk()
+	b.RemoveAll(data...)
+	_, deleteError := b.Run()
+	if errormdl.CheckErr1(deleteError) != nil {
+		return errormdl.CheckErr1(deleteError)
+	}
+	return nil
+}
+
+// BulkUpsertData upserts data in MongoDB in bulk; data must hold selector/update pairs
+func (mg *MongoDAO) BulkUpsertData(data []interface{}) error {
+	session, sessionError := GetMongoConnection(mg.hostName)
+	if errormdl.CheckErr(sessionError) != nil {
+		return errormdl.CheckErr(sessionError)
+	}
+	defer session.Close()
+	if mg.hostName == "" {
+		mg.hostName = defaultHost
+	}
+	db, ok := config.MongoHosts[mg.hostName]
+	if errormdl.CheckBool(!ok) {
+		return errormdl.Wrap("No_Configuration_Found_For_Host: " + mg.hostName)
+	}
+	collection := session.DB(db.Database).C(mg.collectionName)
+	b := collection.Bulk()
+	b.Upsert(data...)
+	_, upsertError := b.Run()
+	if errormdl.CheckErr1(upsertError) != nil {
+		return errormdl.CheckErr1(upsertError)
+	}
+	return nil
+}
+
+func bindMongoServerWithPort(server string, port int) string {
+	// if port is not set, bind to the server with the default port 27017
+	var serverURI string
+	if port <= 0 {
+		serverURI = server + ":27017"
+	} else {
+		serverURI = server + ":" + strconv.Itoa(port)
+	}
+	return serverURI
+}
diff --git a/v2/dalmdl/mongodb/mongodb_test.go b/v2/dalmdl/mongodb/mongodb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b2f05fbf0c1df6e405bdbfdbd5a95f73b23a57fe
--- /dev/null
+++ b/v2/dalmdl/mongodb/mongodb_test.go
@@ -0,0 +1,48 @@
+package mongodb
+
+type sample struct {
+	Name string `bson:"name"`
+}
+
+// func TestInit(t *testing.T) {
+// 	tomlFilepath := "../../testingdata/testData/config/config.toml"
+// 	err := Init(tomlFilepath)
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func TestGetMongoConnection(t *testing.T) {
+// 	_, err := getMongoConnection("host1")
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+
+// func TestSaveData(t *testing.T) {
+// 	sample := sample{}
+// 	sample.Name = "test"
+// 	err := GetMongoDAO("host1", "test").SaveData(sample)
+// 	assert.NoError(t, err, "No Error Expected")
+// }
+// func Test1SaveData(t *testing.T) {
+// 	sample := sample{}
+// 	sample.Name = "test"
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	err := GetMongoDAO("host1", "test").SaveData(sample)
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// 	assert.Error(t, err, "Error Expected")
+// }
+// func Test2SaveData(t *testing.T) {
+// 	sample := sample{}
+// 	sample.Name = "test"
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	err := GetMongoDAO("host1", "test").SaveData(sample)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "Error Expected")
+// }
+
+// func Test3SaveData(t *testing.T) {
+// 	sample := sample{}
+// 	sample.Name = "test"
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	err := GetMongoDAO("host1", "test").SaveData(sample)
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	assert.Error(t, err, "Error Expected")
+// }
diff --git a/v2/dalmdl/mysql/mysql-config.toml b/v2/dalmdl/mysql/mysql-config.toml
new file mode 100644
index 0000000000000000000000000000000000000000..03f29eb1bdb83754e14c68c07ebd96d07ebe5b69
--- /dev/null
+++ b/v2/dalmdl/mysql/mysql-config.toml
@@ -0,0 +1,10 @@
+# configuration file for mysql
+[MysqlHosts]
+
+    
+    [MysqlHosts.dev]
+        Server = "10.2.10.15:3306"
+        Username = "dev"
+        Password = "dev#@!"
+        Protocol="tcp"
+        Database = "CoreStudio"
diff --git a/v2/dalmdl/mysql/mysql.go b/v2/dalmdl/mysql/mysql.go
new file mode 100644
index 0000000000000000000000000000000000000000..673ec659a316b096ff1d3d0c553176d831b83f21
--- /dev/null
+++ b/v2/dalmdl/mysql/mysql.go
@@ -0,0 +1,363 @@
+package mysql
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+
+	_ "github.com/go-sql-driver/mysql"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/sjsonhelpermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/configmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"github.com/gocraft/dbr/v2"
+)
+
+// Holds global connections keyed by host name (pooling is provided by the sql driver)
+var sqlConnections map[string]*dbr.Connection
+var connectionError error
+var sqlOnce sync.Once
+var config tomlConfig
+var defaultHost string
+
+// MySQLConnection - MySQLConnection
+type MySQLConnection struct {
+	HostName        string        `json:"hostName" bson:"hostName"`
+	Server          string        `json:"server" bson:"server"`
+	Port            int           `json:"port" bson:"port"`
+	Username        string        `json:"username" bson:"username"`
+	Password        string        `json:"password" bson:"password"`
+	Protocol        string        `json:"protocol" bson:"protocol"`
+	Database        string        `json:"database" bson:"database"`
+	Parameters      []param       `json:"params" bson:"params"`
+	MaxIdleConns    int           `json:"maxIdleConns" bson:"maxIdleConns"`
+	MaxOpenConns    int           `json:"maxOpenConns" bson:"maxOpenConns"`
+	ConnMaxLifetime time.Duration `json:"connMaxLifetime" bson:"connMaxLifetime"`
+	IsDefault       bool          `json:"isDefault" bson:"isDefault"`
+	IsDisabled      bool          `json:"isDisabled" bson:"isDisabled"`
+}
+
+// InitUsingJSON initializes MySQL connections from the given configuration list
+func InitUsingJSON(configs []MySQLConnection) error {
+	sqlOnce.Do(func() {
+		sqlConnections = make(map[string]*dbr.Connection)
+
+		for _, connectionDetails := range configs {
+			if connectionDetails.IsDisabled {
+				continue
+			}
+			connection, err := InitConnection(connectionDetails)
+			if errormdl.CheckErr1(err) != nil {
+				loggermdl.LogError("Init dbr.Open Err : ", err)
+				connectionError = err
+				return
+			}
+			sqlConnections[connectionDetails.HostName] = connection
+			if connectionDetails.IsDefault {
+				defaultHost = connectionDetails.HostName
+			}
+		}
+	})
+	return connectionError
+}
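+
+// Example (illustrative sketch; server address and credentials are placeholders):
+//
+//	err := InitUsingJSON([]MySQLConnection{{
+//		HostName:  "dev",
+//		Server:    "127.0.0.1",
+//		Port:      3306,
+//		Username:  "dev",
+//		Password:  "secret",
+//		Protocol:  "tcp",
+//		Database:  "CoreStudio",
+//		IsDefault: true,
+//	}})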
+
+// InitConnection opens a single MySQL connection from the given connection details
+func InitConnection(connectionDetails MySQLConnection) (*dbr.Connection, error) {
+	paramsString := strings.Builder{}
+
+	if len(connectionDetails.Parameters) > 0 {
+		for paramIndex, param := range connectionDetails.Parameters {
+			if paramsString.String() == "" {
+				paramsString.WriteString("?")
+			}
+			paramsString.WriteString(param.ParamKey)
+			paramsString.WriteString("=")
+			paramsString.WriteString(param.ParamValue)
+
+			hasNextParam := paramIndex+1 < len(connectionDetails.Parameters)
+			if hasNextParam {
+				paramsString.WriteString("&")
+			}
+		}
+	}
+	conStr := strings.Builder{}
+	conStr.WriteString(connectionDetails.Username)
+	conStr.WriteString(":")
+	conStr.WriteString(connectionDetails.Password)
+	conStr.WriteString("@")
+	conStr.WriteString(connectionDetails.Protocol)
+	conStr.WriteString("(")
+	conStr.WriteString(connectionDetails.Server)
+	if connectionDetails.Port <= 0 {
+		conStr.WriteString(":3306") // mysql default port is 3306
+	} else {
+		conStr.WriteString(":")
+		conStr.WriteString(strconv.Itoa(connectionDetails.Port))
+	}
+	conStr.WriteString(")/")
+	conStr.WriteString(connectionDetails.Database)
+	conStr.WriteString(paramsString.String())
+	connection, err := dbr.Open("mysql", conStr.String(), nil)
+	if errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("Init dbr.Open Err : ", err)
+		return nil, err
+	}
+	if connectionDetails.MaxIdleConns == 0 {
+		connectionDetails.MaxIdleConns = constantmdl.MAX_IDLE_CONNECTIONS // default is 2
+	}
+	if connectionDetails.MaxOpenConns == 0 {
+		connectionDetails.MaxOpenConns = constantmdl.MAX_OPEN_CONNECTIONS // default there's no limit
+	}
+	if connectionDetails.ConnMaxLifetime == 0 {
+		connectionDetails.ConnMaxLifetime = constantmdl.CONNECTION_MAX_LIFETIME
+	}
+	connection.SetMaxIdleConns(connectionDetails.MaxIdleConns)
+	connection.SetMaxOpenConns(connectionDetails.MaxOpenConns)
+	connection.SetConnMaxLifetime(connectionDetails.ConnMaxLifetime)
+	return connection, nil
+}
+
+type param struct {
+	ParamKey   string `json:"paramkey" bson:"paramkey"`
+	ParamValue string `json:"paramvalue" bson:"paramvalue"`
+}
+
+type tomlConfig struct {
+	MysqlHosts map[string]MySQLConnection
+}
+
+// Init initializes MYSQL Connections for given toml file
+func Init(tomlFilepath string, defaultHostName string) (map[string]*dbr.Connection, error) {
+	sqlOnce.Do(func() {
+		sqlConnections = make(map[string]*dbr.Connection)
+		_, err := configmdl.InitConfig(tomlFilepath, &config)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError("Init InitConfig Err : ", err)
+			connectionError = err
+			return
+		}
+		for connectionName, connectionDetails := range config.MysqlHosts {
+			paramsString := ""
+			if len(connectionDetails.Parameters) > 0 {
+				for paramIndex, param := range connectionDetails.Parameters {
+					if paramsString == "" {
+						paramsString = "?"
+					}
+					paramsString = paramsString + param.ParamKey + "=" + param.ParamValue
+					hasNextParam := paramIndex+1 < len(connectionDetails.Parameters)
+					if hasNextParam {
+						paramsString = paramsString + "&"
+					}
+				}
+			}
+
+			connection, err := dbr.Open("mysql", connectionDetails.Username+":"+connectionDetails.Password+"@"+connectionDetails.Protocol+"("+connectionDetails.Server+")/"+connectionDetails.Database+paramsString, nil)
+			if errormdl.CheckErr1(err) != nil {
+				loggermdl.LogError("Init dbr.Open Err : ", err)
+				connectionError = err
+				return
+			}
+			if connectionDetails.MaxIdleConns == 0 {
+				connectionDetails.MaxIdleConns = constantmdl.MAX_IDLE_CONNECTIONS // default is 2
+			}
+			if connectionDetails.MaxOpenConns == 0 {
+				connectionDetails.MaxOpenConns = constantmdl.MAX_OPEN_CONNECTIONS // default there's no limit
+			}
+			if connectionDetails.ConnMaxLifetime == 0 {
+				connectionDetails.ConnMaxLifetime = constantmdl.CONNECTION_MAX_LIFETIME
+			}
+			connection.SetMaxIdleConns(connectionDetails.MaxIdleConns)
+			connection.SetMaxOpenConns(connectionDetails.MaxOpenConns)
+			connection.SetConnMaxLifetime(connectionDetails.ConnMaxLifetime)
+			sqlConnections[connectionName] = connection
+		}
+		defaultHost = defaultHostName
+	})
+	return sqlConnections, errormdl.CheckErr2(connectionError)
+}
+
+// GetMYSQLConnection returns the connection for the given host name; the default host is used when the name is empty
+func GetMYSQLConnection(connectionName string) (*dbr.Connection, error) {
+	if errormdl.CheckBool(sqlConnections == nil) {
+		loggermdl.LogError("GetMYSQLConnection Err : ", errormdl.Wrap("MYSQL_INIT_NOT_DONE"))
+		return nil, errormdl.Wrap("MYSQL_INIT_NOT_DONE")
+	}
+	if connectionName == "" {
+		if instance, keyExist := sqlConnections[defaultHost]; keyExist {
+			statemdl.MySQLHits()
+			return instance, nil
+		}
+	}
+	if session, keyExist := sqlConnections[connectionName]; keyExist {
+		statemdl.MySQLHits()
+		return session, nil
+	}
+	loggermdl.LogError("GetMYSQLConnection Err : ", errormdl.Wrap("Connection not found for host: "+connectionName))
+	return nil, errormdl.Wrap("Connection not found for host: " + connectionName)
+}
+
+// MySQLDAO - MySQL DAO struct
+type MySQLDAO struct {
+	hostName string
+}
+
+// GetMySQLDAO returns a MySQL DAO instance bound to the default host
+func GetMySQLDAO() *MySQLDAO {
+	return &MySQLDAO{
+		hostName: defaultHost,
+	}
+}
+
+// GetMySQLDAOWithHost returns a MySQL DAO instance bound to the given host
+func GetMySQLDAOWithHost(host string) *MySQLDAO {
+	return &MySQLDAO{
+		hostName: host,
+	}
+}
+
+// ExecQuery executes an insert/update/delete statement and returns the last insert id
+func (md *MySQLDAO) ExecQuery(query string, args ...interface{}) (string, error) {
+	connection, connectionError := GetMYSQLConnection(md.hostName)
+	if errormdl.CheckErr(connectionError) != nil {
+		loggermdl.LogError("SaveUpdateOrDelete GetMYSQLConnection Err : ", connectionError)
+		return "", errormdl.CheckErr(connectionError)
+	}
+	pingError := connection.Ping()
+	if errormdl.CheckErr(pingError) != nil && pingError != driver.ErrBadConn {
+		loggermdl.LogError(pingError)
+		return "", errormdl.CheckErr(pingError)
+	}
+	result, execError := connection.Exec(query, args...)
+	if errormdl.CheckErr(execError) != nil {
+		loggermdl.LogError(execError)
+		return "", errormdl.CheckErr(execError)
+	}
+
+	_, affectError := result.RowsAffected()
+	if errormdl.CheckErr(affectError) != nil {
+		loggermdl.LogError(affectError)
+		return "", errormdl.CheckErr(affectError)
+	}
+	ID, err := result.LastInsertId()
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return "", errormdl.CheckErr(err)
+	}
+	return strconv.Itoa(int(ID)), nil
+}
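+
+// Example (illustrative sketch; table and columns are placeholders):
+//
+//	lastID, err := GetMySQLDAO().ExecQuery(
+//		"INSERT INTO person (name, age) VALUES (?, ?)", "john", 25)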
+
+// SelectQuery executes the query and returns the rows as a gjson.Result array
+func (md *MySQLDAO) SelectQuery(query string, args ...interface{}) (*gjson.Result, error) {
+	connection, connectionError := GetMYSQLConnection(md.hostName)
+	if errormdl.CheckErr(connectionError) != nil {
+		loggermdl.LogError("SaveUpdateOrDelete GetMYSQLConnection Err : ", connectionError)
+		return nil, errormdl.CheckErr(connectionError)
+	}
+
+	// loggermdl.LogSpot(connection)
+	pingError := connection.Ping()
+	if errormdl.CheckErr(pingError) != nil && pingError != driver.ErrBadConn {
+		loggermdl.LogError(pingError)
+		return nil, errormdl.CheckErr(pingError)
+	}
+	rows, queryError := connection.Query(query, args...)
+	if errormdl.CheckErr(queryError) != nil {
+		loggermdl.LogError(queryError)
+		return nil, errormdl.CheckErr(queryError)
+	}
+	defer rows.Close()
+	columns, err := rows.Columns()
+	if errormdl.CheckErr2(err) != nil {
+		loggermdl.LogError("SelectQuery rows.Columns() Err : ", err)
+		return nil, errormdl.CheckErr2(err)
+	}
+	values := make([]interface{}, len(columns))
+	valuePtrs := make([]interface{}, len(columns))
+	tableData := "[]"
+	for rows.Next() {
+		for i := 0; i < len(columns); i++ {
+			valuePtrs[i] = &values[i]
+		}
+		scanError := rows.Scan(valuePtrs...)
+		if errormdl.CheckErr3(scanError) != nil {
+			loggermdl.LogError("SelectQuery rows.Scan Err : ", scanError)
+			return nil, errormdl.CheckErr3(scanError)
+		}
+		data, err := sjsonhelpermdl.SetMultiple("", columns, values)
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectQuery sjsonhelpermdl.SetMultiple Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+		tableData, err = sjson.Set(tableData, "-1", gjson.Parse(data).Value())
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectQuery sjson.Set Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+	}
+	resultSet := gjson.Parse(tableData)
+	return &resultSet, nil
+}
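+
+// Example (illustrative sketch): rows come back as a JSON array, so individual
+// columns are read through gjson paths.
+//
+//	rs, err := GetMySQLDAO().SelectQuery("SELECT name, age FROM person WHERE age = ?", 25)
+//	if err == nil {
+//		rs.ForEach(func(_, row gjson.Result) bool {
+//			loggermdl.LogDebug(row.Get("name").String())
+//			return true
+//		})
+//	}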
+
+// ExecTxQuery executes a statement inside the given transaction and returns the last insert id
+func ExecTxQuery(query string, tx *sql.Tx, args ...interface{}) (string, error) {
+
+	result, execError := tx.Exec(query, args...)
+	if errormdl.CheckErr(execError) != nil {
+		loggermdl.LogError(execError)
+		return "", errormdl.CheckErr(execError)
+	}
+	_, affectError := result.RowsAffected()
+	if errormdl.CheckErr(affectError) != nil {
+		loggermdl.LogError(affectError)
+		return "", errormdl.CheckErr(affectError)
+	}
+	ID, err := result.LastInsertId()
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return "", errormdl.CheckErr(err)
+	}
+	return strconv.Itoa(int(ID)), nil
+}
+
+// SelectTxQuery executes the query inside the given transaction and returns the rows as a gjson.Result array
+func SelectTxQuery(query string, tx *sql.Tx, args ...interface{}) (*gjson.Result, error) {
+	rows, queryError := tx.Query(query, args...)
+	if errormdl.CheckErr(queryError) != nil {
+		loggermdl.LogError(queryError)
+		return nil, errormdl.CheckErr(queryError)
+	}
+	defer rows.Close()
+	columns, err := rows.Columns()
+	if errormdl.CheckErr2(err) != nil {
+		loggermdl.LogError("SelectTxQuery rows.Columns() Err : ", err)
+		return nil, errormdl.CheckErr2(err)
+	}
+	values := make([]interface{}, len(columns))
+	valuePtrs := make([]interface{}, len(columns))
+	tableData := "[]"
+	for rows.Next() {
+		for i := 0; i < len(columns); i++ {
+			valuePtrs[i] = &values[i]
+		}
+		scanError := rows.Scan(valuePtrs...)
+		if errormdl.CheckErr3(scanError) != nil {
+			loggermdl.LogError("SelectTxQuery rows.Scan Err : ", scanError)
+			return nil, errormdl.CheckErr3(scanError)
+		}
+		data, err := sjsonhelpermdl.SetMultiple("", columns, values)
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectTxQuery sjsonhelpermdl.SetMultiple Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+		tableData, err = sjson.Set(tableData, "-1", gjson.Parse(data).Value())
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectTxQuery sjson.Set Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+	}
+	resultSet := gjson.Parse(tableData)
+	return &resultSet, nil
+}
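+
+// Example transaction flow (illustrative sketch; dbr.Connection embeds *sql.DB,
+// so Begin yields the *sql.Tx these helpers expect; host and table are placeholders):
+//
+//	conn, _ := GetMYSQLConnection("localhost")
+//	tx, err := conn.Begin()
+//	if err == nil {
+//		defer tx.Rollback()
+//		if _, err = ExecTxQuery("UPDATE person SET age = ? WHERE name = ?", tx, 26, "john"); err == nil {
+//			tx.Commit()
+//		}
+//	}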
diff --git a/v2/dalmdl/mysql/mysql_test.go b/v2/dalmdl/mysql/mysql_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c7aeecb1b3608ecad4b8767a7c8e51d4a56838db
--- /dev/null
+++ b/v2/dalmdl/mysql/mysql_test.go
@@ -0,0 +1,261 @@
+package mysql
+
+import (
+	"fmt"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/gocraft/dbr/v2"
+	"github.com/gocraft/dbr/v2/dialect"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInit(t *testing.T) {
+	connmap, err := Init("mysql-config.toml", "localhost")
+	if err != nil {
+		loggermdl.LogError("err: ", err)
+	}
+	db := connmap["localhost"]
+	stmt, _ := db.Prepare("INSERT person SET Name=?")
+	stmt.Exec("astaxie1")
+	assert.NoError(t, err, "This should not return error")
+}
+func TestInitByPing(t *testing.T) {
+	connmap, err := Init("mysql-config.toml", "localhost")
+	if err != nil {
+		loggermdl.LogError("err: ", err)
+	}
+	db := connmap["localhost"]
+	err = db.Ping()
+	if err != nil {
+		panic(err.Error()) // proper error handling instead of panic in your app
+	}
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestInitWrongDefaultHostname(t *testing.T) {
+	connmap, err := Init("mysql-config.toml", "localhostWrong")
+	if err != nil {
+		loggermdl.LogError("err: ", err)
+	}
+	db := connmap["localhost"]
+	stmt, _ := db.Prepare("INSERT person SET Name=?")
+	stmt.Exec("astaxie1")
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestInitMultipleConnections(t *testing.T) {
+	connmap, err := Init("mysql-config.toml", "localhost")
+	if err != nil {
+		loggermdl.LogError("err: ", err)
+	}
+	db := connmap["dev"]
+	err2 := db.Ping()
+	if err2 != nil {
+		panic(err2.Error()) // proper error handling instead of panic in your app
+	}
+	stmt, _ := db.Prepare("INSERT person SET Name=?")
+	stmt.Exec("astaxie111")
+	assert.NoError(t, err, "This should not return error")
+}
+
+// func TestInitErrorWrongTomlFilePath(t *testing.T) {
+// 	_, err := Init("mysql-config1.toml", "")
+// 	assert.Error(t, err, "This should return error")
+// }
+
+// func TestInitError1(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	_, err := Init("mysql-config.toml", "localhost")
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	assert.Error(t, err, "This should return error")
+// }
+func TestGetMYSQLConnection(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	db, err := GetMYSQLConnection("localhost")
+	stmt, _ := db.Prepare("INSERT person SET Name=?")
+	stmt.Exec("rr1")
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestGetMYSQLConnectionEmptyHost(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	_, err := GetMYSQLConnection("")
+	assert.NoError(t, err, "This should not  return error")
+}
+func TestGetMYSQLConnectionWrongHost(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	_, err := GetMYSQLConnection("localhost2")
+	assert.Error(t, err, "This should return error")
+}
+func TestGetMYSQLConnectionWrongDefaultHost(t *testing.T) {
+	Init("mysql-config.toml", "localhost2")
+	_, err := GetMYSQLConnection("localhost")
+	assert.NoError(t, err, "This should not return error")
+}
+
+// func TestGetMYSQLConnectionWrongDefaultHostAndEmptyHost(t *testing.T) {
+// 	Init("mysql-config.toml", "localhost2")
+// 	_, err := GetMYSQLConnection("")
+// 	assert.Error(t, err, "This should return error")
+// }
+
+func TestGetMYSQLConnectionError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	_, err := GetMYSQLConnection("localhost123")
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	assert.Error(t, err, "This should return error")
+}
+func TestSaveUpdateOrDeleteData(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	err := GetMysqlDAO().SaveUpdateOrDeleteData(insertstmt1, "mayurishinde15", 25, true, 2000.00)
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestSaveUpdateOrDeleteDataInitError(t *testing.T) {
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	err := GetMysqlDAO().SaveUpdateOrDeleteData(insertstmt1, "mayuri2", 25, true, 2000.00)
+	assert.Error(t, err, "This should return error")
+}
+func TestSaveUpdateOrDeleteDataError(t *testing.T) {
+	Init("mysql-config.toml", "localhost1")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := GetMysqlDAOWithHost("localhost").SaveUpdateOrDeleteData(insertstmt1, "mayuri2", 25, true, 2000.00)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+func TestSaveUpdateOrDeleteDataError1(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	err := GetMysqlDAO().SaveUpdateOrDeleteData(insertstmt1, "mayuri2", 25, true, 2000.00)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestSaveUpdateOrDeleteDataMysqlError(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	err := GetMysqlDAO().SaveUpdateOrDeleteData(insertstmt1, "mayuri2", 25, true, 2000.00)
+	assert.Error(t, err, "This should return error")
+}
+func TestSaveUpdateOrDeleteDataError2(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	err := GetMysqlDAO().SaveUpdateOrDeleteData(insertstmt1, "mayuri2", 25, true, 2000.00)
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should return error")
+}
+
+func TestSaveUpdateOrDeleteDataError3(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	errormdl.IsTestingNegetiveCaseOn3 = true
+	err := GetMysqlDAO().SaveUpdateOrDeleteData(insertstmt1, "mayuri3", 25, true, 3000.00)
+	errormdl.IsTestingNegetiveCaseOn3 = false
+	assert.Error(t, err, "This should return error")
+}
+
+func TestGetData(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	selectstmt := "SELECT count(*) cnt,age FROM person1 WHERE age =?"
+	_, err := GetMySQLDAO().SelectQuery(selectstmt, 25)
+	assert.NoError(t, err, "This should not return error")
+}
+func TestGetDataError(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	selectstmt := "SELECT * FROM person1 WHERE age =?"
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err := GetMySQLDAO().SelectQuery(selectstmt, 25)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+
+func TestGetDataError1(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	selectstmt := "SELECT * FROM person1 WHERE age =?"
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	_, err := GetMySQLDAO().SelectQuery(selectstmt, 25)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestGetDataError2(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	selectstmt := "SELECT name,age FROM person1 WHERE age =?"
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	_, err := GetMySQLDAO().SelectQuery(selectstmt, 25)
+	errormdl.IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestGetDataError3(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	selectstmt := "SELECT name,age FROM person1 WHERE age =?"
+	errormdl.IsTestingNegetiveCaseOn3 = true
+	_, err := GetMySQLDAO().SelectQuery(selectstmt, 25)
+	errormdl.IsTestingNegetiveCaseOn3 = false
+	assert.Error(t, err, "This should return error")
+}
+func TestMysqlTransaction(t *testing.T) {
+	Init("mysql-config.toml", "localhost")
+	connection, connectionError := GetMYSQLConnection("localhost")
+	if errormdl.CheckErr(connectionError) != nil {
+		loggermdl.LogError(connectionError)
+	}
+	session := connection.NewSession(nil)
+	tx, err := session.Begin()
+	if err != nil {
+		loggermdl.LogError(err)
+	}
+	defer tx.RollbackUnlessCommitted()
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	stmt1 := session.InsertBySql(insertstmt1, "mayuri3", 25, true, 2000.00)
+	buf := dbr.NewBuffer()
+	stmt1.Build(dialect.MySQL, buf)
+	query1, interpolateErr1 := dbr.InterpolateForDialect(buf.String(), buf.Value(), dialect.MySQL)
+	if interpolateErr1 != nil {
+		fmt.Println(interpolateErr1)
+	} else {
+		fmt.Println(query1) // prints the interpolated INSERT statement
+	}
+	if result1, insertErr1 := session.InsertBySql(query1).Exec(); insertErr1 != nil {
+		fmt.Println(insertErr1)
+	} else {
+		fmt.Println(result1.RowsAffected())
+	}
+	insertstmt2 := "INSERT person1 SET Name=?,age=?,isAlive=?,salary=?"
+	stmt2 := session.InsertBySql(insertstmt2, "mayuri2", 25, true, 2000.00)
+	buf2 := dbr.NewBuffer()
+	stmt2.Build(dialect.MySQL, buf2)
+	query2, interpolateErr2 := dbr.InterpolateForDialect(buf2.String(), buf2.Value(), dialect.MySQL)
+	if interpolateErr2 != nil {
+		fmt.Println(interpolateErr2)
+	} else {
+		fmt.Println(query2) // prints the interpolated INSERT statement
+	}
+	if result2, insertErr2 := session.InsertBySql(query2).Exec(); insertErr2 != nil {
+		fmt.Println(insertErr2)
+	} else {
+		fmt.Println(result2.RowsAffected())
+	}
+	tx.Commit()
+	assert.NoError(t, err, "This should not return error")
+}
+
+func BenchmarkSave(b *testing.B) {
+	Init("mysql-config.toml", "localhost")
+	insertstmt1 := "INSERT INTO person1 (Name,age,isAlive,salary) VALUES(?,?,?,?)"
+	for i := 0; i < b.N; i++ {
+		GetMysqlDAOWithHost("localhost").SaveUpdateOrDeleteData(insertstmt1, "mayuri2", 25, true, 2000.00)
+	}
+}
+
+func BenchmarkUpdate(b *testing.B) {
+	Init("mysql-config.toml", "localhost")
+	updatestmt := "UPDATE person1 SET isAlive=?,salary=? WHERE Name = ? AND age =?"
+	for i := 0; i < b.N; i++ {
+		GetMysqlDAOWithHost("localhost").SaveUpdateOrDeleteData(updatestmt, false, 2000.00, "mayuri2", 25)
+	}
+}
diff --git a/v2/dalmdl/sqlserver/sqlserver.go b/v2/dalmdl/sqlserver/sqlserver.go
new file mode 100644
index 0000000000000000000000000000000000000000..c4a0ccad55774c3f55177fa621ab31c14686b7fa
--- /dev/null
+++ b/v2/dalmdl/sqlserver/sqlserver.go
@@ -0,0 +1,288 @@
+package sqlserver
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/sjsonhelpermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	_ "github.com/denisenkom/go-mssqldb"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+// SQLServerConfig holds the connection settings for a SQL Server host
+type SQLServerConfig struct {
+	HostName        string        `json:"hostName"`
+	Server          string        `json:"server"`
+	Port            int           `json:"port"`
+	Username        string        `json:"username"`
+	Password        string        `json:"password"`
+	Database        string        `json:"database"`
+	IsDefault       bool          `json:"isDefault"`
+	MaxIdleConns    int           `json:"maxIdleConns" `
+	MaxOpenConns    int           `json:"maxOpenConns"`
+	ConnMaxLifetime time.Duration `json:"connMaxLifetime" `
+	IsDisabled      bool          `json:"isDisabled" `
+}
+
+// SQLServerDAO - SQL Server DAO struct
+type SQLServerDAO struct {
+	HostName string
+}
+
+var sqlServerConnections map[string]*sql.DB
+var once sync.Once
+var defaultHost string
+var mutex sync.Mutex
+
+func init() {
+	sqlServerConnections = make(map[string]*sql.DB)
+}
+
+// InitUsingJSON initializes SQL Server connections for the given JSON data and
+// returns the first connection error encountered, if any
+func InitUsingJSON(configs []SQLServerConfig) error {
+
+	var connectionErr error
+
+	once.Do(func() {
+		mutex.Lock()
+		defer mutex.Unlock()
+
+		for _, hostDetails := range configs {
+
+			// Build connection string
+			connString := fmt.Sprintf("server=%s;user id=%s;password=%s;port=%d;database=%s;",
+				hostDetails.Server, hostDetails.Username, hostDetails.Password, hostDetails.Port, hostDetails.Database)
+
+			var err error
+
+			// Create connection pool
+			conn, err := sql.Open("mssql", connString)
+			if err != nil {
+				connectionErr = err
+				loggermdl.LogError("error while creating connection:", err)
+				return
+			}
+
+			conn.SetConnMaxLifetime(hostDetails.ConnMaxLifetime)
+			conn.SetMaxIdleConns(hostDetails.MaxIdleConns)
+			conn.SetMaxOpenConns(hostDetails.MaxOpenConns)
+
+			err = conn.Ping()
+			if err != nil {
+				connectionErr = err
+				loggermdl.LogError("failed to connect:", err)
+				return
+			}
+
+			sqlServerConnections[hostDetails.HostName] = conn
+			if hostDetails.IsDefault {
+				defaultHost = hostDetails.HostName
+			}
+		}
+	})
+	return connectionErr
+}
+
+// InitConnection opens a single SQL Server connection from the given connection details
+func InitConnection(connectionDetails SQLServerConfig) (*sql.DB, error) {
+
+	// Build connection string
+	connString := fmt.Sprintf("server=%s;user id=%s;password=%s;port=%d;database=%s;",
+		connectionDetails.Server, connectionDetails.Username, connectionDetails.Password, connectionDetails.Port, connectionDetails.Database)
+
+	// Create connection pool
+	connection, err := sql.Open("mssql", connString)
+	if err != nil {
+		loggermdl.LogError("error while creating connection:", err)
+		return nil, err
+	}
+
+	connection.SetConnMaxLifetime(connectionDetails.ConnMaxLifetime)
+	connection.SetMaxIdleConns(connectionDetails.MaxIdleConns)
+	connection.SetMaxOpenConns(connectionDetails.MaxOpenConns)
+
+	return connection, nil
+}
+
+// makeConnURL prepares the URL for connection
+func makeConnURL(config SQLServerConfig) *url.URL {
+	return &url.URL{
+		Scheme: "sqlserver",
+		Host:   config.Server + ":" + strconv.Itoa(config.Port),
+		User:   url.UserPassword(config.Username, config.Password),
+	}
+}
+
+//GetSQLServerConnection returns connection by hostname
+func GetSQLServerConnection(hostName string) (*sql.DB, error) {
+
+	mutex.Lock()
+	defer mutex.Unlock()
+
+	if sqlServerConnections == nil {
+		return nil, errormdl.Wrap("SQLSERVER_INIT_NOT_DONE")
+	}
+	if hostName == "" {
+		if connection, ok := sqlServerConnections[defaultHost]; ok {
+			return connection, nil
+		}
+	}
+	if connection, ok := sqlServerConnections[hostName]; ok {
+		return connection, nil
+	}
+	return nil, errormdl.Wrap("Connection not found for : " + hostName)
+}
+
+//GetSQLServerDAO  returns SQLServer DAO instance with default host
+func GetSQLServerDAO() *SQLServerDAO {
+	return &SQLServerDAO{
+		HostName: defaultHost,
+	}
+}
+
+//GetSQLServerDAOWithHost returns SQLServer DAO instance with provided host
+func GetSQLServerDAOWithHost(hostName string) *SQLServerDAO {
+	return &SQLServerDAO{
+		HostName: hostName,
+	}
+}
+
+// SelectQuery executes the query and returns the rows as a gjson.Result array
+func (ss *SQLServerDAO) SelectQuery(query string, args ...interface{}) (*gjson.Result, error) {
+	connection, connectionError := GetSQLServerConnection(ss.HostName)
+	if errormdl.CheckErr(connectionError) != nil {
+		loggermdl.LogError("GetSqlServerConnection Err : ", connectionError)
+		return nil, errormdl.CheckErr(connectionError)
+	}
+
+	// loggermdl.LogSpot(connection)
+	pingError := connection.Ping()
+	if errormdl.CheckErr(pingError) != nil && pingError != driver.ErrBadConn {
+		loggermdl.LogError(pingError)
+		return nil, errormdl.CheckErr(pingError)
+	}
+	rows, queryError := connection.Query(query, args...)
+	if errormdl.CheckErr(queryError) != nil {
+		loggermdl.LogError(queryError)
+		return nil, errormdl.CheckErr(queryError)
+	}
+	defer rows.Close()
+	columns, err := rows.Columns()
+	if errormdl.CheckErr2(err) != nil {
+		loggermdl.LogError("SelectQuery rows.Columns() Err : ", err)
+		return nil, errormdl.CheckErr2(err)
+	}
+	values := make([]interface{}, len(columns))
+	valuePtrs := make([]interface{}, len(columns))
+	tableData := "[]"
+	for rows.Next() {
+		for i := 0; i < len(columns); i++ {
+			valuePtrs[i] = &values[i]
+		}
+		scanError := rows.Scan(valuePtrs...)
+		if errormdl.CheckErr3(scanError) != nil {
+			loggermdl.LogError("SelectQuery rows.Scan Err : ", scanError)
+			return nil, errormdl.CheckErr3(scanError)
+		}
+		data, err := sjsonhelpermdl.SetMultiple("", columns, values)
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectQuery sjsonhelpermdl.SetMultiple Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+		tableData, err = sjson.Set(tableData, "-1", gjson.Parse(data).Value())
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectQuery sjson.Set Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+	}
+	resultSet := gjson.Parse(tableData)
+	return &resultSet, nil
+}
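+
+// Example (illustrative sketch; "mssql1" is a placeholder host registered via
+// InitUsingJSON, and the table is a placeholder):
+//
+//	rs, err := GetSQLServerDAOWithHost("mssql1").SelectQuery(
+//		"SELECT name, age FROM person WHERE age = ?", 25)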
+
+// SelectTxQuery executes the query inside the given transaction and returns the rows as a gjson.Result array
+func SelectTxQuery(query string, tx *sql.Tx, args ...interface{}) (*gjson.Result, error) {
+	rows, queryError := tx.Query(query, args...)
+	if errormdl.CheckErr(queryError) != nil {
+		loggermdl.LogError(queryError)
+		return nil, errormdl.CheckErr(queryError)
+	}
+	defer rows.Close()
+	columns, err := rows.Columns()
+	if errormdl.CheckErr2(err) != nil {
+		loggermdl.LogError("SelectTxQuery rows.Columns() Err : ", err)
+		return nil, errormdl.CheckErr2(err)
+	}
+	values := make([]interface{}, len(columns))
+	valuePtrs := make([]interface{}, len(columns))
+	tableData := "[]"
+	for rows.Next() {
+		for i := 0; i < len(columns); i++ {
+			valuePtrs[i] = &values[i]
+		}
+		scanError := rows.Scan(valuePtrs...)
+		if errormdl.CheckErr3(scanError) != nil {
+			loggermdl.LogError("SelectTxQuery rows.Scan Err : ", scanError)
+			return nil, errormdl.CheckErr3(scanError)
+		}
+		data, err := sjsonhelpermdl.SetMultiple("", columns, values)
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectTxQuery sjsonhelpermdl.SetMultiple Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+		tableData, err = sjson.Set(tableData, "-1", gjson.Parse(data).Value())
+		if errormdl.CheckErr3(err) != nil {
+			loggermdl.LogError("SelectTxQuery sjson.Set Err : ", err)
+			return nil, errormdl.CheckErr3(err)
+		}
+	}
+	resultSet := gjson.Parse(tableData)
+	return &resultSet, nil
+}
+
+// ExecQuery executes a statement; fetching the last inserted id is still a TODO for SQL Server, so "-1" is returned
+func (ss *SQLServerDAO) ExecQuery(query string, args ...interface{}) (string, error) {
+
+	conn, err := GetSQLServerConnection(ss.HostName)
+	if err != nil {
+		return "", err
+	}
+
+	result, err := conn.Exec(query, args...)
+	if err != nil {
+		return "", err
+	}
+	_ = result
+
+	//TODO: Get last insertedID
+
+	// var lastInsertedID interface{}
+	// conn.QueryRow("SELECT SCOPE_IDENTITY()")
+
+	// last, err := result.LastInsertId()
+
+	return "-1", nil
+}
+
+// ExecTxQuery executes a statement inside the given transaction; fetching the last inserted id is still a TODO, so "-1" is returned
+func ExecTxQuery(query string, tx *sql.Tx, args ...interface{}) (string, error) {
+
+	result, execError := tx.Exec(query, args...)
+	if errormdl.CheckErr(execError) != nil {
+		loggermdl.LogError(execError)
+		return "", errormdl.CheckErr(execError)
+	}
+	_, affectError := result.RowsAffected()
+	if errormdl.CheckErr(affectError) != nil {
+		loggermdl.LogError(affectError)
+		return "", errormdl.CheckErr(affectError)
+	}
+
+	//TODO: get last inserted id
+	// ID, err := result.LastInsertId()
+	// if errormdl.CheckErr(err) != nil {
+	// 	loggermdl.LogError(err)
+	// 	return "", errormdl.CheckErr(err)
+	// }
+	// return strconv.Itoa(int(ID)), nil
+	return "-1", nil
+}
diff --git a/v2/downloadhelpermdl/downloadhelpermdl.go b/v2/downloadhelpermdl/downloadhelpermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..2be09986a71c2e4c6c6e2b1824917501562f49e3
--- /dev/null
+++ b/v2/downloadhelpermdl/downloadhelpermdl.go
@@ -0,0 +1,389 @@
+// Author: Deepak Prakash [17-July-2018]
+// Description: Package downloadhelpermdl will help download files to destination location.
+
+package downloadhelpermdl
+
+// All required imports
+import (
+	"io"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+// Constants Required
+const (
+	EMPTY_STR                       = ""
+	HTTP_PROTOCOL                   = "http"
+	FILE_PRESENT_CHECK_ERROR        = 0
+	FILE_PRESENT_AT_DESTINATION     = 1
+	FILE_NOT_PRESENT_AT_DESTINATION = 2
+	WAIT_TIME_DURATION              = 2000
+	MIN_FILE_SIZE                   = 1
+)
+
+//TODO: 1. Add Skip List
+//TODO: 2. Length checking
+//TODO: 3. HTTP Pooling
+//TODO: 4. HTTP2 Support - Certificate
+//TODO: 5. Resume Download
+
+// DownloadHelper - Struct
+type DownloadHelper struct {
+	sourceURL       string
+	destinationPath string
+	retryCount      int
+	DownloadParam   DownloadParameter
+}
+
+// DownloadParameter holds the different parameters accepted for downloading a file
+type DownloadParameter struct {
+	jwtToken        string
+	isCacheBurst    bool
+	fileHashedValue string
+	requestTimeOut  int64
+	DownloadError   error
+}
+
+// GetDownloadHelper returns a download helper on which the download steps can be run
+func GetDownloadHelper(sourceURL, destinationPath string, retryCount int) *DownloadHelper {
+	newDownloadHelper := DownloadHelper{}
+	newDownloadHelper.sourceURL = sourceURL
+	newDownloadHelper.destinationPath = destinationPath
+	newDownloadHelper.retryCount = retryCount
+	return &newDownloadHelper
+}
+
+// AddParamsJWT - method to add jwtToken at runtime
+func (dh *DownloadHelper) AddParamsJWT(jwtToken string) *DownloadHelper {
+	dh.DownloadParam.jwtToken = jwtToken
+	return dh
+}
+
+// AddParamsFileHashed - method to add fileHashedValue at runtime
+func (dh *DownloadHelper) AddParamsFileHashed(fileHashedValue string) *DownloadHelper {
+	dh.DownloadParam.fileHashedValue = fileHashedValue
+	return dh
+}
+
+// AddParamsCacheBurst - method to enable or disable cache bursting at runtime
+func (dh *DownloadHelper) AddParamsCacheBurst(isCacheBurst bool) *DownloadHelper {
+	dh.DownloadParam.isCacheBurst = isCacheBurst
+	return dh
+}
+
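+// AddRequestTimeout - method to set the request timeout (in seconds) at runtime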
+func (dh *DownloadHelper) AddRequestTimeout(requestTimeOutValue int64) *DownloadHelper {
+	dh.DownloadParam.requestTimeOut = requestTimeOutValue
+	return dh
+}
+
+// Run executes all download steps one by one
+func (dh *DownloadHelper) Run() *DownloadHelper {
+	_, dh.DownloadParam.DownloadError = DownloadFile(dh)
+	return dh
+}
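+
+// Example (illustrative sketch; URL, destination path and token are placeholders):
+//
+//	dh := GetDownloadHelper("http://example.com/file.jpg", "/tmp/file.jpg", 2).
+//		AddParamsJWT("<jwt-token>").
+//		AddParamsCacheBurst(true).
+//		AddRequestTimeout(30).
+//		Run()
+//	if dh.DownloadParam.DownloadError != nil {
+//		loggermdl.LogError(dh.DownloadParam.DownloadError)
+//	}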
+
+// DownloadFile downloads a file from the cloud, with or without hash checking.
+// params : DownloadHelper, consists of the different aspects of downloading
+func DownloadFile(params *DownloadHelper) (bool, error) {
+
+	// Clean destination path
+	params.destinationPath = filemdl.CleanPath(params.destinationPath)
+
+	// Source URL and Destination path validations
+	if strings.Trim(params.sourceURL, EMPTY_STR) == EMPTY_STR || strings.Trim(params.destinationPath, EMPTY_STR) == EMPTY_STR || !strings.HasPrefix(params.sourceURL, HTTP_PROTOCOL) {
+		loggermdl.LogDebug("DownloadFile : sourceURL/destinationPath is empty or not proper")
+		return false, errormdl.Wrap("Either Source/Destination url is empty or source url does not start with 'http'")
+	}
+
+	//Check file available on destination location
+	isValid, err := destinationValidator(params.destinationPath)
+	if err != nil {
+		loggermdl.LogError("File destination validator error : ", err)
+		return false, err
+	}
+
+	// Cache Burst Implementation
+	if params.DownloadParam.isCacheBurst {
+		mkclCacheBurst := strconv.FormatInt(time.Now().UnixNano(), 10)
+		if strings.Contains(params.sourceURL, "?") {
+			params.sourceURL = params.sourceURL + "&mkclCacheBurst=" + mkclCacheBurst
+		} else {
+			params.sourceURL = params.sourceURL + "?mkclCacheBurst=" + mkclCacheBurst
+		}
+	}
+	loggermdl.LogDebug("DownloadFile : Downloading ", params.sourceURL, " at ", params.destinationPath)
+
+	// Take backup in case required
+	backupTaken := false
+	downloadSuccessful := false
+	if isValid == FILE_PRESENT_AT_DESTINATION {
+		// File present -> Backup required
+		err := backupFile(params.destinationPath)
+		if errormdl.CheckErr1(err) != nil {
+			return false, errormdl.CheckErr1(err)
+		}
+		backupTaken = true
+	}
+
+	var errDownload error
+	// Downloading with retry attempts
+	for index := 0; index <= params.retryCount; index++ {
+		loggermdl.LogDebug("Attempt number: ", index+1)
+
+		// Call method to perform actual downloading
+		errDownload = downloadFileFromCloud(params)
+
+		if errormdl.CheckErr1(errDownload) != nil {
+			loggermdl.LogError("Download failed: ", err)
+			continue
+		}
+		downloadSuccessful = true
+		loggermdl.LogDebug("DownloadFile : Download Successful")
+
+		// Delete backup
+		DeleteBackup(params.destinationPath)
+		// All operations done successfully
+		return true, nil
+	}
+
+	// Restore backup in case downloading failed
+	if backupTaken && errormdl.CheckBool(!downloadSuccessfull) {
+		restoreBackup(params.destinationPath)
+		time.Sleep(WAIT_TIME_DURATION * time.Millisecond)
+	}
+	return false, errDownload
+}
+
+// downloadFileFromCloud: To use http client and download a url to a local file.
+// params : DownloadParameter, consists of different aspects of downloading
+func downloadFileFromCloud(params *DownloadHelper) error {
+
+	// Check hash check enabled or not
+	destinationPath := ""
+	if strings.Trim(params.DownloadParam.fileHashedValue, EMPTY_STR) != EMPTY_STR {
+		// With hash check implementation
+		destinationPath = os.TempDir() + string(os.PathSeparator) + filepath.Base(params.destinationPath)
+	} else {
+		// Without hash check implementation
+		destinationPath = params.destinationPath
+	}
+
+	// Create file at destination
+	out, err := os.Create(destinationPath)
+	if errormdl.CheckErr(err) != nil {
+		return errormdl.CheckErr(err)
+	}
+	defer out.Close()
+
+	//TODO: Code might be refactor (in case provision is given in httpmdl)
+	if params.DownloadParam.requestTimeOut <= 0 {
+		params.DownloadParam.requestTimeOut = 0
+	}
+
+	// Fetching data from httpClient
+	client := &http.Client{
+		Timeout: time.Duration(params.DownloadParam.requestTimeOut) * time.Second,
+	}
+
+	// Call to fetch content
+	req, err := http.NewRequest("GET", params.sourceURL, nil)
+	if errormdl.CheckErr1(err) != nil {
+		return errormdl.CheckErr1(err)
+	}
+
+	// Set JWT in request header in case required
+	if strings.Trim(params.DownloadParam.jwtToken, EMPTY_STR) != EMPTY_STR {
+		req.Header.Add("Authorization", params.DownloadParam.jwtToken)
+	}
+
+	// Call http client
+	resp, err := client.Do(req)
+	if errormdl.CheckErr2(err) != nil {
+		return errormdl.CheckErr2(err)
+	}
+	// check for a nil response before touching resp.Body
+	if errormdl.CheckBool(resp == nil) {
+		return errormdl.Wrap("Empty response.")
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 && resp.StatusCode != 206 {
+		loggermdl.LogError("Download failed.")
+		return errormdl.Wrap("Response code NOT 200 or 206.")
+	}
+	//resp.StatusCode == 200 || resp.StatusCode == 206
+	// Write the response body to file
+	_, err = io.Copy(out, resp.Body)
+	if errormdl.CheckErr3(err) != nil {
+		return errormdl.CheckErr3(err)
+	}
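+	// close the file explicitly so its timestamps can be updated and the file moved below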
+	out.Close()
+
+	// File downloaded successfully
+	loggermdl.LogDebug("DownloadFile : Download Successful")
+	timeHeader := resp.Header.Get("Last-Modified")
+
+	//Last-Modified is not available in case of GRIDFS CDN
+	if strings.Trim(timeHeader, EMPTY_STR) != EMPTY_STR {
+		err = updateModifiedDateAndValidate(timeHeader, destinationPath, params)
+		if err != nil {
+			return err
+		}
+	}
+
+	// All operations done successfully
+	return nil
+}
+
+// updateModifiedDateAndValidate updates the modified date and validates the downloaded file
+func updateModifiedDateAndValidate(timeHeader, destinationPath string, params *DownloadHelper) error {
+	// Get last modified date of file from header file
+	lastModifiedTime, err := time.Parse("Mon, 02 Jan 2006 15:04:05 GMT", timeHeader)
+	if errormdl.CheckErr(err) != nil {
+		// Error while getting last modified date of the file.
+		return errormdl.CheckErr(err)
+	}
+
+	// Set last modified date and last access date from server
+	err = os.Chtimes(destinationPath, lastModifiedTime, lastModifiedTime)
+	if errormdl.CheckErr1(err) != nil {
+		// Error while setting last modified date of the file.
+		return errormdl.CheckErr1(err)
+	}
+
+	// Validation of downloaded file
+	err = validateDownloadedFile(destinationPath, params)
+	if errormdl.CheckErr3(err) != nil {
+		return errormdl.CheckErr3(err)
+	}
+	return nil
+}
+
+// validateDownloadedFile validates that the file is proper and moves it from TEMP to the destination when hashing is required
+func validateDownloadedFile(tempDestinationPath string, params *DownloadHelper) error {
+	// Get file info -> to compare with response length
+	fileInfo, err := filemdl.FileInfo(tempDestinationPath)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError("Error in getting file info: ", err)
+		return errormdl.Wrap("Error in getting file info.")
+	}
+
+	// Compare downloaded file with minimum file size
+	if errormdl.CheckBool(fileInfo.Size() < MIN_FILE_SIZE) {
+		// Content is less than minimum file size
+		loggermdl.LogError("Content length less than minimuma specified file size: ", err)
+		return errormdl.Wrap("Error, file size is less than expected.")
+	}
+
+	// Check if hash checking is required, then calculate and compare the hash of the downloaded file
+	if strings.Trim(params.DownloadParam.fileHashedValue, EMPTY_STR) != EMPTY_STR {
+		// Calculate hash value of downloaded file
+		hashValue, err := hashmdl.GetAtributeBasedHash(tempDestinationPath)
+		if errormdl.CheckErr1(err) != nil {
+			loggermdl.LogError("Error in getting hash value of file: ", err)
+			return errormdl.Wrap("Error in getting hash value of file.")
+		}
+
+		// Compare hash value with provided value
+		if errormdl.CheckBool1(hashValue == params.DownloadParam.fileHashedValue) {
+			// Move file from Temp to actual destination
+			err := filemdl.MoveFile(tempDestinationPath, params.destinationPath)
+			if errormdl.CheckErr2(err) != nil {
+				loggermdl.LogError("Error in moving downloaded file: ", err)
+				return errormdl.Wrap("Error in moving downloaded file.")
+			}
+			loggermdl.LogDebug("File moved successfully to destination.")
+			return nil
+		}
+		loggermdl.LogError("File and its hashed value is not matching: ", err)
+		return errormdl.Wrap("File and its hashed value is not matching.")
+	}
+	// Validation of downloaded file successfully done
+	return nil
+}
+
+// destinationValidator checks whether a file already exists at the destination path
+func destinationValidator(destinationPath string) (int, error) {
+	// Check file available or not
+	if filemdl.FileAvailabilityCheck(destinationPath) {
+		// File available at destination
+		destInfo, infoError := filemdl.FileInfo(destinationPath)
+		if errormdl.CheckErr(infoError) != nil {
+			return FILE_PRESENT_CHECK_ERROR, errormdl.Wrap("error occurred while getting information of destination directory : " + destinationPath)
+		}
+
+		if destInfo.IsDir() {
+			loggermdl.LogDebug("DownloadFile : Destination Path is Directory")
+			return FILE_PRESENT_CHECK_ERROR, errormdl.Wrap("destination path must be a file path, got directory : " + destinationPath)
+		}
+		return FILE_PRESENT_AT_DESTINATION, nil
+	}
+	// File not available at destination
+	return FILE_NOT_PRESENT_AT_DESTINATION, nil
+}
+
+// backupFile Backup File
+func backupFile(filePath string) error {
+	fileExists := filemdl.FileAvailabilityCheck(filePath)
+	if fileExists {
+		loggermdl.LogDebug("backupFile : File Exists. Creating Backup")
+		parentDirectory, fileName := path.Split(filePath)
+		ext := path.Ext(fileName)
+		backupFileName := strings.Replace(fileName, ext, "", -1) + "_backup" + ext
+
+		backupFilePath := filepath.Join(parentDirectory, backupFileName)
+		return filemdl.RenameFile(filePath, backupFilePath)
+	}
+	loggermdl.LogDebug("backupFile : File Not Exists.")
+	return nil
+}
+
+// restoreBackup Restores Backup
+func restoreBackup(filePath string) error {
+	fileExists := filemdl.FileAvailabilityCheck(filePath)
+	if fileExists {
+		loggermdl.LogDebug("restoreBackup : File Exists. Deleting File")
+		err := filemdl.DeleteFile(filePath)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+	}
+	parentDirectory, fileName := path.Split(filePath)
+	ext := path.Ext(fileName)
+	backupFileName := strings.Replace(fileName, ext, "", -1) + "_backup" + ext
+
+	backupFilePath := filepath.Join(parentDirectory, backupFileName)
+	fileExists = filemdl.FileAvailabilityCheck(backupFilePath)
+	if fileExists {
+		loggermdl.LogDebug("restoreBackup : File Exists. Restoring Backup")
+		return filemdl.RenameFile(backupFilePath, filePath)
+	}
+	loggermdl.LogDebug("restoreBackup : Backup File Not Exists.")
+	return nil
+}
+
+// DeleteBackup Deletes Backup
+func DeleteBackup(filePath string) error {
+	parentDirectory, fileName := path.Split(filePath)
+	ext := path.Ext(fileName)
+	backupFileName := strings.Replace(fileName, ext, "", -1) + "_backup" + ext
+
+	backupFilePath := filepath.Join(parentDirectory, backupFileName)
+	fileExists := filemdl.FileAvailabilityCheck(backupFilePath)
+	if fileExists {
+		loggermdl.LogDebug("DeleteBackup : File Exists. Deleting Backup")
+		return filemdl.DeleteFile(backupFilePath)
+	}
+	loggermdl.LogDebug("DeleteBackup : Backup File Not Exists.")
+	return nil
+}
diff --git a/v2/downloadhelpermdl/downloadhelpermdl_test.go b/v2/downloadhelpermdl/downloadhelpermdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a61612e5b112e08a9752a24a48200428ef1e99b
--- /dev/null
+++ b/v2/downloadhelpermdl/downloadhelpermdl_test.go
@@ -0,0 +1,251 @@
+// Author: Deepak Prakash [17-July-2018]
+
+// Description: Package downloadhelpermdl will help download files to destination location.
+
+package downloadhelpermdl
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"github.com/stretchr/testify/assert"
+)
+
+const (
+	RETRY_COUNT   = 2
+	MODIFIED_DATE = "Sun, 06 Aug 2017 10:43:25 GMT"
+	SOURCE_URL    = "https://golangcode.com/images/avatar.jpg?name=Deepak"
+)
+
+// TestCase 1 - Valid Case - Pass
+func TestGetDownloadHelper_1(t *testing.T) {
+	dh := GetDownloadHelper(SOURCE_URL, getDestinationFilePath(), RETRY_COUNT).
+		AddParamsFileHashed("15020162054018").
+		AddParamsJWT("abcdef").
+		AddParamsCacheBurst(true)
+	assert.Empty(t, dh.Run().DownloadParam.DownloadError)
+}
+
+// TestCase 2 - Empty Source URL - Fail
+func TestGetDownloadHelper_2(t *testing.T) {
+	dh := GetDownloadHelper("", getDestinationFilePath(), RETRY_COUNT)
+	assert.NotNil(t, dh.Run().DownloadParam.DownloadError)
+}
+
+// TestCase 3 - Destination As Folder - Fail
+func TestGetDownloadHelper_3(t *testing.T) {
+	dh := GetDownloadHelper("https://golangcode.com/images/avatar.jpg", getWorkingDirectoryPath(), RETRY_COUNT)
+	assert.NotNil(t, dh.Run().DownloadParam.DownloadError)
+}
+
+// TestCase 4 - Wrong URL - Fail
+func TestGetDownloadHelper_4(t *testing.T) {
+	dh := GetDownloadHelper("http://g10.mkcl.org/test", getDestinationFilePath(), RETRY_COUNT).AddParamsCacheBurst(true)
+	assert.NotNil(t, dh.Run().DownloadParam.DownloadError)
+}
+
+// TestCase 5 - File Backup Error - Fail
+func TestGetDownloadHelper_5(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn = true
+	assert.NotNil(t, dh.Run().DownloadParam.DownloadError)
+	errormdl.IsTestingNegetiveCaseOn = false
+}
+
+// TestCase 6 - Cache Burst URL - Pass
+func TestGetDownloadHelper_6(t *testing.T) {
+	dh := createFile().AddParamsCacheBurst(true)
+	assert.Nil(t, dh.Run().DownloadParam.DownloadError)
+}
+
+// TestCase 7 - Wrong Hashed Value - Fail
+func TestGetDownloadHelper_8(t *testing.T) {
+	dh := createFile().AddParamsFileHashed("WrongValue")
+	assert.NotNil(t, dh.Run().DownloadParam.DownloadError)
+}
+
+// TestCase 8 - Delete File Failed Case
+func Test_restoreBackup_1(t *testing.T) {
+	filePath := getDestinationFilePath()
+	errormdl.IsTestingNegetiveCaseOn = true
+	assert.NotNil(t, restoreBackup(filePath))
+	errormdl.IsTestingNegetiveCaseOn = false
+}
+
+// TestCase 9 - Backup File Does Not Exist
+func Test_restoreBackup_2(t *testing.T) {
+	filePath := getDestinationFilePath()
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	assert.Nil(t, restoreBackup(filePath))
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+}
+
+// TestCase 10 - Backup creation of a non-existing file
+func TestBackupFile_1(t *testing.T) {
+	filePath := getWorkingDirectoryPath() + "\\testCase22.jpg"
+	assert.Nil(t, backupFile(filePath))
+}
+
+// TestCase 11 - Destination Validator
+func Test_destinationValidator(t *testing.T) {
+	destinationPath := getWorkingDirectoryPath() + "\\testCase22.jpg"
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err := destinationValidator(destinationPath)
+	assert.Nil(t, err)
+	errormdl.IsTestingNegetiveCaseOn = false
+}
+
+// TestCase 12 - validate Downloaded File
+func Test_validateDownloadedFile_1(t *testing.T) {
+	tempDestinationPath := getWorkingDirectoryPath() + "\\testCase22.jpg"
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	assert.NotNil(t, validateDownloadedFile(tempDestinationPath, dh))
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+}
+
+// TestCase 13 - validate Downloaded File
+func Test_validateDownloadedFile_2(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	assert.NotNil(t, validateDownloadedFile(dh.destinationPath, dh))
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+}
+
+// TestCase 14 - validate Downloaded File
+func Test_validateDownloadedFile_3(t *testing.T) {
+	dh := createFile().AddParamsFileHashed("15020162054018")
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	assert.NotNil(t, validateDownloadedFile(dh.destinationPath, dh))
+	errormdl.IsTestingNegetiveCaseOn1 = false
+}
+
+// TestCase 15 - validate Downloaded File
+func Test_validateDownloadedFile_4(t *testing.T) {
+	dh := createFile().AddParamsFileHashed("15020162054018")
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	assert.NotNil(t, validateDownloadedFile(dh.destinationPath, dh))
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+}
+
+// TestCase 16 - validate Downloaded File
+func Test_validateDownloadedFile_5(t *testing.T) {
+	dh := createFile().AddParamsFileHashed("15020162054018")
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	assert.NotNil(t, validateDownloadedFile(dh.destinationPath, dh))
+	errormdl.IsTestingNegetiveCaseOn2 = false
+}
+
+// TestCase 17 - Download File From Cloud
+func Test_downloadFileFromCloud_1(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn = true
+	assert.NotNil(t, downloadFileFromCloud(dh))
+	errormdl.IsTestingNegetiveCaseOn = false
+}
+
+// TestCase 18 - Download File From Cloud
+func Test_downloadFileFromCloud_2(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	assert.NotNil(t, downloadFileFromCloud(dh))
+	errormdl.IsTestingNegetiveCaseOn1 = false
+}
+
+// TestCase 19 - Download File From Cloud
+func Test_downloadFileFromCloud_3(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn2 = true
+	assert.NotNil(t, downloadFileFromCloud(dh))
+	errormdl.IsTestingNegetiveCaseOn2 = false
+}
+
+// TestCase 20 - Download File From Cloud
+func Test_downloadFileFromCloud_4(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn3 = true
+	assert.NotNil(t, downloadFileFromCloud(dh))
+	errormdl.IsTestingNegetiveCaseOn3 = false
+}
+
+// TestCase 21 - Update Modified Date and Validate
+func Test_updateModifiedDateAndValidate_1(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn = true
+	assert.NotNil(t, updateModifiedDateAndValidate(MODIFIED_DATE, getTempDestinationFilePath(), dh))
+	errormdl.IsTestingNegetiveCaseOn = false
+}
+
+// TestCase 22 - Update Modified Date and Validate
+func Test_updateModifiedDateAndValidate_2(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	assert.NotNil(t, updateModifiedDateAndValidate(MODIFIED_DATE, getTempDestinationFilePath(), dh))
+	errormdl.IsTestingNegetiveCaseOn1 = false
+}
+
+// TestCase 23 - Update Modified Date and Validate
+func Test_updateModifiedDateAndValidate_3(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn3 = true
+	assert.NotNil(t, updateModifiedDateAndValidate(MODIFIED_DATE, getTempDestinationFilePath(), dh))
+	errormdl.IsTestingNegetiveCaseOn3 = false
+}
+
+// TestCase 24 - Download File From Cloud
+func Test_downloadFileFromCloud_5(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	assert.NotNil(t, downloadFileFromCloud(dh))
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+}
+
+// TestCase 25 - Download File From Cloud
+func Test_downloadFileFromCloud_6(t *testing.T) {
+	dh := createFile()
+	assert.Nil(t, downloadFileFromCloud(dh))
+}
+
+// TestCase 26 - Destination File Creation Failed - Fail
+func TestGetDownloadHelper_7(t *testing.T) {
+	dh := createFile()
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	_, err := DownloadFile(dh)
+	assert.NotNil(t, err)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+}
+
+// Helper Method - creates the test file in the working directory if it does not already exist
+func createFile() *DownloadHelper {
+	destinationPath := filepath.Join(getWorkingDirectoryPath(), "testCase.jpg")
+	dh := GetDownloadHelper("https://golangcode.com/images/avatar.jpg?name=Deepak", destinationPath, RETRY_COUNT)
+	fileExists := filemdl.FileAvailabilityCheck(destinationPath)
+	if !fileExists {
+		dh.Run()
+	}
+	return dh
+}
+
+// Helper Method - returns the directory of the running executable
+func getWorkingDirectoryPath() string {
+	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
+	if err != nil {
+		loggermdl.LogError(err)
+	}
+	return dir
+}
+
+// Helper Method - provides the destination file path
+func getDestinationFilePath() string {
+	return filepath.Join(getWorkingDirectoryPath(), "testCase.jpg")
+}
+
+// Helper Method - provides the temp destination file path
+func getTempDestinationFilePath() string {
+	return filepath.Join(os.TempDir(), "testCase.jpg")
+}
diff --git a/v2/errormdl/errorcodemdl.go b/v2/errormdl/errorcodemdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee113b9dfd34445dd01ece4097b23d1ee36e2481
--- /dev/null
+++ b/v2/errormdl/errorcodemdl.go
@@ -0,0 +1,27 @@
+package errormdl
+
+const (
+	// NOERROR - success code
+	NOERROR = 1000
+
+	// APPLICATION ERRORS
+	// SJSONERROR - sjson helper error
+	SJSONERROR          = 1001
+	KEYNOTFOUND         = 1002
+	CASTINGERROR        = 1003
+	DATAINVALIDERROR    = 1004
+	EMAILERROR          = 1005
+	SERVICENOTFOUND     = 1006
+	CONDITIONPARSEERROR = 1007
+	CONDITIONEVALERROR  = 1008
+	NEXTFORMEVALERROR   = 1009
+
+	// DB Errors
+	MONGOERROR     = 2001
+	MYSQLERROR     = 2002
+	SQLSERVERERROR = 2003
+	GRAPHDBERROR   = 2004
+
+	// CUSTOM Error
+	EXPECTATIONFAILED = 4000
+)
diff --git a/v2/errormdl/errormdl.go b/v2/errormdl/errormdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..082ae4ca6a0daaf8ebc4321d6660f63794a7731d
--- /dev/null
+++ b/v2/errormdl/errormdl.go
@@ -0,0 +1,139 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:36:54 GMT+0530 (IST)
+
+// Package errormdl helps you catch and wrap errors
+package errormdl
+
+import (
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+)
+
+// IsTestingNegetiveCaseOnCheckBool and its numbered variants force the CheckBool* helpers to invert their input for negative-case testing
+var IsTestingNegetiveCaseOnCheckBool bool
+var IsTestingNegetiveCaseOnCheckBool1 bool
+var IsTestingNegetiveCaseOnCheckBool2 bool
+var IsTestingNegetiveCaseOnCheckBool3 bool
+
+// IsTestingNegetiveCaseOnCheckInt and its numbered variants force the CheckInt* helpers to return sentinel values for negative-case testing
+var IsTestingNegetiveCaseOnCheckInt bool
+var IsTestingNegetiveCaseOnCheckInt1 bool
+var IsTestingNegetiveCaseOnCheckInt2 bool
+
+// IsTestingNegetiveCaseOn set this to true if you want system cases to fail
+var IsTestingNegetiveCaseOn bool
+
+// IsTestingNegetiveCaseOn1 set this to true if you want system cases to fail
+var IsTestingNegetiveCaseOn1 bool
+
+// IsTestingNegetiveCaseOn2 set this to true if you want system cases to fail
+var IsTestingNegetiveCaseOn2 bool
+
+// IsTestingNegetiveCaseOn3 set this to true if you want system cases to fail
+var IsTestingNegetiveCaseOn3 bool
+
+// CoreError is a custom error type
+type CoreError struct {
+	msg string
+}
+
+// CheckBool helps check a bool condition in real as well as test cases. It inverts the result when the IsTestingNegetiveCaseOnCheckBool flag is set
+func CheckBool(originalInput bool) bool {
+	if IsTestingNegetiveCaseOnCheckBool {
+		return !originalInput
+	}
+	return originalInput
+}
+
+// CheckBool1 behaves like CheckBool but is controlled by IsTestingNegetiveCaseOnCheckBool1
+func CheckBool1(originalInput bool) bool {
+	if IsTestingNegetiveCaseOnCheckBool1 {
+		return !originalInput
+	}
+	return originalInput
+}
+
+// CheckBool2 behaves like CheckBool but is controlled by IsTestingNegetiveCaseOnCheckBool2
+func CheckBool2(originalInput bool) bool {
+	if IsTestingNegetiveCaseOnCheckBool2 {
+		return !originalInput
+	}
+	return originalInput
+}
+
+// CheckBool3 behaves like CheckBool but is controlled by IsTestingNegetiveCaseOnCheckBool3
+func CheckBool3(originalInput bool) bool {
+	if IsTestingNegetiveCaseOnCheckBool3 {
+		return !originalInput
+	}
+	return originalInput
+}
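+
+// Usage sketch (hypothetical caller in another package): wrap a boolean
+// condition in CheckBool so a test can force the opposite branch simply by
+// toggling the corresponding flag, without touching the real dependency:
+//
+//	ok := errormdl.CheckBool(filemdl.FileAvailabilityCheck(path))
+//	if !ok {
+//		// failure branch, reachable in tests via IsTestingNegetiveCaseOnCheckBool
+//	}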
+
+// CheckInt helps check an int condition in real as well as test cases. It returns ZERO when the IsTestingNegetiveCaseOnCheckInt flag is set
+func CheckInt(n int) int {
+	if IsTestingNegetiveCaseOnCheckInt {
+		return constantmdl.ZERO
+	}
+	return n
+}
+
+// CheckInt1 behaves like CheckInt but returns MINUS_ONE when IsTestingNegetiveCaseOnCheckInt1 is set
+func CheckInt1(n int) int {
+	if IsTestingNegetiveCaseOnCheckInt1 {
+		return constantmdl.MINUS_ONE
+	}
+	return n
+}
+
+// CheckInt2 behaves like CheckInt but returns MINUS_ONE when IsTestingNegetiveCaseOnCheckInt2 is set
+func CheckInt2(n int) int {
+	if IsTestingNegetiveCaseOnCheckInt2 {
+		return constantmdl.MINUS_ONE
+	}
+	return n
+}
+
+// CheckErr helps check an err condition in real as well as test cases. It can fail based on the IsTestingNegetiveCaseOn flag
+func CheckErr(err error) error {
+	if IsTestingNegetiveCaseOn {
+		return Wrap("This is test error")
+	}
+	return err
+}
+
+// CheckErr1 checks an err condition for real as well as test cases. It can fail based on the IsTestingNegetiveCaseOn1 flag
+func CheckErr1(err error) error {
+	if IsTestingNegetiveCaseOn1 {
+		return Wrap("This is test error")
+	}
+	return err
+}
+
+// CheckErr2 checks an err condition for real as well as test cases. It can fail based on the IsTestingNegetiveCaseOn2 flag
+func CheckErr2(err error) error {
+	if IsTestingNegetiveCaseOn2 {
+		return Wrap("This is test error")
+	}
+	return err
+}
+
+// CheckErr3 checks an err condition for real as well as test cases. It can fail based on the IsTestingNegetiveCaseOn3 flag
+func CheckErr3(err error) error {
+	if IsTestingNegetiveCaseOn3 {
+		return Wrap("This is test error")
+	}
+	return err
+}
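+
+// Usage sketch: the CheckErr family lets a test inject a failure at a single
+// call site without mocks; a hypothetical caller wraps each error check like so:
+//
+//	ba, err := ioutil.ReadFile(path)
+//	if errormdl.CheckErr(err) != nil {
+//		return errormdl.CheckErr(err)
+//	}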
+
+// Wrap creates a CoreError from a message; use this wrapper instead of a plain Go error
+func Wrap(msg string) *CoreError {
+	err := CoreError{}
+	err.msg = msg
+	return &err
+}
+
+// WrapWithCode creates a CoreError along with an error code; use this wrapper instead of a plain Go error
+func WrapWithCode(msg string, errorCode int) (int, *CoreError) {
+	err := CoreError{}
+	err.msg = msg
+	return errorCode, &err
+}
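+
+// Sketch: WrapWithCode pairs naturally with the codes defined in
+// errorcodemdl.go, e.g.
+//
+//	code, err := errormdl.WrapWithCode("record not found", errormdl.KEYNOTFOUND)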
+
+// Error implements the error interface so CoreError is compatible with the standard error type
+func (cerr *CoreError) Error() string {
+	return cerr.msg
+}
diff --git a/v2/errormdl/errormdl_test.go b/v2/errormdl/errormdl_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..0877d067810f07e335c2ae9d0919460cbb422daa
--- /dev/null
+++ b/v2/errormdl/errormdl_test.go
@@ -0,0 +1,88 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:36:43 GMT+0530 (IST)
+package errormdl
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCoreError_Wrap(t *testing.T) {
+	err := Wrap("test")
+	assert.Error(t, err)
+	assert.Len(t, err.Error(), 4, "wrapped message should round-trip unchanged")
+}
+
+func TestCheckBool(t *testing.T) {
+	IsTestingNegetiveCaseOnCheckBool = true
+	assert.False(t, CheckBool(true), "negative case on: input should be inverted")
+	IsTestingNegetiveCaseOnCheckBool = false
+	assert.False(t, CheckBool(false), "negative case off: input should be returned as-is")
+
+	IsTestingNegetiveCaseOnCheckBool1 = true
+	assert.False(t, CheckBool1(true), "negative case on: input should be inverted")
+	IsTestingNegetiveCaseOnCheckBool1 = false
+	assert.False(t, CheckBool1(false), "negative case off: input should be returned as-is")
+
+	IsTestingNegetiveCaseOnCheckBool2 = true
+	assert.False(t, CheckBool2(true), "negative case on: input should be inverted")
+	IsTestingNegetiveCaseOnCheckBool2 = false
+	assert.False(t, CheckBool2(false), "negative case off: input should be returned as-is")
+
+	IsTestingNegetiveCaseOnCheckBool3 = true
+	assert.False(t, CheckBool3(true), "negative case on: input should be inverted")
+	IsTestingNegetiveCaseOnCheckBool3 = false
+	assert.False(t, CheckBool3(false), "negative case off: input should be returned as-is")
+}
+
+func TestCheckError(t *testing.T) {
+	IsTestingNegetiveCaseOn = false
+	assert.Error(t, CheckErr(errors.New("hi")))
+	IsTestingNegetiveCaseOn = true
+	assert.Error(t, CheckErr(errors.New("hi")))
+}
+
+func TestCheckError1(t *testing.T) {
+	IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, CheckErr1(errors.New("hi")))
+	IsTestingNegetiveCaseOn1 = true
+	assert.Error(t, CheckErr1(errors.New("hi")))
+}
+
+func TestCheckError2(t *testing.T) {
+	IsTestingNegetiveCaseOn2 = false
+	assert.Error(t, CheckErr2(errors.New("hi")))
+	IsTestingNegetiveCaseOn2 = true
+	assert.Error(t, CheckErr2(errors.New("hi")))
+}
+
+func TestCheckError3(t *testing.T) {
+	IsTestingNegetiveCaseOn3 = false
+	assert.Error(t, CheckErr3(errors.New("hi")))
+	IsTestingNegetiveCaseOn3 = true
+	assert.Error(t, CheckErr3(errors.New("hi")))
+}
+
+func TestCheckInt(t *testing.T) {
+	IsTestingNegetiveCaseOnCheckInt = false
+	assert.Equal(t, CheckInt(2), 2, "Should return 2")
+	IsTestingNegetiveCaseOnCheckInt = true
+	assert.Equal(t, CheckInt(2), 0, "Should return zero")
+}
+
+func TestCheckInt1(t *testing.T) {
+	IsTestingNegetiveCaseOnCheckInt1 = false
+	assert.Equal(t, CheckInt1(2), 2, "Should return 2")
+	IsTestingNegetiveCaseOnCheckInt1 = true
+	assert.Equal(t, CheckInt1(2), -1, "Should return -1")
+}
+
+func TestCheckInt2(t *testing.T) {
+	IsTestingNegetiveCaseOnCheckInt2 = false
+	assert.Equal(t, CheckInt2(2), 2, "Should return 2")
+	IsTestingNegetiveCaseOnCheckInt2 = true
+	assert.Equal(t, CheckInt2(2), -1, "Should return -1")
+}
diff --git a/v2/filemdl/filemdl.go b/v2/filemdl/filemdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..44c576d91fea92447f81730f2363f8af15c52038
--- /dev/null
+++ b/v2/filemdl/filemdl.go
@@ -0,0 +1,845 @@
+package filemdl
+
+import (
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/juju/fslock"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+)
+
+var (
+	backupPath = ""
+	dbPath     = ""
+	filePtrs   cachemdl.FastCacheHelper
+)
+
+const (
+	// TempDir - Serves as tmp directory for atomic file operations
+	TempDir = "tmp"
+)
+
+func init() {
+	filePtrs.Setup(100000, time.Hour*72, time.Hour*72)
+	if !FileAvailabilityCheck(TempDir) {
+		CreateDirectory(TempDir) // Create directory "tmp" at app root
+	}
+}
+
+// TODO: Symbolic link evaluation (.symlink files) is not supported on Windows for this package
+
+// SetBackPath sets the backup and DB folder paths
+func SetBackPath(folderPath, dbFolderPath string) error {
+	if folderPath == "" || dbFolderPath == "" {
+		return errors.New("backup folder path and DB path must not be empty")
+	}
+	bFolder, bFile := filepath.Split(folderPath)
+	if bFile != "" {
+		return errors.New("backup path must be a folder; did you forget the trailing slash")
+	}
+	dbFolder, dbFile := filepath.Split(dbFolderPath)
+	if dbFile != "" {
+		return errors.New("DB path must be a folder; did you forget the trailing slash")
+	}
+	backupPath = bFolder
+	dbPath = dbFolder
+	return nil
+}
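+
+// Example (hypothetical paths): both arguments are validated with
+// filepath.Split, so each must end with a path separator:
+//
+//	err := filemdl.SetBackPath("/data/backup/", "/data/db/")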
+
+// createFileBackup copies the given file into the backup folder
+func createFileBackup(filePath string) error {
+	if backupPath == "" || dbPath == "" {
+		loggermdl.LogError("Backup folder path not set")
+		return errors.New("Backup folder path not set")
+	}
+	backupFilePath := backupPath + strings.TrimPrefix(filePath, dbPath)
+	_, err := CopyFile(filePath, backupFilePath, true)
+	if errormdl.CheckErr(err) != nil {
+		return errormdl.CheckErr(err)
+	}
+	return nil
+}
+
+// FileHelperServiceObject must be created per call to the FileSearch function
+type FileHelperServiceObject struct {
+	searchResult   []string
+	searchFileName string
+}
+
+// ReadFile reads contents from the provided file path and retries once when an i/o timeout occurs
+func ReadFile(filePath string) ([]byte, error) {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	ba, err := ioutil.ReadFile(filePath)
+	if errormdl.CheckErr(err) != nil {
+		if errormdl.CheckErr(err).Error() == "i/o timeout" {
+			return ioutil.ReadFile(filePath)
+		}
+		return nil, errormdl.CheckErr(err)
+	}
+	return ba, nil
+}
+
+// ReadFileUsingFp reads contents from provided file pointer
+func ReadFileUsingFp(fp *os.File) ([]byte, error) {
+	filePath := fp.Name()
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	if _, err := fp.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	ba, err := ioutil.ReadAll(fp)
+	if errormdl.CheckErr(err) != nil {
+		if errormdl.CheckErr(err).Error() == "i/o timeout" {
+			return ioutil.ReadFile(filePath)
+		}
+		return nil, errormdl.CheckErr(err)
+	}
+	return ba, nil
+}
+
+//createFile creates a new file
+func createFile(filePath string) (*os.File, error) {
+	// function is usable for file module only. Required for zip module
+	return os.Create(filePath)
+}
+
+func createRecursiveDirectoryForFile(filePath string) error {
+	dir, _ := filepath.Split(filePath)
+	createError := CreateDirectoryRecursive(dir)
+	if errormdl.CheckErr(createError) != nil {
+		loggermdl.LogError(createError)
+		return errormdl.CheckErr(createError)
+	}
+	return nil
+}
+
+// WriteFile writes the provided bytes to the file
+func WriteFile(filePath string, data []byte, makeDir bool, createBackup bool) error {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	if makeDir {
+		createError := createRecursiveDirectoryForFile(filePath)
+		if errormdl.CheckErr(createError) != nil {
+			loggermdl.LogError(createError)
+			return errormdl.CheckErr(createError)
+		}
+	}
+	if createBackup {
+		backupErr := createFileBackup(filePath)
+		if backupErr != nil {
+			loggermdl.LogError(backupErr)
+		}
+	}
+
+	return ioutil.WriteFile(filePath, data, 0644)
+}
+
+// WriteFileUsingFp writes provided bytes to file
+func WriteFileUsingFp(f *os.File, data []byte, makeDir bool, createBackup bool) error {
+	filePath := f.Name()
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	if makeDir {
+		createError := createRecursiveDirectoryForFile(filePath)
+		if errormdl.CheckErr(createError) != nil {
+			loggermdl.LogError(createError)
+			return errormdl.CheckErr(createError)
+		}
+	}
+	if createBackup {
+		backupErr := createFileBackup(filePath)
+		if backupErr != nil {
+			loggermdl.LogError(backupErr)
+		}
+	}
+	err := f.Truncate(0)
+	if err != nil {
+		return err
+	}
+	_, err = f.Seek(0, 0)
+	if err != nil {
+		return err
+	}
+	if _, err = f.Write(data); err != nil {
+		return err
+	}
+	// sync after writing so the new contents actually reach disk
+	return f.Sync()
+}
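+
+// Sketch: the *UsingFp variants reuse one open handle for repeated reads and
+// writes instead of reopening the file on every call (flags shown are an
+// assumption for illustration):
+//
+//	f, _ := filemdl.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+//	defer f.Close()
+//	_ = filemdl.WriteFileUsingFp(f, data, false, false)
+//	ba, _ := filemdl.ReadFileUsingFp(f)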
+
+//AppendFile appends provided data/text to file
+func AppendFile(filename string, text string) (int, error) {
+	path, linkErr := os.Readlink(filename)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filename = path
+	}
+	f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0777)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return 0, errormdl.CheckErr(err)
+	}
+
+	defer f.Close()
+
+	return f.WriteString(text)
+}
+
+//DeleteFile deletes provided file path
+func DeleteFile(filePath string) error {
+
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+
+	err := os.Remove(filePath)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	return nil
+}
+
+// RenameFile renames/moves file from old path to new path
+func RenameFile(oldFilePath, newFilePath string) error {
+	return os.Rename(oldFilePath, newFilePath)
+}
+
+// CreateDirectory creates directory using provided path
+func CreateDirectory(directoryPath string) error {
+	return os.Mkdir(directoryPath, os.ModePerm)
+}
+
+// CreateDirectoryRecursive creates directory recursively using provided path
+func CreateDirectoryRecursive(directoryPath string) error {
+	return os.MkdirAll(directoryPath, os.ModePerm)
+}
+
+// DeleteDirectory removes the directory at the provided path along with its contents
+func DeleteDirectory(directoryPath string) error {
+	return os.RemoveAll(directoryPath)
+}
+
+// ListDirectory returns the list of entries in the given directory
+func ListDirectory(directoryPath string) ([]os.FileInfo, error) {
+	return ioutil.ReadDir(directoryPath)
+}
+
+// MoveDirectory moves a directory from source to destination
+func MoveDirectory(source, destination string) error {
+	return os.Rename(source, destination)
+}
+
+// MoveFile moves a file from source to destination
+func MoveFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
+
+// MoveFileToOtherHost moves a file across hosts/filesystems by copying it and deleting the source
+func MoveFileToOtherHost(source, destination string) error {
+	// Copy source file to destination
+	_, copyError := CopyFile(source, destination, true)
+	if errormdl.CheckErr(copyError) != nil {
+		loggermdl.LogError(copyError)
+		return errormdl.CheckErr(copyError)
+	}
+	// Delete source file
+	deleteError := DeleteFile(source)
+	if errormdl.CheckErr1(deleteError) != nil {
+		loggermdl.LogError(deleteError)
+		return errormdl.CheckErr1(deleteError)
+	}
+	return nil
+}
+
+// MoveFolderToOtherHost moves a folder across hosts/filesystems by copying it and deleting the source
+func MoveFolderToOtherHost(source, destination string) error {
+	// Copy source dir to destination
+	copyError := CopyDir(source, destination)
+	if errormdl.CheckErr(copyError) != nil {
+		loggermdl.LogError(copyError)
+		return errormdl.CheckErr(copyError)
+	}
+	// Delete source directory
+	deleteError := DeleteDirectory(source)
+	if errormdl.CheckErr1(deleteError) != nil {
+		loggermdl.LogError(deleteError)
+		return errormdl.CheckErr1(deleteError)
+	}
+	return nil
+}
+
+// CopyFile copies a file from source to destination, optionally creating the destination directory
+func CopyFile(source, destination string, makeDir bool) (int64, error) {
+	path, linkErr := os.Readlink(source)
+	if errormdl.CheckErr1(linkErr) == nil {
+		source = path
+	}
+	in, openError := os.Open(source)
+	if errormdl.CheckErr(openError) != nil {
+		loggermdl.LogError(openError)
+		return 0, errormdl.CheckErr(openError)
+	}
+	defer in.Close()
+	if makeDir {
+		createError := createRecursiveDirectoryForFile(destination)
+		if errormdl.CheckErr1(createError) != nil {
+			loggermdl.LogError(createError)
+			return 0, errormdl.CheckErr1(createError)
+		}
+	}
+	out, createError := os.Create(destination)
+	if errormdl.CheckErr2(createError) != nil {
+		loggermdl.LogError(createError)
+		return 0, errormdl.CheckErr2(createError)
+	}
+	defer out.Close()
+	n, copyError := io.Copy(out, in)
+	if errormdl.CheckErr3(copyError) != nil {
+		loggermdl.LogError(copyError)
+		return 0, errormdl.CheckErr3(copyError)
+	}
+	return n, nil
+}
+
+// CopyDir recursively copies the source directory to the destination directory
+func CopyDir(source string, dest string) (err error) {
+	path, linkErr := os.Readlink(source)
+	if errormdl.CheckErr2(linkErr) == nil {
+		source = path
+	}
+	// get properties of source dir
+	sourceinfo, err := os.Stat(source)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	// create dest dir
+
+	err = os.MkdirAll(dest, sourceinfo.Mode())
+	if errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr1(err)
+	}
+	directory, _ := os.Open(source)
+	defer directory.Close()
+	objects, err := directory.Readdir(-1)
+
+	for _, obj := range objects {
+
+		sourcefilepointer := source + "/" + obj.Name()
+
+		destinationfilepointer := dest + "/" + obj.Name()
+
+		if obj.IsDir() {
+			// create sub-directories - recursively
+			err = CopyDir(sourcefilepointer, destinationfilepointer)
+			if errormdl.CheckErr2(err) != nil {
+				loggermdl.LogError(err)
+			}
+		} else {
+			// perform copy
+			_, err = CopyFile(sourcefilepointer, destinationfilepointer, true)
+			if errormdl.CheckErr2(err) != nil {
+				loggermdl.LogError(err)
+			}
+		}
+
+	}
+	return
+}
+
+// ReplaceFile overwrites the destination file with the provided data
+func ReplaceFile(data []byte, destination string, createBackup bool) error {
+	path, linkErr := os.Readlink(destination)
+	if errormdl.CheckErr(linkErr) == nil {
+		destination = path
+	}
+	return WriteFile(destination, data, false, createBackup)
+}
+
+// TruncateFile truncates the file at the given path to the given size
+func TruncateFile(path string, size int64) error {
+	tmp, linkErr := os.Readlink(path)
+	if errormdl.CheckErr(linkErr) == nil {
+		path = tmp
+	}
+	return os.Truncate(path, size)
+}
+
+//SeekFile SeekFile
+// func SeekFile(path string, offset int64) error {
+// 	file, err := os.OpenFile(path, os.O_RDWR, 0600)
+// 	if err != nil {
+// 		return err
+// 	}
+
+// 	defer file.Close()
+
+// 	_, err = file.Seek(offset, 0)
+
+// 	return err
+// }
+
+// FileInfo returns os.FileInfo for the given path, resolving a symlink first if present
+func FileInfo(path string) (os.FileInfo, error) {
+	tmp, linkErr := os.Readlink(path)
+	if errormdl.CheckErr(linkErr) == nil {
+		path = tmp
+	}
+	return os.Stat(path)
+}
+
+// FileSearch searches for files matching fileName (a filepath.Match pattern) under the given path
+func (fileHelperServiceObject *FileHelperServiceObject) FileSearch(fileName, path string) ([]string, error) {
+	fileHelperServiceObject.searchResult = []string{}
+	fileHelperServiceObject.searchFileName = fileName
+	searchDirectory, err := os.Open(path)
+
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return fileHelperServiceObject.searchResult, errormdl.CheckErr(err)
+	}
+	defer searchDirectory.Close()
+
+	testFileInfo, _ := searchDirectory.Stat()
+	if !testFileInfo.IsDir() {
+		return fileHelperServiceObject.searchResult, err
+	}
+
+	err = filepath.Walk(path, fileHelperServiceObject.findFile)
+	if errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError(err)
+		return fileHelperServiceObject.searchResult, errormdl.CheckErr1(err)
+	}
+	return fileHelperServiceObject.searchResult, nil
+
+}
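+
+// Usage sketch: create a fresh FileHelperServiceObject per search, since it
+// carries per-search state; the pattern follows filepath.Match syntax:
+//
+//	obj := filemdl.FileHelperServiceObject{}
+//	matches, err := obj.FileSearch("*.json", "./data")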
+
+func (fileHelperServiceObject *FileHelperServiceObject) findFile(path string, fileInfo os.FileInfo, err error) error {
+
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+
+	// get absolute path of the folder that we are searching
+	absolute, err := filepath.Abs(path)
+	if errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr1(err)
+	}
+
+	if fileInfo.IsDir() {
+		testDir, err := os.Open(absolute)
+		if errormdl.CheckErr2(err) != nil {
+			loggermdl.LogError(err)
+			return errormdl.CheckErr2(err)
+		}
+		testDir.Close()
+		return nil
+	}
+
+	matched, err := filepath.Match(fileHelperServiceObject.searchFileName, fileInfo.Name())
+	if errormdl.CheckErr3(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr3(err)
+	}
+	if matched {
+		add := absolute
+		fileHelperServiceObject.searchResult = append(fileHelperServiceObject.searchResult, add)
+	}
+
+	return nil
+}
+
+//FileAvailabilityCheck checks whether file is available at given location
+func FileAvailabilityCheck(filePath string) bool {
+	tmp, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr(linkErr) == nil {
+		filePath = tmp
+	}
+	fileInfo, err := os.Stat(filePath)
+	if fileInfo == nil && errormdl.CheckErr(err) != nil {
+		return false
+	}
+	return true
+}
+
+// CleanPath escapes control characters in the path and normalizes separators
+func CleanPath(path string) string {
+	path = strings.Replace(path, "\n", "\\n", -1)
+	path = strings.Replace(path, "\t", "\\t", -1)
+	path = strings.Replace(path, "\r", "\\r", -1)
+	path = strings.Replace(path, "\b", "\\b", -1)
+	path = strings.Replace(path, "\a", "\\a", -1)
+	path = strings.Replace(path, "\v", "\\v", -1)
+
+	normalizedPath := filepath.Clean(path)
+	normalizedPath = filepath.ToSlash(normalizedPath)
+
+	if strings.HasSuffix(path, string(filepath.Separator)) || strings.HasSuffix(path, "/") {
+		normalizedPath = normalizedPath + "/"
+	}
+
+	return normalizedPath
+}
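+
+// For example (sketch, on a slash-separated OS): a tab becomes the two literal
+// characters `\` and `t`, and a trailing slash survives normalization:
+//
+//	filemdl.CleanPath("logs\t2018/jan/") // -> `logs\t2018/jan/` with the tab escaped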
+
+// AppendDataInFile appends data to the file, caching the file pointer for reuse across calls
+func AppendDataInFile(filePath string, data []byte, makeDir bool) error {
+	if makeDir {
+		createError := createRecursiveDirectoryForFile(filePath)
+		if errormdl.CheckErr(createError) != nil {
+			loggermdl.LogError(createError)
+			return errormdl.CheckErr(createError)
+		}
+	}
+	file := &os.File{}
+	rawPtr, ok := filePtrs.Get(filePath)
+	if !ok {
+		tmp, openError := os.OpenFile(filePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0777)
+		if errormdl.CheckErr(openError) != nil {
+			loggermdl.LogError(openError)
+			return errormdl.CheckErr(openError)
+		}
+		file = tmp
+	} else {
+		file, ok = rawPtr.(*os.File)
+		if !ok {
+			return errormdl.Wrap("file pointer casting error")
+		}
+	}
+
+	_, writeError := file.Write(data)
+	if errormdl.CheckErr(writeError) != nil {
+		loggermdl.LogError(writeError)
+		return errormdl.CheckErr(writeError)
+	}
+	filePtrs.SetNoExpiration(filePath, file)
+	return nil
+}
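+
+// Note (sketch): the file pointer is cached in filePtrs for reuse, so callers
+// should eventually release it via CloseFilePointer:
+//
+//	_ = filemdl.AppendDataInFile(logPath, []byte("line\n"), true)
+//	defer filemdl.CloseFilePointer(logPath)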
+
+// CloseFilePointer closes the cached file pointer for filePath and removes it from the cache
+func CloseFilePointer(filePath string) error {
+	file := &os.File{}
+	rawPtr, ok := filePtrs.Get(filePath)
+	if !ok {
+		tmp, openError := os.OpenFile(filePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0777)
+		if errormdl.CheckErr(openError) != nil {
+			loggermdl.LogError(openError)
+			return errormdl.CheckErr(openError)
+		}
+		file = tmp
+	} else {
+		file, ok = rawPtr.(*os.File)
+		if !ok {
+			return errormdl.Wrap("file pointer casting error")
+		}
+	}
+	file.Close()
+	filePtrs.Delete(filePath)
+	return nil
+}
+
+// WalkFunc is the callback invoked by Walk for each visited file or directory
+type WalkFunc func(path string, info os.FileInfo, err error) error
+
+// Walk - walks folder recursively
+func Walk(root string, walkFunc WalkFunc) error {
+	info, err := FileInfo(root)
+	if err != nil {
+		err = walkFunc(root, info, err)
+	} else {
+		err = walk(root, info, walkFunc)
+	}
+	return err
+}
+
+// walk - walks specified path
+func walk(path string, fileInfo os.FileInfo, walkFunc WalkFunc) error {
+	// if not dir
+	if !fileInfo.IsDir() {
+		return walkFunc(path, fileInfo, nil)
+	}
+	// if dir
+	err := walkFunc(path, fileInfo, nil)
+	if err != nil {
+		return err
+	}
+	fileInfos, err := ListDirectory(path)
+
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range fileInfos {
+		filePath := path + string(filepath.Separator) + fileInfo.Name()
+		_, err := FileInfo(filePath)
+		if err != nil {
+			if err = walkFunc(filePath, fileInfo, err); err != nil {
+				return err
+			}
+		} else {
+			err = walk(filePath, fileInfo, walkFunc)
+			if err != nil {
+				if !fileInfo.IsDir() {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// ListFileRecursively - returns array of filepath recursively from specified path
+func ListFileRecursively(path string) ([]string, error) {
+	paths := make([]string, 0)
+	err := Walk(path, func(filePath string, info os.FileInfo, err error) error {
+		// propagate walk errors; info is nil when err is non-nil
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			paths = append(paths, filePath)
+		}
+		return nil
+	})
+	if err != nil {
+		return paths, err
+	}
+	return paths, nil
+}
+
+// OpenFile - opens file with specified mode
+func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Open - opens file with read mode
+func Open(name string) (*os.File, error) {
+	return OpenFile(name, os.O_RDONLY, 0)
+}
+
+// FastReadFile - reads contents from provided file path with fast read method
+func FastReadFile(filePath string) ([]byte, error) {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	file, err := Open(filePath)
+	if errormdl.CheckErr(err) != nil {
+		return nil, errormdl.CheckErr(err)
+	}
+	defer file.Close()
+	fileStat, err := file.Stat()
+	if errormdl.CheckErr(err) != nil {
+		return nil, errormdl.CheckErr(err)
+	}
+	fileBytes := make([]byte, fileStat.Size())
+	bytesRead, err := file.Read(fileBytes)
+	if errormdl.CheckErr(err) == nil && bytesRead < len(fileBytes) {
+		err = errormdl.Wrap("short read")
+	}
+	return fileBytes, err
+}
+
+// FastWriteFile - writes provided bytes to file with fast write method
+func FastWriteFile(filePath string, data []byte, makeDir bool, createBackup bool, safeMode bool) error {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	if makeDir {
+		createError := createRecursiveDirectoryForFile(filePath)
+		if errormdl.CheckErr(createError) != nil {
+			loggermdl.LogError(createError)
+			return errormdl.CheckErr(createError)
+		}
+	}
+	if createBackup {
+		backupErr := createFileBackup(filePath)
+		if backupErr != nil {
+			loggermdl.LogError(backupErr)
+		}
+	}
+
+	if safeMode {
+		return writeFileSafely(filePath, data, 0644)
+	}
+
+	return writeFile(filePath, data, 0644)
+}
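+
+// Example (hypothetical path and payload): safeMode trades a little speed for
+// atomicity by staging the data in TempDir and swapping it in through
+// AtomicReplaceFile:
+//
+//	err := filemdl.FastWriteFile("data/out.json", payload, true, false, true)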
+
+func writeFileSafely(filePath string, data []byte, perm os.FileMode) error {
+	_, name := filepath.Split(filePath)
+	tmpFile, err := ioutil.TempFile(TempDir, name)
+	if err != nil {
+		return err
+	}
+
+	closeTempFile := true
+
+	defer func() {
+		if closeTempFile {
+			tmpFile.Close()
+		}
+	}()
+
+	n, err := tmpFile.Write(data)
+	if err != nil {
+		return errormdl.Wrap("cannot create temp file:" + err.Error())
+	}
+
+	if n < len(data) {
+		return errormdl.Wrap("cannot create temp file: short write")
+	}
+	err = tmpFile.Sync()
+	if err != nil {
+		return err
+	}
+	tmpFileName := tmpFile.Name()
+
+	info, err := os.Stat(filePath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	// get the file mode from the original file and use that for the replacement
+	// file, too.
+	if err == nil {
+		if err := os.Chmod(tmpFileName, info.Mode()); err != nil {
+			return errormdl.Wrap("can't set filemode on tempfile: " + tmpFileName + ", error: " + err.Error())
+		}
+	}
+	// loggermdl.LogError(tmpFileName)
+	// set closeTempFile to false and close the tempFile. The file will be opened in AtomicReplaceFile function.
+	// If we don't close the file here, we will get "file is being used by another process" error
+
+	closeTempFile = false
+	tmpFile.Close()
+
+	if err := AtomicReplaceFile(tmpFileName, filePath); err != nil {
+		loggermdl.LogError("Atomic replace failed - ", err)
+		return errormdl.Wrap("cannot replace " + filePath + " with " + tmpFileName)
+	}
+	return nil
+}
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := OpenFile(filename, os.O_WRONLY|os.O_CREATE, perm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	n, err := f.Write(data)
+	if err != nil {
+		return err
+	}
+	if n < len(data) {
+		return errormdl.Wrap("short write")
+	}
+	return f.Sync()
+}
+
+// AppendDataToFile appends data to the file and returns the offset at which the new data begins along with its size
+func AppendDataToFile(filePath string, data []byte, makeDir bool) (startOffset int64, dataSize int, err error) {
+	path, linkErr := os.Readlink(filePath)
+	if errormdl.CheckErr1(linkErr) == nil {
+		filePath = path
+	}
+	if makeDir {
+		// create file recursively
+		createError := createRecursiveDirectoryForFile(filePath)
+		if errormdl.CheckErr(createError) != nil {
+			loggermdl.LogError(createError)
+			return startOffset, dataSize, errormdl.CheckErr(createError)
+		}
+	}
+	f, err := OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		return startOffset, dataSize, err
+	}
+	defer f.Close()
+	fileStat, err := f.Stat()
+	if err != nil {
+		return startOffset, dataSize, err
+	}
+	startOffset = fileStat.Size()
+
+	n, err := f.Write(data)
+	if err != nil {
+		return startOffset, dataSize, err
+	}
+
+	if n < len(data) {
+		return startOffset, dataSize, errormdl.Wrap("short write")
+	}
+
+	dataSize = n
+	err = f.Sync()
+	return startOffset, dataSize, err
+}
+
+// ReadFileFromOffset reads dataSize bytes from f starting at startOffset
+func ReadFileFromOffset(f *os.File, startOffset int64, dataSize int64) ([]byte, error) {
+	bytesOfFile := make([]byte, dataSize)
+	_, err := f.ReadAt(bytesOfFile, startOffset)
+	if err != nil {
+		return bytesOfFile, err
+	}
+	return bytesOfFile, nil
+}
+
+// WriteFileAtOffset writes bytesToWrite to f starting at startOffset and returns the number of bytes written
+func WriteFileAtOffset(f *os.File, startOffset int64, bytesToWrite []byte) (int64, error) {
+
+	n, err := f.WriteAt(bytesToWrite, startOffset)
+	if err != nil {
+		return 0, err
+	}
+
+	if n < len(bytesToWrite) {
+		return int64(n), errormdl.Wrap("short write")
+	}
+
+	return int64(n), nil
+}
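+
+// Sketch of the offset round trip (hypothetical file and record): append a
+// record, keep its offset and size, then read it back directly later:
+//
+//	offset, size, err := filemdl.AppendDataToFile(path, record, true)
+//	// ... later
+//	f, _ := filemdl.Open(path)
+//	defer f.Close()
+//	data, err := filemdl.ReadFileFromOffset(f, offset, int64(size))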
+
+// AcquireFileLock acquires an exclusive advisory lock on the file at filePath
+func AcquireFileLock(filePath string) (*fslock.Lock, error) {
+	lock := fslock.New(filePath)
+	lockErr := lock.Lock()
+	if lockErr != nil {
+		loggermdl.LogError("failed to acquire lock > " + lockErr.Error())
+		return nil, errormdl.Wrap("failed to acquire lock > " + lockErr.Error())
+	}
+	return lock, nil
+}
+
+// ReleaseFileLock releases a lock obtained via AcquireFileLock
+func ReleaseFileLock(lock *fslock.Lock) error {
+	return lock.Unlock()
+}
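+
+// Typical pairing (sketch): acquire the lock, defer its release, then perform
+// the file mutation while the lock is held:
+//
+//	lock, err := filemdl.AcquireFileLock(path)
+//	if err != nil {
+//		return err
+//	}
+//	defer filemdl.ReleaseFileLock(lock)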
diff --git a/v2/filemdl/filemdl_darwin.go b/v2/filemdl/filemdl_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..acd187767d9c8ebcafabb998afd29d37eca8e74f
--- /dev/null
+++ b/v2/filemdl/filemdl_darwin.go
@@ -0,0 +1,15 @@
+// +build darwin
+
+package filemdl
+
+import (
+	"os"
+)
+
+// AtomicReplaceFile atomically replaces the destination file or directory with the
+// source.  It is guaranteed to either replace the target file entirely, or not
+// change either file.
+func AtomicReplaceFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
diff --git a/v2/filemdl/filemdl_linux.go b/v2/filemdl/filemdl_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..acd187767d9c8ebcafabb998afd29d37eca8e74f
--- /dev/null
+++ b/v2/filemdl/filemdl_linux.go
@@ -0,0 +1,15 @@
+// +build linux
+
+package filemdl
+
+import (
+	"os"
+)
+
+// AtomicReplaceFile atomically replaces the destination file or directory with the
+// source.  It is guaranteed to either replace the target file entirely, or not
+// change either file.
+func AtomicReplaceFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
diff --git a/v2/filemdl/filemdl_test.go b/v2/filemdl/filemdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..365b2c68c198ff53c321063327862a58ee580679
--- /dev/null
+++ b/v2/filemdl/filemdl_test.go
@@ -0,0 +1,1214 @@
+// TODO: Commented out because of the following error while setting up go modules
+// Command - go mod tidy
+// Error -
+// go: corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl tested by
+//         corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl.test imports
+//         corelab.mkcl.org/MKCLOS/coredevelopmentplatform/coreospackage/dalhelper imports
+//         gopkg.in/ahmetb/go-linq.v3: gopkg.in/ahmetb/go-linq.v3@v3.1.0: parsing go.mod:
+//         module declares its path as: github.com/ahmetb/go-linq/v3
+//                 but was required as: gopkg.in/ahmetb/go-linq.v3
+
+package filemdl
+
+// import (
+// 	"encoding/json"
+// 	_ "net/http/pprof"
+// 	"path/filepath"
+// 	"reflect"
+// 	"sync"
+// 	"testing"
+
+// 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/coreospackage/dalhelper"
+// 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+// 	"github.com/stretchr/testify/assert"
+// )
+
+// func TestCleanup(t *testing.T) {
+// 	err := DeleteDirectory("../testingdata/testData")
+// 	err = Unzip("../testingdata/testData.zip", "../testingdata")
+// 	assert.NoError(t, err, "Error Not expected")
+// }
+// func Test2ReadFile(t *testing.T) {
+// 	_, err := ReadFile("")
+// 	assert.Error(t, err, "Error is expected")
+// }
+
+// func Test1ReadFile(t *testing.T) {
+// 	type args struct {
+// 		filePath string
+// 	}
+
+// 	ba, _ := dalhelper.GetDataFromFDB("../testingdata/users.json")
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		want    []byte
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    []byte
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{"../testingdata/users.json"},
+// 			want:    ba,
+// 			wantErr: false,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			got, err := ReadFile(tt.args.filePath)
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("ReadFile() error = %v, wantErr %v", err, tt.wantErr)
+// 				return
+// 			}
+// 			if !reflect.DeepEqual(got, tt.want) {
+// 				t.Errorf("ReadFile() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestWriteFile(t *testing.T) {
+// 	type args struct {
+// 		filePath         string
+// 		data             []byte
+// 		makeDir          bool
+// 		createFileBackup bool
+// 	}
+// 	ba, _ := dalhelper.GetDataFromFDB("../testingdata/users.json")
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{"../testingdata/testData/writeFile/tmp.json", ba, false, false},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{"../testingdata/testData/writeFile/test/tmp.json", ba, false, false},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{"../testingdata/testData/writeFile/test/tmp.json", ba, true, false},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test4",
+// 			args:    args{"../testingdata/testData/writeFile/test/", ba, true, false},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test5",
+// 			args:    args{"../testingdata/testData/writeFile/test2?/tmp.json", ba, true, false},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test6",
+// 			args:    args{"../testingdata/.symlink", ba, true, false},
+// 			wantErr: false,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := WriteFile(tt.args.filePath, tt.args.data, tt.args.makeDir, tt.args.createFileBackup); (err != nil) != tt.wantErr {
+// 				t.Errorf("WriteFile() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestAppendFile(t *testing.T) {
+// 	type args struct {
+// 		filename string
+// 		text     string
+// 	}
+
+// 	str := `
+// 	{
+// 		"title": "Abhyas Kausalya unit 1",
+// 		"contentID": "fc60934f5b3e0eae14d46f6a9a05f119",
+// 		"author": "",
+// 		"isQC": false,
+// 		"QCBy": null,
+// 		"qc": "fc60934f5b3e0eae14d46f6a9a05f119",
+// 		"log": "fc60934f5b3e0eae14d46f6a9a05f119",
+// 		"duration": 0,
+// 		"languageID": "1",
+// 		"name": "Abhyas Kausalya unit 1",
+// 		"originalName": "Abhyas Kausalya unit 1.pdf",
+// 		"path": "2a57dc3355a316bf5922c31851c7b73c.pdf",
+// 		"size": "110515",
+// 		"tags": ["5th",
+// 		" Study Skill",
+// 		" Abhyas Kausalya unit 1"],
+// 		"type": "",
+// 		"isUploaded": true,
+// 		"uploadedBy": "rashmis",
+// 		"UploadedDate": "25-01-2018",
+// 		"useIn": null,
+// 		"thumbnail": "",
+// 		"isLatest": false,
+// 		"srtFiles": [],
+// 		"description": "",
+// 		"isSupportingFiles": false,
+// 		"passcode": "",
+// 		"fileHash": "14504648841716758967",
+// 		"location": "/Abhyas Kausalya unit 1/Abhyas Kausalya unit 1.pdf",
+// 		"localName": "2a57dc3355a316bf5922c31851c7b73c.pdf",
+// 		"isCompressed": false,
+// 		"compressedSize": "",
+// 		"compressedPath": "",
+// 		"compressed480Size": "",
+// 		"compressed480Path": ""
+// 	}
+// 	`
+// 	ln := 1018
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		want    int
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{filename: "../testingdata/testData/appendFile/tmp.json", text: str},
+// 			want:    ln,
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{filename: "../testingdata/testData/appendFile/test5/tmp.json", text: str},
+// 			want:    0,
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			got, err := AppendFile(tt.args.filename, tt.args.text)
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("AppendFile() error = %v, wantErr %v", err, tt.wantErr)
+// 				return
+// 			}
+// 			if got != tt.want {
+// 				t.Errorf("AppendFile() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestDeleteFile(t *testing.T) {
+// 	type args struct {
+// 		filePath string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{"../testingdata/testData/deleteFile/tmp.json"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{"../testingdata/testData/deleteFile/test/tmp.json"},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{"../testingdata/testData/deleteFile/test"},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test4",
+// 			args:    args{"../testingdata/testData/deleteFile/"},
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := DeleteFile(tt.args.filePath); (err != nil) != tt.wantErr {
+// 				t.Errorf("DeleteFile() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestRenameFile(t *testing.T) {
+// 	type args struct {
+// 		oldFilePath string
+// 		newFilePath string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{oldFilePath: "../testingdata/testData/renameFile/log.txt", newFilePath: "../testingdata/testData/renameFile/log1.txt"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{oldFilePath: "../testingdata/testData/renameFile/log1.txt", newFilePath: "../testingdata/processedData/testData/renameFile/log.txt"},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{oldFilePath: "../testingdata/testData/renameFile/log.txt", newFilePath: "../testingdata/processedData/testData/renameFile/log.txt"},
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := RenameFile(tt.args.oldFilePath, tt.args.newFilePath); (err != nil) != tt.wantErr {
+// 				t.Errorf("RenameFile() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestCreateDirectory(t *testing.T) {
+// 	type args struct {
+// 		directoryPath string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{"../testingdata/testData/createDir/test"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{"../testingdata/testData/createDir/test"},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{"../testingdata/testData/createDir/test?"},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test4",
+// 			args:    args{"../testingdata/testData/createDir/test1/test2/"},
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := CreateDirectory(tt.args.directoryPath); (err != nil) != tt.wantErr {
+// 				t.Errorf("CreateDirectory() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestDeleteDirectory(t *testing.T) {
+// 	type args struct {
+// 		directoryPath string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{"../testingdata/testData/deleteDir/test"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{"../testingdata/testData/deleteDir/test"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{"../testingdata/testData/deleteDir/test2"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test4",
+// 			args:    args{"../testingdata/testData/deleteDir/test1/test2/"},
+// 			wantErr: false,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := DeleteDirectory(tt.args.directoryPath); (err != nil) != tt.wantErr {
+// 				t.Errorf("DeleteDirectory() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestListDirectory(t *testing.T) {
+// 	type args struct {
+// 		directoryPath string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		want    int
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{"../testingdata/testData/listDir/tmp"},
+// 			want:    2,
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{"../testingdata/testData/listDir/test1/test2/"},
+// 			want:    0,
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			got, err := ListDirectory(tt.args.directoryPath)
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("ListDirectory() error = %v, wantErr %v", err, tt.wantErr)
+// 				return
+// 			}
+// 			if !reflect.DeepEqual(len(got), tt.want) {
+// 				t.Errorf("ListDirectory() = %v, want %v", len(got), tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestMoveFileToOtherHost(t *testing.T) {
+// 	type args struct {
+// 		source      string
+// 		destination string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{source: "../testingdata/testData/MoveFileToOtherHost/test.mp4", destination: "../testingdata/testData/MoveFileToOtherHost/test2/test.mp4"},
+// 			wantErr: false,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			// errormdl.IsTestingNegetiveCaseOn1 = true
+// 			if err := MoveFileToOtherHost(tt.args.source, tt.args.destination); (err != nil) != tt.wantErr {
+// 				t.Errorf("MoveFileToOtherHost() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 			// errormdl.IsTestingNegetiveCaseOn1 = false
+// 		})
+
+// 	}
+
+// }
+
+// func Test1MoveFileToOtherHost(t *testing.T) {
+// 	type args struct {
+// 		source      string
+// 		destination string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{source: "../testingdata/testData/MoveFileToOtherHost/test.mp4", destination: "../testingdata/testData/MoveFileToOtherHost/test2/test.mp4"},
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			errormdl.IsTestingNegetiveCaseOn = true
+// 			err := MoveFileToOtherHost(tt.args.source, tt.args.destination)
+// 			errormdl.IsTestingNegetiveCaseOn = false
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("MoveFileToOtherHost() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+
+// 	}
+
+// }
+// func Test2MoveFileToOtherHost(t *testing.T) {
+// 	err := MoveFileToOtherHost("../testingdata/testData/MoveFileToOtherHost/test1.mp4", "../testingdata/testData/MoveFileToOtherHost/output/test1.mp4")
+// 	assert.NoError(t, err, "This is test error")
+// }
+// func TestCopyFile(t *testing.T) {
+// 	type args struct {
+// 		source      string
+// 		destination string
+// 		makeDir     bool
+// 	}
+// 	ba, _ := dalhelper.GetDataFromFDB("../testingdata/users.json")
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		want    int64
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int64
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{source: "../testingdata/users.json", destination: "../testingdata/testData/copyFile/tmp.json", makeDir: true},
+// 			want:    int64(len(ba)),
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int64
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{source: "../testingdata/users.json", destination: "../testingdata/testData/copyFile/test2??/tmp.json", makeDir: true},
+// 			want:    0,
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int64
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{source: "../testingdata/users.json", destination: "../testingdata/testData/copyFile/test3/tmp.json", makeDir: false},
+// 			want:    0,
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int64
+// 			wantErr bool
+// 		}{
+// 			name:    "test4",
+// 			args:    args{source: "../testingdata/", destination: "../testingdata/testData/copyFile/test3/", makeDir: true},
+// 			want:    0,
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			got, err := CopyFile(tt.args.source, tt.args.destination, tt.args.makeDir)
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("CopyFile() error = %v, wantErr %v", err, tt.wantErr)
+// 				return
+// 			}
+// 			if got != tt.want {
+// 				t.Errorf("CopyFile() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func Test1CopyFile(t *testing.T) {
+// 	type args struct {
+// 		source      string
+// 		destination string
+// 		makeDir     bool
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		want    int64
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			want    int64
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{source: "../testingdata/users.json", destination: "../testingdata/testData/copyFile/test2/tmp.json", makeDir: true},
+// 			want:    0,
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			errormdl.IsTestingNegetiveCaseOn3 = true
+// 			got, err := CopyFile(tt.args.source, tt.args.destination, tt.args.makeDir)
+// 			errormdl.IsTestingNegetiveCaseOn3 = false
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("CopyFile() error = %v, wantErr %v", err, tt.wantErr)
+// 				return
+// 			}
+// 			if got != tt.want {
+// 				t.Errorf("CopyFile() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func Test2CopyDir(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+// 	err := CopyDir("../testingdata/tmp", "../testingdata/testData/copyDir")
+// 	errormdl.IsTestingNegetiveCaseOn2 = false
+// 	assert.Error(t, err, "This Should not be Error")
+// }
+
+// func Test1CopyDir(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	err := CopyDir("../testingdata/tmp", "../testingdata/testData/copyDir/failed")
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	assert.Error(t, err, "This Should not be Error")
+// }
+
+// func TestCopyDir(t *testing.T) {
+// 	type args struct {
+// 		source string
+// 		dest   string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{source: "../testingdata/tmp", dest: "../testingdata/testData/copyDir/test4"},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{source: "../testingdata/roshan", dest: "../testingdata/testData/copyDir/test2??/tmp.json"},
+// 			wantErr: true,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test3",
+// 			args:    args{source: "../testingdata/users.json", dest: "../testingdata/testData/copyDir/test3"},
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := CopyDir(tt.args.source, tt.args.dest); (err != nil) != tt.wantErr {
+// 				t.Errorf("CopyDir() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestTruncateFile(t *testing.T) {
+// 	type args struct {
+// 		path string
+// 		size int64
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		wantErr bool
+// 	}{
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test1",
+// 			args:    args{path: "../testingdata/testData/truncateFile/tmp.json", size: 123},
+// 			wantErr: false,
+// 		},
+// 		struct {
+// 			name    string
+// 			args    args
+// 			wantErr bool
+// 		}{
+// 			name:    "test2",
+// 			args:    args{path: "../testingdata/testData/truncateFile/test10/tmp.json", size: 123},
+// 			wantErr: true,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if err := TruncateFile(tt.args.path, tt.args.size); (err != nil) != tt.wantErr {
+// 				t.Errorf("TruncateFile() error = %v, wantErr %v", err, tt.wantErr)
+// 			}
+// 		})
+// 	}
+// }
+// func Test3FileHelperServiceObject_FileSearch(t *testing.T) {
+// 	obj := FileHelperServiceObject{}
+// 	_, err := obj.FileSearch("tmp.json", "../testingdata/tmp/tmp.json")
+// 	assert.NoError(t, err, "This should not return an error")
+// }
+
+// func Test2FileHelperServiceObject_FileSearch(t *testing.T) {
+// 	obj := FileHelperServiceObject{}
+
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	_, err := obj.FileSearch("tmp.json", "../testingdata/tmp")
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	assert.Error(t, err, "Error expected here")
+// }
+
+// func Test1FileHelperServiceObject_FileSearch(t *testing.T) {
+// 	obj := FileHelperServiceObject{}
+
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	_, err := obj.FileSearch("tmp.json", "../testingdata/tmp")
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "Error expected here")
+// }
+
+// // func TestFileHelperServiceObject_FileSearch(t *testing.T) {
+// // 	type args struct {
+// // 		fileName string
+// // 		path     string
+// // 	}
+// // 	tests := []struct {
+// // 		name                    string
+// // 		fileHelperServiceObject *FileHelperServiceObject
+// // 		args                    args
+// // 		want                    []string
+// // 		wantErr                 bool
+// // 	}{
+// // 		struct {
+// // 			name                    string
+// // 			fileHelperServiceObject *FileHelperServiceObject
+// // 			args                    args
+// // 			want                    []string
+// // 			wantErr                 bool
+// // 		}{
+// // 			name: "test1",
+// // 			fileHelperServiceObject: &FileHelperServiceObject{},
+// // 			args:    args{fileName: "tmp.json", path: "../testingdata/tmp/"},
+// // 			want:    []string{"D:\\go\\src\\CoreOSWork\\tmp\\tmp.json"},
+// // 			wantErr: false,
+// // 		},
+// // 	}
+// // 	for _, tt := range tests {
+// // 		t.Run(tt.name, func(t *testing.T) {
+// // 			got, err := tt.fileHelperServiceObject.FileSearch(tt.args.fileName, tt.args.path)
+// // 			if (err != nil) != tt.wantErr {
+// // 				t.Errorf("FileHelperServiceObject.FileSearch() error = %v, wantErr %v", err, tt.wantErr)
+// // 				return
+// // 			}
+// // 			if !reflect.DeepEqual(got, tt.want) {
+// // 				t.Errorf("FileHelperServiceObject.FileSearch() = %v, want %v", got, tt.want)
+// // 			}
+// // 		})
+// // 	}
+// // }
+
+// func TestCleanPath(t *testing.T) {
+// 	type args struct {
+// 		path string
+// 	}
+// 	tests := []struct {
+// 		name string
+// 		args args
+// 		want string
+// 	}{
+// 		struct {
+// 			name string
+// 			args args
+// 			want string
+// 		}{
+// 			name: "test1",
+// 			args: args{"c:\\d\\e/f/"},
+// 			want: "c:/d/e/f/",
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if got := CleanPath(tt.args.path); got != tt.want {
+// 				t.Errorf("CleanPath() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func Test_findFile(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	obj := FileHelperServiceObject{}
+// 	obj.searchFileName = "tmp.json"
+// 	err := filepath.Walk("../testingdata/tmp", obj.findFile)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "Error expected here")
+// }
+// func Test4findFile(t *testing.T) {
+// 	obj := FileHelperServiceObject{}
+// 	obj.searchFileName = "tmp.json"
+// 	err := filepath.Walk("../testingdata/tmp", obj.findFile)
+// 	assert.NoError(t, err, "This Should not be Error")
+// }
+
+// func Test3findFile(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn3 = true
+// 	obj := FileHelperServiceObject{}
+// 	obj.searchFileName = "tmp.json"
+// 	err := filepath.Walk("../testingdata/tmp", obj.findFile)
+// 	errormdl.IsTestingNegetiveCaseOn3 = false
+// 	assert.Error(t, err, "Error expected here")
+// }
+
+// func Test2findFile(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+// 	obj := FileHelperServiceObject{}
+// 	obj.searchFileName = "tmp.json"
+// 	err := filepath.Walk("../testingdata/tmp", obj.findFile)
+// 	errormdl.IsTestingNegetiveCaseOn2 = false
+// 	assert.Error(t, err, "Error expected here")
+// }
+
+// func Test1findFile(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	obj := FileHelperServiceObject{}
+// 	obj.searchFileName = "tmp.json"
+// 	err := filepath.Walk("../testingdata/tmp", obj.findFile)
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+// 	assert.Error(t, err, "Error expected here")
+// }
+// func TestFileAvailabilityCheck(t *testing.T) {
+// 	type args struct {
+// 		filePath string
+// 	}
+// 	tests := []struct {
+// 		name string
+// 		args args
+// 		want bool
+// 	}{
+// 		struct {
+// 			name string
+// 			args args
+// 			want bool
+// 		}{
+// 			name: "test1",
+// 			args: args{"../testingdata/testData/fileAva/test.txt"},
+// 			want: true,
+// 		},
+// 		struct {
+// 			name string
+// 			args args
+// 			want bool
+// 		}{
+// 			name: "test2",
+// 			args: args{"../testingdata/testData/fileAva/test1.txt"},
+// 			want: false,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if got := FileAvailabilityCheck(tt.args.filePath); got != tt.want {
+// 				t.Errorf("FileAvailabilityCheck() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+// func TestGetInstance(t *testing.T) {
+// 	err := GetInstance().Error
+// 	assert.NoError(t, err, "No Error is expected here")
+// }
+// func Test1Filemdl_Save(t *testing.T) {
+// 	err := GetInstance().Save("testPath", []byte("roshan"), false, false)
+// 	assert.NoError(t, err, "No Error is expected here")
+// }
+// func TestFilemdl_Save(t *testing.T) {
+// 	Init(3, 3)
+// 	str := `
+// 	{
+// 		"title": "Abhyas Kausalya unit 1",
+// 		"contentID": "fc60934f5b3e0eae14d46f6a9a05f119",
+// 		"author": "",
+// 		"isQC": false,
+// 		"QCBy": null,
+// 		"qc": "fc60934f5b3e0eae14d46f6a9a05f119",
+// 		"log": "fc60934f5b3e0eae14d46f6a9a05f119",
+// 		"duration": 0,
+// 		"languageID": "1",
+// 		"name": "Abhyas Kausalya unit 1",
+// 		"originalName": "Abhyas Kausalya unit 1.pdf",
+// 		"path": "2a57dc3355a316bf5922c31851c7b73c.pdf",
+// 		"size": "110515",
+// 		"tags": ["5th",
+// 		" Study Skill",
+// 		" Abhyas Kausalya unit 1"],
+// 		"type": "",
+// 		"isUploaded": true,
+// 		"uploadedBy": "rashmis",
+// 		"UploadedDate": "25-01-2018",
+// 		"useIn": null,
+// 		"thumbnail": "",
+// 		"isLatest": false,
+// 		"srtFiles": [],
+// 		"description": "",
+// 		"isSupportingFiles": false,
+// 		"passcode": "",
+// 		"fileHash": "14504648841716758967",
+// 		"location": "/Abhyas Kausalya unit 1/Abhyas Kausalya unit 1.pdf",
+// 		"localName": "2a57dc3355a316bf5922c31851c7b73c.pdf",
+// 		"isCompressed": false,
+// 		"compressedSize": "",
+// 		"compressedPath": "",
+// 		"compressed480Size": "",
+// 		"compressed480Path": ""
+// 	}
+// 	`
+// 	ba, _ := json.Marshal(str)
+// 	var err error
+// 	wg := sync.WaitGroup{}
+// 	wg.Add(32)
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	for index := 0; index < 8; index++ {
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp.json", ba, false, false)
+// 			assert.Error(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp1.json", ba, false, false)
+// 			assert.Error(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp2.json", ba, false, false)
+// 			assert.Error(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp3.json", ba, false, false)
+// 			assert.Error(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 	}
+// 	wg.Wait()
+// 	wg.Add(32)
+
+// 	errormdl.IsTestingNegetiveCaseOn = false
+
+// 	for index := 0; index < 8; index++ {
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp.json", ba, false, false)
+// 			assert.NoError(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp1.json", ba, false, false)
+// 			assert.NoError(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp2.json", ba, false, false)
+// 			assert.NoError(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 		go func() {
+// 			err := GetInstance().Save("../testingdata/processedData/output/tmp3.json", ba, false, false)
+// 			assert.NoError(t, err, "Check this error")
+// 			wg.Done()
+// 		}()
+// 	}
+// 	wg.Wait()
+// 	assert.NoError(t, err, "This should not return error")
+// }
+
+// // func Test1Save(t *testing.T) {
+// // 	str := `
+// // 	{
+// // 		"title": "Abhyas Kausalya unit 1",
+// // 		"contentID": "fc60934f5b3e0eae14d46f6a9a05f119",
+// // 		"author": "",
+// // 		"isQC": false,
+// // 		"QCBy": null,
+// // 		"qc": "fc60934f5b3e0eae14d46f6a9a05f119",
+// // 		"log": "fc60934f5b3e0eae14d46f6a9a05f119",
+// // 		"duration": 0,
+// // 		"languageID": "1",
+// // 		"name": "Abhyas Kausalya unit 1",
+// // 		"originalName": "Abhyas Kausalya unit 1.pdf",
+// // 		"path": "2a57dc3355a316bf5922c31851c7b73c.pdf",
+// // 		"size": "110515",
+// // 		"tags": ["5th",
+// // 		" Study Skill",
+// // 		" Abhyas Kausalya unit 1"],
+// // 		"type": "",
+// // 		"isUploaded": true,
+// // 		"uploadedBy": "rashmis",
+// // 		"UploadedDate": "25-01-2018",
+// // 		"useIn": null,
+// // 		"thumbnail": "",
+// // 		"isLatest": false,
+// // 		"srtFiles": [],
+// // 		"description": "",
+// // 		"isSupportingFiles": false,
+// // 		"passcode": "",
+// // 		"fileHash": "14504648841716758967",
+// // 		"location": "/Abhyas Kausalya unit 1/Abhyas Kausalya unit 1.pdf",
+// // 		"localName": "2a57dc3355a316bf5922c31851c7b73c.pdf",
+// // 		"isCompressed": false,
+// // 		"compressedSize": "",
+// // 		"compressedPath": "",
+// // 		"compressed480Size": "",
+// // 		"compressed480Path": ""
+// // 	}
+// // 	`
+// // 	errormdl.IsTestingNegetiveCaseOn = true
+// // 	ba, _ := json.Marshal(str)
+// // 	err := GetInstance().Save("../testingdata/processedData/output/tmp.json", ba)
+// // 	errormdl.IsTestingNegetiveCaseOn = false
+// // 	assert.Error(t, err, "Check this error")
+// // }
+
+// func Test_createFile(t *testing.T) {
+// 	_, err := createFile("../testingdata/processedData/output/createFile.json")
+// 	assert.NoError(t, err, "This should not return error")
+// }
+
+// func TestReadFile(t *testing.T) {
+// 	type args struct {
+// 		filePath string
+// 	}
+// 	tests := []struct {
+// 		name    string
+// 		args    args
+// 		want    []byte
+// 		wantErr bool
+// 	}{
+// 		// TODO: Add test cases.
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			got, err := ReadFile(tt.args.filePath)
+// 			if (err != nil) != tt.wantErr {
+// 				t.Errorf("ReadFile() error = %v, wantErr %v", err, tt.wantErr)
+// 				return
+// 			}
+// 			if !reflect.DeepEqual(got, tt.want) {
+// 				t.Errorf("ReadFile() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+// }
+
+// func TestMoveDirectory(t *testing.T) {
+// 	err := MoveDirectory("../testingdata/testData/move/test", "../testingdata/testData/move/test2/test")
+// 	assert.NoError(t, err, "No error expected here")
+// }
+
+// func TestMoveFile(t *testing.T) {
+// 	err := MoveFile("../testingdata/testData/move/test1.txt", "../testingdata/testData/move/test2/test.txt")
+// 	assert.NoError(t, err, "No error expected here")
+// }
+
+// func TestReplaceFile(t *testing.T) {
+// 	ba, _ := dalhelper.GetDataFromFDB("../testingdata/users.json")
+// 	err := ReplaceFile(ba, "../testingdata/processedData/output/ReplaceFile.json", false)
+// 	assert.NoError(t, err, "This should not throw error")
+// }
+
+// func TestFileHelper_triggerWritingData(t *testing.T) {
+// 	obj := GetInstance()
+// 	obj.triggerWritingData()
+// 	assert.NoError(t, nil, "No error returned by this")
+// }
+
+// func TestFileInfo(t *testing.T) {
+// 	_, err := FileInfo("../testingdata/users.json")
+// 	assert.NoError(t, err, "This should not throw error")
+// }
+
+// func Test2WriteFile(t *testing.T) {
+// 	ba, _ := dalhelper.GetDataFromFDB("../testingdata/users.json")
+// 	err := WriteFile("../testingdata/users.json", ba, true, true)
+// 	assert.NoError(t, err, "No error expected here")
+// }
+// func Test1_createFileBackup(t *testing.T) {
+// 	err := createFileBackup("../testingdata/tmp/tmp.json")
+// 	assert.Error(t, err, "Error expected Here")
+// }
+// func TestSetBackPath(t *testing.T) {
+// 	err := SetBackPath("../testingdata/processedData/test/backup/", "../testingdata/processedData/DB/")
+// 	assert.NoError(t, err, "No Error Here")
+// }
+
+// func Test1SetBackPath(t *testing.T) {
+// 	err := SetBackPath("", "../testingdata/processedData/DB/")
+// 	assert.Error(t, err, "Error expected Here")
+// }
+// func Test2SetBackPath(t *testing.T) {
+// 	err := SetBackPath("../testingdata/processedData/test/backup", "../testingdata/processedData/DB/")
+// 	assert.Error(t, err, "Error expected Here")
+// }
+
+// func Test3SetBackPath(t *testing.T) {
+// 	err := SetBackPath("../testingdata/processedData/test/backup/", "../testingdata/processedData/DB")
+// 	assert.Error(t, err, "Error expected Here")
+// }
+
+// func Test4SetBackPath(t *testing.T) {
+// 	err := SetBackPath("../testingdata/backup/", "../testingdata/")
+// 	assert.NoError(t, err, "No Error Here")
+// }
+
+// func Test_createFileBackup(t *testing.T) {
+// 	SetBackPath("../testingdata/backup/", "../testingdata/")
+// 	err := createFileBackup("../testingdata/tmp/tmp.json")
+// 	assert.NoError(t, err, "No Error Here")
+// }
+
+// func Test2_createFileBackup(t *testing.T) {
+// 	SetBackPath("../testingdata/backup/", "../testingdata/")
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	err := createFileBackup("../testingdata/tmp/tmp.json")
+// 	errormdl.IsTestingNegetiveCaseOn = false
+// 	assert.Error(t, err, "Error expected Here")
+// }
+
+// func Test1WriteFile(t *testing.T) {
+// 	ba, _ := dalhelper.GetDataFromFDB("../testingdata/users.json")
+// 	SetBackPath("../testingdata/backup/", "../testingdata/")
+// 	err := WriteFile("../testingdata/users.json", ba, true, true)
+// 	assert.NoError(t, err, "No Error Here")
+// }
diff --git a/v2/filemdl/filemdl_windows.go b/v2/filemdl/filemdl_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..824438f0018c14ee0ad7759c1540a506806aee68
--- /dev/null
+++ b/v2/filemdl/filemdl_windows.go
@@ -0,0 +1,51 @@
+package filemdl
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	moveFileReplaceExisting = 0x1
+	moveFileWriteThrough   = 0x8
+)
+
+var (
+	modkernel32     = syscall.NewLazyDLL("kernel32.dll")
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW
+
+// AtomicReplaceFile atomically replaces the destination file or directory with the
+// source.  It is guaranteed to either replace the target file entirely, or not
+// change either file.
+func AtomicReplaceFile(source, destination string) error {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return &os.LinkError{Op: "replace", Old: source, New: destination, Err: err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return &os.LinkError{Op: "replace", Old: source, New: destination, Err: err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, moveFileReplaceExisting|moveFileWriteThrough); err != nil {
+		return &os.LinkError{Op: "replace", Old: source, New: destination, Err: err}
+	}
+	return nil
+}
+
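+// moveFileEx wraps the MoveFileExW call loaded from kernel32.dll; a zero
+// return value signals failure, with the error taken from GetLastError when
+// one is set and EINVAL otherwise.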
+func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/v2/filemdl/filepack/packFile.go b/v2/filemdl/filepack/packFile.go
new file mode 100644
index 0000000000000000000000000000000000000000..f76f23b1f5a2dd6f852f2c8878c9b9c2e12d2d2b
--- /dev/null
+++ b/v2/filemdl/filepack/packFile.go
@@ -0,0 +1,2400 @@
+package filepack
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/securitymdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
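+// A pack file reserves the first sizeReservedForHeaders (53) bytes: a one-byte
+// file status, four single-byte flags (ready-for-upload, updated-and-not-committed,
+// reorg-required, reindex-required), a 15-byte zero-padded footer (infile index)
+// offset, a 15-byte footer size, the file hash and the last-updated timestamp;
+// record data begins at dataStartOffset.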
+const (
+	fileStatusReady                     = 0
+	fileStatusUpdatingData              = 1
+	fileStatusUpdatingIndex             = 2
+	fileStatusOffsetInFile              = 0
+	isReadyForUploadOffsetInFile        = 1
+	isUpdatedAndNotCommitedOffsetInFile = 2
+	isReorgRequiredOffsetInFile         = 3
+	isReindexRequiredOffsetInFile       = 4
+	footerOffsetInFile                  = 5
+	footerOffsetReservedSize            = 15
+	footerSizeOffset                    = 20
+	filehashOffest                      = 35
+	lastUpdatedOffset                   = 43
+	dataStartOffset                     = 53
+	sizeReservedForHeaders              = 53
+	lineBreak                           = "\r\n"
+
+	// IndexKeyValSeperator -
+	IndexKeyValSeperator = "="
+	// FileType - represents key for type of file. Used whenever we need to set file type field in json
+	FileType = "fileType"
+)
+
+// ErrNoDataFound is returned when no record matches the given criteria.
+var ErrNoDataFound = errors.New("No data found")
+
+var isSecurityEnabled bool
+var isCompressionEnabled bool
+var defaultSecurityKey = []byte{}
+
+var lineBreakBytes = []byte(lineBreak)
+
+// Init initializes the pack configuration: the security and compression flags and the default security key.
+func Init(isSecurityRequired, isCompressionRequired bool, securityKey string) {
+	defaultSecurityKey = []byte(securityKey)
+	isSecurityEnabled = isSecurityRequired
+	isCompressionEnabled = isCompressionRequired
+}
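+// A typical setup (hypothetical key value) is filepack.Init(true, true, "mySecretKey"),
+// which enables encryption and compression for subsequent pack-file writes.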
+
+// InFileIndex describes the infile index maintained for records of one file type.
+type InFileIndex struct {
+	FileType    string             `json:"fileType"`
+	IndexFields []InFileIndexField `json:"indexFields"`
+}
+
+// InFileIndexField maps an index field name to the gjson query used to extract its value.
+type InFileIndexField struct {
+	FieldName string `json:"fieldName"`
+	Query     string `json:"query"`
+}
+
+var mutexMap = map[string]*sync.Mutex{}
+var getMapSyncMutex = &sync.Mutex{}
+
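+// getLock returns the mutex associated with fileLocation, creating it on
+// first use; access to mutexMap itself is serialized by getMapSyncMutex.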
+func getLock(fileLocation string) *sync.Mutex {
+	getMapSyncMutex.Lock()
+	defer getMapSyncMutex.Unlock()
+	m, found := mutexMap[fileLocation]
+	if !found {
+		m = &sync.Mutex{}
+		mutexMap[fileLocation] = m
+	}
+	return m
+}
+
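+// appendPaddingPadValue left-pads value with zeros to padNumber digits,
+// e.g. appendPaddingPadValue(54, 15) yields "000000000000054".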
+func appendPaddingPadValue(value int64, padNumber int) string {
+	no := strconv.Itoa(padNumber)
+	return fmt.Sprintf("%0"+no+"d", value)
+}
+
+func getFileStatus(f *os.File) (int, error) {
+
+	data, err := filemdl.ReadFileFromOffset(f, fileStatusOffsetInFile, 1)
+	if err != nil {
+		loggermdl.LogError(err)
+		return -1, err
+	}
+	status, err := strconv.Atoi(string(data))
+	return status, err
+}
+
+func getFooterOffset(f *os.File) int64 {
+	data, err := filemdl.ReadFileFromOffset(f, footerOffsetInFile, footerOffsetReservedSize)
+	if err != nil {
+		loggermdl.LogError(err)
+		return -1
+	}
+	footerOffset, err := strconv.Atoi(string(data))
+	if err != nil {
+		loggermdl.LogError("err", err)
+		return -1
+	}
+
+	return int64(footerOffset)
+}
+
+func setFileStatusFlag(f *os.File, fileStatus int) error {
+	status := strconv.Itoa(fileStatus)
+	_, err := filemdl.WriteFileAtOffset(f, fileStatusOffsetInFile, []byte(status))
+	return err
+}
+
+func setFileReadyForUploadFlag(f *os.File, isReadyToUpload bool) error {
+	flagVal := strconv.FormatBool(isReadyToUpload)
+	_, err := filemdl.WriteFileAtOffset(f, isReadyForUploadOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFileUpdatedAndNotCommitedFlag(f *os.File, isUpdatedAndNotCommited bool) error {
+	flagVal := strconv.FormatBool(isUpdatedAndNotCommited)
+	_, err := filemdl.WriteFileAtOffset(f, isUpdatedAndNotCommitedOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFileReorgRequiredFlag(f *os.File, isReorgRequired bool) error {
+	flagVal := strconv.FormatBool(isReorgRequired)
+	_, err := filemdl.WriteFileAtOffset(f, isReorgRequiredOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFileReindexRequiredFlag(f *os.File, isReindexRequired bool) error {
+	flagVal := strconv.FormatBool(isReindexRequired)
+	_, err := filemdl.WriteFileAtOffset(f, isReindexRequiredOffsetInFile, []byte(flagVal))
+	return err
+}
+
+func setFooterOffset(f *os.File, footerOffset int64) error {
+	footerOffsetInString := appendPaddingPadValue(footerOffset, 15)
+	_, err := filemdl.WriteFileAtOffset(f, footerOffsetInFile, []byte(footerOffsetInString))
+	return err
+}
+
+func setFooterSize(f *os.File, footerSize int64) error {
+	footerSizeInString := appendPaddingPadValue(footerSize, 15)
+	_, err := filemdl.WriteFileAtOffset(f, footerSizeOffset, []byte(footerSizeInString))
+	return err
+}
+
+func getFooterSize(f *os.File) (int64, error) {
+	data, err := filemdl.ReadFileFromOffset(f, footerSizeOffset, 15)
+	if err != nil {
+		return -1, err
+	}
+	footerSize, err := strconv.Atoi(string(data))
+	if err != nil {
+		loggermdl.LogError("err", err)
+		return -1, err
+	}
+
+	return int64(footerSize), nil
+}
+
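+// setIndexDataInFile writes the serialized infile index (the footer) starting
+// at footerOffset.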
+func setIndexDataInFile(f *os.File, footerOffset int64, indexData string) error {
+	_, err := filemdl.WriteFileAtOffset(f, footerOffset, []byte(indexData))
+	return err
+}
+
+func compressData(data []byte) ([]byte, error) {
+	dataByteToWriteRes, hashError := filemdl.ZipBytes(data)
+	if errormdl.CheckErr2(hashError) != nil {
+		return data, errormdl.CheckErr2(hashError)
+	}
+	return dataByteToWriteRes, nil
+}
+
+func decompressData(data []byte) ([]byte, error) {
+	dataByteToWriteRes, hashError := filemdl.UnZipBytes(data)
+	if errormdl.CheckErr2(hashError) != nil {
+		return data, errormdl.CheckErr2(hashError)
+	}
+	return dataByteToWriteRes, nil
+}
+
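+// encryptData compresses the payload and then AES-encrypts it with a key
+// derived from the file name; decryptData reverses the two steps.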
+func encryptData(data []byte, fileName string) (dataOut []byte, err error) {
+	key, err := getSecurityKey(fileName)
+	if err != nil {
+		return dataOut, err
+	}
+	dataOut, err = compressData(data)
+	if errormdl.CheckErr1(err) != nil {
+		return
+	}
+	dataOut, err = securitymdl.AESEncrypt(dataOut, key)
+	if errormdl.CheckErr1(err) != nil {
+		return
+	}
+	return
+}
+
+func decryptData(data []byte, fileName string) (dataOut []byte, err error) {
+	key, err := getSecurityKey(fileName)
+	if err != nil {
+		return dataOut, err
+	}
+
+	dataOut, err = securitymdl.AESDecrypt(data, key)
+	if errormdl.CheckErr1(err) != nil {
+		return
+	}
+
+	dataOut, err = decompressData(dataOut)
+	if errormdl.CheckErr1(err) != nil {
+		return
+	}
+	return
+}
+
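+// getSecurityKey prefers a caller-registered key generator from securitymdl
+// and falls back to hashing the file name together with the default key.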
+func getSecurityKey(fileName string) ([]byte, error) {
+	key := []byte{}
+	var err error
+	securityKeyGenFunc := securitymdl.GetSecurityKeyGeneratorFunc()
+	if securityKeyGenFunc != nil {
+		key, err = securityKeyGenFunc(fileName)
+	} else {
+		key, err = GetKeyWithFileNameAndDefaultKey(fileName)
+	}
+	return key, err
+}
+
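+// addFileDataInFile writes a JSON record at the given offset, encrypting (and
+// optionally compressing) it per secParams, and returns the number of bytes
+// written including the optional trailing line break.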
+func addFileDataInFile(f *os.File, offset int64, data string, breakLine bool, rs *gjson.Result, secParams securitymdl.FDBSecParams) (int64, error) {
+	dataBytes := []byte(data)
+
+	// if isSecurityEnabled {
+	// 	dataBytes, err = encryptData(dataBytes, f.Name())
+	// 	if errormdl.CheckErr(err) != nil {
+	// 		return 0, errormdl.CheckErr(err)
+	// 	}
+	// }
+	// if !isSecurityEnabled && isCompressionEnabled {
+	// 	dataBytes, err = filemdl.ZipBytes(dataBytes)
+	// 	if err != nil {
+	// 		return 0, err
+	// 	}
+	// }
+
+	if secParams.EnableSecurity {
+		// _, fileName := filepath.Split(f.Name())
+
+		key, err := securitymdl.GenSecKeyBytes(f.Name(), rs)
+		if err != nil {
+			loggermdl.LogError("failed to generate security key: ", err)
+			return 0, err
+		}
+
+		dataBytes, err = encryptWithCompression(dataBytes, secParams.EnableCompression, key)
+		if err != nil {
+			loggermdl.LogError(err)
+			return 0, err
+		}
+	}
+
+	if breakLine {
+		dataBytes = append(dataBytes, lineBreakBytes...)
+	}
+
+	return filemdl.WriteFileAtOffset(f, offset, dataBytes)
+}
+
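+// addByteDataInFile writes raw bytes (e.g. media payloads) at the given
+// offset without encryption, optionally appending a line break.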
+func addByteDataInFile(f *os.File, offset int64, dataBytes []byte, breakLine bool) (int64, error) {
+	var err error
+
+	if breakLine {
+		dataBytes = append(dataBytes, lineBreakBytes...)
+	}
+	dataSize, err := filemdl.WriteFileAtOffset(f, offset, dataBytes)
+	return dataSize, err
+}
+
+func getInFileIndexData(f *os.File) (string, error) {
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return "[]", errormdl.Wrap("fail to fetch infile index data")
+	}
+	footerSize, err := getFooterSize(f)
+	if err != nil {
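+		// treat an unreadable footer size as an empty index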
+		return "[]", nil
+	}
+	if footerSize == 0 {
+		return "[]", nil
+	}
+	dataByte, err := filemdl.ReadFileFromOffset(f, footerStartOffset, footerSize)
+	if err != nil {
+		if err.Error() == "EOF" {
+			loggermdl.LogError("EOF")
+			return "[]", nil
+		}
+		loggermdl.LogError("error while fetching index data", err)
+		return "[]", err
+	}
+	return string(dataByte), nil
+}
+
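+// getFileDataFromPack reads dataSize bytes at startOffset and, when security
+// is enabled, decrypts (and decompresses) them with the per-file key.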
+func getFileDataFromPack(f *os.File, startOffset, dataSize int64, rs *gjson.Result, secParams securitymdl.FDBSecParams) ([]byte, error) {
+
+	ba, err := filemdl.ReadFileFromOffset(f, startOffset, dataSize)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	if secParams.EnableSecurity {
+		// _, fileName := filepath.Split(fp.Name())
+		key, err := securitymdl.GenSecKeyBytes(f.Name(), rs)
+		if err != nil {
+			loggermdl.LogError("failed to generate security key: ", err)
+			return nil, err
+		}
+		ba, err = decryptwithDecompression(ba, secParams.EnableCompression, key)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+	}
+	return ba, err
+}
+
+// GetKeyWithFileNameAndDefaultKey generates key using file name + Default key
+func GetKeyWithFileNameAndDefaultKey(filePath string) ([]byte, error) {
+	fileName := filepath.Base(filePath)
+	fileNameBytes := []byte(fileName)
+	fileNameBytes = append(fileNameBytes, defaultSecurityKey...)
+	keyBytes, getHashError := hashmdl.Get128BitHash(fileNameBytes)
+	if errormdl.CheckErr(getHashError) != nil {
+		return nil, errormdl.CheckErr(getHashError)
+	}
+	return keyBytes[:], nil
+}
+
+// CreateIndexJSON builds a JSON index row by evaluating each index field's
+// query against rs; it returns an error if any indexed value is missing.
+func CreateIndexJSON(indexFields []InFileIndexField, rs *gjson.Result) (string, error) {
+	json := `{}`
+	for _, indexField := range indexFields {
+		val := rs.Get(indexField.Query).Value()
+		// validation
+		if val == nil {
+			return "", errormdl.Wrap("please provide value for index field: " + indexField.Query)
+		}
+		json, _ = sjson.Set(json, indexField.FieldName, val)
+	}
+	return json, nil
+}
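+// For example (hypothetical values), indexFields of
+// [{FieldName: "name", Query: "studentName"}] applied to
+// rs = {"studentName":"abc"} produce the index row {"name":"abc"}.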
+
+func initializeWithHeader(filePath string) error {
+	// fileLock, err := filemdl.AcquireFileLock(filePath)
+	// defer filemdl.ReleaseFileLock(fileLock)
+	// if errormdl.CheckErr(err) != nil {
+	// 	loggermdl.LogError(err)
+	// 	return errormdl.CheckErr(err)
+	// }
+	// initialize file with headers and FDB index entry
+	f, err := filemdl.OpenFile(filePath, os.O_CREATE|os.O_RDWR|os.O_SYNC, 0777)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	defer f.Close()
+	_, err = f.WriteAt([]byte(strconv.Itoa(fileStatusReady)), fileStatusOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// is-ready-for-upload flag = 0
+	_, err = f.WriteAt([]byte("0"), isReadyForUploadOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isUpdatedAndNotCommitedOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReorgRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReindexRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// _, err = f.WriteAt([]byte(appendPaddingToNumber(sizeReservedForHeaders, 15)), footerOffsetInFile)
+	err = setFooterOffset(f, sizeReservedForHeaders+int64(len(lineBreak)))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = setFooterSize(f, 0)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("filehash"), filehashOffest)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
+	_, err = f.WriteAt([]byte(timestamp), lastUpdatedOffset)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("\r\n"), sizeReservedForHeaders)
+	return err
+}
+
+func initializeWithHeaderUsingFp(f *os.File) error {
+	// fileLock, err := filemdl.AcquireFileLock(filePath)
+	// defer filemdl.ReleaseFileLock(fileLock)
+	// if errormdl.CheckErr(err) != nil {
+	// 	loggermdl.LogError(err)
+	// 	return errormdl.CheckErr(err)
+	// }
+	// initialize file with headers and FDB index entry
+	// f, err := filemdl.OpenFile(filePath, os.O_CREATE|os.O_RDWR|os.O_SYNC, 0777)
+	// defer f.Close()
+	// if errormdl.CheckErr(err) != nil {
+	// 	loggermdl.LogError(err)
+	// 	return errormdl.CheckErr(err)
+	// }
+	_, err := f.WriteAt([]byte(strconv.Itoa(fileStatusReady)), fileStatusOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// is-ready-for-upload flag = 0
+	_, err = f.WriteAt([]byte("0"), isReadyForUploadOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isUpdatedAndNotCommitedOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReorgRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("0"), isReindexRequiredOffsetInFile)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	// _, err = f.WriteAt([]byte(appendPaddingToNumber(sizeReservedForHeaders, 15)), footerOffsetInFile)
+	err = setFooterOffset(f, sizeReservedForHeaders+int64(len(lineBreak)))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = setFooterSize(f, 0)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("filehash"), filehashOffest)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
+	_, err = f.WriteAt([]byte(timestamp), lastUpdatedOffset)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	_, err = f.WriteAt([]byte("\r\n"), sizeReservedForHeaders)
+	return err
+}
+
+// AddFileInPackFile appends a JSON record to the pack file, creating and initializing the file if it does not exist.
+func AddFileInPackFile(filePath string, infileIndexFields []InFileIndexField, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	if !isFilePresent {
+		dir, _ := filepath.Split(filePath)
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return errormdl.CheckErr(err)
+		}
+
+		err = initializeWithHeader(filePath)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+
+	}
+	return addFileInPackFile(filePath, infileIndexFields, rs, secParams)
+}
+
+// AddMediaInPackFile appends a media record to the pack file and returns the record ID under which it was stored.
+func AddMediaInPackFile(filePath string, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, error) {
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	if !isFilePresent {
+		dir, _ := filepath.Split(filePath)
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return "", errormdl.CheckErr(err)
+		}
+
+		err = initializeWithHeader(filePath)
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", err
+		}
+
+	}
+	return addMediaInPackFile(filePath, mediaData, infileIndexFields, rs)
+}
+
+// AddMediaInPackFileUsingFp is the file-pointer variant of AddMediaInPackFile; it reuses an open handle and an in-memory infile index and returns the updated index.
+func AddMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, *gjson.Result, error) {
+	isFilePresent := filemdl.FileAvailabilityCheck(f.Name())
+
+	info, err := f.Stat()
+	if err != nil {
+		return "", infileIndex, err
+	}
+	if !isFilePresent || info.Size() == 0 {
+		dir, _ := filepath.Split(f.Name())
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return "", infileIndex, errormdl.CheckErr(err)
+		}
+
+		err = initializeWithHeaderUsingFp(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", infileIndex, err
+		}
+
+	}
+	return addMediaInPackFileUsingFp(f, infileIndex, mediaData, infileIndexFields, rs)
+}
+
+func addMediaInPackFile(filePath string, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, error) {
+	recordID := ""
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return recordID, errormdl.Wrap("please specify fileType")
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return recordID, errormdl.CheckErr(err)
+	}
+	// if file is being created for first time
+	fileStatus := fileStatusReady
+	// if isFilePresent {
+	fileStatus, err = getFileStatus(f)
+	if err != nil {
+		if err.Error() == "EOF" {
+			fileStatus = fileStatusReady
+		} else {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+	}
+	// }
+	if fileStatusReady == fileStatus {
+		// if it is the first write to the file, add FDB index data of fileType FDBIndex
+		// prepare data
+		indexRowJSON, err := CreateIndexJSON(infileIndexFields, rs)
+		if err != nil {
+			loggermdl.LogError("error writing to bucket: ", err)
+			return recordID, err
+		}
+
+		fileHash, err := securitymdl.GetHash(rs.String())
+		if err != nil {
+			loggermdl.LogError("error writing to bucket: ", err)
+			return recordID, err
+		}
+		// compress & encrypt data
+		previousIndexData, err := getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+		// loggermdl.LogDebug("previous index data", previousIndexData)
+		footerStartOffset := getFooterOffset(f)
+		if footerStartOffset == -1 {
+			loggermdl.LogError("fail to fetch infile index offset")
+			return recordID, errormdl.Wrap("fail to fetch infile index data")
+		}
+
+		err = setFileStatusFlag(f, fileStatusUpdatingData)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+		// write data
+		dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+		recordID = rs.Get("recordID").String()
+		if recordID == "" {
+			recordID = guidmdl.GetGUID()
+		}
+
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+		// append new entry in infile index
+		parsedindexRowJSON := gjson.Parse(indexRowJSON)
+		// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+		updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+		// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+		// updating infile index
+		err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+
+		footerNewOffset := footerStartOffset + dataSize
+		// check every header update so an earlier failure is not masked by a later success
+		if err = setFooterOffset(f, footerNewOffset); err == nil {
+			err = setFooterSize(f, int64(len(updatedIndexData)))
+		}
+		if err == nil {
+			err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+		}
+		if err == nil {
+			err = setFileStatusFlag(f, fileStatusReady)
+		}
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, err
+		}
+		f.Sync()
+	} else {
+		// the file is busy with another update; the write is skipped for now and
+		// a retry-after-timeout strategy remains to be implemented
+	}
+
+	return recordID, nil
+
+}
+func addMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, *gjson.Result, error) {
+	recordID := ""
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return recordID, infileIndex, errormdl.Wrap("please specify fileType")
+	}
+	lock := getLock(f.Name())
+	lock.Lock()
+	// f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+	// if errormdl.CheckErr(err) != nil {
+	// 	loggermdl.LogError(err)
+	// 	return recordID, infileIndex, errormdl.CheckErr(err)
+	// }
+	// if file is being created for first time
+	// fileStatus := fileStatusReady
+	// if isFilePresent {
+
+	// if it is the first write to the file, add FDB index data of fileType FDBIndex
+	// prepare data
+	indexRowJSON, err := CreateIndexJSON(infileIndexFields, rs)
+	if err != nil {
+		loggermdl.LogError("error writing to bucket: ", err)
+		return recordID, infileIndex, err
+	}
+
+	fileHash, err := securitymdl.GetHash(rs.String())
+	if err != nil {
+		loggermdl.LogError("error writing to bucket: ", err)
+		return recordID, infileIndex, err
+	}
+	previousIndexData := "[]"
+	if infileIndex == nil {
+		previousIndexData, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return recordID, infileIndex, err
+		}
+	} else {
+		previousIndexData = infileIndex.String()
+	}
+	// loggermdl.LogDebug("previous index data", previousIndexData)
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, infileIndex, errormdl.Wrap("fail to fetch infile index data")
+	}
+
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	// write data
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+
+	recordID = guidmdl.GetGUID()
+
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	// append new entry in infile index
+	parsedindexRowJSON := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+	// updating infile index
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+
+	footerNewOffset := footerStartOffset + dataSize
+	// check every header update so an earlier failure is not masked by a later success
+	if err = setFooterOffset(f, footerNewOffset); err == nil {
+		err = setFooterSize(f, int64(len(updatedIndexData)))
+	}
+	if err == nil {
+		err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	}
+	if err == nil {
+		err = setFileStatusFlag(f, fileStatusReady)
+	}
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	err = f.Sync()
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	return recordID, &updatedIndexDataObj, nil
+
+}
+
+func addFileInPackFile(filePath string, infileIndexFields []InFileIndexField, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
+
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return errormdl.Wrap("please specify fileType")
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return errormdl.CheckErr(err)
+	}
+	// if file is being created for first time
+	fileStatus := fileStatusReady
+	// if isFilePresent {
+	fileStatus, err = getFileStatus(f)
+	if err != nil {
+		if err.Error() == "EOF" {
+			fileStatus = fileStatusReady
+		} else {
+			loggermdl.LogError(err)
+			return err
+		}
+	}
+	// }
+	if fileStatusReady == fileStatus {
+
+		// if it is the first write to the file, add FDB index data of fileType FDBIndex
+		// prepare data
+		indexRowJSON, err := CreateIndexJSON(infileIndexFields, rs)
+		if err != nil {
+			loggermdl.LogError("error writing to bucket: ", err)
+			return err
+		}
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+		fileHash, err := securitymdl.GetHash(rs.String())
+		if err != nil {
+			loggermdl.LogError("error writing to bucket: ", err)
+			return err
+		}
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+		// compress & encrypt data
+		previousIndexData, err := getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// loggermdl.LogDebug("previous index data", previousIndexData)
+		footerStartOffset := getFooterOffset(f)
+		if footerStartOffset == -1 {
+			loggermdl.LogError("fail to fetch infile index offset")
+			return errormdl.Wrap("fail to fetch infile index data")
+		}
+		dataString := rs.String()
+		err = setFileStatusFlag(f, fileStatusUpdatingData)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// write data
+		dataSize, err := addFileDataInFile(f, footerStartOffset, dataString, true, rs, secParams)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+		// append new entry in infile index
+		parsedindexRowJSON := gjson.Parse(indexRowJSON)
+		// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+		updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+		// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+		// updating infile index
+		err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+
+		footerNewOffset := footerStartOffset + dataSize
+		// check every header update so an earlier failure is not masked by a later success
+		if err = setFooterOffset(f, footerNewOffset); err == nil {
+			err = setFooterSize(f, int64(len(updatedIndexData)))
+		}
+		if err == nil {
+			err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+		}
+		if err == nil {
+			err = setFileStatusFlag(f, fileStatusReady)
+		}
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		f.Sync()
+	} else {
+		// the file is busy with another update; the write is skipped for now and
+		// a retry-after-timeout strategy remains to be implemented
+	}
+
+	return nil
+
+}
+
+// GetDataFromPackFile returns a JSON array of all records matching the given infile-index queries.
+func GetDataFromPackFile(filePath string, inFileIndexQueries []string, rs *gjson.Result, secParams securitymdl.FDBSecParams) (string, error) {
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return "", errormdl.Wrap("file not found at:" + filePath)
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := filemdl.Open(filePath)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+
+	if err != nil {
+		loggermdl.LogError("err while opening file: ", filePath, err)
+		return "", err
+	}
+	indexDataString, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", filePath, err)
+		return "", err
+	}
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	indexRows.ForEach(func(key, indexRow gjson.Result) bool {
+		// read files
+		startOffSet := indexRow.Get("startOffset").Int()
+		dataSize := indexRow.Get("dataSize").Int()
+		if startOffSet == 0 || dataSize == 0 {
+			return true
+		}
+		dataByte := []byte{'{', '}'}
+		var err error
+		// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
+		dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, rs, secParams)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		_, err = sb.Write(dataByte)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		sb.WriteString(",")
+
+		return true // keep iterating
+	})
+	sb.WriteString("]")
+	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
+	return finalResult, nil
+}
+
+// GetMediaFromPackFile returns the media bytes and associated metadata stored under the given record ID.
+func GetMediaFromPackFile(filePath string, recordID string) ([]byte, gjson.Result, error) {
+	dataByte := []byte{}
+	var indexRow gjson.Result
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return dataByte, indexRow, errormdl.Wrap("file not found at:" + filePath)
+	}
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := filemdl.Open(filePath)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+
+	if err != nil {
+		loggermdl.LogError("err while opening file: ", filePath, err)
+		return dataByte, indexRow, err
+	}
+	indexDataString, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", filePath, err)
+		return dataByte, indexRow, err
+	}
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	if indexRows.String() == "" {
+		loggermdl.LogError("data not found for record id: ", recordID)
+		return dataByte, indexRow, errormdl.Wrap("data not found for record id: " + recordID)
+	}
+	indexRow = indexRows.Get("0")
+	// indexRows.ForEach(func(key, indexRow gjson.Result) bool {
+	// read files
+	startOffSet := indexRow.Get("startOffset").Int()
+	dataSize := indexRow.Get("dataSize").Int()
+	if startOffSet == 0 || dataSize == 0 {
+		return dataByte, indexRow, errormdl.Wrap("data not found")
+	}
+	// dataByte := []byte{'{', '}'}
+	// var err error
+	// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
+	dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, nil, securitymdl.FDBSecParams{EnableSecurity: false})
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, indexRow, err
+	}
+
+	return dataByte, indexRow.Get("requiredData"), nil
+}
+
+// GetMediaFromPackFileUsingFp is the file-pointer variant of GetMediaFromPackFile; it also returns the metadata and infile index for reuse.
+func GetMediaFromPackFileUsingFp(f *os.File, infileIndex *gjson.Result, recordID string) ([]byte, *gjson.Result, error) {
+	dataByte := []byte{}
+	var metaData *gjson.Result
+	if !filemdl.FileAvailabilityCheck(f.Name()) {
+		return dataByte, metaData, errormdl.Wrap("file not found at:" + f.Name())
+	}
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	lock := getLock(f.Name())
+	lock.Lock()
+	// f, err := filemdl.Open(filePath)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+
+	// if err != nil {
+	// 	loggermdl.LogError("err while opening file: ", filePath, err)
+	// 	return dataByte, metaData, err
+	// }
+	indexDataString := "[]"
+	var err error
+	if infileIndex == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", f.Name(), err)
+			return dataByte, metaData, err
+		}
+	} else {
+		indexDataString = infileIndex.String()
+	}
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	if indexRows.String() == "" {
+		loggermdl.LogError("data not found for recordId: ", recordID)
+		return dataByte, metaData, errormdl.Wrap("data not found")
+	}
+	indexRow := indexRows.Get("0")
+	// indexRows.ForEach(func(key, indexRow gjson.Result) bool {
+	// read files
+	startOffSet := indexRow.Get("startOffset").Int()
+	dataSize := indexRow.Get("dataSize").Int()
+	if startOffSet == 0 || dataSize == 0 {
+		return dataByte, metaData, errormdl.Wrap("data not found")
+	}
+	// dataByte := []byte{'{', '}'}
+	// var err error
+	// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
+	dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, nil, securitymdl.FDBSecParams{EnableSecurity: false})
+	if err != nil {
+		loggermdl.LogError(err)
+		return dataByte, metaData, err
+	}
+
+	data, _ := sjson.Set("", "requiredData", indexRow.Get("requiredData").String())
+	data, _ = sjson.Set(data, "infileIndex", indexData.String())
+	metaDataObj := gjson.Parse(data)
+	return dataByte, &metaDataObj, nil
+}
+
+// UpdateMediaInPackFile rewrites the media for the given record ID by appending the new data and repointing the record's index entry.
+func UpdateMediaInPackFile(filePath string, recordID string, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, error) {
+
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return recordID, errormdl.Wrap("file not found at:" + filePath)
+	}
+
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	lock := getLock(filePath)
+	lock.Lock()
+
+	f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+
+	if err != nil {
+		loggermdl.LogError("err while opening file: ", filePath, err)
+		return recordID, err
+	}
+	indexDataString, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError("index data not found: ", filePath, err)
+		return recordID, err
+	}
+
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	// iterate over the full index so foundAtIndex matches the record's position
+	// in indexDataString (positions in the filtered indexRows may differ)
+	for index, indexRow := range indexData.Array() {
+		if indexRow.Get("recordID").String() == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+	// if indexRows.String() == "" {
+	// 	loggermdl.LogError("data not found for record id: ", recordID)
+	// 	return recordID, errormdl.Wrap("data not found for record id: " + recordID)
+	// }
+
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, errormdl.Wrap("fail to fetch infile index data")
+	}
+
+	// write data
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	indexRowJSON := foundIndexRow.String()
+	if foundAtIndex == -1 {
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", rs.Get("fileType").String())
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	// append new entry in infile index
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(indexDataString, strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+	// updatedIndexData := gjson.Parse(indexDataString)
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+
+	footerNewOffset := footerStartOffset + dataSize
+	// check every header update so an earlier failure is not masked by a later success
+	if err = setFooterOffset(f, footerNewOffset); err == nil {
+		err = setFooterSize(f, int64(len(updatedIndexData)))
+	}
+	if err == nil {
+		err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	}
+	if err == nil {
+		err = setFileStatusFlag(f, fileStatusReady)
+	}
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, err
+	}
+	err = f.Sync()
+	return recordID, err
+}
+
+// UpdateMediaInPackFileUsingFp is the file-pointer variant of UpdateMediaInPackFile; it fails if the record does not exist.
+func UpdateMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, recordID string, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, *gjson.Result, error) {
+
+	isFilePresent := filemdl.FileAvailabilityCheck(f.Name())
+
+	info, err := f.Stat()
+	if err != nil {
+		return "", infileIndex, err
+	}
+	if !isFilePresent || info.Size() == 0 {
+		return recordID, infileIndex, errormdl.Wrap("file not found at:" + f.Name())
+	}
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	lock := getLock(f.Name())
+	lock.Lock()
+
+	// f, err := filemdl.OpenFile(f.Name(), os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+
+	// if err != nil {
+	// 	loggermdl.LogError("err while opening file: ", filePath, err)
+	// 	return recordID, infileIndex, err
+	// }
+	indexDataString := "[]"
+
+	if infileIndex == nil {
+
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", f.Name(), err)
+			return recordID, infileIndex, err
+		}
+	} else {
+		indexDataString = infileIndex.String()
+	}
+
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	for index, indexRow := range indexData.Array() {
+		r := indexRow.Get("recordID").String()
+		if r != "" && indexRow.Get("recordID").String() == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+	if foundAtIndex == -1 {
+		loggermdl.LogError("no data found to update: ", recordID)
+		return recordID, infileIndex, errormdl.Wrap("no data found to update")
+	}
+
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, infileIndex, errormdl.Wrap("fail to fetch infile index data")
+	}
+
+	// write data
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	indexRowJSON := foundIndexRow.String()
+	// if foundAtIndex == -1 {
+	// 	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", rs.Get("fileType").String())
+	// }
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	// append new entry in infile index
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(indexDataString, strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+	// updatedIndexData := gjson.Parse(indexDataString)
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+
+	footerNewOffset := footerStartOffset + dataSize
+	// check each write so an earlier failure is not masked by a later success
+	err = setFooterOffset(f, footerNewOffset)
+	if err == nil {
+		err = setFooterSize(f, int64(len(updatedIndexData)))
+	}
+	if err == nil {
+		err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	}
+	if err == nil {
+		err = setFileStatusFlag(f, fileStatusReady)
+	}
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	err = f.Sync()
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	return recordID, &updatedIndexDataObj, err
+}
+
+// UpsertMediaInPackFileUsingFp updates the media record matching recordID or, if no entry exists, appends it as a new record; an empty file is first initialized with a pack header.
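+//
+// A minimal usage sketch (hypothetical record ID and media bytes); passing a
+// nil infileIndex makes the function read the index from the file itself:
+//
+//	meta := gjson.Parse(`{"fileType":"Asset","extension":"png"}`)
+//	recID, updatedIndex, err := UpsertMediaInPackFileUsingFp(f, nil, "someRecordID", mediaBytes, nil, &meta)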
+func UpsertMediaInPackFileUsingFp(f *os.File, infileIndex *gjson.Result, recordID string, mediaData []byte, infileIndexFields []InFileIndexField, rs *gjson.Result) (string, *gjson.Result, error) {
+
+	isFilePresent := filemdl.FileAvailabilityCheck(f.Name())
+
+	info, err := f.Stat()
+	if err != nil {
+		return "", infileIndex, err
+	}
+	if !isFilePresent || info.Size() == 0 {
+		dir, _ := filepath.Split(f.Name())
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return "", infileIndex, errormdl.CheckErr(err)
+		}
+
+		err = initializeWithHeaderUsingFp(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return "", infileIndex, err
+		}
+
+	}
+	inFileIndexQueries := []string{`#[recordID=` + recordID + `]`}
+
+	lock := getLock(f.Name())
+	lock.Lock()
+
+	// f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+
+	// if err != nil {
+	// 	loggermdl.LogError("err while opening file: ", filePath, err)
+	// 	return recordID, err
+	// }
+	indexDataString := "[]"
+
+	if infileIndex == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", f.Name(), err)
+			return recordID, infileIndex, err
+		}
+	} else {
+		indexDataString = infileIndex.String()
+	}
+
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	foundAtIndex := -1
+	foundIndexRow := gjson.Result{}
+	for index, indexRow := range indexData.Array() {
+		if indexRow.Get("recordID").String() == recordID {
+			foundAtIndex = index
+			foundIndexRow = indexRow
+			break
+		}
+	}
+	// if foundAtIndex == -1 {
+	// 	loggermdl.LogError("no data found to update: ", recordID)
+	// 	return recordID, infileIndex, errormdl.Wrap("no data found to update")
+	// }
+	// if indexRows.String() == "" {
+	// 	loggermdl.LogError("data not found for record id: ", recordID)
+	// 	return recordID,infileIndex, errormdl.Wrap("data not found for record id: " + recordID)
+	// }
+
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return recordID, infileIndex, errormdl.Wrap("fail to fetch infile index data")
+	}
+
+	// write data
+	dataSize, err := addByteDataInFile(f, footerStartOffset, mediaData, true)
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	indexRowJSON := foundIndexRow.String()
+
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "requiredData", rs.String())
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "recordID", recordID)
+
+	// append new entry in infile index
+	updatedIndexRow := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(indexDataString, strconv.Itoa(foundAtIndex), updatedIndexRow.Value())
+	// updatedIndexData := gjson.Parse(indexDataString)
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+
+	footerNewOffset := footerStartOffset + dataSize
+	// check each write so an earlier failure is not masked by a later success
+	err = setFooterOffset(f, footerNewOffset)
+	if err == nil {
+		err = setFooterSize(f, int64(len(updatedIndexData)))
+	}
+	if err == nil {
+		err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	}
+	if err == nil {
+		err = setFileStatusFlag(f, fileStatusReady)
+	}
+	if err != nil {
+		loggermdl.LogError(err)
+		return recordID, infileIndex, err
+	}
+	err = f.Sync()
+	if err != nil {
+		return recordID, infileIndex, err
+	}
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	return recordID, &updatedIndexDataObj, err
+}
+
+// UpdateFilesInPackFile updates every record in the pack file matching the in-file index queries with the fields from rs and returns the updated records.
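+//
+// A minimal usage sketch; the path and query values are hypothetical:
+//
+//	rs := gjson.Parse(`{"fileType":"Profile","class":"TY_MCA"}`)
+//	queries := []string{`#[fileType=="Profile"]`}
+//	updated, err := UpdateFilesInPackFile("/tmp/packs/pack1", queries, &rs, securitymdl.FDBSecParams{EnableSecurity: false})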
+func UpdateFilesInPackFile(filePath string, infileIndexQueries []string, rs *gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, error) {
+	// check fileType index availability
+	// check is data present
+	// if data present
+	// then calculate size of updated data
+	// if size is less than or equal to previous data size
+	// then write at the same location
+	// else if size of updated data is more than existing data then append it to end of data
+	// update startOffset and data size of file in index row
+	// update footer offset and footer size
+	requestedFileType := rs.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		return nil, errormdl.Wrap("please specify fileType")
+	}
+
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return nil, errormdl.Wrap("file not found: " + filePath)
+	}
+	// lock, err := filemdl.AcquireFileLock(filePath)
+	// defer filemdl.ReleaseFileLock(lock)
+	// if err != nil {
+	// 	return nil, errormdl.Wrap("fail to update data" + err.Error())
+	// }
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+	if err != nil {
+		return nil, errormdl.Wrap("fail to update data" + err.Error())
+	}
+	indexDataString, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	// infileIndexQueries = append(infileIndexQueries, `#[fileType==`+requestedFileType+`]`)
+	indexRows := gjson.Parse(indexDataString)
+	indexRecordsToUpdate := indexRows
+	for _, query := range infileIndexQueries {
+		indexRecordsToUpdate = indexRecordsToUpdate.Get(query + "#")
+	}
+
+	indexRecordsToUpdateObjs := indexRecordsToUpdate.Array()
+	if len(indexRecordsToUpdateObjs) == 0 {
+		return nil, ErrNoDataFound
+	}
+	// update every matching record and collect the results
+	resultArrayStr := "[]"
+	for _, recordToUpdateIndexRow := range indexRecordsToUpdateObjs {
+		// loggermdl.LogDebug("recordToUpdateIndexRow", recordToUpdateIndexRow)
+		result, err := updateSingleRecordInPackFile(f, recordToUpdateIndexRow, *rs, secParams)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, errormdl.Wrap("fail to update data" + err.Error())
+		}
+		resultArrayStr, _ = sjson.Set(resultArrayStr, "-1", result.Value())
+	}
+	resultData := gjson.Parse(resultArrayStr)
+	return &resultData, nil
+}
+
+func updateSingleRecordInPackFile(f *os.File, recordToUpdateIndexRow gjson.Result, rs gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, error) {
+	fileStartOffset := recordToUpdateIndexRow.Get("startOffset").Int()
+	dataSize := recordToUpdateIndexRow.Get("dataSize").Int()
+	if fileStartOffset == 0 || dataSize == 0 {
+		loggermdl.LogError("index row details incorrect - start offset :", fileStartOffset, " data size :", dataSize)
+		return nil, errormdl.Wrap("index row details incorrect")
+	}
+	// compare data size
+	// TODO: compare with encrypted data
+
+	// update fields data
+	// existingData, err := filemdl.ReadFileFromOffset(f, fileStartOffset, dataSize)
+	existingData, err := getFileDataFromPack(f, fileStartOffset, dataSize, &rs, secParams)
+	if err != nil {
+		return nil, err
+	}
+
+	updatedDataStr := strings.TrimSpace(string(existingData))
+	rs.ForEach(func(key, val gjson.Result) bool {
+		// updating existing data
+		updatedDataStr, _ = sjson.Set(updatedDataStr, key.String(), val.Value())
+		return true
+	})
+	newDataSize := int64(len(updatedDataStr))
+	footerStartOffset := getFooterOffset(f)
+	updatedFooterOffset := footerStartOffset
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		return nil, err
+	}
+	indexDataString, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	existingIndexRows := gjson.Parse(indexDataString)
+	if len(strings.TrimSpace(updatedDataStr)) <= len(strings.TrimSpace(string(existingData))) {
+		newDataSize, err = addFileDataInFile(f, fileStartOffset, updatedDataStr, false, &rs, secParams)
+		if err != nil {
+			return nil, err
+		}
+		// newDataSize = newDataSize + 2
+	} else {
+
+		newDataSize, err = addFileDataInFile(f, footerStartOffset, updatedDataStr, true, &rs, secParams)
+		updatedFooterOffset = footerStartOffset + newDataSize
+		fileStartOffset = footerStartOffset
+	}
+	// indexRows, err := getInFileIndexData(f)
+	// if err != nil {
+	// 	return nil, err
+	// }
+	// update startOffset and dataSize in infile index
+	// updateIndexRow
+	updatedIndexRowStr := recordToUpdateIndexRow.String()
+
+	recordToUpdateIndexRow.ForEach(func(key, value gjson.Result) bool {
+		indexFieldKey := key.String()
+		if rs.Get(indexFieldKey).Exists() {
+			updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, indexFieldKey, rs.Get(indexFieldKey).Value())
+		}
+		return true
+	})
+	fileHash, err := securitymdl.GetHash(updatedDataStr)
+	if err != nil {
+		return nil, err
+	}
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "startOffset", fileStartOffset)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "dataSize", newDataSize)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "fileHash", fileHash)
+	updatedIndexRows, err := updateIndexRow(&existingIndexRows, recordToUpdateIndexRow, gjson.Parse(updatedIndexRowStr))
+
+	if err != nil {
+		return nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return nil, err
+	}
+	// loggermdl.LogDebug("updated index data", updatedIndexRows)
+	err = setIndexDataInFile(f, updatedFooterOffset, updatedIndexRows.String())
+	if err != nil {
+		return nil, err
+	}
+	err = setFooterOffset(f, updatedFooterOffset)
+	if err != nil {
+		return nil, err
+	}
+
+	err = setFooterSize(f, int64(len(updatedIndexRows.String())))
+	if err != nil {
+		return nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return nil, err
+	}
+	if err = f.Sync(); err != nil {
+		return nil, err
+	}
+	// TODO: discussion
+	// err = setFileReadyForUploadFlag(f, true)
+	// if err != nil {
+	// 	return err
+	// }
+	updatedData := gjson.Parse(updatedDataStr)
+	return &updatedData, nil
+}
+
+func updateIndexRow(indexRows *gjson.Result, previousIndexRow gjson.Result, updatedRow gjson.Result) (*gjson.Result, error) {
+	indexRowObjs := indexRows.Array()
+	if len(indexRowObjs) == 0 {
+		return nil, errormdl.Wrap("no data found to update")
+	}
+	// loggermdl.LogDebug("indexRows", indexRows)
+
+	prevIndexRowString := previousIndexRow.String()
+	foundRowIndex := -1
+
+	for index, indexRowObj := range indexRowObjs {
+		if indexRowObj.String() != "" && prevIndexRowString != "" && indexRowObj.String() == prevIndexRowString {
+			foundRowIndex = index
+			break
+		}
+	}
+
+	if foundRowIndex == -1 {
+		return nil, errormdl.Wrap("no record found to update")
+	}
+	var err error
+	updatedIndexDataString := indexRows.String()
+	// for _, foundRowIndex := range foundRowIndexes {
+	updatedIndexDataString, err = sjson.Set(updatedIndexDataString, strconv.Itoa(foundRowIndex), updatedRow.Value())
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, errormdl.Wrap("failed to update index rows")
+	}
+	// }
+	updatedIndexData := gjson.Parse(updatedIndexDataString)
+	return &updatedIndexData, nil
+}
+
+// DeletDataFromPackFile removes every record matching the in-file index queries from the pack file and returns the number of records deleted.
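+//
+// A minimal usage sketch (hypothetical path and queries):
+//
+//	queries := []string{`#[fileType=="Profile"]`, `#[name=="vijay"]`}
+//	deleted, err := DeletDataFromPackFile("/tmp/packs/pack1", queries)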
+func DeletDataFromPackFile(filePath string, infileIndexQueries []string) (int, error) {
+	// get indexdata
+	// get update index records
+	// save updated index
+	// update index size header
+	// lock, err := filemdl.AcquireFileLock(filePath)
+	// defer filemdl.ReleaseFileLock(lock)
+	// if err != nil {
+	// 	return err
+	// }
+	recordsDeletedCnt := 0
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+
+	indexDataStr, err := getInFileIndexData(f)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+
+	indexData := gjson.Parse(indexDataStr)
+	indexRecordsToDelete := indexData
+	// loggermdl.LogDebug("indexRecordsToDelete file type", indexRecordsToDelete)
+	for _, query := range infileIndexQueries {
+		indexRecordsToDelete = indexRecordsToDelete.Get(query + "#")
+	}
+	indexRowsToDelete := indexRecordsToDelete.Array()
+	if len(indexRowsToDelete) == 0 {
+		loggermdl.LogError("ErrNoDataFound")
+		return recordsDeletedCnt, ErrNoDataFound
+	}
+	updatedIndexRecords := indexData
+	for _, indexRowToRemove := range indexRowsToDelete {
+		updatedIndexRecords, err = removeIndexRow(updatedIndexRecords, indexRowToRemove.String())
+		if err != nil {
+			loggermdl.LogError("fail to delete record:", err)
+			return recordsDeletedCnt, errormdl.Wrap("fail to delete record:" + err.Error())
+		}
+		recordsDeletedCnt++
+	}
+
+	footerOffset := getFooterOffset(f)
+	if footerOffset == -1 {
+		return recordsDeletedCnt, errormdl.Wrap("fail to fetch infile index offset")
+	}
+	newIndexDataSize := len(updatedIndexRecords.String())
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+	err = setIndexDataInFile(f, footerOffset, updatedIndexRecords.String())
+	if err != nil {
+		loggermdl.LogError("fail to update infile index data :", err)
+		return recordsDeletedCnt, err
+	}
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return recordsDeletedCnt, err
+	}
+	return recordsDeletedCnt, setFooterSize(f, int64(newIndexDataSize))
+}
+
+func removeIndexRow(indexRows gjson.Result, indexRowToDelete string) (gjson.Result, error) {
+
+	indexRowObjs := indexRows.Array()
+	if len(indexRowObjs) == 0 {
+		return indexRows, errormdl.Wrap("no data found to update")
+	}
+	// loggermdl.LogDebug("indexRows", indexRows)
+
+	foundIndexToDelete := -1
+	for index, indexRowObj := range indexRowObjs {
+		if indexRowObj.String() != "" && indexRowToDelete != "" && indexRowObj.String() == indexRowToDelete {
+			foundIndexToDelete = index
+			break
+		}
+	}
+
+	if foundIndexToDelete == -1 {
+		return indexRows, errormdl.Wrap("no record found to delete")
+	}
+	var err error
+	updatedIndexDataString, err := sjson.Delete(indexRows.String(), strconv.Itoa(foundIndexToDelete))
+	if err != nil {
+		loggermdl.LogError(err)
+	}
+	return gjson.Parse(updatedIndexDataString), nil
+}
+
+// AddFileInPackFileUsingFp appends a record to the pack file using an already-open file pointer, initializing the pack header first when the file is new or empty.
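+//
+// A minimal usage sketch; f is an already-open pack file handle and the record
+// values are hypothetical:
+//
+//	fields := []InFileIndexField{{FieldName: "name", Query: "name"}}
+//	rs := gjson.Parse(`{"fileType":"Profile","name":"vijay"}`)
+//	err := AddFileInPackFileUsingFp(f, fields, &rs, securitymdl.FDBSecParams{EnableSecurity: false})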
+func AddFileInPackFileUsingFp(f *os.File, infileIndexFields []InFileIndexField, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
+	filePath := f.Name()
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	info, err := f.Stat()
+	if err != nil {
+		return err
+	}
+	if !isFilePresent || info.Size() == 0 {
+		dir, _ := filepath.Split(filePath)
+		err := filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return errormdl.CheckErr(err)
+		}
+
+		err = initializeWithHeaderUsingFp(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+	}
+	return addFileInPackFileUsingFp(f, infileIndexFields, rs, secParams)
+}
+
+func addFileInPackFileUsingFp(f *os.File, infileIndexFields []InFileIndexField, rs *gjson.Result, secParams securitymdl.FDBSecParams) error {
+	filePath := f.Name()
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return errormdl.Wrap("please specify fileType")
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	// f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+	// if errormdl.CheckErr(err) != nil {
+	// 	loggermdl.LogError(err)
+	// 	return errormdl.CheckErr(err)
+	// }
+	// an empty, freshly created file has no status byte yet; treat EOF as ready
+	fileStatus, err := getFileStatus(f)
+	if err != nil {
+		if err.Error() == "EOF" {
+			fileStatus = fileStatusReady
+		} else {
+			loggermdl.LogError(err)
+			return err
+		}
+	}
+	if fileStatusReady == fileStatus {
+
+		// if it's the first write in the file, add FDB index data of fileType FDBIndex
+		// prepare data
+		indexRowJSON, err := CreateIndexJSON(infileIndexFields, rs)
+		if err != nil {
+			loggermdl.LogError("error writing to bucket: ", err)
+			return err
+		}
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+		fileHash, err := securitymdl.GetHash(rs.String())
+		if err != nil {
+			loggermdl.LogError("error writing to bucket: ", err)
+			return err
+		}
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+		// compress & encrypt data
+		previousIndexData, err := getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// loggermdl.LogDebug("previous index data", previousIndexData)
+		footerStartOffset := getFooterOffset(f)
+		if footerStartOffset == -1 {
+			loggermdl.LogError("fail to fetch infile index offset")
+			return errormdl.Wrap("fail to fetch infile index data")
+		}
+		dataString := rs.String()
+		err = setFileStatusFlag(f, fileStatusUpdatingData)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// write data
+		dataSize, err := addFileDataInFile(f, footerStartOffset, dataString, true, rs, secParams)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+		// append new entry in infile index
+		parsedindexRowJSON := gjson.Parse(indexRowJSON)
+		// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+		updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+		// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+		// updating infile index
+		err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+
+		footerNewOffset := footerStartOffset + dataSize
+		// check each write so an earlier failure is not masked by a later success
+		err = setFooterOffset(f, footerNewOffset)
+		if err == nil {
+			err = setFooterSize(f, int64(len(updatedIndexData)))
+		}
+		if err == nil {
+			err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+		}
+		if err == nil {
+			err = setFileStatusFlag(f, fileStatusReady)
+		}
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		if err = f.Sync(); err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+	} else {
+		// retry after timeout
+	}
+	return nil
+}
+
+// GetDataFromPackFileUsingFp reads every record matching the in-file index queries and returns them as a JSON array string, along with the in-file index that was used.
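+//
+// A minimal usage sketch (hypothetical queries); passing nil for
+// infileIndexData makes the function read the index from the file:
+//
+//	queries := []string{`#[fileType=="Profile"]`}
+//	data, index, err := GetDataFromPackFileUsingFp(f, nil, queries, &rs, securitymdl.FDBSecParams{EnableSecurity: false})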
+func GetDataFromPackFileUsingFp(f *os.File, infileIndexData *gjson.Result, inFileIndexQueries []string, rs *gjson.Result, secParams securitymdl.FDBSecParams) (string, *gjson.Result, error) {
+	filePath := f.Name()
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return "", infileIndexData, errormdl.Wrap("file not found at:" + filePath)
+	}
+
+	lock := getLock(filePath)
+	lock.Lock()
+	// f, err := filemdl.Open(filePath)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+
+	// if err != nil {
+	// 	loggermdl.LogError("err while opening file: ", filePath, err)
+	// 	return "", err
+	// }
+	indexDataString := ""
+	var err error
+	if infileIndexData == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError("index data not found: ", filePath, err)
+			return "", infileIndexData, err
+		}
+	} else {
+		indexDataString = infileIndexData.String()
+	}
+	indexData := gjson.Parse(indexDataString)
+	indexRows := indexData
+	// indexRows := indexData.Get(`#[fileType==` + requestedFileType + `]#`)
+	for i := 0; i < len(inFileIndexQueries); i++ {
+		indexRows = indexRows.Get(inFileIndexQueries[i] + "#")
+	}
+	sb := strings.Builder{}
+	sb.WriteString("[")
+	indexRows.ForEach(func(key, indexRow gjson.Result) bool {
+		// read files
+		startOffSet := indexRow.Get("startOffset").Int()
+		dataSize := indexRow.Get("dataSize").Int()
+		if startOffSet == 0 || dataSize == 0 {
+			return true
+		}
+		dataByte := []byte{'{', '}'}
+		var err error
+		// dataByte, err = filemdl.ReadFileFromOffset(f, startOffSet, dataSize)
+		dataByte, err = getFileDataFromPack(f, startOffSet, dataSize, rs, secParams)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		_, err = sb.Write(dataByte)
+		if err != nil {
+			loggermdl.LogError(err)
+			return true
+		}
+		sb.WriteString(",")
+
+		return true // keep iterating
+	})
+	sb.WriteString("]")
+	finalResult := strings.Replace(sb.String(), ",]", "]", 1)
+	return finalResult, &indexData, nil
+}
+
+// AddPackFileUsingFp appends a record using an already-open file pointer and returns the updated in-file index, initializing the pack header first when the file is new or empty.
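+//
+// A minimal usage sketch (hypothetical record); pass nil for infileIndex to
+// read the index from the file:
+//
+//	fields := []InFileIndexField{{FieldName: "name", Query: "name"}}
+//	rs := gjson.Parse(`{"fileType":"Profile","name":"vijay"}`)
+//	updatedIndex, err := AddPackFileUsingFp(f, fields, nil, &rs, securitymdl.FDBSecParams{EnableSecurity: false})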
+func AddPackFileUsingFp(f *os.File, infileIndexFields []InFileIndexField, infileIndex, rs *gjson.Result, secParams securitymdl.FDBSecParams) (infileIndexData *gjson.Result, err error) {
+	filePath := f.Name()
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	info, err := f.Stat()
+	if err != nil {
+		return
+	}
+	if !isFilePresent || info.Size() == 0 {
+		dir, _ := filepath.Split(filePath)
+		err = filemdl.CreateDirectoryRecursive(dir)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			err = errormdl.CheckErr(err)
+			return
+		}
+
+		err = initializeWithHeaderUsingFp(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return
+		}
+	}
+	return addPackFileUsingFp(f, infileIndex, infileIndexFields, rs, secParams)
+}
+
+func addPackFileUsingFp(f *os.File, infileIndex *gjson.Result, infileIndexFields []InFileIndexField, rs *gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, error) {
+	filePath := f.Name()
+	fileType := rs.Get("fileType").String()
+	if len(fileType) == 0 {
+		return nil, errormdl.Wrap("please specify fileType")
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	// f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+	}()
+
+	//prepare data
+	indexRowJSON, err := CreateIndexJSON(infileIndexFields, rs)
+	if err != nil {
+		loggermdl.LogError("error writing to bucket: ", err)
+		return nil, err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileType", fileType)
+	fileHash, err := securitymdl.GetHash(rs.String())
+	if err != nil {
+		loggermdl.LogError("error writing to bucket: ", err)
+		return nil, err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "fileHash", fileHash)
+	// compress & encrypt data
+	previousIndexData := "[]"
+	if infileIndex == nil {
+		previousIndexData, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, err
+		}
+	} else {
+		previousIndexData = infileIndex.String()
+	}
+	// loggermdl.LogDebug("previous index data", previousIndexData)
+	footerStartOffset := getFooterOffset(f)
+	if footerStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return nil, errormdl.Wrap("fail to fetch infile index data")
+	}
+	dataString := rs.String()
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	// write data
+	dataSize, err := addFileDataInFile(f, footerStartOffset, dataString, true, rs, secParams)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "startOffset", footerStartOffset)
+	indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", dataSize)
+	// append new entry in infile index
+	parsedindexRowJSON := gjson.Parse(indexRowJSON)
+	// indexTableRecords, _ = sjson.Set(indexTableRecords, "-1", parsedJsonzObj.Value())
+	updatedIndexData, _ := sjson.Set(previousIndexData, "-1", parsedindexRowJSON.Value())
+	// loggermdl.LogDebug("updatedIndexData", updatedIndexData)
+	// updating infile index
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+
+	footerNewOffset := footerStartOffset + dataSize
+	// check each write so an earlier failure is not masked by a later success
+	err = setFooterOffset(f, footerNewOffset)
+	if err == nil {
+		err = setFooterSize(f, int64(len(updatedIndexData)))
+	}
+	if err == nil {
+		err = setIndexDataInFile(f, footerNewOffset, updatedIndexData)
+	}
+	if err == nil {
+		err = setFileStatusFlag(f, fileStatusReady)
+	}
+	if err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	if err = f.Sync(); err != nil {
+		loggermdl.LogError(err)
+		return nil, err
+	}
+	// } else {
+	// 	// retry after timeout
+	// }
+	updatedIndexDataObj := gjson.Parse(updatedIndexData)
+	return &updatedIndexDataObj, nil
+}
+
+// UpdateFileInPackFileUsingFp updates every record matching the in-file index queries using an already-open file pointer and returns the updated records together with the updated in-file index.
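+//
+// A minimal usage sketch (hypothetical queries and record values):
+//
+//	rs := gjson.Parse(`{"fileType":"Profile","age":24}`)
+//	queries := []string{`#[fileType=="Profile"]`}
+//	updated, index, err := UpdateFileInPackFileUsingFp(f, queries, nil, &rs, securitymdl.FDBSecParams{EnableSecurity: false})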
+func UpdateFileInPackFileUsingFp(f *os.File, infileIndexQueries []string, infileIndex, rs *gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, *gjson.Result, error) {
+	// check fileType index availability
+	// check is data present
+	// if data present
+	// then calculate size of updated data
+	// if size is less than or equal to previous data size
+	// then write at the same location
+	// else if size of updated data is more than existing data then append it to end of data
+	// update startOffset and data size of file in index row
+	// update footer offset and footer size
+	requestedFileType := rs.Get("fileType").String()
+	if len(requestedFileType) == 0 {
+		return nil, infileIndex, errormdl.Wrap("please specify fileType")
+	}
+
+	if !filemdl.FileAvailabilityCheck(f.Name()) {
+		return nil, infileIndex, errormdl.Wrap("file not found: " + f.Name())
+	}
+	// lock, err := filemdl.AcquireFileLock(filePath)
+	// defer filemdl.ReleaseFileLock(lock)
+	// if err != nil {
+	// 	return nil, infileIndex, errormdl.Wrap("fail to update data" + err.Error())
+	// }
+	lock := getLock(f.Name())
+	lock.Lock()
+	// f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+	// if err != nil {
+	// 	return nil, infileIndex, errormdl.Wrap("fail to update data" + err.Error())
+	// }
+	indexDataString := "[]"
+	var err error
+	if infileIndex == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, infileIndex, err
+		}
+	} else {
+		indexDataString = infileIndex.String()
+	}
+	// infileIndexQueries = append(infileIndexQueries, `#[fileType==`+requestedFileType+`]`)
+	indexRows := gjson.Parse(indexDataString)
+	indexRecordsToUpdate := indexRows
+	for _, query := range infileIndexQueries {
+		indexRecordsToUpdate = indexRecordsToUpdate.Get(query + "#")
+	}
+
+	indexRecordsToUpdateObjs := indexRecordsToUpdate.Array()
+	if len(indexRecordsToUpdateObjs) == 0 {
+		return nil, infileIndex, ErrNoDataFound
+	}
+	// update every matching record and collect the results
+	resultArrayStr := "[]"
+	var updatedInfileIndex *gjson.Result
+	var result *gjson.Result
+	for _, recordToUpdateIndexRow := range indexRecordsToUpdateObjs {
+		// loggermdl.LogDebug("recordToUpdateIndexRow", recordToUpdateIndexRow)
+		result, updatedInfileIndex, err = updateSingleRecordInPackFileUsingFp(f, recordToUpdateIndexRow, updatedInfileIndex, rs, secParams)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, infileIndex, errormdl.Wrap("fail to update data" + err.Error())
+		}
+		resultArrayStr, _ = sjson.Set(resultArrayStr, "-1", result.Value())
+	}
+	resultData := gjson.Parse(resultArrayStr)
+	return &resultData, updatedInfileIndex, nil
+}
+
+func updateSingleRecordInPackFileUsingFp(f *os.File, recordToUpdateIndexRow gjson.Result, infileIndex, rs *gjson.Result, secParams securitymdl.FDBSecParams) (*gjson.Result, *gjson.Result, error) {
+	fileStartOffset := recordToUpdateIndexRow.Get("startOffset").Int()
+	dataSize := recordToUpdateIndexRow.Get("dataSize").Int()
+	if fileStartOffset == 0 || dataSize == 0 {
+		loggermdl.LogError("index row details incorrect - start offset :", fileStartOffset, " data size :", dataSize)
+		return nil, nil, errormdl.Wrap("index row details incorrect")
+	}
+	// compare data size
+	// TODO: compare with encrypted data
+
+	// update fields data
+	// existingData, err := filemdl.ReadFileFromOffset(f, fileStartOffset, dataSize)
+	existingData, err := getFileDataFromPack(f, fileStartOffset, dataSize, rs, secParams)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	updatedDataStr := strings.TrimSpace(string(existingData))
+	rs.ForEach(func(key, val gjson.Result) bool {
+		// updating existing data
+		updatedDataStr, _ = sjson.Set(updatedDataStr, key.String(), val.Value())
+		return true
+	})
+	newDataSize := int64(len(updatedDataStr))
+	footerStartOffset := getFooterOffset(f)
+	updatedFooterOffset := footerStartOffset
+	err = setFileStatusFlag(f, fileStatusUpdatingData)
+	if err != nil {
+		return nil, nil, err
+	}
+	indexDataString := "[]"
+	if infileIndex == nil {
+		indexDataString, err = getInFileIndexData(f)
+		if err != nil {
+			loggermdl.LogError(err)
+			return nil, nil, err
+		}
+	} else {
+		indexDataString = infileIndex.String()
+	}
+	existingIndexRows := gjson.Parse(indexDataString)
+	if len(strings.TrimSpace(updatedDataStr)) <= len(strings.TrimSpace(string(existingData))) {
+		newDataSize, err = addFileDataInFile(f, fileStartOffset, updatedDataStr, false, rs, secParams)
+		if err != nil {
+			return nil, nil, err
+		}
+		// newDataSize = newDataSize + 2
+	} else {
+
+		newDataSize, err = addFileDataInFile(f, footerStartOffset, updatedDataStr, true, rs, secParams)
+		updatedFooterOffset = footerStartOffset + newDataSize
+		fileStartOffset = footerStartOffset
+	}
+	// indexRows, err := getInFileIndexData(f)
+	// if err != nil {
+	// 	return nil, err
+	// }
+	// update startOffset and dataSize in infile index
+	// updateIndexRow
+	updatedIndexRowStr := recordToUpdateIndexRow.String()
+
+	recordToUpdateIndexRow.ForEach(func(key, value gjson.Result) bool {
+		indexFieldKey := key.String()
+		if rs.Get(indexFieldKey).Exists() {
+			updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, indexFieldKey, rs.Get(indexFieldKey).Value())
+		}
+		return true
+	})
+	fileHash, err := securitymdl.GetHash(updatedDataStr)
+	if err != nil {
+		return nil, nil, err
+	}
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "startOffset", fileStartOffset)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "dataSize", newDataSize)
+	updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "fileHash", fileHash)
+	updatedIndexRows, err := updateIndexRow(&existingIndexRows, recordToUpdateIndexRow, gjson.Parse(updatedIndexRowStr))
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return nil, nil, err
+	}
+	// loggermdl.LogDebug("updated index data", updatedIndexRows)
+	err = setIndexDataInFile(f, updatedFooterOffset, updatedIndexRows.String())
+	if err != nil {
+		return nil, nil, err
+	}
+	err = setFooterOffset(f, updatedFooterOffset)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFooterSize(f, int64(len(updatedIndexRows.String())))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err = f.Sync(); err != nil {
+		return nil, nil, err
+	}
+	// TODO: discussion
+	// err = setFileReadyForUploadFlag(f, true)
+	// if err != nil {
+	// 	return err
+	// }
+	updatedData := gjson.Parse(updatedDataStr)
+	return &updatedData, updatedIndexRows, nil
+}
+
+// DeletDataFromPackFileUsingFp removes every record matching the in-file index queries using an already-open file pointer and returns the delete count along with the updated in-file index.
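+//
+// A minimal usage sketch (hypothetical query):
+//
+//	deleted, index, err := DeletDataFromPackFileUsingFp(f, nil, []string{`#[name=="vijay"]`})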
+func DeletDataFromPackFileUsingFp(f *os.File, infileIndex *gjson.Result, infileIndexQueries []string) (int, *gjson.Result, error) {
+	// get indexdata
+	// get update index records
+	// save updated index
+	// update index size header
+	// lock, err := filemdl.AcquireFileLock(filePath)
+	// defer filemdl.ReleaseFileLock(lock)
+	// if err != nil {
+	// 	return err
+	// }
+
+	recordsDeletedCnt := 0
+	lock := getLock(f.Name())
+	lock.Lock()
+	// f, err := filemdl.OpenFile(filePath, os.O_RDWR|os.O_SYNC, 0777)
+	defer func() {
+		lock.Unlock()
+		// f.Close()
+	}()
+	// if err != nil {
+	// 	return recordsDeletedCnt, err
+	// }
+	indexDataStr := "[]"
+	var err error
+	if infileIndex == nil {
+
+		indexDataStr, err = getInFileIndexData(f)
+		if err != nil {
+			return recordsDeletedCnt, infileIndex, err
+		}
+		loggermdl.LogError("infile index in file", indexDataStr)
+	} else {
+		indexDataStr = infileIndex.String()
+		loggermdl.LogError("infile index in cache", indexDataStr)
+	}
+
+	indexData := gjson.Parse(indexDataStr)
+	indexRecordsToDelete := indexData
+	// loggermdl.LogDebug("indexRecordsToDelete file type", indexRecordsToDelete)
+	for _, query := range infileIndexQueries {
+		indexRecordsToDelete = indexRecordsToDelete.Get(query + "#")
+	}
+	indexRowsToDelete := indexRecordsToDelete.Array()
+	if len(indexRowsToDelete) == 0 {
+		loggermdl.LogError("ErrNoDataFound")
+		return recordsDeletedCnt, infileIndex, ErrNoDataFound
+	}
+	loggermdl.LogError("before delete ", indexData)
+	updatedIndexRecords := indexData
+	for _, indexRowToRemove := range indexRowsToDelete {
+		updatedIndexRecords, err = removeIndexRow(updatedIndexRecords, indexRowToRemove.String())
+		if err != nil {
+			loggermdl.LogError("fail to delete record:", err)
+			return recordsDeletedCnt, infileIndex, errormdl.Wrap("fail to delete record:" + err.Error())
+		}
+		recordsDeletedCnt++
+	}
+	loggermdl.LogError("updatedIndexRecords after delete ", updatedIndexRecords, f.Name())
+	footerOffset := getFooterOffset(f)
+	if footerOffset == -1 {
+		return recordsDeletedCnt, infileIndex, errormdl.Wrap("fail to fetch infile index offset")
+	}
+	newIndexDataSize := len(updatedIndexRecords.String())
+	err = setFileStatusFlag(f, fileStatusUpdatingIndex)
+	if err != nil {
+		return recordsDeletedCnt, infileIndex, err
+	}
+	err = setIndexDataInFile(f, footerOffset, updatedIndexRecords.String())
+	if err != nil {
+		loggermdl.LogError("fail to update infile index data :", err)
+		return recordsDeletedCnt, infileIndex, err
+	}
+	err = setFileStatusFlag(f, fileStatusReady)
+	if err != nil {
+		return recordsDeletedCnt, infileIndex, err
+	}
+	return recordsDeletedCnt, &updatedIndexRecords, setFooterSize(f, int64(newIndexDataSize))
+}
+
+func encryptWithCompression(data []byte, compress bool, encKey []byte) (res []byte, err error) {
+	// start from the raw data so that, without compression, the input itself is
+	// encrypted instead of a nil slice
+	res = data
+	if compress {
+		res, err = filemdl.ZipBytes(data)
+		if err != nil {
+			return
+		}
+	}
+
+	return securitymdl.AESEncrypt(res, encKey)
+}
+
+func decryptwithDecompression(data []byte, deCompress bool, encKey []byte) (res []byte, err error) {
+	res, err = securitymdl.AESDecrypt(data, encKey)
+	if err != nil {
+		return
+	}
+
+	if deCompress {
+		return filemdl.UnZipBytes(res)
+	}
+
+	return
+}
+
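+// ReorgPackFile compacts a pack file: every record still referenced by the
+// in-file index is copied to a temporary file, the index offsets are rewritten,
+// and the original file is replaced atomically. A minimal usage sketch (the
+// path is hypothetical):
+//
+//	if err := ReorgPackFile("/tmp/packs/pack1"); err != nil {
+//		loggermdl.LogError(err)
+//	}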
+func ReorgPackFile(filePath string) error {
+
+	isFilePresent := filemdl.FileAvailabilityCheck(filePath)
+	if !isFilePresent {
+		return errormdl.Wrap("file not found")
+	}
+	lock := getLock(filePath)
+	lock.Lock()
+	f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	defer func() {
+		lock.Unlock()
+		f.Close()
+	}()
+	if err != nil {
+		loggermdl.LogError("Error occured during reOrg of file data", err)
+		return errormdl.Wrap("Error occured during reOrg of file data")
+	}
+	_, sourceFileName := filepath.Split(filePath)
+	desFileName := sourceFileName + "_" + strconv.FormatInt(time.Now().Unix(), 10)
+	tempFilepath, err := filepath.Abs(filepath.Join(filemdl.TempDir, desFileName))
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	tempFilDir, _ := filepath.Split(tempFilepath)
+	err = filemdl.CreateDirectoryRecursive(tempFilDir)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	err = initializeWithHeader(tempFilepath)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	fpTemp, err := os.OpenFile(tempFilepath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		fpTemp.Close()
+	}()
+	infileIndexData, err := getInFileIndexData(f)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	infileIndexRows := gjson.Parse(infileIndexData)
+	if len(infileIndexRows.Array()) == 0 {
+		return nil
+	}
+	tempFileFooterStartOffset := getFooterOffset(fpTemp)
+	if tempFileFooterStartOffset == -1 {
+		loggermdl.LogError("fail to fetch infile index offset")
+		return errormdl.Wrap("fail to fetch infile index data")
+	}
+	updatedIndexRowStr := "[]"
+	for _, infileIndex := range infileIndexRows.Array() {
+		startOffset, err := strconv.Atoi(infileIndex.Get("startOffset").String())
+		if err != nil {
+			loggermdl.LogError("Error occured while fetching startOffset", err)
+			return err
+		}
+		dataSize, err := strconv.Atoi(infileIndex.Get("dataSize").String())
+		if err != nil {
+			loggermdl.LogError("Error occured while fetching dataSize", err)
+			return err
+		}
+
+		byteArr, err := getFileDataFromPack(f, int64(startOffset), int64(dataSize), nil, securitymdl.FDBSecParams{EnableSecurity: false})
+		if err != nil {
+			loggermdl.LogError("Error occured while reading file data from offset", err)
+			return err
+		}
+		byteCnt, err := addByteDataInFile(fpTemp, tempFileFooterStartOffset, byteArr, false)
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		indexRowJSON, _ := sjson.Set(infileIndex.String(), "startOffset", tempFileFooterStartOffset)
+		indexRowJSON, _ = sjson.Set(indexRowJSON, "dataSize", byteCnt)
+		indexRowJSONObj := gjson.Parse(indexRowJSON)
+		updatedIndexRowStr, _ = sjson.Set(updatedIndexRowStr, "-1", indexRowJSONObj.Value())
+		tempFileFooterStartOffset = tempFileFooterStartOffset + byteCnt
+	}
+
+	err = setFooterOffset(fpTemp, tempFileFooterStartOffset)
+	if err != nil {
+		return err
+	}
+	err = setFooterSize(fpTemp, int64(len(updatedIndexRowStr)))
+	if err != nil {
+		return err
+	}
+	err = setIndexDataInFile(fpTemp, tempFileFooterStartOffset, updatedIndexRowStr)
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Sync()
+	if err != nil {
+		return err
+	}
+	err = fpTemp.Close()
+	if err != nil {
+		return err
+	}
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	return filemdl.AtomicReplaceFile(tempFilepath, filePath)
+}
diff --git a/v2/filemdl/filepack/packFile_test.go b/v2/filemdl/filepack/packFile_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..54c953320a8b929ca561ecb3cd339ce5b69f5a8c
--- /dev/null
+++ b/v2/filemdl/filepack/packFile_test.go
@@ -0,0 +1,116 @@
+package filepack
+
+import (
+	"fmt"
+	"log"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+func TestSaveDataInPackFile(t *testing.T) {
+	// get file list
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "studentId", 10014)
+	data, _ = sjson.Set(data, "class", "SY_MCA")
+	data, _ = sjson.Set(data, "age", 23)
+	data, _ = sjson.Set(data, "fileType", "Profile")
+	studentObj := gjson.Parse(data)
+	IndexFields := []InFileIndexField{
+		InFileIndexField{
+			FieldName: "name",
+			Query:     "name",
+		},
+	}
+	// create pack
+	err := AddFileInPackFile("/home/vivekn/fdb_data/packFileTest/pack", IndexFields, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestGetDataInPackFile(t *testing.T) {
+
+	infileIndexQueries := []string{`#[fileType=="Profile"]`, `#[name=="vijay"]`}
+	res, err := GetDataFromPackFile("/home/vivekn/fdb_data/packFileTest/pack1", infileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("res", res)
+}
+
+func TestUpdateFileInPackFile(t *testing.T) {
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "studentId", 10014)
+	data, _ = sjson.Set(data, "class", "TY_MCA")
+	data, _ = sjson.Set(data, "age", 23)
+	data, _ = sjson.Set(data, "fileType", "Profile")
+	studentObj := gjson.Parse(data)
+	infileIndexQueries := []string{`#[fileType=="Profile"]`, `#[name=="vijay"]`}
+	filePaths := []string{`/home/vivekn/fdb_data/packFileTest/pack`, `/home/vivekn/fdb_data/packFileTest/pack1`}
+	for _, filePath := range filePaths {
+		res, err := UpdateFilesInPackFile(filePath, infileIndexQueries, &studentObj, securitymdl.FDBSecParams{EnableSecurity: false})
+		if err != nil {
+			log.Fatal(err)
+		}
+		fmt.Println("res", res)
+	}
+}
+
+func TestUpdateFileInMultiplePackFile(t *testing.T) {
+	data, _ := sjson.Set("", "name", "vijay")
+	data, _ = sjson.Set(data, "studentId", 10014)
+	data, _ = sjson.Set(data, "class", "TY_MCA")
+	data, _ = sjson.Set(data, "age", 23)
+	data, _ = sjson.Set(data, "fileType", "Profile")
+	studentObj := gjson.Parse(data)
+	infileIndexQueries := []string{`#[fileType=="Profile"]`, `#[name=="vijay"]`}
+	res, err := UpdateFilesInPackFile("/home/vivekn/fdb_data/packFileTest/pack", infileIndexQueries, &studentObj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("res", res)
+}
+
+func TestDeletDataFromPackFile(t *testing.T) {
+	infileIndexQueries := []string{`#[fileType=="Profile"]`, `#[name=="vijay"]`}
+	_, err := DeletDataFromPackFile("/home/vivekn/fdb_data/packFileTest/pack1", infileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestAddMediaInPackFile(t *testing.T) {
+	data, _ := sjson.Set("", "extension", "png")
+	data, _ = sjson.Set(data, "fileType", "Asset")
+	rs := gjson.Parse(data)
+	filePath := "/home/vivekn/Pictures/abc.png"
+	dataByte, err := filemdl.ReadFile(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// dataByte = []byte("hello")
+	IndexFields := []InFileIndexField{}
+	id, err := AddMediaInPackFile("/home/vivekn/fdb_data/packFileTest/pack1", dataByte, IndexFields, &rs)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError(id)
+}
+func TestGetMediaFromPackFile(t *testing.T) {
+	// data, _ := sjson.Set("", "extension", "png")
+	// data, _ = sjson.Set(data, "recordID", "1V6my0ijyVn8Yu5cubfgcUwRiDk")
+	// rs := gjson.Parse(data)
+	infileIndexQueries := []string{`#[recordID="1V8rdwCsRFyUq91byPE2ad0rb4e"]`}
+	filePath := "/home/vivekn/fdb_data/packFileTest/pack1"
+	data, err := GetMediaFromPackFile(filePath, infileIndexQueries)
+	if err != nil {
+		log.Fatal(err)
+	}
+	loggermdl.LogError(string(data))
+}
diff --git a/v2/filemdl/filequeueing.go b/v2/filemdl/filequeueing.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce09f4f72ea3e7eaaf2c988f2870bdef9fd95d86
--- /dev/null
+++ b/v2/filemdl/filequeueing.go
@@ -0,0 +1,208 @@
+package filemdl
+
+import (
+	"bytes"
+	"container/list"
+	"errors"
+	"io"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/workerpoolmdl"
+)
+
+// Filemdl Object
+type Filemdl struct {
+	requestList *list.List
+	lock        sync.Mutex
+	taskCount   int
+	concurrency int
+	Error       error
+}
+
+// enqueObject is used for enqueuing write requests in the list
+type enqueObject struct {
+	writer       *io.PipeWriter
+	Reader       *io.PipeReader
+	FilePath     string
+	Data         []byte
+	makeDir      bool
+	createBackup bool
+}
+
+var instance *Filemdl
+var once sync.Once
+var trigger sync.Once
+
+// init initialises the singleton instance with a new request list
+func init() {
+	once.Do(func() {
+		instance = &Filemdl{
+			requestList: list.New(),
+		}
+		instance.taskCount = constantmdl.TASKCOUNT
+		instance.concurrency = constantmdl.TASKCOUNT
+	})
+}
+
+// Init initializes Filemdl object with parameters
+func Init(taskCount, concurrency int) {
+	if taskCount <= 0 {
+		taskCount = constantmdl.TASKCOUNT
+	}
+	if concurrency <= 0 {
+		concurrency = constantmdl.TASKCOUNT
+	}
+	instance.taskCount = taskCount
+	instance.concurrency = concurrency
+}
+
+// GetInstance returns the single instance of the Filemdl object
+func GetInstance() *Filemdl {
+	if instance == nil || instance.requestList == nil {
+		instance = &Filemdl{}
+		loggermdl.LogError("filemdl does not initialise")
+		instance.Error = errors.New("filemdl does not initialise")
+	}
+	return instance
+}
+
+// getEnqueueObject returns a new enqueue object for each write request
+func getEnqueueObject(filePath string, ba []byte, makeDir, createBackup bool) enqueObject {
+	r, w := io.Pipe()
+	obj := enqueObject{}
+	obj.writer = w
+	obj.Reader = r
+	obj.Data = ba
+	obj.FilePath = filePath
+	obj.makeDir = makeDir
+	obj.createBackup = createBackup
+	return obj
+}
+
+// enqueueingRequest pushes a request onto the back of the list
+func (filemdl *Filemdl) enqueueingRequest(filePath string, ba []byte, makeDir, createBackup bool) *enqueObject {
+	en := getEnqueueObject(filePath, ba, makeDir, createBackup)
+	filemdl.lock.Lock()
+	filemdl.requestList.PushBack(en)
+	filemdl.lock.Unlock()
+	return &en
+}
+
+// Save enqueues the write request and blocks until the queued write completes, returning any error produced by the write.
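+//
+// A minimal usage sketch (hypothetical path and payload):
+//
+//	err := filemdl.GetInstance().Save("/tmp/data/a.json", []byte(`{"k":"v"}`), true, false)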
+func (filemdl *Filemdl) Save(filePath string, ba []byte, makeDir, createBackup bool) error {
+	if filemdl.Error != nil {
+		loggermdl.LogError(filemdl.Error)
+		return filemdl.Error
+	}
+	en := filemdl.enqueueingRequest(filePath, ba, makeDir, createBackup)
+	trigger.Do(func() {
+		go filemdl.triggerWritingData()
+	})
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(en.Reader)
+	if len(buf.Bytes()) != 0 {
+		err := errormdl.Wrap(buf.String())
+		loggermdl.LogError(err)
+		return err
+	}
+	en.Reader.Close()
+	return nil
+}
+
+// triggerWritingData triggers the writing thread to write data (only once)
+func (filemdl *Filemdl) triggerWritingData() {
+	tasks := []*workerpoolmdl.Task{}
+	fileList := make(map[string]string)
+	deleteStatus := false
+	el := filemdl.getFirstElement()
+	if el == nil {
+		return
+	}
+	// This logic executes only for the very first request
+	en := el.Value.(enqueObject)
+	if _, ok := fileList[en.FilePath]; !ok {
+		tasks = append(tasks, workerpoolmdl.NewTask(en, writeQueueData))
+		fileList[en.FilePath] = ""
+		deleteStatus = true
+	}
+	tmp := el
+	el = el.Next()
+	if deleteStatus {
+		filemdl.deleteElement(el, tmp)
+	}
+	tasks, el, fileList = filemdl.listTasks(tasks, el, fileList)
+}
+
+// listTasks collects tasks from the list up to the configured threshold
+func (filemdl *Filemdl) listTasks(tasks []*workerpoolmdl.Task, el *list.Element, fileList map[string]string) ([]*workerpoolmdl.Task, *list.Element, map[string]string) {
+	for {
+		deleteStatus := false
+		if el == nil {
+			if len(tasks) > 0 {
+				tasks, fileList = filemdl.runTask(tasks)
+			}
+			el = filemdl.getFirstElement()
+			continue
+		}
+		if len(tasks) == filemdl.taskCount {
+			tasks, fileList = filemdl.runTask(tasks)
+			el = filemdl.getFirstElement()
+			continue
+		}
+		en := el.Value.(enqueObject)
+		// ensure the task batch contains each file only once
+		if _, ok := fileList[en.FilePath]; !ok {
+			tasks = append(tasks, workerpoolmdl.NewTask(en, writeQueueData))
+			fileList[en.FilePath] = ""
+			deleteStatus = true
+		}
+		tmp := el
+		el = el.Next()
+		if deleteStatus {
+			filemdl.deleteElement(el, tmp)
+		}
+	}
+}
+
+// runTask runs the collected tasks using the worker pool
+func (filemdl *Filemdl) runTask(tasks []*workerpoolmdl.Task) ([]*workerpoolmdl.Task, map[string]string) {
+	p := workerpoolmdl.NewPool(tasks, filemdl.concurrency)
+	p.Run()
+	tasks = []*workerpoolmdl.Task{}
+	fileList := make(map[string]string)
+	return tasks, fileList
+}
+
+// getFirstElement returns the front element of the list
+func (filemdl *Filemdl) getFirstElement() *list.Element {
+	filemdl.lock.Lock()
+	defer filemdl.lock.Unlock()
+	return filemdl.requestList.Front()
+}
+
+// deleteElement deletes the given element from the list
+func (filemdl *Filemdl) deleteElement(el, tmp *list.Element) {
+	filemdl.lock.Lock()
+	// if the current element is not nil, delete its predecessor; otherwise delete the given element
+	if el != nil {
+		filemdl.requestList.Remove(el.Prev())
+	} else {
+		filemdl.requestList.Remove(tmp)
+	}
+	filemdl.lock.Unlock()
+}
+
+// writeQueueData writes data in the file or any other stream
+func writeQueueData(raw interface{}) error {
+	en := raw.(enqueObject)
+	err := WriteFile(en.FilePath, en.Data, en.makeDir, en.createBackup)
+	defer en.writer.Close()
+	// if writing to the destination stream fails, the error is written back on the same pipe instance
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		en.writer.Write([]byte(errormdl.CheckErr(err).Error()))
+	}
+	return nil
+}
diff --git a/v2/filemdl/filezip.go b/v2/filemdl/filezip.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca89c8b6cabd0b8580db5089339a46dc840ebdfe
--- /dev/null
+++ b/v2/filemdl/filezip.go
@@ -0,0 +1,351 @@
+package filemdl
+
+import (
+	"archive/zip"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	// "github.com/DataDog/zstd"
+	"github.com/klauspost/compress/zstd"
+)
+
+var encoder, _ = zstd.NewWriter(nil)
+var decoder, _ = zstd.NewReader(nil)
+
+//Zip compresses the source file or directory into the target zip archive, keeping the base directory name in entry paths
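+//
+// A minimal usage sketch (hypothetical paths):
+//
+//	if err := filemdl.Zip("/tmp/src", "/tmp/out.zip"); err != nil {
+//		loggermdl.LogError(err)
+//	}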
+func Zip(source, target string) error {
+	source = filepath.Clean(source)
+	target = filepath.Clean(target)
+
+	zipfile, err := os.Create(target)
+	if err != nil {
+		return err
+	}
+	defer zipfile.Close()
+
+	archive := zip.NewWriter(zipfile)
+	defer archive.Close()
+
+	info, err := os.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	var baseDir string
+	if info.IsDir() {
+		baseDir = filepath.Base(source)
+	}
+
+	filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+
+		header, err := zip.FileInfoHeader(info)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+
+		if baseDir != "" {
+			header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
+			// Replace all occurrences of \\ with / so that a zip created on Windows unzips correctly on Linux
+			header.Name = strings.Replace(header.Name, "\\", "/", -1)
+		}
+
+		if info.IsDir() {
+			header.Name += "/"
+		} else {
+			header.Method = zip.Deflate
+		}
+
+		writer, err := archive.CreateHeader(header)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		file, err := os.Open(path)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+		_, err = io.Copy(writer, file)
+
+		file.Close()
+		return errormdl.CheckErr(err)
+	})
+
+	return errormdl.CheckErr(err)
+}
+
+//ZipWithoutBaseDirectory zips the source without including its base directory name in entry paths
+func ZipWithoutBaseDirectory(source, target string) error {
+	source = filepath.Clean(source)
+	target = filepath.Clean(target)
+
+	zipfile, err := os.Create(target)
+	if err != nil {
+		return err
+	}
+	defer zipfile.Close()
+
+	archive := zip.NewWriter(zipfile)
+	defer archive.Close()
+
+	info, err := os.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	var baseDir string
+	if info.IsDir() {
+		baseDir = filepath.Base(source)
+	}
+
+	filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+
+		header, err := zip.FileInfoHeader(info)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+
+		if baseDir != "" {
+			header.Name = strings.TrimPrefix(path, source)
+			// Replace all occurrences of \\ with / so that a zip created on Windows unzips correctly on Linux
+			header.Name = strings.Replace(header.Name, "\\", "/", -1)
+		}
+
+		if info.IsDir() {
+			header.Name += "/"
+		} else {
+			header.Method = zip.Deflate
+		}
+
+		writer, err := archive.CreateHeader(header)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		file, err := os.Open(path)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+		_, err = io.Copy(writer, file)
+
+		file.Close()
+		return errormdl.CheckErr(err)
+	})
+
+	return errormdl.CheckErr(err)
+}
+
+//ZipWithSkipFileList zips the source, skipping any file whose name matches an entry in skipFileList
+func ZipWithSkipFileList(source, target string, skipFileList []string) error {
+
+	source = filepath.Clean(source)
+	target = filepath.Clean(target)
+	zipfile, err := os.Create(target)
+	if err != nil {
+		return err
+	}
+	defer zipfile.Close()
+	archive := zip.NewWriter(zipfile)
+	defer archive.Close()
+	info, err := os.Stat(source)
+	if err != nil {
+		return err
+	}
+	var baseDir string
+	if info.IsDir() {
+		baseDir = filepath.Base(source)
+	}
+	filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
+
+		if !checkPathInSkipList(info.Name(), skipFileList) {
+			header, err := zip.FileInfoHeader(info)
+			if errormdl.CheckErr(err) != nil {
+				return errormdl.CheckErr(err)
+			}
+
+			if baseDir != "" {
+				header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
+				// Replace all occurrences of \\ with / so that a zip created on Windows unzips correctly on Linux
+				header.Name = strings.Replace(header.Name, "\\", "/", -1)
+			}
+
+			if info.IsDir() {
+				header.Name += "/"
+			} else {
+				header.Method = zip.Deflate
+			}
+
+			writer, err := archive.CreateHeader(header)
+			if errormdl.CheckErr(err) != nil {
+				return errormdl.CheckErr(err)
+			}
+
+			if info.IsDir() {
+				return nil
+			}
+
+			file, err := os.Open(path)
+			if errormdl.CheckErr(err) != nil {
+				return errormdl.CheckErr(err)
+			}
+
+			_, err = io.Copy(writer, file)
+			file.Close()
+			return errormdl.CheckErr(err)
+		}
+
+		return nil
+	})
+	return errormdl.CheckErr(err)
+}
+
+func checkPathInSkipList(path string, skipList []string) bool {
+	for _, prefix := range skipList {
+		if strings.HasPrefix(path, prefix) {
+			return true
+		}
+		if strings.HasSuffix(path, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+//Unzip extracts the archive into the target directory
+func Unzip(archive, target string) error {
+	reader, err := zip.OpenReader(archive)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	mkcDirError := os.MkdirAll(target, 0755)
+	if errormdl.CheckErr(mkcDirError) != nil {
+		loggermdl.LogError(errormdl.CheckErr(mkcDirError))
+		return errormdl.CheckErr(mkcDirError)
+	}
+
+	for _, file := range reader.File {
+		filePath := filepath.Join(target, file.Name)
+		parentPath := path.Dir(CleanPath(filePath))
+
+		if !FileAvailabilityCheck(parentPath) {
+			CreateDirectoryRecursive(parentPath)
+		}
+
+		if file.FileInfo().IsDir() {
+			os.MkdirAll(filePath, file.Mode())
+			continue
+		}
+
+		fileReader, openError := file.Open()
+		if errormdl.CheckErr(openError) != nil {
+			loggermdl.LogError(errormdl.CheckErr(openError))
+			return errormdl.CheckErr(openError)
+		}
+
+		targetFile, targetOpenError := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
+		if errormdl.CheckErr(targetOpenError) != nil {
+			loggermdl.LogError(errormdl.CheckErr(targetOpenError))
+			return errormdl.CheckErr(targetOpenError)
+		}
+
+		_, copyError := io.Copy(targetFile, fileReader)
+		if errormdl.CheckErr(copyError) != nil {
+			loggermdl.LogError(errormdl.CheckErr(copyError))
+			return errormdl.CheckErr(copyError)
+		}
+		targetCloseError := targetFile.Close()
+		if errormdl.CheckErr(targetCloseError) != nil {
+			loggermdl.LogError(errormdl.CheckErr(targetCloseError))
+			return errormdl.CheckErr(targetCloseError)
+		}
+
+		fileCloseError := fileReader.Close()
+		if errormdl.CheckErr(fileCloseError) != nil {
+			loggermdl.LogError(errormdl.CheckErr(fileCloseError))
+			return errormdl.CheckErr(fileCloseError)
+		}
+	}
+	closeError := reader.Close()
+	if errormdl.CheckErr(closeError) != nil {
+		loggermdl.LogError(errormdl.CheckErr(closeError))
+		return errormdl.CheckErr(closeError)
+	}
+	return nil
+}
+
+//ZipBytes - compresses a byte slice with zstd
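+//
+// A minimal round-trip sketch with UnZipBytes:
+//
+//	compressed, _ := filemdl.ZipBytes([]byte("payload"))
+//	original, err := filemdl.UnZipBytes(compressed)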
+func ZipBytes(inputData []byte) ([]byte, error) {
+	// compressedData, err := zstd.CompressLevel(nil, inputData, 9)
+	compressedData := encoder.EncodeAll(inputData, make([]byte, 0, len(inputData)))
+	return compressedData, nil
+}
+
+// ZipSingleFile - compresses a single file and writes the result to destFilePath
+func ZipSingleFile(sourceFilePath, destFilePath string) error {
+	inputData, err := ioutil.ReadFile(sourceFilePath)
+	if err != nil {
+		return err
+	}
+	compressedData, err := ZipBytes(inputData)
+	if errormdl.CheckErr(err) != nil {
+		return errormdl.CheckErr(err)
+	}
+	err = ioutil.WriteFile(destFilePath, compressedData, 0644)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+//UnZipBytes - decompresses a zstd-compressed byte slice
+func UnZipBytes(compressedData []byte) ([]byte, error) {
+	// decompressedData, err := zstd.Decompress(nil, compressedData)
+	decompressedData, err := decoder.DecodeAll(compressedData, nil)
+	return decompressedData, errormdl.CheckErr(err)
+}
+
+// UnZipSingleFile decompresses a zstd-compressed file and writes the result to destFilePath.
+func UnZipSingleFile(sourceFilePath, destFilePath string) error {
+	inputData, err := ioutil.ReadFile(sourceFilePath)
+	if err != nil {
+		return err
+	}
+	uncompressedData, err := UnZipBytes(inputData)
+	if errormdl.CheckErr(err) != nil {
+		return errormdl.CheckErr(err)
+	}
+	err = ioutil.WriteFile(destFilePath, uncompressedData, 0644)
+	if err != nil {
+		return err
+	}
+	return nil
+}
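+
+// Example round trip (a minimal sketch; the file paths are hypothetical):
+//
+//	if err := ZipSingleFile("data.json", "data.json.zst"); err == nil {
+//		_ = UnZipSingleFile("data.json.zst", "data_restored.json")
+//	}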
diff --git a/v2/filemdl/filezip_test.go b/v2/filemdl/filezip_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..845f89f8904bf489f788dbaf7a3bd2f16016d871
--- /dev/null
+++ b/v2/filemdl/filezip_test.go
@@ -0,0 +1,108 @@
+package filemdl
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestZipWithoutBaseDirectory(t *testing.T) {
+	err := ZipWithoutBaseDirectory("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/TestZipWithoutBaseDirectory.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1ZipWithoutBaseDirectory(t *testing.T) {
+	// test case for target zip can't be created
+	err := ZipWithoutBaseDirectory("../testingdata/testData/filezip/vs-code-settings.json", "X:/Test1ZipWithoutBaseDirectory.zip")
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2ZipWithoutBaseDirectory(t *testing.T) {
+	// test case for source folder can't be read
+	err := ZipWithoutBaseDirectory("X:/vs-code-settings.json", "X:/Test2ZipWithoutBaseDirectory.zip")
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test3ZipWithoutBaseDirectory(t *testing.T) {
+	// test case for walk for source folder
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := ZipWithoutBaseDirectory("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/Test3ZipWithoutBaseDirectory.zip")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test4ZipWithoutBaseDirectory(t *testing.T) {
+	// test case for zipping a whole directory
+	err := ZipWithoutBaseDirectory("../testingdata/testData/filezip", "../testingdata/testData/filezip/Test4ZipWithoutBaseDirectory.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func TestZipWithSkipFileList(t *testing.T) {
+	err := ZipWithSkipFileList("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/TestZipWithSkipFileList.zip", []string{})
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1ZipWithSkipFileList(t *testing.T) {
+	// test case for target zip can't be created
+	err := ZipWithSkipFileList("../testingdata/testData/filezip/vs-code-settings.json", "X:/Test1ZipWithSkipFileList.zip", []string{})
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2ZipWithSkipFileList(t *testing.T) {
+	// test case for source folder can't be read
+	err := ZipWithSkipFileList("X:/vs-code-settings.json", "X:/Test2ZipWithSkipFileList.zip", []string{})
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test3ZipWithSkipFileList(t *testing.T) {
+	// test case for walk for source folder
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := ZipWithSkipFileList("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/Test3ZipWithSkipFileList.zip", []string{})
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test4ZipWithSkipFileList(t *testing.T) {
+	err := Zip("../testingdata/testData/filezip", "../testingdata/testData/Test4ZipWithSkipFileList.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test5ZipWithSkipFileList(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := Zip("../testingdata/testData/filezip", "../testingdata/testData/Test5ZipWithSkipFileList.zip")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should not throw error")
+}
+
+func Test_checkPathInSkipList(t *testing.T) {
+	retVal := checkPathInSkipList("a", []string{"a", "b", "c"})
+	assert.True(t, retVal, "This should return true")
+}
+func Test1_checkPathInSkipList(t *testing.T) {
+	retVal := checkPathInSkipList("d", []string{"a", "b", "c"})
+	assert.False(t, retVal, "This should return false")
+}
+func Test_Unzip(t *testing.T) {
+	Zip("../testingdata/testData/filezip", "../testingdata/testData/Test_Unzip.zip")
+	retVal := Unzip("../testingdata/testData/Test_Unzip.zip", "../testingdata/testData/filezip/Test_Unzip")
+	assert.NoError(t, retVal, "This should not return error")
+}
+func Test1_Unzip(t *testing.T) {
+	Zip("../testingdata/testData/filezip", "../testingdata/testData/Test1_Unzip.zip")
+	retVal := Unzip("X:/Test1_Unzip.zip", "../testingdata/testData/filezip/Test1_Unzip")
+	assert.Error(t, retVal, "This should return error")
+}
+func Test2_Unzip(t *testing.T) {
+	Zip("../testingdata/testData/filezip", "../testingdata/testData/Test2_Unzip.zip")
+	retVal := Unzip("../testingdata/testData/Test2_Unzip.zip", "X:/Test2_Unzip")
+	assert.Error(t, retVal, "This should return error")
+}
+
+func Test3_Unzip(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	Zip("../testingdata/testData/filezip", "../testingdata/testData/Test3_Unzip.zip")
+	retVal := Unzip("../testingdata/testData/Test3_Unzip.zip", "../testingdata/testData/filezip/Test3_Unzip")
+	assert.Error(t, retVal, "This should return error")
+	errormdl.IsTestingNegetiveCaseOn = false
+}
diff --git a/v2/filemdl/filezip_zip_test.go b/v2/filemdl/filezip_zip_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0db37d8e02afdeabafc48499e59aabf32b00dee4
--- /dev/null
+++ b/v2/filemdl/filezip_zip_test.go
@@ -0,0 +1,121 @@
+package filemdl
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestZip(t *testing.T) {
+	err := Zip("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/TestZip.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1Zip(t *testing.T) {
+	// test case for target zip can't be created
+	err := Zip("../testingdata/testData/filezip/vs-code-settings.json", "X:/Test1Zip.zip")
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2Zip(t *testing.T) {
+	// test case for source folder can't be read
+	err := Zip("X:/vs-code-settings.json", "X:/Test2Zip.zip")
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test3Zip(t *testing.T) {
+	// test case for walk for source folder
+	errormdl.IsTestingNegetiveCaseOn = true
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	err := Zip("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/Test3Zip.zip")
+	errormdl.IsTestingNegetiveCaseOn = false
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test4Zip(t *testing.T) {
+	err := Zip("../testingdata/testData/filezip", "../testingdata/testData/filezip/Test4Zip.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test5Zip(t *testing.T) {
+	err := Zip("../testingdata/testData/filezip", "../testingdata/testData/filezip/Test5Zip.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test_ZipBytes(t *testing.T) {
+	inputData := []byte{1, 2, 3, 4, 5}
+	_, err := ZipBytes(inputData)
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1_ZipBytes(t *testing.T) {
+	inputData := []byte{}
+	_, err := ZipBytes(inputData)
+	assert.Error(t, err, "This should throw error")
+}
+
+func TestZipSingleFile(t *testing.T) {
+	err := ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/TestZipSingleFile.zip")
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1ZipSingleFile(t *testing.T) {
+	err := ZipSingleFile("X:/vs-code-settings.json", "../testingdata/testData/filezip/Test1ZipSingleFile.zip")
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2ZipSingleFile(t *testing.T) {
+	err := ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "X:/Test1ZipSingleFile.zip")
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test3ZipSingleFile(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/Test2ZipSingleFile.zip")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should not throw error")
+}
+func Test_UnZipBytes(t *testing.T) {
+	inputData := []byte{1, 2, 3, 4, 5}
+	inputData, _ = ZipBytes(inputData)
+	_, err := UnZipBytes(inputData)
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1_UnZipBytes(t *testing.T) {
+	inputData := []byte{1, 2, 3, 4, 5}
+	_, err := UnZipBytes(inputData)
+	assert.Error(t, err, "This should throw error")
+}
+
+func TestUnZipSingleFile(t *testing.T) {
+	ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/TestUnZipSingleFile.zip")
+	err := UnZipSingleFile("../testingdata/testData/filezip/TestUnZipSingleFile.zip", "../testingdata/testData/filezip/uz-vs-code-settings.json")
+	// fmt.Println("err: ", err)
+	assert.NoError(t, err, "This should not throw error")
+}
+
+func Test1UnZipSingleFile(t *testing.T) {
+	ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/Test1UnZipSingleFile.zip")
+	err := UnZipSingleFile("X:/TestUnZipSingleFile.zip", "../testingdata/testData/filezip/uz-vs-code-settings.json")
+	// fmt.Println("err: ", err)
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test2UnZipSingleFile(t *testing.T) {
+	ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/Test2UnZipSingleFile.zip")
+	err := UnZipSingleFile("../testingdata/testData/filezip/Test2UnZipSingleFile.zip", "X:/uz-vs-code-settings.json")
+	// fmt.Println("err: ", err)
+	assert.Error(t, err, "This should throw error")
+}
+
+func Test3UnZipSingleFile(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	ZipSingleFile("../testingdata/testData/filezip/vs-code-settings.json", "../testingdata/testData/filezip/TestUnZipSingleFile.zip")
+	err := UnZipSingleFile("../testingdata/testData/filezip/TestUnZipSingleFile.zip", "../testingdata/testData/filezip/uz-vs-code-settings.json")
+	// fmt.Println("err: ", err)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should not throw error")
+}
diff --git a/v2/generate.sh b/v2/generate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..732a64f43efe00f1cfdcc90265b9277833e07f52
--- /dev/null
+++ b/v2/generate.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
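+# Assumes protoc and the protoc-gen-go plugin are installed and on PATH.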
+protoc grpcbuildermdl/grpcbuildermdl.proto --go_out=plugins=grpc:.
\ No newline at end of file
diff --git a/v2/gjsonhelpermdl/gjsonhelpermdl.go b/v2/gjsonhelpermdl/gjsonhelpermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..b8fe0e7c9df1db67aa128eb96f48af6f94273a52
--- /dev/null
+++ b/v2/gjsonhelpermdl/gjsonhelpermdl.go
@@ -0,0 +1,363 @@
+package gjsonhelpermdl
+
+import (
+	"strconv"
+	"time"
+	"unicode/utf16"
+	"unicode/utf8"
+
+	"github.com/tidwall/gjson"
+)
+
+// GetInterface returns the native Go value of a gjson.Result; values under keys listed in timeStampKeys are parsed as RFC3339 timestamps and returned as time.Time.
+func GetInterface(t *gjson.Result, timeStampKeys ...string) interface{} {
+	if t.Type == gjson.String {
+		return t.Str
+	}
+	switch t.Type {
+	default:
+		return nil
+	case gjson.False:
+		return false
+	case gjson.Number:
+		return t.Num
+	case gjson.JSON:
+		r := arrayOrMap(t, 0, true, timeStampKeys...)
+		if r.vc == '{' {
+			return r.oi
+		} else if r.vc == '[' {
+			return r.ai
+		}
+		return nil
+	case gjson.True:
+		return true
+	}
+}
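+
+// Example usage (a minimal sketch; the JSON document and key name are hypothetical):
+//
+//	r := gjson.Parse(`{"name":"a","createdOn":"2020-01-02T15:04:05Z"}`)
+//	v := GetInterface(&r, "createdOn") // "createdOn" comes back as time.Time in the resulting map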
+
+type arrayOrMapResult struct {
+	a  []gjson.Result
+	ai []interface{}
+	o  map[string]gjson.Result
+	oi map[string]interface{}
+	vc byte
+}
+
+func arrayOrMap(t *gjson.Result, vc byte, valueize bool, timeStampKeys ...string) (r arrayOrMapResult) {
+	var json = t.Raw
+	var i int
+	var value gjson.Result
+	var count int
+	var key gjson.Result
+	if vc == 0 {
+		for ; i < len(json); i++ {
+			if json[i] == '{' || json[i] == '[' {
+				r.vc = json[i]
+				i++
+				break
+			}
+			if json[i] > ' ' {
+				goto end
+			}
+		}
+	} else {
+		for ; i < len(json); i++ {
+			if json[i] == vc {
+				i++
+				break
+			}
+			if json[i] > ' ' {
+				goto end
+			}
+		}
+		r.vc = vc
+	}
+	if r.vc == '{' {
+		if valueize {
+			r.oi = make(map[string]interface{})
+		} else {
+			r.o = make(map[string]gjson.Result)
+		}
+	} else {
+		if valueize {
+			r.ai = make([]interface{}, 0)
+		} else {
+			r.a = make([]gjson.Result, 0)
+		}
+	}
+	for ; i < len(json); i++ {
+		if json[i] <= ' ' {
+			continue
+		}
+		// get next value
+		if json[i] == ']' || json[i] == '}' {
+			break
+		}
+		switch json[i] {
+		default:
+			if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' {
+				value.Type = gjson.Number
+				value.Raw, value.Num = tonum(json[i:])
+				value.Str = ""
+			} else {
+				continue
+			}
+		case '{', '[':
+			value.Type = gjson.JSON
+			value.Raw = squash(json[i:])
+			value.Str, value.Num = "", 0
+		case 'n':
+			value.Type = gjson.Null
+			value.Raw = tolit(json[i:])
+			value.Str, value.Num = "", 0
+		case 't':
+			value.Type = gjson.True
+			value.Raw = tolit(json[i:])
+			value.Str, value.Num = "", 0
+		case 'f':
+			value.Type = gjson.False
+			value.Raw = tolit(json[i:])
+			value.Str, value.Num = "", 0
+		case '"':
+			value.Type = gjson.String
+			value.Raw, value.Str = tostr(json[i:])
+			value.Num = 0
+		}
+		i += len(value.Raw) - 1
+
+		if r.vc == '{' {
+			if count%2 == 0 {
+				key = value
+			} else {
+				if valueize {
+					if _, ok := r.oi[key.Str]; !ok {
+						if len(timeStampKeys) > 0 && detectTimeStampKey(key.Str, timeStampKeys) {
+							t, err := time.Parse(time.RFC3339Nano, value.String())
+							if err == nil {
+								r.oi[key.Str] = t.Local()
+							} else {
+								r.oi[key.Str] = value.Value()
+							}
+						} else {
+							r.oi[key.Str] = value.Value()
+						}
+					}
+				} else {
+					if _, ok := r.o[key.Str]; !ok {
+						r.o[key.Str] = value
+					}
+				}
+			}
+			count++
+		} else {
+			if valueize {
+				r.ai = append(r.ai, value.Value())
+			} else {
+				r.a = append(r.a, value)
+			}
+		}
+	}
+end:
+	return
+}
+
+func detectTimeStampKey(key string, timeStampKeys []string) bool {
+	for _, timeStampKey := range timeStampKeys {
+		if timeStampKey == key {
+			return true
+		}
+	}
+	return false
+}
+
+func squash(json string) string {
+	// expects that the lead character is a '[' or '{'
+	// squash the value, ignoring all nested arrays and objects.
+	// the first '[' or '{' has already been read
+	depth := 1
+	for i := 1; i < len(json); i++ {
+		if json[i] >= '"' && json[i] <= '}' {
+			switch json[i] {
+			case '"':
+				i++
+				s2 := i
+				for ; i < len(json); i++ {
+					if json[i] > '\\' {
+						continue
+					}
+					if json[i] == '"' {
+						// look for an escaped slash
+						if json[i-1] == '\\' {
+							n := 0
+							for j := i - 2; j > s2-1; j-- {
+								if json[j] != '\\' {
+									break
+								}
+								n++
+							}
+							if n%2 == 0 {
+								continue
+							}
+						}
+						break
+					}
+				}
+			case '{', '[':
+				depth++
+			case '}', ']':
+				depth--
+				if depth == 0 {
+					return json[:i+1]
+				}
+			}
+		}
+	}
+	return json
+}
+
+func tonum(json string) (raw string, num float64) {
+	for i := 1; i < len(json); i++ {
+		// less than dash might have valid characters
+		if json[i] <= '-' {
+			if json[i] <= ' ' || json[i] == ',' {
+				// break on whitespace and comma
+				raw = json[:i]
+				num, _ = strconv.ParseFloat(raw, 64)
+				return
+			}
+			// could be a '+' or '-'. let's assume so.
+			continue
+		}
+		if json[i] < ']' {
+			// probably a valid number
+			continue
+		}
+		if json[i] == 'e' || json[i] == 'E' {
+			// allow for exponential numbers
+			continue
+		}
+		// likely a ']' or '}'
+		raw = json[:i]
+		num, _ = strconv.ParseFloat(raw, 64)
+		return
+	}
+	raw = json
+	num, _ = strconv.ParseFloat(raw, 64)
+	return
+}
+
+func tolit(json string) (raw string) {
+	for i := 1; i < len(json); i++ {
+		if json[i] < 'a' || json[i] > 'z' {
+			return json[:i]
+		}
+	}
+	return json
+}
+
+func tostr(json string) (raw string, str string) {
+	// expects that the lead character is a '"'
+	for i := 1; i < len(json); i++ {
+		if json[i] > '\\' {
+			continue
+		}
+		if json[i] == '"' {
+			return json[:i+1], json[1:i]
+		}
+		if json[i] == '\\' {
+			i++
+			for ; i < len(json); i++ {
+				if json[i] > '\\' {
+					continue
+				}
+				if json[i] == '"' {
+					// look for an escaped slash
+					if json[i-1] == '\\' {
+						n := 0
+						for j := i - 2; j > 0; j-- {
+							if json[j] != '\\' {
+								break
+							}
+							n++
+						}
+						if n%2 == 0 {
+							continue
+						}
+					}
+					break
+				}
+			}
+			var ret string
+			if i+1 < len(json) {
+				ret = json[:i+1]
+			} else {
+				ret = json[:i]
+			}
+			return ret, unescape(json[1:i])
+		}
+	}
+	return json, json[1:]
+}
+
+// unescape unescapes a string
+func unescape(json string) string {
+	var str = make([]byte, 0, len(json))
+	for i := 0; i < len(json); i++ {
+		switch {
+		default:
+			str = append(str, json[i])
+		case json[i] < ' ':
+			return string(str)
+		case json[i] == '\\':
+			i++
+			if i >= len(json) {
+				return string(str)
+			}
+			switch json[i] {
+			default:
+				return string(str)
+			case '\\':
+				str = append(str, '\\')
+			case '/':
+				str = append(str, '/')
+			case 'b':
+				str = append(str, '\b')
+			case 'f':
+				str = append(str, '\f')
+			case 'n':
+				str = append(str, '\n')
+			case 'r':
+				str = append(str, '\r')
+			case 't':
+				str = append(str, '\t')
+			case '"':
+				str = append(str, '"')
+			case 'u':
+				if i+5 > len(json) {
+					return string(str)
+				}
+				r := runeit(json[i+1:])
+				i += 5
+				if utf16.IsSurrogate(r) {
+					// need another code
+					if len(json[i:]) >= 6 && json[i] == '\\' && json[i+1] == 'u' {
+						// we expect it to be correct so just consume it
+						r = utf16.DecodeRune(r, runeit(json[i+2:]))
+						i += 6
+					}
+				}
+				// provide enough space to encode the largest utf8 possible
+				str = append(str, 0, 0, 0, 0, 0, 0, 0, 0)
+				n := utf8.EncodeRune(str[len(str)-8:], r)
+				str = str[:len(str)-8+n]
+				i-- // backtrack index by one
+			}
+		}
+	}
+	return string(str)
+}
+
+// runeit returns the rune from the \uXXXX escape sequence
+func runeit(json string) rune {
+	n, _ := strconv.ParseUint(json[:4], 16, 64)
+	return rune(n)
+}
diff --git a/v2/grpcbuildermdl/grpcbuildermdl.pb.go b/v2/grpcbuildermdl/grpcbuildermdl.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..92d2967a9b288c06f759c6f5264994d70db81bc3
--- /dev/null
+++ b/v2/grpcbuildermdl/grpcbuildermdl.pb.go
@@ -0,0 +1,425 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpcbuildermdl/grpcbuildermdl.proto
+
+package grpcbuildermdl
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type GRPCMessage struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	IsRestricted         bool     `protobuf:"varint,3,opt,name=isRestricted,proto3" json:"isRestricted,omitempty"`
+	IsRoleBased          bool     `protobuf:"varint,4,opt,name=isRoleBased,proto3" json:"isRoleBased,omitempty"`
+	Token                string   `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"`
+	Branch               string   `protobuf:"bytes,6,opt,name=branch,proto3" json:"branch,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GRPCMessage) Reset()         { *m = GRPCMessage{} }
+func (m *GRPCMessage) String() string { return proto.CompactTextString(m) }
+func (*GRPCMessage) ProtoMessage()    {}
+func (*GRPCMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e620ca3a5f127fa1, []int{0}
+}
+
+func (m *GRPCMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCMessage.Unmarshal(m, b)
+}
+func (m *GRPCMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCMessage.Marshal(b, m, deterministic)
+}
+func (m *GRPCMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCMessage.Merge(m, src)
+}
+func (m *GRPCMessage) XXX_Size() int {
+	return xxx_messageInfo_GRPCMessage.Size(m)
+}
+func (m *GRPCMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCMessage proto.InternalMessageInfo
+
+func (m *GRPCMessage) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *GRPCMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *GRPCMessage) GetIsRestricted() bool {
+	if m != nil {
+		return m.IsRestricted
+	}
+	return false
+}
+
+func (m *GRPCMessage) GetIsRoleBased() bool {
+	if m != nil {
+		return m.IsRoleBased
+	}
+	return false
+}
+
+func (m *GRPCMessage) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
+func (m *GRPCMessage) GetBranch() string {
+	if m != nil {
+		return m.Branch
+	}
+	return ""
+}
+
+type GRPCRequest struct {
+	GrpcMessage          *GRPCMessage `protobuf:"bytes,1,opt,name=grpcMessage,proto3" json:"grpcMessage,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *GRPCRequest) Reset()         { *m = GRPCRequest{} }
+func (m *GRPCRequest) String() string { return proto.CompactTextString(m) }
+func (*GRPCRequest) ProtoMessage()    {}
+func (*GRPCRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e620ca3a5f127fa1, []int{1}
+}
+
+func (m *GRPCRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCRequest.Unmarshal(m, b)
+}
+func (m *GRPCRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCRequest.Marshal(b, m, deterministic)
+}
+func (m *GRPCRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCRequest.Merge(m, src)
+}
+func (m *GRPCRequest) XXX_Size() int {
+	return xxx_messageInfo_GRPCRequest.Size(m)
+}
+func (m *GRPCRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCRequest proto.InternalMessageInfo
+
+func (m *GRPCRequest) GetGrpcMessage() *GRPCMessage {
+	if m != nil {
+		return m.GrpcMessage
+	}
+	return nil
+}
+
+type GRPCResponse struct {
+	Data                 string   `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GRPCResponse) Reset()         { *m = GRPCResponse{} }
+func (m *GRPCResponse) String() string { return proto.CompactTextString(m) }
+func (*GRPCResponse) ProtoMessage()    {}
+func (*GRPCResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e620ca3a5f127fa1, []int{2}
+}
+
+func (m *GRPCResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCResponse.Unmarshal(m, b)
+}
+func (m *GRPCResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCResponse.Marshal(b, m, deterministic)
+}
+func (m *GRPCResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCResponse.Merge(m, src)
+}
+func (m *GRPCResponse) XXX_Size() int {
+	return xxx_messageInfo_GRPCResponse.Size(m)
+}
+func (m *GRPCResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCResponse proto.InternalMessageInfo
+
+func (m *GRPCResponse) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+type GRPCByteResponse struct {
+	Data                 []byte   `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	ErrorCode            int32    `protobuf:"varint,2,opt,name=errorCode,proto3" json:"errorCode,omitempty"`
+	Error                string   `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GRPCByteResponse) Reset()         { *m = GRPCByteResponse{} }
+func (m *GRPCByteResponse) String() string { return proto.CompactTextString(m) }
+func (*GRPCByteResponse) ProtoMessage()    {}
+func (*GRPCByteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e620ca3a5f127fa1, []int{3}
+}
+
+func (m *GRPCByteResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GRPCByteResponse.Unmarshal(m, b)
+}
+func (m *GRPCByteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GRPCByteResponse.Marshal(b, m, deterministic)
+}
+func (m *GRPCByteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCByteResponse.Merge(m, src)
+}
+func (m *GRPCByteResponse) XXX_Size() int {
+	return xxx_messageInfo_GRPCByteResponse.Size(m)
+}
+func (m *GRPCByteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCByteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCByteResponse proto.InternalMessageInfo
+
+func (m *GRPCByteResponse) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *GRPCByteResponse) GetErrorCode() int32 {
+	if m != nil {
+		return m.ErrorCode
+	}
+	return 0
+}
+
+func (m *GRPCByteResponse) GetError() string {
+	if m != nil {
+		return m.Error
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*GRPCMessage)(nil), "grpcbuildermdl.GRPCMessage")
+	proto.RegisterType((*GRPCRequest)(nil), "grpcbuildermdl.GRPCRequest")
+	proto.RegisterType((*GRPCResponse)(nil), "grpcbuildermdl.GRPCResponse")
+	proto.RegisterType((*GRPCByteResponse)(nil), "grpcbuildermdl.GRPCByteResponse")
+}
+
+func init() {
+	proto.RegisterFile("grpcbuildermdl/grpcbuildermdl.proto", fileDescriptor_e620ca3a5f127fa1)
+}
+
+var fileDescriptor_e620ca3a5f127fa1 = []byte{
+	// 324 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x41, 0x4b, 0x3b, 0x31,
+	0x10, 0xc5, 0xff, 0xfb, 0xb7, 0x2d, 0xee, 0x6c, 0x91, 0x12, 0x44, 0x82, 0xf6, 0xb0, 0xc4, 0x4b,
+	0x4f, 0x15, 0xea, 0xd9, 0x4b, 0x7b, 0xb0, 0x07, 0x15, 0x89, 0x78, 0x29, 0x78, 0x48, 0x37, 0x43,
+	0xbb, 0x74, 0xbb, 0xa9, 0x49, 0x2a, 0xf8, 0x91, 0xfc, 0x96, 0x92, 0x64, 0x4b, 0x77, 0xcb, 0xe2,
+	0x6d, 0xde, 0xcb, 0x30, 0x79, 0xbf, 0x61, 0xe0, 0x76, 0xa5, 0x77, 0xd9, 0x72, 0x9f, 0x17, 0x12,
+	0xf5, 0x56, 0x16, 0x77, 0x4d, 0x39, 0xde, 0x69, 0x65, 0x15, 0xb9, 0x68, 0xba, 0xec, 0x27, 0x82,
+	0xe4, 0x91, 0xbf, 0xce, 0x9e, 0xd1, 0x18, 0xb1, 0x42, 0x42, 0xa0, 0x53, 0x8a, 0x2d, 0xd2, 0x28,
+	0x8d, 0x46, 0x31, 0xf7, 0xb5, 0xf3, 0xa4, 0xb0, 0x82, 0xfe, 0x4f, 0xa3, 0x51, 0x9f, 0xfb, 0x9a,
+	0x30, 0xe8, 0xe7, 0x86, 0xa3, 0xb1, 0x3a, 0xcf, 0x2c, 0x4a, 0x7a, 0x96, 0x46, 0xa3, 0x73, 0xde,
+	0xf0, 0x48, 0x0a, 0x49, 0x6e, 0xb8, 0x2a, 0x70, 0x2a, 0x0c, 0x4a, 0xda, 0xf1, 0x2d, 0x75, 0x8b,
+	0x5c, 0x42, 0xd7, 0xaa, 0x0d, 0x96, 0xb4, 0xeb, 0xbf, 0x0b, 0x82, 0x5c, 0x41, 0x6f, 0xa9, 0x45,
+	0x99, 0xad, 0x69, 0xcf, 0xdb, 0x95, 0x62, 0x4f, 0x21, 0x2a, 0xc7, 0xcf, 0x3d, 0x1a, 0x4b, 0x1e,
+	0x20, 0x71, 0x30, 0x55, 0x72, 0x9f, 0x38, 0x99, 0xdc, 0x8c, 0x4f, 0xb0, 0x6b, 0x70, 0xbc, 0xde,
+	0xcf, 0x18, 0xf4, 0xc3, 0x34, 0xb3, 0x53, 0xa5, 0x39, 0x52, 0x56, 0xe4, 0xae, 0x66, 0x0b, 0x18,
+	0xb8, 0x9e, 0xe9, 0xb7, 0xc5, 0xd6, 0xbe, 0xc3, 0x36, 0x86, 0x10, 0xa3, 0xd6, 0x4a, 0xcf, 0x94,
+	0x44, 0xbf, 0xa6, 0x2e, 0x3f, 0x1a, 0x8e, 0xd2, 0x0b, 0xbf, 0xa4, 0x98, 0x07, 0x31, 0xf9, 0x08,
+	0x34, 0x6f, 0xa8, 0xbf, 0xf2, 0x0c, 0xc9, 0x4b, 0x90, 0x73, 0x51, 0xca, 0x02, 0x35, 0x69, 0xe5,
+	0xa8, 0xc8, 0xaf, 0xd3, 0xb6, 0xc7, 0x7a, 0x48, 0xf6, 0x6f, 0xf2, 0x0e, 0xb1, 0x73, 0x67, 0x6b,
+	0xcc, 0x36, 0x64, 0x5e, 0x17, 0x7f, 0x8e, 0x1e, 0xb6, 0x3f, 0x1e, 0xc6, 0x4e, 0x07, 0x8b, 0x93,
+	0x0b, 0x5a, 0xf6, 0xfc, 0x61, 0xdd, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x39, 0xa8, 0x6a,
+	0x7f, 0x02, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// GRPCServiceClient is the client API for GRPCService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GRPCServiceClient interface {
+	GRPCHandler(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCByteResponse, error)
+}
+
+type gRPCServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewGRPCServiceClient(cc grpc.ClientConnInterface) GRPCServiceClient {
+	return &gRPCServiceClient{cc}
+}
+
+func (c *gRPCServiceClient) GRPCHandler(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCByteResponse, error) {
+	out := new(GRPCByteResponse)
+	err := c.cc.Invoke(ctx, "/grpcbuildermdl.GRPCService/GRPCHandler", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// GRPCServiceServer is the server API for GRPCService service.
+type GRPCServiceServer interface {
+	GRPCHandler(context.Context, *GRPCRequest) (*GRPCByteResponse, error)
+}
+
+// UnimplementedGRPCServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedGRPCServiceServer struct {
+}
+
+func (*UnimplementedGRPCServiceServer) GRPCHandler(ctx context.Context, req *GRPCRequest) (*GRPCByteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GRPCHandler not implemented")
+}
+
+func RegisterGRPCServiceServer(s *grpc.Server, srv GRPCServiceServer) {
+	s.RegisterService(&_GRPCService_serviceDesc, srv)
+}
+
+func _GRPCService_GRPCHandler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GRPCRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(GRPCServiceServer).GRPCHandler(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/grpcbuildermdl.GRPCService/GRPCHandler",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(GRPCServiceServer).GRPCHandler(ctx, req.(*GRPCRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _GRPCService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpcbuildermdl.GRPCService",
+	HandlerType: (*GRPCServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GRPCHandler",
+			Handler:    _GRPCService_GRPCHandler_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "grpcbuildermdl/grpcbuildermdl.proto",
+}
+
+// GRPCCheckClient is the client API for GRPCCheck service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GRPCCheckClient interface {
+	GRPCCheck(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCResponse, error)
+}
+
+type gRPCCheckClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewGRPCCheckClient(cc grpc.ClientConnInterface) GRPCCheckClient {
+	return &gRPCCheckClient{cc}
+}
+
+func (c *gRPCCheckClient) GRPCCheck(ctx context.Context, in *GRPCRequest, opts ...grpc.CallOption) (*GRPCResponse, error) {
+	out := new(GRPCResponse)
+	err := c.cc.Invoke(ctx, "/grpcbuildermdl.GRPCCheck/GRPCCheck", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// GRPCCheckServer is the server API for GRPCCheck service.
+type GRPCCheckServer interface {
+	GRPCCheck(context.Context, *GRPCRequest) (*GRPCResponse, error)
+}
+
+// UnimplementedGRPCCheckServer can be embedded to have forward compatible implementations.
+type UnimplementedGRPCCheckServer struct {
+}
+
+func (*UnimplementedGRPCCheckServer) GRPCCheck(ctx context.Context, req *GRPCRequest) (*GRPCResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GRPCCheck not implemented")
+}
+
+func RegisterGRPCCheckServer(s *grpc.Server, srv GRPCCheckServer) {
+	s.RegisterService(&_GRPCCheck_serviceDesc, srv)
+}
+
+func _GRPCCheck_GRPCCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GRPCRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(GRPCCheckServer).GRPCCheck(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/grpcbuildermdl.GRPCCheck/GRPCCheck",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(GRPCCheckServer).GRPCCheck(ctx, req.(*GRPCRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _GRPCCheck_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpcbuildermdl.GRPCCheck",
+	HandlerType: (*GRPCCheckServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GRPCCheck",
+			Handler:    _GRPCCheck_GRPCCheck_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "grpcbuildermdl/grpcbuildermdl.proto",
+}
diff --git a/v2/grpcbuildermdl/grpcbuildermdl.proto b/v2/grpcbuildermdl/grpcbuildermdl.proto
new file mode 100644
index 0000000000000000000000000000000000000000..2ad603582cbd5e3f4102238445dafc94bce39e41
--- /dev/null
+++ b/v2/grpcbuildermdl/grpcbuildermdl.proto
@@ -0,0 +1,38 @@
+syntax = "proto3";
+
+package grpcbuildermdl;
+option go_package = "grpcbuildermdl";
+
+message GRPCMessage {
+    string name = 1;
+    bytes data = 2;
+    bool isRestricted = 3;
+    bool isRoleBased = 4;
+    string token = 5;
+    string branch = 6;
+}
+
+message GRPCRequest {
+    GRPCMessage grpcMessage = 1;
+}
+
+message GRPCResponse {
+    string data = 1;
+}
+
+message GRPCByteResponse {
+    bytes data = 1;
+    int32  errorCode = 2;
+    string error = 3;
+}
+
+service GRPCService {
+    rpc GRPCHandler(GRPCRequest) returns (GRPCByteResponse) {};
+}
+
+service GRPCCheck {
+    rpc GRPCCheck(GRPCRequest) returns (GRPCResponse) { };
+}
\ No newline at end of file
diff --git a/v2/grpcclientmdl/grpcclientmdl.go b/v2/grpcclientmdl/grpcclientmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..75b3565c09f091176da8569ca3b8e860bcf3c42b
--- /dev/null
+++ b/v2/grpcclientmdl/grpcclientmdl.go
@@ -0,0 +1,79 @@
+package grpcclientmdl
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/grpcbuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	grpcpool "github.com/processout/grpc-go-pool"
+	"google.golang.org/grpc"
+)
+
+// TotalCheck performs a health-check call against the local gRPC service.
+func TotalCheck() (string, error) {
+	// TODO: review
+	conn, err := grpc.Dial("0.0.0.0:50051", grpc.WithInsecure())
+	if err != nil {
+		loggermdl.LogError("Could not connect: ", err)
+		return "", err
+	}
+	defer conn.Close()
+	c := grpcbuildermdl.NewGRPCCheckClient(conn)
+
+	req := &grpcbuildermdl.GRPCRequest{}
+	res, err := c.GRPCCheck(context.Background(), req)
+	if err != nil {
+		return "", err
+	}
+	return res.GetData(), nil
+}
+
+var instances = make(map[string]*grpcpool.Pool)
+var instancesList map[string]map[string]*grpcpool.Pool
+
+// Init creates a gRPC connection pool for every server URL, keyed by the corresponding client name.
+func Init(grpcServerURLList []string, grpcClients []string) {
+	if len(grpcServerURLList) != len(grpcClients) {
+		loggermdl.LogError("Init: grpcServerURLList and grpcClients must have the same length")
+		return
+	}
+	loggermdl.LogError("Length of grpcServerURLList", len(grpcServerURLList))
+
+	for index := 0; index < len(grpcServerURLList); index++ {
+		CreateConnection(grpcServerURLList[index], grpcClients[index])
+	}
+	loggermdl.LogError("instances", instances)
+}
+
+// ByteHandler invokes GRPCHandler on the pooled connection registered for grpcServerURL.
+func ByteHandler(req *grpcbuildermdl.GRPCRequest, grpcServerURL string) ([]byte, int32, string, error) {
+	if instances[grpcServerURL] != nil {
+		conn, err := instances[grpcServerURL].Get(context.Background())
+		if err != nil {
+			loggermdl.LogError("Failed to get connection from gRPC pool: %v", err)
+			return nil, 0, "", err
+		}
+		defer conn.Close()
+		client := grpcbuildermdl.NewGRPCServiceClient(conn.ClientConn)
+		res, err := client.GRPCHandler(context.Background(), req)
+		if err != nil {
+			loggermdl.LogError("GRPCHandler err:", res.GetError())
+			return res.GetData(), res.GetErrorCode(), res.GetError(), err
+		}
+		return res.GetData(), res.GetErrorCode(), res.GetError(), nil
+	}
+	return nil, 0, "", errors.New("no grpc connection found")
+}
+
+// CreateConnection registers a pooled gRPC connection factory under the given client name.
+func CreateConnection(serverURL string, grpcClientName string) {
+	var factory grpcpool.Factory
+	factory = func() (*grpc.ClientConn, error) {
+		conn, err := grpc.Dial(serverURL, grpc.WithInsecure())
+		if err != nil {
+			loggermdl.LogError("Failed to start gRPC connection: %v", err)
+		}
+		return conn, err
+	}
+	pool, err := grpcpool.New(factory, 5, 5, time.Second)
+	if err != nil {
+		loggermdl.LogError("Failed to create gRPC pool: %v", err)
+	}
+	instances[grpcClientName] = pool
+}
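+
+// Example usage (a minimal sketch; the server URL and client name are hypothetical):
+//
+//	Init([]string{"localhost:50051"}, []string{"usersvc"})
+//	data, code, errMsg, err := ByteHandler(&grpcbuildermdl.GRPCRequest{}, "usersvc")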
diff --git a/v2/hashmdl/hashmdl.go b/v2/hashmdl/hashmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8a34f76e0479e3840762125fe99473682678e7d
--- /dev/null
+++ b/v2/hashmdl/hashmdl.go
@@ -0,0 +1,69 @@
+package hashmdl
+
+import (
+	"crypto/md5"
+	"os"
+	"strconv"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	OneOfOne "github.com/OneOfOne/xxhash"
+)
+
+// GetHashChecksumOfByteArray returns the xxHash64 checksum of the given byte slice.
+func GetHashChecksumOfByteArray(byteArray []byte) (uint64, error) {
+	if byteArray == nil || len(byteArray) <= 0 {
+		loggermdl.LogError("data is nil or length is less than or equal to zero")
+		return 0, errormdl.Wrap("data is nil or length is less than or equal to zero")
+	}
+	hash := OneOfOne.New64()
+	_, writeerr := hash.Write(byteArray)
+	if errormdl.CheckErr(writeerr) != nil {
+		loggermdl.LogError("error occured while calling hash.Write : ", errormdl.CheckErr(writeerr))
+		return 0, errormdl.CheckErr(writeerr)
+	}
+	return hash.Sum64(), nil
+}
+
+// GetHashChecksumOfFile returns the xxHash64 checksum of the file's contents.
+func GetHashChecksumOfFile(filePath string) (uint64, error) {
+
+	byteArray, readerr := filemdl.ReadFile(filePath)
+	if errormdl.CheckErr(readerr) != nil {
+		loggermdl.LogError("error occured while calling hash.Write : ", errormdl.CheckErr(readerr))
+
+		return 0, errormdl.CheckErr(readerr)
+	}
+	hash, hasherr := GetHashChecksumOfByteArray(byteArray)
+	if errormdl.CheckErr1(hasherr) != nil {
+		loggermdl.LogError("error occured while calling GetHashChecksumOfByteArray  : ", errormdl.CheckErr1(hasherr))
+		return 0, errormdl.CheckErr1(hasherr)
+	}
+
+	return hash, nil
+}
+
+// GetAtributeBasedHash returns a hash calculated from the file's modification time and size.
+func GetAtributeBasedHash(filePath string) (string, error) {
+	fileInfo, statErr := os.Stat(filePath)
+	if errormdl.CheckErr(statErr) != nil {
+		loggermdl.LogError("error occured while getting stats of file : ", filePath, " : ", errormdl.CheckErr(statErr))
+		return "", errormdl.CheckErr(statErr)
+	}
+	fileModTime := fileInfo.ModTime()
+	fileLength := fileInfo.Size()
+	customFileHash := strconv.FormatInt(fileModTime.Unix(), 10) + strconv.FormatInt(fileLength, 10)
+	return customFileHash, nil
+}
+
+// Get128BitHash returns the 128-bit MD5 hash of data.
+// TODO: implement another hashing algorithm instead of MD5.
+func Get128BitHash(data []byte) ([16]byte, error) {
+
+	if data == nil || len(data) <= 0 {
+		return [16]byte{}, errormdl.Wrap("data is nil or length is less than or equal to zero")
+	}
+
+	return md5.Sum(data), nil
+}
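+
+// Example usage (a minimal sketch; the file path is hypothetical):
+//
+//	checksum, err := GetHashChecksumOfFile("data.json")
+//	if err == nil {
+//		loggermdl.LogInfo("checksum:", checksum)
+//	}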
diff --git a/v2/hashmdl/hashmdl_test.go b/v2/hashmdl/hashmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..060726afdbaddebf2f4d439502ad668da1221fe5
--- /dev/null
+++ b/v2/hashmdl/hashmdl_test.go
@@ -0,0 +1,104 @@
+package hashmdl
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetHashChecksumOfByteArray(t *testing.T) {
+
+	data := []byte("This is test value")
+
+	type args struct {
+		byteArray []byte
+	}
+	tests := []struct {
+		name      string
+		args      args
+		want      uint64
+		wantError bool
+	}{
+		{"success scenario", args{data}, 12276080534736706571, false},
+		{"output mismatched", args{data}, 12276080534736706570, true},
+	}
+	for _, tt := range tests {
+
+		hash, _ := GetHashChecksumOfByteArray(data)
+		if tt.wantError {
+			assert.NotEqual(t, tt.want, hash, "Output not matching")
+		} else {
+			assert.Equal(t, tt.want, hash, "Output matching")
+		}
+	}
+	for _, tt := range tests {
+		errormdl.IsTestingNegetiveCaseOn = true
+		_, hashError := GetHashChecksumOfByteArray(data)
+
+		if tt.wantError {
+			assert.Error(t, hashError, "test error occued")
+		}
+	}
+	errormdl.IsTestingNegetiveCaseOn = false
+}
+
+func TestGetHashChecksumBlankByteArray(t *testing.T) {
+	blankData := []byte{}
+	_, hashError := GetHashChecksumOfByteArray(blankData)
+	assert.Error(t, hashError, "error occured")
+}
+
+func TestGetHashChecksumOfFileSuccess(t *testing.T) {
+
+	filePath := "../testingdata/hashcalculation.txt"
+	hash, _ := GetHashChecksumOfFile(filePath)
+
+	var expectedHash uint64
+	expectedHash = 12276080534736706571
+	assert.Equal(t, expectedHash, hash, "Output matching")
+
+}
+
+func TestGetHashChecksumOfFilePathFail(t *testing.T) {
+	filePath := "../testingdata/hashcalculation.tx"
+	_, hashError := GetHashChecksumOfFile(filePath)
+	assert.Error(t, hashError, "error occured")
+}
+
+func TestGetHashChecksumOfFileHashFail(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn1 = true
+
+	filePath := "../testingdata/hashcalculation.txt"
+	_, hashError := GetHashChecksumOfFile(filePath)
+	assert.Error(t, hashError, "error occured")
+	errormdl.IsTestingNegetiveCaseOn1 = false
+
+}
+
+func TestGetAttributeBasedHashSuccess(t *testing.T) {
+	filePath := "../testingdata/hashcalculation.txt"
+	hash, _ := GetAtributeBasedHash(filePath)
+	assert.Equal(t, "153120260918", hash, "hash calculated successfully")
+
+}
+func TestGetAttributeBasedHashFileNotFound(t *testing.T) {
+	filePath := "../testingdata/hashcalculation.tx"
+	_, hashError := GetAtributeBasedHash(filePath)
+	assert.Error(t, hashError, "file not found")
+
+}
+
+func TestGet128BitHash(t *testing.T) {
+	data := []byte("This is test value")
+	hash, _ := Get128BitHash(data)
+	// [153 250 83 89 165 124 176 214 93 174 227 143 162 183 105 127]
+	expectedHash := [16]byte{153, 250, 83, 89, 165, 124, 176, 214, 93, 174, 227, 143, 162, 183, 105, 127}
+	assert.Equal(t, expectedHash, hash, "matched")
+}
+
+func TestGet128BitHashError(t *testing.T) {
+	data := []byte("")
+	_, hashError := Get128BitHash(data)
+	assert.Error(t, hashError, "nil/empty byte array")
+}
diff --git a/v2/httpclientmdl/httpclientmdl.go b/v2/httpclientmdl/httpclientmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..b370bdce9aa8094ed7638d2ea0962b230e0f0bda
--- /dev/null
+++ b/v2/httpclientmdl/httpclientmdl.go
@@ -0,0 +1,127 @@
+package httpclientmdl
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+	"sync"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+)
+
+var httpClient *http.Client
+var once sync.Once
+
+// GetHTTPClient returns a singleton *http.Client configured with the package's default transport settings.
+func GetHTTPClient() *http.Client {
+	once.Do(func() {
+		transport := &http.Transport{
+			MaxIdleConns:        constantmdl.MAXIDLECONNS,
+			MaxIdleConnsPerHost: constantmdl.MAXIDLECONNSPERHOST,
+			IdleConnTimeout:     constantmdl.IDLECONNTIMEOUT,
+		}
+		httpClient = &http.Client{
+			Transport: transport,
+		}
+	})
+	return httpClient
+}
+
+// HTTPTransport holds transport settings used to construct an *http.Client.
+type HTTPTransport struct {
+	MaxIdleConns        int
+	MaxIdleConnsPerHost int
+	IdleConnTimeout     time.Duration
+}
+
+// GetHTTPClientWithConfig returns a new *http.Client; zero-valued fields in httpTransport fall back to the package defaults.
+func GetHTTPClientWithConfig(httpTransport HTTPTransport) *http.Client {
+
+	if httpTransport.MaxIdleConns == 0 {
+		httpTransport.MaxIdleConns = constantmdl.MAXIDLECONNS
+	}
+	if httpTransport.MaxIdleConnsPerHost == 0 {
+		httpTransport.MaxIdleConnsPerHost = constantmdl.MAXIDLECONNSPERHOST
+	}
+	if httpTransport.IdleConnTimeout == 0 {
+		httpTransport.IdleConnTimeout = constantmdl.IDLECONNTIMEOUT
+	}
+	transport := &http.Transport{
+		MaxIdleConns:        httpTransport.MaxIdleConns,
+		MaxIdleConnsPerHost: httpTransport.MaxIdleConnsPerHost,
+		IdleConnTimeout:     httpTransport.IdleConnTimeout,
+	}
+	httpClient := &http.Client{
+		Transport: transport,
+	}
+	return httpClient
+}
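+
+// Example usage (a minimal sketch; the values shown are arbitrary):
+//
+//	client := GetHTTPClientWithConfig(HTTPTransport{MaxIdleConns: 50, IdleConnTimeout: 30 * time.Second})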
+
+// DoHTTPWithRetry executes an HTTP request, retrying on failure; the caller must close the returned response body.
+func DoHTTPWithRetry(attempts int, sleeptime time.Duration, request *http.Request) (*http.Response, error) {
+	client := GetHTTPClient()
+	if request.Body != nil {
+		// Read the body so it can be replayed on every retry
+		bodyBytes, errReadAll := ioutil.ReadAll(request.Body)
+		if errormdl.CheckBool(errReadAll != nil) {
+			return nil, errormdl.Wrap("Error in ReadAll function in DoHTTPWithRetry")
+		}
+		// Restore the io.ReadCloser to its original state
+		request.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+		resp, err := client.Do(request)
+		if errormdl.CheckBool(resp != nil && err == nil && resp.StatusCode < constantmdl.HTTP400ERROR) {
+			return resp, nil
+		}
+		return TryAttemptsWithRequestBody(client, request, resp, err, attempts, sleeptime, bodyBytes)
+	}
+	resp, err := client.Do(request)
+	if errormdl.CheckBool(resp != nil && err == nil && resp.StatusCode < constantmdl.HTTP400ERROR) {
+		return resp, nil
+	}
+	return TryAttempts(client, request, resp, err, attempts, sleeptime)
+}
+
+// TryAttemptsWithRequestBody retries an HTTP request that carries a body until it succeeds or attempts run out.
+func TryAttemptsWithRequestBody(client *http.Client, request *http.Request, resp *http.Response, err error, attempts int, sleeptime time.Duration, bodyBytes []byte) (*http.Response, error) {
+	newURL := request.URL.String()
+	for i := attempts; i > 0; i-- {
+		time.Sleep(sleeptime)
+		if resp != nil {
+			// discard the previous failed response before retrying
+			resp.Body.Close()
+		}
+		newRequest, newReqErr := http.NewRequest(request.Method, newURL, bytes.NewBuffer(bodyBytes))
+		if newReqErr == nil {
+			resp, err = client.Do(newRequest)
+			if resp == nil {
+				continue
+			}
+			if resp.StatusCode < constantmdl.HTTP400ERROR && err == nil {
+				// success: the caller is responsible for closing resp.Body
+				return resp, nil
+			}
+		}
+	}
+	return resp, errormdl.CheckErr(err)
+}
+
+// TryAttempts retries an HTTP request without a body until it succeeds or attempts run out.
+func TryAttempts(client *http.Client, request *http.Request, resp *http.Response, err error, attempts int, sleeptime time.Duration) (*http.Response, error) {
+	for i := attempts; i > 0; i-- {
+		time.Sleep(sleeptime)
+		if resp != nil {
+			// discard the previous failed response before retrying
+			resp.Body.Close()
+		}
+		resp, err = client.Do(request)
+		if resp == nil {
+			continue
+		}
+		if resp.StatusCode < constantmdl.HTTP400ERROR && err == nil {
+			// success: the caller is responsible for closing resp.Body
+			return resp, nil
+		}
+	}
+	return resp, errormdl.CheckErr(err)
+}
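+
+// Example usage (a minimal sketch; the URL is hypothetical):
+//
+//	req, _ := http.NewRequest("GET", "https://example.org/health", nil)
+//	resp, err := DoHTTPWithRetry(3, time.Second, req)
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}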
diff --git a/v2/httpclientmdl/httpclientmdl_test.go b/v2/httpclientmdl/httpclientmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3cd8877ccc5d91a061d9ad06d5428071cb10fde
--- /dev/null
+++ b/v2/httpclientmdl/httpclientmdl_test.go
@@ -0,0 +1,204 @@
+package httpclientmdl
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"testing"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/gin-gonic/gin"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGethttpclientwithconfigNotPanic(t *testing.T) {
+	httpTransport := HTTPTransport{}
+	httpTransport.MaxIdleConns = 10
+	httpTransport.MaxIdleConnsPerHost = 20
+	httpTransport.IdleConnTimeout = 90
+	assert.NotPanics(t, func() { GetHTTPClientWithConfig(httpTransport) }, "The code did panic")
+}
+
+func TestGethttpclientwithconfigDefaults(t *testing.T) {
+	httpTransport := HTTPTransport{}
+	httpTransport.MaxIdleConns = 0
+	httpTransport.MaxIdleConnsPerHost = 0
+	httpTransport.IdleConnTimeout = 0
+	assert.NotPanics(t, func() { GetHTTPClientWithConfig(httpTransport) }, "The code did panic")
+}
+func TestGethttpclientwithconfigNil(t *testing.T) {
+	httpTransport := HTTPTransport{}
+	httpTransport.MaxIdleConns = 10
+	httpTransport.MaxIdleConnsPerHost = 20
+	httpTransport.IdleConnTimeout = 90
+	assert.NotNil(t, func() { GetHTTPClientWithConfig(httpTransport) }, "Do not return nil")
+}
+func TestGethttpclientNil(t *testing.T) {
+	assert.NotNil(t, func() { GetHTTPClient() }, "Do not return nil")
+}
+func TestGethttpclientnotpanic(t *testing.T) {
+	assert.NotPanics(t, func() { GetHTTPClient() }, "The code did panic")
+}
+
+func BenchmarkGethttpclient(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		GetHTTPClient()
+	}
+}
+
+func BenchmarkGethttpclientDefault(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		httpTransport := HTTPTransport{}
+		httpTransport.MaxIdleConns = 10
+		httpTransport.MaxIdleConnsPerHost = 20
+		httpTransport.IdleConnTimeout = 90
+		GetHTTPClientWithConfig(httpTransport)
+	}
+}
+
+var (
+	attempts  int
+	sleeptime time.Duration
+)
+
+func TestDoHttpWithRetryGET200(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	request, _ := http.NewRequest("GET", "http://www.mkcl.org/about-mkcl", nil)
+	resp, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	assert.NoError(t, err, "This should not return error")
+	assert.Equal(t, resp.StatusCode, 200)
+}
+
+func TestDoHttpWithRetryGETError(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	request, _ := http.NewRequest("GET", "http://www.servernotrunning.org", nil)
+	_, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	assert.Error(t, err, "This should return error")
+}
+
+func TestDoHttpWithRetryGETAttempt(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	request, _ := http.NewRequest("GET", "http://www.mkcl.org/about-mkcl-error", nil)
+	resp, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	assert.NoError(t, err, "This should not return error")
+	assert.Equal(t, resp.StatusCode, 404)
+}
+
+func TestDoHttpWithRetryGETAttempt2(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	request, _ := http.NewRequest("GET", "http://www.mkcl.org/about-mkcl", nil)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	resp, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	assert.NoError(t, err, "This should not return error")
+	assert.Equal(t, resp.StatusCode, 200)
+}
+
+// Person struct
+type Person struct {
+	Name string `json:"name"`
+	Age  string `json:"age"`
+}
+
+func addPerson(c *gin.Context) {
+	person := Person{}
+	defer c.Request.Body.Close()
+	byteArr, readErr := ioutil.ReadAll(c.Request.Body)
+	if readErr != nil {
+		c.String(http.StatusInternalServerError, "")
+		return
+	}
+	unmarshallErr := json.Unmarshal(byteArr, &person)
+	if unmarshallErr != nil {
+		c.String(http.StatusInternalServerError, "")
+		return
+	}
+	c.String(http.StatusOK, "person1 is added successfully!")
+}
+
+var router *gin.Engine
+
+func Init(o *gin.RouterGroup) {
+	o.POST("/addPerson", addPerson)
+}
+func init() {
+	router = gin.New()
+	o := router.Group("/o")
+	Init(o)
+	// Run blocks, so start the test server in the background
+	go router.Run("localhost:8081")
+}
+func TestDoHttpWithRetryPOST200(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	var jsonStr = []byte(`{"name":"Arnav","age":"4"}`)
+	// request, _ := http.NewRequest("POST", "http://10.4.0.104:3001/o/addPerson", bytes.NewBuffer(jsonStr))
+	request, _ := http.NewRequest("POST", "http://localhost:8081/o/addPerson", bytes.NewBuffer(jsonStr))
+	resp, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	assert.NoError(t, err, "This should not return error")
+	assert.Equal(t, resp.StatusCode, 200)
+}
+func TestDoHttpWithRetryPOSTError(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	var jsonStr = []byte(`{"name":"Arnav","age":"4"}`)
+	request, _ := http.NewRequest("POST", "http://www.servernotrunning.org", bytes.NewBuffer(jsonStr))
+	_, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	assert.Error(t, err, "This should return error")
+}
+
+func TestDoHttpWithRetryPOSTAttempt(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	var jsonStr = []byte(`{"name":"Arnav","age":"4"}`)
+	request, _ := http.NewRequest("POST", "http://www.mkcl.org/about-mkcl-error", bytes.NewBuffer(jsonStr))
+	resp, _ := DoHTTPWithRetry(attempts, sleeptime, request)
+	assert.Equal(t, resp.StatusCode, 404)
+}
+
+// func TestDoHttpWithRetryPOSTAttempt2(t *testing.T) {
+// 	attempts = 2
+// 	sleeptime = 2 * time.Second
+// 	var jsonStr = []byte(`{"name":"Arnav","age":"4"}`)
+// 	request, _ := http.NewRequest("POST", "http://10.4.0.104:3001/o/addPerson", bytes.NewBuffer(jsonStr))
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	resp, err := DoHTTPWithRetry(attempts, sleeptime, request)
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// 	assert.NoError(t, err, "This should not return error")
+// 	assert.Equal(t, resp.StatusCode, 200)
+// }
+func TestDoHttpWithRetryPOSTReadAllError(t *testing.T) {
+	attempts = 2
+	sleeptime = 2 * time.Second
+	var jsonStr = []byte(`{"name":"Arnav","age":"4"}`)
+	request, _ := http.NewRequest("POST", "http://www.mkcl.org/about-mkcl-error", bytes.NewBuffer(jsonStr))
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	_, err := DoHTTPWithRetry(attempts, sleeptime, request)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	assert.Error(t, err, "This should  return error")
+}
+
+func BenchmarkDoHttpWithRetry(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		attempts := 2
+		sleeptime := 2 * time.Second
+		request, _ := http.NewRequest("GET", "http://www.mkcl.org/about-mkcl-error", nil)
+		DoHTTPWithRetry(attempts, sleeptime, request)
+	}
+}
+
+func BenchmarkDoHttpWithRetryPOST(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		attempts := 2
+		sleeptime := 2 * time.Second
+		var jsonStr = []byte(`{"name":"Arnav","age":"4"}`)
+		request, _ := http.NewRequest("POST", "http://www.mkcl.org/about-mkcl-error", bytes.NewBuffer(jsonStr))
+		DoHTTPWithRetry(attempts, sleeptime, request)
+	}
+}
diff --git a/v2/httpservermdl/httpservermdl.go b/v2/httpservermdl/httpservermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f82476755dd6050bb58bbbe5146c4c8af3f6fcb
--- /dev/null
+++ b/v2/httpservermdl/httpservermdl.go
@@ -0,0 +1,12 @@
+package httpservermdl
+
+import (
+	"github.com/gin-gonic/gin"
+)
+
+// GetServer will return the webserver pointer
+func GetServer() *gin.Engine {
+	// TODO: use sync.Once
+	server := gin.New()
+	return server
+}
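+
+// A possible sync.Once variant for the TODO above (a sketch, not wired in):
+//
+//	var (
+//		server *gin.Engine
+//		once   sync.Once
+//	)
+//
+//	func GetServer() *gin.Engine {
+//		once.Do(func() { server = gin.New() })
+//		return server
+//	}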
diff --git a/v2/httpservermdl/httpservermdl_test.go b/v2/httpservermdl/httpservermdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..39bb72b593a555bd72c18c146d0a5ca2e0f130ea
--- /dev/null
+++ b/v2/httpservermdl/httpservermdl_test.go
@@ -0,0 +1,17 @@
+package httpservermdl
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetServer(t *testing.T) {
+	assert.NotPanics(t, func() { GetServer() }, "The code did panic")
+}
+
+func BenchmarkGethttpclientDefault(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		GetServer()
+	}
+}
diff --git a/v2/loggermdl/loggerdefault.go b/v2/loggermdl/loggerdefault.go
new file mode 100755
index 0000000000000000000000000000000000000000..7e37e67bfbc94a869d44af831dcd0a278324c2b0
--- /dev/null
+++ b/v2/loggermdl/loggerdefault.go
@@ -0,0 +1,124 @@
+// +build !prod
+
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:40:34 GMT+0530 (IST)
+
+// Package loggermdl - logging helpers for development (non-prod) builds
+package loggermdl
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+
+	colorjson "github.com/TylerBrock/colorjson"
+	logging "github.com/op/go-logging"
+	goon "github.com/shurcooL/go-goon"
+)
+
+var log = logging.MustGetLogger("mkcllogger")
+var format = logging.MustStringFormatter(
+	`%{color}%{time:15:04:05} %{shortfile} %{callpath:5} ▶ %{level:.4s} %{id:03x}%{color:reset}`,
+)
+
+func init() {
+	log.ExtraCalldepth = 1
+	backend := logging.NewLogBackend(os.Stderr, "", 0)
+
+	// chain the formatter and the level filter so each record is emitted once
+	backendFormatter := logging.NewBackendFormatter(backend, format)
+	backendLeveled := logging.AddModuleLevel(backendFormatter)
+
+	logging.SetBackend(backendLeveled)
+}
+
+// LogDebug logs a message at level Debug on the standard logger.
+func LogDebug(args ...interface{}) {
+	log.Debug("", args)
+}
+
+// LogInfo logs a message at level Info on the standard logger.
+func LogInfo(args ...interface{}) {
+	log.Info("", args)
+}
+
+// LogWarn logs a message at level Warn on the standard logger.
+func LogWarn(args ...interface{}) {
+	log.Warning("", args)
+}
+
+// LogError logs a message at level Error on the standard logger.
+func LogError(args ...interface{}) {
+	log.Error("", args)
+}
+
+// LogPanic logs a message at level Panic on the standard logger.
+func LogPanic(args ...interface{}) {
+	log.Panic(args)
+}
+
+// LogJSONObject pretty-prints an object as colorized JSON
+func LogJSONObject(pobj interface{}) {
+	jsonByte, _ := json.Marshal(pobj)
+	var objnew map[string]interface{}
+	json.Unmarshal(jsonByte, &objnew)
+
+	f := colorjson.NewFormatter()
+	f.Indent = 2
+
+	s, _ := f.Marshal(objnew)
+	fmt.Println(string(s))
+}
+
+// LogJSONByte pretty-prints JSON bytes as colorized JSON
+func LogJSONByte(pobj []byte) {
+	var objnew map[string]interface{}
+	json.Unmarshal(pobj, &objnew)
+
+	f := colorjson.NewFormatter()
+	f.Indent = 2
+
+	s, _ := f.Marshal(objnew)
+	fmt.Println(string(s))
+}
+
+// LogJSONString pretty-prints a JSON string as colorized JSON
+func LogJSONString(str string) {
+	var objnew map[string]interface{}
+	json.Unmarshal([]byte(str), &objnew)
+
+	f := colorjson.NewFormatter()
+	f.Indent = 2
+
+	s, _ := f.Marshal(objnew)
+	fmt.Println(string(s))
+}
+
+// LogHRStart prints a horizontal-rule opener (>>>)
+func LogHRStart() {
+	fmt.Println(">>>>>>>>>>>>>>>>>>>>>>>")
+}
+
+// LogHREnd prints a horizontal-rule closer (<<<)
+func LogHREnd() {
+	fmt.Println("<<<<<<<<<<<<<<<<<<<<<<<")
+}
+
+// LogSpot prints args between highlighted start and end markers
+func LogSpot(args ...interface{}) {
+	fmt.Println(">>>>>>>>>>START>>>>>>>>>>>>>")
+	log.Info("", args)
+	fmt.Println("<<<<<<<<<END<<<<<<<<<<<<<<")
+}
+
+// LogVars prints variables with formatting
+func LogVars(xvars ...interface{}) {
+	for _, i := range xvars {
+		goon.Dump(i)
+	}
+}
+
+// TODO: bring this function back later
+// LogTable will print data in table form
+// func LogTable(data []interface{}) {
+// 	t := gotabulate.Create(data)
+// 	fmt.Println(t.Render("grid"))
+// }
diff --git a/v2/loggermdl/loggerdefault_test.go b/v2/loggermdl/loggerdefault_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..aa43f3fc660e4dc2c46b5763b619e99096acc36c
--- /dev/null
+++ b/v2/loggermdl/loggerdefault_test.go
@@ -0,0 +1,149 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:40:10 GMT+0530 (IST)
+
+package loggermdl
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap/zapcore"
+)
+
+func ExampleLogVars() {
+	type Example struct {
+		name string
+	}
+	a := Example{"name"}
+
+	LogVars(a)
+	//Output:
+	//(loggermdl.Example)(loggermdl.Example{
+	//	name: (string)("name"),
+	//})
+}
+
+func ExampleLogError() {
+	LogError("a")
+	//Output:
+	//
+}
+func ExampleLogInfo() {
+	LogInfo("a")
+	//Output:
+	//
+}
+func ExampleLogWarn() {
+	LogWarn("a")
+	//Output:
+	//
+}
+func ExampleLogDebug() {
+	LogDebug("a")
+	//Output:
+	//
+}
+func TestLogJSONString(t *testing.T) {
+	a := `{"Age": 1,"Name": "sometext"}`
+
+	assert.NotPanics(t, func() { LogJSONString(a) }, "The code did panic")
+
+	//Output2:
+	// {
+	//   "Age": 1,
+	//   "Name": "sometext"
+	// }
+}
+func ExampleLogHREnd() {
+	LogHREnd()
+	//Output:
+	//<<<<<<<<<<<<<<<<<<<<<<<
+}
+
+func TestLogJSONObject(t *testing.T) {
+	type MySt struct {
+		Name string
+		Age  int
+	}
+	m := MySt{"sometext", 1}
+	// jsonUnmarshal(json.Marshal(m), &m)
+
+	assert.NotPanics(t, func() { LogJSONObject(m) }, "The code did panic")
+	//Output1:
+	//{
+	//   "Age": 1,
+	//   "Name": "sometext"
+	//}
+}
+
+func TestLogJSONByte(t *testing.T) { // func ExampleLogJSONByte() {
+
+	type MySt struct {
+		Name string
+		Age  int
+	}
+	m := MySt{"sometext", 1}
+	a, _ := json.Marshal(m)
+
+	assert.NotPanics(t, func() { LogJSONByte(a) }, "The code did panic")
+	//Output1:
+	// {
+	//   "Age": 1,
+	//   "Name": "sometext"
+	// }
+
+}
+func ExampleLogHRStart() {
+	LogHRStart()
+	//Output:
+	//>>>>>>>>>>>>>>>>>>>>>>>
+}
+
+func ExampleGetCaller() {
+
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	GetCallers(0)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	a := GetCallers(0)
+	fmt.Println(len(a))
+	//output:
+	//15
+}
+
+func ExampleNegetiveGetCaller() {
+	a := GetCallers(-323232323)
+	fmt.Println(len(a))
+	//output:
+	//8
+}
+
+func ExampleInit() {
+	Init("filename", 3, 7, 5, zapcore.DebugLevel)
+	//output:
+	//
+}
+
+func TestLogPanic(t *testing.T) {
+	assert.Panics(t, func() { LogPanic("a") }, "The code did not panic")
+	assert.NotPanics(t, func() { LogSpot("A") }, "The code did panic")
+
+}
+
+// func TestLogTable(t *testing.T) {
+
+// 	type MySt struct {
+// 		Name string
+// 		Age  int
+// 	}
+// 	m1 := MySt{"sometextm1", 1}
+// 	m2 := MySt{"sometext m2", 13}
+// 	ary := make([]interface{}, 2)
+// 	ary = append(ary, m1)
+// 	ary = append(ary, m2)
+
+// 	assert.NotPanics(t, func() { LogTable(ary) }, "The code did  panic")
+
+// }
diff --git a/v2/loggermdl/loggermdl.go b/v2/loggermdl/loggermdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..3e1767670f7d1d9783bd0205a1e96a82a0c4f29a
--- /dev/null
+++ b/v2/loggermdl/loggermdl.go
@@ -0,0 +1,84 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 06:40:54 GMT+0530 (IST)
+
+// Package loggermdl helps you log errors
+package loggermdl
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	lumberjack "gopkg.in/natefinch/lumberjack.v2"
+)
+
+var logger *zap.Logger
+var sugar *zap.SugaredLogger
+
+// Init initializes the rotating file logger.
+// maxBackupFileSize is in megabytes; maxAgeForBackupFileInDays is in days.
+func Init(fileName string, maxBackupCnt, maxBackupFileSize, maxAgeForBackupFileInDays int, loglevel zapcore.Level) {
+	os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
+
+	w := zapcore.AddSync(&lumberjack.Logger{
+		Filename:   fileName,
+		MaxSize:    maxBackupFileSize, // megabytes
+		MaxBackups: maxBackupCnt,
+		MaxAge:     maxAgeForBackupFileInDays, // days
+	})
+
+	core := zapcore.NewCore(
+		zapcore.NewJSONEncoder(zap.NewDevelopmentEncoderConfig()),
+		w,
+		loglevel,
+	)
+
+	logger = zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
+	sugar = logger.Sugar()
+}
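+
+// Usage sketch (file name and rotation values are placeholders):
+//
+//	Init("logs/app.log", 3, 10, 28, zapcore.DebugLevel)
+//	LogInfo("service started")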
+
+// GetCallers returns the caller's file name and line number for the given skip depth
+func GetCallers(skip int) string {
+	_, file, line, ok := runtime.Caller(skip)
+	if errormdl.CheckBool(!ok) {
+		file = "<???>"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	return fmt.Sprintf("%s:%d", file, line)
+}
diff --git a/v2/loggermdl/loggerprod.go b/v2/loggermdl/loggerprod.go
new file mode 100755
index 0000000000000000000000000000000000000000..243dca215ae18d2553fd3ac94d0c8c60262ad533
--- /dev/null
+++ b/v2/loggermdl/loggerprod.go
@@ -0,0 +1,38 @@
+// +build prod
+
+package loggermdl
+
+// LogDebug logs a message at level Debug on the standard logger.
+func LogDebug(args ...interface{}) {
+	sugar.Debug(args)
+}
+
+// LogInfo logs a message at level Info on the standard logger.
+func LogInfo(args ...interface{}) {
+	sugar.Info(args)
+}
+
+// LogWarn logs a message at level Warn on the standard logger.
+func LogWarn(args ...interface{}) {
+	sugar.Warn(args)
+}
+
+// LogError logs a message at level Error on the standard logger.
+func LogError(args ...interface{}) {
+	sugar.Error(args)
+}
+
+// LogFatal logs a message at level Fatal on the standard logger.
+func LogFatal(args ...interface{}) {
+	sugar.Fatal(args)
+}
+
+// LogSpot logs a message as Info but highlights it.
+func LogSpot(args ...interface{}) {
+	sugar.Info(args)
+}
+
+// LogPanic logs a message at level Panic on the standard logger.
+func LogPanic(args ...interface{}) {
+	sugar.Panic(args)
+}
diff --git a/v2/mediamdl/mediaaudio.go b/v2/mediamdl/mediaaudio.go
new file mode 100644
index 0000000000000000000000000000000000000000..906bb079d40a7b21cc9df4caa0ee85d7c39abb35
--- /dev/null
+++ b/v2/mediamdl/mediaaudio.go
@@ -0,0 +1,10 @@
+package mediamdl
+
+// getAudioFromVideo1 extracts the audio track from a video via ffmpeg:
+// ffmpeg -i video.mp4 -f mp3 -ab 192000 -vn music.mp3
+func getAudioFromVideo1(inputPath, outputfilePath string) error {
+	commandargs := []string{`-i`, inputPath, `-f`, `mp3`, `-ab`, `192000`, `-vn`, `-y`, outputfilePath}
+	return executeargs("ffmpeg", commandargs)
+}
diff --git a/v2/mediamdl/mediaaudio_test.go b/v2/mediamdl/mediaaudio_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ceee84d55edf077117a8aa6fcf1bf91b7237bf8
--- /dev/null
+++ b/v2/mediamdl/mediaaudio_test.go
@@ -0,0 +1,36 @@
+package mediamdl
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// func Test_getAudioFromVideo(t *testing.T) {
+// 	inputPath := "../testingdata/testData/mediamdl/audio/sample.mp4"
+// 	inputPath, _ = filepath.Abs(inputPath)
+// 	outputPath := "../testingdata/testData/mediamdl/audio/sample-audio.mp3"
+// 	outputPath, _ = filepath.Abs(outputPath)
+// 	err := getAudioFromVideo(inputPath, outputPath)
+// 	fmt.Println("err: ", err)
+// 	assert.Error(t, err, "This will thrown an error.")
+// }
+
+func Test_getAudioFromVideo1(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/audio/sample.mp4"
+	inputPath, _ = filepath.Abs(inputPath)
+	outputPath := "../testingdata/testData/mediamdl/audio/sample-audio1.mp3"
+	outputPath, _ = filepath.Abs(outputPath)
+	err := getAudioFromVideo1(inputPath, outputPath)
+	assert.NoError(t, err, "This will thrown an error.")
+}
+
+// func Test_getAudioFromVideo2(t *testing.T) {
+// 	inputPath := "../testingdata/testData/mediamdl/audio/sample.mp4"
+// 	inputPath, _ = filepath.Abs(inputPath)
+// 	outputPath := "../testingdata/testData/mediamdl/audio/sample-audio2.mp3"
+// 	outputPath, _ = filepath.Abs(outputPath)
+// 	err := getAudioFromVideo2(inputPath, outputPath)
+// 	assert.NoError(t, err, "This will thrown an error.")
+// }
diff --git a/v2/mediamdl/mediaimage.go b/v2/mediamdl/mediaimage.go
new file mode 100644
index 0000000000000000000000000000000000000000..0039c029f44513466e509ac368ec90b64aefde4c
--- /dev/null
+++ b/v2/mediamdl/mediaimage.go
@@ -0,0 +1,28 @@
+package mediamdl
+
+// ffmpeg -i input.jpg -vf scale=320:240 output_320x240.png
+// ffmpeg -i test.tif -vf scale=504:376 -sws_flags bilinear out.bmp
+// ffmpeg -i input.jpg -vf scale=iw*2:ih input_double_width.png
+
+// compressImage re-encodes the image at its original width to reduce file size
+func compressImage(inputPath, outputfilePath string) error {
+	commandargs := []string{`-i`, inputPath, `-vf`, `scale=iw:-1`, `-y`, outputfilePath}
+	return executeargs("ffmpeg", commandargs)
+}
+
+// resizeImage scales the image to targetwidth, preserving aspect ratio
+func resizeImage(inputPath, outputfilePath, targetwidth string) error {
+	commandargs := []string{`-i`, inputPath, `-vf`, `scale=` + targetwidth + `:-1`, `-y`, outputfilePath}
+	return executeargs("ffmpeg", commandargs)
+}
+
+// resizeImageWithoutAspectRatio scales the image to the exact targetwidth x targetheight
+func resizeImageWithoutAspectRatio(inputPath, outputfilePath, targetwidth, targetheight string) error {
+	commandargs := []string{`-i`, inputPath, `-vf`, `scale=` + targetwidth + `:` + targetheight, `-y`, outputfilePath}
+	return executeargs("ffmpeg", commandargs)
+}
+
+// smartCropImage is not implemented yet; it runs an empty command and always returns an error
+func smartCropImage() error {
+	return executeargs("", []string{})
+}
diff --git a/v2/mediamdl/mediaimage_test.go b/v2/mediamdl/mediaimage_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e1e71d8fb6be7758adbb560470f6b808cf64594
--- /dev/null
+++ b/v2/mediamdl/mediaimage_test.go
@@ -0,0 +1,47 @@
+package mediamdl
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_compressImage(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/image/sample.bmp"
+	inputPath, _ = filepath.Abs(inputPath)
+	outputPath := "../testingdata/testData/mediamdl/image/sample-compress-image.jpg"
+	outputPath, _ = filepath.Abs(outputPath)
+	err := compressImage(inputPath, outputPath)
+	// fmt.Println("err: ", err)
+	assert.NoError(t, err, "This will not thrown an error.")
+}
+
+func Test1_compressImage(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/image/path with spaces/sample.bmp"
+	inputPath, _ = filepath.Abs(inputPath)
+	outputPath := "../testingdata/testData/mediamdl/image/path with spaces/sample-compress-image.jpg"
+	outputPath, _ = filepath.Abs(outputPath)
+	err := compressImage(inputPath, outputPath)
+	// fmt.Println("err: ", err)
+	assert.NoError(t, err, "This will not thrown an error.")
+}
+
+func Test_resizeImage(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/image/sample.bmp"
+	outputPath := "../testingdata/testData/mediamdl/image/sample-resized-image.bmp"
+	err := resizeImage(inputPath, outputPath, "100")
+	assert.NoError(t, err, "This will not throw error")
+}
+
+func Test_resizeImageWithoutAspectRatio(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/image/sample.bmp"
+	outputPath := "../testingdata/testData/mediamdl/image/sample-resize-wo-aspect-image.bmp"
+	err := resizeImageWithoutAspectRatio(inputPath, outputPath, "100", "100")
+	assert.NoError(t, err, "This will not throw error")
+}
+
+func Test_smartCropImage(t *testing.T) {
+	err := smartCropImage()
+	assert.Error(t, err, "This will not throw error")
+}
diff --git a/v2/mediamdl/mediamdl.go b/v2/mediamdl/mediamdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..3fcffb1cbc0a13badccc225128c1f5214e331f94
--- /dev/null
+++ b/v2/mediamdl/mediamdl.go
@@ -0,0 +1,55 @@
+package mediamdl
+
+import (
+	"bytes"
+	"os/exec"
+)
+
+// CompressVideo - CompressVideo
+func CompressVideo(inputPath, outputfilePath string) error {
+	return compressVideo(inputPath, outputfilePath)
+}
+
+// ResizeVideo - ResizeVideo
+func ResizeVideo(inputPath, targetwidth, targetheight, outputfilePath string) error {
+	return resizeVideo(inputPath, targetwidth, targetheight, outputfilePath)
+}
+
+// GetAudioFromVideo - GetAudioFromVideo
+func GetAudioFromVideo(inputPath, outputfilePath string) error {
+	return getAudioFromVideo1(inputPath, outputfilePath)
+}
+
+// CompressImage - CompressImage
+func CompressImage(inputPath, outputfilePath string) error {
+	return compressImage(inputPath, outputfilePath)
+}
+
+// ResizeImage - ResizeImage
+func ResizeImage(inputPath, outputfilePath, targetwidth string) error {
+	return resizeImage(inputPath, outputfilePath, targetwidth)
+}
+
+// ResizeImageWithoutAspectRatio - ResizeImageWithoutAspectRatio
+func ResizeImageWithoutAspectRatio(inputPath, outputfilePath, targetwidth, targetheight string) error {
+	return resizeImageWithoutAspectRatio(inputPath, outputfilePath, targetwidth, targetheight)
+}
+
+// SmartCropImage - SmartCropImage
+func SmartCropImage() error {
+	return smartCropImage()
+}
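+
+// Usage sketch (paths are placeholders; ffmpeg must be available on PATH):
+//
+//	if err := CompressVideo("/tmp/in.avi", "/tmp/out.mp4"); err != nil {
+//		// handle error
+//	}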
+
+func executeargs(commandline string, args []string) error {
+	cmd := exec.Command(commandline, args...)
+	var stderr bytes.Buffer
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	if err != nil {
+		// loggermdl.LogError(err)
+		// loggermdl.LogError(stderr.String())
+		return err
+	}
+	return nil
+}
diff --git a/v2/mediamdl/mediamdl_test.go b/v2/mediamdl/mediamdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a97556dbb2baead7ab88692e4c46f0af28a5c8c
--- /dev/null
+++ b/v2/mediamdl/mediamdl_test.go
@@ -0,0 +1,54 @@
+package mediamdl
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCompressVideo(t *testing.T) {
+	inputPath := "../testingdata/mediamdl/video/sample.avi"
+	outputPath := "../testingdata/mediamdl/video/sample-compress-video.mp4"
+	err := CompressVideo(inputPath, outputPath)
+	assert.Error(t, err, "This will thrown an error.")
+}
+
+func TestResizeVideo(t *testing.T) {
+	inputPath := "../testingdata/mediamdl/video/sample.mp4"
+	outputPath := "../testingdata/mediamdl/video/sample-resize-video.mp4"
+	err := ResizeVideo(inputPath, "100", "100", outputPath)
+	assert.Error(t, err, "This will thrown an error.")
+}
+
+func TestGetAudioFromVideo(t *testing.T) {
+	inputPath := "../testingdata/mediamdl/audio/sample.mp4"
+	outputPath := "../testingdata/mediamdl/audio/sample-audio.mp3"
+	err := GetAudioFromVideo(inputPath, outputPath)
+	assert.Error(t, err, "This will thrown an error.")
+}
+
+func TestCompressImage(t *testing.T) {
+	inputPath := "../testingdata/mediamdl/image/sample.bmp"
+	outputPath := "../testingdata/mediamdl/image/sample-compress-image.jpg"
+	err := CompressImage(inputPath, outputPath)
+	assert.Error(t, err, "This will thrown an error.")
+}
+
+func TestResizeImage(t *testing.T) {
+	inputPath := "../testingdata/mediamdl/image/sample.bmp"
+	outputPath := "../testingdata/mediamdl/image/sample-resized-image.bmp"
+	err := ResizeImage(inputPath, outputPath, "100")
+	assert.Error(t, err, "This will thrown an error.")
+}
+
+func TestResizeImageWithoutAspectRatio(t *testing.T) {
+	inputPath := "../testingdata/mediamdl/image/sample.bmp"
+	outputPath := "../testingdata/mediamdl/image/sample-resize-wo-aspect-image.bmp"
+	err := ResizeImageWithoutAspectRatio(inputPath, outputPath, "100", "100")
+	assert.Error(t, err, "This will thrown an error.")
+}
+
+func TestSmartCropImage(t *testing.T) {
+	err := SmartCropImage()
+	assert.Error(t, err, "This will not throw error")
+}
diff --git a/v2/mediamdl/mediavideo.go b/v2/mediamdl/mediavideo.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c80b4a737a20bf3176cb2e3edbb5918ea2c0a77
--- /dev/null
+++ b/v2/mediamdl/mediavideo.go
@@ -0,0 +1,15 @@
+package mediamdl
+
+// data := `ffmpeg  -i ` + inputPath + ` -strict -2 -s 720x480 -vf scale=1280:0 -c:v libx264 -preset veryslow -crf 24 -y ` + outputfilePath
+
+// CompressVideo - CompressVideo
+func compressVideo(inputPath, outputfilePath string) error {
+	commandargs := []string{`-i`, inputPath, `-strict`, `-2`, `-vf`, `scale=iw:-1`, `-c:v`, `libx264`, `-preset`, `veryslow`, `-crf`, `24`, `-y`, outputfilePath}
+	return executeargs("ffmpeg", commandargs)
+}
+
+// ResizeVideo - ResizeVideo
+func resizeVideo(inputPath, targetwidth, targetheight, outputfilePath string) error {
+	commandargs := []string{`-i`, inputPath, `-strict`, `-2`, `-s`, targetwidth + `x` + targetheight, `-vf`, `scale=iw:-1`, `-c:v`, `libx264`, `-preset`, `veryslow`, `-crf`, `24`, `-y`, outputfilePath}
+	return executeargs("ffmpeg", commandargs)
+}
diff --git a/v2/mediamdl/mediavideo_test.go b/v2/mediamdl/mediavideo_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d0826189e2e886a0627e624dea957872d6caf805
--- /dev/null
+++ b/v2/mediamdl/mediavideo_test.go
@@ -0,0 +1,26 @@
+package mediamdl
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_compressVideo(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/video/sample.avi"
+	inputPath, _ = filepath.Abs(inputPath)
+	outputPath := "../testingdata/testData/mediamdl/video/sample-compress-video.mp4"
+	outputPath, _ = filepath.Abs(outputPath)
+	err := compressVideo(inputPath, outputPath)
+	assert.NoError(t, err, "This will not thrown an error.")
+}
+
+func Test_resizeVideo(t *testing.T) {
+	inputPath := "../testingdata/testData/mediamdl/video/sample.mp4"
+	inputPath, _ = filepath.Abs(inputPath)
+	outputPath := "../testingdata/testData/mediamdl/video/sample-resize-video.mp4"
+	outputPath, _ = filepath.Abs(outputPath)
+	err := resizeVideo(inputPath, "100", "100", outputPath)
+	assert.NoError(t, err, "This will not thrown an error.")
+}
diff --git a/v2/notificationmdl/email/email-config.toml b/v2/notificationmdl/email/email-config.toml
new file mode 100644
index 0000000000000000000000000000000000000000..10b1d5e1de9259dc8ffe42a4fc0b4b5113025048
--- /dev/null
+++ b/v2/notificationmdl/email/email-config.toml
@@ -0,0 +1,5 @@
+server="smtp.gmail.com"
+port=587
+username="YOUR_EMAIL_ADDRESS"
+password="YOUR_EMAIL_PASSWORD"
+SSL=false
diff --git a/v2/notificationmdl/email/email-template.html b/v2/notificationmdl/email/email-template.html
new file mode 100644
index 0000000000000000000000000000000000000000..4e182e34678888864733e558f1607219a667adeb
--- /dev/null
+++ b/v2/notificationmdl/email/email-template.html
@@ -0,0 +1,15 @@
+<html>
+<head>
+    <title>Handler</title>
+</head>
+<body>
+        Hello {{.FirstName}} {{.LastName}}!
+        I am implementing golang template email notification.
+                    {{range .Emails}}
+                        An email address is {{.}}
+                    {{end}}
+        <div class="note">
+           <p> <a href="http://www.mkcl.org/" style="color: darkcyan"><b>Visit MKCL here</b></a> </p>
+        </div>
+</body>
+</html>
\ No newline at end of file
diff --git a/v2/notificationmdl/email/email.go b/v2/notificationmdl/email/email.go
new file mode 100644
index 0000000000000000000000000000000000000000..12c55e7e45aaecd9d24b7022a26b26b062f87441
--- /dev/null
+++ b/v2/notificationmdl/email/email.go
@@ -0,0 +1,210 @@
+package email
+
+import (
+	"bytes"
+	"crypto/tls"
+	"html/template"
+	"strings"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+
+	"gopkg.in/gomail.v2"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/configmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	raymond "github.com/aymerick/raymond"
+)
+
+// EmailConfig - EmailConfig
+type EmailConfig struct {
+	HostName  string `json:"hostName"`
+	Server    string `json:"server"`
+	Port      int    `json:"port"`
+	Username  string `json:"username"`
+	Password  string `json:"password"`
+	SSL       bool   `json:"SSL"`
+	IsDefault bool   `json:"isDefault"`
+}
+
+var once sync.Once
+var config = EmailConfig{}
+var emailInitError error
+
+// Init - initializes toml file configurations
+func Init(tomlFilepath string) error {
+	once.Do(func() {
+		_, err := configmdl.InitConfig(tomlFilepath, &config)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			emailInitError = err
+			return
+		}
+	})
+	return emailInitError
+}
+
+type Email struct {
+	from        string
+	replyTo     string
+	to          []string
+	cc          []string
+	bcc         []string
+	subject     string
+	attachments []string
+	body        string
+	plainBody   string // plain-text alternative to the HTML body
+}
+
+func NewEmail(to, cc, bcc, attachments []string, from, replyTo, subject, body string) *Email {
+	return &Email{
+		from:        from,
+		replyTo:     replyTo,
+		to:          to,
+		cc:          cc,
+		bcc:         bcc,
+		subject:     subject,
+		attachments: attachments,
+		body:        body,
+	}
+}
+
+// SetAlternateText - set alternate text for email
+func (email *Email) SetAlternateText(plainBody string) {
+	email.plainBody = plainBody
+}
+
+// SendMail renders the given HTML template file with data and sends the email.
+// templateData can contain any type of value, including array, slice, map, struct and func.
+func (email *Email) SendMail(templateFilePath string, templateData interface{}) error {
+	err := email.parseTemplateFile(templateFilePath, templateData)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	err = email.ParsePlainText(email.plainBody, templateData)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	if err := email.Send(); errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("error occured while calling SendMail: ", errormdl.CheckErr1(err))
+		return errormdl.Wrap("Failed to send the email to: " + strings.Join(email.to, ", "))
+	}
+	return nil
+}
+
+func (email *Email) parseTemplateFile(templateFilePath string, templateData interface{}) error {
+	tmplt, err := template.ParseFiles(templateFilePath)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError("error occured while calling parseTemplateFile: ", errormdl.CheckErr(err))
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	if err = tmplt.Execute(buffer, templateData); errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("error occured while calling parseTemplateFile: ", errormdl.CheckErr1(err))
+		return err
+	}
+	email.body = buffer.String()
+	return nil
+}
+
+// SendMailWithHandlebar renders the given handlebars template text with data and sends the email.
+// templateData can contain any type of value, including array, slice, map, struct and func.
+func (email *Email) SendMailWithHandlebar(templateText string, templateData interface{}) error {
+	err := email.parseTemplateText(templateText, templateData)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	if err := email.Send(); errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("error occured while calling SendMailWithHandlebar: ", errormdl.CheckErr1(err))
+		return errormdl.Wrap("Failed to send the email to: " + strings.Join(email.to, ", "))
+	}
+	return nil
+}
+
+func (email *Email) parseTemplateText(templateText string, templateData interface{}) error {
+	tmplt, err := raymond.Parse(templateText)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError("error occured while calling parseTemplateText: ", errormdl.CheckErr(err))
+		return err
+	}
+	emailbody, err := tmplt.Exec(templateData)
+	if errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("error occured while calling parseTemplateText: ", errormdl.CheckErr1(err))
+		return err
+	}
+	email.body = emailbody
+	return nil
+}
+
+// Send sends the email using the package-level configuration
+func (email *Email) Send() error {
+	domain, domainErr := getSenderDomain(email.from)
+	if domainErr != nil {
+		loggermdl.LogError("error getting domain address: ", domainErr)
+		return domainErr
+	}
+	message := gomail.NewMessage()
+	message.SetHeader("From", email.from)
+	message.SetHeader("To", email.to...)
+	message.SetHeader("Reply-To", email.replyTo)
+	message.SetHeader("Cc", email.cc...)
+	message.SetHeader("Bcc", email.bcc...)
+	message.SetHeader("Subject", email.subject)
+	message.SetHeader("Message-ID", getMSGIDHeader(domain, guidmdl.GetGUID()))
+	if len(strings.TrimSpace(email.plainBody)) == 0 {
+		message.SetBody("text/html", email.body)
+	} else {
+		message.SetBody("text/plain", email.plainBody)
+		message.AddAlternative("text/html", email.body)
+	}
+
+	for _, attachment := range email.attachments {
+		message.Attach(attachment) // attach whatever you want
+	}
+	dialer := gomail.Dialer{Host: config.Server, Port: config.Port, Username: config.Username, Password: config.Password, SSL: config.SSL}
+	dialer.TLSConfig = &tls.Config{InsecureSkipVerify: !config.SSL}
+	if err := dialer.DialAndSend(message); errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("error occured while calling Send(): ", errormdl.CheckErr1(err))
+		return err
+	}
+	return nil
+}
+
+func getMSGIDHeader(domain, guid string) string {
+	return "<" + guid + "@" + domain + ">"
+}
+
+func getSenderDomain(from string) (string, error) {
+	s := strings.Split(from, "@")
+	if len(s) != 2 {
+		return "", errormdl.Wrap("invalid email id for sender")
+	}
+
+	return s[1], nil
+}
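+
+// Usage sketch (addresses, subject and template path are placeholders):
+//
+//	_ = Init("email-config.toml")
+//	mail := NewEmail([]string{"to@example.org"}, nil, nil, nil,
+//		"from@example.org", "", "subject", "")
+//	err := mail.SendMail("email-template.html", templateData) // templateData feeds the template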
+
+// SendMailSMTP - send email service sends email as per the given html template and data using smtp
+// func (email *Email) SendMailSMTP(templateFilePath string, templateData interface{}) error {
+// 	err := email.parseTemplate(templateFilePath, templateData)
+// 	if err != nil {
+// 		loggermdl.LogError(err)
+// 		return err
+// 	}
+// 	if err := email.sendMailSMTP(); err != nil {
+// 		loggermdl.LogError(err)
+// 		return errormdl.Wrap("Failed to send the email to: " + strings.Join(email.to, ", "))
+// 	}
+// 	return nil
+// }
+// func (email *Email) sendMailSMTP() error {
+// 	body := "To: " + strings.Join(email.to, ",") + "\r\nSubject: " + email.subject + "\r\n" + constantmdl.MIME + "\r\n" + email.body
+// 	SMTP := fmt.Sprintf("%s:%d", config.Server, config.Port)
+// 	if err := smtp.SendMail(SMTP, smtp.PlainAuth("", config.Email, config.Password, config.Server), config.Email, email.to, []byte(body)); err != nil {
+// 		return err
+// 	}
+// 	return nil
+// }
diff --git a/v2/notificationmdl/email/emailSLS.go b/v2/notificationmdl/email/emailSLS.go
new file mode 100644
index 0000000000000000000000000000000000000000..182cf0b0da19a66cbd13f75d365a470f86786e38
--- /dev/null
+++ b/v2/notificationmdl/email/emailSLS.go
@@ -0,0 +1,139 @@
+package email
+
+import (
+	"bytes"
+	"crypto/tls"
+	"html/template"
+	"strings"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	gomail "gopkg.in/gomail.v2"
+)
+
+var instances map[string]EmailConfig
+var onceMutex sync.Once
+var defaultHost string
+
+// InitUsingJSON initializes Email Connections for give JSON data
+func InitUsingJSON(configs []EmailConfig) {
+	onceMutex.Do(func() {
+		instances = make(map[string]EmailConfig)
+		for _, config := range configs {
+			instances[config.HostName] = config
+			if config.IsDefault {
+				defaultHost = config.HostName
+			}
+		}
+	})
+}
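+
+// Usage sketch (host names and credentials are placeholders; mail is an
+// *Email built with NewEmail):
+//
+//	InitUsingJSON([]EmailConfig{{
+//		HostName: "primary", Server: "smtp.example.org", Port: 587,
+//		Username: "user", Password: "secret", SSL: true, IsDefault: true,
+//	}})
+//	err := mail.SendMailFromSLS("primary")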
+
+// SendMailFromSLS sends the email using the config registered for hostName; an empty hostName falls back to the default host
+func (email *Email) SendMailFromSLS(hostName string) error {
+	config := EmailConfig{}
+	if hostName == "" {
+		tmp, ok := instances[defaultHost]
+		if !ok {
+			loggermdl.LogError("Host not found: " + hostName)
+			return errormdl.Wrap("Host not found: " + hostName)
+		}
+		config = tmp
+	} else {
+		tmp, ok := instances[hostName]
+		if !ok {
+			loggermdl.LogError("Host not found: " + hostName)
+			return errormdl.Wrap("Host not found: " + hostName)
+		}
+		config = tmp
+	}
+
+	domain, domainErr := getSenderDomain(email.from)
+	if domainErr != nil {
+		loggermdl.LogError("error getting domain address: ", domainErr)
+		return domainErr
+	}
+
+	message := gomail.NewMessage()
+	message.SetHeader("From", email.from)
+	message.SetHeader("To", email.to...)
+	message.SetHeader("Reply-To", email.replyTo)
+	message.SetHeader("Cc", email.cc...)
+	message.SetHeader("Bcc", email.bcc...)
+	message.SetHeader("Subject", email.subject)
+	message.SetHeader("Message-ID", getMSGIDHeader(domain, guidmdl.GetGUID()))
+	if len(strings.TrimSpace(email.plainBody)) == 0 {
+		message.SetBody("text/html", email.body)
+	} else {
+		message.SetBody("text/plain", email.plainBody)
+		message.AddAlternative("text/html", email.body)
+	}
+
+	for _, attachment := range email.attachments {
+		message.Attach(attachment) // attach whatever you want
+	}
+	dialer := gomail.Dialer{Host: config.Server, Port: config.Port, Username: config.Username, Password: config.Password, SSL: config.SSL}
+	dialer.TLSConfig = &tls.Config{InsecureSkipVerify: !config.SSL}
+	if err := dialer.DialAndSend(message); errormdl.CheckErr1(err) != nil {
+		loggermdl.LogError("error occured while calling Send(): ", errormdl.CheckErr1(err))
+		return err
+	}
+	return nil
+}
+
+// ParseTemplateText parses templateFileText with templateData and sets the result as the email body
+func (email *Email) ParseTemplateText(templateFileText string, templateData interface{}) error {
+	tmp := template.Must(template.New("email").Parse(templateFileText))
+
+	buffer := new(bytes.Buffer)
+	err := tmp.Execute(buffer, templateData)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+	email.body = buffer.String()
+	return nil
+}
+
+// ParsePlainText parses templateFileText with templateData and sets the result as the plain-text alternative body
+func (email *Email) ParsePlainText(templateFileText string, templateData interface{}) error {
+
+	tmp := template.Must(template.New("email").Parse(templateFileText))
+
+	buffer := new(bytes.Buffer)
+	err := tmp.Execute(buffer, templateData)
+	if err != nil {
+		loggermdl.LogError(err)
+		return err
+	}
+
+	email.plainBody = buffer.String()
+	return nil
+}
diff --git a/v2/notificationmdl/email/email_test.go b/v2/notificationmdl/email/email_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..547bb5e0889f19d3f4c30b95b4b4936776f265d6
--- /dev/null
+++ b/v2/notificationmdl/email/email_test.go
@@ -0,0 +1,218 @@
+package email
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/stretchr/testify/assert"
+)
+
+type Person struct {
+	FirstName string
+	LastName  string
+	Emails    []string
+}
+
+func TestInit(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := Init("email-config.toml")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.NoError(t, err, "This should not return error")
+}
+func TestSendMail(t *testing.T) {
+	persondata := Person{
+		FirstName: "Mayuri",
+		LastName:  "Shinde",
+		Emails:    []string{"mayuris@gmail.com", "mayuri@gmail.com"},
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test Email gomail"
+	to := []string{"mayuri92shinde@gmail.com"}
+	// cc := []string{"onkarh@mkcl.org", "kumargauravs@mkcl.org"}
+	// bcc := []string{"Prajkty@mkcl.org", "rakeshd@mkcl.org"}
+	//attachments := []string{"go.jpg"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	err := mail.SendMail("email-template.html", persondata)
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestSendMailErrorWrongTemplt(t *testing.T) {
+	persondata := Person{
+		FirstName: "Mayuri",
+		LastName:  "Shinde",
+		Emails:    []string{"mayuris@gmail.com", "mayuri@gmail.com"},
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test Email gomail"
+	to := []string{"mayuri92shinde@gmail.com"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	err := mail.SendMail("wrong.html", persondata)
+	assert.Error(t, err, "This should return error")
+}
+
+func TestSendMailError(t *testing.T) {
+	persondata := Person{
+		FirstName: "Mayuri",
+		LastName:  "Shinde",
+		Emails:    []string{"mayuris@gmail.com", "mayuri@gmail.com"},
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test Email gomail"
+	to := []string{"mayuri92shinde@gmail.com"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	err := mail.SendMail("email-template.html", persondata)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should return error")
+}
+
+func BenchmarkSendMail(b *testing.B) {
+	persondata := Person{
+		FirstName: "Mayuri",
+		LastName:  "Shinde",
+		Emails:    []string{"mayuris@gmail.com", "mayuri@gmail.com"},
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test Email smtp"
+	to := []string{"mayuri92shinde@gmail.com"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	for i := 0; i < b.N; i++ {
+		mail.SendMail("email-template.html", persondata)
+	}
+}
+func TestSendMailWithHandlebar(t *testing.T) {
+	tpl1 := `<div class="post">
+	<h1>By {{author.firstName}} {{author.lastName}}</h1>
+	<div class="body">{{body}}</div>
+  
+	<h1>Comments</h1>
+  
+	{{#each comments}}
+	<h2>By {{author.firstName}} {{author.lastName}}</h2>
+	<div class="body">{{content}}</div>
+	{{/each}}
+  </div>`
+
+	type Person struct {
+		FirstName string
+		LastName  string
+	}
+
+	type Comment struct {
+		Author Person
+		Body   string `handlebars:"content"`
+	}
+
+	type Post struct {
+		Author   Person
+		Body     string
+		Comments []Comment
+	}
+
+	ctx1 := Post{
+		Person{"Jean", "Valjean"},
+		"Life is difficult",
+		[]Comment{
+			Comment{
+				Person{"Marcel", "Beliveau"},
+				"LOL!",
+			},
+		},
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test SendMailWithHandlebar"
+	to := []string{"mayuri92shinde@gmail.com"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	err := mail.SendMailWithHandlebar(tpl1, ctx1)
+	assert.NoError(t, err, "This should not return error")
+}
+func TestSendMailWithHandlebarError(t *testing.T) {
+	tpl2 := `<div class="entry">
+	<h1>{{title}}</h1>
+	<div class="body">
+	  {{body}}
+	</div>
+  </div>
+  `
+	ctx2 := []map[string]string{
+		{
+			"title": "My New Post",
+			"body":  "This is my first post!",
+		},
+		{
+			"title": "Here is another post",
+			"body":  "This is my second post!",
+		},
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test SendMailWithHandlebarError"
+	to := []string{"mayuri92shinde@gmail.com"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := mail.SendMailWithHandlebar(tpl2, ctx2)
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestSendMailWithHandlebarError1(t *testing.T) {
+	tpl1 := `<div class="entry">
+	<h1>{{title}}</h1>
+	<div class="body">
+	  {{body}}
+	</div>
+  </div>
+  `
+
+	ctx1 := map[string]string{
+		"title": "My New Post",
+		"body":  "This is my first post!",
+	}
+	from := "mayuris@mkcl.org"
+	subject := "Test SendMailWithHandlebarError1"
+	to := []string{"mayuri92shinde@gmail.com"}
+	Init("email-config.toml")
+	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	err := mail.SendMailWithHandlebar(tpl1, ctx1)
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.Error(t, err, "This should return error")
+}
+
+// func BenchmarkSendMailSMTP(b *testing.B) {
+// 	persondata := Person{
+// 		FirstName: "Mayuri",
+// 		LastName:  "Shinde",
+// 		Emails:    []string{"mayuris@gmail.com", "mayuri@gmail.com"},
+// 	}
+// 	from := "mayuris@mkcl.org"
+// 	subject := "Test Email smtp"
+// 	to := []string{"mayuri92shinde@gmail.com"}
+// 	Init("email-config.toml")
+// 	mail := NewEmail(to, []string{}, []string{}, []string{}, from, "", subject, "")
+// 	for i := 0; i < b.N; i++ {
+// 		mail.SendMailSMTP("email-template.html", persondata)
+// 	}
+// }
diff --git a/v2/notificationmdl/sms/sms-config.toml b/v2/notificationmdl/sms/sms-config.toml
new file mode 100644
index 0000000000000000000000000000000000000000..f5ccc9648bace043d19eedc6067f4a8d26b8f221
--- /dev/null
+++ b/v2/notificationmdl/sms/sms-config.toml
@@ -0,0 +1,5 @@
+SMSGatewayUrl="http://hapi.smsapi.org/SendSMS.aspx"
+UserName="MKCLOS_trans"
+Password="trans123"
+SenderID="MKCLTD"
+CDMAHeader="MKCLTD"
\ No newline at end of file
diff --git a/v2/notificationmdl/sms/sms.go b/v2/notificationmdl/sms/sms.go
new file mode 100644
index 0000000000000000000000000000000000000000..204d9de7d97becc19bcdda1f5c1f113271b903de
--- /dev/null
+++ b/v2/notificationmdl/sms/sms.go
@@ -0,0 +1,78 @@
+package sms
+
+import (
+	"net/http"
+	"net/url"
+	"sync"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/configmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+type SMSConfig struct {
+	SMSGatewayUrl string
+	UserName      string
+	Password      string
+	SenderID      string
+	CDMAHeader    string
+}
+
+var once sync.Once
+var config = SMSConfig{}
+var smsInitError error
+
+// Init - initializes toml file configurations
+func Init(tomlFilepath string) error {
+	once.Do(func() {
+		_, err := configmdl.InitConfig(tomlFilepath, &config)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			smsInitError = err
+			return
+		}
+	})
+	return smsInitError
+}
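+
+// Usage sketch (message text and number are placeholders):
+//
+//	if err := Init("sms-config.toml"); err != nil {
+//		// handle init error
+//	}
+//	err := SendSMS("Hello", "9999999999")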
+
+// SendSMS sends message to mobileNumber through the configured SMS gateway
+func SendSMS(message string, mobileNumber string) error {
+	// parse the gateway URL
+	Url, parseErr := url.Parse(config.SMSGatewayUrl)
+	if errormdl.CheckErr(parseErr) != nil {
+		loggermdl.LogError("error occurred while connecting to SMS gateway: ", errormdl.CheckErr(parseErr))
+		return parseErr
+	}
+
+	// add url parameters
+	urlParameters := url.Values{}
+	urlParameters.Add("username", config.UserName)
+	urlParameters.Add("password", config.Password)
+	if len(mobileNumber) == 0 {
+		loggermdl.LogError("error occured while calling SendSMS : ", errormdl.Wrap("Mobile number empty"))
+		return errormdl.Wrap("Mobile number empty")
+	}
+	urlParameters.Add("to", constantmdl.COUNTRYCODE+mobileNumber)
+	urlParameters.Add("from", config.CDMAHeader)
+	urlParameters.Add("text", message)
+
+	// encode url
+	Url.RawQuery = urlParameters.Encode()
+
+	// send get request
+	response, getErr := http.Get(Url.String())
+	if errormdl.CheckErr1(getErr) != nil {
+		loggermdl.LogError("error occured while calling SendSMS : ", errormdl.CheckErr1(getErr))
+		if response != nil {
+			response.Body.Close()
+		}
+		return getErr
+	}
+
+	response.Body.Close()
+	return nil
+}
diff --git a/v2/notificationmdl/sms/sms_test.go b/v2/notificationmdl/sms/sms_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b97a16e321a535e13523a258225d576362cf00a
--- /dev/null
+++ b/v2/notificationmdl/sms/sms_test.go
@@ -0,0 +1,48 @@
+package sms
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInit(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	err := Init("sms-config.toml")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestSendSMS(t *testing.T) {
+	Init("sms-config.toml")
+	err := SendSMS("Hi..", "7768982288")
+	assert.NoError(t, err, "This should not return error")
+}
+
+func TestSendMobileempty(t *testing.T) {
+	Init("sms-config.toml")
+	err := SendSMS("Hi..", "")
+	assert.Error(t, err, "This should return error")
+}
+
+func TestSendMessageempty(t *testing.T) {
+	Init("sms-config.toml")
+	err := SendSMS("", "7768982288")
+	assert.NoError(t, err, "This should not return error")
+}
+func TestSendSMSError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	Init("sms-config.toml")
+	err := SendSMS("Hi..", "7768982288")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.NoError(t, err, "This should return error")
+}
+
+func TestSendSMSError1(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn1 = true
+	Init("sms-config.toml")
+	err := SendSMS("Hi..", "7768982288")
+	errormdl.IsTestingNegetiveCaseOn1 = false
+	assert.NoError(t, err, "This should not return error")
+}
diff --git a/v2/powerbuildermdl/powerbuildermdl.go b/v2/powerbuildermdl/powerbuildermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..e56a03c6a0882e16793d3afc2522a0d01061a4ff
--- /dev/null
+++ b/v2/powerbuildermdl/powerbuildermdl.go
@@ -0,0 +1 @@
+package powerbuildermdl
diff --git a/v2/routebuildermdl/grpcservermdl.go b/v2/routebuildermdl/grpcservermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..247b579b4db4a2936224dac66483d6bd518ddc8e
--- /dev/null
+++ b/v2/routebuildermdl/grpcservermdl.go
@@ -0,0 +1,134 @@
+package routebuildermdl
+
+import (
+	"context"
+	"encoding/json"
+	"net"
+
+	"google.golang.org/grpc"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/roleenforcemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/grpcbuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+)
+
+// Server server
+type Server struct{}
+
+// GRPCInit starts the GRPC server on the given listener
+func GRPCInit(GRPCPort net.Listener) {
+	loggermdl.LogInfo("In GRPCInit")
+	s := grpc.NewServer()
+	grpcbuildermdl.RegisterGRPCCheckServer(s, &Server{})
+	grpcbuildermdl.RegisterGRPCServiceServer(s, &Server{})
+	// Serve blocks, so log the listener address before starting
+	loggermdl.LogInfo("GRPC server listening on: ", GRPCPort.Addr().String())
+	if err := s.Serve(GRPCPort); err != nil {
+		loggermdl.LogError("Unable to start GRPC server: ", err)
+	}
+}
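+
+// Usage sketch (address is a placeholder):
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		// handle error
+//	}
+//	GRPCInit(lis) // blocks while serving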
+
+// GRPCHandler GRPCHandler
+func (*Server) GRPCHandler(ctx context.Context, req *grpcbuildermdl.GRPCRequest) (*grpcbuildermdl.GRPCByteResponse, error) {
+	loggermdl.LogError("GRPC Handler inoked:")
+	principal := servicebuildermdl.Principal{}
+	errExecutingActivity := ""
+	if req.GetGrpcMessage().GetIsRestricted() {
+		claim, claimError := jwtmdl.GeneratePricipleObjUsingToken(req.GetGrpcMessage().GetToken(), jwtmdl.GlobalJWTKey)
+		if errormdl.CheckErr(claimError) != nil {
+			loggermdl.LogError("Error generating principal from token", claimError)
+			return nil, errormdl.CheckErr(claimError)
+		}
+		groups, grperr := roleenforcemdl.GetGroupNames(claim, "groups")
+		if errormdl.CheckErr(grperr) != nil {
+			loggermdl.LogError("Error accessing group", grperr)
+			return nil, errormdl.CheckErr(grperr)
+		}
+		userID, ok := claim["userId"].(string)
+		if !ok || len(userID) < 2 {
+			loggermdl.LogError("Unable to parse UserID from JWT Token")
+			return nil, errormdl.Wrap("Unable to parse UserID from JWT Token")
+		}
+		rawMetadata, ok := claim["metadata"]
+		if ok {
+			metadata, ok := rawMetadata.(string)
+			if !ok {
+				loggermdl.LogError("Unable to parse metadata from JWT Token")
+				return nil, errormdl.Wrap("Unable to parse metadata from JWT Token")
+			}
+			principal.Metadata = metadata
+		}
+		principal.Groups = groups
+		principal.UserID = userID
+		principal.Token = req.GetGrpcMessage().GetToken()
+	}
+
+	result, _, _, errorCode, err := executeServiceWithBranch(
+		req.GetGrpcMessage().GetName(),
+		req.GetGrpcMessage().GetBranch(),
+		req.GetGrpcMessage().GetData(),
+		req.GetGrpcMessage().GetIsRestricted(),
+		req.GetGrpcMessage().GetIsRoleBased(), false, principal)
+
+	e, _ := json.Marshal(result)
+	if err != nil {
+		errExecutingActivity = err.Error()
+	}
+	res := &grpcbuildermdl.GRPCByteResponse{
+		Data:      e,
+		ErrorCode: int32(errorCode),
+		Error:     errExecutingActivity,
+	}
+	return res, nil
+}
+
+// GRPCCheck GRPCCheck
+func (*Server) GRPCCheck(ctx context.Context, req *grpcbuildermdl.GRPCRequest) (*grpcbuildermdl.GRPCResponse, error) {
+	claim, claimError := jwtmdl.GeneratePricipleObjUsingToken(req.GetGrpcMessage().GetToken(), jwtmdl.GlobalJWTKey)
+	if errormdl.CheckErr(claimError) != nil {
+		loggermdl.LogError("Error generating principal from token", claimError)
+		return nil, errormdl.CheckErr(claimError)
+	}
+	principal := servicebuildermdl.Principal{}
+	groups, grperr := roleenforcemdl.GetGroupNames(claim, "groups")
+	if errormdl.CheckErr(grperr) != nil {
+		loggermdl.LogError(grperr)
+		return nil, errormdl.CheckErr(grperr)
+	}
+	userID, ok := claim["userId"].(string)
+	if !ok || len(userID) < 2 {
+		loggermdl.LogError("Unable to parse UserID from JWT Token")
+		return nil, errormdl.Wrap("Unable to parse UserID from JWT Token")
+	}
+	rawMetadata, ok := claim["metadata"]
+	if ok {
+		metadata, ok := rawMetadata.(string)
+		if !ok {
+			loggermdl.LogError("Unable to parse metadata from JWT Token")
+			return nil, errormdl.Wrap("Unable to parse metadata from JWT Token")
+		}
+		principal.Metadata = metadata
+	}
+	principal.Groups = groups
+	principal.UserID = userID
+	principal.Token = req.GetGrpcMessage().GetToken()
+
+	//TODO: remove logger
+	loggermdl.LogError("Branch:", req.GetGrpcMessage().GetBranch())
+	_, _, _, _, err := executeServiceWithBranch(
+		req.GetGrpcMessage().GetName(),
+		req.GetGrpcMessage().GetBranch(),
+		req.GetGrpcMessage().GetData(),
+		req.GetGrpcMessage().GetIsRestricted(),
+		req.GetGrpcMessage().GetIsRoleBased(), false, principal)
+
+	res := &grpcbuildermdl.GRPCResponse{
+		Data: "Response from GRPC Check service",
+	}
+	return res, err
+}
diff --git a/v2/routebuildermdl/masterServicemdl.go b/v2/routebuildermdl/masterServicemdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..93a7fcf546340d47523ca6bbf7198340e82f5928
--- /dev/null
+++ b/v2/routebuildermdl/masterServicemdl.go
@@ -0,0 +1,248 @@
+package routebuildermdl
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+
+	dalmdl "corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/dao"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/mongodb"
+
+	"github.com/tidwall/gjson"
+)
+
+// Master - struct for master Service
+type Master struct {
+	serviceName  string
+	isCach       bool
+	cacheTime    time.Duration
+	isRestricted bool
+	isRoleBased  bool
+	isMongo      bool
+}
+
+// MongoQuery - for mongo service
+type MongoQuery struct {
+	collection         string
+	host               string
+	query              string
+	projectionQuery    string
+	args               []string
+	isAggregationQuery bool
+}
+
+// FDBQuery - for fdb services
+type FDBQuery struct {
+	filePath string
+	query    []string
+}
+
+// Runable - helps to run
+type Runable struct {
+	Master
+	MongoQuery
+	FDBQuery
+}
+
+// MongoService - return mongo query object
+func (m *Master) MongoService(collectionName, mongoQuery string) *Runable {
+	mongo := MongoQuery{
+		collection: collectionName,
+		query:      mongoQuery,
+	}
+	m.isMongo = true
+	runable := &Runable{
+		Master:     *m,
+		MongoQuery: mongo,
+	}
+	return runable
+}
+
+// MongoServiceWithHost - return mongo query object
+func (m *Master) MongoServiceWithHost(hostName, collectionName, mongoQuery string) *Runable {
+	mongo := MongoQuery{
+		host:       hostName,
+		collection: collectionName,
+		query:      mongoQuery,
+	}
+	m.isMongo = true
+	runable := &Runable{
+		Master:     *m,
+		MongoQuery: mongo,
+	}
+	return runable
+}
+
+// FDBService - return fdb query object
+func (m *Master) FDBService(filePath string, query ...string) *Runable {
+	FDB := FDBQuery{
+		filePath: filePath,
+		query:    query,
+	}
+	runable := &Runable{
+		Master:   *m,
+		FDBQuery: FDB,
+	}
+	return runable
+}
+
+// IsCachable for both fdb and mongo
+func (m *Master) IsCachable() *Master {
+	m.isCach = true
+	return m
+}
+
+// IsCachableWithExpiration for both fdb and mongo
+func (m *Master) IsCachableWithExpiration(cacheExpirationTime time.Duration) *Master {
+	m.isCach = true
+	m.cacheTime = cacheExpirationTime
+	return m
+}
+
+// SetArgs set argument for query string
+func (m *Runable) SetArgs(args ...string) *Runable {
+	if m.Master.isMongo {
+		m.MongoQuery.args = args
+	}
+	return m
+}
+
+// SetProjectionQuery set SetProjectionQuery for query string
+func (m *Runable) SetProjectionQuery(query string) *Runable {
+	if m.Master.isMongo {
+		m.MongoQuery.projectionQuery = query
+	}
+	return m
+}
+
+// IsAggregationQuery - Set flag for aggregation query
+func (m *Runable) IsAggregationQuery() *Runable {
+	if m.Master.isMongo {
+		m.MongoQuery.isAggregationQuery = true
+	}
+	return m
+}
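+
+// Builder usage sketch (assumes a *Master constructed elsewhere, e.g. by a
+// registration helper; service and query values are illustrative):
+//
+//	runable := master.
+//		IsCachable().
+//		MongoService("users", `{"name": "~1"}`).
+//		SetArgs("name")
+//	result, err := runable.Run(data, principal)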
+
+// // Register - register it in cacahe
+// func (m *Runable) Register() {
+// 	service := ServiceCache{
+// 		MasterService:   m,
+// 		IsMasterService: true,
+// 	}
+// 	commonServiceRegistration(m.Master.serviceName, service, m.Master.isRestricted, m.Master.isRoleBased)
+// }
+
+// Run - execute and return output and error
+func (m *Runable) Run(data []byte, principal *servicebuildermdl.Principal) (interface{}, error) {
+	if m.Master.isMongo {
+		return m.runMongoService(data, principal)
+	}
+	return m.runFDBService()
+}
+
+func (m *Runable) runMongoService(data []byte, principal *servicebuildermdl.Principal) (interface{}, error) {
+	rs := gjson.ParseBytes(data)
+	tmp := m.MongoQuery.query
+	var principalError error
+	tmp, principalError = parsePrincipalObject(tmp, principal)
+	if errormdl.CheckErr(principalError) != nil {
+		loggermdl.LogError(principalError)
+		return nil, errormdl.CheckErr(principalError)
+	}
+	if m.MongoQuery.isAggregationQuery {
+		v, formatError := m.formatAggregateQuery(&rs, tmp)
+		if errormdl.CheckErr(formatError) != nil {
+			loggermdl.LogError(formatError)
+			return nil, errormdl.CheckErr(formatError)
+		}
+		query, getError := mongodb.GetMongoDAOWithHost(m.MongoQuery.host, m.MongoQuery.collection).GetAggregateData(v)
+		if errormdl.CheckErr(getError) != nil {
+			loggermdl.LogError(getError)
+			return nil, errormdl.CheckErr(getError)
+		}
+		return query.Value(), nil
+	}
+
+	v, p, formatError := m.formatNormalQuery(&rs, tmp)
+	if errormdl.CheckErr(formatError) != nil {
+		loggermdl.LogError(formatError)
+		return nil, errormdl.CheckErr(formatError)
+	}
+	query, getError := mongodb.GetMongoDAOWithHost(m.MongoQuery.host, m.MongoQuery.collection).GetProjectedData(v, p)
+	if errormdl.CheckErr(getError) != nil {
+		loggermdl.LogError(getError)
+		return nil, errormdl.CheckErr(getError)
+	}
+	return query.Value(), nil
+}
+
+func (m *Runable) formatAggregateQuery(rs *gjson.Result, tmp string) ([]interface{}, error) {
+	for i, arg := range m.MongoQuery.args {
+		result := rs.Get(arg).String()
+		argNotation := "~" + strconv.Itoa(i+1)
+		tmp = strings.Replace(tmp, argNotation, result, 1)
+	}
+	v, ok := gjson.Parse(tmp).Value().([]interface{})
+	if !ok {
+		loggermdl.LogError("Invalid Mongo Query")
+		return nil, errormdl.Wrap("Invalid Mongo Query")
+	}
+	return v, nil
+}
+
+func (m *Runable) formatNormalQuery(rs *gjson.Result, tmp string) (map[string]interface{}, map[string]interface{}, error) {
+	for i, arg := range m.MongoQuery.args {
+		result := rs.Get(arg).String()
+		argNotation := "~" + strconv.Itoa(i+1)
+		tmp = strings.Replace(tmp, argNotation, result, 1)
+	}
+
+	v, ok := gjson.Parse(tmp).Value().(map[string]interface{})
+	if !ok {
+		loggermdl.LogError("Invalid Mongo Query")
+		return nil, nil, errormdl.Wrap("Invalid Mongo Query")
+	}
+	if m.MongoQuery.projectionQuery == "" {
+		m.MongoQuery.projectionQuery = "{}"
+	}
+	p, ok := gjson.Parse(m.MongoQuery.projectionQuery).Value().(map[string]interface{})
+	if !ok {
+		loggermdl.LogError("Invalid Mongo Projection Query Query")
+		return nil, nil, errormdl.Wrap("Invalid Mongo Projection Query Query")
+	}
+	return v, p, nil
+}
+
+func parsePricipalObject(query string, principal *servicebuildermdl.Principal) (string, error) {
+	ba, marshalError := json.Marshal(principal)
+	if errormdl.CheckErr(marshalError) != nil {
+		loggermdl.LogError(marshalError)
+		return "", errormdl.CheckErr(marshalError)
+	}
+	pricipalRS := gjson.ParseBytes(ba)
+	result := pricipalRS.Get("userId").String()
+	argNotation := "~tokenUserId"
+	query = strings.Replace(query, argNotation, result, 1)
+	return query, nil
+}
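+
+// Illustrative example of the substitution above: a stored query such as
+//
+//	{"createdBy": "~tokenUserId"}
+//
+// becomes {"createdBy": "<userId from the caller's token>"} before execution,
+// letting services scope results to the authenticated user.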
+
+func (m *Runable) runFDBService() (interface{}, error) {
+	rs, getError := dalmdl.GetDAO().
+		FilePath(m.FDBQuery.filePath).
+		Query(m.FDBQuery.query...).
+		IsCacheableWithDuration(m.Master.cacheTime).
+		Run()
+	if errormdl.CheckErr(getError) != nil {
+		loggermdl.LogError(getError)
+		return nil, errormdl.CheckErr(getError)
+	}
+	return rs.Value(), nil
+}
diff --git a/v2/routebuildermdl/masterServicemdl_test.go b/v2/routebuildermdl/masterServicemdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..16ec904a1e6ffee929c73e5cf92d7cb2746b915d
--- /dev/null
+++ b/v2/routebuildermdl/masterServicemdl_test.go
@@ -0,0 +1,30 @@
+package routebuildermdl
+
+// func TestRunable_Run(t *testing.T) {
+// 	err := mongodb.Init("../testingdata/testData/config/config.toml", "host1")
+// 	assert.NoError(t, err)
+// 	query := `{"name": "~1", "age": ~2}`
+
+// 	input := `{"name": "roshan", "age": 23}`
+// 	run := RegisterMasterService("serviceName", true, false).
+// 		IsCachable().
+// 		MongoService("test", query).
+// 		SetArgs("name", "age")
+
+// 	result, err := run.Run([]byte(input))
+// 	fmt.Println(result)
+// 	assert.NotNil(t, result)
+// 	assert.NoError(t, err)
+// }
+
+// func TestFDB_Run(t *testing.T) {
+// 	path := "../testingdata/users.json"
+// 	run := RegisterMasterService("serviceName", true, false).
+// 		IsCachableWithExpiration(1000).
+// 		FDBService(path, "*")
+
+// 	result, err := run.Run(nil)
+// 	fmt.Println(result)
+// 	assert.NotNil(t, result)
+// 	assert.NoError(t, err)
+// }
diff --git a/v2/routebuildermdl/responseHanldermdl.go b/v2/routebuildermdl/responseHanldermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..aca37f803784fae4ab27a3863d5ed8aae80f2d88
--- /dev/null
+++ b/v2/routebuildermdl/responseHanldermdl.go
@@ -0,0 +1,105 @@
+package routebuildermdl
+
+import (
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+)
+
+// Response header settings
+
+var responseHeaders cachemdl.FastCacheHelper
+
+func init() {
+	// TODO: Take all configurations from config module
+	responseHeaders.Setup(10, time.Minute*10, time.Minute*10)
+}
+
+// ResponseHeaders is the struct for response headers of a service
+type ResponseHeaders struct {
+	ServiceName string
+	HeaderID    string           `json:"headerId"`
+	LCache      bool             `json:"lCache"`
+	Methods     []responseMethod `json:"methods"`
+}
+
+type responseMethod struct {
+	MethodName string      `json:"methodName"`
+	Data       interface{} `json:"data"`
+}
+
+type responseData struct {
+	Result         interface{}   `json:"result"`
+	Error          interface{}   `json:"error"`
+	ResponseHeader interface{}   `json:"reponseHeader"`
+	ErrorCode      int           `json:"errorCode"`
+	DebugResponse  DebugResponse `json:"debugInfo"`
+	IsCompressed   bool          `json:"isCompressed"`
+	ServerTime     time.Time     `json:"serverTime"`
+}
+
+// DebugResponse - stack trace and performance info returned for debugging
+type DebugResponse struct {
+	StackTrace      interface{} `json:"stackTrace"`
+	PerformanceInfo interface{} `json:"performanceInfo"`
+}
+
+// CreateResponseHeader creates an instance of the response header for a service
+func CreateResponseHeader(serviceName string) *ResponseHeaders {
+
+	lCacheflag := false
+	responseHeader, ok := responseHeaders.Get(serviceName)
+	if ok {
+		lCacheflag = responseHeader.(ResponseHeaders).LCache
+		// TODO: Delete functionality
+		// responseHeaders.Delete(serviceName)
+	}
+	rh := &ResponseHeaders{}
+	rh.ServiceName = serviceName
+	rh.LCache = lCacheflag
+	rh.HeaderID = guidmdl.GetGUID()
+	return rh
+}
+
+// EnableReponseCache enables caching of this response in local storage
+func (rh *ResponseHeaders) EnableReponseCache() *ResponseHeaders {
+	rh.LCache = true
+	return rh
+}
+
+// DisableReponseCache disables caching of this response in local storage
+func (rh *ResponseHeaders) DisableReponseCache() *ResponseHeaders {
+	rh.LCache = false
+	return rh
+}
+
+// AddMethod adds a method and its data to the response
+func (rh *ResponseHeaders) AddMethod(name string, data interface{}) *ResponseHeaders {
+	rh.Methods = append(rh.Methods, responseMethod{MethodName: name, Data: data})
+	return rh
+}
+
+// SetResponseHeader stores the response header in the cache
+func (rh *ResponseHeaders) SetResponseHeader() {
+	responseHeaders.SetNoExpiration(rh.ServiceName, *rh)
+}
+
+// GetResponseHeader returns the response header object for a service
+func GetResponseHeader(serviceName string) (interface{}, bool) {
+	return responseHeaders.Get(serviceName)
+}
+
+func formatResponse(ab *servicebuildermdl.AbstractBusinessLogicHolder, responseData responseData) responseData {
+	if ab == nil {
+		return responseData
+	}
+	stackTrace, _ := ab.GetDataResultset("MQLStackTrace")
+	performanceTrace, _ := ab.GetDataResultset("MQLPerformanceTrace")
+	responseData.DebugResponse.StackTrace = stackTrace.Value()
+	responseData.DebugResponse.PerformanceInfo = performanceTrace.Value()
+	responseData.ServerTime = time.Now()
+	return responseData
+}
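+
+// A minimal registration sketch (hypothetical service name) for the fluent
+// header API above; the stored header is attached to responses under the
+// "reponseHeader" field of responseData:
+//
+//	CreateResponseHeader("LoginService").
+//		EnableReponseCache().
+//		AddMethod("refreshToken", nil).
+//		SetResponseHeader()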
diff --git a/v2/routebuildermdl/routebuilder_fasthttp.go b/v2/routebuildermdl/routebuilder_fasthttp.go
new file mode 100644
index 0000000000000000000000000000000000000000..919f000e0a9bed19169fd34c0aa5f4b4eb9da6bb
--- /dev/null
+++ b/v2/routebuildermdl/routebuilder_fasthttp.go
@@ -0,0 +1,368 @@
+// +build fasthttp
+
+package routebuildermdl
+
+import (
+	"context"
+	"net"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/roleenforcemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/pquerna/ffjson/ffjson"
+	routing "github.com/qiangxue/fasthttp-routing"
+	"github.com/tidwall/gjson"
+)
+
+// Init registers the MQL routes on the given route groups and sets the global JWT key
+func Init(o, r, c *routing.RouteGroup, JWTKey string) {
+	o.Post("/mql/state", statemdl.StateHandler)
+	o.Post("/mql", OpenHandler)
+	r.Post("/mql", RestrictedHandler)
+	c.Post("/mql", RoleBasedHandler)
+	o.Post("/heavymql", HeavyOpenHandler)
+	r.Post("/heavymql", HeavyRestrictedHandler)
+	c.Post("/heavymql", HeavyRoleBasedHandler)
+	jwtmdl.GlobalJWTKey = JWTKey
+}
+
+func commonHandler(c *routing.Context, isRestricted, isRoleBased, heavyDataActivity bool, principalObj servicebuildermdl.Principal) error {
+	serviceHeader := string(c.Request.Header.Peek("Service-Header"))
+	branch := CleanBranch(
+		strings.TrimSpace(string(c.Request.Header.Peek(Header_Branch))),
+	)
+	services := strings.Split(serviceHeader, ",")
+	versionError := appVersioning(c)
+	if versionError != nil {
+		_, err := c.WriteString(versionError.Error())
+		c.SetStatusCode(417)
+		return err
+	}
+	responseMap := make(map[string]responseData)
+	var reqBody []byte
+
+	reqBody = c.Request.Body()
+	requestBody := gjson.ParseBytes(reqBody)
+	for i := 0; i < len(services); i++ {
+		responseDataObj := responseData{}
+		service := services[i]
+		result, ab, isCompressed, errorCode, err := executeServiceWithBranch(service, branch, []byte(requestBody.Get(service).String()), isRestricted, isRoleBased, heavyDataActivity, principalObj)
+		if errormdl.CheckErr1(err) != nil {
+			if ab == nil {
+				responseDataObj.ErrorCode = errorCode
+				responseDataObj.Error = err.Error()
+			} else {
+				responseDataObj.Error = ab.GetErrorData()
+				if responseDataObj.Error == nil {
+					responseDataObj.Error = err.Error()
+				}
+				errorCode := ab.GetErrorCode()
+				if errorCode == 0 {
+					errorCode = errormdl.EXPECTATIONFAILED
+				}
+				responseDataObj.ErrorCode = errorCode
+				if ab.TransactionEnable {
+					loggermdl.LogError("transaction enabled rollback")
+
+					var err error
+					// database transaction rollback if transaction is enabled
+					switch ab.DatabaseType {
+					case dalmdl.MYSQL:
+						if ab.TXN != nil {
+							loggermdl.LogError("MYSQL Transaction Rollbacked")
+							err = ab.TXN.Rollback()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.MYSQLERROR
+							}
+						}
+
+					case dalmdl.SQLSERVER:
+						if ab.SQLServerTXN != nil {
+							loggermdl.LogError("SQLSERVER Transaction Rollbacked")
+							err = ab.SQLServerTXN.Rollback()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.SQLSERVERERROR
+							}
+						}
+
+					case dalmdl.GraphDB:
+						if ab.GraphDbTXN != nil {
+							loggermdl.LogError("GRAPHDB Transaction Rollbacked")
+							err = ab.GraphDbTXN.Discard(context.TODO())
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.GRAPHDBERROR
+							}
+						}
+
+					default:
+						loggermdl.LogError("Invalid database type while rollback transaction")
+
+					}
+
+				}
+			}
+		} else {
+
+			if ab != nil {
+				if ab.TransactionEnable {
+					var err error
+
+					switch ab.DatabaseType {
+					case dalmdl.MYSQL:
+						if ab.TXN != nil {
+							loggermdl.LogError("MYSQL Transaction Commit")
+							err = ab.TXN.Commit()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.MYSQLERROR
+							} else {
+								responseDataObj.Result = result
+								responseDataObj.ErrorCode = errormdl.NOERROR
+							}
+						}
+
+					case dalmdl.SQLSERVER:
+						if ab.SQLServerTXN != nil {
+							loggermdl.LogError("SQLSERVER Transaction Commit")
+							err = ab.SQLServerTXN.Commit()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.SQLSERVERERROR
+							} else {
+								responseDataObj.Result = result
+								responseDataObj.ErrorCode = errormdl.NOERROR
+							}
+						}
+
+					case dalmdl.GraphDB:
+						if ab.GraphDbTXN != nil {
+							loggermdl.LogError("GRAPHDB Transaction Commit")
+							err = ab.GraphDbTXN.Commit(context.TODO())
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.GRAPHDBERROR
+							} else {
+								responseDataObj.Result = result
+								responseDataObj.ErrorCode = errormdl.NOERROR
+							}
+						}
+
+					default:
+						loggermdl.LogError("Invalid database type while commit transaction")
+					}
+
+				} else {
+					responseDataObj.Result = result
+					responseDataObj.ErrorCode = errormdl.NOERROR
+				}
+			} else {
+				responseDataObj.Result = result
+				responseDataObj.ErrorCode = errormdl.NOERROR
+			}
+		}
+		responseDataObj.IsCompressed = isCompressed
+		responseDataObj = formatResponse(ab, responseDataObj)
+		responseMap[service] = responseDataObj
+		// Token extraction
+		// if ab != nil {
+		// 	token, ok := ab.GetDataString("MQLToken")
+		// 	if !ok {
+		// 		token = string(c.Request.Header.Peek("Authorization"))
+		// 		token = strings.TrimPrefix(token, "Bearer")
+		// 		token = strings.TrimSpace(token)
+		// 		c.Response.Header.Set("Authorization", token)
+		// 	}
+		// 	c.Response.Header.Set("Authorization", token)
+		// } else {
+		// 	token := string(c.Request.Header.Peek("Authorization"))
+		// 	token = strings.TrimPrefix(token, "Bearer")
+		// 	token = strings.TrimSpace(token)
+		// 	c.Response.Header.Set("Authorization", token)
+		// }
+
+		if ab != nil {
+			token, ok := ab.GetDataString("MQLToken")
+			if ok {
+				c.Response.Header.Set("Authorization", token)
+			}
+		}
+	}
+	ba, _ := ffjson.Marshal(responseMap)
+	_, err := c.Write(ba)
+	c.SetStatusCode(200)
+	return err
+}
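+
+// The handler above writes one responseData entry per requested service, keyed
+// by the names from the Service-Header, e.g. (shape only, values illustrative):
+//
+//	{"LoginService": {"result": "...", "error": null, "reponseHeader": null,
+//		"errorCode": 0, "debugInfo": {...}, "isCompressed": false,
+//		"serverTime": "..."}}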
+
+// OpenHandler for /o
+func OpenHandler(c *routing.Context) error {
+	c.Response.Header.Set("content-type", "application/json")
+	principal := servicebuildermdl.Principal{}
+	principal.ClientIP = getClientIP(c)
+	return commonHandler(c, false, false, false, principal)
+}
+
+// RestrictedHandler for /r
+func RestrictedHandler(c *routing.Context) error {
+	c.Response.Header.Set("content-type", "application/json")
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		_, err := c.WriteString(extractError.Error())
+		c.SetStatusCode(412)
+		return err
+	}
+	return commonHandler(c, true, false, false, pricipalObj)
+}
+
+// RoleBasedHandler for /r/c
+func RoleBasedHandler(c *routing.Context) error {
+	c.Response.Header.Set("content-type", "application/json")
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		_, err := c.WriteString(extractError.Error())
+		c.SetStatusCode(412)
+		return err
+	}
+	return commonHandler(c, true, true, false, pricipalObj)
+}
+
+// HeavyOpenHandler for /o
+func HeavyOpenHandler(c *routing.Context) error {
+	c.Response.Header.Set("content-type", "application/json")
+	principal := servicebuildermdl.Principal{}
+	principal.ClientIP = getClientIP(c)
+	return commonHandler(c, false, false, true, principal)
+}
+
+// HeavyRestrictedHandler for /r
+func HeavyRestrictedHandler(c *routing.Context) error {
+	c.Response.Header.Set("content-type", "application/json")
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		_, err := c.WriteString(extractError.Error())
+		c.SetStatusCode(412)
+		return err
+	}
+	return commonHandler(c, true, false, true, pricipalObj)
+}
+
+// HeavyRoleBasedHandler for /r/c
+func HeavyRoleBasedHandler(c *routing.Context) error {
+	c.Response.Header.Set("content-type", "application/json")
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		_, err := c.WriteString(extractError.Error())
+		c.SetStatusCode(412)
+		return err
+	}
+	return commonHandler(c, true, true, true, pricipalObj)
+}
+
+func appVersioning(c *routing.Context) error {
+	if isAppVersionEnabled {
+		appVersion := string(c.Request.Header.Peek("app-version"))
+		if appVersion == "" {
+			return errormdl.Wrap("No App version Found in request header")
+		}
+		ver, err := version.NewVersion(appVersion)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+		if isStrictMode {
+			if !ver.Equal(applicationVersion) {
+				return errormdl.Wrap("Application version mismatched")
+			}
+		} else {
+			if ver.GreaterThan(applicationVersion) {
+				return errormdl.Wrap("Server Version is outdated")
+			}
+			if ver.LessThan(minimumSupportedVersion) {
+				return errormdl.Wrap("Client Version is outdated")
+			}
+		}
+	}
+	return nil
+}
+
+func extractPricipalObject(c *routing.Context) (servicebuildermdl.Principal, error) {
+	principal := servicebuildermdl.Principal{}
+	if jwtmdl.GlobalJWTKey == "" {
+		return principal, errormdl.Wrap("No Global JWT key found")
+	}
+
+	claim, decodeError := jwtmdl.DecodeToken(&c.Request)
+	if errormdl.CheckErr(decodeError) != nil {
+		// loggermdl.LogError(decodeError)
+		return principal, errormdl.CheckErr(decodeError)
+	}
+
+	groups, grperr := roleenforcemdl.GetGroupNames(claim, "groups")
+	if errormdl.CheckErr(grperr) != nil {
+		loggermdl.LogError(grperr)
+		return principal, errormdl.CheckErr(grperr)
+	}
+	userID, _ := claim["userId"].(string)
+	if len(userID) < 2 {
+		loggermdl.LogError("UserID length is less than 2")
+		return principal, errormdl.Wrap("UserID length is less than 2")
+	}
+
+	rawMetadata, ok := claim["metadata"]
+	if ok {
+		metadata, ok := rawMetadata.(string)
+		if !ok {
+			loggermdl.LogError("Unable to parse metadata from JWT Token")
+			return principal, errormdl.Wrap("Unable to parse metadata from JWT Token")
+		}
+		principal.Metadata = metadata
+	}
+	principal.Groups = groups
+	principal.UserID = userID
+	principal.Token = string(c.Request.Header.Peek("Authorization"))
+	// set client ip
+	principal.ClientIP = getClientIP(c)
+	return principal, nil
+}
+
+// getClientIP - returns the client IP from the X-Real-Ip or X-Forwarded-For header, falling back to the remote address
+func getClientIP(c *routing.Context) string {
+	clientIP := string(c.Request.Header.Peek("X-Real-Ip"))
+	if clientIP == "" {
+		clientIP = string(c.Request.Header.Peek("X-Forwarded-For"))
+	}
+	if clientIP == "" {
+		clientIP, _, splitHostPortError := net.SplitHostPort(c.RemoteIP().String())
+		if splitHostPortError == nil && isCorrectIP(clientIP) {
+			return clientIP
+		}
+		return ""
+	}
+	if isCorrectIP(clientIP) {
+		return clientIP
+	}
+	return ""
+}
+
+// isCorrectIP - returns true if ip is a valid textual representation of an IP address, else returns false
+func isCorrectIP(ip string) bool {
+	return net.ParseIP(ip) != nil
+}
diff --git a/v2/routebuildermdl/routebuilder_gin.go b/v2/routebuildermdl/routebuilder_gin.go
new file mode 100644
index 0000000000000000000000000000000000000000..6fb4bd38c8493d3ea82c1f4d554ae3c43690daf1
--- /dev/null
+++ b/v2/routebuildermdl/routebuilder_gin.go
@@ -0,0 +1,339 @@
+// +build !fasthttp
+
+package routebuildermdl
+
+import (
+	"context"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/jwtmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/authmdl/roleenforcemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+
+	"github.com/gin-gonic/gin"
+	version "github.com/hashicorp/go-version"
+	"github.com/tidwall/gjson"
+)
+
+// Init registers the MQL routes on the given route groups and sets the global JWT key
+func Init(o, r, c *gin.RouterGroup, JWTKey string) {
+	o.POST("/mql/state", statemdl.StateHandler)
+	o.POST("/mql", OpenHandler)
+	r.POST("/mql", RestrictedHandler)
+	c.POST("/mql", RoleBasedHandler)
+	o.POST("/heavymql", HeavyOpenHandler)
+	r.POST("/heavymql", HeavyRestrictedHandler)
+	c.POST("/heavymql", HeavyRoleBasedHandler)
+	jwtmdl.GlobalJWTKey = JWTKey
+}
+
+func commonHandler(c *gin.Context, isRestricted, isRoleBased, heavyDataActivity bool, principalObj servicebuildermdl.Principal) {
+	serviceHeader := c.Request.Header.Get("Service-Header")
+	branch := CleanBranch(
+		strings.TrimSpace(c.Request.Header.Get(Header_Branch)),
+	)
+	services := strings.Split(serviceHeader, ",")
+	versionError := appVersioning(c)
+	if versionError != nil {
+		c.JSON(http.StatusExpectationFailed, versionError.Error())
+		return
+	}
+	responseMap := make(map[string]responseData)
+	responseDataObj := responseData{}
+	var reqBody []byte
+
+	var readError error
+	reqBody, readError = ioutil.ReadAll(c.Request.Body)
+	if errormdl.CheckErr2(readError) != nil {
+		responseDataObj.Error = errormdl.CheckErr2(readError).Error()
+		responseDataObj.ErrorCode = errormdl.EXPECTATIONFAILED
+		loggermdl.LogError(readError)
+		c.JSON(http.StatusExpectationFailed, responseDataObj)
+		return
+	}
+
+	requestBody := gjson.ParseBytes(reqBody)
+	for i := 0; i < len(services); i++ {
+		responseDataObj := responseData{}
+		service := services[i]
+		result, ab, isCompressed, errorCode, err := executeServiceWithBranch(service, branch, []byte(requestBody.Get(service).String()), isRestricted, isRoleBased, heavyDataActivity, principalObj)
+		if errormdl.CheckErr1(err) != nil {
+			if ab == nil {
+				responseDataObj.ErrorCode = errorCode
+				responseDataObj.Error = err.Error()
+			} else {
+				responseDataObj.Error = ab.GetErrorData()
+				if responseDataObj.Error == nil {
+					responseDataObj.Error = err.Error()
+				}
+				errorCode := ab.GetErrorCode()
+				if errorCode == 0 {
+					errorCode = errormdl.EXPECTATIONFAILED
+				}
+				responseDataObj.ErrorCode = errorCode
+				if ab.TransactionEnable {
+					loggermdl.LogError("transaction enabled rollback")
+
+					var err error
+					// database transaction rollback if transaction is enabled
+					switch ab.DatabaseType {
+					case dalmdl.MYSQL:
+						if ab.TXN != nil {
+							loggermdl.LogError("MYSQL Transaction Rollbacked")
+							err = ab.TXN.Rollback()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.MYSQLERROR
+							}
+						}
+
+					case dalmdl.SQLSERVER:
+						if ab.SQLServerTXN != nil {
+							loggermdl.LogError("SQLSERVER Transaction Rollbacked")
+							err = ab.SQLServerTXN.Rollback()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.SQLSERVERERROR
+							}
+						}
+
+					case dalmdl.GraphDB:
+						if ab.GraphDbTXN != nil {
+							loggermdl.LogError("GRAPHDB Transaction Rollbacked")
+							err = ab.GraphDbTXN.Discard(context.TODO())
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.GRAPHDBERROR
+							}
+						}
+
+					default:
+						loggermdl.LogError("Invalid database type while rollback transaction")
+
+					}
+
+				}
+			}
+		} else {
+
+			if ab != nil {
+				if ab.TransactionEnable {
+					var err error
+
+					switch ab.DatabaseType {
+					case dalmdl.MYSQL:
+						if ab.TXN != nil {
+							loggermdl.LogError("MYSQL Transaction Commit")
+							err = ab.TXN.Commit()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.MYSQLERROR
+							} else {
+								responseDataObj.Result = result
+								responseDataObj.ErrorCode = errormdl.NOERROR
+							}
+						}
+
+					case dalmdl.SQLSERVER:
+						if ab.SQLServerTXN != nil {
+							loggermdl.LogError("SQLSERVER Transaction Commit")
+							err = ab.SQLServerTXN.Commit()
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.SQLSERVERERROR
+							} else {
+								responseDataObj.Result = result
+								responseDataObj.ErrorCode = errormdl.NOERROR
+							}
+						}
+
+					case dalmdl.GraphDB:
+						if ab.GraphDbTXN != nil {
+							loggermdl.LogError("GRAPHDB Transaction Commit")
+							err = ab.GraphDbTXN.Commit(context.TODO())
+							if err != nil {
+								responseDataObj.Error = err.Error()
+								responseDataObj.ErrorCode = errormdl.GRAPHDBERROR
+							} else {
+								responseDataObj.Result = result
+								responseDataObj.ErrorCode = errormdl.NOERROR
+							}
+						}
+
+					default:
+						loggermdl.LogError("Invalid database type while commit transaction")
+
+					}
+				} else {
+					responseDataObj.Result = result
+					responseDataObj.ErrorCode = errormdl.NOERROR
+				}
+			} else {
+				responseDataObj.Result = result
+				responseDataObj.ErrorCode = errormdl.NOERROR
+			}
+		}
+		responseDataObj.IsCompressed = isCompressed
+		responseDataObj = formatResponse(ab, responseDataObj)
+		responseMap[service] = responseDataObj
+		// Token extraction
+		if ab != nil {
+			token, _ := ab.GetDataString("MQLToken")
+			c.Header("Authorization", token)
+		}
+	}
+	c.JSON(http.StatusOK, responseMap)
+}
+
+// OpenHandler for /o
+func OpenHandler(c *gin.Context) {
+	principal := servicebuildermdl.Principal{}
+	principal.ClientIP = getClientIP(c)
+	commonHandler(c, false, false, false, principal)
+}
+
+// RestrictedHandler for /r
+func RestrictedHandler(c *gin.Context) {
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		c.JSON(http.StatusExpectationFailed, extractError.Error())
+		return
+	}
+	commonHandler(c, true, false, false, pricipalObj)
+}
+
+// RoleBasedHandler for /r/c
+func RoleBasedHandler(c *gin.Context) {
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		c.JSON(http.StatusExpectationFailed, extractError.Error())
+		return
+	}
+	commonHandler(c, true, true, false, pricipalObj)
+}
+
+// HeavyOpenHandler for /o
+func HeavyOpenHandler(c *gin.Context) {
+	principal := servicebuildermdl.Principal{}
+	principal.ClientIP = getClientIP(c)
+	commonHandler(c, false, false, true, principal)
+}
+
+// HeavyRestrictedHandler for /r
+func HeavyRestrictedHandler(c *gin.Context) {
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		c.JSON(http.StatusExpectationFailed, extractError.Error())
+		return
+	}
+	commonHandler(c, true, false, true, pricipalObj)
+}
+
+// HeavyRoleBasedHandler for /r/c
+func HeavyRoleBasedHandler(c *gin.Context) {
+	pricipalObj, extractError := extractPricipalObject(c)
+	if extractError != nil {
+		loggermdl.LogError(extractError)
+		c.JSON(http.StatusExpectationFailed, extractError.Error())
+		return
+	}
+	commonHandler(c, true, true, true, pricipalObj)
+}
+
+func appVersioning(c *gin.Context) error {
+	if isAppVersionEnabled {
+		appVersion := c.Request.Header.Get("app-version")
+		if appVersion == "" {
+			return errormdl.Wrap("No App version Found in request header")
+		}
+		ver, err := version.NewVersion(appVersion)
+		if errormdl.CheckErr(err) != nil {
+			return errormdl.CheckErr(err)
+		}
+		if isStrictMode {
+			if !ver.Equal(applicationVersion) {
+				return errormdl.Wrap("Application version mismatched")
+			}
+		} else {
+			if ver.GreaterThan(applicationVersion) {
+				return errormdl.Wrap("Server Version is outdated")
+			}
+			if ver.LessThan(minimumSupportedVersion) {
+				return errormdl.Wrap("Client Version is outdated")
+			}
+		}
+	}
+	return nil
+}
+
+func extractPricipalObject(c *gin.Context) (servicebuildermdl.Principal, error) {
+	principal := servicebuildermdl.Principal{}
+	if jwtmdl.GlobalJWTKey == "" {
+		return principal, errormdl.Wrap("No Global JWT key found")
+	}
+	claim, decodeError := jwtmdl.DecodeToken(c.Request)
+	if errormdl.CheckErr(decodeError) != nil {
+		return principal, errormdl.CheckErr(decodeError)
+	}
+
+	groups, grperr := roleenforcemdl.GetGroupNames(claim, "groups")
+	if errormdl.CheckErr(grperr) != nil {
+		loggermdl.LogError(grperr)
+		return principal, errormdl.CheckErr(grperr)
+	}
+	userID, _ := claim["userId"].(string)
+	if len(userID) < 2 {
+		loggermdl.LogError("UserID length is less than 2")
+		return principal, errormdl.Wrap("UserID length is less than 2")
+	}
+
+	rawMetadata, ok := claim["metadata"]
+	if ok {
+		metadata, ok := rawMetadata.(string)
+		if !ok {
+			loggermdl.LogError("Unable to parse metadata from JWT Token")
+			return principal, errormdl.Wrap("Unable to parse metadata from JWT Token")
+		}
+		principal.Metadata = metadata
+	}
+	principal.Groups = groups
+	principal.UserID = userID
+	principal.Token = c.Request.Header.Get("Authorization")
+	principal.ClientIP = getClientIP(c)
+	return principal, nil
+}
+
+// getClientIP - returns the client IP from the X-Real-Ip or X-Forwarded-For header, falling back to the remote address
+func getClientIP(c *gin.Context) string {
+	clientIP := c.Request.Header.Get("X-Real-Ip")
+	if clientIP == "" {
+		clientIP = c.Request.Header.Get("X-Forwarded-For")
+	}
+	if clientIP == "" {
+		clientIP, _, splitHostPortError := net.SplitHostPort(c.Request.RemoteAddr)
+		if splitHostPortError == nil && isCorrectIP(clientIP) {
+			return clientIP
+		}
+		return ""
+	}
+	if isCorrectIP(clientIP) {
+		return clientIP
+	}
+	return ""
+}
+
+// isCorrectIP - returns true if ip is a valid textual representation of an IP address, else returns false
+func isCorrectIP(ip string) bool {
+	return net.ParseIP(ip) != nil
+}
diff --git a/v2/routebuildermdl/routebuildermdl.go b/v2/routebuildermdl/routebuildermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..92e4f5611a01fdad6099ab442bf6ff4221ced400
--- /dev/null
+++ b/v2/routebuildermdl/routebuildermdl.go
@@ -0,0 +1,203 @@
+package routebuildermdl
+
+import (
+	"encoding/json"
+	"reflect"
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/statemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	Header_Branch = "Service-Branch"
+)
+
+// CleanBranch returns the "main" branch when the input is empty after trimming
+// spaces; otherwise it returns the input unchanged (including any spaces).
+func CleanBranch(branch string) string {
+	if strings.TrimSpace(branch) == "" {
+		branch = constantmdl.Branch_Main
+	}
+
+	return branch
+}
+
+func ConcatenateEntityWithBranch(name, branch string) string {
+
+	return name + constantmdl.Branch_Separator + CleanBranch(branch)
+}
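+
+// Illustrative behaviour of the two helpers above (assuming
+// constantmdl.Branch_Main is "main"):
+//
+//	CleanBranch("")      == "main"
+//	CleanBranch(" dev ") == " dev " // non-empty input is returned as-is
+//	ConcatenateEntityWithBranch("Login", "") == "Login" + constantmdl.Branch_Separator + "main"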
+
+func setResponseHeader(serviceName string) responseData {
+	rd := responseData{}
+	val, ok := GetResponseHeader(serviceName)
+	if ok {
+		rd.ResponseHeader = val
+	}
+	return rd
+}
+
+func executeServiceWithBranch(name, branch string, data []byte, isRestricted, isRoleBased, heavyDataActivity bool, principalObj servicebuildermdl.Principal) (interface{}, *servicebuildermdl.AbstractBusinessLogicHolder, bool, int, error) {
+
+	start := time.Now()
+	var isCompressed bool
+	var service interface{}
+	var ab *servicebuildermdl.AbstractBusinessLogicHolder
+	var found bool
+
+	// for calling queries as queries are not associated with any branch
+	activity := name
+	if name != "FetchQueryData" {
+		activity = ConcatenateEntityWithBranch(name, branch)
+	}
+
+	if isRestricted {
+		if isRoleBased {
+			service, found = roleBasedServices.Get(activity)
+		} else {
+			service, found = restrictedServices.Get(activity)
+		}
+	} else {
+		service, found = openServices.Get(activity)
+	}
+
+	if !found {
+		loggermdl.LogError("Service Not Found: " + activity)
+		return nil, ab, isCompressed, errormdl.SERVICENOTFOUND, errormdl.Wrap("Service Not Found: " + name)
+	}
+
+	// FetchQueryData & FetchGCData
+	// We are not validating token for these two activities here.
+	// we are checking & validating token inside these routes because these routes are used for open, restricted & rolebased actors.
+	if isRestricted && isRoleBased && name != "FetchQueryData" && name != "FetchGCData" {
+		if !validateRoleFromToken(principalObj, service.(ServiceCache)) {
+			loggermdl.LogError("INVALID_ACTOR: " + name)
+			return nil, ab, isCompressed, errormdl.SERVICENOTFOUND, errormdl.Wrap("INVALID_ACTOR: " + name)
+		}
+	}
+	var result interface{}
+	var serviceError error
+	serviceCache := service.(ServiceCache)
+	if serviceCache.HeavyDataActivity {
+		if !heavyDataActivity {
+			return nil, ab, isCompressed, errormdl.SERVICENOTFOUND, errormdl.Wrap("Service is marked as heavy data... kindly use heavymql route")
+		}
+	}
+	if serviceCache.AsyncService {
+		asyncToken := guidmdl.GetGUID()
+		data, err := sjson.SetBytes(data, "asyncToken", asyncToken)
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return nil, nil, isCompressed, errormdl.SJSONERROR, errormdl.CheckErr(err)
+		}
+		rs := gjson.ParseBytes(data)
+		serviceCache.preHooksExec(&rs, &principalObj)
+		go serviceCache.Service(&rs, principalObj)
+		servingTime := time.Since(start)
+		go statemdl.UpdateServiceStateWithBranch(name, branch, servingTime, serviceError, isRestricted, isRoleBased)
+		return asyncToken, nil, isCompressed, errormdl.NOERROR, nil
+	}
+	rs := gjson.ParseBytes(data)
+
+	// This step is actual execution of service
+	serviceCache.preHooksExec(&rs, &principalObj)
+	result, ab, serviceError = serviceCache.Service(&rs, principalObj)
+
+	servingTime := time.Since(start)
+	// Record State for every service
+	go statemdl.UpdateServiceStateWithBranch(name, branch, servingTime, serviceError, isRestricted, isRoleBased)
+
+	if serviceError == nil {
+		serviceCache.postHooksExec(result, &principalObj)
+		if serviceCache.HeavyDataActivity {
+			isCompressed = true
+		}
+	}
+
+	return result, ab, isCompressed, errormdl.EXPECTATIONFAILED, serviceError
+}
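+
+// Note on the async path above: when a service is registered with AsyncService
+// set, the caller immediately receives the generated asyncToken (also injected
+// into the payload as "asyncToken") while the service runs in a goroutine; the
+// token is the caller's handle for any later result lookup.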
+
+// executeService is same as `executeServiceWithBranch` but works for `main` branch
+func executeService(name string, data []byte, isRestricted, isRoleBased, heavyDataActivity bool, principalObj servicebuildermdl.Principal) (interface{}, *servicebuildermdl.AbstractBusinessLogicHolder, bool, int, error) {
+
+	return executeServiceWithBranch(name, constantmdl.Branch_Main, data, isRestricted, isRoleBased, heavyDataActivity, principalObj)
+}
+
+func validateRoleFromToken(principalObj servicebuildermdl.Principal, service ServiceCache) bool {
+	// check if group from request is present in groups associated with the service.
+	for _, g := range service.Groups {
+		for _, tokenGroup := range principalObj.Groups {
+			if g == tokenGroup {
+				return true
+			}
+		}
+	}
+	return false
+}
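+
+// Example: a service registered with Groups ["admin", "auditor"] passes this
+// check for a token whose groups claim contains "admin"; a token carrying only
+// "student" fails, and the caller above rejects the request as INVALID_ACTOR.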
+
+func (s ServiceCache) preHooksExec(rs *gjson.Result, principalObj *servicebuildermdl.Principal) {
+	for i := 0; i < len(s.PreHooks); i++ {
+		var service interface{}
+		activityName := ConcatenateEntityWithBranch(s.PreHooks[i].ActivityName, s.PreHooks[i].Branch)
+		var found bool
+		if s.PreHooks[i].ActorType == "ROLEBASED" {
+			service, found = roleBasedServices.Get(activityName)
+		}
+		if s.PreHooks[i].ActorType == "RESTRICTED" {
+			service, found = restrictedServices.Get(activityName)
+		}
+		if s.PreHooks[i].ActorType == "OPEN" {
+			service, found = openServices.Get(activityName)
+		}
+		if !found {
+			loggermdl.LogError("Pre Hook Not found: ", activityName, " for actor type: ", s.PreHooks[i].ActorType, " branch:", s.PreHooks[i].Branch)
+			return
+		}
+		serviceCache := service.(ServiceCache)
+		go serviceCache.Service(rs, *principalObj)
+	}
+}
+
+func (s ServiceCache) postHooksExec(data interface{}, principalObj *servicebuildermdl.Principal) {
+	if len(s.PostHooks) == 0 {
+		return
+	}
+	rs := gjson.Result{}
+	if data != nil {
+		// data can be nil; calling reflect.TypeOf(data).String() would then panic, so inspect only non-nil values.
+		objType := reflect.TypeOf(data).String()
+		if strings.Contains(objType, "map[string]") {
+			ba, _ := json.Marshal(data)
+			rs = gjson.ParseBytes(ba)
+		}
+	}
+	for i := 0; i < len(s.PostHooks); i++ {
+		activityName := ConcatenateEntityWithBranch(s.PostHooks[i].ActivityName, s.PostHooks[i].Branch)
+		var service interface{}
+		var found bool
+		if s.PostHooks[i].ActorType == "ROLEBASED" {
+			service, found = roleBasedServices.Get(activityName)
+		}
+		if s.PostHooks[i].ActorType == "RESTRICTED" {
+			service, found = restrictedServices.Get(activityName)
+		}
+		if s.PostHooks[i].ActorType == "OPEN" {
+			service, found = openServices.Get(activityName)
+		}
+		if !found {
+			loggermdl.LogError("Post Hook Not found: ", activityName, " for actor type: ", s.PostHooks[i].ActorType, " Branch:", s.PostHooks[i].Branch)
+			return
+		}
+		serviceCache := service.(ServiceCache)
+		go serviceCache.Service(&rs, *principalObj)
+	}
+}
diff --git a/v2/routebuildermdl/routebuildermdl_test.go b/v2/routebuildermdl/routebuildermdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d65c268b8e103c41e363c7e1481348998e0fde80
--- /dev/null
+++ b/v2/routebuildermdl/routebuildermdl_test.go
@@ -0,0 +1,226 @@
+package routebuildermdl
+
+// import (
+// 	"bytes"
+// 	"fmt"
+// 	"mime/multipart"
+// 	"net/http"
+// 	"net/http/httptest"
+// 	"testing"
+
+// 	"github.com/tidwall/gjson"
+
+// 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+
+// 	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+// 	"github.com/stretchr/testify/assert"
+
+// 	"github.com/gin-gonic/gin"
+// )
+
+// var router *gin.Engine
+
+// func DummyService(data *gjson.Result, pricipalObj servicebuildermdl.Principal) (interface{}, error) {
+// 	return "success", nil
+// }
+
+// func RDummyService(data *gjson.Result, pricipalObj servicebuildermdl.Principal) (interface{}, error) {
+// 	fmt.Println(pricipalObj)
+// 	return "success", nil
+// }
+
+// func RBDummyService(data *gjson.Result, pricipalObj servicebuildermdl.Principal) (interface{}, error) {
+// 	return "success", nil
+// }
+// func MultiparService(form *multipart.Form, pricipalObj servicebuildermdl.Principal) (interface{}, error) {
+// 	return "success", nil
+// }
+
+// type testBl struct {
+// 	servicebuildermdl.AbstractBusinessLogicHolder
+// }
+
+// func GetBLHolder() *testBl {
+// 	test := testBl{}
+// 	test.New(&servicebuildermdl.Principal{})
+// 	return &test
+// }
+
+// // func MasterService() {
+// // 	// test := GetBLHolder()
+// // 	// sb := servicebuildermdl.GetSB("MasterService", &test.AbstractBusinessLogicHolder).
+// // 	// 	AddStep("Test -1", "", nil, nil, nil)
+// // 	// RegisterMasterService("MasterService", sb, true, true)
+
+// // }
+// func init() {
+// 	router = gin.New()
+// 	o := router.Group("/o")
+// 	r := router.Group("/r")
+// 	c := r.Group("/c")
+// 	Init(o, r, c, "mysupersecretpassword")
+// 	CreateResponseHeader("DummyService").
+// 		EnableReponseCache().
+// 		AddMethod("test", nil).
+// 		SetResponseHeader()
+// 	CreateResponseHeader("RDummyService").SetResponseHeader()
+// 	RegisterNormalService("DummyService", DummyService, false, false)
+// 	RegisterNormalService("RDummyService", RDummyService, true, false)
+// 	RegisterNormalService("RBDummyService", RBDummyService, true, true)
+// 	RegisterFormBasedService("MultiparService", MultiparService, true, true)
+// 	CreateResponseHeader("DummyService").
+// 		DisableReponseCache().
+// 		SetResponseHeader()
+// 	// MasterService()
+// }
+
+// func Test_OpenHandler(t *testing.T) {
+// 	login := `
+// 	{
+// 		"loginId": "demo",
+// 		"password": "demo"
+// 	}
+// 	`
+// 	req, _ := http.NewRequest("POST", "/o/mql", bytes.NewBuffer([]byte(login)))
+// 	req.Header.Set("Service-Header", "DummyService")
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 200)
+// }
+
+// func Test_RestrictedHandler(t *testing.T) {
+// 	login := `
+// 	{
+// 		"loginId": "demo",
+// 		"password": "demo"
+// 	}
+// 	`
+// 	req, _ := http.NewRequest("POST", "/r/mql", bytes.NewBuffer([]byte(login)))
+// 	req.Header.Set("Service-Header", "RDummyService")
+// 	req.Header.Set("Authorization", "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJzYW5kZWVwc3MiLCJncm91cHMiOlsiYWRtaW4iXX0.chD9SRf_UqnV9eT5PQ-z52iA7iOFWB4Ck0BuJVF5Sgo")
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 200)
+// }
+
+// func Test_RoleBasedHandler(t *testing.T) {
+// 	login := `
+// 	{
+// 		"loginId": "demo",
+// 		"password": "demo"
+// 	}
+// 	`
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", bytes.NewBuffer([]byte(login)))
+// 	req.Header.Set("Service-Header", "RBDummyService")
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 200)
+// }
+
+// func Test1_RestrictedHandler(t *testing.T) {
+// 	login := `
+// 	{
+// 		"loginId": "demo",
+// 		"password": "demo"
+// 	}
+// 	`
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", bytes.NewBuffer([]byte(login)))
+// 	req.Header.Set("Service-Header", "RDummyService")
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 417)
+// }
+// func Test2_RestrictedHandler(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+// 	login := `
+// 	{
+// 		"loginId": "demo",
+// 		"password": "demo"
+// 	}
+// 	`
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", bytes.NewBuffer([]byte(login)))
+// 	req.Header.Set("Service-Header", "RDummyService")
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+// 	errormdl.IsTestingNegetiveCaseOn2 = true
+
+// 	assert.Equal(t, resp.Code, 417)
+// }
+
+// func Test1_MultipartHandler(t *testing.T) {
+// 	body := &bytes.Buffer{}
+// 	writer := multipart.NewWriter(body)
+// 	part, _ := writer.CreateFormFile("file", "tmp.json")
+// 	part.Write([]byte("tmp data"))
+// 	writer.Close()
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", body)
+// 	req.Header.Set("Service-Header", "MultiparService")
+// 	req.Header.Add("Content-Type", writer.FormDataContentType())
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 200)
+// }
+
+// func Test2_MultipartHandler(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn = true
+// 	body := &bytes.Buffer{}
+// 	writer := multipart.NewWriter(body)
+// 	part, _ := writer.CreateFormFile("file", "tmp.json")
+// 	part.Write([]byte("tmp data"))
+// 	writer.Close()
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", body)
+// 	req.Header.Set("Service-Header", "MultiparService")
+// 	req.Header.Add("Content-Type", writer.FormDataContentType())
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+// 	errormdl.IsTestingNegetiveCaseOn = false
+
+// 	assert.Equal(t, resp.Code, 417)
+// }
+
+// func Test3_MultipartHandler(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOn1 = true
+// 	body := &bytes.Buffer{}
+// 	writer := multipart.NewWriter(body)
+// 	part, _ := writer.CreateFormFile("file", "tmp.json")
+// 	part.Write([]byte("tmp data"))
+// 	writer.Close()
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", body)
+// 	req.Header.Set("Service-Header", "MultiparService")
+// 	req.Header.Add("Content-Type", writer.FormDataContentType())
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+// 	errormdl.IsTestingNegetiveCaseOn1 = false
+
+// 	assert.Equal(t, resp.Code, 417)
+// }
+// func Test4_MultipartHandler(t *testing.T) {
+// 	body := &bytes.Buffer{}
+// 	writer := multipart.NewWriter(body)
+// 	part, _ := writer.CreateFormFile("file", "tmp.json")
+// 	part.Write([]byte("tmp data"))
+// 	writer.Close()
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", body)
+// 	req.Header.Set("Service-Header", "MultiparService")
+// 	// req.Header.Add("Content-Type", writer.FormDataContentType())
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 417)
+// }
+
+// func Test_MasterService(t *testing.T) {
+
+// 	req, _ := http.NewRequest("POST", "/r/c/mql", nil)
+// 	req.Header.Set("Service-Header", "MasterService")
+// 	resp := httptest.NewRecorder()
+// 	router.ServeHTTP(resp, req)
+
+// 	assert.Equal(t, resp.Code, 417)
+// }
diff --git a/v2/routebuildermdl/serviceCachemdl.go b/v2/routebuildermdl/serviceCachemdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..c2de7dc0ccb7a9150f3b7aafdf1753b2924bd101
--- /dev/null
+++ b/v2/routebuildermdl/serviceCachemdl.go
@@ -0,0 +1,207 @@
+package routebuildermdl
+
+import (
+	"strings"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/servicebuildermdl"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/tidwall/gjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+)
+
+var (
+	restrictedServices cachemdl.FastCacheHelper
+	roleBasedServices  cachemdl.FastCacheHelper
+	openServices       cachemdl.FastCacheHelper
+	loginService       func(*gjson.Result, servicebuildermdl.Principal) (interface{}, string, error)
+
+	applicationVersion      *version.Version
+	minimumSupportedVersion *version.Version
+	isAppVersionEnabled     bool
+	isStrictMode            bool
+)
+
+func init() {
+	restrictedServices.Setup(10, time.Second*600, time.Second*600)
+	openServices.Setup(10, time.Second*600, time.Second*600)
+	roleBasedServices.Setup(10, time.Second*600, time.Second*600)
+}
+
+// Hook - a pre- or post-execution activity attached to a service
+type Hook struct {
+	ActivityName string
+	ActorType    string
+	Branch       string
+}
+
+// ServiceCache - holds a registered service function along with its execution metadata
+type ServiceCache struct {
+	Service            func(*gjson.Result, servicebuildermdl.Principal) (interface{}, *servicebuildermdl.AbstractBusinessLogicHolder, error)
+	AsyncService       bool
+	HeavyDataActivity  bool
+	PreHooks           []Hook
+	PostHooks          []Hook
+	Groups             []string
+	TransactionEnabled bool
+	DatabaseType       string // database type for transaction begin(MYSQL,SQLSERVER etc.)
+	TransactionHost    string // host to begin transaction. if it is empty then transaction will begin on default host
+}
+
+// ServiceType - the function signature implemented by every registered service
+type ServiceType = func(*gjson.Result, servicebuildermdl.Principal) (interface{}, *servicebuildermdl.AbstractBusinessLogicHolder, error)
+
+// GetRestrictedServicePtr - returns the service function registered in the restricted services cache
+func GetRestrictedServicePtr(serviceName string) (ServiceType, error) {
+	intService, ok := restrictedServices.Get(serviceName)
+	if !ok {
+		return nil, errormdl.Wrap("Service Not found")
+	}
+	serviceCache, ok := intService.(ServiceCache)
+	if !ok {
+		return nil, errormdl.Wrap("Service Not Casted")
+	}
+	return serviceCache.Service, nil
+}
+
+// GetOpenServicePtr - returns the service function registered in the open services cache
+func GetOpenServicePtr(serviceName string) (ServiceType, error) {
+	intService, ok := openServices.Get(serviceName)
+	if !ok {
+		return nil, errormdl.Wrap("Service Not found")
+	}
+	serviceCache, ok := intService.(ServiceCache)
+	if !ok {
+		return nil, errormdl.Wrap("Service Not Casted")
+	}
+	return serviceCache.Service, nil
+}
+
+// GetRoleBasedServicePtr - returns the service function registered in the rolebased services cache
+func GetRoleBasedServicePtr(serviceName string) (ServiceType, error) {
+	intService, ok := roleBasedServices.Get(serviceName)
+	if !ok {
+		return nil, errormdl.Wrap("Service Not found")
+	}
+	serviceCache, ok := intService.(ServiceCache)
+	if !ok {
+		return nil, errormdl.Wrap("Service Not Casted")
+	}
+	return serviceCache.Service, nil
+}
+
+// RegisterNormalService registers a plain service function in the appropriate service cache
+func RegisterNormalService(serviceName string, servicePtr ServiceType, isRestricted, isRoleBased bool) {
+	service := ServiceCache{
+		Service: servicePtr,
+	}
+	commonServiceRegistration(serviceName, service, isRestricted, isRoleBased)
+}
+
+// RegisterServiceInSLS registers a prebuilt ServiceCache entry (as used by SLS) in the appropriate service cache
+func RegisterServiceInSLS(serviceName string, servicePtr ServiceCache, isRestricted, isRoleBased bool) {
+	// service := ServiceCache{
+	// 	Service: servicePtr,
+	// }
+	commonServiceRegistration(serviceName, servicePtr, isRestricted, isRoleBased)
+}
+
+// RegisterLoginSerivce registers the login service handler
+func RegisterLoginSerivce(service func(*gjson.Result, servicebuildermdl.Principal) (interface{}, string, error)) {
+	loginService = service
+}
+
+func commonServiceRegistration(serviceName string, service ServiceCache, isRestricted, isRoleBased bool) {
+	if isRestricted {
+		if isRoleBased {
+			roleBasedServices.SetNoExpiration(serviceName, service)
+		} else {
+			restrictedServices.SetNoExpiration(serviceName, service)
+		}
+	} else {
+		openServices.SetNoExpiration(serviceName, service)
+	}
+}
+
+// SetAppVersion - sets the application version and the minimum supported client version for request validation
+func SetAppVersion(appVersion, minSupportedVersion string) error {
+
+	app, err := version.NewVersion(appVersion)
+	if errormdl.CheckErr(err) != nil {
+		return errormdl.CheckErr(err)
+	}
+	min, err := version.NewVersion(minSupportedVersion)
+	if errormdl.CheckErr(err) != nil {
+		return errormdl.CheckErr(err)
+	}
+
+	versionValidation := min.Compare(app)
+	if versionValidation == 1 {
+		return errormdl.Wrap("Minimum version is more than app version")
+	}
+	if versionValidation == 0 {
+		isStrictMode = true
+	}
+	applicationVersion = app
+	minimumSupportedVersion = min
+	isAppVersionEnabled = true
+	return nil
+}
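+
+// Usage sketch (illustrative version numbers):
+//
+//	SetAppVersion("2.1.0", "2.0.0") // accept clients from 2.0.0 through 2.1.0
+//	SetAppVersion("2.1.0", "2.1.0") // strict mode: clients must send exactly 2.1.0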
+
+// CallService - calls service with provided configuration, returns result and error from executed service
+func CallService(name string, rs *gjson.Result, isRestricted bool, isRoleBased bool, p servicebuildermdl.Principal) (interface{}, error) {
+	return CallServiceWithBranch(name, constantmdl.Branch_Main, rs, isRestricted, isRoleBased, p)
+}
+
+// CallServiceWithBranch - calls service of given branch with provided configuration, returns result and error from executed service
+func CallServiceWithBranch(name, branch string, rs *gjson.Result, isRestricted bool, isRoleBased bool, p servicebuildermdl.Principal) (interface{}, error) {
+	var found bool
+	var service interface{}
+	if strings.TrimSpace(branch) == "" {
+		loggermdl.LogError("Branch is empty")
+		return nil, errormdl.Wrap("Branch is empty")
+	}
+	activityName := ConcatenateEntityWithBranch(name, branch)
+	switch {
+	case isRestricted && isRoleBased:
+		service, found = roleBasedServices.Get(activityName)
+	case isRestricted:
+		service, found = restrictedServices.Get(activityName)
+	default:
+		service, found = openServices.Get(activityName)
+	}
+
+	if !found {
+		loggermdl.LogError("Service Not Found: " + name)
+		return nil, errormdl.Wrap("Service Not Found: " + name)
+	}
+
+	serviceCache, ok := service.(ServiceCache)
+	if !ok {
+		loggermdl.LogError("Unable to cast service object for ", name)
+		return nil, errormdl.Wrap("Service execution failed for " + name)
+	}
+
+	res, _, err := serviceCache.Service(rs, p)
+	if err != nil {
+		loggermdl.LogError("Service execution failed for ", name, " : ", err)
+		return nil, err
+	}
+	return res, nil
+}
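+
+// A minimal call-site sketch (hypothetical service and branch names):
+//
+//	rs := gjson.Parse(`{"id": "u1"}`)
+//	res, err := CallServiceWithBranch("GetUser", "main", &rs, true, false, principal)
+//
+// CallService is shorthand for the same call on the "main" branch.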
diff --git a/v2/routebuildermdl/serviceCachemdl_test.go b/v2/routebuildermdl/serviceCachemdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..36555e26a4d654e313d11678211f519fbd4baef2
--- /dev/null
+++ b/v2/routebuildermdl/serviceCachemdl_test.go
@@ -0,0 +1,36 @@
+package routebuildermdl
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSetAppVersion(t *testing.T) {
+	err := SetAppVersion("1.0.1", "1.0.0")
+	assert.NoError(t, err)
+}
+func Test1SetAppVersion(t *testing.T) {
+	err := SetAppVersion("1.0.1", "1.0.1")
+	assert.NoError(t, err)
+}
+
+func Test2SetAppVersion(t *testing.T) {
+	err := SetAppVersion("1.0", "1.0.1")
+	assert.Error(t, err)
+}
+
+func Test3SetAppVersion(t *testing.T) {
+	err := SetAppVersion("", "1.0.1")
+	assert.Error(t, err)
+}
+
+func Test4SetAppVersion(t *testing.T) {
+	err := SetAppVersion("1.0.1", "2.0.1")
+	assert.Error(t, err)
+}
+
+func Test5SetAppVersion(t *testing.T) {
+	err := SetAppVersion("1.0.1", "")
+	assert.Error(t, err)
+}
diff --git a/v2/securitymdl/devsecurityconstants.go b/v2/securitymdl/devsecurityconstants.go
new file mode 100644
index 0000000000000000000000000000000000000000..8647186b2435a6cc5b0eb9e99b5fbabbe49cac8e
--- /dev/null
+++ b/v2/securitymdl/devsecurityconstants.go
@@ -0,0 +1,12 @@
+// +build !prod
+
+package securitymdl
+
+// IV - initialization vector used in dev mode
+var IV = ""
+
+var securityKey = []byte{}
+
+func init() {
+	IV = "AAAAAAAAAAAAAAAA"
+}
diff --git a/v2/securitymdl/fdbSecurity.go b/v2/securitymdl/fdbSecurity.go
new file mode 100644
index 0000000000000000000000000000000000000000..9aa9888ad8d83245bc890f35a739780a3971e093
--- /dev/null
+++ b/v2/securitymdl/fdbSecurity.go
@@ -0,0 +1,176 @@
+package securitymdl
+
+/*
+
+All FDB related security options are provided here.
+
+Users need to set the FDB options on application startup.
+In case of CS, this is done by the restricted and rolebased global cache.
+
+*/
+
+import (
+	"errors"
+	"path/filepath"
+	"strings"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/hashmdl"
+
+	"github.com/tidwall/gjson"
+)
+
+const (
+	SEC_ENCKey     = "encKey"
+	SEC_UserKey    = "userKey"
+	SEC_FieldQuery = "fieldQuery"
+	SEC_SECURITY   = "security"
+)
+
+const (
+	// SharedPrefix marks a file as sharable, i.e. its encryption key does not include the field-query result (fdbSec.fieldQuery)
+	SharedPrefix = "ss_"
+	EmptySTR     = ""
+)
+
+// FDBSecOptions provides the options to be set for FDB security.
+// EncKey and UserDefinedKey are compulsory; Apply() returns an error if either is empty.
+// FieldQuery may be empty, in which case the same key inputs are used for all buckets.
+// These are the only security options exposed to callers.
+type FDBSecOptions struct {
+	EncKey         string // the global encryption key used in the project. This key will be applicable in all cases.
+	UserDefinedKey string // the user defined key in the project. This key will be applicable in all cases.
+	FieldQuery     string // query to get dynamic field. Ex. Each student data can be encrypted with studentID. Applicable only for the shared bucket.
+}
+
+// FDBSecParams provide options for the FDB security for the FDB CRUD operations.
+type FDBSecParams struct {
+	EnableSecurity    bool
+	EnableCompression bool
+}
+
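+// NewFDBOptions returns a new, empty FDBSecOptions to be filled in and applied.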
+func NewFDBOptions() *FDBSecOptions {
+	return new(FDBSecOptions)
+}
+
+// Apply sets the internal security variable.
+//
+// Returns error if options fail while validations and if Apply() is already called.
+func (fo *FDBSecOptions) Apply() error {
+	if fdbSec.setSec {
+		return errors.New("fdb security is already set")
+	}
+
+	if err := fo.Validate(); err != nil {
+		return err
+	}
+	fdbSec.setSec = true
+	fdbSec.encKey = fo.EncKey
+	fdbSec.userDefinedKey = fo.UserDefinedKey
+	fdbSec.fieldQuery = fo.FieldQuery
+
+	fdbSec.enableSharedFiles = fdbSec.fieldQuery != EmptySTR // if true, throw an error when the query result is an empty string
+	return nil
+}
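+
+// Startup wiring sketch (placeholder key values):
+//
+//	opts := NewFDBOptions()
+//	opts.EncKey = "<project encryption key>"
+//	opts.UserDefinedKey = "<user defined key>"
+//	opts.FieldQuery = "studentId" // optional; enables per-record keys for non-shared files
+//	if err := opts.Apply(); err != nil {
+//		// fail startup: security must be configured exactly once
+//	}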
+
+type fdbSecurity struct {
+	setSec         bool   // set in the Apply() method.
+	encKey         string // the global encryption key used in the project
+	userDefinedKey string // the user defined key in the project
+	fieldQuery     string // query to get dynamic field. Ex. Each student data can be encrypted with studentID. Allowed to be empty. If empty, use same encryption for all cases.
+
+	enableSharedFiles bool // True if fieldQuery is not empty.
+}
+
+var fdbSec fdbSecurity
+
+// Validate checks options for empty fields.
+func (fs *FDBSecOptions) Validate() error {
+	if fs.EncKey == "" {
+		return errors.New("FDB security: encryption key can not be empty")
+	}
+	if fs.UserDefinedKey == "" {
+		return errors.New("FDB security: user defined key can not be empty")
+	}
+	// if fs.FieldQuery == "" {
+	// 	// this is allowed for now. If empty, same encryption is used for all buckets.
+	// 	// return errors.New("FDB security: field query can not be empty")
+	// }
+	return nil
+}
+
+// GetFDBSecOptions returns the copy of fdb options set by Apply() method.
+func GetFDBSecOptions() FDBSecOptions {
+	return FDBSecOptions{
+		EncKey:         fdbSec.encKey,
+		UserDefinedKey: fdbSec.userDefinedKey,
+		FieldQuery:     fdbSec.fieldQuery,
+	}
+}
+
+// GenSecKeyBytes generates the security key for FDB.
+//
+// If fileName has the prefix "ss_" then the key is generated from
+// 1. the global encryption key (fdbSec.encKey)
+// 2. the user defined key (fdbSec.userDefinedKey)
+// 3. fileName
+//
+// else the result of firing (fdbSec.fieldQuery) on data is also included in the generation. If the result is empty, expect an error.
+//
+// Order for key: data.Get(fdbSec.fieldQuery).String() + fdbSec.encKey + fdbSec.userDefinedKey + fileName(only)
+func GenSecKeyBytes(fileName string, data *gjson.Result) ([]byte, error) {
+	// here data is String not JSON
+	rs := gjson.Parse(data.String())
+	_, fileName = filepath.Split(fileName)
+	if fileName == EmptySTR {
+		return nil, errors.New("GenerateSecurityKey: fileName must not be empty")
+	}
+
+	// Warning: The order of string concatenation must be preserved as specified.
+	key := fdbSec.encKey + fdbSec.userDefinedKey + fileName
+
+	useFieldQuery, err := CheckDataForFieldQuery(fileName, &rs)
+	if err != nil {
+		return nil, err
+	}
+
+	if useFieldQuery {
+		key = rs.Get(fdbSec.fieldQuery).String() + key
+	}
+	hash, err := hashmdl.Get128BitHash([]byte(key))
+	if err != nil {
+		return nil, errors.New("GenerateSecurityKey: " + err.Error())
+	}
+
+	hashedKey := hash[:]
+	return hashedKey, nil
+}
+
+// GenSecKey generates the security key for FDB and returns it as a string.
+func GenSecKey(fileName string, data *gjson.Result) (string, error) {
+	key, err := GenSecKeyBytes(fileName, data)
+	if err != nil {
+		return "", err
+	}
+
+	return string(key), nil
+}
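+
+// A usage sketch; the file names and record below are illustrative only, and
+// "ss_" is the shared prefix described above:
+//
+//	rec := gjson.Parse(`{"studentId":"S-101"}`)
+//	key, err := GenSecKey("students_S-101.json", &rec)  // non-shared: the studentId query result is prepended
+//	key2, err2 := GenSecKey("ss_students.json", &rec)   // shared prefix: the field query is skipped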
+
+// CheckDataForFieldQuery checks the result of the field query on data.
+//
+// If no query is provided OR the file is a shared file, the result is false, nil.
+//
+// Otherwise the query result is checked; if it is empty, an error is returned.
+//
+// Call this only if security is enabled for the database.
+//
+// fileName must not be empty and data must not be nil.
+func CheckDataForFieldQuery(fileName string, data *gjson.Result) (bool, error) {
+	if strings.HasPrefix(fileName, SharedPrefix) || !fdbSec.enableSharedFiles {
+		// this is a shared file OR no query provided. No need to check for dynamic query result.
+		return false, nil
+	}
+
+	if data.Get(fdbSec.fieldQuery).String() == EmptySTR {
+		return false, errors.New("CheckDataForFieldQuery: got empty data for defined field query " + fdbSec.fieldQuery)
+	}
+
+	return true, nil
+}
diff --git a/v2/securitymdl/prodsecurityconstants.go b/v2/securitymdl/prodsecurityconstants.go
new file mode 100644
index 0000000000000000000000000000000000000000..b28a49defa5d137d8d66c80f39adec6e1ecc2dcc
--- /dev/null
+++ b/v2/securitymdl/prodsecurityconstants.go
@@ -0,0 +1,12 @@
+//+build prod
+
+package securitymdl
+
+// IV is the initialization vector used in prod mode
+var IV = ""
+
+var securityKey = []byte{}
+
+func init() {
+	IV = "AAAAAAAAAAAAAAAA"
+}
diff --git a/v2/securitymdl/securitymdl.go b/v2/securitymdl/securitymdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..fd9e8e617eaa74cdb28131c04c6a2facb8c3033c
--- /dev/null
+++ b/v2/securitymdl/securitymdl.go
@@ -0,0 +1,255 @@
+package securitymdl
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"encoding/base64"
+	"math/rand"
+	"strconv"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	OneOfOne "github.com/OneOfOne/xxhash"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// SecurityKeyGeneratorFunc - signature of a custom security key generator
+type SecurityKeyGeneratorFunc func(string) ([]byte, error)
+
+var securityKeyGeneratorFunc SecurityKeyGeneratorFunc
+
+// SetSecurityKeyGeneratorFunc - sets the security key generator function
+func SetSecurityKeyGeneratorFunc(generatorFunc SecurityKeyGeneratorFunc) {
+	securityKeyGeneratorFunc = generatorFunc
+}
+
+// GetSecurityKeyGeneratorFunc - returns the security key generator function
+func GetSecurityKeyGeneratorFunc() SecurityKeyGeneratorFunc {
+	return securityKeyGeneratorFunc
+}
+
+// SetSecurityConfig will set security key and initializationVector
+func SetSecurityConfig(secKey []byte, initializationVector string) {
+
+	loggermdl.LogDebug("SetSecurityConfig Started")
+
+	SetIV(initializationVector)
+	SetSecurityKey(secKey)
+
+	loggermdl.LogDebug("SetSecurityConfig Ended")
+}
+
+// SetIV will set initializationVector
+func SetIV(initializationVector string) {
+	loggermdl.LogDebug("SetIV Started")
+
+	if initializationVector != "" {
+		IV = initializationVector
+	}
+
+	loggermdl.LogDebug("SetIV Ended")
+}
+
+// SetSecurityKey will set Security key
+func SetSecurityKey(secKey []byte) {
+
+	loggermdl.LogDebug("SetSecurityKey Started")
+
+	if len(secKey) > 0 {
+		securityKey = secKey
+	}
+
+	loggermdl.LogDebug("SetSecurityKey Ended")
+}
+
+// AESEncrypt encrypts the given text with AES-CBC and returns base64-encoded cipher text
+func AESEncrypt(plainText, key []byte) ([]byte, error) {
+
+	block, newCipherErr := aes.NewCipher(key)
+	if errormdl.CheckErr(newCipherErr) != nil {
+		loggermdl.LogError("error occured while calling aes.NewCipher() : ", errormdl.CheckErr(newCipherErr))
+		return nil, errormdl.CheckErr(newCipherErr)
+	}
+
+	padding := block.BlockSize() - len(plainText)%block.BlockSize()
+
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+
+	plainText = append(plainText, padtext...)
+
+	cipherText := make([]byte, len(plainText))
+
+	ivBytes := []byte(IV)
+	if len(ivBytes) != block.BlockSize() {
+		loggermdl.LogError("IV length must equal block size")
+		return nil, errormdl.Wrap("IV length must equal block size")
+	}
+	cbc := cipher.NewCBCEncrypter(block, ivBytes)
+
+	cbc.CryptBlocks(cipherText, plainText)
+
+	encodedData := make([]byte, base64.StdEncoding.EncodedLen(len(cipherText)))
+
+	base64.StdEncoding.Encode(encodedData, cipherText)
+
+	return encodedData, nil
+}
+
+// AESDecrypt base64-decodes the given data and decrypts it with AES-CBC
+func AESDecrypt(encodedData, key []byte) ([]byte, error) {
+
+	decodedData := make([]byte, base64.StdEncoding.DecodedLen(len(encodedData)))
+	n, decodeErr := base64.StdEncoding.Decode(decodedData, encodedData)
+	if errormdl.CheckErr(decodeErr) != nil {
+		loggermdl.LogError("error occurred while calling base64.StdEncoding.Decode() : ", errormdl.CheckErr(decodeErr))
+		return nil, errormdl.CheckErr(decodeErr)
+	}
+	cipherText := decodedData[:n]
+
+	block, newCipherErr := aes.NewCipher(key)
+	if errormdl.CheckErr1(newCipherErr) != nil {
+		loggermdl.LogError("error occured while calling aes.NewCipher : ", errormdl.CheckErr1(newCipherErr))
+		return nil, errormdl.CheckErr1(newCipherErr)
+	}
+
+	ivBytes := []byte(IV)
+	if len(ivBytes) != block.BlockSize() {
+		loggermdl.LogError("IV length must equal block size")
+		return nil, errormdl.Wrap("IV length must equal block size")
+	}
+
+	cbc := cipher.NewCBCDecrypter(block, ivBytes)
+
+	calculatedCipherSize := len(cipherText) % cbc.BlockSize()
+	if errormdl.CheckInt1(calculatedCipherSize) != 0 {
+		loggermdl.LogError("crypto/cipher: input not full blocks")
+		return nil, errormdl.Wrap("crypto/cipher: input not full blocks")
+	}
+
+	cbc.CryptBlocks(cipherText, cipherText)
+
+	length := len(cipherText)
+	if errormdl.CheckInt(length) < 1 {
+		loggermdl.LogError("length of cipherText is less than 1")
+		return nil, errormdl.Wrap("length of cipherText is less than 1")
+	}
+
+	unpadding := int(cipherText[length-1])
+	difference := length - unpadding
+
+	if errormdl.CheckInt2(difference) < 0 {
+		return nil, errormdl.Wrap("length of (length - unpadding) is less than 0")
+	}
+
+	return cipherText[:(length - unpadding)], nil
+}
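+
+// Round-trip sketch: the key must be 16, 24, or 32 bytes (AES-128/192/256)
+// and the package-level IV must match the 16-byte AES block size:
+//
+//	cipherText, err := AESEncrypt([]byte("Test for success"), []byte("1234567891234567"))
+//	// ...
+//	plainText, err := AESDecrypt(cipherText, []byte("1234567891234567"))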
+
+// //AESEncryptDefault method to encrypt with default config (securityKey)
+// func AESEncryptDefault(plainText []byte) ([]byte, error) {
+
+// 	loggermdl.LogDebug("AESEncryptDefault Started")
+
+// 	compressedText, compressionError := Compress(plainText)
+// 	if compressionError != nil {
+// 		return plainText, compressionError
+// 	}
+
+// 	loggermdl.LogDebug("AESEncryptDefault Started")
+
+// 	return AESEncrypt(compressedText, securityKey)
+// }
+
+// // AESDecryptDefault method to decrypt with default config (securityKey)
+// func AESDecryptDefault(encodedData []byte) ([]byte, error) {
+
+// 	loggermdl.LogDebug("AESDecryptDefault Started")
+
+// 	compressedText, decryptionError := AESDecrypt(encodedData, securityKey)
+// 	if decryptionError != nil {
+// 		return encodedData, decryptionError
+// 	}
+// 	byteArray, err := Decompress(compressedText)
+// 	if err != nil {
+// 		return compressedText, err
+// 	}
+
+// 	loggermdl.LogDebug("AESDecryptDefault Started")
+
+// 	return byteArray, nil
+// }
+
+// CreateSecurityKey generates a random string of the given length (math/rand based; not cryptographically secure)
+func CreateSecurityKey(keyLength int) (string, error) {
+	if keyLength <= constantmdl.MAX_RANDOM_STRING_LENGTH {
+		seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+		b := make([]byte, keyLength)
+		for i := range b {
+			b[i] = constantmdl.CharSet[seededRand.Intn(len(constantmdl.CharSet))]
+		}
+		return string(b), nil
+	}
+	return "", errormdl.Wrap("length should be less than 256 bytes (2048 bits)")
+}
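+
+// Sketch: generate a random 16-character key and use it for AES-128. Note
+// that the key is derived from a clock-seeded math/rand source, so it is not
+// suitable where cryptographically secure randomness is required:
+//
+//	key, err := CreateSecurityKey(16)
+//	cipherText, encErr := AESEncrypt(data, []byte(key))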
+
+// SaltPassword salts the given password using bcrypt; a new salted string is generated each time for the same input
+func SaltPassword(password string) (string, error) {
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError("error occured while calling bcrypt.GenerateFromPassword() : ", errormdl.CheckErr(err))
+		return "", errormdl.CheckErr(err)
+	}
+	return string(hash), nil
+}
+
+// CheckPasswordSalt - compares a hashed password with its possible plaintext equivalent. Returns true on match and false on mismatch.
+func CheckPasswordSalt(hashedPassword, password string) bool {
+	err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(password))
+	return err == nil
+}
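+
+// Salt/verify round-trip sketch; bcrypt embeds the salt in the hash, so only
+// the hashed value needs to be stored:
+//
+//	hashed, err := SaltPassword("P@ssw0rd")
+//	ok := CheckPasswordSalt(hashed, "P@ssw0rd") // true on match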
+
+// GetHash - returns the hash checksum (xxHash64) of the given plaintext
+func GetHash(plaintext string) (string, error) {
+	h := OneOfOne.New64()
+	_, err := h.Write([]byte(plaintext))
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError("error occured while calling GetHash : ", errormdl.CheckErr(err))
+		return "", errormdl.CheckErr(err)
+	}
+	return strconv.Itoa(int(h.Sum64())), nil
+}
+
+// GetHashAndSalt - returns the hash checksum of the given plaintext + salt combination
+func GetHashAndSalt(plaintext, salt string) (string, error) {
+	hashAndSalt, err := GetHash(plaintext + salt)
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError("error occured while calling GetHashAndSalt : ", errormdl.CheckErr(err))
+		return "", errormdl.CheckErr(err)
+	}
+	return hashAndSalt, nil
+}
+
+// PaddingDataUsingpkcs7 - this method is copied from https://github.com/mergermarket/go-pkcs7
+func PaddingDataUsingpkcs7(buf []byte, size int) ([]byte, error) {
+	bufLen := len(buf)
+	padLen := size - bufLen%size
+	padded := make([]byte, bufLen+padLen)
+	copy(padded, buf)
+	for i := 0; i < padLen; i++ {
+		padded[bufLen+i] = byte(padLen)
+	}
+	return padded, nil
+}
+
+// UnpaddingDataUsingpkcs7 - this method is copied from https://github.com/mergermarket/go-pkcs7
+func UnpaddingDataUsingpkcs7(padded []byte, size int) ([]byte, error) {
+	if len(padded)%size != 0 {
+		return nil, errormdl.Wrap("Padded value wasn't in correct size")
+	}
+	bufLen := len(padded) - int(padded[len(padded)-1])
+	buf := make([]byte, bufLen)
+	copy(buf, padded[:bufLen])
+	return buf, nil
+}
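+
+// Pad/unpad round-trip sketch for the 16-byte AES block size:
+//
+//	padded, _ := PaddingDataUsingpkcs7([]byte("hello"), 16) // appends 11 bytes of 0x0b
+//	orig, _ := UnpaddingDataUsingpkcs7(padded, 16)          // "hello"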
diff --git a/v2/securitymdl/securitymdl_test.go b/v2/securitymdl/securitymdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..20ec5f6dde24f5c2e0d88c47e495c66ce0eb149c
--- /dev/null
+++ b/v2/securitymdl/securitymdl_test.go
@@ -0,0 +1,258 @@
+package securitymdl
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"github.com/stretchr/testify/assert"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+)
+
+// ===========================================================================================
+// ============================================= Config ======================================
+// ===========================================================================================
+
+func TestSetSecurityConfigBlankValues(t *testing.T) {
+	secKey := ""
+	initializationVector := ""
+	SetSecurityConfig([]byte(secKey), initializationVector)
+	assert.Equal(t, IV, "AAAAAAAAAAAAAAAA", "Matching")
+}
+
+func TestSetSecurityConfig(t *testing.T) {
+	secKey := "1234567891234567"
+	initializationVector := "BBBBBBBBBBBBBBBB"
+	SetSecurityConfig([]byte(secKey), initializationVector)
+	assert.Equal(t, IV, "BBBBBBBBBBBBBBBB", "Matching")
+}
+
+func TestSetSecurityConfigDefaultValues(t *testing.T) {
+	secKey := "1234567891234567"
+	initializationVector := "AAAAAAAAAAAAAAAA"
+	SetSecurityConfig([]byte(secKey), initializationVector)
+	assert.Equal(t, IV, "AAAAAAAAAAAAAAAA", "Matching")
+}
+
+// ===========================================================================================
+// =========================================== AESEncrypt ====================================
+// ===========================================================================================
+
+func TestAESEncryptSuccess(t *testing.T) {
+	plainText := "Test for success"
+	key := "1234567891234567"
+	encText, encError := AESEncrypt([]byte(plainText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+	assert.Equal(t, "HAtVHhxx9+ULClrO1dkSKMiu6IciRmQ2PcQi4kSsLn4=", string(encText), "Encryption Successful")
+}
+
+func TestAESEncryptSmallKeyLength(t *testing.T) {
+	plainText := "Test for success"
+	key := "123456789123456"
+	encText, encError := AESEncrypt([]byte(plainText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "Error occured due to key size")
+}
+
+func TestAESEncryptIVLessThanBlock(t *testing.T) {
+	plainText := "Test for success"
+	key := "1234567891234567"
+	IV = "A"
+	encText, encError := AESEncrypt([]byte(plainText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "IV size less than block")
+}
+
+// ===========================================================================================
+// =========================================== AESDecrypt ====================================
+// ===========================================================================================
+
+func TestAESDecryptSuccess(t *testing.T) {
+	cipherText := []byte("HAtVHhxx9+ULClrO1dkSKMiu6IciRmQ2PcQi4kSsLn4=")
+	key := "1234567891234567"
+	IV = "AAAAAAAAAAAAAAAA"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("decrypted text : ", string(encText))
+	loggermdl.LogError("error is : ", encError)
+	assert.Equal(t, "Test for success", string(encText), "Decryption Successful")
+}
+
+func TestAESDecryptSmallKeyLength(t *testing.T) {
+	cipherText := []byte("HAtVHhxx9+ULClrO1dkSKMiu6IciRmQ2PcQi4kSsLn4=")
+	key := "123456789123456"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "Error occured due to key size")
+}
+
+func TestAESDecryptDecodeError(t *testing.T) {
+	cipherText := []byte("HAtVHhxx9+ULClrO1dkSKMiu6IciRmQ2PcQi4kSsLn=")
+	key := "123456789123456"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "Decode error")
+}
+
+func TestAESDecryptCipherLessThan1(t *testing.T) {
+	cipherText := []byte("")
+	key := "1234567891234567"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "CipherLessThan1")
+}
+
+func TestAESDecryptCipherSize(t *testing.T) {
+	cipherText := []byte("")
+	key := "1234567891234567"
+	IV = "A"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "IV size less than block")
+}
+
+func TestAESDecryptIVLessThanBlock(t *testing.T) {
+	cipherText := []byte("")
+	key := "1234567891234567"
+	IV = "A"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "IV size less than block")
+}
+
+func TestAESDecryptDifferenceCheck(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOnCheckInt1 = true
+	cipherText := []byte("HAtVHhxx9+ULClrO1dkSKMiu6IciRmQ2PcQi4kSsLn4=")
+	key := "1234567891234567"
+	IV = "AAAAAAAAAAAAAAAA"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("!!!!!!!!!!!!!!!!!!!!!!!!!!encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, " / crypto/cipher: input not full blocks")
+	errormdl.IsTestingNegetiveCaseOnCheckInt1 = false
+}
+
+func TestAESDecryptDifferenceCheck2(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOnCheckInt2 = true
+	cipherText := []byte("HAtVHhxx9+ULClrO1dkSKMiu6IciRmQ2PcQi4kSsLn4=")
+	key := "1234567891234567"
+	IV = "AAAAAAAAAAAAAAAA"
+	encText, encError := AESDecrypt([]byte(cipherText), []byte(key))
+	loggermdl.LogInfo("!!!!!!!!!!!!!!!!!!!!!!!!!!encrypted text : ", encText)
+	loggermdl.LogError("error is : ", encError)
+
+	assert.Error(t, encError, "length of (length - unpadding) is less than 0 / crypto/cipher: input not full blocks")
+	errormdl.IsTestingNegetiveCaseOnCheckInt2 = false
+}
+
+func TestCreateSecurityKey(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOnCheckInt2 = true
+	keyLength := 256
+	key, _ := CreateSecurityKey(keyLength)
+	assert.Len(t, key, keyLength, "length is equal")
+	errormdl.IsTestingNegetiveCaseOnCheckInt2 = false
+}
+func TestCreateSecurityKeyForOutOfRange(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOnCheckInt2 = true
+	keyLength := 257
+	_, keyerr := CreateSecurityKey(keyLength)
+	loggermdl.LogError("error is : ", keyerr)
+	assert.Error(t, keyerr, "length is out of range,length should be less than 256 bytes (2048 bits)")
+	errormdl.IsTestingNegetiveCaseOnCheckInt2 = false
+}
+func BenchmarkCreateSecurityKey(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		CreateSecurityKey(16)
+	}
+}
+
+func TestSaltPassword(t *testing.T) {
+	saltedPwd, _ := SaltPassword("P@ssw0rd")
+	assert.NotZero(t, len(saltedPwd), "Should give len")
+}
+
+func TestSaltPasswordError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err := SaltPassword("P@ssw0rd")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+func BenchmarkSaltPassword(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		SaltPassword("P@ssw0rd")
+	}
+}
+
+func TestCheckPasswordHash(t *testing.T) {
+	match := CheckPasswordSalt("$2a$04$hers/Xb2u00e8wg4e.S7Cu7JbUm4TTR4ED3wU7HTNuuwNGJxOqMZu", "P@ssw0rd")
+	assert.True(t, match)
+}
+func BenchmarkCheckPasswordHash(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		CheckPasswordSalt("$2a$04$hers/Xb2u00e8wg4e.S7Cu7JbUm4TTR4ED3wU7HTNuuwNGJxOqMZu", "P@ssw0rd")
+	}
+}
+
+func TestGetHash(t *testing.T) {
+	saltedPwd, _ := GetHash("P@ssw0rd")
+	assert.NotZero(t, len(saltedPwd), "Should give len")
+}
+
+func TestGetHashError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err := GetHash("P@ssw0rd")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+
+func TestGetHashAndSalt(t *testing.T) {
+	saltedPwd, _ := GetHashAndSalt("P@ssw0rd", "my-salt-key")
+	assert.NotZero(t, len(saltedPwd), "Should give len")
+}
+
+func TestGetHashAndSaltError(t *testing.T) {
+	errormdl.IsTestingNegetiveCaseOn = true
+	_, err := GetHashAndSalt("P@ssw0rd", "my-salt-key")
+	errormdl.IsTestingNegetiveCaseOn = false
+	assert.Error(t, err, "This should return error")
+}
+
+func TestPaddingDataUsingpkcs7(t *testing.T) {
+	data := []byte("hello")
+	size := 16
+	paddingData, err := PaddingDataUsingpkcs7(data, size)
+	assert.NoError(t, err, "padding error")
+	expected := []byte("hello\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b")
+	assert.Equal(t, expected, paddingData)
+	t.Logf("Padding Data : %v", paddingData)
+}
+
+func TestUnpaddingDataUsingpkcs7(t *testing.T) {
+	paddingdata := []byte{104, 101, 108, 108, 111, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}
+	size := 16
+	unPaddingData, err := UnpaddingDataUsingpkcs7(paddingdata, size)
+	assert.NoError(t, err, "unpadding error")
+	expected := "hello"
+	assert.Equal(t, expected, string(unPaddingData))
+	t.Logf("Unpadding Data : %v and string is %v", unPaddingData, string(unPaddingData))
+}
diff --git a/v2/servicebuildermdl/servicebuildermdl.go b/v2/servicebuildermdl/servicebuildermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc7ef208c0ab3465091babc8a7f0192d01f828d8
--- /dev/null
+++ b/v2/servicebuildermdl/servicebuildermdl.go
@@ -0,0 +1,731 @@
+//@author  Ajit Jagtap
+//@version Mon Jul 09 2018 14:00:05 GMT+0530 (IST)
+
+// Package servicebuildermdl will help you run BL and fetch data.
+package servicebuildermdl
+
+import (
+	"database/sql"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/dgraph-io/dgo"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/utiliymdl/guidmdl"
+	"github.com/tidwall/sjson"
+
+	linq "github.com/ahmetb/go-linq/v3"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/constantmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/validationmdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/zhouzhuojie/conditions"
+
+	"github.com/tidwall/gjson"
+)
+
+// GlobalConfigModel - GlobalConfigModel
+type GlobalConfigModel struct {
+	Key          string   `json:"key"`
+	Value        string   `json:"value"`
+	Restrictions []string `json:"restrictions"`
+}
+
+var globalConfig map[string]GlobalConfigModel
+var globalConfigMutex sync.Mutex
+var once sync.Once
+
+var ruleCache map[string]conditions.Expr
+var mutex = &sync.Mutex{}
+
+// get server ip address
+var (
+	serverIP = func() string {
+		ifaces, err := net.Interfaces()
+		if err != nil {
+			return ""
+		}
+		for _, iface := range ifaces {
+			if iface.Flags&net.FlagUp == 0 {
+				continue // interface down
+			}
+			if iface.Flags&net.FlagLoopback != 0 {
+				continue // loopback interface
+			}
+			addrs, err := iface.Addrs()
+			if err != nil {
+				return ""
+			}
+			for _, addr := range addrs {
+				var ip net.IP
+				switch v := addr.(type) {
+				case *net.IPNet:
+					ip = v.IP
+				case *net.IPAddr:
+					ip = v.IP
+				}
+				if ip == nil || ip.IsLoopback() {
+					continue
+				}
+				if ip = ip.To4(); ip == nil {
+					continue // not an ipv4 address
+				}
+				return ip.String()
+			}
+		}
+		return ""
+	}()
+)
+
+func init() {
+	ruleCache = make(map[string]conditions.Expr)
+	globalConfig = make(map[string]GlobalConfigModel)
+	globalConfigMutex = sync.Mutex{}
+}
+
+// DebugInfo holds stack trace and performance information collected while serving a request
+type DebugInfo struct {
+	StackTrace      strings.Builder `json:"stackTrace"`
+	PerformanceInfo strings.Builder `json:"performanceInfo"`
+}
+
+// LoadData is the function signature for data loader steps
+type LoadData = func(ab *AbstractBusinessLogicHolder) error
+
+// FinalStepProcessOutput is the function signature for the final output processing step
+type FinalStepProcessOutput = func(ab *AbstractBusinessLogicHolder) (*interface{}, error)
+
+// AbstractBusinessLogicHolder - embed this type to inherit the common service-building behavior
+type AbstractBusinessLogicHolder struct {
+	localServiceData  map[string]interface{}
+	pricipalObject    Principal
+	globalConfigData  map[string]GlobalConfigModel
+	GlobalErrorCode   int
+	ServiceError      interface{}
+	TransactionEnable bool
+	DatabaseType      string // database type for transaction begin(MYSQL,SQLSERVER etc.)
+	IgnoreStrictMode  bool
+	Branch            string  // branch name provided in the header
+	TXN               *sql.Tx // transaction for MySQL
+	SQLServerTXN      *sql.Tx // Transaction for SQLServer
+	GraphDbTXN        *dgo.Txn
+}
+
+// SetGlobalConfig sets the global configuration; only the first call takes effect
+func SetGlobalConfig(configs map[string]GlobalConfigModel) {
+	globalConfigMutex.Lock()
+	defer globalConfigMutex.Unlock()
+	once.Do(func() {
+		if configs != nil {
+			globalConfig = configs
+		}
+	})
+}
+
+// SetBranch sets the branch name provided in the header
+func (ab *AbstractBusinessLogicHolder) SetBranch(branch string) {
+	ab.Branch = branch
+}
+
+// GetDataString will give you string
+func (ab *AbstractBusinessLogicHolder) GetDataString(key string) (string, bool) {
+	//check in map
+	temp, found := ab.localServiceData[key]
+	if errormdl.CheckBool(!found) {
+		return "", false
+	}
+	// cast it
+	value, ok := temp.(string)
+	if errormdl.CheckBool1(!ok) {
+		return "", false
+	}
+	return value, true
+}
+
+// GetDataInt will give you int
+func (ab *AbstractBusinessLogicHolder) GetDataInt(key string) (int64, bool) {
+	//check in map
+	temp, found := ab.localServiceData[key]
+	if errormdl.CheckBool(!found) {
+		return 0, false
+	}
+	// cast it
+	value, ok := temp.(int64)
+	if errormdl.CheckBool1(!ok) {
+		return 0, false
+	}
+	return value, true
+}
+
+// GetDataInterface will give you the raw interface{} value
+func (ab *AbstractBusinessLogicHolder) GetDataInterface(key string) (interface{}, bool) {
+	//check in map
+	temp, found := ab.localServiceData[key]
+	if errormdl.CheckBool(!found) {
+		return nil, false
+	}
+	return temp, true
+}
+
+// GetDataResultset will give you a *gjson.Result
+func (ab *AbstractBusinessLogicHolder) GetDataResultset(key string) (*gjson.Result, bool) {
+	//check in map
+	temp, found := ab.localServiceData[key]
+	if errormdl.CheckBool(!found) {
+		loggermdl.LogWarn("Key not found -", key)
+		return &gjson.Result{}, false
+	}
+	// cast it
+	value, ok := temp.(*gjson.Result)
+
+	if errormdl.CheckBool1(!ok) {
+		return &gjson.Result{}, false
+	}
+	return value, true
+}
+
+// GetMQLRequestData - returns MQLRequestData
+func (ab *AbstractBusinessLogicHolder) GetMQLRequestData() (*gjson.Result, bool) {
+	//check in map
+	temp, found := ab.localServiceData[constantmdl.MQLRequestData]
+	if errormdl.CheckBool(!found) {
+		loggermdl.LogWarn("MQL Request Data not Found")
+		return &gjson.Result{}, false
+	}
+	// cast it
+	value, ok := temp.(*gjson.Result)
+
+	if errormdl.CheckBool1(!ok) {
+		return &gjson.Result{}, false
+	}
+	return value, true
+}
+
+// GetDataBool will give you a bool
+func (ab *AbstractBusinessLogicHolder) GetDataBool(key string) (bool, bool) {
+	//check in map
+	temp, found := ab.localServiceData[key]
+	if errormdl.CheckBool(!found) {
+		return false, false
+	}
+	// cast it
+	value, ok := temp.(bool)
+	if errormdl.CheckBool1(!ok) {
+		return false, false
+	}
+	return value, true
+}
+
+// GetCustomData will give you the custom data stored under key
+func (ab *AbstractBusinessLogicHolder) GetCustomData(key string) (interface{}, bool) {
+	//check in map
+	temp, found := ab.localServiceData[key]
+	if errormdl.CheckBool(!found) {
+		return nil, false
+	}
+	// cast it
+	return temp, true
+}
+
+// GetGlobalConfigString - return string value for global config key
+func (ab *AbstractBusinessLogicHolder) GetGlobalConfigString(key string) (string, bool) {
+	globalConfigMutex.Lock()
+	defer globalConfigMutex.Unlock()
+	value, found := ab.globalConfigData[key]
+	if errormdl.CheckBool(!found) {
+		return "", false
+	}
+	if len(value.Restrictions) > 0 {
+		if linq.From(value.Restrictions).WhereT(func(str string) bool {
+			return str == "Open" || str == "OPEN"
+		}).Any() {
+			return value.Value, true
+		}
+		if (linq.From(value.Restrictions).WhereT(func(str string) bool {
+			return str == "Restricted" || str == "RESTRICTED"
+		}).Any()) && ab.pricipalObject.UserID != "" {
+			return value.Value, true
+		}
+
+		for i := 0; i < len(value.Restrictions); i++ {
+			for j := 0; j < len(ab.pricipalObject.Groups); j++ {
+				if ab.pricipalObject.Groups[j] == value.Restrictions[i] {
+					return value.Value, true
+				}
+			}
+		}
+		return "", false
+	}
+	return value.Value, true
+}
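+
+// Resolution sketch: an "Open" value is returned to any caller, a
+// "Restricted" value requires a non-empty UserID, and any other restriction
+// must match one of the caller's groups ("smtpHost" is a hypothetical key):
+//
+//	value, ok := ab.GetGlobalConfigString("smtpHost")
+//	if !ok {
+//		// key missing or the caller is not permitted to read it
+//	}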
+
+// New will create memory for your data
+func (ab *AbstractBusinessLogicHolder) New(principalObject *Principal) *AbstractBusinessLogicHolder {
+	ab.localServiceData = make(map[string]interface{})
+	ab.globalConfigData = globalConfig
+	ab.pricipalObject = *principalObject
+	ab.GlobalErrorCode = 0
+	ab.ServiceError = nil
+	return ab
+}
+
+// SetResultset stores the given resultset under key
+func (ab *AbstractBusinessLogicHolder) SetResultset(key string, obj *gjson.Result) {
+	ab.localServiceData[key] = obj
+}
+
+// SetMQLRequestData - sets the MQL request data
+func (ab *AbstractBusinessLogicHolder) SetMQLRequestData(obj *gjson.Result) {
+	ab.localServiceData[constantmdl.MQLRequestData] = obj
+}
+
+// SetByteData will set byte data as gjson.Result
+func (ab *AbstractBusinessLogicHolder) SetByteData(key string, obj []byte) {
+	rs := gjson.ParseBytes(obj)
+	ab.localServiceData[key] = &rs
+}
+
+// SetMQLToken will set token in header
+func (ab *AbstractBusinessLogicHolder) SetMQLToken(token string) {
+	ab.localServiceData["MQLToken"] = token
+}
+
+// SetCustomData set custom user data in map
+func (ab *AbstractBusinessLogicHolder) SetCustomData(key string, data interface{}) {
+	ab.localServiceData[key] = data
+}
+
+// SetErrorData will set error
+func (ab *AbstractBusinessLogicHolder) SetErrorData(data interface{}) {
+	ab.ServiceError = data
+}
+
+// GetFinalData returns the data stored under the finaldata key
+func (ab *AbstractBusinessLogicHolder) GetFinalData() *interface{} {
+	a := ab.localServiceData["finaldata"]
+	return &a
+}
+
+// SetFinalData stores data under the finaldata key
+func (ab *AbstractBusinessLogicHolder) SetFinalData(data interface{}) {
+	ab.localServiceData["finaldata"] = data
+}
+
+// GetClientIP returns the client IP address
+func (ab *AbstractBusinessLogicHolder) GetClientIP() string {
+	return ab.pricipalObject.ClientIP
+}
+
+// GetServerIP returns the server IP address
+func (ab *AbstractBusinessLogicHolder) GetServerIP() string {
+	return serverIP
+}
+
+// SetErrorCode - SetErrorCode in service context
+func (ab *AbstractBusinessLogicHolder) SetErrorCode(code int) {
+	ab.GlobalErrorCode = code
+}
+
+// GetErrorData - GetErrorData in service context
+func (ab *AbstractBusinessLogicHolder) GetErrorData() interface{} {
+	if ab == nil {
+		return nil
+	}
+	return ab.ServiceError
+}
+
+// GetErrorCode - GetErrorCode in service context
+func (ab *AbstractBusinessLogicHolder) GetErrorCode() int {
+	if ab == nil {
+		return 0
+	}
+	return ab.GlobalErrorCode
+}
+
+// EchoBL sample EchoBL logic handler
+func (ab *AbstractBusinessLogicHolder) EchoBL() (map[string]interface{}, error) {
+	// loggermdl.LogWarn("EchoBL called")
+	return map[string]interface{}{
+		"ok": int64(1),
+	}, nil
+}
+
+// Step holds the configuration of a single service step
+type Step struct {
+	Stepname          string
+	expr              conditions.Expr
+	processDataFunc   LoadData
+	IsValidator       bool
+	ValidationDataKey string
+	ValidationFunc    func(interface{}) error
+	RunFunc           func() (map[string]interface{}, error)
+	ErrorFunc         func() (map[string]interface{}, error)
+	JumpStep          string
+}
+
+// ServiceBuilder will help you to run steps
+type ServiceBuilder struct {
+	ServiceName         string
+	steps               []Step
+	businessLogicHolder *AbstractBusinessLogicHolder
+	ServiceError        error
+}
+
+// GetSB gives you a service builder from which you can run steps
+func GetSB(name string, ab *AbstractBusinessLogicHolder) *ServiceBuilder {
+	newsb := ServiceBuilder{}
+	newsb.ServiceName = name
+	newsb.businessLogicHolder = ab
+	return &newsb
+}
+
+// AddStep will add func step with rule
+// Stepname : Give Name to step. It will appear in log.
+// Rule : Give Ybl rule
+// blfunc : Give Business Logic function pointer
+// errorfunc : Give Error function pointer
+func (sb *ServiceBuilder) AddStep(stepname, rule string, ld LoadData, blfunc, errorfunc func() (map[string]interface{}, error)) *ServiceBuilder {
+	if errormdl.CheckErr(sb.ServiceError) != nil {
+		loggermdl.LogError(sb.ServiceError)
+		return sb
+	}
+	step := Step{}
+
+	//Check rule in cache
+	mutex.Lock()
+	cachedRule, found := ruleCache[rule]
+	mutex.Unlock()
+	if errormdl.CheckBool(found) {
+		step.expr = cachedRule
+	} else {
+
+		// Parse the condition language and get expression
+		p := conditions.NewParser(strings.NewReader(rule))
+		expr, err := p.Parse()
+		if errormdl.CheckErr1(err) != nil {
+			loggermdl.LogError("Error in step: ", stepname, err)
+			sb.ServiceError = errormdl.CheckErr1(err)
+			return sb
+		}
+		step.expr = expr
+		mutex.Lock()
+		ruleCache[rule] = expr
+		mutex.Unlock()
+	}
+
+	step.RunFunc = blfunc
+	step.ErrorFunc = errorfunc
+	step.Stepname = stepname
+	step.processDataFunc = ld
+	sb.steps = append(sb.steps, step)
+	return sb
+}
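+
+// A minimal wiring sketch; MyBLLayer is a caller-defined type embedding
+// AbstractBusinessLogicHolder, and the step name, rule, and functions are
+// placeholders (see the package tests for runnable versions):
+//
+//	mb := MyBLLayer{}
+//	mb.New(&Principal{})
+//	out, err := GetSB("myService", &mb.AbstractBusinessLogicHolder).
+//		AddStep("Step1", "$1 == true", loadDataFunc, mb.BL1, mb.EchoBL).
+//		Run(nil)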
+
+// AddStepWithGoTo - AddStep with goto pointer
+func (sb *ServiceBuilder) AddStepWithGoTo(stepname, rule string, ld LoadData, blfunc, errorfunc func() (map[string]interface{}, error), gotoStepName string) *ServiceBuilder {
+	if errormdl.CheckErr(sb.ServiceError) != nil {
+		loggermdl.LogError(sb.ServiceError)
+		return sb
+	}
+	step := Step{}
+
+	//Check rule in cache
+	mutex.Lock()
+	cachedRule, found := ruleCache[rule]
+	mutex.Unlock()
+	if errormdl.CheckBool(found) {
+		step.expr = cachedRule
+	} else {
+
+		// Parse the condition language and get expression
+		p := conditions.NewParser(strings.NewReader(rule))
+		expr, err := p.Parse()
+		if errormdl.CheckErr1(err) != nil {
+			loggermdl.LogError("Error in step: ", stepname, err)
+			sb.ServiceError = errormdl.CheckErr1(err)
+			return sb
+		}
+		step.expr = expr
+		mutex.Lock()
+		ruleCache[rule] = expr
+		mutex.Unlock()
+	}
+
+	step.RunFunc = blfunc
+	step.ErrorFunc = errorfunc
+	step.Stepname = stepname
+	step.processDataFunc = ld
+	step.JumpStep = gotoStepName
+	sb.steps = append(sb.steps, step)
+	return sb
+}
+
+// AddValidation adds a validation step for the given dataKey
+func (sb *ServiceBuilder) AddValidation(dataKey string, validationfunc func(interface{}) error) *ServiceBuilder {
+	if errormdl.CheckErr(sb.ServiceError) != nil {
+		loggermdl.LogError(sb.ServiceError)
+		return sb
+	}
+	step := Step{}
+	step.IsValidator = true
+	step.ValidationDataKey = dataKey
+	step.ValidationFunc = validationfunc
+	step.Stepname = "Validation Step"
+	sb.steps = append(sb.steps, step)
+	return sb
+}
+
+func (sb *ServiceBuilder) findStepIndex(stepName string) (int, bool) {
+	for i, step := range sb.steps {
+		if step.Stepname == stepName {
+			return i, true
+		}
+	}
+	return 0, false
+}
+
+// Run all Steps one by one
+func (sb *ServiceBuilder) Run(fn FinalStepProcessOutput) (*interface{}, error) {
+	if errormdl.CheckErr(sb.ServiceError) != nil {
+		loggermdl.LogError(sb.ServiceError)
+		return nil, errormdl.CheckErr(sb.ServiceError)
+	}
+	_, ok := sb.businessLogicHolder.localServiceData["finaldata"]
+	if ok {
+		return sb.businessLogicHolder.GetFinalData(), nil
+	}
+	maxStepCount := 100
+	for i := 0; i < len(sb.steps); i++ {
+		if maxStepCount == 0 {
+			loggermdl.LogError("Your steps are in recursion and cross limit of 100 steps")
+			return nil, errormdl.Wrap("Your steps are in recursion and cross limit of 100 steps")
+		}
+		maxStepCount--
+
+		// Validation
+		if sb.steps[i].IsValidator {
+			validationError := sb.executeValidationFunction(sb.steps[i].ValidationDataKey, sb.steps[i].ValidationFunc)
+			if errormdl.CheckErr(validationError) != nil {
+				return nil, errormdl.CheckErr(validationError)
+			}
+			continue
+		}
+
+		//Load Data
+		if sb.steps[i].processDataFunc != nil {
+			daoError := sb.steps[i].processDataFunc(sb.businessLogicHolder)
+			if errormdl.CheckErr1(daoError) != nil {
+				loggermdl.LogError(daoError)
+				return nil, errormdl.CheckErr1(daoError)
+			}
+		}
+
+		//Run step func
+		tmp, blError := sb.steps[i].RunFunc()
+		if errormdl.CheckErr2(blError) != nil {
+			loggermdl.LogError(blError)
+			return nil, errormdl.CheckErr2(blError)
+		}
+		// Validation using conditions
+		result, evaluteError := conditions.Evaluate(sb.steps[i].expr, tmp)
+		if errormdl.CheckErr3(evaluteError) != nil {
+			loggermdl.LogError(evaluteError)
+			return nil, errormdl.CheckErr3(evaluteError)
+		}
+		// if validation fails
+		if !result {
+			// loggermdl.LogWarn(sb.steps[i].Stepname, "Failed", result)
+			// jump step is a functionality like go to on particular step
+			if sb.steps[i].JumpStep != "" {
+				_, recoveryError := sb.executeErrorFunction(sb.steps[i].ErrorFunc)
+				if errormdl.CheckErr(recoveryError) != nil {
+					loggermdl.LogError(recoveryError)
+					return nil, errormdl.CheckErr(recoveryError)
+				}
+				index, ok := sb.findStepIndex(sb.steps[i].JumpStep)
+				if !ok {
+					loggermdl.LogError("Step Name spcify in GOTO not found: " + sb.steps[i].JumpStep)
+					return nil, errormdl.Wrap("Step Name spcify in GOTO not found: " + sb.steps[i].JumpStep)
+				}
+				i = index - 1
+				continue
+			}
+			return sb.executeErrorFunction(sb.steps[i].ErrorFunc)
+		}
+	}
+	return sb.finalOutput(fn)
+}
+
+// executeValidationFunction executes the validation function for a step's data key
+func (sb *ServiceBuilder) executeValidationFunction(dataKey string, fn func(interface{}) error) error {
+	validationData, ok := sb.businessLogicHolder.localServiceData[dataKey]
+	if !ok {
+		loggermdl.LogError("Data Not Found For Validation: " + dataKey)
+		return errormdl.Wrap("Data Not Found For Validation: " + dataKey)
+	}
+	if fn == nil {
+		return validationmdl.ValidateStruct(validationData)
+	}
+	return fn(validationData)
+}
+
+// executeErrorFunction executes when rule evaluation fails for a step
+func (sb *ServiceBuilder) executeErrorFunction(fn func() (map[string]interface{}, error)) (*interface{}, error) {
+	if fn == nil {
+		loggermdl.LogError("Data Validation failed and No recovery function found")
+		return nil, errormdl.Wrap("Data Validation failed and No recovery function found")
+	}
+	_, err := fn()
+	if errormdl.CheckErr(err) != nil {
+		loggermdl.LogError(err)
+		return nil, errormdl.CheckErr(err)
+	}
+	return sb.businessLogicHolder.GetFinalData(), nil
+}
+
+// finalOutput returns the final output
+func (sb *ServiceBuilder) finalOutput(fn FinalStepProcessOutput) (*interface{}, error) {
+	if fn == nil {
+		return sb.businessLogicHolder.GetFinalData(), nil
+	}
+	return fn(sb.businessLogicHolder)
+}
+
+// Principal - Object inside JWT token
+type Principal struct {
+	UserID            string    `json:"userId"`
+	Groups            []string  `json:"groups"`
+	SessionExpiration time.Time `json:"sessionExpiration"`
+	ClientIP          string    `json:"clientIP"`
+	HitsCount         int       `json:"hitsCount"`
+	Token             string    `json:"token"`
+	Metadata          string    `json:"metadata"`
+}
+
+// // SetPrincipalObject - Set Principal object to BLHolder
+// func (ab *AbstractBusinessLogicHolder) SetPrincipalObject(object *Principal) {
+// 	ab.pricipalObject = *object
+// }
+
+// GetPrincipalObject - return Principal object from BLHolder
+func (ab *AbstractBusinessLogicHolder) GetPrincipalObject() *Principal {
+	return &ab.pricipalObject
+}
+
+// APIResponse - response of an API call
+type APIResponse struct {
+	StatusCode int
+	Body       []byte
+	Headers    []Header
+}
+// Header - response header key and values
+type Header struct {
+	Key   string
+	Value []string
+}
+
+// GetResposeObject - returns the Response object from BLHolder
+func (ab *AbstractBusinessLogicHolder) GetResposeObject(responseKey string) (*APIResponse, error) {
+	tmp, ok := ab.localServiceData[responseKey]
+	if !ok {
+		return &APIResponse{}, errormdl.Wrap("Response not found for key: " + responseKey)
+	}
+	value, ok := tmp.(APIResponse)
+	if !ok {
+		return &APIResponse{}, errormdl.Wrap("Data inside memory is not of type APIResponse: " + responseKey)
+	}
+	return &value, nil
+}
+
+// FetchValues resolves a value for the given key name and query: Principal fields, GlobalConfig entries, built-ins (~tokenUserId, ~TIME, ~GUID, EMPTYJSON), or a stored resultset
+func (ab *AbstractBusinessLogicHolder) FetchValues(keyName, query string) (gjson.Result, int, error) {
+	var result gjson.Result
+	if keyName == "Principal" {
+		if query == "userId" {
+			result = gjson.Parse(`{"loginId":"` + ab.GetPrincipalObject().UserID + `"}`).Get("loginId")
+
+		}
+		if query == "groups" {
+			var err error
+			groupData := ""
+			for _, group := range ab.GetPrincipalObject().Groups {
+				groupData, err = sjson.Set(groupData, "-1", group)
+				if err != nil {
+					loggermdl.LogError(err)
+					return result, errormdl.SJSONERROR, err
+				}
+			}
+			result = gjson.Parse(groupData)
+		}
+		if query == "clientIP" {
+			result = gjson.Parse(`{"clientIP":"` + ab.GetPrincipalObject().ClientIP + `"}`).Get("clientIP")
+		}
+		if query == "token" {
+			result = gjson.Parse(`{"token":"` + ab.GetPrincipalObject().Token + `"}`).Get("token")
+		}
+		if strings.Contains(query, "metadata") {
+			if strings.Contains(query, ":") {
+				token := strings.Split(query, ":")
+				if len(token) > 0 {
+					result = gjson.Parse(ab.GetPrincipalObject().Metadata).Get(token[1])
+				}
+			} else {
+				result = gjson.Parse(ab.GetPrincipalObject().Metadata)
+			}
+		}
+
+		return result, errormdl.NOERROR, nil
+	}
+
+	if keyName == "GlobalConfig" {
+		result, ok := ab.GetGlobalConfigString(query)
+		if !ok {
+			loggermdl.LogError("Key Not Found in global config: " + query)
+			return gjson.Parse(result), errormdl.KEYNOTFOUND, errormdl.Wrap("Key Not Found in global config: " + query)
+		}
+		loggermdl.LogInfo(result)
+		return gjson.Parse(`{"config":"` + result + `"}`).Get("config"), errormdl.NOERROR, nil
+	}
+
+	if keyName == "~tokenUserId" {
+		return gjson.Parse(`{"loginId":"` + ab.GetPrincipalObject().UserID + `"}`).Get("loginId"), errormdl.NOERROR, nil
+	}
+
+	if keyName == "~TIME" {
+		TIME, err := sjson.Set("{}", "time", time.Now().Unix())
+		if errormdl.CheckErr(err) != nil {
+			loggermdl.LogError(err)
+			return result, errormdl.SJSONERROR, errormdl.CheckErr(err)
+		}
+		return gjson.Parse(TIME).Get("time"), errormdl.NOERROR, nil
+	}
+
+	if keyName == "~GUID" {
+		return gjson.Parse(`{"guid":"` + guidmdl.GetGUID() + `"}`).Get("guid"), errormdl.NOERROR, nil
+	}
+
+	if keyName == "EMPTYJSON" {
+		return gjson.Parse("{}"), errormdl.NOERROR, nil
+	}
+
+	rs, ok := ab.GetDataResultset(keyName)
+	if !ok {
+		loggermdl.LogError("Key Not Found: " + keyName)
+		return result, errormdl.KEYNOTFOUND, errormdl.Wrap("Key Not Found: " + keyName)
+	}
+	if query != "*" {
+		result = rs.Get(query)
+	} else {
+		result = *rs
+	}
+	return result, errormdl.NOERROR, nil
+}
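+
+// Query sketch following the branches above; "daodata1" is a hypothetical
+// resultset key and the second return value is an errormdl error code:
+//
+//	userID, _, err := ab.FetchValues("Principal", "userId")
+//	now, _, err := ab.FetchValues("~TIME", "")
+//	rows, _, err := ab.FetchValues("daodata1", "#[isEnabled==true]#")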
+
+// GetAbLocalServiceData returns the local service data map
+func (ab *AbstractBusinessLogicHolder) GetAbLocalServiceData() map[string]interface{} {
+	return ab.localServiceData
+}
diff --git a/v2/servicebuildermdl/servicebuildermdl_test.go b/v2/servicebuildermdl/servicebuildermdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..52a17fdd693aad292686be0026eefec67014d5ad
--- /dev/null
+++ b/v2/servicebuildermdl/servicebuildermdl_test.go
@@ -0,0 +1,908 @@
+package servicebuildermdl
+
+import (
+	"encoding/json"
+	"errors"
+	"strconv"
+	"testing"
+	"time"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	dalmdl "corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/dalmdl/dao"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/tidwall/gjson"
+)
+
+var fastCache cachemdl.FastCacheHelper
+var loaddata LoadData
+
+func init() {
+	fastCache = cachemdl.FastCacheHelper{}
+	fastCache.Setup(5000, time.Minute*500, time.Minute*500)
+}
+
+// Create a type with the abstract type embedded
+type MyBLLayer struct {
+	AbstractBusinessLogicHolder
+}
+
+// // attach methods to it
+// func (m *MyBLLayer) FindAndVerifyMobileNumber() map[string]interface{} {
+// 	loginID := "noone"
+
+// 	//check in cache
+// 	fresult, okay := fastCache.Get("#[mobileNo==\"7875383220\"].loginId")
+// 	if okay {
+// 		loginID = fresult.(string)
+// 	} else {
+// 		result1, ok := m.GetDataResultset("daodata1")
+
+// 		if !ok {
+// 			loggermdl.LogError("error in convert")
+// 		}
+// 		loginID = result1.Get("#[mobileNo==\"7875383220\"].loginId").String()
+// 		fastCache.Set("#[mobileNo==\"7875383220\"].loginId", loginID)
+// 	}
+
+// 	//add few var in local map and fetch it
+// 	result2, _ := m.GetDataResultset("daodata1")
+// 	m.SetResultset("myresult2", result2)
+// 	m.LocalServiceData["myint"] = 5
+// 	m.GetDataResultset("myresult2")
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+// 	m.GetDataResultset("myresult2")
+
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+// 	cnt1, _ := m.GetDataInt("hi")
+// 	if cnt1 > 1 {
+// 		fmt.Println("hmm")
+// 	}
+
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+// 	cnt2, _ := m.GetDataInt("hi")
+// 	if cnt2 > 1 {
+// 		fmt.Println("hmm")
+// 	}
+
+// 	m.SetFinalData(loginID)
+
+// 	return map[string]interface{}{
+// 		"owner": loginID,
+// 	}
+
+// }
+
+// Now we will write the DAO
+func GetUsersInfo() (*gjson.Result, error) {
+	return dalmdl.GetDAO().FilePath("../testingdata/users.json").IsCacheable().Query("*").Run()
+}
+
+// func TestService(t *testing.T) {
+// 	mb := MyBLLayer{}
+// 	mb.New(&Principal{})
+
+// 	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+// 		data, err := GetUsersInfo()
+// 		if err != nil {
+// 			loggermdl.LogError(err)
+// 			return err
+// 		}
+// 		mb.LocalServiceData["daodata1"] = data
+// 		return nil
+// 	}
+
+// 	// GetSB("mysrv", &mb.AbstractBusinessLogicHolder).
+// 	// 	AddStep("Check Mobile", "owner in ('SystemIntegrator')", loaddata, mb.FindAndVerifyMobileNumber, mb.EchoBL)
+
+// 	// loaddata := func(ab *AbstractBusinessLogicHolder) {
+// 	// 	mb.LocalServiceData["daodata1"] = GetUsersInfo()
+// 	// }
+
+// 	// finalStep := func(ab *AbstractBusinessLogicHolder) *interface{} {
+// 	// 	s :=
+// 	// 	return &s
+// 	// }
+
+// 	sb, err := GetSB("mysrv", &mb.AbstractBusinessLogicHolder).
+// 		AddStep("Check Mobile", "owner = 'SystemIntegrator'", loaddata, mb.FindAndVerifyMobileNumber, mb.EchoBL).
+// 		AddStep("Test STEP 2", "owner = 'SystemIntegrator'", nil, mb.FindAndVerifyMobileNumber, mb.EchoBL).
+// 		Run().FinalOutput(nil)
+// 	if err != nil {
+// 		loggermdl.LogError(err)
+// 	} else {
+// 		assert.Equal(t, "SystemIntegrator", *sb, "this should be same")
+
+// 	}
+
+// }
+
+/*****************************************************/
+
+func (m *MyBLLayer) BL1() (map[string]interface{}, error) {
+	condition, ok1 := m.GetDataBool("condition")
+	rs, ok := m.GetDataResultset("daodata1")
+	if ok && ok1 {
+		tmp := rs.Get("#[isEnabled==" + strconv.FormatBool(condition) + "]#").Get("#.loginId")
+		loggermdl.LogInfo(tmp)
+		m.SetResultset("BL1Data", &tmp)
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL2() (map[string]interface{}, error) {
+	loginIDs, ok1 := m.GetDataResultset("BL1Data")
+	rs, ok := m.GetDataResultset("daodata1")
+	if ok && ok1 {
+		var tmp []interface{}
+		loginIDs.ForEach(func(key, value gjson.Result) bool {
+			tmp = append(tmp, rs.Get("#[loginId=="+value.String()+"]#").Value())
+			return true
+		})
+		m.SetFinalData(tmp)
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL3() (map[string]interface{}, error) {
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	condition, ok1 := m.GetDataBool("condition")
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	rs, ok := m.GetDataResultset("daodata1")
+	if ok && ok1 {
+		tmp := rs.Get("#[isEnabled==" + strconv.FormatBool(condition) + "]#").Get("#.loginId")
+		loggermdl.LogInfo(tmp)
+		m.SetResultset("BL1Data", &tmp)
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL4() (map[string]interface{}, error) {
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	condition, ok1 := m.GetDataBool("condition")
+	rs, ok := m.GetDataResultset("daodata1")
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+	if ok && ok1 {
+		tmp := rs.Get("#[isEnabled==" + strconv.FormatBool(condition) + "]#").Get("#.loginId")
+		loggermdl.LogInfo(tmp)
+		m.SetResultset("BL1Data", &tmp)
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL5() (map[string]interface{}, error) {
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	condition, ok1 := m.GetDataBool("condition")
+	rs, ok := m.GetDataResultset("daodata1")
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+	if ok && ok1 {
+		tmp := rs.Get("#[isEnabled==" + strconv.FormatBool(condition) + "]#").Get("#.loginId")
+		loggermdl.LogInfo(tmp)
+		m.SetResultset("BL1Data", &tmp)
+	} else {
+		return nil, errors.New("NO_DATA_FOUND")
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL6() (map[string]interface{}, error) {
+	condition, ok1 := m.GetDataBool("condition")
+	rs, ok := m.GetDataResultset("daodata1")
+	if ok && ok1 {
+		tmp := rs.Get("#[isEnabled==" + strconv.FormatBool(condition) + "]#").Get("#").Int()
+		loggermdl.LogInfo(tmp)
+		m.SetCustomData("BL1Data", tmp)
+		loginIDs, ok1 := m.GetDataInt("BL1Data")
+		loggermdl.LogInfo(loginIDs, ok1)
+		if ok1 {
+			m.SetFinalData(loginIDs)
+		}
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL7() (map[string]interface{}, error) {
+	loginIDs, ok1 := m.GetDataInt("BL1Data")
+	loggermdl.LogInfo(loginIDs, ok1)
+	if ok1 {
+		m.SetFinalData(loginIDs)
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL8() (map[string]interface{}, error) {
+	rs, ok := m.GetDataResultset("daodata1")
+	if ok {
+		tmp := rs.Get("#[isEnabled==true]#").Get("#").Value()
+		m.SetCustomData("BL1Data", tmp)
+		loginIDs, ok1 := m.GetDataInt("BL1Data")
+		if ok1 {
+			m.SetFinalData(loginIDs)
+		}
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL9() (map[string]interface{}, error) {
+	loginIDs, ok1 := m.GetDataInterface("BL1Data1")
+	loggermdl.LogInfo(loginIDs, ok1)
+	if ok1 {
+		m.SetFinalData(loginIDs)
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func (m *MyBLLayer) BL10() (map[string]interface{}, error) {
+	loginId, ok1 := m.GetDataString("loginId")
+	rs, ok := m.GetDataResultset("daodata1")
+	if ok && ok1 {
+		tmp := rs.Get("#[loginId==" + loginId + "]#").Value()
+		loggermdl.LogInfo(tmp)
+		m.SetFinalData(tmp)
+
+	}
+	return map[string]interface{}{
+		"$1": true,
+	}, nil
+}
+
+func Test1Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+	mb.SetCustomData("condition", false)
+
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL1, mb.EchoBL).
+		AddStep("Step2", "$1 == true", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+}
+
+func Test2Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "abc == true", loaddata, mb.BL1, mb.EchoBL).
+		AddStep("Step2", "abc == true", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.Error(t, Err, "Error should be returned")
+
+}
+
+func Test3Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL1, mb.EchoBL).
+		AddStep("Step2", "$2 == true", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.Error(t, Err, "Error should be returned")
+
+}
+
+func Test4Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL1, mb.EchoBL).
+		AddStep("Step2", "$1 == false", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test5Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL3, mb.EchoBL).
+		AddStep("Step2", "$1 == false", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test6Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL4, mb.EchoBL).
+		AddStep("Step2", "$1 == false", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test7Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL5, mb.EchoBL).
+		AddStep("Step2", "$1 == false", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.Error(t, Err, "Error should be returned")
+
+}
+
+func Test8Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		return errors.New("CUSTOM_DAO_ERROR")
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL1, mb.EchoBL).
+		AddStep("Step2", "$1 == false", nil, mb.BL2, mb.EchoBL).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.Error(t, Err, "Error should be returned")
+
+}
+
+func Test9Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		// mb.LocalServiceData["daodata1"] = data
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test1Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == false", loaddata, mb.BL1, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL2, mb.ErrorFunction).
+		Run(nil)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.Error(t, Err, "Error should be returned")
+
+}
+
+func (m *MyBLLayer) ErrorFunction() (map[string]interface{}, error) {
+	return nil, errors.New("Error_Func")
+}
+
+func Test10Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	resultFunc := func(ab *AbstractBusinessLogicHolder) (*interface{}, error) {
+		return ab.GetFinalData(), nil
+	}
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test10Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL1, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL2, mb.ErrorFunction).
+		Run(resultFunc)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test11Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	resultFunc := func(ab *AbstractBusinessLogicHolder) (*interface{}, error) {
+		return ab.GetFinalData(), nil
+	}
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test11Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL6, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL7, mb.ErrorFunction).
+		Run(resultFunc)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test12Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	resultFunc := func(ab *AbstractBusinessLogicHolder) (*interface{}, error) {
+		return ab.GetFinalData(), nil
+	}
+	mb.SetCustomData("condition", false)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	ab, Err := GetSB("Test10Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL6, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL7, mb.ErrorFunction).
+		Run(resultFunc)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test13Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	resultFunc := func(ab *AbstractBusinessLogicHolder) (*interface{}, error) {
+		return ab.GetFinalData(), nil
+	}
+	mb.SetCustomData("condition", false)
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	ab, Err := GetSB("Test10Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL6, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL7, mb.ErrorFunction).
+		Run(resultFunc)
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test14Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	resultFunc := func(ab *AbstractBusinessLogicHolder) (*interface{}, error) {
+		return ab.GetFinalData(), nil
+	}
+	mb.SetCustomData("condition", false)
+	ab, Err := GetSB("Test14Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL8, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL9, mb.ErrorFunction).
+		Run(resultFunc)
+
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test15Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	resultFunc := func(ab *AbstractBusinessLogicHolder) (*interface{}, error) {
+		return ab.GetFinalData(), nil
+	}
+	mb.SetCustomData("condition", false)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	_, Err := GetSB("Test15Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL8, mb.ErrorFunction).
+		AddStep("Step2", "$1 == true", nil, mb.BL9, mb.ErrorFunction).
+		Run(resultFunc)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	if Err != nil {
+		loggermdl.LogError(Err)
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test16Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("loginId", "roshanp1")
+	ab, Err := GetSB("Test16Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL10, mb.ErrorFunction).
+		Run(nil)
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test17Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("loginId", "roshanp1")
+	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+	ab, Err := GetSB("Test17Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL10, mb.ErrorFunction).
+		Run(nil)
+	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test18Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("loginId", "roshanp1")
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	ab, Err := GetSB("Test17Service", &mb.AbstractBusinessLogicHolder).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL10, mb.ErrorFunction).
+		Run(nil)
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+type StructToValidate struct {
+	Name string `json:"name" valid:"required,alpha,length(4|8)"`
+	Age  int    `json:"age" valid:"required,range(18|50)"`
+}
+
+func Test19Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	structToValidate := StructToValidate{}
+	structToValidate.Name = "Jarvis"
+	structToValidate.Age = 23
+	mb.SetCustomData("input", structToValidate)
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("loginId", "roshanp1")
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	ab, Err := GetSB("Test17Service", &mb.AbstractBusinessLogicHolder).
+		AddValidation("input", nil).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL10, mb.ErrorFunction).
+		Run(nil)
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.NoError(t, Err, "No error should be returned")
+
+}
+
+func Test20Service(t *testing.T) {
+	mb := MyBLLayer{}
+	mb.New(&Principal{})
+
+	structToValidate := StructToValidate{}
+	structToValidate.Name = "Jarvis"
+	mb.SetCustomData("input", structToValidate)
+	loaddata := func(ab *AbstractBusinessLogicHolder) error {
+		data, err := GetUsersInfo()
+		if err != nil {
+			loggermdl.LogError(err)
+			return err
+		}
+		mb.SetResultset("daodata1", data)
+		return nil
+	}
+
+	mb.SetCustomData("loginId", "roshanp1")
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = true
+	ab, Err := GetSB("Test17Service", &mb.AbstractBusinessLogicHolder).
+		AddValidation("input", nil).
+		AddStep("Step1", "$1 == true", loaddata, mb.BL10, mb.ErrorFunction).
+		Run(nil)
+	errormdl.IsTestingNegetiveCaseOnCheckBool1 = false
+	if Err != nil {
+		loggermdl.LogError(Err)
+	} else {
+		ba, _ := json.Marshal(ab)
+		loggermdl.LogInfo(string(ba))
+	}
+	assert.Error(t, Err, "Error should be returned by the validator")
+
+}
diff --git a/v2/sessionmanagermdl/sessionmanager.go b/v2/sessionmanagermdl/sessionmanager.go
new file mode 100644
index 0000000000000000000000000000000000000000..016db6c2eaa047bc4fb863d6a0d81fb6bc19e633
--- /dev/null
+++ b/v2/sessionmanagermdl/sessionmanager.go
@@ -0,0 +1,162 @@
+package sessionmanagermdl
+
+import (
+	"errors"
+	"time"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/cachemdl"
+)
+
+// Entry is the data stored against a key.
+type Entry struct {
+	Data       gjson.Result `json:"data,omitempty"`
+	Expiration int64        `json:"expiration,omitempty"`
+	ExpiredAT  int64        `json:"expiredAt,omitempty"`
+}
+
+const (
+	// keys for the entry object
+	KEY_DATA       = "data"
+	KEY_EXPIREDAT  = "expiredAt"
+	KEY_EXPIRATION = "expiration"
+)
+
+var store cachemdl.Cacher
+
+var ErrSessionNotFound = errors.New("SESSION_NOT_FOUND")
+var ErrInvalidDataType = errors.New("INVALID_DATA_TYPE")
+
+// Init initializes the session manager with the provided cache. Calls after the first initialization have no effect.
+func Init(cache cachemdl.Cacher) {
+	if store != nil {
+		return
+	}
+
+	store = cache
+}
+
+// NewEntry prepares the object required to store data in session.
+//
+// The `exp` parameter is interpreted in seconds, e.g. pass `5` for 5 seconds.
+func NewEntry(val gjson.Result, exp int64) Entry {
+	duration := time.Duration(exp) * time.Second
+	deadLine := time.Now().Add(duration).Unix()
+	return Entry{
+		Data:       val,
+		Expiration: exp,
+		ExpiredAT:  deadLine,
+	}
+}
+
+// NewRedisEntry prepares the entry for redis cache. This is required because redis accepts a byte array.
+func NewRedisEntry(entry Entry) string {
+	var data string
+
+	// the error can be ignored here as we have valid keys and data values
+	data, _ = sjson.Set(data, KEY_DATA, entry.Data.Value())
+	data, _ = sjson.Set(data, KEY_EXPIRATION, entry.Expiration)
+	data, _ = sjson.Set(data, KEY_EXPIREDAT, entry.ExpiredAT)
+
+	return data
+}
+
+// ToObject returns a cache entry as a map. This is preferable to sjson.Set(), which would also require a gjson.Parse() round trip.
+func ToObject(entry Entry) map[string]interface{} {
+	return map[string]interface{}{
+		KEY_DATA:       entry.Data.Value(),
+		KEY_EXPIRATION: entry.Expiration,
+		KEY_EXPIREDAT:  entry.ExpiredAT,
+	}
+}
+
+// Store adds or updates the entry against the provided key.
+func Store(key string, entry Entry) {
+	duration := time.Duration(entry.Expiration) * time.Second
+
+	// If the session manager uses a redis cache, the data field (gjson.Result) is saved as is.
+	// That adds irrelevant fields to the redis cache, which we then get back on the retrieve operation.
+	// The following conversion marshals the data correctly; redis only accepts string/[]byte values.
+	if store.Type() == cachemdl.TypeRedisCache {
+		store.SetWithExpiration(key, NewRedisEntry(entry), duration)
+		return
+	}
+
+	store.SetWithExpiration(key, entry, duration)
+}
+
+// Retrieve returns the entry present against the provided key. If the key is not available or the stored data is of an unsupported type, a non-nil error is returned.
+func Retrieve(key string) (Entry, error) {
+	data, ok := store.Get(key)
+	if !ok {
+		return Entry{}, ErrSessionNotFound
+	}
+
+	switch v := data.(type) {
+	case string: // for result from redis cache
+		res := gjson.Parse(v)
+		return Entry{
+			Data:       res.Get(KEY_DATA),
+			Expiration: res.Get(KEY_EXPIRATION).Int(),
+			ExpiredAT:  res.Get(KEY_EXPIREDAT).Int(),
+		}, nil
+
+	case Entry: // for result from fastcache
+		return v, nil
+
+	default:
+		return Entry{}, ErrInvalidDataType
+	}
+}
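+
+// A minimal usage sketch (illustrative only; the key and payload below are
+// assumptions, not part of this package):
+//
+//	sessionmanagermdl.Init(cache) // any cachemdl.Cacher implementation
+//	entry := sessionmanagermdl.NewEntry(gjson.Parse(`{"role":"admin"}`), 300) // expires in 300s
+//	sessionmanagermdl.Store("user-42", entry)
+//	got, err := sessionmanagermdl.Retrieve("user-42") // Entry{}, ErrSessionNotFound when absent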
+
+// RetrieveAll returns all entries present in memory. **Not for production use. May add performance costs**
+func RetrieveAll() map[string]interface{} {
+	return store.GetAll()
+}
+
+// RetrieveAndExtend returns the entry and extends its expiration by the provided number of seconds, but only if the remaining time is less than extendBy.
+// If extendBy <= 0, it behaves the same as the Retrieve function.
+func RetrieveAndExtend(key string, extendBy int64) (Entry, error) {
+	entry, err := Retrieve(key)
+	if err != nil {
+		return Entry{}, err
+	}
+
+	if extendBy > 0 {
+		timeRemaining := entry.ExpiredAT - time.Now().Unix()
+
+		if timeRemaining < extendBy {
+			// update with new expiration
+			entry.ExpiredAT = time.Now().Add(time.Second * time.Duration(extendBy)).Unix()
+			entry.Expiration = extendBy
+			Store(key, entry)
+		}
+	}
+
+	return entry, nil
+}
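+
+// Sliding-session sketch: RetrieveAndExtend("user-42", 120) tops the entry back
+// up to 120s whenever less than 120s of its lifetime remain ("user-42" is an
+// assumed key).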
+
+// RetrieveAndDelete deletes the entry after first retrieval
+func RetrieveAndDelete(key string) (Entry, error) {
+	entry, err := Retrieve(key)
+	if err != nil {
+		return Entry{}, err
+	}
+
+	store.Delete(key)
+
+	return entry, nil
+}
+
+// Delete removes the entry from the session manager. If the key is not present, the error `ErrSessionNotFound` is returned; the caller can ignore it if this is acceptable.
+func Delete(key string) error {
+	_, ok := store.Get(key)
+	if !ok {
+		return ErrSessionNotFound
+	}
+
+	store.Delete(key)
+	return nil
+}
diff --git a/v2/sjsonhelpermdl/sjsonhelpermdl.go b/v2/sjsonhelpermdl/sjsonhelpermdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..26c9e287aec62e9e6882472d79bc231cc0ddc3ff
--- /dev/null
+++ b/v2/sjsonhelpermdl/sjsonhelpermdl.go
@@ -0,0 +1,695 @@
+package sjsonhelpermdl
+
+import (
+	jsongo "encoding/json"
+	"reflect"
+	"strconv"
+	"unsafe"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/tidwall/gjson"
+)
+
+// SetMultiple sets multiple values in the input json in a single call
+func SetMultiple(input string, keys []string, values []interface{}) (string, error) {
+	if len(keys) != len(values) {
+		loggermdl.LogError("Length og keys and values should be equal")
+		return input, errormdl.Wrap("Length og keys and values should be equal")
+	}
+	var setError error
+	for i, key := range keys {
+		input, setError = Set(input, key, values[i])
+		if errormdl.CheckErr(setError) != nil {
+			loggermdl.LogError(setError)
+			return input, errormdl.CheckErr(setError)
+		}
+	}
+	return input, nil
+}
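+
+// Illustrative sketch (the field names are arbitrary examples):
+//
+//	out, err := SetMultiple(`{}`, []string{"name", "age"}, []interface{}{"abc", 23})
+//	// out == `{"name":"abc","age":23}` when err is nil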
+
+// SetInGJSON sets multiple values in the input gjson.Result in a single call
+func SetInGJSON(input gjson.Result, keys []string, values []interface{}) (*gjson.Result, error) {
+	if len(keys) != len(values) {
+		loggermdl.LogError("Length og keys and values should be equal")
+		return &input, errormdl.Wrap("Length og keys and values should be equal")
+	}
+	inputData := input.String()
+	var setError error
+	for i, key := range keys {
+		inputData, setError = Set(inputData, key, values[i])
+		if errormdl.CheckErr(setError) != nil {
+			loggermdl.LogError(setError)
+			return &input, errormdl.CheckErr(setError)
+		}
+	}
+	input = gjson.Parse(inputData)
+	return &input, nil
+}
+
+type errorType struct {
+	msg string
+}
+
+func (err *errorType) Error() string {
+	return err.msg
+}
+
+// Options represents additional options for the Set and Delete functions.
+type Options struct {
+	// Optimistic is a hint that the value likely exists which
+	// allows for the sjson to perform a fast-track search and replace.
+	Optimistic bool
+	// ReplaceInPlace is a hint to replace the input json rather than
+	// allocate a new json byte slice. When this field is specified
+	// the input json will no longer be valid and it should not be used.
+	// In the case when the destination slice doesn't have enough free
+	// bytes to replace the data in place, a new bytes slice will be
+	// created under the hood.
+	// The Optimistic flag must be set to true and the input must be a
+	// byte slice in order to use this field.
+	ReplaceInPlace bool
+}
+
+type pathResult struct {
+	part  string // current key part
+	path  string // remaining path
+	force bool   // force a string key
+	more  bool   // there is more path to parse
+}
+
+func parsePath(path string, isStringKey bool) (pathResult, error) {
+	var r pathResult
+	if len(path) > 0 && path[0] == ':' {
+		r.force = true
+		path = path[1:]
+	}
+	for i := 0; i < len(path); i++ {
+		if !isStringKey && path[i] == '.' {
+			r.part = path[:i]
+			r.path = path[i+1:]
+			r.more = true
+			return r, nil
+		}
+		if path[i] == '*' || path[i] == '?' {
+			return r, &errorType{"wildcard characters not allowed in path"}
+		} else if path[i] == '#' {
+			return r, &errorType{"array access character not allowed in path"}
+		}
+		if path[i] == '\\' {
+			// go into escape mode. this is a slower path that
+			// strips off the escape character from the part.
+			epart := []byte(path[:i])
+			i++
+			if i < len(path) {
+				epart = append(epart, path[i])
+				i++
+				for ; i < len(path); i++ {
+					if path[i] == '\\' {
+						i++
+						if i < len(path) {
+							epart = append(epart, path[i])
+						}
+						continue
+					} else if path[i] == '.' {
+						r.part = string(epart)
+						r.path = path[i+1:]
+						r.more = true
+						return r, nil
+					} else if path[i] == '*' || path[i] == '?' {
+						return r, &errorType{
+							"wildcard characters not allowed in path"}
+					} else if path[i] == '#' {
+						return r, &errorType{
+							"array access character not allowed in path"}
+					}
+					epart = append(epart, path[i])
+				}
+			}
+			// append the last part
+			r.part = string(epart)
+			return r, nil
+		}
+	}
+	r.part = path
+	return r, nil
+}
+
+func mustMarshalString(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] < ' ' || s[i] > 0x7f || s[i] == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+// appendStringify makes a json string and appends to buf.
+func appendStringify(buf []byte, s string) []byte {
+	if mustMarshalString(s) {
+		b, _ := jsongo.Marshal(s)
+		return append(buf, b...)
+	}
+	buf = append(buf, '"')
+	buf = append(buf, s...)
+	buf = append(buf, '"')
+	return buf
+}
+
+// appendBuild builds a json block from a json path.
+func appendBuild(buf []byte, array bool, paths []pathResult, raw string,
+	stringify bool) []byte {
+	if !array {
+		buf = appendStringify(buf, paths[0].part)
+		buf = append(buf, ':')
+	}
+	if len(paths) > 1 {
+		n, numeric := atoui(paths[1])
+		if numeric || (!paths[1].force && paths[1].part == "-1") {
+			buf = append(buf, '[')
+			buf = appendRepeat(buf, "null,", n)
+			buf = appendBuild(buf, true, paths[1:], raw, stringify)
+			buf = append(buf, ']')
+		} else {
+			buf = append(buf, '{')
+			buf = appendBuild(buf, false, paths[1:], raw, stringify)
+			buf = append(buf, '}')
+		}
+	} else {
+		if stringify {
+			buf = appendStringify(buf, raw)
+		} else {
+			buf = append(buf, raw...)
+		}
+	}
+	return buf
+}
+
+// atoui does a rip conversion of string -> unsigned int.
+func atoui(r pathResult) (n int, ok bool) {
+	if r.force {
+		return 0, false
+	}
+	for i := 0; i < len(r.part); i++ {
+		if r.part[i] < '0' || r.part[i] > '9' {
+			return 0, false
+		}
+		n = n*10 + int(r.part[i]-'0')
+	}
+	return n, true
+}
+
+// appendRepeat repeats string "n" times and appends to buf.
+func appendRepeat(buf []byte, s string, n int) []byte {
+	for i := 0; i < n; i++ {
+		buf = append(buf, s...)
+	}
+	return buf
+}
+
+// trim does a rip trim
+func trim(s string) string {
+	for len(s) > 0 {
+		if s[0] <= ' ' {
+			s = s[1:]
+			continue
+		}
+		break
+	}
+	for len(s) > 0 {
+		if s[len(s)-1] <= ' ' {
+			s = s[:len(s)-1]
+			continue
+		}
+		break
+	}
+	return s
+}
+
+// deleteTailItem deletes the previous key or comma.
+func deleteTailItem(buf []byte) ([]byte, bool) {
+loop:
+	for i := len(buf) - 1; i >= 0; i-- {
+		// look for either a ',',':','['
+		switch buf[i] {
+		case '[':
+			return buf, true
+		case ',':
+			return buf[:i], false
+		case ':':
+			// delete tail string
+			i--
+			for ; i >= 0; i-- {
+				if buf[i] == '"' {
+					i--
+					for ; i >= 0; i-- {
+						if buf[i] == '"' {
+							i--
+							if i >= 0 && buf[i] == '\\' { // compare the byte, not the index, for an escape
+								i--
+								continue
+							}
+							for ; i >= 0; i-- {
+								// look for either a ',','{'
+								switch buf[i] {
+								case '{':
+									return buf[:i+1], true
+								case ',':
+									return buf[:i], false
+								}
+							}
+						}
+					}
+					break
+				}
+			}
+			break loop
+		}
+	}
+	return buf, false
+}
+
+var errNoChange = &errorType{"no change"}
+
+func appendRawPaths(buf []byte, jstr string, paths []pathResult, raw string,
+	stringify, del bool) ([]byte, error) {
+	var err error
+	var res gjson.Result
+	var found bool
+	if del {
+		if paths[0].part == "-1" && !paths[0].force {
+			res = gjson.Get(jstr, "#")
+			if res.Int() > 0 {
+				res = gjson.Get(jstr, strconv.FormatInt(int64(res.Int()-1), 10))
+				found = true
+			}
+		}
+	}
+	if !found {
+		res = gjson.Get(jstr, paths[0].part)
+	}
+	if res.Index > 0 {
+		if len(paths) > 1 {
+			buf = append(buf, jstr[:res.Index]...)
+			buf, err = appendRawPaths(buf, res.Raw, paths[1:], raw,
+				stringify, del)
+			if err != nil {
+				return nil, err
+			}
+			buf = append(buf, jstr[res.Index+len(res.Raw):]...)
+			return buf, nil
+		}
+		buf = append(buf, jstr[:res.Index]...)
+		var exidx int // additional forward stripping
+		if del {
+			var delNextComma bool
+			buf, delNextComma = deleteTailItem(buf)
+			if delNextComma {
+				i, j := res.Index+len(res.Raw), 0
+				for ; i < len(jstr); i, j = i+1, j+1 {
+					if jstr[i] <= ' ' {
+						continue
+					}
+					if jstr[i] == ',' {
+						exidx = j + 1
+					}
+					break
+				}
+			}
+		} else {
+			if stringify {
+				buf = appendStringify(buf, raw)
+			} else {
+				buf = append(buf, raw...)
+			}
+		}
+		buf = append(buf, jstr[res.Index+len(res.Raw)+exidx:]...)
+		return buf, nil
+	}
+	if del {
+		return nil, errNoChange
+	}
+	n, numeric := atoui(paths[0])
+	isempty := true
+	for i := 0; i < len(jstr); i++ {
+		if jstr[i] > ' ' {
+			isempty = false
+			break
+		}
+	}
+	if isempty {
+		if numeric {
+			jstr = "[]"
+		} else {
+			jstr = "{}"
+		}
+	}
+	jsres := gjson.Parse(jstr)
+	if jsres.Type != gjson.JSON {
+		if numeric {
+			jstr = "[]"
+		} else {
+			jstr = "{}"
+		}
+		jsres = gjson.Parse(jstr)
+	}
+	var comma bool
+	for i := 1; i < len(jsres.Raw); i++ {
+		if jsres.Raw[i] <= ' ' {
+			continue
+		}
+		if jsres.Raw[i] == '}' || jsres.Raw[i] == ']' {
+			break
+		}
+		comma = true
+		break
+	}
+	switch jsres.Raw[0] {
+	default:
+		return nil, &errorType{"json must be an object or array"}
+	case '{':
+		buf = append(buf, '{')
+		buf = appendBuild(buf, false, paths, raw, stringify)
+		if comma {
+			buf = append(buf, ',')
+		}
+		buf = append(buf, jsres.Raw[1:]...)
+		return buf, nil
+	case '[':
+		var appendit bool
+		if !numeric {
+			if paths[0].part == "-1" && !paths[0].force {
+				appendit = true
+			} else {
+				return nil, &errorType{
+					"cannot set array element for non-numeric key '" +
+						paths[0].part + "'"}
+			}
+		}
+		if appendit {
+			njson := trim(jsres.Raw)
+			if njson[len(njson)-1] == ']' {
+				njson = njson[:len(njson)-1]
+			}
+			buf = append(buf, njson...)
+			if comma {
+				buf = append(buf, ',')
+			}
+
+			buf = appendBuild(buf, true, paths, raw, stringify)
+			buf = append(buf, ']')
+			return buf, nil
+		}
+		buf = append(buf, '[')
+		ress := jsres.Array()
+		for i := 0; i < len(ress); i++ {
+			if i > 0 {
+				buf = append(buf, ',')
+			}
+			buf = append(buf, ress[i].Raw...)
+		}
+		if len(ress) == 0 {
+			buf = appendRepeat(buf, "null,", n-len(ress))
+		} else {
+			buf = appendRepeat(buf, ",null", n-len(ress))
+			if comma {
+				buf = append(buf, ',')
+			}
+		}
+		buf = appendBuild(buf, true, paths, raw, stringify)
+		buf = append(buf, ']')
+		return buf, nil
+	}
+}
+
+func isOptimisticPath(path string) bool {
+	for i := 0; i < len(path); i++ {
+		if path[i] < '.' || path[i] > 'z' {
+			return false
+		}
+		if path[i] > '9' && path[i] < 'A' {
+			return false
+		}
+		if path[i] > 'z' {
+			return false
+		}
+	}
+	return true
+}
+
+func set(jstr, path, raw string,
+	stringify, del, optimistic, inplace, isStringKey bool) ([]byte, error) {
+	if path == "" {
+		return nil, &errorType{"path cannot be empty"}
+	}
+	if !del && optimistic && isOptimisticPath(path) {
+		res := gjson.Get(jstr, path)
+		if res.Exists() && res.Index > 0 {
+			sz := len(jstr) - len(res.Raw) + len(raw)
+			if stringify {
+				sz += 2
+			}
+			if inplace && sz <= len(jstr) {
+				if !stringify || !mustMarshalString(raw) {
+					jsonh := *(*reflect.StringHeader)(unsafe.Pointer(&jstr))
+					jsonbh := reflect.SliceHeader{
+						Data: jsonh.Data, Len: jsonh.Len, Cap: jsonh.Len}
+					jbytes := *(*[]byte)(unsafe.Pointer(&jsonbh))
+					if stringify {
+						jbytes[res.Index] = '"'
+						copy(jbytes[res.Index+1:], []byte(raw))
+						jbytes[res.Index+1+len(raw)] = '"'
+						copy(jbytes[res.Index+1+len(raw)+1:],
+							jbytes[res.Index+len(res.Raw):])
+					} else {
+						copy(jbytes[res.Index:], []byte(raw))
+						copy(jbytes[res.Index+len(raw):],
+							jbytes[res.Index+len(res.Raw):])
+					}
+					return jbytes[:sz], nil
+				}
+				return nil, nil
+			}
+			buf := make([]byte, 0, sz)
+			buf = append(buf, jstr[:res.Index]...)
+			if stringify {
+				buf = appendStringify(buf, raw)
+			} else {
+				buf = append(buf, raw...)
+			}
+			buf = append(buf, jstr[res.Index+len(res.Raw):]...)
+			return buf, nil
+		}
+	}
+	// parse the path, make sure that it does not contain invalid characters
+	// such as '#', '?', '*'
+	paths := make([]pathResult, 0, 4)
+	r, err := parsePath(path, isStringKey)
+	if err != nil {
+		return nil, err
+	}
+	paths = append(paths, r)
+	for r.more {
+		if r, err = parsePath(r.path, isStringKey); err != nil {
+			return nil, err
+		}
+		paths = append(paths, r)
+	}
+
+	njson, err := appendRawPaths(nil, jstr, paths, raw, stringify, del)
+	if err != nil {
+		return nil, err
+	}
+	return njson, nil
+}
+
+// Set sets a json value for the specified path.
+// A path is in dot syntax, such as "name.last" or "age".
+// This function expects that the json is well-formed, and does not validate.
+// Invalid json will not panic, but it may return unexpected results.
+// An error is returned if the path is not valid.
+//
+// A path is a series of keys separated by a dot.
+//
+//  {
+//    "name": {"first": "Tom", "last": "Anderson"},
+//    "age":37,
+//    "children": ["Sara","Alex","Jack"],
+//    "friends": [
+//      {"first": "James", "last": "Murphy"},
+//      {"first": "Roger", "last": "Craig"}
+//    ]
+//  }
+//  "name.last"          >> "Anderson"
+//  "age"                >> 37
+//  "children.1"         >> "Alex"
+//
+func Set(json, path string, value interface{}) (string, error) {
+	return SetOptions(json, path, value, nil, false)
+}
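+
+// For instance, a sketch against the document above:
+//
+//	out, _ := Set(`{"name":{"last":"Anderson"}}`, "name.last", "Smith")
+//	// out == `{"name":{"last":"Smith"}}`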
+
+// SetAsAStringKey works like Set but treats the entire path as one literal key, so dots in the path are not interpreted as separators
+func SetAsAStringKey(json, path string, value interface{}) (string, error) {
+	return SetOptions(json, path, value, nil, true)
+}
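+
+// Unlike Set, the whole path is a single literal key here, e.g. (sketch):
+//
+//	out, _ := SetAsAStringKey(`{}`, "name.last", "Smith")
+//	// out == `{"name.last":"Smith"}`, not `{"name":{"last":"Smith"}}`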
+
+// SetOptions sets a json value for the specified path with options.
+// A path is in dot syntax, such as "name.last" or "age".
+// This function expects that the json is well-formed, and does not validate.
+// Invalid json will not panic, but it may return unexpected results.
+// An error is returned if the path is not valid.
+func SetOptions(json, path string, value interface{},
+	opts *Options, isStringKey bool) (string, error) {
+	if opts != nil {
+		if opts.ReplaceInPlace {
+			// it's not safe to replace bytes in-place for strings
+			// copy the Options and set options.ReplaceInPlace to false.
+			nopts := *opts
+			opts = &nopts
+			opts.ReplaceInPlace = false
+		}
+	}
+	jsonh := *(*reflect.StringHeader)(unsafe.Pointer(&json))
+	jsonbh := reflect.SliceHeader{Data: jsonh.Data, Len: jsonh.Len}
+	jsonb := *(*[]byte)(unsafe.Pointer(&jsonbh))
+	res, err := SetBytesOptions(jsonb, path, value, opts, isStringKey)
+	return string(res), err
+}
+
+// SetBytes sets a json value for the specified path.
+// If working with bytes, this method is preferred over
+// Set(string(data), path, value)
+func SetBytes(json []byte, path string, value interface{}, isStringKey bool) ([]byte, error) {
+	return SetBytesOptions(json, path, value, nil, isStringKey)
+}
+
+// SetBytesOptions sets a json value for the specified path with options.
+// If working with bytes, this method is preferred over
+// SetOptions(string(data), path, value)
+func SetBytesOptions(json []byte, path string, value interface{},
+	opts *Options, isStringKey bool) ([]byte, error) {
+	var optimistic, inplace bool
+	if opts != nil {
+		optimistic = opts.Optimistic
+		inplace = opts.ReplaceInPlace
+	}
+	jstr := *(*string)(unsafe.Pointer(&json))
+	var res []byte
+	var err error
+	switch v := value.(type) {
+	default:
+		// use a distinct name to avoid shadowing the outer err, which would silently drop the error
+		b, merr := jsongo.Marshal(value)
+		if merr != nil {
+			return nil, merr
+		}
+		raw := *(*string)(unsafe.Pointer(&b))
+		res, err = set(jstr, path, raw, false, false, optimistic, inplace, isStringKey)
+	case dtype:
+		res, err = set(jstr, path, "", false, true, optimistic, inplace, isStringKey)
+	case string:
+		res, err = set(jstr, path, v, true, false, optimistic, inplace, isStringKey)
+	case []byte:
+		raw := *(*string)(unsafe.Pointer(&v))
+		res, err = set(jstr, path, raw, true, false, optimistic, inplace, isStringKey)
+	case bool:
+		if v {
+			res, err = set(jstr, path, "true", false, false, optimistic, inplace, isStringKey)
+		} else {
+			res, err = set(jstr, path, "false", false, false, optimistic, inplace, isStringKey)
+		}
+	case int8:
+		res, err = set(jstr, path, strconv.FormatInt(int64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case int16:
+		res, err = set(jstr, path, strconv.FormatInt(int64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case int32:
+		res, err = set(jstr, path, strconv.FormatInt(int64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case int64:
+		res, err = set(jstr, path, strconv.FormatInt(int64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case uint8:
+		res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case uint16:
+		res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case uint32:
+		res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case uint64:
+		res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10),
+			false, false, optimistic, inplace, isStringKey)
+	case float32:
+		res, err = set(jstr, path, strconv.FormatFloat(float64(v), 'f', -1, 64),
+			false, false, optimistic, inplace, isStringKey)
+	case float64:
+		res, err = set(jstr, path, strconv.FormatFloat(float64(v), 'f', -1, 64),
+			false, false, optimistic, inplace, isStringKey)
+	}
+	if err == errNoChange {
+		return json, nil
+	}
+	return res, err
+}
+
+// SetRaw sets a raw json value for the specified path.
+// This function works the same as Set except that the value is set as a
+// raw block of json. This allows for setting premarshalled json objects.
+func SetRaw(json, path, value string, isStringKey bool) (string, error) {
+	return SetRawOptions(json, path, value, nil, isStringKey)
+}
+
+// SetRawOptions sets a raw json value for the specified path with options.
+// This function works the same as SetOptions except that the value is set
+// as a raw block of json. This allows for setting premarshalled json objects.
+func SetRawOptions(json, path, value string, opts *Options, isStringKey bool) (string, error) {
+	var optimistic bool
+	if opts != nil {
+		optimistic = opts.Optimistic
+	}
+	res, err := set(json, path, value, false, false, optimistic, false, isStringKey)
+	if err == errNoChange {
+		return json, nil
+	}
+	return string(res), err
+}
+
+// SetRawBytes sets a raw json value for the specified path.
+// If working with bytes, this method is preferred over
+// SetRaw(string(data), path, value)
+func SetRawBytes(json []byte, path string, value []byte, isStringKey bool) ([]byte, error) {
+	return SetRawBytesOptions(json, path, value, nil, isStringKey)
+}
+
+// SetRawBytesOptions sets a raw json value for the specified path with options.
+// If working with bytes, this method is preferred over
+// SetRawOptions(string(data), path, value, opts)
+func SetRawBytesOptions(json []byte, path string, value []byte,
+	opts *Options, isStringKey bool) ([]byte, error) {
+	jstr := *(*string)(unsafe.Pointer(&json))
+	vstr := *(*string)(unsafe.Pointer(&value))
+	var optimistic, inplace bool
+	if opts != nil {
+		optimistic = opts.Optimistic
+		inplace = opts.ReplaceInPlace
+	}
+	res, err := set(jstr, path, vstr, false, false, optimistic, inplace, isStringKey)
+	if err == errNoChange {
+		return json, nil
+	}
+	return res, err
+}
+
+type dtype struct{}
+
+// Delete deletes a value from json for the specified path.
+func Delete(json, path string) (string, error) {
+	return Set(json, path, dtype{})
+}
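+
+// Sketch: Delete(`{"name":"Tom","age":37}`, "age") yields `{"name":"Tom"}`.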
+
+// DeleteBytes deletes a value from json for the specified path.
+func DeleteBytes(json []byte, path string) ([]byte, error) {
+	return SetBytes(json, path, dtype{}, false)
+}
diff --git a/v2/sjsonhelpermdl/sjsonhelpermdl_test.go b/v2/sjsonhelpermdl/sjsonhelpermdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..732ae7e1ba384411c0998ba559e293bfa68d1fce
--- /dev/null
+++ b/v2/sjsonhelpermdl/sjsonhelpermdl_test.go
@@ -0,0 +1,60 @@
+package sjsonhelpermdl
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/tidwall/gjson"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSetMultiple(t *testing.T) {
+	input := ""
+	keys := []string{"name", "age", "time"}
+	values := []interface{}{"abc", 23, time.Now()}
+	output, setError := SetMultiple(input, keys, values)
+	fmt.Println(output)
+	assert.NoError(t, setError)
+
+}
+
+func TestSetinGJSON(t *testing.T) {
+	input := gjson.Parse("")
+	keys := []string{"name", "age", "time"}
+	values := []interface{}{"abc", 23, time.Now()}
+	output, setError := SetInGJSON(input, keys, values)
+	fmt.Println(output.Value())
+	assert.NoError(t, setError)
+
+}
+
+func BenchmarkSetinGJSON(b *testing.B) {
+	for index := 0; index < b.N; index++ {
+		input := gjson.Parse("")
+		keys := []string{"name.sub.sub1", "age"}
+		values := []interface{}{"abc", 23}
+		SetInGJSON(input, keys, values)
+	}
+}
+
+func BenchmarkTest(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		tmpStr := struct {
+			Name struct {
+				Sub struct {
+					Sub1 string `json:"sub1"`
+				}
+			}
+			Age int `json:"age"`
+		}{}
+		tmp := "{}"
+		// tmp := `{"name":"roshan", "age": 23}`
+		json.Unmarshal([]byte(tmp), &tmpStr)
+		tmpStr.Name.Sub.Sub1 = "abc"
+		tmpStr.Age = 23
+		json.Marshal(tmpStr)
+	}
+}
diff --git a/v2/statemdl/state.go b/v2/statemdl/state.go
new file mode 100644
index 0000000000000000000000000000000000000000..57b3db3742ed5ac64b34fe584e3770e4ac29959e
--- /dev/null
+++ b/v2/statemdl/state.go
@@ -0,0 +1,429 @@
+package statemdl
+
+import (
+	"sync"
+	"time"
+
+	cron "gopkg.in/robfig/cron.v2"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/filemdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+
+	"github.com/pquerna/ffjson/ffjson"
+)
+
+var projectName string
+
+// Statistic - hit and timing statistics for services and queries of the application
+type Statistic struct {
+	ServiceName  string        `json:"serviceName"`
+	Name         string        `json:"name"`
+	TotalHits    int64         `json:"totalHits"`
+	MaxTime      time.Duration `json:"maxTime"`
+	MinTime      time.Duration `json:"minTime"`
+	TotalTime    time.Duration `json:"totalTime"`
+	ErrorCount   int64         `json:"errorCount"`
+	ErrorTime    *time.Time    `json:"errorTime"`
+	LastError    string        `json:"lastError"`
+	Description  string        `json:"description"`
+	IsRestricted bool          `json:"isRestricted"`
+	IsRoleBased  bool          `json:"isRoleBased"`
+	Branch       string        `json:"branch"` // Only applicable to activity stats
+}
+
+type groupResponse struct {
+	GroupTime string `json:"name"`
+	Hits      int64  `json:"hits"`
+}
+
+type clientResponse struct {
+	ServicesState  map[string]Statistic `json:"servicesState"`
+	QueryState     map[string]Statistic `json:"queryState"`
+	TotalSMS       int64                `json:"totalSMS"`
+	TotalEmail     int64                `json:"totalEmail"`
+	TotalOTP       int64                `json:"totalOTP"`
+	OTPInCache     int64                `json:"otpInCache"`
+	TotalMongoHits int64                `json:"totalMongoHits"`
+	TotalMySQLHits int64                `json:"totalMySQLHits"`
+	TotalHits      int64                `json:"totalHits"`
+	CacheHits      int64                `json:"cacheHits"`
+	CacheMiss      int64                `json:"cacheMiss"`
+	StartTime      time.Time            `json:"startTime"`
+	NextTime       time.Time            `json:"nextTime"`
+	GroupReport    []groupResponse      `json:"groupReport"`
+}
+
+type entityHits struct {
+	TotalSMS   int64 `json:"totalSMS"`
+	TotalEmail int64 `json:"totalEmail"`
+	TotalOTP   int64 `json:"totalOTP"`
+	OTPInCache int64 `json:"otpInCache"`
+	Mutex      *sync.Mutex
+}
+
+type dbHits struct {
+	MongoHits int64 `json:"mongoHits"`
+	MySQLHits int64 `json:"mysqlHits"`
+	Mutex     *sync.Mutex
+}
+
+type cacheStates struct {
+	totalHits      int64
+	cacheHits      int64
+	cacheMiss      int64
+	cacheHitsMutex *sync.Mutex
+}
+
+// entityHits - manages hits for SMS and EMAILS
+var entityHitsObj entityHits
+
+var clientReponseMutex *sync.Mutex
+
+// dbHitsObj - manages hits for Mongo and MySQL
+var dbHitsObj dbHits
+var lastKept = 20
+var stateCache map[string]Statistic
+var queryCache map[string]Statistic
+var stateMutex = &sync.Mutex{}
+var cacheStatistic *cacheStates
+
+var groupCount int64
+var groupMutex = &sync.Mutex{}
+
+// serverStartTime - server start time
+var serverStartTime time.Time
+
+var clientResponseData clientResponse
+
+var initStatus = false
+
+// Init - initializes statistics collection for the given project and starts a cron that flushes statistics to disk every 30 seconds
+func Init(name string) {
+	projectName = name
+	cacheStatistic = &cacheStates{
+		cacheHitsMutex: &sync.Mutex{},
+	}
+	clientReponseMutex = &sync.Mutex{}
+	entityHitsObj.Mutex = &sync.Mutex{}
+	dbHitsObj.Mutex = &sync.Mutex{}
+	serverStartTime = time.Now()
+	ba := readStatisticsFile()
+	unmarshalErr := ffjson.Unmarshal(ba, &clientResponseData)
+	if errormdl.CheckErr(unmarshalErr) != nil {
+		loggermdl.LogError(unmarshalErr)
+		return
+	}
+	stateCache = clientResponseData.ServicesState
+	if stateCache == nil {
+		stateCache = make(map[string]Statistic)
+	}
+	queryCache = clientResponseData.QueryState
+	if queryCache == nil {
+		queryCache = make(map[string]Statistic)
+	}
+	clientResponseData.NextTime = time.Now().Add(600 * time.Second)
+	c := cron.New()
+	c.AddFunc("@every 30s", collectStatistics)
+	c.Start()
+	initStatus = true
+}
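+
+// Typical bootstrap sketch ("myapp" is an assumed project name):
+//
+//	statemdl.Init("myapp") // loads ./statistics/myapp.json and flushes stats every 30s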
+
+func readStatisticsFile() []byte {
+	filePath := getFilePath()
+	if !filemdl.FileAvailabilityCheck(filePath) {
+		return []byte("{}")
+	}
+	ba, readErr := filemdl.ReadFile(filePath)
+	if errormdl.CheckErr(readErr) != nil {
+		loggermdl.LogError(readErr)
+		return []byte("{}")
+	}
+	return ba
+}
+
+func getFilePath() string {
+	if projectName == "" {
+		return "./statistics.json"
+	}
+	return "./statistics/" + projectName + ".json"
+}
+
+func updateGlobalHit() {
+	if !initStatus {
+		return
+	}
+	cacheStatistic.cacheHitsMutex.Lock()
+	updateGroupCache(cacheStatistic.totalHits)
+	cacheStatistic.totalHits++
+	cacheStatistic.cacheHitsMutex.Unlock()
+}
+
+func updateGroupCache(hitCount int64) {
+	if !initStatus {
+		return
+	}
+	groupMutex.Lock()
+	groupCount++
+	groupMutex.Unlock()
+}
+
+// UpdateServiceState - update entry of service in state map
+func UpdateServiceState(serviceName string, servingTime time.Duration, serviceError error, isRestricted, isRoleBased bool) {
+	UpdateServiceStateWithBranch(serviceName, "main", servingTime, serviceError, isRestricted, isRoleBased)
+}
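+
+// A caller would typically time the call and report it, e.g. (sketch; serveRequest
+// is an assumed handler):
+//
+//	start := time.Now()
+//	err := serveRequest()
+//	statemdl.UpdateServiceState("loginService", time.Since(start), err, false, false)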
+
+func concatenateNameWithBranch(name, branch string) string {
+	if branch == "" {
+		return name + "_main"
+	}
+
+	return name + "_" + branch
+}
+
+// UpdateServiceStateWithBranch - update entry of service from a branch in state map
+func UpdateServiceStateWithBranch(serviceName, branch string, servingTime time.Duration, serviceError error, isRestricted, isRoleBased bool) {
+	if !initStatus {
+		return
+	}
+	stateMutex.Lock()
+
+	key := concatenateNameWithBranch(serviceName, branch)
+
+	serviceState, ok := stateCache[key]
+	if !ok {
+		serviceState = Statistic{
+			ServiceName:  serviceName,
+			Name:         serviceName,
+			IsRestricted: isRestricted,
+			IsRoleBased:  isRoleBased,
+			Branch:       branch,
+		}
+	}
+	serviceState.TotalHits++
+	if serviceError != nil {
+		serviceState.ErrorCount++
+		serviceState.LastError = serviceError.Error()
+		ct := time.Now()
+		serviceState.ErrorTime = &ct
+	} else {
+		serviceState.TotalTime += servingTime
+		if servingTime > serviceState.MaxTime {
+			serviceState.MaxTime = servingTime
+		}
+		if servingTime < serviceState.MinTime || serviceState.MinTime == 0 {
+			serviceState.MinTime = servingTime
+		}
+	}
+	stateCache[key] = serviceState
+	stateMutex.Unlock()
+	updateGlobalHit()
+}
+
+// UpdateQueryState - update entry of a query in the state map
+func UpdateQueryState(queryName string, name string, servingTime time.Duration, serviceError error, isRestricted, isRoleBased bool) {
+	if !initStatus {
+		return
+	}
+	stateMutex.Lock()
+	queryState, ok := queryCache[queryName]
+	if !ok {
+		queryState = Statistic{
+			ServiceName:  queryName,
+			Name:         name,
+			IsRestricted: isRestricted,
+			IsRoleBased:  isRoleBased,
+		}
+	}
+	queryState.TotalHits++
+	if serviceError != nil {
+		queryState.ErrorCount++
+		queryState.LastError = serviceError.Error()
+		ct := time.Now()
+		queryState.ErrorTime = &ct
+	} else {
+		queryState.TotalTime += servingTime
+		if servingTime > queryState.MaxTime {
+			queryState.MaxTime = servingTime
+		}
+		if servingTime < queryState.MinTime || queryState.MinTime == 0 {
+			queryState.MinTime = servingTime
+		}
+	}
+	queryCache[queryName] = queryState
+	stateMutex.Unlock()
+}
+
+// UpdateGlobalServiceCacheState - update only cache hits and miss count for all services
+func UpdateGlobalServiceCacheState(cacheHit bool) {
+	if !initStatus {
+		return
+	}
+	cacheStatistic.cacheHitsMutex.Lock()
+	defer cacheStatistic.cacheHitsMutex.Unlock()
+	if cacheHit {
+		cacheStatistic.cacheHits++
+	} else {
+		cacheStatistic.cacheMiss++
+	}
+}
+
+// EmailHits - update only email hits count for all services
+func EmailHits() {
+	if !initStatus {
+		return
+	}
+	entityHitsObj.Mutex.Lock()
+	defer entityHitsObj.Mutex.Unlock()
+	entityHitsObj.TotalEmail++
+}
+
+// OTPHits - update only OTPs hits count for all services -- DPK [12-June-2018]
+func OTPHits() {
+	if !initStatus {
+		return
+	}
+	entityHitsObj.Mutex.Lock()
+	entityHitsObj.TotalOTP++
+	entityHitsObj.Mutex.Unlock()
+}
+
+// OTPInCache - set the count of OTPs currently in cache -- DPK [12-June-2018]
+func OTPInCache(count int64) {
+	if !initStatus {
+		return
+	}
+	entityHitsObj.Mutex.Lock()
+	entityHitsObj.OTPInCache = count
+	entityHitsObj.Mutex.Unlock()
+}
+
+// SMSHits - update only sms hits count for all services
+func SMSHits() {
+	if !initStatus {
+		return
+	}
+	entityHitsObj.Mutex.Lock()
+	defer entityHitsObj.Mutex.Unlock()
+	entityHitsObj.TotalSMS++
+}
+
+// MongoHits - update only mongo hits count for all services
+func MongoHits() {
+	if !initStatus {
+		return
+	}
+	dbHitsObj.Mutex.Lock()
+	defer dbHitsObj.Mutex.Unlock()
+	dbHitsObj.MongoHits++
+}
+
+// MySQLHits - update only MySQL hits count for all services
+func MySQLHits() {
+	if !initStatus {
+		return
+	}
+	dbHitsObj.Mutex.Lock()
+	defer dbHitsObj.Mutex.Unlock()
+	dbHitsObj.MySQLHits++
+}
+
+func collectStatistics() {
+	if !initStatus {
+		return
+	}
+	clientReponseMutex.Lock()
+	defer clientReponseMutex.Unlock()
+	clientResponseData.StartTime = serverStartTime
+	cacheStatistic.cacheHitsMutex.Lock()
+	clientResponseData.TotalHits += cacheStatistic.totalHits
+	clientResponseData.CacheHits += cacheStatistic.cacheHits
+	clientResponseData.CacheMiss += cacheStatistic.cacheMiss
+	cacheStatistic.totalHits = 0
+	cacheStatistic.cacheMiss = 0
+	cacheStatistic.cacheHits = 0
+	cacheStatistic.cacheHitsMutex.Unlock()
+
+	entityHitsObj.Mutex.Lock()
+	clientResponseData.TotalEmail += entityHitsObj.TotalEmail
+	entityHitsObj.TotalEmail = 0
+	clientResponseData.OTPInCache = entityHitsObj.OTPInCache
+	entityHitsObj.OTPInCache = 0
+	clientResponseData.TotalOTP += entityHitsObj.TotalOTP
+	entityHitsObj.TotalOTP = 0
+	clientResponseData.TotalSMS += entityHitsObj.TotalSMS
+	entityHitsObj.TotalSMS = 0
+	entityHitsObj.Mutex.Unlock()
+
+	// DB hits collect
+	dbHitsObj.Mutex.Lock()
+	clientResponseData.TotalMongoHits += dbHitsObj.MongoHits
+	dbHitsObj.MongoHits = 0
+	clientResponseData.TotalMySQLHits += dbHitsObj.MySQLHits
+	dbHitsObj.MySQLHits = 0
+	dbHitsObj.Mutex.Unlock()
+
+	groupMutex.Lock()
+	current := time.Now()
+	if current.After(clientResponseData.NextTime) || len(clientResponseData.GroupReport) == 0 {
+		gr := groupResponse{}
+		gr.GroupTime = current.String()
+		gr.Hits = groupCount
+		clientResponseData.GroupReport = append(clientResponseData.GroupReport, gr)
+		clientResponseData.NextTime = time.Now().Add(30 * time.Second)
+	} else {
+		clientResponseData.GroupReport[len(clientResponseData.GroupReport)-1].Hits += groupCount
+	}
+	length := len(clientResponseData.GroupReport)
+	if length > lastKept {
+		clientResponseData.GroupReport = clientResponseData.GroupReport[length-lastKept:]
+	}
+	groupCount = 0
+	groupMutex.Unlock()
+	clientResponseData.ServicesState = stateCache
+	clientResponseData.QueryState = queryCache
+
+	// The marshal function reads data from clientResponseData.ServicesState and clientResponseData.QueryState.
+	// As both are maps, we have passed a reference to stateCache and queryCache respectively.
+	// The following lock guards these two underlying data structures.
+	stateMutex.Lock()
+	defer stateMutex.Unlock()
+
+	ba, marshalError := ffjson.Marshal(clientResponseData)
+	if errormdl.CheckErr(marshalError) != nil {
+		loggermdl.LogError(marshalError)
+		return
+	}
+
+	writeErr := filemdl.WriteFile(getFilePath(), ba, true, false)
+	if errormdl.CheckErr(writeErr) != nil {
+		loggermdl.LogError(writeErr)
+	}
+}
diff --git a/v2/statemdl/state_fasthttp.go b/v2/statemdl/state_fasthttp.go
new file mode 100644
index 0000000000000000000000000000000000000000..e263a6c3316aa9ac5a30f4f45720251babe22eb4
--- /dev/null
+++ b/v2/statemdl/state_fasthttp.go
@@ -0,0 +1,27 @@
+// +build fasthttp
+
+package statemdl
+
+import (
+	"encoding/json"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	routing "github.com/qiangxue/fasthttp-routing"
+)
+
+// StateHandler - handler function that serves the collected statistics (fasthttp build)
+func StateHandler(c *routing.Context) error {
+	clientReponseMutex.Lock()
+	defer clientReponseMutex.Unlock()
+	c.Response.Header.Set("Access-Control-Allow-Origin", "*")
+	ba, err := json.Marshal(clientResponseData)
+	if err != nil {
+		loggermdl.LogError("Failed to marshall statitics - ", err)
+		c.SetStatusCode(417)
+		return nil
+	}
+	c.SetContentType("application/json")
+	c.WriteData(string(ba))
+	c.SetStatusCode(200)
+	return nil
+}
diff --git a/v2/statemdl/state_gin.go b/v2/statemdl/state_gin.go
new file mode 100644
index 0000000000000000000000000000000000000000..c997ca62569135c9bb996cb996fa8aac472c6798
--- /dev/null
+++ b/v2/statemdl/state_gin.go
@@ -0,0 +1,15 @@
+// +build !fasthttp
+
+package statemdl
+
+import (
+	"github.com/gin-gonic/gin"
+)
+
+// StateHandler - handler function that serves the collected statistics (gin build)
+func StateHandler(c *gin.Context) {
+	clientReponseMutex.Lock()
+	defer clientReponseMutex.Unlock()
+	c.Header("Access-Control-Allow-Origin", "*")
+	c.JSON(200, clientResponseData)
+}
diff --git a/v2/utiliymdl/guidmdl/guidmdl.go b/v2/utiliymdl/guidmdl/guidmdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..4ee8379a207b0dd498c9772e2f1d177e28857fe9
--- /dev/null
+++ b/v2/utiliymdl/guidmdl/guidmdl.go
@@ -0,0 +1,11 @@
+package guidmdl
+
+import (
+	"github.com/segmentio/ksuid"
+)
+
+// GetGUID returns a new globally unique id (a KSUID string)
+func GetGUID() string {
+	//TODO: Check GUID version
+	return ksuid.New().String()
+}
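+
+// Sketch: GetGUID() yields a 27-character KSUID string such as
+// "0ujsswThIGTUYm2K8FjOOfXtY1K".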
diff --git a/v2/utiliymdl/guidmdl/guidmdl_test.go b/v2/utiliymdl/guidmdl/guidmdl_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..f9901e09b188bfd77942593d46ea8cc7c5e02353
--- /dev/null
+++ b/v2/utiliymdl/guidmdl/guidmdl_test.go
@@ -0,0 +1,14 @@
+package guidmdl
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetGUID(t *testing.T) {
+	id := GetGUID()
+	id2 := GetGUID()
+	assert.NotEmpty(t, id, "New Guid should not be empty")
+	assert.NotEqual(t, id, id2, "Two GUID should not be equal")
+}
diff --git a/v2/utiliymdl/randommdl/utilitymdl.go b/v2/utiliymdl/randommdl/utilitymdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..5c36435df240a4c22331e08f2761347b8ec89aad
--- /dev/null
+++ b/v2/utiliymdl/randommdl/utilitymdl.go
@@ -0,0 +1,21 @@
+package randommdl
+
+import (
+	"math/rand"
+	"time"
+)
+
+// RandomString returns a random string of length l made of uppercase ASCII letters
+func RandomString(l int) string {
+	bytes := make([]byte, l)
+	for i := 0; i < l; i++ {
+		bytes[i] = byte(RandInt(65, 90))
+	}
+	return string(bytes)
+}
+
+// RandInt returns a random int in the half-open interval [min, max)
+func RandInt(min int, max int) int {
+	return min + rand.Intn(max-min)
+}
+
+func init() {
+	// Seed once per process so values differ across server starts. Reseeding on
+	// every call (as before) can repeat values for calls within the same nanosecond.
+	rand.Seed(time.Now().UnixNano())
+}
diff --git a/v2/utiliymdl/randommdl/utilitymdl_test.go b/v2/utiliymdl/randommdl/utilitymdl_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..c78267150f588c526201567cd625dd68161f8b8d
--- /dev/null
+++ b/v2/utiliymdl/randommdl/utilitymdl_test.go
@@ -0,0 +1,28 @@
+package randommdl
+
+import (
+	"testing"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_RandomString(t *testing.T) {
+	str1 := RandomString(3)
+	str2 := RandomString(3)
+	assert.NotEqual(t, str1, str2, "These random strings should not match")
+}
+func Test_RandomInt(t *testing.T) {
+	int1 := RandInt(1, 100)
+	loggermdl.LogDebug("str1 value:: ", int1)
+	int2 := RandInt(1, 100)
+	loggermdl.LogDebug("str2 value:: ", int2)
+	assert.NotEqual(t, int1, int2, "These random ints should not match")
+}
+func Test_RandomIntRestricted(t *testing.T) {
+	int1 := RandInt(5, 6)
+	loggermdl.LogDebug("str1 value:: ", int1)
+	int2 := RandInt(5, 6)
+	loggermdl.LogDebug("str2 value:: ", int2)
+	assert.Equal(t, int1, int2, "These restricted ints should match")
+}
diff --git a/v2/utiliymdl/stringmdl/stringmdl.go b/v2/utiliymdl/stringmdl/stringmdl.go
new file mode 100755
index 0000000000000000000000000000000000000000..110e72081468f168ae42885145f7b909d4065793
--- /dev/null
+++ b/v2/utiliymdl/stringmdl/stringmdl.go
@@ -0,0 +1,39 @@
+//@author  Ajit Jagtap
+//@version Thu Jul 05 2018 10:02:40 GMT+0530 (IST)
+
+// Package stringmdl will help you to play with strings
+package stringmdl
+
+import (
+	"encoding/base64"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"github.com/lithammer/fuzzysearch/fuzzy"
+)
+
+// FuzzyFind will find string in array
+func FuzzyFind(find string, arr *[]string) []string {
+	return fuzzy.Find(find, *arr)
+}
+
+// FuzzyRankFind will find rank of string in array
+func FuzzyRankFind(find string, arr *[]string) []fuzzy.Rank {
+	return fuzzy.RankFind(find, *arr)
+}
+
+// FuzzyRankMatch (commented out below) is currently failing; it should find the rank of one string within another
+// func FuzzyRankMatch(find, str string) int {
+// 	return fuzzy.RankMatch(find, str)
+// }
+
+// ConvertStringToBase64 will Convert String To Base64
+func ConvertStringToBase64(str string) string {
+	encoded := base64.StdEncoding.EncodeToString([]byte(str))
+	return encoded
+}
+
+// ConvertBase64ToString will Convert Base64 To String
+func ConvertBase64ToString(basestr string) (string, error) {
+	conStr, err := base64.StdEncoding.DecodeString(basestr)
+	return string(conStr), errormdl.CheckErr(err)
+}
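+
+// Round-trip sketch: ConvertBase64ToString(ConvertStringToBase64("abc"))
+// returns ("abc", nil).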
diff --git a/v2/utiliymdl/stringmdl/stringmdl_test.go b/v2/utiliymdl/stringmdl/stringmdl_test.go
new file mode 100755
index 0000000000000000000000000000000000000000..2f05605f57a6cfcf16fe280889c0f60057aedb87
--- /dev/null
+++ b/v2/utiliymdl/stringmdl/stringmdl_test.go
@@ -0,0 +1,29 @@
+package stringmdl
+
+import (
+	"testing"
+
+	_ "github.com/lithammer/fuzzysearch/fuzzy"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestConvertBase64(t *testing.T) {
+	somestring := "some string"
+	base64 := ConvertStringToBase64(somestring)
+	base64New, err := ConvertBase64ToString(base64)
+	assert.NoError(t, err, "It should not generate an error")
+	assert.Equal(t, somestring, base64New, "Original string and converted string should be the same")
+}
+
+func TestStringTesting(t *testing.T) {
+	//Find In String
+	arr := []string{"This is new string", "and str", "something which should not find"}
+	findResult := FuzzyFind("str", &arr)
+	assert.Len(t, findResult, 2, "it should find str")
+
+	result := FuzzyRankFind("str", &arr)
+	assert.Len(t, result, 2, "This should find the count of matches")
+
+	//matchcnt := FuzzyRankMatch("kitten", "sitting")
+	//assert.Equal(t, 3, matchcnt, "Match count should be same")
+}
diff --git a/v2/validationmdl/validationcore/CONTRIBUTING.md b/v2/validationmdl/validationcore/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd4b0ed5f4fb40350bf63a0f88291b921c547cfd
--- /dev/null
+++ b/v2/validationmdl/validationcore/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+# Contributing
+
+## Must follow the guide for issues
+  - Use the search tool before opening a new issue.
+  - Please provide source code and stack trace if you found a bug.
+  - Please review the existing issues, [project cards](https://github.com/thedevsaddam/govalidator/projects/1) and provide feedback to them
+
+## Pull Request Process
+  - Open your pull request against `dev` branch
+  - It should pass all tests in the available continuous integration systems such as TravisCI.
+  - You should add/modify tests to cover your proposed code changes.
+  - If your pull request contains a new feature, please document it on the README.
diff --git a/v2/validationmdl/validationcore/LICENSE.md b/v2/validationmdl/validationcore/LICENSE.md
new file mode 100644
index 0000000000000000000000000000000000000000..5786a9421b13c54aadb26eec80bda6628c144bb4
--- /dev/null
+++ b/v2/validationmdl/validationcore/LICENSE.md
@@ -0,0 +1,21 @@
+# The MIT License (MIT)
+
+Copyright (c) 2017 Saddam H <thedevsaddam@gmail.com>
+
+> Permission is hereby granted, free of charge, to any person obtaining a copy
+> of this software and associated documentation files (the "Software"), to deal
+> in the Software without restriction, including without limitation the rights
+> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+> copies of the Software, and to permit persons to whom the Software is
+> furnished to do so, subject to the following conditions:
+>
+> The above copyright notice and this permission notice shall be included in
+> all copies or substantial portions of the Software.
+>
+> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+> THE SOFTWARE.
diff --git a/v2/validationmdl/validationcore/README.md b/v2/validationmdl/validationcore/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..11eff74fee33bf7f84c061748ff85214bee5d77a
--- /dev/null
+++ b/v2/validationmdl/validationcore/README.md
@@ -0,0 +1,232 @@
+Package govalidator
+=========================
+[![Build Status](https://travis-ci.org/thedevsaddam/govalidator.svg?branch=master)](https://travis-ci.org/thedevsaddam/govalidator)
+[![Project status](https://img.shields.io/badge/version-1.9-green.svg)](https://github.com/thedevsaddam/govalidator/releases)
+[![Go Report Card](https://goreportcard.com/badge/github.com/thedevsaddam/govalidator)](https://goreportcard.com/report/github.com/thedevsaddam/govalidator)
+[![Coverage Status](https://coveralls.io/repos/github/thedevsaddam/govalidator/badge.svg?branch=master)](https://coveralls.io/github/thedevsaddam/govalidator?branch=master)
+[![GoDoc](https://godoc.org/github.com/thedevsaddam/govalidator?status.svg)](https://godoc.org/github.com/thedevsaddam/govalidator)
+[![License](https://img.shields.io/dub/l/vibe-d.svg)](https://github.com/thedevsaddam/govalidator/blob/dev/LICENSE.md)
+
+Validate golang request data with simple rules. Highly inspired by Laravel's request validation.
+
+
+### Installation
+
+Install the package using
+```bash
+$ go get github.com/thedevsaddam/govalidator
+# or
+$ go get gopkg.in/thedevsaddam/govalidator.v1
+```
+
+### Usage
+
+To use the package import it in your `*.go` code
+```go
+import "github.com/thedevsaddam/govalidator"
+// or
+import "gopkg.in/thedevsaddam/govalidator.v1"
+```
+
+### Example
+
+***Validate `form-data`, `x-www-form-urlencoded` and `query params`***
+
+```go
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/thedevsaddam/govalidator"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	rules := govalidator.MapData{
+		"username": []string{"required", "between:3,8"},
+		"email":    []string{"required", "min:4", "max:20", "email"},
+		"web":      []string{"url"},
+		"phone":    []string{"digits:11"},
+		"agree":    []string{"bool"},
+		"dob":      []string{"date"},
+	}
+
+	messages := govalidator.MapData{
+		"username": []string{"required:আপনাকে অবশ্যই ইউজারনেম দিতে হবে", "between:ইউজারনেম অবশ্যই ৩-৮ অক্ষর হতে হবে"},
+		"phone":    []string{"digits:ফোন নাম্বার অবশ্যই ১১ নম্বারের হতে হবে"},
+	}
+
+	opts := govalidator.Options{
+		Request:         r,        // request object
+		Rules:           rules,    // rules map
+		Messages:        messages, // custom message map (Optional)
+		RequiredDefault: true,     // all fields must pass the rules
+	}
+	v := govalidator.New(opts)
+	e := v.Validate()
+	err := map[string]interface{}{"validationError": e}
+	w.Header().Set("Content-type", "application/json")
+	json.NewEncoder(w).Encode(err)
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	fmt.Println("Listening on port: 9000")
+	http.ListenAndServe(":9000", nil)
+}
+
+```
+
+Send a request to the server using curl or Postman: `curl -X GET "http://localhost:9000?web=&phone=&zip=&dob=&agree="`
+
+
+***Response***
+```json
+{
+    "validationError": {
+        "agree": [
+            "The agree may only contain boolean value, string or int 0, 1"
+        ],
+        "dob": [
+            "The dob field must be a valid date format. e.g: yyyy-mm-dd, yyyy/mm/dd etc"
+        ],
+        "email": [
+            "The email field is required",
+            "The email field must be a valid email address"
+        ],
+        "phone": [
+            "ফোন নাম্বার অবশ্যই ১১ নম্বারের হতে হবে"
+        ],
+        "username": [
+            "আপনাকে অবশ্যই ইউজারনেম দিতে হবে",
+            "ইউজারনেম অবশ্যই ৩-৮ অক্ষর হতে হবে"
+        ],
+        "web": [
+            "The web field format is invalid"
+        ]
+    }
+}
+```
+
+### More examples
+
+***Validate file***
+
+* [Validate file](doc/FILE_VALIDATION.md)
+
+***Validate `application/json` or `text/plain` as raw body***
+
+* [Validate JSON to simple struct](doc/SIMPLE_STRUCT_VALIDATION.md)
+* [Validate JSON to map](doc/MAP_VALIDATION.md)
+* [Validate JSON to nested struct](doc/NESTED_STRUCT.md)
+* [Validate using custom rule](doc/CUSTOM_RULE.md)
+
+### Validation Rules
+* `alpha` The field under validation must be entirely alphabetic characters.
+* `alpha_dash` The field under validation may have alpha-numeric characters, as well as dashes and underscores.
+* `alpha_num` The field under validation must be entirely alpha-numeric characters.
+* `between:numeric,numeric` The field under validation must have a character length, array/slice/map length, or numeric value (integer or float) between the two given numbers.
+* `numeric` The field under validation must be entirely numeric characters.
+* `numeric_between:numeric,numeric` The field under validation must be a numeric value within the given range.
+   e.g: `numeric_between:18,65` accepts numeric values like `35`, `55`. Float values can also be used
+* `bool` The field under validation must be able to be cast as a boolean. Accepted input are `true, false, 1, 0, "1" and "0"`.
+* `credit_card` The field under validation must have a valid credit card number. Accepted cards are `Visa, MasterCard, American Express, Diners Club, Discover and JCB card`
+* `coordinate` The field under validation must have a value of valid coordinate.
+* `css_color` The field under validation must have a value of valid CSS color. Accepted colors are `hex, rgb, rgba, hsl, hsla` like `#909, #00aaff, rgb(255,122,122)`
+* `date` The field under validation must have a valid date of format yyyy-mm-dd or yyyy/mm/dd.
+* `date:dd-mm-yyyy` The field under validation must have a valid date of format dd-mm-yyyy.
+* `digits:int` The field under validation must be numeric and must have an exact length of value.
+* `digits_between:int,int` The field under validation must be numeric and must have length between the range.
+   e.g: `digits_between:3,5` may contains digits like `2323`, `12435`
+* `in:foo,bar` The field under validation must have one of the values. e.g: `in:admin,manager,user` must contain the values (admin or manager or user)
+* `not_in:foo,bar` The field under validation must not have any of the given values. e.g: `not_in:admin,manager,user` must not contain the values (admin, manager or user)
+* `email` The field under validation must have a valid email.
+* `float` The field under validation must have a valid float number.
+* `min:numeric` The field under validation must have a minimum length of characters for a string, items length for a slice/map, or value for an integer or float.
+   e.g: `min:3` accepts strings of minimum length 3 like `"john", "jane", "jane321"` but not `"mr", "xy"`
+* `max:numeric` The field under validation must have a maximum length of characters for a string, items length for a slice/map, or value for an integer or float.
+   e.g: `max:6` accepts strings of maximum length 6 like `"john"`, `"jane"` but not `"john doe"`, `"jane doe"`
+* `len:numeric` The field under validation must have an exact length of characters, an exact integer or float value, or an exact size of map/slice.
+   e.g: `len:4` accepts strings of exact length 4 like `Food, Mood, Good`
+* `ip` The field under validation must be a valid IP address.
+* `ip_v4` The field under validation must be a valid IP V4 address.
+* `ip_v6` The field under validation must be a valid IP V6 address.
+* `json` The field under validation must be a valid JSON string.
+* `lat` The field under validation must be a valid latitude.
+* `lon` The field under validation must be a valid longitude.
+* `regex:regular expression` The field under validation is validated against the given regex. e.g: `regex:^[a-zA-Z]+$` validates letters only (see the sketch after this list)
+* `required` The field under validation must be present in the input data and not empty. A field is considered "empty" if one of the following conditions is true: 1) The value is null. 2) The value is an empty string. 3) The map or slice has zero length. 4) The integer or float has a zero value
+* `size:integer` The field under validation validates a file's size, only in form-data ([see example](doc/FILE_VALIDATION.md))
+* `ext:jpg,png` The field under validation validates a file's extension ([see example](doc/FILE_VALIDATION.md))
+* `mime:image/jpg,image/png` The field under validation validates a file's mime type ([see example](doc/FILE_VALIDATION.md))
+* `url` The field under validation must be a valid URL.
+* `uuid` The field under validation must be a valid UUID.
+* `uuid_v3` The field under validation must be a valid UUID V3.
+* `uuid_v4` The field under validation must be a valid UUID V4.
+* `uuid_v5` The field under validation must be a valid UUID V5.
+
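+Parameterized rules take their arguments after a colon. A minimal sketch (the field names are illustrative) combining a few of them:
+
+```go
+rules := govalidator.MapData{
+	"role":     []string{"required", "in:admin,manager,user"},
+	"status":   []string{"not_in:banned,suspended"},
+	"initials": []string{"regex:^[A-Z]{2,3}$"},
+}
+```
+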
+### Add Custom Rules
+
+```go
+func init() {
+	// simple example
+	govalidator.AddCustomRule("must_john", func(field string, rule string, message string, value interface{}) error {
+		val := value.(string)
+		if val != "john" && val != "John" { // && here: with || the condition would always be true
+			return fmt.Errorf("The %s field must be John or john", field)
+		}
+		return nil
+	})
+
+	// custom rules to take fixed length word.
+	// e.g: word:5 will throw error if the field does not contain exact 5 word
+	govalidator.AddCustomRule("word", func(field string, rule string, message string, value interface{}) error {
+		valSlice := strings.Fields(value.(string))
+		l, _ := strconv.Atoi(strings.TrimPrefix(rule, "word:")) // in real code, handle this conversion error
+		if len(valSlice) != l {
+			return fmt.Errorf("The %s field must be %d words", field, l)
+		}
+		return nil
+	})
+
+}
+```
+Note: Arrays, maps, and slices can be validated by adding custom rules.
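+
+For example, a hedged sketch of a rule that rejects an empty slice, assuming the bound struct field is a `[]string` (the rule name is illustrative):
+
+```go
+govalidator.AddCustomRule("non_empty_slice", func(field string, rule string, message string, value interface{}) error {
+	// reject the field when it decodes to an empty []string
+	if s, ok := value.([]string); ok && len(s) == 0 {
+		return fmt.Errorf("The %s field must not be empty", field)
+	}
+	return nil
+})
+```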
+
+### Custom Message / Localization
+If you need to translate validation messages, you can pass the messages as options.
+
+```go
+messages := govalidator.MapData{
+	"username": []string{"required:You must provide username", "between:The username field must be between 3 to 8 chars"},
+	"zip":      []string{"numeric:Please provide zip field as numeric"},
+}
+
+opts := govalidator.Options{
+	Messages: messages, // along with the usual Request and Rules
+}
+```
+
+### Contribution
+If you are interested in making the package better, please send a pull request or create an issue so that others can fix it.
+[Read the contribution guide here](CONTRIBUTING.md)
+
+### Contributors
+
+- [Jun Kimura](https://github.com/bluele)
+- [Steve Hill](https://github.com/stevehill1981)
+- [ErickSkrauch](https://github.com/erickskrauch)
+- [Sakib Sami](https://github.com/s4kibs4mi)
+- [Rip](https://github.com/ripbandit)
+- [Jose Nazario](https://github.com/paralax)
+
+### See all [contributors](https://github.com/thedevsaddam/govalidator/graphs/contributors)
+
+### See [benchmarks](doc/BENCHMARK.md)
+### Read [API documentation](https://godoc.org/github.com/thedevsaddam/govalidator)
+
+### **License**
+The **govalidator** is an open-source software licensed under the [MIT License](LICENSE.md).
diff --git a/v2/validationmdl/validationcore/doc/BENCHMARK.md b/v2/validationmdl/validationcore/doc/BENCHMARK.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab378b425323c1c7c40d8a5882652ed5e0d0d18e
--- /dev/null
+++ b/v2/validationmdl/validationcore/doc/BENCHMARK.md
@@ -0,0 +1,36 @@
+Benchmarks
+===================
+
+Machine: MacBook Pro (2015), 2.7 GHz, 8 GB RAM
+Go version: go1.8.1 darwin/amd64
+
+Command: `go test -run=XXX -bench=. -benchmem=true`
+
+| Benchmark                                  | Iterations | ns/op      | B/op      | allocs/op    |
+|--------------------------------------------|-----------|------------|-----------|--------------|
+| Benchmark_IsAlpha-4                        | 5000000   | 323 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsAlphaDash-4                    | 3000000   | 415 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsAlphaNumeric-4                 | 5000000   | 338 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsBoolean-4                      | 100000000 | 10.6 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsCreditCard-4                   | 3000000   | 543 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsCoordinate-4                   | 2000000   | 950 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsCSSColor-4                     | 5000000   | 300 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsDate-4                         | 2000000   | 719 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsDateDDMMYY-4                   | 3000000   | 481 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsEmail-4                        | 1000000   | 1172 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsFloat-4                        | 3000000   | 432 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsIn-4                           | 200000000 | 7.34 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsJSON-4                         | 1000000   | 1595 ns/op | 768 B/op  | 12 allocs/op |
+| Benchmark_IsNumeric-4                      | 10000000  | 195 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsLatitude-4                     | 3000000   | 523 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsLongitude-4                    | 3000000   | 516 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsIP-4                           | 1000000   | 1073 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsIPV4-4                         | 3000000   | 580 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsIPV6-4                         | 1000000   | 1288 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsMatchedRegex-4                 | 200000    | 7133 ns/op | 5400 B/op | 66 allocs/op |
+| Benchmark_IsURL-4                          | 1000000   | 1159 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsUUID-4                         | 2000000   | 832 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsUUID3-4                        | 2000000   | 783 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsUUID4-4                        | 2000000   | 899 ns/op  | 0 B/op    | 0 allocs/op  |
+| Benchmark_IsUUID5-4                        | 2000000   | 828 ns/op  | 0 B/op    | 0 allocs/op  |
+| BenchmarkRoller_Start-4                    | 200000    | 6869 ns/op | 2467 B/op | 28 allocs/op |
+| Benchmark_isContainRequiredField-4         | 300000000 | 4.23 ns/op | 0 B/op    | 0 allocs/op  |
+| Benchmark_Validate-4                       | 200000    | 9347 ns/op | 664 B/op  | 28 allocs/op |
diff --git a/v2/validationmdl/validationcore/doc/CUSTOM_RULE.md b/v2/validationmdl/validationcore/doc/CUSTOM_RULE.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8c4ab266c01111373375f65e0ca6fba1ebf659c
--- /dev/null
+++ b/v2/validationmdl/validationcore/doc/CUSTOM_RULE.md
@@ -0,0 +1,86 @@
+
+### Validate with custom rule
+
+You can register custom validation rules. These rules work for both the `Validate` and `ValidateJSON` methods. You get all the information you need to validate an input.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/thedevsaddam/govalidator"
+)
+
+func init() {
+	// custom rules to take fixed length word.
+	// e.g: max_word:5 will throw error if the field contains more than 5 words
+	govalidator.AddCustomRule("max_word", func(field string, rule string, message string, value interface{}) error {
+		valSlice := strings.Fields(value.(string))
+		l, _ := strconv.Atoi(strings.TrimPrefix(rule, "max_word:")) // in real code, handle this conversion error
+		if len(valSlice) > l {
+			if message != "" {
+				return errors.New(message)
+			}
+			return fmt.Errorf("The %s field must not be greater than %d words", field, l)
+		}
+		return nil
+	})
+}
+
+type article struct {
+	Title string   `json:"title"`
+	Body  string   `json:"body"`
+	Tags  []string `json:"tags"`
+}
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	var article article
+	rules := govalidator.MapData{
+		"title": []string{"between:10,120"},
+		"body":  []string{"max_word:150"}, // using custom rule max_word
+		"tags":  []string{"between:3,5"},
+	}
+
+	opts := govalidator.Options{
+		Request:         r,
+		Data:            &article,
+		Rules:           rules,
+		RequiredDefault: true, //force user to fill all the inputs
+	}
+
+	v := govalidator.New(opts)
+	e := v.ValidateJSON()
+	err := map[string]interface{}{"validationError": e}
+	w.Header().Set("Content-type", "application/json")
+	json.NewEncoder(w).Encode(err)
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	fmt.Println("Listening on port: 9000")
+	http.ListenAndServe(":9000", nil)
+}
+
+```
+***Response***
+```json
+{
+    "validationError": {
+        "body": [
+            "The body field must not be greater than 150 words"
+        ],
+        "tags": [
+            "The tags field must be between 3 and 5"
+        ],
+        "title": [
+            "The title field must be between 10 and 120"
+        ]
+    }
+}
+```
diff --git a/v2/validationmdl/validationcore/doc/FILE_VALIDATION.md b/v2/validationmdl/validationcore/doc/FILE_VALIDATION.md
new file mode 100644
index 0000000000000000000000000000000000000000..55e68d128db3e88da4b9874a9fb3e80aa26a3408
--- /dev/null
+++ b/v2/validationmdl/validationcore/doc/FILE_VALIDATION.md
@@ -0,0 +1,67 @@
+
+### Validate File
+
+For `multipart/form-data` validation, add the `file:` prefix to the _field_ name that contains the file. If you use a custom message, also add the `file:` prefix to the Messages MapData key.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/thedevsaddam/govalidator"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	rules := govalidator.MapData{
+		"file:photo": []string{"ext:jpg,png", "size:10000", "mime:jpg,png", "required"},
+	}
+
+	messages := govalidator.MapData{
+			"file:photo": []string{"ext:Only jpg/png is allowed", "required:Photo is required"},
+	}
+
+	opts := govalidator.Options{
+		Request: r,     // request object
+		Rules:   rules, // rules map,
+		Messages: messages,
+	}
+	v := govalidator.New(opts)
+	e := v.Validate()
+	err := map[string]interface{}{"validationError": e}
+	w.Header().Set("Content-type", "application/json")
+	json.NewEncoder(w).Encode(err)
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	fmt.Println("Listening on port: 9000")
+	http.ListenAndServe(":9000", nil)
+}
+
+```
+***Response***
+```json
+{
+    "validationError": {
+        "photo": [
+            "Photo is required"
+        ]
+    }
+}
+
+or
+
+{
+    "validationError": {
+        "photo": [
+            "Only jpg/png is allowed",
+            "The photo field size is can not be greater than 10000 bytes",
+            "The photo field file mime text/plain is invalid"
+        ]
+    }
+}
+```
+Note: At this time it can validate only a single file.
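+
+To try it, you can send a multipart request with curl, e.g. (the file name is illustrative): `curl -F "photo=@photo.png" http://localhost:9000`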
diff --git a/v2/validationmdl/validationcore/doc/MAP_VALIDATION.md b/v2/validationmdl/validationcore/doc/MAP_VALIDATION.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3e0c505a83e041845af6b63819d3d2e2625d78d
--- /dev/null
+++ b/v2/validationmdl/validationcore/doc/MAP_VALIDATION.md
@@ -0,0 +1,85 @@
+### Validate JSON body into Map
+
+When using ValidateJSON you must provide a data struct or map, the rules, and the request. You can also pass a messages map if you need custom messages or localization.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/thedevsaddam/govalidator"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	rules := govalidator.MapData{
+		"username": []string{"required", "between:3,5"},
+		"email":    []string{"required", "min:4", "max:20", "email"},
+		"web":      []string{"url"},
+		"age":      []string{"numeric_between:18,56"},
+	}
+
+	data := make(map[string]interface{}, 0)
+
+	opts := govalidator.Options{
+		Request: r,
+		Rules:   rules,
+		Data:    &data,
+	}
+
+	vd := govalidator.New(opts)
+	e := vd.ValidateJSON()
+	fmt.Println(data)
+	err := map[string]interface{}{"validationError": e}
+	w.Header().Set("Content-type", "application/json")
+	json.NewEncoder(w).Encode(err)
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	fmt.Println("Listening on port: 9000")
+	http.ListenAndServe(":9000", nil)
+}
+
+```
+
+***Response***
+```json
+{
+    "validationError": {
+        "age": [
+            "The age field must be between 18 and 56"
+        ],
+        "email": [
+            "The email field is required",
+            "The email field must be a valid email address"
+        ],
+        "username": [
+            "The username field is required",
+            "The username field must be between 3 and 5"
+        ],
+        "web": [
+            "The web field format is invalid"
+        ]
+    }
+}
+```
+
+Note: You can pass custom messages for localization, as shown in the README.
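+
+To try it, you can post a JSON body, e.g. (the values are illustrative): `curl -X POST -H "Content-Type: application/json" -d '{"username":"jn","web":"not-a-url"}' http://localhost:9000`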
diff --git a/v2/validationmdl/validationcore/doc/NESTED_STRUCT.md b/v2/validationmdl/validationcore/doc/NESTED_STRUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..3425b83a5f4224d6b19fdf4ae6dd27a5f355bb90
--- /dev/null
+++ b/v2/validationmdl/validationcore/doc/NESTED_STRUCT.md
@@ -0,0 +1,95 @@
+
+### Validate JSON body with nested struct and slice
+
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/thedevsaddam/govalidator"
+)
+
+type (
+	user struct {
+		Username string `json:"username"`
+		Email    string `json:"email"`
+		Web      string `json:"web"`
+		Age      int    `json:"age"`
+		Phone    string `json:"phone"`
+		Agree    bool   `json:"agree"`
+		DOB      string `json:"dob"`
+		Address  address
+		Roles    []string `json:"roles"`
+	}
+
+	address struct {
+		Village    string `json:"village"`
+		PostalCode string `json:"postalCode"`
+	}
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	var usr user
+	rules := govalidator.MapData{
+		"username":   []string{"required", "between:3,8"},
+		"email":      []string{"required", "min:4", "max:20", "email"},
+		"web":        []string{"url"},
+		"age":        []string{"between:18,56"},
+		"phone":      []string{"digits:11"},
+		"agree":      []string{"bool"},
+		"dob":        []string{"date"},
+		"village":    []string{"between:3,10"},
+		"postalCode": []string{"digits:4"},
+		"roles":      []string{"len:4"},
+	}
+	opts := govalidator.Options{
+		Request:         r,     // request object
+		Rules:           rules, // rules map
+		Data:            &usr,
+		RequiredDefault: true, // all fields are required by default
+	}
+	v := govalidator.New(opts)
+	e := v.ValidateJSON()
+	fmt.Println(usr)
+	err := map[string]interface{}{"validationError": e}
+	w.Header().Set("Content-type", "application/json")
+	json.NewEncoder(w).Encode(err)
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	fmt.Println("Listening on port: 9000")
+	http.ListenAndServe(":9000", nil)
+}
+
+```
+***Response***
+```json
+{
+    "validationError": {
+        "email": [
+            "The email field must be minimum 4 char",
+            "The email field must be a valid email address"
+        ],
+        "phone": [
+            "The phone field must be 11 digits"
+        ],
+        "postalCode": [
+            "The postalCode field must be 4 digits"
+        ],
+        "roles": [
+            "The roles field must be length of 4"
+        ],
+        "village": [
+            "The village field must be between 3 and 10"
+        ],
+        "web": [
+            "The web field format is invalid"
+        ]
+    }
+}
+```
diff --git a/v2/validationmdl/validationcore/doc/SIMPLE_STRUCT_VALIDATION.md b/v2/validationmdl/validationcore/doc/SIMPLE_STRUCT_VALIDATION.md
new file mode 100644
index 0000000000000000000000000000000000000000..be9e8f685ef99dc823c24919060041e532d9a9c2
--- /dev/null
+++ b/v2/validationmdl/validationcore/doc/SIMPLE_STRUCT_VALIDATION.md
@@ -0,0 +1,79 @@
+
+### Validate JSON body into a simple Struct
+
+When using ValidateJSON you must provide a data struct or map, the rules, and the request. You can also pass a messages map if you need custom messages or localization.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/thedevsaddam/govalidator"
+)
+
+type user struct {
+	Username string           `json:"username"`
+	Email    string           `json:"email"`
+	Web      string           `json:"web"`
+	Age      govalidator.Int  `json:"age"`
+	Agree    govalidator.Bool `json:"agree"`
+}
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	var user user
+	rules := govalidator.MapData{
+		"username": []string{"required", "between:3,5"},
+		"email":    []string{"required", "min:4", "max:20", "email"},
+		"web":      []string{"url"},
+		"age":      []string{"required"},
+		"agree":    []string{"required"},
+	}
+
+	opts := govalidator.Options{
+		Request: r,
+		Data:    &user,
+		Rules:   rules,
+	}
+
+	v := govalidator.New(opts)
+	e := v.ValidateJSON()
+	fmt.Println(user) // your incoming JSON data in Go data struct
+	err := map[string]interface{}{"validationError": e}
+	w.Header().Set("Content-type", "application/json")
+	json.NewEncoder(w).Encode(err)
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	fmt.Println("Listening on port: 9000")
+	http.ListenAndServe(":9000", nil)
+}
+
+```
+***Response***
+```json
+{
+    "validationError": {
+        "age": [
+            "The age field is required"
+        ],
+        "agree": [
+            "The agree field is required"
+        ],
+        "email": [
+            "The email field is required",
+            "The email field must be minimum 4 char",
+            "The email field must be a valid email address"
+        ],
+        "username": [
+            "The username field is required",
+            "The username field must be between 3 and 5"
+        ]
+    }
+}
+```
+
+#### Note: When using the `required` rule with number or boolean data, use the provided custom types like Int, Int64, Float32, Float64 or Bool
diff --git a/v2/validationmdl/validationcore/errors.go b/v2/validationmdl/validationcore/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd96b3a01ee1e23ab95287cf8ac09d456a602ca0
--- /dev/null
+++ b/v2/validationmdl/validationcore/errors.go
@@ -0,0 +1,11 @@
+package govalidator
+
+import "errors"
+
+var (
+	errStringToInt          = errors.New("govalidator: unable to parse string to integer")
+	errStringToFloat        = errors.New("govalidator: unable to parse string to float")
+	errValidateArgsMismatch = errors.New("govalidator: provide at least *http.Request and rules for Validate method")
+	errInvalidArgument      = errors.New("govalidator: invalid number of argument")
+	errRequirePtr           = errors.New("govalidator: provide pointer to the data structure")
+)
diff --git a/v2/validationmdl/validationcore/helper.go b/v2/validationmdl/validationcore/helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..e4b3f1f99a4067add71269ae1a1c0a937a14f8bf
--- /dev/null
+++ b/v2/validationmdl/validationcore/helper.go
@@ -0,0 +1,152 @@
+package govalidator
+
+import (
+	"encoding/json"
+	"regexp"
+)
+
+// isAlpha check the input is letters (a-z,A-Z) or not
+func isAlpha(str string) bool {
+	return regexAlpha.MatchString(str)
+}
+
+// isAlphaDash checks whether the input contains only letters, numbers, dashes and underscores
+func isAlphaDash(str string) bool {
+	return regexAlphaDash.MatchString(str)
+}
+
+// isAlphaNumeric check the input is alpha numeric or not
+func isAlphaNumeric(str string) bool {
+	return regexAlphaNumeric.MatchString(str)
+}
+
+// isBoolean check the input contains boolean type values
+// in this case: "0", "1", "true", "false", "True", "False"
+func isBoolean(str string) bool {
+	bools := []string{"0", "1", "true", "false", "True", "False"}
+	for _, b := range bools {
+		if b == str {
+			return true
+		}
+	}
+	return false
+}
+
+// isCreditCard checks whether the provided card number is a valid
+// Visa, MasterCard, American Express, Diners Club, Discover or JCB card
+func isCreditCard(card string) bool {
+	return regexCreditCard.MatchString(card)
+}
+
+// isCoordinate checks whether the input is a valid coordinate
+func isCoordinate(str string) bool {
+	return regexCoordinate.MatchString(str)
+}
+
+// isCSSColor checks whether the input is a valid CSS color value (hex, rgb, rgba, hsl, hsla) like #909, #00aaff, rgb(255,122,122)
+func isCSSColor(str string) bool {
+	return regexCSSColor.MatchString(str)
+}
+
+// isDate check the date string is valid or not
+func isDate(date string) bool {
+	return regexDate.MatchString(date)
+}
+
+// isDateDDMMYY check the date string is valid or not
+func isDateDDMMYY(date string) bool {
+	return regexDateDDMMYY.MatchString(date)
+}
+
+// isEmail checks whether an email address is valid
+func isEmail(email string) bool {
+	return regexEmail.MatchString(email)
+}
+
+// isFloat check the input string is a float or not
+func isFloat(str string) bool {
+	return regexFloat.MatchString(str)
+}
+
+// isIn checks if the needle exists in the haystack
+func isIn(haystack []string, needle string) bool {
+	for _, h := range haystack {
+		if h == needle {
+			return true
+		}
+	}
+	return false
+}
+
+// isJSON checks whether the input string is valid JSON
+func isJSON(str string) bool {
+	var data interface{}
+	if err := json.Unmarshal([]byte(str), &data); err != nil {
+		return false
+	}
+	return true
+}
+
+// isNumeric check the provided input string is numeric or not
+func isNumeric(str string) bool {
+	return regexNumeric.MatchString(str)
+}
+
+// isLatitude check the provided input string is a valid latitude or not
+func isLatitude(str string) bool {
+	return regexLatitude.MatchString(str)
+}
+
+// isLongitude check the provided input string is a valid longitude or not
+func isLongitude(str string) bool {
+	return regexLongitude.MatchString(str)
+}
+
+// isIP check the provided input string is a valid IP address or not
+func isIP(str string) bool {
+	return regexIP.MatchString(str)
+}
+
+// isIPV4 check the provided input string is a valid IP address version 4 or not
+// Ref: https://en.wikipedia.org/wiki/IPv4
+func isIPV4(str string) bool {
+	return regexIPV4.MatchString(str)
+}
+
+// isIPV6 check the provided input string is a valid IP address version 6 or not
+// Ref: https://en.wikipedia.org/wiki/IPv6
+func isIPV6(str string) bool {
+	return regexIPV6.MatchString(str)
+}
+
+// isMatchedRegex match the regular expression string provided in first argument
+// with second argument which is also a string
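+// Note: this uses regexp.MustCompile, which panics if rxStr is not a valid pattern.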
+func isMatchedRegex(rxStr, str string) bool {
+	rx := regexp.MustCompile(rxStr)
+	return rx.MatchString(str)
+}
+
+// isURL check a URL is valid or not
+func isURL(url string) bool {
+	return regexURL.MatchString(url)
+}
+
+// isUUID check the provided string is valid UUID or not
+func isUUID(str string) bool {
+	return regexUUID.MatchString(str)
+}
+
+// isUUID3 check the provided string is valid UUID version 3 or not
+func isUUID3(str string) bool {
+	return regexUUID3.MatchString(str)
+}
+
+// isUUID4 check the provided string is valid UUID version 4 or not
+func isUUID4(str string) bool {
+	return regexUUID4.MatchString(str)
+}
+
+// isUUID5 check the provided string is valid UUID version 5 or not
+func isUUID5(str string) bool {
+	return regexUUID5.MatchString(str)
+}
diff --git a/v2/validationmdl/validationcore/helper_test.go b/v2/validationmdl/validationcore/helper_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..abe98ecf6bcf225d8bb845c227972f85c014ec64
--- /dev/null
+++ b/v2/validationmdl/validationcore/helper_test.go
@@ -0,0 +1,466 @@
+package govalidator
+
+import "testing"
+
+type inputs map[string]bool
+
+var (
+	_alpha = inputs{
+		"abcdefghijgklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": true,
+		"7877**": false,
+		"abc":    true,
+		")(^%&)": false,
+	}
+	_alphaDash = inputs{
+		"abcdefghijgklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-": true,
+		"John_Do-E": true,
+		"+=a(0)":    false,
+	}
+	_alphaNumeric = inputs{
+		"abcdefghijgklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890": true,
+		"090a": true,
+		"*&*)": false,
+	}
+	_boolStringsList = inputs{
+		"0":     true,
+		"1":     true,
+		"true":  true,
+		"false": true,
+		"o":     false,
+		"a":     false,
+	}
+	// Ref: https://www.freeformatter.com/credit-card-number-generator-validator.html
+	_creditCardList = inputs{
+		"4896644531043572": true,
+		"2221005631780408": true,
+		"349902515380498":  true,
+		"6011843157272458": true,
+		"3543358904915048": true,
+		"5404269782892303": true,
+		"4508168417293390": true,
+		"0604595245598387": false,
+		"6388244169973297": false,
+	}
+	_coordinateList = inputs{
+		"30.297018,-78.486328": true,
+		"40.044438,-104.0625":  true,
+		"58.068581,-99.580078": true,
+		"abc, xyz":             false,
+		"0, 887":               false,
+	}
+	_cssColorList = inputs{
+		"#000":           true,
+		"#00aaff":        true,
+		"rgb(123,32,12)": true,
+		"#0":             false,
+		"#av":            false,
+	}
+	_dateList = inputs{
+		"2016-10-14": true,
+		"2013/02/18": true,
+		"2020/12/30": true,
+		"0001/14/30": false,
+	}
+	_dateDDMMYYList = inputs{
+		"01-01-2000": true,
+		"28/02/2001": true,
+		"01/12/2000": true,
+		"2012/11/30": false,
+		"201/11/30":  false,
+	}
+	_emailList = inputs{
+		"john@example.com":       true,
+		"thedevsaddam@gmail.com": true,
+		"jane@yahoo.com":         true,
+		"janeahoo.com":           false,
+		"janea@.com":             false,
+	}
+	_floatList         = inputs{"123": true, "12.50": true, "33.07": true, "abc": false, "o0.45": false}
+	_roleList          = []string{"admin", "manager", "supervisor"}
+	_validJSONString   = `{"FirstName": "Bob", "LastName": "Smith"}`
+	_invalidJSONString = `{"invalid json"}`
+	_numericStringList = inputs{"12": true, "09": true, "878": true, "100": true, "a": false, "xyz": false}
+	_latList           = inputs{"30.297018": true, "40.044438": true, "a": false, "xyz": false}
+	_lonList           = inputs{"-78.486328": true, "-104.0625": true, "a": false, "xyz": false}
+	_ipList            = inputs{"10.255.255.255": true, "172.31.255.255": true, "192.168.255.255": true, "a92.168.255.255": false, "172.31.255.25b": false}
+	_ipV6List          = inputs{
+		"1200:0000:AB00:1234:0000:2552:7777:1313": true,
+		"21DA:D3:0:2F3B:2AA:FF:FE28:9C5A":         true,
+		"10.255.255.255":                          false,
+	}
+	_urlList = inputs{
+		"http://www.google.com":  true,
+		"https://www.google.com": true,
+		"https://facebook.com":   true,
+		"yahoo.com":              true,
+		"adca":                   false,
+	}
+	_uuidList = inputs{
+		"ee7cf0a0-1922-401b-a1ae-6ec9261484c0": true,
+		"ee7cf0a0-1922-401b-a1ae-6ec9261484c1": true,
+		"ee7cf0a0-1922-401b-a1ae-6ec9261484a0": true,
+		"39888f87-fb62-5988-a425-b2ea63f5b81e": false,
+	}
+	_uuidV3List = inputs{
+		"a987fbc9-4bed-3078-cf07-9141ba07c9f3": true,
+		"b987fbc9-4bed-3078-cf07-9141ba07c9f3": true,
+		"ee7cf0a0-1922-401b-a1ae-6ec9261484c0": false,
+	}
+	_uuidV4List = inputs{
+		"df7cca36-3d7a-40f4-8f06-ae03cc22f045": true,
+		"ef7cca36-3d7a-40f4-8f06-ae03cc22f048": true,
+		"b987fbc9-4bed-3078-cf07-9141ba07c9f3": false,
+	}
+	_uuidV5List = inputs{
+		"39888f87-fb62-5988-a425-b2ea63f5b81e": true,
+		"33388f87-fb62-5988-a425-b2ea63f5b81f": true,
+		"b987fbc9-4bed-3078-cf07-9141ba07c9f3": false,
+	}
+)
+
+func Test_IsAlpha(t *testing.T) {
+	for a, s := range _alpha {
+		if isAlpha(a) != s {
+			t.Error("IsAlpha failed to determine alpha!")
+		}
+	}
+}
+
+func Benchmark_IsAlpha(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isAlpha("abcdAXZY")
+	}
+}
+
+func Test_IsAlphaDash(t *testing.T) {
+	for a, s := range _alphaDash {
+		if isAlphaDash(a) != s {
+			t.Error("IsAlphaDash failed to determine alpha dash!")
+		}
+	}
+}
+
+func Benchmark_IsAlphaDash(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isAlphaDash("John_Do-E")
+	}
+}
+
+func Test_IsAlphaNumeric(t *testing.T) {
+	for a, s := range _alphaNumeric {
+		if isAlphaNumeric(a) != s {
+			t.Error("IsAlphaNumeric failed to determine alpha numeric!")
+		}
+	}
+}
+
+func Benchmark_IsAlphaNumeric(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isAlphaNumeric("abc12AZ")
+	}
+}
+
+func Test_IsBoolean(t *testing.T) {
+	for b, s := range _boolStringsList {
+		if isBoolean(b) != s {
+			t.Error("IsBoolean failed to determine boolean!")
+		}
+	}
+}
+
+func Benchmark_IsBoolean(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isBoolean("true")
+	}
+}
+
+func Test_IsCreditCard(t *testing.T) {
+	for card, state := range _creditCardList {
+		if isCreditCard(card) != state {
+			t.Error("IsCreditCard failed to determine credit card!")
+		}
+	}
+}
+
+func Benchmark_IsCreditCard(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isCreditCard("2221005631780408")
+	}
+}
+
+func Test_IsCoordinate(t *testing.T) {
+	for c, s := range _coordinateList {
+		if isCoordinate(c) != s {
+			t.Error("IsCoordinate failed to determine coordinate!")
+		}
+	}
+}
+
+func Benchmark_IsCoordinate(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isCoordinate("30.297018,-78.486328")
+	}
+}
+
+func Test_IsCSSColor(t *testing.T) {
+	for c, s := range _cssColorList {
+		if isCSSColor(c) != s {
+			t.Error("IsCSSColor failed to determine css color code!")
+		}
+	}
+}
+
+func Benchmark_IsCSSColor(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isCSSColor("#00aaff")
+	}
+}
+
+func Test_IsDate(t *testing.T) {
+	for d, s := range _dateList {
+		if isDate(d) != s {
+			t.Error("IsDate failed to determine date!")
+		}
+	}
+}
+
+func Benchmark_IsDate(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isDate("2016-10-14")
+	}
+}
+
+func Test_IsDateDDMMYY(t *testing.T) {
+	for d, s := range _dateDDMMYYList {
+		if isDateDDMMYY(d) != s {
+			t.Error("IsDateDDMMYY failed to determine date!")
+		}
+	}
+}
+
+func Benchmark_IsDateDDMMYY(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isDateDDMMYY("23-10-2014")
+	}
+}
+
+func Test_IsEmail(t *testing.T) {
+	for e, s := range _emailList {
+		if isEmail(e) != s {
+			t.Error("IsEmail failed to determine email!")
+		}
+	}
+}
+
+func Benchmark_IsEmail(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isEmail("thedevsaddam@gmail.com")
+	}
+}
+
+func Test_IsFloat(t *testing.T) {
+	for f, s := range _floatList {
+		if isFloat(f) != s {
+			t.Error("IsFloat failed to determine float value!")
+		}
+	}
+}
+
+func Benchmark_IsFloat(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isFloat("123.001")
+	}
+}
+
+func Test_IsIn(t *testing.T) {
+	if !isIn(_roleList, "admin") {
+		t.Error("IsIn failed!")
+	}
+}
+
+func Benchmark_IsIn(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isIn(_roleList, "maager")
+	}
+}
+
+func Test_IsJSON(t *testing.T) {
+	if !isJSON(_validJSONString) {
+		t.Error("IsJSON failed!")
+	}
+	if isJSON(_invalidJSONString) {
+		t.Error("IsJSON unable to detect invalid json!")
+	}
+}
+
+func Benchmark_IsJSON(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isJSON(_validJSONString)
+	}
+}
+
+func Test_IsNumeric(t *testing.T) {
+	for n, s := range _numericStringList {
+		if isNumeric(n) != s {
+			t.Error("IsNumeric failed!")
+		}
+	}
+}
+
+func Benchmark_IsNumeric(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isNumeric("123")
+	}
+}
+
+func Test_IsLatitude(t *testing.T) {
+	for n, s := range _latList {
+		if isLatitude(n) != s {
+			t.Error("IsLatitude failed!")
+		}
+	}
+}
+
+func Benchmark_IsLatitude(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isLatitude("30.297018")
+	}
+}
+
+func Test_IsLongitude(t *testing.T) {
+	for n, s := range _lonList {
+		if isLongitude(n) != s {
+			t.Error("IsLongitude failed!")
+		}
+	}
+}
+
+func Benchmark_IsLongitude(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isLongitude("-78.486328")
+	}
+}
+
+func Test_IsIP(t *testing.T) {
+	for i, s := range _ipList {
+		if isIP(i) != s {
+			t.Error("IsIP failed!")
+		}
+	}
+}
+
+func Benchmark_IsIP(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isIP("10.255.255.255")
+	}
+}
+
+func Test_IsIPV4(t *testing.T) {
+	for i, s := range _ipList {
+		if isIPV4(i) != s {
+			t.Error("IsIPV4 failed!")
+		}
+	}
+}
+
+func Benchmark_IsIPV4(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isIPV4("10.255.255.255")
+	}
+}
+
+func Test_IsIPV6(t *testing.T) {
+	for i, s := range _ipV6List {
+		if isIPV6(i) != s {
+			t.Error("IsIPV6 failed!")
+		}
+	}
+}
+
+func Benchmark_IsIPV6(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isIPV6("10.255.255.255")
+	}
+}
+
+func Test_IsMatchedRegex(t *testing.T) {
+	if !isMatchedRegex("^(name|age)$", "name") {
+		t.Error("IsMatchedRegex failed!")
+	}
+}
+
+func Benchmark_IsMatchedRegex(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isMatchedRegex("^(name|age)$", "name")
+	}
+}
+
+func Test_IsURL(t *testing.T) {
+	for u, s := range _urlList {
+		if isURL(u) != s {
+			t.Error("IsURL failed!")
+		}
+	}
+}
+
+func Benchmark_IsURL(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isURL("https://www.facebook.com")
+	}
+}
+
+func Test_IsUUID(t *testing.T) {
+	for u, s := range _uuidList {
+		if isUUID(u) != s {
+			t.Error("IsUUID failed!")
+		}
+	}
+}
+
+func Benchmark_IsUUID(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isUUID("ee7cf0a0-1922-401b-a1ae-6ec9261484c0")
+	}
+}
+
+func Test_IsUUID3(t *testing.T) {
+	for u, s := range _uuidV3List {
+		if isUUID3(u) != s {
+			t.Error("IsUUID3 failed!")
+		}
+	}
+}
+
+func Benchmark_IsUUID3(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isUUID3("a987fbc9-4bed-3078-cf07-9141ba07c9f3")
+	}
+}
+
+func Test_IsUUID4(t *testing.T) {
+	for u, s := range _uuidV4List {
+		if isUUID4(u) != s {
+			t.Error("IsUUID4 failed!")
+		}
+	}
+}
+
+func Benchmark_IsUUID4(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isUUID4("57b73598-8764-4ad0-a76a-679bb6640eb1")
+	}
+}
+
+func Test_IsUUID5(t *testing.T) {
+	for u, s := range _uuidV5List {
+		if isUUID5(u) != s {
+			t.Error("IsUUID5 failed!")
+		}
+	}
+}
+
+func Benchmark_IsUUID5(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isUUID5("987fbc97-4bed-5078-9f07-9141ba07c9f3")
+	}
+}
diff --git a/v2/validationmdl/validationcore/regex_patterns.go b/v2/validationmdl/validationcore/regex_patterns.go
new file mode 100644
index 0000000000000000000000000000000000000000..bdb5f2b4f0e937daed3b2c583b98d6cb9a5124e5
--- /dev/null
+++ b/v2/validationmdl/validationcore/regex_patterns.go
@@ -0,0 +1,74 @@
+package govalidator
+
+import (
+	"regexp"
+)
+
+const (
+	// Alpha represents regular expression for alpha characters
+	Alpha string = "^[a-zA-Z]+$"
+	// AlphaDash represents regular expression for alphanumeric characters with underscore and dash
+	AlphaDash string = "^[a-zA-Z0-9_-]+$"
+	// AlphaNumeric represents regular expression for alphanumeric characters
+	AlphaNumeric string = "^[a-zA-Z0-9]+$"
+	// CreditCard represents regular expression for credit cards like (Visa, MasterCard, American Express, Diners Club, Discover, and JCB cards). Ref: https://stackoverflow.com/questions/9315647/regex-credit-card-number-tests
+	CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|[25][1-7][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
+	// Coordinate represents latitude and longitude regular expression
+	Coordinate string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?),\\s*[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" // Ref: https://stackoverflow.com/questions/3518504/regular-expression-for-matching-latitude-longitude-coordinates
+	// CSSColor represents css valid color code with hex, rgb, rgba, hsl, hsla etc. Ref: http://www.regexpal.com/97509
+	CSSColor string = "^(#([\\da-f]{3}){1,2}|(rgb|hsl)a\\((\\d{1,3}%?,\\s?){3}(1|0?\\.\\d+)\\)|(rgb|hsl)\\(\\d{1,3}%?(,\\s?\\d{1,3}%?){2}\\))$"
+	// Date represents regular expression for valid date like: yyyy-mm-dd
+	Date string = "^(((19|20)([2468][048]|[13579][26]|0[48])|2000)[/-]02[/-]29|((19|20)[0-9]{2}[/-](0[469]|11)[/-](0[1-9]|[12][0-9]|30)|(19|20)[0-9]{2}[/-](0[13578]|1[02])[/-](0[1-9]|[12][0-9]|3[01])|(19|20)[0-9]{2}[/-]02[/-](0[1-9]|1[0-9]|2[0-8])))$"
+	// DateDDMMYY represents regular expression for valid date of format dd/mm/yyyy , dd-mm-yyyy etc.Ref: http://regexr.com/346hf
+	DateDDMMYY string = "^(0?[1-9]|[12][0-9]|3[01])[\\/\\-](0?[1-9]|1[012])[\\/\\-]\\d{4}$"
+	// Email represents regular expression for email
+	Email string = "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+$"
+	// Float represents regular expression for finding a float number
+	Float string = "^[+-]?([0-9]*[.])?[0-9]+$"
+	// IP represents regular expression for ip address
+	IP string = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
+	// IPV4 represents regular expression for ip address version 4
+	IPV4 string = "^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\/([0-9]|[1-2][0-9]|3[0-2]))?$"
+	// IPV6 represents regular expression for ip address version 6
+	IPV6 string = `^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$`
+	// Latitude represents latitude regular expression
+	Latitude string = "^(\\+|-)?(?:90(?:(?:\\.0{1,6})?)|(?:[0-9]|[1-8][0-9])(?:(?:\\.[0-9]{1,6})?))$"
+	// Longitude represents longitude regular expression
+	Longitude string = "^(\\+|-)?(?:180(?:(?:\\.0{1,6})?)|(?:[0-9]|[1-9][0-9]|1[0-7][0-9])(?:(?:\\.[0-9]{1,6})?))$"
+	// Numeric represents regular expression for numeric
+	Numeric string = "^[0-9]+$"
+	// URL represents regular expression for url
+	URL string = "^(?:http(s)?:\\/\\/)?[\\w.-]+(?:\\.[\\w\\.-]+)+[\\w\\-\\._~:/?#[\\]@!\\$&'\\(\\)\\*\\+,;=.]+$"
+	// UUID represents regular expression for UUID. Ref: https://stackoverflow.com/questions/136505/searching-for-uuids-in-text-with-regex
+	UUID string = "^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12}$"
+	// UUID3 represents regular expression for UUID version 3
+	UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+	// UUID4 represents regular expression for UUID version 4
+	UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+	// UUID5 represents regular expression for UUID version 5
+	UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+)
+
+var (
+	regexAlpha        = regexp.MustCompile(Alpha)
+	regexAlphaDash    = regexp.MustCompile(AlphaDash)
+	regexAlphaNumeric = regexp.MustCompile(AlphaNumeric)
+	regexCreditCard   = regexp.MustCompile(CreditCard)
+	regexCoordinate   = regexp.MustCompile(Coordinate)
+	regexCSSColor     = regexp.MustCompile(CSSColor)
+	regexDate         = regexp.MustCompile(Date)
+	regexDateDDMMYY   = regexp.MustCompile(DateDDMMYY)
+	regexEmail        = regexp.MustCompile(Email)
+	regexFloat        = regexp.MustCompile(Float)
+	regexNumeric      = regexp.MustCompile(Numeric)
+	regexLatitude     = regexp.MustCompile(Latitude)
+	regexLongitude    = regexp.MustCompile(Longitude)
+	regexIP           = regexp.MustCompile(IP)
+	regexIPV4         = regexp.MustCompile(IPV4)
+	regexIPV6         = regexp.MustCompile(IPV6)
+	regexURL          = regexp.MustCompile(URL)
+	regexUUID         = regexp.MustCompile(UUID)
+	regexUUID3        = regexp.MustCompile(UUID3)
+	regexUUID4        = regexp.MustCompile(UUID4)
+	regexUUID5        = regexp.MustCompile(UUID5)
+)
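+
+// Usage sketch (illustrative): callers can also compile the exported patterns
+// for one-off checks, e.g.:
+//
+//	ok := regexp.MustCompile(govalidator.Email).MatchString("john@example.com") // true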
diff --git a/v2/validationmdl/validationcore/roller.go b/v2/validationmdl/validationcore/roller.go
new file mode 100644
index 0000000000000000000000000000000000000000..a200f0451585124422da3dcddee2998fbb3acb04
--- /dev/null
+++ b/v2/validationmdl/validationcore/roller.go
@@ -0,0 +1,300 @@
+package govalidator
+
+import (
+	"reflect"
+	"strings"
+)
+
+// ROADMAP
+// traverse a map or struct
+// detect each type
+// if the type is a struct or map then traverse it
+// otherwise push the value into the parent map under its key
+// flatten every type into a map[string]interface{}
+// the roller type below does this work
+
+// roller represents a roller type that will be used to flatten our data in a map[string]interface{}
+type roller struct {
+	root          map[string]interface{}
+	typeName      string
+	tagIdentifier string
+	tagSeparator  string
+}
+
+// start begins traversing the tree
+func (r *roller) start(iface interface{}) {
+	//initialize the Tree
+	r.root = make(map[string]interface{})
+	r.typeName = ""
+	ifv := reflect.ValueOf(iface)
+	ift := reflect.TypeOf(iface)
+	if ift.Kind() == reflect.Ptr {
+		ifv = ifv.Elem()
+		ift = ift.Elem()
+	}
+	canInterface := ifv.CanInterface()
+	// check the provided root element
+	switch ift.Kind() {
+	case reflect.Struct:
+		if canInterface {
+			r.traverseStruct(ifv.Interface())
+		}
+	case reflect.Map:
+		if ifv.Len() > 0 {
+			if canInterface {
+				r.traverseMap(ifv.Interface())
+			}
+		}
+	case reflect.Slice:
+		if canInterface {
+			r.push("slice", ifv.Interface())
+		}
+	}
+}
+
+// setTagIdentifier sets the struct tag identifier. e.g: json, validate etc
+func (r *roller) setTagIdentifier(i string) {
+	r.tagIdentifier = i
+}
+
+// setTagSeparator sets the struct tag separator. e.g: pipe (|) or comma (,)
+func (r *roller) setTagSeparator(s string) {
+	r.tagSeparator = s
+}
+
+// getFlatMap returns all the flattened values
+func (r *roller) getFlatMap() map[string]interface{} {
+	return r.root
+}
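+
+// Usage sketch (illustrative): flatten a tagged struct into the root map.
+//
+//	r := roller{}
+//	r.setTagIdentifier("json")
+//	r.setTagSeparator(",")
+//	r.start(struct {
+//		Name string `json:"name"`
+//	}{Name: "Jane"})
+//	r.getFlatMap() // map[string]interface{}{"name": "Jane"}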
+
+// getFlatVal returns the value for key if it exists
+func (r *roller) getFlatVal(key string) (interface{}, bool) {
+	val, ok := r.root[key]
+	return val, ok
+}
+
+// push adds a value to the map if the key does not exist
+func (r *roller) push(key string, val interface{}) bool {
+	if _, ok := r.root[key]; ok {
+		return false
+	}
+	r.root[key] = val
+	return true
+}
+
+// traverseStruct walks all struct fields and adds them to root
+func (r *roller) traverseStruct(iface interface{}) {
+	ifv := reflect.ValueOf(iface)
+	ift := reflect.TypeOf(iface)
+
+	if ift.Kind() == reflect.Ptr {
+		ifv = ifv.Elem()
+		ift = ift.Elem()
+	}
+
+	for i := 0; i < ift.NumField(); i++ {
+		v := ifv.Field(i)
+		rfv := ift.Field(i)
+
+		switch v.Kind() {
+		case reflect.Struct:
+			var typeName string
+			if len(rfv.Tag.Get(r.tagIdentifier)) > 0 {
+				tags := strings.Split(rfv.Tag.Get(r.tagIdentifier), r.tagSeparator)
+				if tags[0] != "-" {
+					typeName = tags[0]
+				}
+			} else {
+				typeName = rfv.Name
+			}
+			if v.CanInterface() {
+				switch v.Type().String() {
+				case "govalidator.Int":
+					r.push(typeName, v.Interface())
+				case "govalidator.Int64":
+					r.push(typeName, v.Interface())
+				case "govalidator.Float32":
+					r.push(typeName, v.Interface())
+				case "govalidator.Float64":
+					r.push(typeName, v.Interface())
+				case "govalidator.Bool":
+					r.push(typeName, v.Interface())
+				default:
+					r.typeName = ift.Name()
+					r.traverseStruct(v.Interface())
+				}
+			}
+		case reflect.Map:
+			if v.CanInterface() {
+				r.traverseMap(v.Interface())
+			}
+		case reflect.Ptr: // if the field inside struct is Ptr then get the type and underlying values as interface{}
+			ptrReflectionVal := reflect.Indirect(v)
+			if !isEmpty(ptrReflectionVal) {
+				ptrField := ptrReflectionVal.Type()
+				switch ptrField.Kind() {
+				case reflect.Struct:
+					if v.CanInterface() {
+						r.traverseStruct(v.Interface())
+					}
+				case reflect.Map:
+					if v.CanInterface() {
+						r.traverseMap(v.Interface())
+					}
+				}
+			}
+		default:
+			if len(rfv.Tag.Get(r.tagIdentifier)) > 0 {
+				tags := strings.Split(rfv.Tag.Get(r.tagIdentifier), r.tagSeparator)
+				// add if first tag is not hyphen
+				if tags[0] != "-" {
+					if v.CanInterface() {
+						r.push(tags[0], v.Interface())
+					}
+				}
+			} else {
+				if v.Kind() == reflect.Ptr {
+					if ifv.CanInterface() {
+						r.push(ift.Name()+"."+rfv.Name, ifv.Interface())
+					}
+				} else {
+					if v.CanInterface() {
+						r.push(ift.Name()+"."+rfv.Name, v.Interface())
+					}
+				}
+			}
+		}
+	}
+}
+
+// traverseMap walks all map entries and adds them to root
+func (r *roller) traverseMap(iface interface{}) {
+	ifv := reflect.ValueOf(iface)
+	ift := reflect.TypeOf(iface)
+	if ift.Kind() == reflect.Ptr {
+		ifv = ifv.Elem()
+		ift = ift.Elem()
+	}
+
+	switch iface.(type) {
+	case map[string]interface{}:
+		for k, v := range iface.(map[string]interface{}) {
+			switch reflect.TypeOf(v).Kind() {
+			case reflect.Struct:
+				r.typeName = k // set the map key as name
+				r.traverseStruct(v)
+			case reflect.Map:
+				r.typeName = k // set the map key as name
+				r.traverseMap(v)
+			case reflect.Ptr: // if the field inside map is Ptr then get the type and underlying values as interface{}
+				switch reflect.TypeOf(v).Elem().Kind() {
+				case reflect.Struct:
+					r.traverseStruct(v)
+				case reflect.Map:
+					switch v.(type) {
+					case *map[string]interface{}:
+						r.traverseMap(*v.(*map[string]interface{}))
+					case *map[string]string:
+						r.traverseMap(*v.(*map[string]string))
+					case *map[string]bool:
+						r.traverseMap(*v.(*map[string]bool))
+					case *map[string]int:
+						r.traverseMap(*v.(*map[string]int))
+					case *map[string]int8:
+						r.traverseMap(*v.(*map[string]int8))
+					case *map[string]int16:
+						r.traverseMap(*v.(*map[string]int16))
+					case *map[string]int32:
+						r.traverseMap(*v.(*map[string]int32))
+					case *map[string]int64:
+						r.traverseMap(*v.(*map[string]int64))
+					case *map[string]float32:
+						r.traverseMap(*v.(*map[string]float32))
+					case *map[string]float64:
+						r.traverseMap(*v.(*map[string]float64))
+					case *map[string]uint:
+						r.traverseMap(*v.(*map[string]uint))
+					case *map[string]uint8:
+						r.traverseMap(*v.(*map[string]uint8))
+					case *map[string]uint16:
+						r.traverseMap(*v.(*map[string]uint16))
+					case *map[string]uint32:
+						r.traverseMap(*v.(*map[string]uint32))
+					case *map[string]uint64:
+						r.traverseMap(*v.(*map[string]uint64))
+					case *map[string]uintptr:
+						r.traverseMap(*v.(*map[string]uintptr))
+					}
+				default:
+					r.push(k, v.(interface{}))
+				}
+			default:
+				r.push(k, v)
+			}
+		}
+	case map[string]string:
+		for k, v := range iface.(map[string]string) {
+			r.push(k, v)
+		}
+	case map[string]bool:
+		for k, v := range iface.(map[string]bool) {
+			r.push(k, v)
+		}
+	case map[string]int:
+		for k, v := range iface.(map[string]int) {
+			r.push(k, v)
+		}
+	case map[string]int8:
+		for k, v := range iface.(map[string]int8) {
+			r.push(k, v)
+		}
+	case map[string]int16:
+		for k, v := range iface.(map[string]int16) {
+			r.push(k, v)
+		}
+	case map[string]int32:
+		for k, v := range iface.(map[string]int32) {
+			r.push(k, v)
+		}
+	case map[string]int64:
+		for k, v := range iface.(map[string]int64) {
+			r.push(k, v)
+		}
+	case map[string]float32:
+		for k, v := range iface.(map[string]float32) {
+			r.push(k, v)
+		}
+	case map[string]float64:
+		for k, v := range iface.(map[string]float64) {
+			r.push(k, v)
+		}
+	case map[string]uint:
+		for k, v := range iface.(map[string]uint) {
+			r.push(k, v)
+		}
+	case map[string]uint8:
+		for k, v := range iface.(map[string]uint8) {
+			r.push(k, v)
+		}
+	case map[string]uint16:
+		for k, v := range iface.(map[string]uint16) {
+			r.push(k, v)
+		}
+	case map[string]uint32:
+		for k, v := range iface.(map[string]uint32) {
+			r.push(k, v)
+		}
+	case map[string]uint64:
+		for k, v := range iface.(map[string]uint64) {
+			r.push(k, v)
+		}
+	case map[string]uintptr:
+		for k, v := range iface.(map[string]uintptr) {
+			r.push(k, v)
+		}
+	}
+}
diff --git a/v2/validationmdl/validationcore/roller_test.go b/v2/validationmdl/validationcore/roller_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..30e2032bc74c67e6ef0e5e51385383ea2d8e3015
--- /dev/null
+++ b/v2/validationmdl/validationcore/roller_test.go
@@ -0,0 +1,411 @@
+package govalidator
+
+import (
+	"testing"
+)
+
+type Earth struct {
+	Human
+	Name     string
+	Liveable bool
+	Planet   map[string]interface{}
+}
+
+type Human struct {
+	Male
+	Female
+}
+
+type Male struct {
+	Name string
+	Age  int
+}
+
+type Female struct {
+	Name string
+	Age  int
+}
+
+type deepLevel struct {
+	Deep   string
+	Levels map[string]string
+}
+
+type structWithTag struct {
+	Name string `validate:"name"`
+	Age  int    `validate:"age"`
+}
+
+var p = map[string]interface{}{
+	"naam":  "Jane",
+	"bois":  29,
+	"white": true,
+}
+var dl = deepLevel{
+	Deep: "So much deep",
+	Levels: map[string]string{
+		"level 1": "20 m",
+		"level 2": "30 m",
+		"level 3": "80 m",
+		"level 4": "103 m",
+	},
+}
+var planet = map[string]interface{}{
+	"name":      "mars",
+	"age":       1000,
+	"red":       true,
+	"deepLevel": dl,
+	"p":         p,
+}
+var male = Male{"John", 33}
+var female = Female{"Jane", 30}
+var h = Human{
+	male,
+	female,
+}
+var e = Earth{
+	h,
+	"green earth",
+	true,
+	planet,
+}
+
+var m = make(map[string]interface{})
+
+type structWithPointerToEmbeddedStruct struct {
+	Male   *Male
+	Female *Female
+	Planet *map[string]interface{}
+}
+
+func init() {
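+	// m flattens to 20 leaf values: 17 from earth (4 from the embedded Human,
+	// Name and Liveable, plus 11 from the Planet map), then person, the single
+	// iface entry, and the array pushed as one value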
+	m["earth"] = e
+	m["person"] = "John Doe"
+	m["iface"] = map[string]string{"another_person": "does it change root!"}
+	m["array"] = [5]int{1, 4, 5, 6, 7}
+}
+
+func TestRoller_push(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(male)
+	if r.push("Male.Name", "set new name") != false {
+		t.Error("push failed!")
+	}
+}
+
+func TestRoller_Start(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(m)
+	if len(r.getFlatMap()) != 20 {
+		t.Error("Start failed!")
+	}
+}
+
+func BenchmarkRoller_Start(b *testing.B) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	for n := 0; n < b.N; n++ {
+		r.start(m)
+	}
+}
+
+func Test_Roller_Start_empty_map(t *testing.T) {
+	r := roller{}
+	emap := make(map[string]interface{})
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(emap)
+	if len(r.getFlatMap()) > 0 {
+		t.Error("Failed to validate empty map")
+	}
+}
+
+func TestRoller_traverseStructWithEmbeddedPointerStructAndMap(t *testing.T) {
+	r := roller{}
+	s := structWithPointerToEmbeddedStruct{
+		&male,
+		&female,
+		&p,
+	}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(s)
+	if len(r.getFlatMap()) != 4 {
+		t.Error("traverseStructWithEmbeddedPointerStructAndMap failed!")
+	}
+}
+
+func TestRoller_traverseMapWithPointerStructAndMap(t *testing.T) {
+	r := roller{}
+	mapOfPointerVals := map[string]interface{}{
+		"structField":        male,
+		"structPointerField": &female,
+		"mapPointerField":    &p,
+	}
+
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(mapOfPointerVals)
+	if len(r.getFlatMap()) != 7 {
+		t.Error("traverseMapWithPointerStructAndMap failed!")
+	}
+}
+
+func TestRoller_StartPointerToStruct(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(&male)
+	if len(r.getFlatMap()) != 2 {
+		t.Error("StartPointerToStruct failed!")
+	}
+}
+
+func TestRoller_StartMap(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(m)
+	if len(r.getFlatMap()) != 20 {
+		t.Error("StartMap failed!")
+	}
+}
+
+func TestRoller_StartPointerToMap(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(&p)
+	if len(r.getFlatMap()) != 3 {
+		t.Error("StartPointerToMap failed!")
+	}
+}
+
+func TestRoller_StartStruct(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(h)
+
+	if len(r.getFlatMap()) != 4 {
+		t.Error("StartStruct failed!")
+	}
+}
+
+func TestRoller_StartStructWithTag(t *testing.T) {
+	r := roller{}
+	swTag := structWithTag{"John", 44}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(swTag)
+
+	if len(r.getFlatMap()) != 2 {
+		t.Error("StartStructWithTag failed!")
+	}
+}
+
+func TestRoller_StartStructPointerWithTag(t *testing.T) {
+	r := roller{}
+	swTag := structWithTag{"John", 44}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(&swTag)
+
+	if len(r.getFlatMap()) != 2 {
+		t.Error("StartStructPointerWithTag failed!")
+	}
+}
+
+func TestRoller_GetFlatVal(t *testing.T) {
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(m)
+
+	//check struct field with string
+	name, _ := r.getFlatVal("Male.Name")
+	if name != "John" {
+		t.Error("GetFlatVal failed for struct string field!")
+	}
+
+	//check struct field with int
+	age, _ := r.getFlatVal("Male.Age")
+	if age != 33 {
+		t.Error("GetFlatVal failed for struct int field!")
+	}
+
+	//check struct field with array
+	intArrOf5, _ := r.getFlatVal("array")
+	if len(intArrOf5.([5]int)) != 5 {
+		t.Error("GetFlatVal failed for struct array of [5]int field!")
+	}
+
+	//check map key of string
+	person, _ := r.getFlatVal("person")
+	if person != "John Doe" {
+		t.Error("GetFlatVal failed for map[string]string!")
+	}
+
+	//check not existed key
+	_, ok := r.getFlatVal("not_existed_key")
+	if ok {
+		t.Error("GetFlatVal failed for not available key!")
+	}
+}
+
+func TestRoller_PrimitiveDataType(t *testing.T) {
+	mStr := map[string]string{"oneStr": "hello", "twoStr": "Jane", "threeStr": "Doe"}
+	mBool := map[string]bool{"oneBool": true, "twoBool": false, "threeBool": true}
+	mInt := map[string]int{"oneInt": 1, "twoInt": 2, "threeInt": 3}
+	mInt8 := map[string]int8{"oneInt8": 1, "twoInt8": 2, "threeInt8": 3}
+	mInt16 := map[string]int16{"oneInt16": 1, "twoInt16": 2, "threeInt16": 3}
+	mInt32 := map[string]int32{"oneInt32": 1, "twoInt32": 2, "threeInt32": 3}
+	mInt64 := map[string]int64{"oneInt64": 1, "twoInt64": 2, "threeInt64": 3}
+	mFloat32 := map[string]float32{"onefloat32": 1.09, "twofloat32": 20.87, "threefloat32": 11.3}
+	mFloat64 := map[string]float64{"onefloat64": 10.88, "twofloat64": 92.09, "threefloat64": 3.90}
+	mUintptr := map[string]uintptr{"oneUintptr": 1, "twoUintptr": 2, "threeUintptr": 3}
+	mUint := map[string]uint{"oneUint": 1, "twoUint": 2, "threeUint": 3}
+	mUint8 := map[string]uint8{"oneUint8": 1, "twoUint8": 2, "threeUint8": 3}
+	mUint16 := map[string]uint16{"oneUint16": 1, "twoUint16": 2, "threeUint16": 3}
+	mUint32 := map[string]uint32{"oneUint32": 1, "twoUint32": 2, "threeUint32": 3}
+	mUint64 := map[string]uint64{"oneUint64": 1, "twoUint64": 2, "threeUint64": 3}
+	mComplex := map[string]interface{}{
+		"ptrToMapString":  &mStr,
+		"ptrToMapBool":    &mBool,
+		"ptrToMapInt":     &mInt,
+		"ptrToMapInt8":    &mInt8,
+		"ptrToMapInt16":   &mInt16,
+		"ptrToMapInt32":   &mInt32,
+		"ptrToMapInt64":   &mInt64,
+		"ptrToMapfloat32": &mFloat32,
+		"ptrToMapfloat64": &mFloat64,
+		"ptrToMapUintptr": &mUintptr,
+		"ptrToMapUint":    &mUint,
+		"ptrToMapUint8":   &mUint8,
+		"ptrToMapUint16":  &mUint16,
+		"ptrToMapUint32":  &mUint32,
+		"ptrToMapUint64":  &mUint64,
+	}
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(mComplex)
+	itemsLen := len(mComplex) * 3
+	if len(r.getFlatMap()) != itemsLen {
+		t.Error("PremitiveDataType failed!")
+	}
+}
+
+func TestRoller_sliceOfType(t *testing.T) {
+	males := []Male{
+		{Name: "John", Age: 29},
+		{Name: "Jane", Age: 23},
+		{Name: "Tom", Age: 10},
+	}
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(males)
+	i, _ := r.getFlatVal("slice")
+	if len(i.([]Male)) != len(males) {
+		t.Error("slice failed!")
+	}
+}
+
+func TestRoller_ptrSliceOfType(t *testing.T) {
+	males := []Male{
+		{Name: "John", Age: 29},
+		{Name: "Jane", Age: 23},
+		{Name: "Tom", Age: 10},
+	}
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(&males)
+	i, _ := r.getFlatVal("slice")
+	if len(i.([]Male)) != len(males) {
+		t.Error("slice failed!")
+	}
+}
+
+func TestRoller_MapWithPointerPrimitives(t *testing.T) {
+	type customType string
+	var str string
+	var varInt int
+	var varInt8 int8
+	var varInt16 int16
+	var varInt32 int32
+	var varInt64 int64
+	var varFloat32 float32
+	var varFloat64 float64
+	var varUint uint
+	var varUint8 uint8
+	var varUint16 uint16
+	var varUint32 uint32
+	var varUint64 uint64
+	var varUintptr uintptr
+	var x customType = "custom"
+	y := []string{"y", "z"}
+
+	males := map[string]interface{}{
+		"string":     &str,
+		"int":        &varInt,
+		"int8":       &varInt8,
+		"int16":      &varInt16,
+		"int32":      &varInt32,
+		"int64":      &varInt64,
+		"float32":    &varFloat32,
+		"float64":    &varFloat64,
+		"uint":       &varUint,
+		"uint8":      &varUint8,
+		"uint16":     &varUint16,
+		"uint32":     &varUint32,
+		"uint64":     &varUint64,
+		"uintPtr":    &varUintptr,
+		"customType": &x,
+		"y":          &y,
+	}
+	r := roller{}
+	r.setTagIdentifier("validate")
+	r.setTagSeparator("|")
+	r.start(males)
+
+	val, _ := r.getFlatVal("customType")
+	if *val.(*customType) != "custom" {
+		t.Error("fetching custom type value failed!")
+	}
+
+	valY, _ := r.getFlatVal("y")
+	if len(*valY.(*[]string)) != len(y) {
+		t.Error("fetching pointer to struct value failed!")
+	}
+
+	if len(r.getFlatMap()) != len(males) {
+		t.Error("MapWithPointerPremitives failed!")
+	}
+}
+
+type structWithCustomType struct {
+	Name      string  `json:"name"`
+	Integer   Int     `json:"integer"`
+	Integer64 Int64   `json:"integer64"`
+	Fpoint32  Float32 `json:"float32"`
+	Fpoint64  Float64 `json:"float64"`
+	Boolean   Bool    `json:"bool"`
+}
+
+func TestRoller_StartCustomType(t *testing.T) {
+	r := roller{}
+	swTag := structWithCustomType{Name: "John Doe", Integer: Int{Value: 44}}
+	r.setTagIdentifier("json")
+	r.setTagSeparator("|")
+	r.start(&swTag)
+	if len(r.getFlatMap()) != 6 {
+		t.Error("failed to push custom type")
+	}
+}
diff --git a/v2/validationmdl/validationcore/rules.go b/v2/validationmdl/validationcore/rules.go
new file mode 100644
index 0000000000000000000000000000000000000000..3a384257701debad0bc53d8d9170527e0ba8b8ae
--- /dev/null
+++ b/v2/validationmdl/validationcore/rules.go
@@ -0,0 +1,1020 @@
+package govalidator
+
+import (
+	"errors"
+	"fmt"
+	"mime/multipart"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// var mutex = &sync.Mutex{}
+// var Cnt = 0
+
+var rulesFuncMap = make(map[string]func(string, string, string, interface{}) error)
+
+// AddCustomRule adds a custom rule to the validator.
+// The first argument is the rule name; the second is the rule func,
+// which must have the following signature:
+// func(field string, rule string, message string, value interface{}) error
+// see example in readme: https://github.com/thedevsaddam/govalidator#add-custom-rules
+func AddCustomRule(name string, fn func(field string, rule string, message string, value interface{}) error) {
+	if isRuleExist(name) {
+		panic(fmt.Errorf("govalidator: %s is already defined in rules", name))
+	}
+	rulesFuncMap[name] = fn
+}
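+
+// A minimal usage sketch for AddCustomRule; the "is_even" rule below is a
+// hypothetical example, not part of this package's rule set:
+//
+//	AddCustomRule("is_even", func(field, rule, message string, value interface{}) error {
+//		if n, ok := value.(int); ok && n%2 == 0 {
+//			return nil
+//		}
+//		if message != "" {
+//			return errors.New(message)
+//		}
+//		return fmt.Errorf("The %s field must be an even number", field)
+//	})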
+
+// validateCustomRules runs the registered rule func matching the given rule string and collects any error into errsBag
+func validateCustomRules(field string, rule string, message string, value interface{}, errsBag url.Values) {
+
+	// loggermdl.LogDebug(field, " - ", rule)
+	// mutex.Lock()
+	// Cnt++
+	// mutex.Unlock()
+
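+	// a parameterized rule such as "between:3,5" matches the func registered
+	// under "between" through the prefix check below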
+	for k, v := range rulesFuncMap {
+		if k == rule || strings.HasPrefix(rule, k+":") {
+			err := v(field, rule, message, value)
+			if err != nil {
+				errsBag.Add(field, err.Error())
+			}
+			break
+		}
+	}
+}
+
+func init() {
+
+	// Required checks that the field is present and not empty
+	AddCustomRule("required", func(field, rule, message string, value interface{}) error {
+		err := fmt.Errorf("The %s field is required", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if value == nil {
+			return err
+		}
+		if _, ok := value.(multipart.File); ok {
+			return nil
+		}
+		rv := reflect.ValueOf(value)
+		switch rv.Kind() {
+		case reflect.String, reflect.Array, reflect.Slice, reflect.Map:
+			if rv.Len() == 0 {
+				return err
+			}
+		case reflect.Int:
+			if isEmpty(value.(int)) {
+				return err
+			}
+		case reflect.Int8:
+			if isEmpty(value.(int8)) {
+				return err
+			}
+		case reflect.Int16:
+			if isEmpty(value.(int16)) {
+				return err
+			}
+		case reflect.Int32:
+			if isEmpty(value.(int32)) {
+				return err
+			}
+		case reflect.Int64:
+			if isEmpty(value.(int64)) {
+				return err
+			}
+		case reflect.Float32:
+			if isEmpty(value.(float32)) {
+				return err
+			}
+		case reflect.Float64:
+			if isEmpty(value.(float64)) {
+				return err
+			}
+		case reflect.Uint:
+			if isEmpty(value.(uint)) {
+				return err
+			}
+		case reflect.Uint8:
+			if isEmpty(value.(uint8)) {
+				return err
+			}
+		case reflect.Uint16:
+			if isEmpty(value.(uint16)) {
+				return err
+			}
+		case reflect.Uint32:
+			if isEmpty(value.(uint32)) {
+				return err
+			}
+		case reflect.Uint64:
+			if isEmpty(value.(uint64)) {
+				return err
+			}
+		case reflect.Uintptr:
+			if isEmpty(value.(uintptr)) {
+				return err
+			}
+		case reflect.Struct:
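+			// the package's custom wrapper types (Int, Int64, Float32, Float64, Bool)
+			// signal presence via their IsSet flag rather than a zero-value check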
+			switch rv.Type().String() {
+			case "govalidator.Int":
+				if v, ok := value.(Int); ok {
+					if !v.IsSet {
+						return err
+					}
+				}
+			case "govalidator.Int64":
+				if v, ok := value.(Int64); ok {
+					if !v.IsSet {
+						return err
+					}
+				}
+			case "govalidator.Float32":
+				if v, ok := value.(Float32); ok {
+					if !v.IsSet {
+						return err
+					}
+				}
+			case "govalidator.Float64":
+				if v, ok := value.(Float64); ok {
+					if !v.IsSet {
+						return err
+					}
+				}
+			case "govalidator.Bool":
+				if v, ok := value.(Bool); ok {
+					if !v.IsSet {
+						return err
+					}
+				}
+			default:
+				panic("govalidator: invalid custom type for required rule")
+
+			}
+
+		default:
+			panic("govalidator: invalid type for required rule")
+
+		}
+		return nil
+	})
+
+	// Regex checks the value against the custom regex pattern given in the rule
+	// regex:^[a-zA-Z]+$ means this field may only contain letters (a-z and A-Z)
+	AddCustomRule("regex", func(field, rule, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field format is invalid", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		rxStr := strings.TrimPrefix(rule, "regex:")
+		if !isMatchedRegex(rxStr, str) {
+			return err
+		}
+		return nil
+	})
+
+	// Alpha checks if the provided field contains only letters
+	AddCustomRule("alpha", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s may only contain letters", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isAlpha(str) {
+			return err
+		}
+		return nil
+	})
+
+	// AlphaDash checks if the provided field contains only letters, numbers, underscores and dashes
+	AddCustomRule("alpha_dash", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s may only contain letters, numbers, and dashes", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isAlphaDash(str) {
+			return err
+		}
+		return nil
+	})
+
+	// AlphaNumeric checks if the provided field contains only letters and numbers
+	AddCustomRule("alpha_num", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s may only contain letters and numbers", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isAlphaNumeric(str) {
+			return err
+		}
+		return nil
+	})
+
+	// Boolean checks if the provided field contains a boolean,
+	// in this case: "0", "1", 0, 1, "true", "false", true, false etc
+	AddCustomRule("bool", func(field string, rule string, message string, value interface{}) error {
+		err := fmt.Errorf("The %s may only contain boolean value, string or int 0, 1", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		switch value.(type) {
+		case bool:
+			//if value is boolean then pass
+		case string:
+			if !isBoolean(value.(string)) {
+				return err
+			}
+		case int:
+			v := value.(int)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case int8:
+			v := value.(int8)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case int16:
+			v := value.(int16)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case int32:
+			v := value.(int32)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case int64:
+			v := value.(int64)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case uint:
+			v := value.(uint)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case uint8:
+			v := value.(uint8)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case uint16:
+			v := value.(uint16)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case uint32:
+			v := value.(uint32)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case uint64:
+			v := value.(uint64)
+			if v != 0 && v != 1 {
+				return err
+			}
+		case uintptr:
+			v := value.(uintptr)
+			if v != 0 && v != 1 {
+				return err
+			}
+		}
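+		// values of any other type (e.g. floats) are not coerced and pass this rule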
+		return nil
+	})
+
+	// Between checks the field's character length range;
+	// if the field is an array, map or slice, the rule applies to the length of the data;
+	// if the value is an int or float, the rule applies to the value itself
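+	// e.g: between:3,5 requires a length or value of at least 3 and at most 5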
+	AddCustomRule("between", func(field string, rule string, message string, value interface{}) error {
+		rng := strings.Split(strings.TrimPrefix(rule, "between:"), ",")
+		if len(rng) != 2 {
+			panic(errInvalidArgument)
+		}
+		minFloat, err := strconv.ParseFloat(rng[0], 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		maxFloat, err := strconv.ParseFloat(rng[1], 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		min := int(minFloat)
+		max := int(maxFloat)
+
+		err = fmt.Errorf("The %s field must be between %d and %d", field, min, max)
+		if message != "" {
+			err = errors.New(message)
+		}
+		rv := reflect.ValueOf(value)
+		switch rv.Kind() {
+		case reflect.String, reflect.Array, reflect.Map, reflect.Slice:
+			inLen := rv.Len()
+			if !(inLen >= min && inLen <= max) {
+				return err
+			}
+		case reflect.Int:
+			in := value.(int)
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Int8:
+			in := int(value.(int8))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Int16:
+			in := int(value.(int16))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Int32:
+			in := int(value.(int32))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Int64:
+			in := int(value.(int64))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Uint:
+			in := int(value.(uint))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Uint8:
+			in := int(value.(uint8))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Uint16:
+			in := int(value.(uint16))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Uint32:
+			in := int(value.(uint32))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Uint64:
+			in := int(value.(uint64))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Uintptr:
+			in := int(value.(uintptr))
+			if !(in >= min && in <= max) {
+				return err
+			}
+		case reflect.Float32:
+			in := float64(value.(float32))
+			if !(in >= minFloat && in <= maxFloat) {
+				return fmt.Errorf("The %s field must be between %f and %f", field, minFloat, maxFloat)
+			}
+		case reflect.Float64:
+			in := value.(float64)
+			if !(in >= minFloat && in <= maxFloat) {
+				return fmt.Errorf("The %s field must be between %f and %f", field, minFloat, maxFloat)
+			}
+
+		}
+
+		return nil
+	})
+
+	// CreditCard checks if the provided field contains a valid credit card number
+	// Accepted cards are Visa, MasterCard, American Express, Diners Club, Discover and JCB
+	AddCustomRule("credit_card", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid credit card number", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isCreditCard(str) {
+			return err
+		}
+		return nil
+	})
+
+	// Coordinate checks if the provided field contains a valid coordinate
+	AddCustomRule("coordinate", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid coordinate", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isCoordinate(str) {
+			return err
+		}
+		return nil
+	})
+
+	// ValidateCSSColor checks if the provided field contains a valid CSS color code
+	AddCustomRule("css_color", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid CSS color code", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isCSSColor(str) {
+			return err
+		}
+		return nil
+	})
+
+	// Digits checks for an exact count of digits (0-9)
+	// digits:5 means the field must be exactly 5 digits long,
+	// e.g: 12345 or 98997 etc
+	AddCustomRule("digits", func(field string, rule string, message string, value interface{}) error {
+		l, err := strconv.Atoi(strings.TrimPrefix(rule, "digits:"))
+		if err != nil {
+			panic(errStringToInt)
+		}
+		err = fmt.Errorf("The %s field must be %d digits", field, l)
+		if l == 1 {
+			err = fmt.Errorf("The %s field must be 1 digit", field)
+		}
+		if message != "" {
+			err = errors.New(message)
+		}
+		str := toString(value)
+		if len(str) != l || !isNumeric(str) {
+			return err
+		}
+
+		return nil
+	})
+
+	// DigitsBetween checks that the field contains only digits and that its length falls within the provided range
+	// e.g: digits_between:4,5 means the field can have a value like 8887 or 12345 etc
+	AddCustomRule("digits_between", func(field string, rule string, message string, value interface{}) error {
+		rng := strings.Split(strings.TrimPrefix(rule, "digits_between:"), ",")
+		if len(rng) != 2 {
+			panic(errInvalidArgument)
+		}
+		min, err := strconv.Atoi(rng[0])
+		if err != nil {
+			panic(errStringToInt)
+		}
+		max, err := strconv.Atoi(rng[1])
+		if err != nil {
+			panic(errStringToInt)
+		}
+		err = fmt.Errorf("The %s field must be digits between %d and %d", field, min, max)
+		if message != "" {
+			err = errors.New(message)
+		}
+		str := toString(value)
+		if !isNumeric(str) || !(len(str) >= min && len(str) <= max) {
+			return err
+		}
+
+		return nil
+	})
+
+	// Date checks that the provided field is a valid date
+	AddCustomRule("date", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		if rule == "date:dd-mm-yyyy" {
+			if !isDateDDMMYY(str) {
+				if message != "" {
+					return errors.New(message)
+				}
+				return fmt.Errorf("The %s field must be a valid date format. e.g: dd-mm-yyyy, dd/mm/yyyy etc", field)
+			}
+			return nil
+		}
+		if !isDate(str) {
+			if message != "" {
+				return errors.New(message)
+			}
+			return fmt.Errorf("The %s field must be a valid date format. e.g: yyyy-mm-dd, yyyy/mm/dd etc", field)
+		}
+		return nil
+	})
+
+	// Email checks that the provided field is a valid email address
+	AddCustomRule("email", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid email address", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isEmail(str) {
+			return err
+		}
+		return nil
+	})
+
+	// validFloat checks that the provided field is a valid float number
+	AddCustomRule("float", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a float number", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isFloat(str) {
+			return err
+		}
+		return nil
+	})
+
+	// IP checks if the provided field is a valid IP address
+	AddCustomRule("ip", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid IP address", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isIP(str) {
+			return err
+		}
+		return nil
+	})
+
+	// IPv4 checks if the provided field is a valid IPv4 address
+	AddCustomRule("ip_v4", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid IPv4 address", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isIPV4(str) {
+			return err
+		}
+		return nil
+	})
+
+	// IPv6 checks if the provided field is a valid IPv6 address
+	AddCustomRule("ip_v6", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be a valid IPv6 address", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isIPV6(str) {
+			return err
+		}
+		return nil
+	})
+
+	// ValidateJSON checks if the provided field contains a valid JSON string
+	AddCustomRule("json", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid JSON string", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isJSON(str) {
+			return err
+		}
+		return nil
+	})
+
+	// Latitude checks if the provided field contains a valid latitude
+	AddCustomRule("lat", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid latitude", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isLatitude(str) {
+			return err
+		}
+		return nil
+	})
+
+	// Longitude checks if the provided field contains a valid longitude
+	AddCustomRule("lon", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid longitude", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isLongitude(str) {
+			return err
+		}
+		return nil
+	})
+
+	// Length checks the field's character length
+	AddCustomRule("len", func(field string, rule string, message string, value interface{}) error {
+		l, err := strconv.Atoi(strings.TrimPrefix(rule, "len:"))
+		if err != nil {
+			panic(errStringToInt)
+		}
+		err = fmt.Errorf("The %s field must be length of %d", field, l)
+		if message != "" {
+			err = errors.New(message)
+		}
+		rv := reflect.ValueOf(value)
+		switch rv.Kind() {
+		case reflect.String, reflect.Array, reflect.Map, reflect.Slice:
+			vLen := rv.Len()
+			if vLen != l {
+				return err
+			}
+		default:
+			str := toString(value) //force the value to be string
+			if len(str) != l {
+				return err
+			}
+		}
+
+		return nil
+	})
+
+	// Min checks the field's minimum character length for strings, minimum value for ints and floats, and minimum size for arrays, maps and slices
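+	// Note: the bound is parsed with strconv.Atoi first, so a fractional bound such as
+	// min:3.5 panics; use integral bounds here (the between rule accepts floats)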
+	AddCustomRule("min", func(field string, rule string, message string, value interface{}) error {
+		mustLen := strings.TrimPrefix(rule, "min:")
+		lenInt, err := strconv.Atoi(mustLen)
+		if err != nil {
+			panic(errStringToInt)
+		}
+		lenFloat, err := strconv.ParseFloat(mustLen, 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		errMsg := fmt.Errorf("The %s field value can not be less than %d", field, lenInt)
+		if message != "" {
+			errMsg = errors.New(message)
+		}
+		errMsgFloat := fmt.Errorf("The %s field value can not be less than %f", field, lenFloat)
+		if message != "" {
+			errMsgFloat = errors.New(message)
+		}
+		rv := reflect.ValueOf(value)
+		switch rv.Kind() {
+		case reflect.String:
+			inLen := rv.Len()
+			if inLen < lenInt {
+				if message != "" {
+					return errors.New(message)
+				}
+				return fmt.Errorf("The %s field must be minimum %d char", field, lenInt)
+			}
+		case reflect.Array, reflect.Map, reflect.Slice:
+			inLen := rv.Len()
+			if inLen < lenInt {
+				if message != "" {
+					return errors.New(message)
+				}
+				return fmt.Errorf("The %s field must be minimum %d in size", field, lenInt)
+			}
+		case reflect.Int:
+			in := value.(int)
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Int8:
+			in := int(value.(int8))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Int16:
+			in := int(value.(int16))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Int32:
+			in := int(value.(int32))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Int64:
+			in := int(value.(int64))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Uint:
+			in := int(value.(uint))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Uint8:
+			in := int(value.(uint8))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Uint16:
+			in := int(value.(uint16))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Uint32:
+			in := int(value.(uint32))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Uint64:
+			in := int(value.(uint64))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Uintptr:
+			in := int(value.(uintptr))
+			if in < lenInt {
+				return errMsg
+			}
+		case reflect.Float32:
+			in := value.(float32)
+			if in < float32(lenFloat) {
+				return errMsgFloat
+			}
+		case reflect.Float64:
+			in := value.(float64)
+			if in < lenFloat {
+				return errMsgFloat
+			}
+
+		}
+
+		return nil
+	})
+
+	// Max checks the field's maximum character length for strings, maximum value for ints and floats, and maximum size for arrays, maps and slices
+	AddCustomRule("max", func(field string, rule string, message string, value interface{}) error {
+		mustLen := strings.TrimPrefix(rule, "max:")
+		lenInt, err := strconv.Atoi(mustLen)
+		if err != nil {
+			panic(errStringToInt)
+		}
+		lenFloat, err := strconv.ParseFloat(mustLen, 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		errMsg := fmt.Errorf("The %s field value can not be greater than %d", field, lenInt)
+		if message != "" {
+			errMsg = errors.New(message)
+		}
+		errMsgFloat := fmt.Errorf("The %s field value can not be greater than %f", field, lenFloat)
+		if message != "" {
+			errMsgFloat = errors.New(message)
+		}
+		rv := reflect.ValueOf(value)
+		switch rv.Kind() {
+		case reflect.String:
+			inLen := rv.Len()
+			if inLen > lenInt {
+				if message != "" {
+					return errors.New(message)
+				}
+				return fmt.Errorf("The %s field must be maximum %d char", field, lenInt)
+			}
+		case reflect.Array, reflect.Map, reflect.Slice:
+			inLen := rv.Len()
+			if inLen > lenInt {
+				if message != "" {
+					return errors.New(message)
+				}
+				return fmt.Errorf("The %s field must be minimum %d in size", field, lenInt)
+			}
+		case reflect.Int:
+			in := value.(int)
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Int8:
+			in := int(value.(int8))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Int16:
+			in := int(value.(int16))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Int32:
+			in := int(value.(int32))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Int64:
+			in := int(value.(int64))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Uint:
+			in := int(value.(uint))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Uint8:
+			in := int(value.(uint8))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Uint16:
+			in := int(value.(uint16))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Uint32:
+			in := int(value.(uint32))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Uint64:
+			in := int(value.(uint64))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Uintptr:
+			in := int(value.(uintptr))
+			if in > lenInt {
+				return errMsg
+			}
+		case reflect.Float32:
+			in := value.(float32)
+			if in > float32(lenFloat) {
+				return errMsgFloat
+			}
+		case reflect.Float64:
+			in := value.(float64)
+			if in > lenFloat {
+				return errMsgFloat
+			}
+
+		}
+
+		return nil
+	})
+
+	// Numeric checks if the value of the field is numeric
+	AddCustomRule("numeric", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be numeric", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isNumeric(str) {
+			return err
+		}
+		return nil
+	})
+
+	// NumericBetween checks that the field's numeric value falls within the provided range
+	// e.g: numeric_between:18,65 means the value must be between 18 and 65
+	AddCustomRule("numeric_between", func(field string, rule string, message string, value interface{}) error {
+		rng := strings.Split(strings.TrimPrefix(rule, "numeric_between:"), ",")
+		if len(rng) != 2 {
+			panic(errInvalidArgument)
+		}
+		// check for integer value
+		_min, err := strconv.ParseFloat(rng[0], 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		min := int(_min)
+		_max, err := strconv.ParseFloat(rng[1], 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		max := int(_max)
+		errMsg := fmt.Errorf("The %s field must be numeric value between %d and %d", field, min, max)
+		if message != "" {
+			errMsg = errors.New(message)
+		}
+
+		val := toString(value)
+
+		if !strings.Contains(rng[0], ".") || !strings.Contains(rng[1], ".") {
+			digit, errs := strconv.Atoi(val)
+			if errs != nil {
+				return errMsg
+			}
+			if !(digit >= min && digit <= max) {
+				return errMsg
+			}
+		}
+		// check for float value
+		minFloat, err := strconv.ParseFloat(rng[0], 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		maxFloat, err := strconv.ParseFloat(rng[1], 64)
+		if err != nil {
+			panic(errStringToFloat)
+		}
+		errMsg = fmt.Errorf("The %s field must be numeric value between %f and %f", field, minFloat, maxFloat)
+		if message != "" {
+			errMsg = errors.New(message)
+		}
+
+		digit, err := strconv.ParseFloat(val, 64)
+		if err != nil {
+			return errMsg
+		}
+		if !(digit >= minFloat && digit <= maxFloat) {
+			return errMsg
+		}
+		return nil
+	})
+
+	// ValidateURL checks if the provided field is a valid URL
+	AddCustomRule("url", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field format is invalid", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isURL(str) {
+			return err
+		}
+		return nil
+	})
+
+	// UUID checks if the provided field contains a valid UUID
+	AddCustomRule("uuid", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid UUID", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isUUID(str) {
+			return err
+		}
+		return nil
+	})
+
+	// UUID3 checks if the provided field contains a valid version 3 UUID
+	AddCustomRule("uuid_v3", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid UUID V3", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isUUID3(str) {
+			return err
+		}
+		return nil
+	})
+
+	// UUID4 checks if the provided field contains a valid version 4 UUID
+	AddCustomRule("uuid_v4", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid UUID V4", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isUUID4(str) {
+			return err
+		}
+		return nil
+	})
+
+	// UUID5 checks if the provided field contains a valid version 5 UUID
+	AddCustomRule("uuid_v5", func(field string, rule string, message string, value interface{}) error {
+		str := toString(value)
+		err := fmt.Errorf("The %s field must contain valid UUID V5", field)
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isUUID5(str) {
+			return err
+		}
+		return nil
+	})
+
+	// In checks if the provided field equals one of the values specified in the rule
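+	// e.g: in:male,female,other means the value must be one of male, female or other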
+	AddCustomRule("in", func(field string, rule string, message string, value interface{}) error {
+		rng := strings.Split(strings.TrimPrefix(rule, "in:"), ",")
+		if len(rng) == 0 {
+			panic(errInvalidArgument)
+		}
+		str := toString(value)
+		err := fmt.Errorf("The %s field must be one of %v", field, strings.Join(rng, ", "))
+		if message != "" {
+			err = errors.New(message)
+		}
+		if !isIn(rng, str) {
+			return err
+		}
+		return nil
+	})
+
+	// NotIn checks that the provided field does not equal any of the values specified in the rule
+	AddCustomRule("not_in", func(field string, rule string, message string, value interface{}) error {
+		rng := strings.Split(strings.TrimPrefix(rule, "not_in:"), ",")
+		if len(rng) == 0 {
+			panic(errInvalidArgument)
+		}
+		str := toString(value)
+		err := fmt.Errorf("The %s field must not be any of %v", field, strings.Join(rng, ", "))
+		if message != "" {
+			err = errors.New(message)
+		}
+		if isIn(rng, str) {
+			return err
+		}
+		return nil
+	})
+}
diff --git a/v2/validationmdl/validationcore/rules_test.go b/v2/validationmdl/validationcore/rules_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7de69d7bfed1bb497713fda178f580a1702ef801
--- /dev/null
+++ b/v2/validationmdl/validationcore/rules_test.go
@@ -0,0 +1,1973 @@
+package govalidator
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"testing"
+)
+
+func Test_AddCustomRule(t *testing.T) {
+	AddCustomRule("__x__", func(f string, rule string, message string, v interface{}) error {
+		if v.(string) != "xyz" {
+			return fmt.Errorf("The %s field must be xyz", f)
+		}
+		return nil
+	})
+	if len(rulesFuncMap) <= 0 {
+		t.Error("AddCustomRule failed to add new rule")
+	}
+}
+
+func Test_AddCustomRule_panic(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("AddCustomRule failed to panic")
+		}
+	}()
+	AddCustomRule("__x__", func(f string, rule string, message string, v interface{}) error {
+		if v.(string) != "xyz" {
+			return fmt.Errorf("The %s field must be xyz", f)
+		}
+		return nil
+	})
+}
+
+func Test_validateExtraRules(t *testing.T) {
+	errsBag := url.Values{}
+	validateCustomRules("f_field", "__x__", "a", "", errsBag)
+	if len(errsBag) != 1 {
+		t.Error("validateExtraRules failed")
+	}
+}
+
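+// A minimal end-to-end sketch of the API these tests exercise (Options, New,
+// ValidateJSON); the expected output assumes the email rule's default error
+// text from rules.go.
+func Example_validateJSON() {
+	type user struct {
+		Email string `json:"email"`
+	}
+	body, _ := json.Marshal(user{Email: "not-an-email"})
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	var u user
+	vd := New(Options{
+		Request: req,
+		Data:    &u,
+		Rules:   MapData{"email": []string{"email"}},
+	})
+	errs := vd.ValidateJSON()
+	fmt.Println(errs.Get("email"))
+	// Output: The email field must be a valid email address
+}
+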
+//================================= rules =================================
+func Test_Required(t *testing.T) {
+	type tRequired struct {
+		Str       string  `json:"_str"`
+		Int       int     `json:"_int"`
+		Int8      int8    `json:"_int8"`
+		Int16     int16   `json:"_int16"`
+		Int32     int32   `json:"_int32"`
+		Int64     int64   `json:"_int64"`
+		Uint      uint    `json:"_uint"`
+		Uint8     uint8   `json:"_uint8"`
+		Uint16    uint16  `json:"_uint16"`
+		Uint32    uint32  `json:"_uint32"`
+		Uint64    uint64  `json:"_uint64"`
+		Uintptr   uintptr `json:"_uintptr"`
+		Float32   float32 `json:"_float32"`
+		Float64   float64 `json:"_float64"`
+		Integer   Int     `json:"integer"`
+		Integer64 Int64   `json:"integer64"`
+		Fpoint32  Float32 `json:"float32"`
+		Fpoint64  Float64 `json:"float64"`
+		Boolean   Bool    `json:"boolean"`
+	}
+
+	rules := MapData{
+		"_str":      []string{"required"},
+		"_int":      []string{"required"},
+		"_int8":     []string{"required"},
+		"_int16":    []string{"required"},
+		"_int32":    []string{"required"},
+		"_int64":    []string{"required"},
+		"_uint":     []string{"required"},
+		"_uint8":    []string{"required"},
+		"_uint16":   []string{"required"},
+		"_uint32":   []string{"required"},
+		"_uint64":   []string{"required"},
+		"_uintptr":  []string{"required"},
+		"_float32":  []string{"required"},
+		"_float64":  []string{"required"},
+		"integer":   []string{"required"},
+		"integer64": []string{"required"},
+		"float32":   []string{"required"},
+		"float64":   []string{"required"},
+		"boolean":   []string{"required"},
+	}
+
+	postRequired := map[string]string{}
+
+	var trequired tRequired
+
+	body, _ := json.Marshal(postRequired)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"_str": []string{"required:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &trequired,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 19 {
+		t.Log(validationErr)
+		t.Error("required validation failed!")
+	}
+
+	if validationErr.Get("_str") != "custom_message" {
+		t.Error("required rule custom message failed")
+	}
+}
+
+func Test_Regex(t *testing.T) {
+	type tRegex struct {
+		Name string `json:"name"`
+	}
+
+	postRegex := tRegex{Name: "john"}
+	var tregex tRegex
+
+	body, _ := json.Marshal(postRegex)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"name": []string{"regex:custom_message"},
+	}
+
+	rules := MapData{
+		"name": []string{"regex:^[0-9]+$"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &tregex,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("regex validation failed!")
+	}
+
+	if validationErr.Get("name") != "custom_message" {
+		t.Error("regex rule custom message failed")
+	}
+
+}
+
+func Test_Alpha(t *testing.T) {
+	type user struct {
+		Name string `json:"name"`
+	}
+
+	postUser := user{Name: "9080"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"name": []string{"alpha:custom_message"},
+	}
+
+	rules := MapData{
+		"name": []string{"alpha"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("alpha validation failed!")
+	}
+
+	if validationErr.Get("name") != "custom_message" {
+		t.Error("alpha custom message failed!")
+	}
+}
+
+func Test_AlphaDash(t *testing.T) {
+	type user struct {
+		Name string `json:"name"`
+	}
+
+	postUser := user{Name: "9090$"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"name": []string{"alpha_dash:custom_message"},
+	}
+
+	rules := MapData{
+		"name": []string{"alpha_dash"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("alpha_dash validation failed!")
+	}
+
+	if validationErr.Get("name") != "custom_message" {
+		t.Error("alpha dash custom message failed!")
+	}
+}
+
+func Test_AlphaNumeric(t *testing.T) {
+	type user struct {
+		Name string `json:"name"`
+	}
+
+	postUser := user{Name: "aE*Sb$"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"name": []string{"alpha_num"},
+	}
+
+	messages := MapData{
+		"name": []string{"alpha_num:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("alpha_num validation failed!")
+	}
+
+	if validationErr.Get("name") != "custom_message" {
+		t.Error("alpha num custom message failed!")
+	}
+}
+
+func Test_Boolean(t *testing.T) {
+	type Bools struct {
+		BoolStr     string  `json:"boolStr"`
+		BoolInt     int     `json:"boolInt"`
+		BoolInt8    int8    `json:"boolInt8"`
+		BoolInt16   int16   `json:"boolInt16"`
+		BoolInt32   int32   `json:"boolInt32"`
+		BoolInt64   int64   `json:"boolInt64"`
+		BoolUint    uint    `json:"boolUint"`
+		BoolUint8   uint8   `json:"boolUint8"`
+		BoolUint16  uint16  `json:"boolUint16"`
+		BoolUint32  uint32  `json:"boolUint32"`
+		BoolUint64  uint64  `json:"boolUint64"`
+		BoolUintptr uintptr `json:"boolUintptr"`
+		Bool        bool    `json:"_bool"`
+	}
+
+	postBools := Bools{
+		BoolStr:     "abc",
+		BoolInt:     90,
+		BoolInt8:    10,
+		BoolInt16:   22,
+		BoolInt32:   76,
+		BoolInt64:   9,
+		BoolUint:    5,
+		BoolUint8:   9,
+		BoolUint16:  9,
+		BoolUint32:  9,
+		BoolUint64:  8,
+		BoolUintptr: 9,
+	}
+	var boolObj Bools
+
+	body, _ := json.Marshal(postBools)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"boolStr":     []string{"bool"},
+		"boolInt":     []string{"bool"},
+		"boolInt8":    []string{"bool"},
+		"boolInt16":   []string{"bool"},
+		"boolInt32":   []string{"bool"},
+		"boolInt64":   []string{"bool"},
+		"boolUint":    []string{"bool"},
+		"boolUint8":   []string{"bool"},
+		"boolUint16":  []string{"bool"},
+		"boolUint32":  []string{"bool"},
+		"boolUint64":  []string{"bool"},
+		"boolUintptr": []string{"bool"},
+	}
+
+	messages := MapData{
+		"boolStr":  []string{"bool:custom_message"},
+		"boolInt":  []string{"bool:custom_message"},
+		"boolUint": []string{"bool:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &boolObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 12 {
+		t.Error("bool validation failed!")
+	}
+
+	if validationErr.Get("boolStr") != "custom_message" ||
+		validationErr.Get("boolInt") != "custom_message" ||
+		validationErr.Get("boolUint") != "custom_message" {
+		t.Error("bool custom message failed!")
+	}
+}
+
+func Test_Between(t *testing.T) {
+	type user struct {
+		Str     string  `json:"str"`
+		Int     int     `json:"_int"`
+		Int8    int8    `json:"_int8"`
+		Int16   int16   `json:"_int16"`
+		Int32   int32   `json:"_int32"`
+		Int64   int64   `json:"_int64"`
+		Uint    uint    `json:"_uint"`
+		Uint8   uint8   `json:"_uint8"`
+		Uint16  uint16  `json:"_uint16"`
+		Uint32  uint32  `json:"_uint32"`
+		Uint64  uint64  `json:"_uint64"`
+		Uintptr uintptr `json:"_uintptr"`
+		Float32 float32 `json:"_float32"`
+		Float64 float64 `json:"_float64"`
+		Slice   []int   `json:"_slice"`
+	}
+
+	postUser := user{}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"str":      []string{"between:3,5"},
+		"_int":     []string{"between:3,5"},
+		"_int8":    []string{"between:3,5"},
+		"_int16":   []string{"between:3,5"},
+		"_int32":   []string{"between:3,5"},
+		"_int64":   []string{"between:3,5"},
+		"_uint":    []string{"between:3,5"},
+		"_uint8":   []string{"between:3,5"},
+		"_uint16":  []string{"between:3,5"},
+		"_uint32":  []string{"between:3,5"},
+		"_uint64":  []string{"between:3,5"},
+		"_uintptr": []string{"between:3,5"},
+		"_float32": []string{"between:3.5,5.9"},
+		"_float64": []string{"between:3.3,6.2"},
+		"_slice":   []string{"between:3,5"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	vd.SetDefaultRequired(true)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 15 {
+		t.Error("between validation failed!")
+	}
+}
+
+func Test_CreditCard(t *testing.T) {
+	type user struct {
+		CreditCard string `json:"credit_card"`
+	}
+
+	postUser := user{CreditCard: "87080"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"credit_card": []string{"credit_card:custom_message"},
+	}
+
+	rules := MapData{
+		"credit_card": []string{"credit_card"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("credit card validation failed!")
+	}
+
+	if validationErr.Get("credit_card") != "custom_message" {
+		t.Error("credit_card custom message failed!")
+	}
+}
+
+func Test_Coordinate(t *testing.T) {
+	type user struct {
+		Coordinate string `json:"coordinate"`
+	}
+
+	postUser := user{Coordinate: "8080"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"coordinate": []string{"coordinate:custom_message"},
+	}
+
+	rules := MapData{
+		"coordinate": []string{"coordinate"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("coordinate validation failed!")
+	}
+
+	if validationErr.Get("coordinate") != "custom_message" {
+		t.Error("coordinate custom message failed!")
+	}
+}
+
+func Test_CSSColor(t *testing.T) {
+	type user struct {
+		Color string `json:"color"`
+	}
+
+	postUser := user{Color: "8080"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"color": []string{"css_color"},
+	}
+
+	messages := MapData{
+		"color": []string{"css_color:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("CSS color validation failed!")
+	}
+
+	if validationErr.Get("color") != "custom_message" {
+		t.Error("css_color custom message failed!")
+	}
+}
+
+func Test_Digits(t *testing.T) {
+	type user struct {
+		Zip   string `json:"zip"`
+		Level string `json:"level"`
+	}
+
+	postUser := user{Zip: "8322", Level: "10"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"zip":   []string{"digits:5"},
+		"level": []string{"digits:1"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Error("Digits validation failed!")
+	}
+}
+
+func Test_DigitsBetween(t *testing.T) {
+	type user struct {
+		Zip   string `json:"zip"`
+		Level string `json:"level"`
+	}
+
+	postUser := user{Zip: "8322", Level: "10"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"zip":   []string{"digits_between:5,10"},
+		"level": []string{"digits_between:5,10"},
+	}
+
+	messages := MapData{
+		"zip":   []string{"digits_between:custom_message"},
+		"level": []string{"digits_between:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Error("digits between validation failed!")
+	}
+
+	if validationErr.Get("zip") != "custom_message" ||
+		validationErr.Get("level") != "custom_message" {
+		t.Error("digits_between custom message failed!")
+	}
+}
+
+func Test_DigitsBetweenPanic(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("Digits between failed to panic!")
+		}
+	}()
+	type user struct {
+		Zip   string `json:"zip"`
+		Level string `json:"level"`
+	}
+
+	postUser := user{Zip: "8322", Level: "10"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"zip":   []string{"digits_between:5"},
+		"level": []string{"digits_between:i,k"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Error("Digits between panic failed!")
+	}
+}
+
+func Test_Date(t *testing.T) {
+	type user struct {
+		DOB         string `json:"dob"`
+		JoiningDate string `json:"joining_date"`
+	}
+
+	postUser := user{DOB: "invalida date", JoiningDate: "10"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"dob":          []string{"date"},
+		"joining_date": []string{"date:dd-mm-yyyy"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Log(validationErr)
+		t.Error("Date validation failed!")
+	}
+}
+
+func Test_Date_message(t *testing.T) {
+	type user struct {
+		DOB         string `json:"dob"`
+		JoiningDate string `json:"joining_date"`
+	}
+
+	postUser := user{DOB: "invalida date", JoiningDate: "10"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"dob":          []string{"date"},
+		"joining_date": []string{"date:dd-mm-yyyy"},
+	}
+
+	messages := MapData{
+		"dob":          []string{"date:custom_message"},
+		"joining_date": []string{"date:dd-mm-yyyy:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("dob") != "custom_message" {
+		t.Error("Date custom message validation failed!")
+	}
+	if k := validationErr.Get("dob"); k != "custom_message" {
+		t.Error("Date date:dd-mm-yyyy custom message validation failed!")
+	}
+}
+
+func Test_Email(t *testing.T) {
+	type user struct {
+		Email string `json:"email"`
+	}
+
+	postUser := user{Email: "invalid email"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"email": []string{"email"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("Email validation failed!")
+	}
+}
+
+func Test_Email_message(t *testing.T) {
+	type user struct {
+		Email string `json:"email"`
+	}
+
+	postUser := user{Email: "invalid email"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"email": []string{"email"},
+	}
+
+	messages := MapData{
+		"email": []string{"email:custom_message"},
+	}
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("email") != "custom_message" {
+		t.Error("Email message validation failed!")
+	}
+}
+
+func Test_Float(t *testing.T) {
+	type user struct {
+		CGPA string `json:"cgpa"`
+	}
+
+	postUser := user{CGPA: "invalid cgpa"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"cgpa": []string{"float"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("Float validation failed!")
+	}
+}
+
+func Test_Float_message(t *testing.T) {
+	type user struct {
+		CGPA string `json:"cgpa"`
+	}
+
+	postUser := user{CGPA: "invalid cgpa"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"cgpa": []string{"float"},
+	}
+
+	messages := MapData{
+		"cgpa": []string{"float:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("cgpa") != "custom_message" {
+		t.Error("Float custom message failed!")
+	}
+}
+
+func Test_IP(t *testing.T) {
+	type user struct {
+		IP string `json:"ip"`
+	}
+
+	postUser := user{IP: "invalid IP"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"ip": []string{"ip"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("IP validation failed!")
+	}
+}
+
+func Test_IP_message(t *testing.T) {
+	type user struct {
+		IP string `json:"ip"`
+	}
+
+	postUser := user{IP: "invalid IP"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"ip": []string{"ip:custom_message"},
+	}
+
+	rules := MapData{
+		"ip": []string{"ip"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("ip") != "custom_message" {
+		t.Error("IP custom message failed!")
+	}
+}
+
+func Test_IPv4(t *testing.T) {
+	type user struct {
+		IP string `json:"ip"`
+	}
+
+	postUser := user{IP: "invalid IP"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"ip": []string{"ip_v4"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("IP v4 validation failed!")
+	}
+}
+
+func Test_IPv4_message(t *testing.T) {
+	type user struct {
+		IP string `json:"ip"`
+	}
+
+	postUser := user{IP: "invalid IP"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"ip": []string{"ip_v4:custom_message"},
+	}
+
+	rules := MapData{
+		"ip": []string{"ip_v4"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("ip") != "custom_message" {
+		t.Error("IP v4 custom message failed!")
+	}
+}
+
+func Test_IPv6(t *testing.T) {
+	type user struct {
+		IP string `json:"ip"`
+	}
+
+	postUser := user{IP: "invalid IP"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"ip": []string{"ip_v6"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("IP v6 validation failed!")
+	}
+}
+
+func Test_IPv6_message(t *testing.T) {
+	type user struct {
+		IP string `json:"ip"`
+	}
+
+	postUser := user{IP: "invalid IP"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"ip": []string{"ip_v6:custom_message"},
+	}
+
+	rules := MapData{
+		"ip": []string{"ip_v6"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("ip") != "custom_message" {
+		t.Error("IP v6 custom message failed!")
+	}
+}
+
+func Test_JSON(t *testing.T) {
+	type user struct {
+		Settings string `json:"settings"`
+	}
+
+	postUser := user{Settings: "invalid json"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"settings": []string{"json"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("JSON validation failed!")
+	}
+}
+
+func Test_JSON_valid(t *testing.T) {
+	type user struct {
+		Settings string `json:"settings"`
+	}
+
+	postUser := user{Settings: `{"name": "John Doe", "age": 30}`}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"settings": []string{"json"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Log(validationErr)
+		t.Error("Validation failed for valid JSON")
+	}
+}
+
+func Test_JSON_message(t *testing.T) {
+	type user struct {
+		Settings string `json:"settings"`
+	}
+
+	postUser := user{Settings: "invalid json"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"settings": []string{"json:custom_message"},
+	}
+
+	rules := MapData{
+		"settings": []string{"json"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("settings") != "custom_message" {
+		t.Error("JSON custom message failed!")
+	}
+}
+
+func Test_LatLon(t *testing.T) {
+	type Location struct {
+		Latitude  string `json:"lat"`
+		Longitude string `json:"lon"`
+	}
+
+	postLocation := Location{Latitude: "invalid lat", Longitude: "invalid lon"}
+	var loc Location
+
+	body, _ := json.Marshal(postLocation)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"lat": []string{"lat"},
+		"lon": []string{"lon"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &loc,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Log(validationErr)
+		t.Error("Lat Lon validation failed!")
+	}
+}
+
+func Test_LatLon_valid(t *testing.T) {
+	type Location struct {
+		Latitude  string `json:"lat"`
+		Longitude string `json:"lon"`
+	}
+
+	postLocation := Location{Latitude: "23.810332", Longitude: "90.412518"}
+	var loc Location
+
+	body, _ := json.Marshal(postLocation)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"lat": []string{"lat"},
+		"lon": []string{"lon"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &loc,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Log(validationErr)
+		t.Error("Valid Lat Lon validation failed!")
+	}
+}
+
+func Test_LatLon_message(t *testing.T) {
+	type Location struct {
+		Latitude  string `json:"lat"`
+		Longitude string `json:"lon"`
+	}
+
+	postLocation := Location{Latitude: "invalid lat", Longitude: "invalid lon"}
+	var loc Location
+
+	body, _ := json.Marshal(postLocation)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"lat": []string{"lat:custom_message"},
+		"lon": []string{"lon:custom_message"},
+	}
+
+	rules := MapData{
+		"lat": []string{"lat"},
+		"lon": []string{"lon"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &loc,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("lat") != "custom_message" ||
+		validationErr.Get("lon") != "custom_message" {
+		t.Error("Lat lon custom message failed")
+	}
+}
+
+func Test_Len(t *testing.T) {
+	type user struct {
+		Name        string   `json:"name"`
+		Roll        int      `json:"roll"`
+		Permissions []string `json:"permissions"`
+	}
+
+	postUser := user{
+		Name:        "john",
+		Roll:        11,
+		Permissions: []string{"create", "delete", "update"},
+	}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"name":        []string{"len:5"},
+		"roll":        []string{"len:5"},
+		"permissions": []string{"len:10"},
+	}
+
+	opts := Options{
+		Request: req,
+		Data:    &userObj,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 3 {
+		t.Log(validationErr)
+		t.Error("Len validation failed!")
+	}
+}
+
+func Test_Len_message(t *testing.T) {
+	type user struct {
+		Name        string   `json:"name"`
+		Roll        int      `json:"roll"`
+		Permissions []string `json:"permissions"`
+	}
+
+	postUser := user{
+		Name:        "john",
+		Roll:        11,
+		Permissions: []string{"create", "delete", "update"},
+	}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"name":        []string{"len:custom_message"},
+		"roll":        []string{"len:custom_message"},
+		"permissions": []string{"len:custom_message"},
+	}
+
+	rules := MapData{
+		"name":        []string{"len:5"},
+		"roll":        []string{"len:5"},
+		"permissions": []string{"len:10"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if validationErr.Get("name") != "custom_message" ||
+		validationErr.Get("roll") != "custom_message" ||
+		validationErr.Get("permissions") != "custom_message" {
+		t.Error("len custom message failed")
+	}
+}
+
+func Test_Numeric(t *testing.T) {
+	type user struct {
+		NID string `json:"nid"`
+	}
+
+	postUser := user{NID: "invalid nid"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"nid": []string{"numeric"},
+	}
+
+	messages := MapData{
+		"nid": []string{"numeric:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("Numeric validation failed!")
+	}
+
+	if validationErr.Get("nid") != "custom_message" {
+		t.Error("Numeric custom message failed!")
+	}
+}
+
+func Test_Numeric_valid(t *testing.T) {
+	type user struct {
+		NID string `json:"nid"`
+	}
+
+	postUser := user{NID: "109922"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"nid": []string{"numeric"},
+	}
+
+	messages := MapData{
+		"nid": []string{"numeric:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Log(validationErr)
+		t.Error("Valid numeric validation failed!")
+	}
+}
+
+func Test_NumericBetween(t *testing.T) {
+	type user struct {
+		Age  int    `json:"age"`
+		CGPA string `json:"cgpa"`
+	}
+
+	postUser := user{Age: 77, CGPA: "2.90"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"age":  []string{"numeric_between:18,60"},
+		"cgpa": []string{"numeric_between:3.5,4.9"},
+	}
+
+	messages := MapData{
+		"age":  []string{"numeric_between:custom_message"},
+		"cgpa": []string{"numeric_between:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Error("numeric_between validation failed!")
+	}
+
+	if validationErr.Get("age") != "custom_message" ||
+		validationErr.Get("cgpa") != "custom_message" {
+		t.Error("numeric_between custom message failed!")
+	}
+}
+
+func Test_URL(t *testing.T) {
+	type user struct {
+		Web string `json:"web"`
+	}
+
+	postUser := user{Web: "invalid url"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"web": []string{"url"},
+	}
+
+	messages := MapData{
+		"web": []string{"url:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Log(validationErr)
+		t.Error("URL validation failed!")
+	}
+
+	if validationErr.Get("web") != "custom_message" {
+		t.Error("URL custom message failed!")
+	}
+}
+
+func Test_URL_valid(t *testing.T) {
+	type user struct {
+		Web string `json:"web"`
+	}
+
+	postUser := user{Web: "www.google.com"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"web": []string{"url"},
+	}
+
+	messages := MapData{
+		"web": []string{"url:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Error("Valid URL validation failed!")
+	}
+}
+
+func Test_UUIDS(t *testing.T) {
+	type user struct {
+		UUID   string `json:"uuid"`
+		UUIDV3 string `json:"uuid3"`
+		UUIDV4 string `json:"uuid4"`
+		UUIDV5 string `json:"uuid5"`
+	}
+
+	postUser := user{
+		UUID:   "invalid uuid",
+		UUIDV3: "invalid uuid",
+		UUIDV4: "invalid uuid",
+		UUIDV5: "invalid uuid",
+	}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"uuid":  []string{"uuid"},
+		"uuid3": []string{"uuid_v3"},
+		"uuid4": []string{"uuid_v4"},
+		"uuid5": []string{"uuid_v5"},
+	}
+
+	messages := MapData{
+		"uuid":  []string{"uuid:custom_message"},
+		"uuid3": []string{"uuid_v3:custom_message"},
+		"uuid4": []string{"uuid_v4:custom_message"},
+		"uuid5": []string{"uuid_v5:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 4 {
+		t.Error("UUID validation failed!")
+	}
+
+	if validationErr.Get("uuid") != "custom_message" ||
+		validationErr.Get("uuid3") != "custom_message" ||
+		validationErr.Get("uuid4") != "custom_message" ||
+		validationErr.Get("uuid5") != "custom_message" {
+		t.Error("UUID custom message failed!")
+	}
+
+}
+
+func Test_min(t *testing.T) {
+	type Body struct {
+		Str     string   `json:"_str"`
+		Slice   []string `json:"_slice"`
+		Int     int      `json:"_int"`
+		Int8    int8     `json:"_int8"`
+		Int16   int16    `json:"_int16"`
+		Int32   int32    `json:"_int32"`
+		Int64   int64    `json:"_int64"`
+		Uint    uint     `json:"_uint"`
+		Uint8   uint8    `json:"_uint8"`
+		Uint16  uint16   `json:"_uint16"`
+		Uint32  uint32   `json:"_uint32"`
+		Uint64  uint64   `json:"_uint64"`
+		Uintptr uintptr  `json:"_uintptr"`
+		Float32 float32  `json:"_float32"`
+		Float64 float64  `json:"_float64"`
+	}
+
+	postBody := Body{
+		Str:     "xyz",
+		Slice:   []string{"x", "y"},
+		Int:     2,
+		Int8:    2,
+		Int16:   2,
+		Int32:   2,
+		Int64:   2,
+		Uint:    2,
+		Uint8:   2,
+		Uint16:  2,
+		Uint32:  2,
+		Uint64:  2,
+		Uintptr: 2,
+		Float32: 2.4,
+		Float64: 3.2,
+	}
+
+	var bodyObj Body
+
+	body, _ := json.Marshal(postBody)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"_str":     []string{"min:5"},
+		"_slice":   []string{"min:5"},
+		"_int":     []string{"min:5"},
+		"_int8":    []string{"min:5"},
+		"_int16":   []string{"min:5"},
+		"_int32":   []string{"min:5"},
+		"_int64":   []string{"min:5"},
+		"_uint":    []string{"min:5"},
+		"_uint8":   []string{"min:5"},
+		"_uint16":  []string{"min:5"},
+		"_uint32":  []string{"min:5"},
+		"_uint64":  []string{"min:5"},
+		"_uintptr": []string{"min:5"},
+		"_float32": []string{"min:5"},
+		"_float64": []string{"min:5"},
+	}
+
+	messages := MapData{
+		"_str":     []string{"min:custom_message"},
+		"_slice":   []string{"min:custom_message"},
+		"_int":     []string{"min:custom_message"},
+		"_uint":    []string{"min:custom_message"},
+		"_float32": []string{"min:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &bodyObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 15 {
+		t.Error("min validation failed!")
+	}
+
+	if validationErr.Get("_str") != "custom_message" ||
+		validationErr.Get("_slice") != "custom_message" ||
+		validationErr.Get("_int") != "custom_message" ||
+		validationErr.Get("_uint") != "custom_message" ||
+		validationErr.Get("_float32") != "custom_message" {
+		t.Error("min custom message failed!")
+	}
+}
+
+func Test_max(t *testing.T) {
+	type Body struct {
+		Str     string   `json:"_str"`
+		Slice   []string `json:"_slice"`
+		Int     int      `json:"_int"`
+		Int8    int8     `json:"_int8"`
+		Int16   int16    `json:"_int16"`
+		Int32   int32    `json:"_int32"`
+		Int64   int64    `json:"_int64"`
+		Uint    uint     `json:"_uint"`
+		Uint8   uint8    `json:"_uint8"`
+		Uint16  uint16   `json:"_uint16"`
+		Uint32  uint32   `json:"_uint32"`
+		Uint64  uint64   `json:"_uint64"`
+		Uintptr uintptr  `json:"_uintptr"`
+		Float32 float32  `json:"_float32"`
+		Float64 float64  `json:"_float64"`
+	}
+
+	postBody := Body{
+		Str:     "xyzabc",
+		Slice:   []string{"x", "y", "z"},
+		Int:     20,
+		Int8:    20,
+		Int16:   20,
+		Int32:   20,
+		Int64:   20,
+		Uint:    20,
+		Uint8:   20,
+		Uint16:  20,
+		Uint32:  20,
+		Uint64:  20,
+		Uintptr: 20,
+		Float32: 20.4,
+		Float64: 30.2,
+	}
+
+	var bodyObj Body
+
+	body, _ := json.Marshal(postBody)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	rules := MapData{
+		"_str":     []string{"max:5"},
+		"_slice":   []string{"max:2"},
+		"_int":     []string{"max:5"},
+		"_int8":    []string{"max:5"},
+		"_int16":   []string{"max:5"},
+		"_int32":   []string{"max:5"},
+		"_int64":   []string{"max:5"},
+		"_uint":    []string{"max:5"},
+		"_uint8":   []string{"max:5"},
+		"_uint16":  []string{"max:5"},
+		"_uint32":  []string{"max:5"},
+		"_uint64":  []string{"max:5"},
+		"_uintptr": []string{"max:5"},
+		"_float32": []string{"max:5"},
+		"_float64": []string{"max:5"},
+	}
+
+	messages := MapData{
+		"_str":     []string{"max:custom_message"},
+		"_slice":   []string{"max:custom_message"},
+		"_int":     []string{"max:custom_message"},
+		"_uint":    []string{"max:custom_message"},
+		"_float32": []string{"max:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &bodyObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 15 {
+		t.Error(validationErr)
+		t.Error("max validation failed!")
+	}
+
+	if validationErr.Get("_str") != "custom_message" ||
+		validationErr.Get("_slice") != "custom_message" ||
+		validationErr.Get("_int") != "custom_message" ||
+		validationErr.Get("_uint") != "custom_message" ||
+		validationErr.Get("_float32") != "custom_message" {
+		t.Error("max custom message failed!")
+	}
+}
+
+func Test_In(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "4"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"in:1,2,3"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("in validation failed!")
+	}
+
+	if validationErr.Get("input") != "custom_message" {
+		t.Error("in custom message failed!")
+	}
+}
+
+func Test_In_valid(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "1"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"in:1,2,3"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Error("in validation was triggered when valid!")
+	}
+}
+
+func Test_In_string(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "bob"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"in:tom,dick,harry"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("in validation failed!")
+	}
+
+	if validationErr.Get("input") != "custom_message" {
+		t.Error("in custom message failed!")
+	}
+}
+
+func Test_In_string_valid(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "dick"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"in:tom,dick,harry"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Error("in validation was triggered when valid!")
+	}
+}
+
+func Test_NotIn(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "2"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"not_in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"not_in:1,2,3"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("not_in validation failed!")
+	}
+
+	if validationErr.Get("input") != "custom_message" {
+		t.Error("not_in custom message failed!")
+	}
+}
+
+func Test_NotIn_valid(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "4"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"not_in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"not_in:1,2,3"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Error("not_in validation was triggered when valid!")
+	}
+}
+
+func Test_NotIn_string(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "harry"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"not_in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"not_in:tom,dick,harry"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 1 {
+		t.Error("not_in validation failed!")
+	}
+
+	if validationErr.Get("input") != "custom_message" {
+		t.Error("not_in custom message failed!")
+	}
+}
+
+func Test_NotIn_string_valid(t *testing.T) {
+	type user struct {
+		Input string `json:"input"`
+	}
+
+	postUser := user{Input: "bob"}
+	var userObj user
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	messages := MapData{
+		"input": []string{"not_in:custom_message"},
+	}
+
+	rules := MapData{
+		"input": []string{"not_in:tom,dick,harry"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Data:     &userObj,
+		Rules:    rules,
+		Messages: messages,
+	}
+
+	vd := New(opts)
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 0 {
+		t.Error("not_in validation was triggered when valid!")
+	}
+}
diff --git a/v2/validationmdl/validationcore/type.go b/v2/validationmdl/validationcore/type.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f5109b7106da6300ce08a70c26dba68b25884b7
--- /dev/null
+++ b/v2/validationmdl/validationcore/type.go
@@ -0,0 +1,133 @@
+package govalidator
+
+import (
+	"bytes"
+	"encoding/json"
+)
+
+// Int describes a custom type of built-in int data type
+type Int struct {
+	Value int  `json:"value"`
+	IsSet bool `json:"isSet"`
+}
+
+var null = []byte("null")
+
+// UnmarshalJSON implements json.Unmarshaler; a JSON null leaves IsSet false.
+func (i *Int) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, null) {
+		return nil
+	}
+	i.IsSet = true
+	var temp int
+	if err := json.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+	i.Value = temp
+	return nil
+}
+
+// MarshalJSON ...
+func (i *Int) MarshalJSON() ([]byte, error) {
+	return json.Marshal(i.Value)
+}
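+
+// For example (a sketch of the intended use): unmarshalling `{"count": null}`
+// into an Int field leaves IsSet false, while `{"count": 0}` sets IsSet true
+// with Value 0, so a "required" rule can tell an explicit zero apart from a
+// missing or null field.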
+
+// Int64 describes a custom type of built-in int64 data type
+type Int64 struct {
+	Value int64 `json:"value"`
+	IsSet bool  `json:"isSet"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler; a JSON null leaves IsSet false.
+func (i *Int64) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, null) {
+		return nil
+	}
+	i.IsSet = true
+	var temp int64
+	if err := json.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+	i.Value = temp
+	return nil
+}
+
+// MarshalJSON ...
+func (i *Int64) MarshalJSON() ([]byte, error) {
+	return json.Marshal(i.Value)
+}
+
+// Float32 describes a custom type of built-in float32 data type
+type Float32 struct {
+	Value float32 `json:"value"`
+	IsSet bool    `json:"isSet"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler; a JSON null leaves IsSet false.
+func (i *Float32) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, null) {
+		return nil
+	}
+	i.IsSet = true
+	var temp float32
+	if err := json.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+	i.Value = temp
+	return nil
+}
+
+// MarshalJSON ...
+func (i *Float32) MarshalJSON() ([]byte, error) {
+	return json.Marshal(i.Value)
+}
+
+// Float64 describes a custom type of built-in float64 data type
+type Float64 struct {
+	Value float64 `json:"value"`
+	IsSet bool    `json:"isSet"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler; a JSON null leaves IsSet false.
+func (i *Float64) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, null) {
+		return nil
+	}
+	i.IsSet = true
+	var temp float64
+	if err := json.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+	i.Value = temp
+	return nil
+}
+
+// MarshalJSON ...
+func (i *Float64) MarshalJSON() ([]byte, error) {
+	return json.Marshal(i.Value)
+}
+
+// Bool describes a custom type of built-in bool data type
+type Bool struct {
+	Value bool `json:"value"`
+	IsSet bool `json:"isSet"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler; a JSON null leaves IsSet false.
+func (i *Bool) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, null) {
+		return nil
+	}
+	i.IsSet = true
+	var temp bool
+	if err := json.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+	i.Value = temp
+	return nil
+}
+
+// MarshalJSON ...
+func (i *Bool) MarshalJSON() ([]byte, error) {
+	return json.Marshal(i.Value)
+}
diff --git a/v2/validationmdl/validationcore/utils.go b/v2/validationmdl/validationcore/utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8a271577dbba592e8964b6b666942cfbcc64d81
--- /dev/null
+++ b/v2/validationmdl/validationcore/utils.go
@@ -0,0 +1,57 @@
+package govalidator
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// isContainRequiredField reports whether the rules contain the "required" rule
+func isContainRequiredField(rules []string) bool {
+	for _, rule := range rules {
+		if rule == "required" {
+			return true
+		}
+	}
+	return false
+}
+
+// isRuleExist reports whether the provided rule name exists (built-in or extended)
+func isRuleExist(rule string) bool {
+	if strings.Contains(rule, ":") {
+		rule = strings.Split(rule, ":")[0]
+	}
+	extendedRules := []string{"size", "mime", "ext"}
+	for _, r := range extendedRules {
+		if r == rule {
+			return true
+		}
+	}
+	if _, ok := rulesFuncMap[rule]; ok {
+		return true
+	}
+	return false
+}
+
+// toString forces data to be a string
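+// (e.g. toString(100) yields "100" and toString([]int{1}) yields "[]int{1}")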
+func toString(v interface{}) string {
+	str, ok := v.(string)
+	if !ok {
+		str = fmt.Sprintf("%#v", v)
+	}
+	return str
+}
+
+// isEmpty reports whether a value is the zero value for its type
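+// (nil, "", empty slices/maps/arrays, 0 and false all count as empty)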
+func isEmpty(x interface{}) bool {
+	rt := reflect.TypeOf(x)
+	if rt == nil {
+		return true
+	}
+	rv := reflect.ValueOf(x)
+	switch rv.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice:
+		return rv.Len() == 0
+	}
+	return reflect.DeepEqual(x, reflect.Zero(rt).Interface())
+}
diff --git a/v2/validationmdl/validationcore/utils110.go b/v2/validationmdl/validationcore/utils110.go
new file mode 100644
index 0000000000000000000000000000000000000000..8742d7482eb7b90eafb2e812a67f548a4ec99d20
--- /dev/null
+++ b/v2/validationmdl/validationcore/utils110.go
@@ -0,0 +1,55 @@
+// +build go1.10
+
+package govalidator
+
+import (
+	"io"
+	"net/http"
+	"path/filepath"
+	"strings"
+)
+
+// getFileInfo reads the file from the request and returns whether it exists, plus its name, extension, mime type and size
+func getFileInfo(r *http.Request, field string) (bool, string, string, string, int64, error) {
+	file, multipartFileHeader, err := r.FormFile(field)
+	if err != nil {
+		return false, "", "", "", 0, err
+	}
+	// Create a buffer to store the header of the file in
+	fileHeader := make([]byte, 512)
+
+	// Copy the headers into the FileHeader buffer
+	if _, err := file.Read(fileHeader); err != nil {
+		if err != io.EOF {
+			return false, "", "", "", 0, err
+		}
+	}
+
+	// set position back to start.
+	if _, err := file.Seek(0, 0); err != nil {
+		return false, "", "", "", 0, err
+	}
+
+	mime := http.DetectContentType(fileHeader)
+	// strip any charset suffix so callers can match on the bare mime type
+	for _, subs := range []string{"; charset=utf-8", ";charset=utf-8", "; charset=UTF-8", ";charset=UTF-8"} {
+		mime = strings.Replace(mime, subs, "", -1)
+	}
+	fExist := file != nil
+	return fExist, multipartFileHeader.Filename,
+		strings.TrimPrefix(filepath.Ext(multipartFileHeader.Filename), "."),
+		strings.TrimSpace(mime),
+		multipartFileHeader.Size,
+		nil
+}
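+
+// Callers unpack the returned tuple as (exists, filename, extension, mime,
+// size, err); see validateFiles in validate_file.go for an example.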
diff --git a/v2/validationmdl/validationcore/utils_pre110.go b/v2/validationmdl/validationcore/utils_pre110.go
new file mode 100644
index 0000000000000000000000000000000000000000..58ce6cad8ac27d68c2d008837a738d8d83a4a30e
--- /dev/null
+++ b/v2/validationmdl/validationcore/utils_pre110.go
@@ -0,0 +1,60 @@
+// +build !go1.10
+
+package govalidator
+
+import (
+	"io"
+	"net/http"
+	"path/filepath"
+	"strings"
+)
+
+// Sizer interface
+type Sizer interface {
+	Size() int64
+}
+
+// getFileInfo reads the file from the request and returns whether it exists, plus its name, extension, mime type and size
+func getFileInfo(r *http.Request, field string) (bool, string, string, string, int64, error) {
+	file, multipartFileHeader, err := r.FormFile(field)
+	if err != nil {
+		return false, "", "", "", 0, err
+	}
+	// Create a buffer to store the header of the file in
+	fileHeader := make([]byte, 512)
+
+	// Copy the headers into the FileHeader buffer
+	if _, err := file.Read(fileHeader); err != nil {
+		if err != io.EOF {
+			return false, "", "", "", 0, err
+		}
+	}
+
+	// set position back to start.
+	if _, err := file.Seek(0, 0); err != nil {
+		return false, "", "", "", 0, err
+	}
+
+	mime := http.DetectContentType(fileHeader)
+	// strip any charset suffix so callers can match on the bare mime type
+	for _, subs := range []string{"; charset=utf-8", ";charset=utf-8", "; charset=UTF-8", ";charset=UTF-8"} {
+		mime = strings.Replace(mime, subs, "", -1)
+	}
+	fExist := file != nil
+	return fExist, multipartFileHeader.Filename,
+		strings.TrimPrefix(filepath.Ext(multipartFileHeader.Filename), "."),
+		strings.TrimSpace(mime),
+		file.(Sizer).Size(),
+		nil
+}
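+
+// Note: multipart.FileHeader gained its Size field only in Go 1.10, which is
+// why this pre-1.10 variant recovers the size through the Sizer assertion above.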
diff --git a/v2/validationmdl/validationcore/utils_test.go b/v2/validationmdl/validationcore/utils_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ef77a19f0e998ff4dc43a0a921af2bf5f9764d4
--- /dev/null
+++ b/v2/validationmdl/validationcore/utils_test.go
@@ -0,0 +1,100 @@
+package govalidator
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func Test_isContainRequiredField(t *testing.T) {
+	if !isContainRequiredField([]string{"required", "email"}) {
+		t.Error("isContainRequiredField failed!")
+	}
+
+	if isContainRequiredField([]string{"numeric", "min:5"}) {
+		t.Error("isContainRequiredField failed!")
+	}
+}
+
+func Benchmark_isContainRequiredField(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		isContainRequiredField([]string{"required", "email"})
+	}
+}
+
+type person struct{}
+
+func (person) Details() string {
+	return "John Doe"
+}
+
+func (person) Age(age string) string {
+	return "Age: " + age
+}
+
+func Test_isRuleExist(t *testing.T) {
+	if !isRuleExist("required") {
+		t.Error("isRuleExist failed for valid rule")
+	}
+	if isRuleExist("not exist") {
+		t.Error("isRuleExist failed for invalid rule")
+	}
+	if !isRuleExist("mime") {
+		t.Error("extended rules failed")
+	}
+}
+
+func Test_toString(t *testing.T) {
+	Int := 100
+	str := toString(Int)
+	typ := reflect.ValueOf(str).Kind()
+	if typ != reflect.String {
+		t.Error("toString failed!")
+	}
+}
+
+func Test_isEmpty(t *testing.T) {
+	var Int int
+	var Int8 int8
+	var Float32 float32
+	var Str string
+	var Slice []int
+	var e interface{}
+	list := map[string]interface{}{
+		"_int":             Int,
+		"_int8":            Int8,
+		"_float32":         Float32,
+		"_str":             Str,
+		"_slice":           Slice,
+		"_empty_interface": e,
+	}
+	for k, v := range list {
+		if !isEmpty(v) {
+			t.Errorf("%v failed", k)
+		}
+	}
+}
+
+func Test_getFileInfo(t *testing.T) {
+	req, err := buildMocFormReq()
+	if err != nil {
+		t.Error("request failed", err)
+	}
+	fExist, fn, ext, mime, size, _ := getFileInfo(req, "file")
+	if !fExist {
+		t.Error("file does not exist")
+	}
+	if fn != "BENCHMARK.md" {
+		t.Error("failed to get file name")
+	}
+	if ext != "md" {
+		t.Error("failed to get file extension")
+	}
+	if !strings.Contains(mime, "text/plain") {
+		t.Log(mime)
+		t.Error("failed to get file mime")
+	}
+	if size <= 0 {
+		t.Error("failed to get file size")
+	}
+}
diff --git a/v2/validationmdl/validationcore/validate_file.go b/v2/validationmdl/validationcore/validate_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..cedf4ed53a14342d673cf5ff3a5424ebc4106ae6
--- /dev/null
+++ b/v2/validationmdl/validationcore/validate_file.go
@@ -0,0 +1,73 @@
+package govalidator
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+// validateFiles validates file size, mime type and extension against the size:, ext: and mime: rules
+func validateFiles(r *http.Request, field, rule, msg string, errsBag url.Values) {
+	_, _, ext, mime, size, fErr := getFileInfo(r, field)
+	// check size
+	if strings.HasPrefix(rule, "size:") {
+		l, err := strconv.ParseInt(strings.TrimPrefix(rule, "size:"), 10, 64)
+		if err != nil {
+			panic(errStringToInt)
+		}
+		if size > l {
+			if msg != "" {
+				errsBag.Add(field, msg)
+			} else {
+				errsBag.Add(field, fmt.Sprintf("The %s field size can not be greater than %d bytes", field, l))
+			}
+		}
+		if fErr != nil {
+			errsBag.Add(field, fmt.Sprintf("The %s field failed to read file when fetching size", field))
+		}
+	}
+
+	// check extension
+	if strings.HasPrefix(rule, "ext:") {
+		exts := strings.Split(strings.TrimPrefix(rule, "ext:"), ",")
+		f := false
+		for _, e := range exts {
+			if e == ext {
+				f = true
+			}
+		}
+		if !f {
+			if msg != "" {
+				errsBag.Add(field, msg)
+			} else {
+				errsBag.Add(field, fmt.Sprintf("The %s field file extension %s is invalid", field, ext))
+			}
+		}
+		if fErr != nil {
+			errsBag.Add(field, fmt.Sprintf("The %s field failed to read file when fetching extension", field))
+		}
+	}
+
+	// check mimes
+	if strings.HasPrefix(rule, "mime:") {
+		mimes := strings.Split(strings.TrimPrefix(rule, "mime:"), ",")
+		f := false
+		for _, m := range mimes {
+			if m == mime {
+				f = true
+			}
+		}
+		if !f {
+			if msg != "" {
+				errsBag.Add(field, msg)
+			} else {
+				errsBag.Add(field, fmt.Sprintf("The %s field file mime %s is invalid", field, mime))
+			}
+		}
+		if fErr != nil {
+			errsBag.Add(field, fmt.Sprintf("The %s field failed to read file when fetching mime", field))
+		}
+	}
+}
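+
+// A sketch of typical usage: file fields are keyed with a "file:" prefix in
+// the rules map, for example
+//
+//	rules := MapData{"file:avatar": []string{"ext:jpg,png", "size:1048576", "mime:image/jpeg,image/png"}}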
diff --git a/v2/validationmdl/validationcore/validate_file_test.go b/v2/validationmdl/validationcore/validate_file_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f122876b6cebfb8d4f3f59b8aae7e05f2553956
--- /dev/null
+++ b/v2/validationmdl/validationcore/validate_file_test.go
@@ -0,0 +1,141 @@
+package govalidator
+
+import (
+	"bytes"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+// buildMocFormReq prepares a mock form-data request carrying a test file
+func buildMocFormReq() (*http.Request, error) {
+	fPath := "doc/BENCHMARK.md"
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+	file, err := os.Open(fPath)
+	if err != nil {
+		return nil, err
+	}
+	part, err := writer.CreateFormFile("file", filepath.Base(fPath))
+	if err != nil {
+		return nil, err
+	}
+	io.Copy(part, file)
+	file.Close()
+	err = writer.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", "www.example.com", body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", writer.FormDataContentType())
+	return req, nil
+}
+
+func Test_validateFiles(t *testing.T) {
+	req, err := buildMocFormReq()
+	if err != nil {
+		t.Error("request failed", err)
+	}
+	rules := MapData{
+		"file:file": []string{"ext:jpg,pdf", "size:10", "mime:application/pdf", "required"},
+	}
+
+	opts := Options{
+		Request: req,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	validationErr := vd.Validate()
+	if len(validationErr) != 1 {
+		t.Error("file validation failed!")
+	}
+}
+
+func Test_validateFiles_message(t *testing.T) {
+	req, err := buildMocFormReq()
+	if err != nil {
+		t.Error("request failed", err)
+	}
+	rules := MapData{
+		"file:file": []string{"ext:jpg,pdf", "size:10", "mime:application/pdf", "required"},
+	}
+
+	msgs := MapData{
+		"file:file": []string{"ext:custom_message"},
+	}
+
+	opts := Options{
+		Request:  req,
+		Rules:    rules,
+		Messages: msgs,
+	}
+
+	vd := New(opts)
+	validationErr := vd.Validate()
+	if len(validationErr) != 1 {
+		t.Error("file validation failed!")
+	}
+	if validationErr.Get("file") != "custom_message" {
+		t.Log(validationErr)
+		t.Error("failed custom message for file validation")
+	}
+}
+
+func Test_validateFiles_CustomRule(t *testing.T) {
+	req, err := buildMocFormReq()
+	if err != nil {
+		t.Error("request failed", err)
+	}
+
+	customRule1WasExecuted := false
+	isMultipartFile := false
+	AddCustomRule("customRule1", func(field string, rule string, message string, value interface{}) error {
+		customRule1WasExecuted = true
+		_, isMultipartFile = value.(multipart.File)
+		return nil
+	})
+
+	customRule2WasExecuted := false
+	isValueNil := false
+	AddCustomRule("customRule2", func(field string, rule string, message string, value interface{}) error {
+		customRule2WasExecuted = true
+		isValueNil = value == nil
+		return nil
+	})
+
+	rules := MapData{
+		"file:file":   []string{"customRule1"},
+		"file:avatar": []string{"customRule2"},
+	}
+
+	opts := Options{
+		Request: req,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	vd.Validate()
+	if !customRule1WasExecuted {
+		t.Error("file validation performed without custom rule!")
+	}
+
+	if !isMultipartFile {
+		t.Error("passed to custom rule value is not file!")
+	}
+
+	if !customRule2WasExecuted {
+		t.Error("file validation performed without custom rule!")
+	}
+
+	if !isValueNil {
+		t.Error("passed to custom rule value is not nil!")
+	}
+}
diff --git a/v2/validationmdl/validationcore/validator.go b/v2/validationmdl/validationcore/validator.go
new file mode 100644
index 0000000000000000000000000000000000000000..503339040da07db6b64b1edb354258910b67850d
--- /dev/null
+++ b/v2/validationmdl/validationcore/validator.go
@@ -0,0 +1,294 @@
+package govalidator
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+)
+
+const (
+	tagIdentifier         = "json" // tagIdentifier identifies the struct tag for govalidator
+	tagSeparator          = "|"    // tagSeparator is used to separate tags in a struct
+	defaultFormSize int64 = 1024 * 1024 * 1
+)
+
+type (
+	// MapData represents basic data structure for govalidator Rules and Messages
+	MapData map[string][]string
+
+	// Options describes configuration option for validator
+	Options struct {
+		Data            interface{} // Data represents structure for JSON body
+		Request         *http.Request
+		RequiredDefault bool    // RequiredDefault represents if all the fields are by default required or not
+		Rules           MapData // Rules represents rules for form-data/x-url-encoded/query params data
+		Messages        MapData // Messages represents custom/localize message for rules
+		TagIdentifier   string  // TagIdentifier represents struct tag identifier, e.g: json or validate etc
+		FormSize        int64   // FormSize represents the multipart form data max memory size in bytes
+		JSONData        []byte  // For validating JSON data
+	}
+
+	// Validator represents a validator with options
+	Validator struct {
+		Opts Options // Opts contains all the options for validator
+	}
+)
+
+// New returns a new validator object using the provided options
+func New(opts Options) *Validator {
+	return &Validator{Opts: opts}
+}
+
+// getCustomMessage returns the custom message registered against the field
+// name and rule; if none is available it returns an empty string
+func (v *Validator) getCustomMessage(field, rule string) string {
+	// match on the full rule first (e.g. "date:dd-mm-yyyy"), then fall back to
+	// the rule name with its params removed (e.g. "between:3,5" becomes "between")
+	baseRule := rule
+	if strings.Contains(baseRule, ":") {
+		baseRule = strings.Split(baseRule, ":")[0]
+	}
+	if msgList, ok := v.Opts.Messages[field]; ok {
+		for _, m := range msgList {
+			if strings.HasPrefix(m, rule+":") {
+				return strings.TrimPrefix(m, rule+":")
+			}
+			if strings.HasPrefix(m, baseRule+":") {
+				return strings.TrimPrefix(m, baseRule+":")
+			}
+		}
+	}
+	return ""
+}
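+
+// For example, with Messages set to MapData{"age": {"numeric_between:custom_message"}}
+// and rule "numeric_between:18,60", the exact match fails but the fallback on
+// the bare rule name returns "custom_message".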
+
+// SetDefaultRequired changes the required behavior of fields
+// Default value is false
+// If SetDefaultRequired is set to true, all the fields in the rules list are marked as required
+func (v *Validator) SetDefaultRequired(required bool) {
+	v.Opts.RequiredDefault = required
+}
+
+// SetTagIdentifier changes the default tag identifier (json) to your custom tag.
+func (v *Validator) SetTagIdentifier(identifier string) {
+	v.Opts.TagIdentifier = identifier
+}
+
+// Validate validates request data like form-data, x-www-form-urlencoded and query params
+// see example in README.md file
+// ref: https://github.com/thedevsaddam/govalidator#example
+func (v *Validator) Validate() url.Values {
+	// if the request object or rules are not passed, raise a panic
+	if len(v.Opts.Rules) == 0 || v.Opts.Request == nil {
+		panic(errValidateArgsMismatch)
+	}
+	errsBag := url.Values{}
+
+	// get non required rules
+	nr := v.getNonRequiredFields()
+
+	for field, rules := range v.Opts.Rules {
+		if _, ok := nr[field]; ok {
+			continue
+		}
+		for _, rule := range rules {
+			if !isRuleExist(rule) {
+				panic(fmt.Errorf("govalidator: %s is not a valid rule", rule))
+			}
+			msg := v.getCustomMessage(field, rule)
+			// validate file
+			if strings.HasPrefix(field, "file:") {
+				fld := strings.TrimPrefix(field, "file:")
+				file, fh, _ := v.Opts.Request.FormFile(fld)
+				if file != nil && fh.Filename != "" {
+					validateFiles(v.Opts.Request, fld, rule, msg, errsBag)
+					validateCustomRules(fld, rule, msg, file, errsBag)
+				} else {
+					validateCustomRules(fld, rule, msg, nil, errsBag)
+				}
+			} else {
+				// validate if custom rules exist
+				reqVal := strings.TrimSpace(v.Opts.Request.Form.Get(field))
+				validateCustomRules(field, rule, msg, reqVal, errsBag)
+			}
+		}
+	}
+
+	return errsBag
+}
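+
+// A minimal usage sketch (assuming an incoming *http.Request r):
+//
+//	v := New(Options{Request: r, Rules: MapData{"email": {"required", "email"}}})
+//	if errs := v.Validate(); len(errs) > 0 {
+//		// errs is a url.Values keyed by field name
+//	}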
+
+// getNonRequiredFields returns the fields that may be skipped: when
+// RequiredDefault is false, a field with no input data and no "required"
+// rule is not validated
+func (v *Validator) getNonRequiredFields() map[string]struct{} {
+	if v.Opts.FormSize > 0 {
+		v.Opts.Request.ParseMultipartForm(v.Opts.FormSize)
+	} else {
+		v.Opts.Request.ParseMultipartForm(defaultFormSize)
+	}
+
+	inputs := v.Opts.Request.Form
+	nr := make(map[string]struct{})
+	if !v.Opts.RequiredDefault {
+		for k, r := range v.Opts.Rules {
+			isFile := strings.HasPrefix(k, "file:")
+			if _, ok := inputs[k]; !ok && !isFile {
+				if !isContainRequiredField(r) {
+					nr[k] = struct{}{}
+				}
+			}
+		}
+	}
+	return nr
+}
+
+// ValidateJSON validates request data from a JSON body decoded into a Go struct
+// see example in README.md file
+func (v *Validator) ValidateJSON() url.Values {
+	if len(v.Opts.Rules) == 0 || v.Opts.Request == nil {
+		panic(errValidateArgsMismatch)
+	}
+	if reflect.TypeOf(v.Opts.Data).Kind() != reflect.Ptr {
+		panic(errRequirePtr)
+	}
+	errsBag := url.Values{}
+
+	defer v.Opts.Request.Body.Close()
+	err := json.NewDecoder(v.Opts.Request.Body).Decode(v.Opts.Data)
+	if err != nil {
+		errsBag.Add("_error", err.Error())
+		return errsBag
+	}
+	r := roller{}
+	r.setTagIdentifier(tagIdentifier)
+	if v.Opts.TagIdentifier != "" {
+		r.setTagIdentifier(v.Opts.TagIdentifier)
+	}
+	r.setTagSeparator(tagSeparator)
+	r.start(v.Opts.Data)
+
+	// skip fields whose key is absent or whose value is empty or the zero value
+	nr := v.getNonRequiredJSONFields(r.getFlatMap())
+
+	for field, rules := range v.Opts.Rules {
+		if _, ok := nr[field]; ok {
+			continue
+		}
+		value, _ := r.getFlatVal(field)
+		for _, rule := range rules {
+			if !isRuleExist(rule) {
+				panic(fmt.Errorf("govalidator: %s is not a valid rule", rule))
+			}
+			msg := v.getCustomMessage(field, rule)
+			validateCustomRules(field, rule, msg, value, errsBag)
+		}
+	}
+
+	return errsBag
+}
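+
+// A minimal usage sketch: decode the JSON body into a struct and apply rules:
+//
+//	var u User
+//	v := New(Options{Request: req, Data: &u, Rules: MapData{"email": {"email"}}})
+//	errs := v.ValidateJSON() // decodes req.Body into u, then validates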
+
+// getNonRequiredJSONFields returns the non-required fields: when
+// RequiredDefault is false, a field with an empty input value and no
+// "required" rule is not validated
+func (v *Validator) getNonRequiredJSONFields(inputs map[string]interface{}) map[string]struct{} {
+	nr := make(map[string]struct{})
+	if !v.Opts.RequiredDefault {
+		for k, r := range v.Opts.Rules {
+			if val := inputs[k]; isEmpty(val) {
+				if !isContainRequiredField(r) {
+					nr[k] = struct{}{}
+				}
+			}
+		}
+	}
+	return nr
+}
+
+// ValidateJSONData validates the raw JSON payload held in Opts.JSONData against the rules
+func (v *Validator) ValidateJSONData() url.Values {
+
+	if len(v.Opts.Rules) == 0 {
+		panic(errValidateArgsMismatch)
+	}
+
+	errsBag := url.Values{}
+
+	//This code is working - Do not delete
+	unmarshalError := json.Unmarshal(v.Opts.JSONData, &v.Opts.Data)
+	if unmarshalError != nil {
+		errsBag.Add("_error", unmarshalError.Error())
+		return errsBag
+	}
+
+	// data := make(map[string]interface{}, 0)
+	// v.Opts.Data = &data
+
+	// decodeError := json.NewDecoder(bytes.NewBuffer(v.Opts.JSONData)).Decode(v.Opts.Data)
+	// if decodeError != nil {
+	// 	errsBag.Add("_error", decodeError.Error())
+	// 	return errsBag
+	// }
+
+	r := roller{}
+	r.setTagIdentifier(tagIdentifier)
+	if v.Opts.TagIdentifier != "" {
+		r.setTagIdentifier(v.Opts.TagIdentifier)
+	}
+	r.setTagSeparator(tagSeparator)
+	r.start(v.Opts.Data)
+
+	// skip fields whose key is absent or whose value is empty or the zero value
+	nr := v.getNonRequiredJSONFields(r.getFlatMap())
+
+	for field, rules := range v.Opts.Rules {
+		if _, ok := nr[field]; ok {
+			continue
+		}
+		value, _ := r.getFlatVal(field)
+		for _, rule := range rules {
+			if !isRuleExist(rule) {
+				panic(fmt.Errorf("govalidator: %s is not a valid rule", rule))
+			}
+			msg := v.getCustomMessage(field, rule)
+			validateCustomRules(field, rule, msg, value, errsBag)
+		}
+	}
+
+	return errsBag
+}
+
+// ValidateJSONString validates the already-parsed data in Opts.Data against the rules
+func (v *Validator) ValidateJSONString() url.Values {
+
+	if len(v.Opts.Rules) == 0 {
+		panic(errValidateArgsMismatch)
+	}
+
+	errsBag := url.Values{}
+
+	r := roller{}
+	r.setTagIdentifier(tagIdentifier)
+	if v.Opts.TagIdentifier != "" {
+		r.setTagIdentifier(v.Opts.TagIdentifier)
+	}
+	r.setTagSeparator(tagSeparator)
+	r.start(v.Opts.Data)
+
+	// skip fields whose key is absent or whose value is empty or the zero value
+	nr := v.getNonRequiredJSONFields(r.getFlatMap())
+
+	for field, rules := range v.Opts.Rules {
+		if _, ok := nr[field]; ok {
+			continue
+		}
+		value, _ := r.getFlatVal(field)
+		for _, rule := range rules {
+			if !isRuleExist(rule) {
+				panic(fmt.Errorf("govalidator: %s is not a valid rule", rule))
+			}
+			msg := v.getCustomMessage(field, rule)
+			validateCustomRules(field, rule, msg, value, errsBag)
+		}
+	}
+
+	return errsBag
+}
diff --git a/v2/validationmdl/validationcore/validator_test.go b/v2/validationmdl/validationcore/validator_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d1397f0a09c8a24c5f0469a4ef320dce19ba519
--- /dev/null
+++ b/v2/validationmdl/validationcore/validator_test.go
@@ -0,0 +1,186 @@
+package govalidator
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"testing"
+)
+
+func TestValidator_SetDefaultRequired(t *testing.T) {
+	v := New(Options{})
+	v.SetDefaultRequired(true)
+	if !v.Opts.RequiredDefault {
+		t.Error("SetDefaultRequired failed")
+	}
+}
+
+func TestValidator_Validate(t *testing.T) {
+	URL, _ := url.Parse("http://www.example.com")
+	params := url.Values{}
+	params.Add("name", "John Doe")
+	params.Add("username", "jhondoe")
+	params.Add("email", "john@mail.com")
+	params.Add("zip", "8233")
+	URL.RawQuery = params.Encode()
+	r, _ := http.NewRequest("GET", URL.String(), nil)
+	rulesList := MapData{
+		"name":  []string{"required"},
+		"age":   []string{"between:5,16"},
+		"email": []string{"email"},
+		"zip":   []string{"digits:4"},
+	}
+
+	opts := Options{
+		Request: r,
+		Rules:   rulesList,
+	}
+	v := New(opts)
+	validationError := v.Validate()
+	if len(validationError) > 0 {
+		t.Error("Validate failed to validate correct inputs!")
+	}
+
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("Validate did not panic")
+		}
+	}()
+
+	v1 := New(Options{Rules: MapData{}})
+	v1.Validate()
+}
+
+func Benchmark_Validate(b *testing.B) {
+	URL, _ := url.Parse("http://www.example.com")
+	params := url.Values{}
+	params.Add("name", "John Doe")
+	params.Add("age", "27")
+	params.Add("email", "john@mail.com")
+	params.Add("zip", "8233")
+	URL.RawQuery = params.Encode()
+	r, _ := http.NewRequest("GET", URL.String(), nil)
+	rulesList := MapData{
+		"name":  []string{"required"},
+		"age":   []string{"numeric_between:18,60"},
+		"email": []string{"email"},
+		"zip":   []string{"digits:4"},
+	}
+
+	opts := Options{
+		Request: r,
+		Rules:   rulesList,
+	}
+	v := New(opts)
+	for n := 0; n < b.N; n++ {
+		v.Validate()
+	}
+}
+
+//============ validate json test ====================
+
+func TestValidator_ValidateJSON(t *testing.T) {
+	type User struct {
+		Name    string `json:"name"`
+		Email   string `json:"email"`
+		Address string `json:"address"`
+		Age     int    `json:"age"`
+		Zip     string `json:"zip"`
+		Color   int    `json:"color"`
+	}
+
+	postUser := User{
+		Name:    "",
+		Email:   "inalid email",
+		Address: "",
+		Age:     1,
+		Zip:     "122",
+		Color:   5,
+	}
+
+	rules := MapData{
+		"name":    []string{"required"},
+		"email":   []string{"email"},
+		"address": []string{"required", "between:3,5"},
+		"age":     []string{"bool"},
+		"zip":     []string{"len:4"},
+		"color":   []string{"min:10"},
+	}
+
+	var user User
+
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	opts := Options{
+		Request: req,
+		Data:    &user,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	vd.SetTagIdentifier("json")
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 5 {
+		t.Error("ValidateJSON failed")
+	}
+}
+
+func TestValidator_ValidateJSON_NULLValue(t *testing.T) {
+	type User struct {
+		Name   string `json:"name"`
+		Count  Int    `json:"count"`
+		Option Int    `json:"option"`
+		Active Bool   `json:"active"`
+	}
+
+	rules := MapData{
+		"name":   []string{"required"},
+		"count":  []string{"required"},
+		"option": []string{"required"},
+		"active": []string{"required"},
+	}
+
+	postUser := map[string]interface{}{
+		"name":   "John Doe",
+		"count":  0,
+		"option": nil,
+		"active": nil,
+	}
+
+	var user User
+	body, _ := json.Marshal(postUser)
+	req, _ := http.NewRequest("POST", "http://www.example.com", bytes.NewReader(body))
+
+	opts := Options{
+		Request: req,
+		Data:    &user,
+		Rules:   rules,
+	}
+
+	vd := New(opts)
+	vd.SetTagIdentifier("json")
+	validationErr := vd.ValidateJSON()
+	if len(validationErr) != 2 {
+		t.Error("ValidateJSON failed")
+	}
+}
+
+func TestValidator_ValidateJSON_panic(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("ValidateJSON did not panic")
+		}
+	}()
+
+	opts := Options{}
+
+	vd := New(opts)
+	vd.ValidateJSON() // must panic: no rules, request or data were provided
+}
diff --git a/v2/validationmdl/validationmdl.go b/v2/validationmdl/validationmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca08e3164b0f5690f15f44f39545c5d7a76287fc
--- /dev/null
+++ b/v2/validationmdl/validationmdl.go
@@ -0,0 +1,155 @@
+package validationmdl
+
+import (
+	"errors"
+	"net/url"
+
+	"github.com/tidwall/gjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/errormdl"
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	requestValidator "corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/validationmdl/validationcore"
+	govalidator "github.com/asaskevich/govalidator"
+)
+
+// var cnt int
+
+// //ValidateRequest func validates the given model
+// func ValidateRequest(httpRequest *http.Request, validationRules, validationMessages requestValidator.MapData) map[string]interface{} {
+
+// 	cnt++
+// 	//Get the content type of the request as validations for content types are different
+// 	contentType := httpRequest.Header.Get("Content-Type")
+
+// 	//Initialize the validation errors as blank
+// 	var validationErrors url.Values
+
+// 	//Set validator options
+// 	opts := requestValidator.Options{
+// 		Request: httpRequest,
+// 		Rules:   validationRules,
+// 	}
+
+// 	//Set custom validation messages if sent from user
+// 	if validationMessages != nil {
+// 		opts.Messages = validationMessages
+// 	}
+
+// 	if contentType == "application/json" || contentType == "text/plain" {
+// 		//Validate request type json and text (RAW data from request)
+// 		data := make(map[string]interface{}, 0)
+// 		opts.Data = &data
+// 		validator := requestValidator.New(opts)
+// 		validationErrors = validator.ValidateJSON()
+
+// 	} else {
+// 		//Validate request type form-data, form-urlencoded
+// 		validator := requestValidator.New(opts)
+// 		validationErrors = validator.Validate()
+// 	}
+
+// 	if len(validationErrors) > 0 {
+// 		errs := map[string]interface{}{"validationErrors": validationErrors}
+// 		return errs
+// 	}
+// 	return nil
+// }
+
+// // ValidateStruct validates the structures with govalidator
+// func ValidateStruct(structToValidate interface{}) error {
+// 	validationResult, err := govalidator.ValidateStruct(structToValidate)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	if !errormdl.CheckBool(validationResult) {
+// 		return errors.New("ERROR:ValidateStruct function error")
+// 	}
+// 	return nil
+// }
+
+// //ValidateJSONData to validate JSON data
+// func ValidateJSONData(jsonData []byte, validationRules, validationMessages requestValidator.MapData) map[string]interface{} {
+
+// 	//Initialize the validation errors as blank
+// 	var validationErrors url.Values
+
+// 	//Set validator options
+// 	opts := requestValidator.Options{
+// 		Rules:    validationRules,
+// 		JSONData: jsonData,
+// 	}
+
+// 	//Set custom validation messages if sent from user
+// 	if validationMessages != nil {
+// 		opts.Messages = validationMessages
+// 	}
+
+// 	validator := requestValidator.New(opts)
+// 	validationErrors = validator.ValidateJSONData()
+
+// 	if len(validationErrors) > 0 {
+// 		errs := map[string]interface{}{"validationErrors": validationErrors}
+// 		return errs
+// 	}
+
+// 	return nil
+// }
+
+// ValidateJSONString validates a JSON string against the given rules.
+func ValidateJSONString(jsonString string, validationRules, validationMessages requestValidator.MapData) map[string]interface{} {
+
+	var validationErrors url.Values
+
+	opts := requestValidator.Options{
+		Rules: validationRules,
+	}
+	if validationMessages != nil {
+		opts.Messages = validationMessages
+	}
+
+	data, ok := gjson.Parse(jsonString).Value().(map[string]interface{})
+	if !ok {
+		// Malformed JSON must be reported instead of passing silently.
+		loggermdl.LogError("cannot cast JSON to map")
+		return map[string]interface{}{"error": "cannot cast JSON to map"}
+	}
+	opts.Data = data
+
+	validator := requestValidator.New(opts)
+	validationErrors = validator.ValidateJSONString()
+
+	if len(validationErrors) > 0 {
+		return map[string]interface{}{"validationErrors": validationErrors}
+	}
+	return nil
+}
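+
+// A minimal usage sketch (the rule names follow the validator
+// conventions used elsewhere in this package; the payload is
+// illustrative):
+//
+//	rules := requestValidator.MapData{"email": []string{"required", "email"}}
+//	if errs := ValidateJSONString(`{"email":"a@b.c"}`, rules, nil); errs != nil {
+//		// errs["validationErrors"] holds url.Values keyed by field name
+//	}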
+
+// ValidateGJSONResult validates a parsed gjson.Result against the given rules.
+func ValidateGJSONResult(rs *gjson.Result, validationRules, validationMessages requestValidator.MapData) map[string]interface{} {
+	var validationErrors url.Values
+	opts := requestValidator.Options{
+		Rules: validationRules,
+	}
+	if validationMessages != nil {
+		opts.Messages = validationMessages
+	}
+	data, ok := rs.Value().(map[string]interface{})
+	if !ok {
+		loggermdl.LogError("cannot cast to map", rs.Value())
+		return map[string]interface{}{"error": "cannot cast to map"}
+	}
+	opts.Data = data
+
+	validator := requestValidator.New(opts)
+	validationErrors = validator.ValidateJSONString()
+	if len(validationErrors) > 0 {
+		return map[string]interface{}{"validationErrors": validationErrors}
+	}
+	return nil
+}
+
+// ValidateStruct validates a struct's fields using govalidator "valid" tags.
+func ValidateStruct(structToValidate interface{}) error {
+	validationResult, err := govalidator.ValidateStruct(structToValidate)
+	if err != nil {
+		return err
+	}
+	if !errormdl.CheckBool(validationResult) {
+		return errors.New("ERROR:ValidateStruct function error")
+	}
+	return nil
+}
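+
+// Usage sketch for ValidateStruct (tags follow govalidator conventions;
+// the struct here is illustrative):
+//
+//	type User struct {
+//		Email string `json:"email" valid:"required,email"`
+//	}
+//	if err := ValidateStruct(User{Email: "user@example.com"}); err != nil {
+//		// handle validation failure
+//	}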
diff --git a/v2/validationmdl/validationmdl_test.go b/v2/validationmdl/validationmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea307bf82f2f3c7267d9e233ab3c1557f8a38ae5
--- /dev/null
+++ b/v2/validationmdl/validationmdl_test.go
@@ -0,0 +1,440 @@
+package validationmdl
+
+import (
+	"testing"
+
+	"github.com/tidwall/gjson"
+
+	"corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/loggermdl"
+	requestValidator "corelab.mkcl.org/MKCLOS/coredevelopmentplatform/corepkgv2/validationmdl/validationcore"
+)
+
+type RequestBodyData struct {
+	Email    string `json:"email"`
+	Password string `json:"password"`
+}
+
+//Validation rules
+var validationRules = requestValidator.MapData{
+	"email":    []string{"required", "min:7", "max:20", "email"},
+	"password": []string{"required"},
+}
+
+//Validation messages
+var validationMessages = requestValidator.MapData{
+	// "email":    []string{"required:Email required", "min:Email min len", "max:Email max len", "email:Invalid email"},
+	// "password": []string{"required:Password required"},
+}
+
+// func TestValidateRequest(t *testing.T) {
+
+// 	//Cases for sending raw data in request
+// 	//Case 1: Http request for sunny day scenario
+// 	sunnyDayData := RequestBodyData{Email: "test@mkcl.org", Password: "test"}
+// 	sunnyDayByteArray, _ := ffjson.Marshal(&sunnyDayData)
+// 	sunnyDayHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(sunnyDayByteArray)))
+// 	sunnyDayHTTPRequest.Header.Set("Content-Type", "application/json")
+
+// 	//Case 2 : Http request for blank email
+// 	blankEmailData := RequestBodyData{Email: "", Password: "test"}
+// 	blankEmailByteArray, _ := ffjson.Marshal(&blankEmailData)
+// 	blankEmailHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(blankEmailByteArray)))
+// 	blankEmailHTTPRequest.Header.Set("Content-Type", "application/json")
+// 	blankEmailExpectedResult := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email required", "Email min len", "Invalid email"},
+// 		},
+// 	}
+
+// 	//Case 3 : Http request for blank password
+// 	blankPasswordData := RequestBodyData{Email: "test@mkcl.org", Password: ""}
+// 	blankPasswordByteArray, _ := ffjson.Marshal(&blankPasswordData)
+// 	blankPasswordHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(blankPasswordByteArray)))
+// 	blankPasswordHTTPRequest.Header.Set("Content-Type", "application/json")
+// 	blankPasswordExpectedResult := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"password": []string{"Password required"},
+// 		},
+// 	}
+
+// 	//Case 4 : Http request for email with shorter length than required
+// 	shortEmailLengthData := RequestBodyData{Email: "a@c.v", Password: "test"}
+// 	shortEmailLengthByteArray, _ := ffjson.Marshal(&shortEmailLengthData)
+// 	shortEmailLengthHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(shortEmailLengthByteArray)))
+// 	shortEmailLengthHTTPRequest.Header.Set("Content-Type", "application/json")
+// 	shortEmailLengthExpectedResult := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email min len"},
+// 		},
+// 	}
+
+// 	//Case 5 : Http request for email with longer length than required
+// 	longEmailLengthData := RequestBodyData{Email: "testEmail@Testcompany.testdomain", Password: "test"}
+// 	longEmailLengthByteArray, _ := ffjson.Marshal(&longEmailLengthData)
+// 	longEmailLengthHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(longEmailLengthByteArray)))
+// 	longEmailLengthHTTPRequest.Header.Set("Content-Type", "application/json")
+// 	longEmailLengthExpectedResult := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email max len"},
+// 		},
+// 	}
+
+// 	//Case 6 : Http request for invalid email id
+// 	invalidEmailData := RequestBodyData{Email: "testemail", Password: "test"}
+// 	invalidEmailByteArray, _ := ffjson.Marshal(&invalidEmailData)
+// 	invalidEmailHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(invalidEmailByteArray)))
+// 	invalidEmailHTTPRequest.Header.Set("Content-Type", "application/json")
+// 	invalidEmailExpectedResult := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Invalid email"},
+// 		},
+// 	}
+
+// 	//Case 7 : Http request for blank email using form encoding
+// 	sunnyDayForm := url.Values{}
+// 	sunnyDayForm.Add("email", "")
+// 	sunnyDayForm.Add("password", "password")
+// 	sunnyDayHTTPRequestFormEnc, _ := http.NewRequest("POST", "test.com", strings.NewReader(sunnyDayForm.Encode()))
+// 	sunnyDayHTTPRequestFormEnc.PostForm = sunnyDayForm
+// 	sunnyDayHTTPRequestFormEnc.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+// 	sunnyDayExpectedResultFormEnc := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email required", "Email min len", "Invalid email"},
+// 		},
+// 	}
+
+// 	//Case 8 : Http request for blank email using form encoding
+// 	blankEmailForm := url.Values{}
+// 	blankEmailForm.Add("email", "")
+// 	blankEmailForm.Add("password", "password")
+// 	blankEmailHTTPRequestFormEnc, _ := http.NewRequest("POST", "test.com", strings.NewReader(blankEmailForm.Encode()))
+// 	blankEmailHTTPRequestFormEnc.PostForm = blankEmailForm
+// 	blankEmailHTTPRequestFormEnc.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+// 	blankEmailExpectedResultFormEnc := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email required", "Email min len", "Invalid email"},
+// 		},
+// 	}
+
+// 	//Case 9 : Http request for blank password using form encoding
+// 	blankPasswordForm := url.Values{}
+// 	blankPasswordForm.Add("email", "test@mkcl.org")
+// 	blankPasswordForm.Add("password", "")
+// 	blankPasswordHTTPRequestFormEnc, _ := http.NewRequest("POST", "test.com", strings.NewReader(blankPasswordForm.Encode()))
+// 	blankPasswordHTTPRequestFormEnc.PostForm = blankPasswordForm
+// 	blankPasswordHTTPRequestFormEnc.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+// 	blankPasswordExpectedResultFormEnc := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"password": []string{"Password required"},
+// 		},
+// 	}
+
+// 	//Case 10 : Http request for email with shorter length than required using form encoding
+// 	shortEmailLengthForm := url.Values{}
+// 	shortEmailLengthForm.Add("email", "a@v.c")
+// 	shortEmailLengthForm.Add("password", "testPass")
+// 	shortEmailLengthHTTPRequestFormEnc, _ := http.NewRequest("POST", "test.com", strings.NewReader(shortEmailLengthForm.Encode()))
+// 	shortEmailLengthHTTPRequestFormEnc.PostForm = shortEmailLengthForm
+// 	shortEmailLengthHTTPRequestFormEnc.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+// 	shortEmailLengthExpectedResultFormEnc := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email min len"},
+// 		},
+// 	}
+
+// 	//Case 11 : Http request for email with longer length than required using form encoding
+// 	longEmailLengthForm := url.Values{}
+// 	longEmailLengthForm.Add("email", "testEmail@Testcompany.testdomain")
+// 	longEmailLengthForm.Add("password", "testPass")
+// 	longEmailLengthHTTPRequestFormEnc, _ := http.NewRequest("POST", "test.com", strings.NewReader(longEmailLengthForm.Encode()))
+// 	longEmailLengthHTTPRequestFormEnc.PostForm = longEmailLengthForm
+// 	longEmailLengthHTTPRequestFormEnc.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+// 	longEmailLengthExpectedResultFormEnc := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Email max len"},
+// 		},
+// 	}
+
+// 	//Case 12 : Http request for invalid email using form encoding
+// 	invalidEmailLengthForm := url.Values{}
+// 	invalidEmailLengthForm.Add("email", "testasdfasdf")
+// 	invalidEmailLengthForm.Add("password", "test")
+// 	invalidEmailLengthHTTPRequestFormEnc, _ := http.NewRequest("POST", "test.com", strings.NewReader(invalidEmailLengthForm.Encode()))
+// 	invalidEmailLengthHTTPRequestFormEnc.PostForm = invalidEmailLengthForm
+// 	invalidEmailLengthHTTPRequestFormEnc.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+// 	invalidEmailLengthExpectedResultFormEnc := map[string]interface{}{
+// 		"validationErrors": url.Values{
+// 			"email": []string{"Invalid email"},
+// 		},
+// 	}
+
+// 	type args struct {
+// 		httpRequest        *http.Request
+// 		validationRules    requestValidator.MapData
+// 		validationMessages requestValidator.MapData
+// 	}
+// 	tests := []struct {
+// 		name string
+// 		args args
+// 		want map[string]interface{}
+// 	}{
+// 		{"TestSunnyDay", args{sunnyDayHTTPRequest, validationRules, validationMessages}, nil},
+// 		{"BlankEmailValidation", args{blankEmailHTTPRequest, validationRules, validationMessages}, blankEmailExpectedResult},
+// 		{"BlankPasswordValidation", args{blankPasswordHTTPRequest, validationRules, validationMessages}, blankPasswordExpectedResult},
+// 		{"ShortEmailLengthValidation", args{shortEmailLengthHTTPRequest, validationRules, validationMessages}, shortEmailLengthExpectedResult},
+// 		{"LongEmailLengthValidation", args{longEmailLengthHTTPRequest, validationRules, validationMessages}, longEmailLengthExpectedResult},
+// 		{"InvalidEmailValidation", args{invalidEmailHTTPRequest, validationRules, validationMessages}, invalidEmailExpectedResult},
+// 		{"SunnyDayFormEnc", args{sunnyDayHTTPRequestFormEnc, validationRules, validationMessages}, sunnyDayExpectedResultFormEnc},
+// 		{"BlankEmailValidationFormEnc", args{blankEmailHTTPRequestFormEnc, validationRules, validationMessages}, blankEmailExpectedResultFormEnc},
+// 		{"BlankPasswordValidationFormEnc", args{blankPasswordHTTPRequestFormEnc, validationRules, validationMessages}, blankPasswordExpectedResultFormEnc},
+// 		{"ShortEmailLengthValidationFormEnc", args{shortEmailLengthHTTPRequestFormEnc, validationRules, validationMessages}, shortEmailLengthExpectedResultFormEnc},
+// 		{"LongEmailLengthValidationFormEnc", args{longEmailLengthHTTPRequestFormEnc, validationRules, validationMessages}, longEmailLengthExpectedResultFormEnc},
+// 		{"InvalidEmailLengthValidationFormEnc", args{invalidEmailLengthHTTPRequestFormEnc, validationRules, validationMessages}, invalidEmailLengthExpectedResultFormEnc},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			if got := ValidateRequest(tt.args.httpRequest, tt.args.validationRules, tt.args.validationMessages); !reflect.DeepEqual(got, tt.want) {
+// 				t.Errorf("ValidateRequest() = %v, want %v", got, tt.want)
+// 			}
+// 		})
+// 	}
+
+// }
+
+// func BenchmarkValidateRequest(b *testing.B) {
+
+// 	//Validation rules
+// 	validationRules := requestValidator.MapData{
+// 		"email":    []string{"required", "min:5", "max:20", "email"},
+// 		"password": []string{"required"},
+// 	}
+// 	//Validation messages
+// 	validationMessages := requestValidator.MapData{
+// 		"email":    []string{"required:Email Id is required", "min:Min length 5 required", "max:Max length 20 allowed", "email:Enter a valid email"},
+// 		"password": []string{"required:Password is required"},
+// 	}
+
+// 	sunnyDayData := RequestBodyData{Email: "test@mkcl.org", Password: "test"}
+// 	sunnyDayByteArray, _ := ffjson.Marshal(&sunnyDayData)
+// 	sunnyDayHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(sunnyDayByteArray)))
+// 	sunnyDayHTTPRequest.Header.Set("Content-Type", "application/json")
+
+// 	for i := 0; i < b.N; i++ {
+
+// 		ValidateRequest(sunnyDayHTTPRequest, validationRules, validationMessages)
+
+// 	}
+// }
+
+// func BenchmarkYQLTestAgainstValidator(b *testing.B) {
+
+// 	for i := 0; i < b.N; i++ {
+
+// 		rawYQL := `age>=23`
+// 		yql.Match(rawYQL, map[string]interface{}{
+// 			"age": int64(24),
+// 		})
+// 	}
+// }
+
+// func BenchmarkValidationTestAgainstYQL(b *testing.B) {
+
+// 	type TestData struct {
+// 		Age int64 `json:"age"`
+// 	}
+
+// 	validationRules := requestValidator.MapData{
+// 		"age": []string{"required", "min:23"},
+// 	}
+
+// 	sunnyDayData := TestData{Age: 24}
+// 	sunnyDayByteArray, _ := ffjson.Marshal(&sunnyDayData)
+// 	sunnyDayHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(sunnyDayByteArray)))
+// 	sunnyDayHTTPRequest.Header.Set("Content-Type", "application/json")
+
+// 	for i := 0; i < b.N; i++ {
+// 		ValidateRequest(sunnyDayHTTPRequest, validationRules, nil)
+// 	}
+// }
+
+type StructToValidate struct {
+	Name   string `json:"name" valid:"required,alpha,length(4|8)"`
+	Email  string `json:"email" valid:"required,email"`
+	Age    int    `json:"age" valid:"required,range(18|50)"`
+	Mobile string `json:"mobile" valid:"required"`
+}
+
+func GetProperStruct() StructToValidate {
+	structToValidate := StructToValidate{}
+	structToValidate.Name = "testmkcl"
+	structToValidate.Email = "test@mkcl.org"
+	structToValidate.Age = 40
+	structToValidate.Mobile = "1234567890"
+	return structToValidate
+}
+
+func GetErrorStruct() StructToValidate {
+	structToValidate := StructToValidate{}
+	structToValidate.Name = ""
+	structToValidate.Email = "testmkcl.org"
+	structToValidate.Age = 40
+	structToValidate.Mobile = "1234567890"
+	return structToValidate
+}
+func GetEmptyStruct() StructToValidate {
+	structToValidate := StructToValidate{}
+	return structToValidate
+}
+
+// func TestValidateStruct(t *testing.T) {
+// 	structToValidate := GetProperStruct()
+// 	err := ValidateStruct(structToValidate)
+// 	assert.NoError(t, err, "This should not return error")
+// }
+// func TestValidateStructEmptyStruct(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	structToValidate := GetEmptyStruct()
+// 	err := ValidateStruct(structToValidate)
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// 	assert.Error(t, err, "This should return error")
+// }
+
+// func TestValidateStructInvalidStruct(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	structToValidate := GetErrorStruct()
+// 	err := ValidateStruct(structToValidate)
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// 	assert.Error(t, err, "This should return error")
+// }
+// func TestValidateStructTypeCheck(t *testing.T) {
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = true
+// 	structToValidate := GetProperStruct()
+// 	err := ValidateStruct(structToValidate)
+// 	errormdl.IsTestingNegetiveCaseOnCheckBool = false
+// 	assert.Error(t, err, "This should return error")
+// }
+
+// func BenchmarkValidateStruct(b *testing.B) {
+// 	structToValidate := GetProperStruct()
+// 	for i := 0; i < b.N; i++ {
+// 		ValidateStruct(structToValidate)
+// 	}
+// }
+
+// func TestNewJsonValidation(t *testing.T) {
+
+// 	// json.NewDecoder(v.Opts.Request.Body).Decode(v.Opts.Data)
+
+// 	requestBodyData := RequestBodyData{Email: "test@mkcl.org", Password: "test"}
+
+// 	dataByteArray, _ := ffjson.Marshal(&requestBodyData)
+
+// 	opts := requestValidator.Options{
+// 		Rules:    validationRules,
+// 		JSONData: dataByteArray,
+// 	}
+
+// 	validator := requestValidator.New(opts)
+// 	validationErrors := validator.ValidateJSONData()
+
+// 	loggermdl.LogSpot("Validation error : ", validationErrors)
+// }
+
+// func TestNewJsonValidationMDL(t *testing.T) {
+
+// 	// json.NewDecoder(v.Opts.Request.Body).Decode(v.Opts.Data)
+
+// 	// jsonData := RequestBodyData{Email: "test@mkcl.org", Password: "test"}
+// 	// dataByteArray, _ := ffjson.Marshal(&jsonData)
+
+// 	jsonString := `{"email":"test@mkcl.org","password":"test"}`
+
+// 	// fmt.Println("json string : ", string(dataByteArray))
+
+// 	validationErrors := ValidateJSONString(jsonString, validationRules, validationMessages)
+
+// 	loggermdl.LogSpot("Validation error : ", validationErrors)
+// }
+
+//Benchmarking method to test JSON validation without using http request
+// func BenchmarkValidateJSON(b *testing.B) {
+
+// 	jsonData := RequestBodyData{Email: "test@mkcl.org", Password: "test"}
+// 	dataByteArray, _ := ffjson.Marshal(&jsonData)
+
+// 	for i := 0; i < b.N; i++ {
+// 		ValidateJSONData(dataByteArray, validationRules, validationMessages)
+// 	}
+
+// 	// loggermdl.LogError("Count : ", requestValidator)
+// }
+
+//Benchmarking method to test JSON validation using http request
+// func BenchmarkValidateJSONRequest(b *testing.B) {
+
+// 	sunnyDayData := RequestBodyData{Email: "test@mkcl.org", Password: "test"}
+// 	sunnyDayByteArray, _ := ffjson.Marshal(&sunnyDayData)
+// 	sunnyDayHTTPRequest, _ := http.NewRequest("POST", "test.com", bytes.NewBufferString(string(sunnyDayByteArray)))
+// 	sunnyDayHTTPRequest.Header.Set("Content-Type", "application/json")
+
+// 	for i := 0; i < b.N; i++ {
+// 		ValidateRequest(sunnyDayHTTPRequest, validationRules, validationMessages)
+// 	}
+// 	// loggermdl.LogError("Count : ", requestValidator.Cnt)
+// }
+
+//Benchmarking method to test JSON string validation
+
+func BenchmarkValidateJSONString(b *testing.B) {
+
+	jsonString := `{"email":"tessdcet@mkcl.org"}`
+	// jsonString := `{"email":"test@mkcl.org","password":"test"}`
+
+	var validationRules = requestValidator.MapData{
+		"email": []string{"required", "email"},
+		// "password": []string{"required"},
+	}
+
+	for i := 0; i < b.N; i++ {
+		ValidateJSONString(jsonString, validationRules, validationMessages)
+	}
+}
+
+func TestValidateJSONString(t *testing.T) {
+	jsonString := `{"email": "xcvbnm,./dfghjkl"}`
+	// jsonString1 := `{"password":"testrfew@mkcl.org"}`
+	// jsonString := `{"email":"test@mkcl.org","password":"test"}`
+
+	var validationRules1 = requestValidator.MapData{
+		"email": []string{"required"},
+		// "name": []string{"required"},
+	}
+
+	// var validationRules2 = requestValidator.MapData{
+	// 	"password": []string{"required"},
+	// 	"email":    []string{"required"},
+	// }
+
+	mapErr := ValidateJSONString(jsonString, validationRules1, validationMessages)
+	if mapErr != nil {
+		t.Errorf("unexpected validation errors: %v", mapErr)
+	}
+	// mapErr = ValidateJSONString(jsonString1, validationRules2, validationMessages)
+	// if mapErr != nil {
+	// 	loggermdl.LogError(mapErr)
+	// }
+}
+
+//TestValidateGJSONResult - a required-but-empty field must be reported
+func TestValidateGJSONResult(t *testing.T) {
+	data := `{
+		"email":""
+	}`
+	rs := gjson.Parse(data)
+	validationRules := requestValidator.MapData{
+		"email": []string{"required"},
+	}
+	validationErrs := ValidateGJSONResult(&rs, validationRules, validationMessages)
+	if validationErrs == nil {
+		t.Error("expected validation errors for empty required email")
+	}
+}
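+
+// ExampleValidateJSONString is a hedged sketch of passing custom
+// messages; the "rule:message" format mirrors the commented-out rules
+// earlier in this file.
+func ExampleValidateJSONString() {
+	rules := requestValidator.MapData{
+		"email": []string{"required", "email"},
+	}
+	messages := requestValidator.MapData{
+		"email": []string{"required:Email required", "email:Invalid email"},
+	}
+	if errs := ValidateJSONString(`{"email":"not-an-email"}`, rules, messages); errs != nil {
+		loggermdl.LogError(errs)
+	}
+}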
diff --git a/v2/workerpoolmdl/workerpoolmdl.go b/v2/workerpoolmdl/workerpoolmdl.go
new file mode 100644
index 0000000000000000000000000000000000000000..a62f3d2a828abb20c35bfe0348371e334de190a6
--- /dev/null
+++ b/v2/workerpoolmdl/workerpoolmdl.go
@@ -0,0 +1,76 @@
+package workerpoolmdl
+
+import (
+	"sync"
+)
+
+// Pool is a worker group that runs a number of tasks at a
+// configured concurrency.
+type Pool struct {
+	Tasks []*Task
+
+	concurrency int
+	tasksChan   chan *Task
+	wg          sync.WaitGroup
+}
+
+// NewPool initializes a new pool with the given tasks and
+// at the given concurrency.
+func NewPool(tasks []*Task, concurrency int) *Pool {
+	return &Pool{
+		Tasks:       tasks,
+		concurrency: concurrency,
+		tasksChan:   make(chan *Task),
+	}
+}
+
+// Run runs all work within the pool and blocks until it's
+// finished.
+func (p *Pool) Run() {
+	for i := 0; i < p.concurrency; i++ {
+		go p.work()
+	}
+
+	p.wg.Add(len(p.Tasks))
+	for _, task := range p.Tasks {
+		p.tasksChan <- task
+	}
+
+	// Closing the channel lets each worker's range loop exit once the
+	// remaining tasks have been drained.
+	close(p.tasksChan)
+
+	p.wg.Wait()
+}
+
+// The work loop for any single goroutine.
+func (p *Pool) work() {
+	for task := range p.tasksChan {
+		task.Run(&p.wg)
+	}
+}
+
+// Task encapsulates a work item that should go in a work
+// pool.
+type Task struct {
+	// Err holds an error that occurred during a task. Its
+	// result is only meaningful after Run has been called
+	// for the pool that holds it.
+	Err error
+
+	Data interface{}
+
+	f func(data interface{}) error
+}
+
+// NewTask initializes a new task based on a given work
+// function.
+func NewTask(d interface{}, f func(data interface{}) error) *Task {
+	return &Task{Data: d, f: f}
+}
+
+// Run executes a Task and does the appropriate accounting via the
+// given sync.WaitGroup.
+func (t *Task) Run(wg *sync.WaitGroup) {
+	t.Err = t.f(t.Data)
+	wg.Done()
+}
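+
+// Usage sketch (the payload and worker function are illustrative):
+//
+//	tasks := []*Task{
+//		NewTask("job-1", func(data interface{}) error { return nil }),
+//	}
+//	pool := NewPool(tasks, 4)
+//	pool.Run() // blocks until every task has finished
+//	for _, task := range pool.Tasks {
+//		if task.Err != nil {
+//			// handle the per-task failure
+//		}
+//	}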
diff --git a/v2/workerpoolmdl/workerpoolmdl_test.go b/v2/workerpoolmdl/workerpoolmdl_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..90da14880b34eac6f7446f1651fcfe93b5bb94f5
--- /dev/null
+++ b/v2/workerpoolmdl/workerpoolmdl_test.go
@@ -0,0 +1,48 @@
+package workerpoolmdl
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var globalvar int
+var mutex = &sync.Mutex{}
+
+func TestWorkerPool(t *testing.T) {
+	globalvar = 0
+	// fn is the work each task performs: increment a shared counter.
+	fn := func(data interface{}) error {
+		mutex.Lock()
+		globalvar++
+		mutex.Unlock()
+
+		return nil
+	}
+
+	tasks := []*Task{}
+
+	for index := 0; index < 100; index++ {
+		tasks = append(tasks, NewTask(index, fn))
+	}
+
+	p := NewPool(tasks, 100)
+	p.Run()
+
+	var numErrors int
+	for _, task := range p.Tasks {
+		if task.Err != nil {
+			fmt.Println(task.Err)
+			numErrors++
+		}
+		if numErrors >= 10 {
+			fmt.Println("Too many errors.")
+			break
+		}
+	}
+
+	assert.Equal(t, 100, globalvar, "Count should match")
+
+}
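+
+// ExampleNewPool is a minimal sketch of driving the pool; the payload
+// is illustrative.
+func ExampleNewPool() {
+	tasks := []*Task{
+		NewTask(42, func(data interface{}) error {
+			fmt.Println(data)
+			return nil
+		}),
+	}
+	NewPool(tasks, 1).Run()
+	// Output: 42
+}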
diff --git a/v2/workerpoolmdl/workerpoolmdlv2.go b/v2/workerpoolmdl/workerpoolmdlv2.go
new file mode 100644
index 0000000000000000000000000000000000000000..50369d9c3ed775b47e55895906436a1704693971
--- /dev/null
+++ b/v2/workerpoolmdl/workerpoolmdlv2.go
@@ -0,0 +1,112 @@
+package workerpoolmdl
+
+import (
+	"context"
+	"sync"
+)
+
+// PoolWithContext is a worker group that runs a number of tasks at a
+// configured concurrency and can be canceled mid-run.
+type PoolWithContext struct {
+	Tasks          []*TaskWithContext
+	concurrency    int
+	tasksChan      chan *TaskWithContext
+	wg             sync.WaitGroup
+	CancelHandler  *cancelHandler
+	IsPoolCanceled bool
+}
+
+type cancelHandler struct {
+	Ctx        context.Context
+	CancelFunc context.CancelFunc
+}
+
+// NewPoolWithContext initializes a new pool with the given tasks and
+// at the given concurrency.
+func NewPoolWithContext(tasks []*TaskWithContext, concurrency int) *PoolWithContext {
+	cntx, cancelFunction := context.WithCancel(context.Background())
+
+	obj := cancelHandler{}
+
+	obj.Ctx = cntx
+	obj.CancelFunc = cancelFunction
+
+	return &PoolWithContext{
+		Tasks:         tasks,
+		concurrency:   concurrency,
+		tasksChan:     make(chan *TaskWithContext),
+		CancelHandler: &obj,
+	}
+}
+
+// Run runs all work within the pool and blocks until every queued task
+// has finished or the pool has been canceled. Selecting on the context
+// instead of reading IsPoolCanceled avoids a data race with Cancel, and
+// closing tasksChan on both paths prevents leaking worker goroutines.
+func (p *PoolWithContext) Run() {
+	p.wg.Add(p.concurrency)
+	for i := 0; i < p.concurrency; i++ {
+		go p.work()
+	}
+
+	for _, task := range p.Tasks {
+		select {
+		case <-p.CancelHandler.Ctx.Done():
+			// Canceled: stop queuing, let the workers drain and exit.
+			close(p.tasksChan)
+			p.wg.Wait()
+			return
+		case p.tasksChan <- task:
+		}
+	}
+
+	// All tasks queued: close the channel so the workers' range loops
+	// exit once the remaining tasks are consumed.
+	close(p.tasksChan)
+	p.wg.Wait()
+}
+
+// The work loop for any single goroutine.
+func (p *PoolWithContext) work() {
+	defer p.wg.Done()
+	for task := range p.tasksChan {
+		task.Run(p)
+	}
+}
+
+// TaskWithContext encapsulates a work item that should go in a work
+// pool.
+type TaskWithContext struct {
+	// Err holds an error that occurred during a task. Its
+	// result is only meaningful after Run has been called
+	// for the pool that holds it.
+	Err error
+
+	Data interface{}
+
+	f func(data interface{}) error
+}
+
+// NewTaskWithContext initializes a new task based on a given work
+// function.
+func NewTaskWithContext(d interface{}, f func(data interface{}) error) *TaskWithContext {
+	return &TaskWithContext{Data: d, f: f}
+}
+
+// Run executes a TaskWithContext, skipping the work if the pool has
+// already been canceled.
+func (t *TaskWithContext) Run(p *PoolWithContext) {
+	select {
+	case <-p.CancelHandler.Ctx.Done():
+		// Pool canceled; leave the task untouched.
+		return
+	default:
+		t.Err = t.f(t.Data)
+	}
+}
+
+//Cancel signals every worker to stop; queued tasks that have not yet
+//started are skipped.
+func (p *PoolWithContext) Cancel() {
+	if p != nil {
+		p.CancelHandler.CancelFunc()
+		p.IsPoolCanceled = true
+	}
+}
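+
+// Usage sketch (illustrative):
+//
+//	tasks := []*TaskWithContext{
+//		NewTaskWithContext("job-1", func(data interface{}) error { return nil }),
+//	}
+//	pool := NewPoolWithContext(tasks, 4)
+//	go func() {
+//		// some external condition decides the remaining work is unneeded
+//		pool.Cancel()
+//	}()
+//	pool.Run() // returns once workers drain or cancellation is observed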
diff --git a/v2/workerpoolmdl/workerpoolmdlv2_test.go b/v2/workerpoolmdl/workerpoolmdlv2_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f6ea611effa3c2f9537e7b029edf92ad20ce22d9
--- /dev/null
+++ b/v2/workerpoolmdl/workerpoolmdlv2_test.go
@@ -0,0 +1,71 @@
+package workerpoolmdl
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var count int
+var countMutex = &sync.Mutex{}
+
+//TestWorkerPoolWithContext - verifies cancel is triggered once count exceeds 500
+func TestWorkerPoolWithContext(t *testing.T) {
+	count = 0
+
+	tasks := []*TaskWithContext{}
+
+	for index := 0; index < 1000; index++ {
+		tasks = append(tasks, NewTaskWithContext(index, incrementCount))
+	}
+
+	pool := NewPoolWithContext(tasks, 10)
+
+	ticker := time.NewTicker(1 * time.Millisecond)
+	defer ticker.Stop()
+
+	go func() {
+		for range ticker.C {
+			// Read count under the mutex to avoid a data race with the workers.
+			countMutex.Lock()
+			current := count
+			countMutex.Unlock()
+			if current > 500 {
+				fmt.Println("cancelling tasks...")
+				pool.Cancel()
+				return
+			}
+		}
+	}()
+
+	pool.Run()
+
+	assert.GreaterOrEqual(t, count, 500, "Count should be greater than or equal to 500")
+
+}
+
+//TestWorkerpoolWithoutCancel - test without cancel trigger
+func TestWorkerpoolWithoutCancel(t *testing.T) {
+	count = 0
+
+	tasks := []*TaskWithContext{}
+
+	for index := 0; index < 1000; index++ {
+		tasks = append(tasks, NewTaskWithContext(index, incrementCount))
+	}
+
+	pool := NewPoolWithContext(tasks, 10)
+
+	pool.Run()
+
+	assert.Equal(t, 1000, count, "Count should equal 1000")
+}
+
+//incrementCount- increment count by 1
+func incrementCount(data interface{}) error {
+
+	countMutex.Lock()
+	count++
+	countMutex.Unlock()
+
+	return nil
+}
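+
+// ExampleNewPoolWithContext sketches an uncanceled run; the payload is
+// illustrative.
+func ExampleNewPoolWithContext() {
+	tasks := []*TaskWithContext{
+		NewTaskWithContext("ping", func(data interface{}) error {
+			fmt.Println(data)
+			return nil
+		}),
+	}
+	NewPoolWithContext(tasks, 1).Run()
+	// Output: ping
+}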