This repository was archived by the owner on Mar 26, 2020. It is now read-only.

Commit 27859b4

Oshank Kumar authored and kshlm committed
cluster-lock: fix getting cluster wide etcd lock
Instead of using a global etcd session, create a new etcd session every time a lock is created. If the existing session is reused for every lock, all etcd locks share the same lease ID, which leads to faulty lock behaviour.

Signed-off-by: Oshank Kumar <[email protected]>
1 parent 0827c74 commit 27859b4

File tree

1 file changed: 8 additions, 2 deletions


glusterd2/transaction/lock.go

Lines changed: 8 additions & 2 deletions
@@ -16,6 +16,7 @@ import (
 const (
 	lockPrefix        = "locks/"
 	lockObtainTimeout = 5 * time.Second
+	lockTTL           = 10
 )
 
 var (
@@ -163,12 +164,17 @@ func (l Locks) lock(lockID string) error {
 	logger.Debug("attempting to obtain lock")
 
 	key := lockPrefix + lockID
-	locker := concurrency.NewMutex(store.Store.Session, key)
+	s, err := concurrency.NewSession(store.Store.Client, concurrency.WithTTL(lockTTL))
+	if err != nil {
+		return err
+	}
+
+	locker := concurrency.NewMutex(s, key)
 
 	ctx, cancel := context.WithTimeout(store.Store.Ctx(), lockObtainTimeout)
 	defer cancel()
 
-	err := locker.Lock(ctx)
+	err = locker.Lock(ctx)
 	switch err {
 	case nil:
 		logger.Debug("lock obtained")
