You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

295 lines
8.6 KiB

  1. /*
  2. * Minio Cloud Storage, (C) 2016 Minio, Inc.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. package cmd
  17. import (
  18. "errors"
  19. "net/url"
  20. pathutil "path"
  21. "sync"
  22. "github.com/minio/dsync"
  23. )
// Global name space lock. Assigned by initNSLock(); guards namespace
// resources identified by <volume, path> pairs.
var globalNSMutex *nsLockMap
  26. // Initialize distributed locking only in case of distributed setup.
  27. // Returns if the setup is distributed or not on success.
  28. func initDsyncNodes(eps []*url.URL) error {
  29. cred := serverConfig.GetCredential()
  30. // Initialize rpc lock client information only if this instance is a distributed setup.
  31. clnts := make([]dsync.RPC, len(eps))
  32. myNode := -1
  33. for index, ep := range eps {
  34. if ep == nil {
  35. return errInvalidArgument
  36. }
  37. clnts[index] = newAuthClient(&authConfig{
  38. accessKey: cred.AccessKey,
  39. secretKey: cred.SecretKey,
  40. // Construct a new dsync server addr.
  41. secureConn: isSSL(),
  42. address: ep.Host,
  43. // Construct a new rpc path for the endpoint.
  44. path: pathutil.Join(lockRPCPath, getPath(ep)),
  45. loginMethod: "Dsync.LoginHandler",
  46. })
  47. if isLocalStorage(ep) && myNode == -1 {
  48. myNode = index
  49. }
  50. }
  51. return dsync.SetNodesWithClients(clnts, myNode)
  52. }
  53. // initNSLock - initialize name space lock map.
  54. func initNSLock(isDistXL bool) {
  55. globalNSMutex = &nsLockMap{
  56. isDistXL: isDistXL,
  57. lockMap: make(map[nsParam]*nsLock),
  58. counters: &lockStat{},
  59. }
  60. // Initialize nsLockMap with entry for instrumentation information.
  61. // Entries of <volume,path> -> stateInfo of locks
  62. globalNSMutex.debugLockMap = make(map[nsParam]*debugLockInfoPerVolumePath)
  63. }
// RWLocker - interface that any read-write locking library should implement.
// Satisfied by both *sync.RWMutex (local) and *dsync.DRWMutex (distributed),
// as selected in nsLockMap.lock().
type RWLocker interface {
	sync.Locker
	RLock()
	RUnlock()
}
// nsParam - carries name space resource. Used as the key of the lock
// and instrumentation maps.
type nsParam struct {
	volume string // Bucket/volume component of the resource.
	path   string // Object/path component of the resource.
}
// nsLock - provides primitives for locking critical namespace regions.
type nsLock struct {
	RWLocker      // Embedded locker: *sync.RWMutex locally, *dsync.DRWMutex when distributed.
	ref      uint // Count of holders/waiters; entry is removed from the map when it drops to 0.
}
// nsLockMap - namespace lock map, provides primitives to Lock,
// Unlock, RLock and RUnlock.
type nsLockMap struct {
	// Lock counter used for lock debugging.
	counters     *lockStat
	debugLockMap map[nsParam]*debugLockInfoPerVolumePath // Info for instrumentation on locks.
	// Indicates whether the locking service is part
	// of a distributed setup or not.
	isDistXL     bool
	lockMap      map[nsParam]*nsLock // Active locks keyed by <volume, path>.
	lockMapMutex sync.Mutex          // Guards lockMap and the ref counts of its entries.
}
  92. // Lock the namespace resource.
  93. func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock bool) {
  94. var nsLk *nsLock
  95. n.lockMapMutex.Lock()
  96. param := nsParam{volume, path}
  97. nsLk, found := n.lockMap[param]
  98. if !found {
  99. nsLk = &nsLock{
  100. RWLocker: func() RWLocker {
  101. if n.isDistXL {
  102. return dsync.NewDRWMutex(pathJoin(volume, path))
  103. }
  104. return &sync.RWMutex{}
  105. }(),
  106. ref: 0,
  107. }
  108. n.lockMap[param] = nsLk
  109. }
  110. nsLk.ref++ // Update ref count here to avoid multiple races.
  111. // Change the state of the lock to be blocked for the given
  112. // pair of <volume, path> and <OperationID> till the lock
  113. // unblocks. The lock for accessing `globalNSMutex` is held inside
  114. // the function itself.
  115. if err := n.statusNoneToBlocked(param, lockSource, opsID, readLock); err != nil {
  116. errorIf(err, "Failed to set lock state to blocked")
  117. }
  118. // Unlock map before Locking NS which might block.
  119. n.lockMapMutex.Unlock()
  120. // Locking here can block.
  121. if readLock {
  122. nsLk.RLock()
  123. } else {
  124. nsLk.Lock()
  125. }
  126. // Changing the status of the operation from blocked to
  127. // running. change the state of the lock to be running (from
  128. // blocked) for the given pair of <volume, path> and <OperationID>.
  129. if err := n.statusBlockedToRunning(param, lockSource, opsID, readLock); err != nil {
  130. errorIf(err, "Failed to set the lock state to running")
  131. }
  132. }
  133. // Unlock the namespace resource.
  134. func (n *nsLockMap) unlock(volume, path, opsID string, readLock bool) {
  135. // nsLk.Unlock() will not block, hence locking the map for the
  136. // entire function is fine.
  137. n.lockMapMutex.Lock()
  138. defer n.lockMapMutex.Unlock()
  139. param := nsParam{volume, path}
  140. if nsLk, found := n.lockMap[param]; found {
  141. if readLock {
  142. nsLk.RUnlock()
  143. } else {
  144. nsLk.Unlock()
  145. }
  146. if nsLk.ref == 0 {
  147. errorIf(errors.New("Namespace reference count cannot be 0"),
  148. "Invalid reference count detected")
  149. }
  150. if nsLk.ref != 0 {
  151. nsLk.ref--
  152. // delete the lock state entry for given operation ID.
  153. err := n.deleteLockInfoEntryForOps(param, opsID)
  154. if err != nil {
  155. errorIf(err, "Failed to delete lock info entry")
  156. }
  157. }
  158. if nsLk.ref == 0 {
  159. // Remove from the map if there are no more references.
  160. delete(n.lockMap, param)
  161. // delete the lock state entry for given
  162. // <volume, path> pair.
  163. err := n.deleteLockInfoEntryForVolumePath(param)
  164. if err != nil {
  165. errorIf(err, "Failed to delete lock info entry")
  166. }
  167. }
  168. }
  169. }
  170. // Lock - locks the given resource for writes, using a previously
  171. // allocated name space lock or initializing a new one.
  172. func (n *nsLockMap) Lock(volume, path, opsID string) {
  173. readLock := false // This is a write lock.
  174. lockSource := callerSource() // Useful for debugging
  175. n.lock(volume, path, lockSource, opsID, readLock)
  176. }
  177. // Unlock - unlocks any previously acquired write locks.
  178. func (n *nsLockMap) Unlock(volume, path, opsID string) {
  179. readLock := false
  180. n.unlock(volume, path, opsID, readLock)
  181. }
// RLock - locks the given resource for reads, using a previously
// allocated name space lock or initializing a new one.
func (n *nsLockMap) RLock(volume, path, opsID string) {
	readLock := true
	lockSource := callerSource() // Useful for debugging
	n.lock(volume, path, lockSource, opsID, readLock)
}
  188. // RUnlock - unlocks any previously acquired read locks.
  189. func (n *nsLockMap) RUnlock(volume, path, opsID string) {
  190. readLock := true
  191. n.unlock(volume, path, opsID, readLock)
  192. }
// ForceUnlock - forcefully unlock a lock based on name.
func (n *nsLockMap) ForceUnlock(volume, path string) {
	n.lockMapMutex.Lock()
	defer n.lockMapMutex.Unlock()
	// Clarification on operation:
	// - In case of FS or XL we call ForceUnlock on the local globalNSMutex
	// (since there is only a single server) which will cause the 'stuck'
	// mutex to be removed from the map. Existing operations for this
	// will continue to be blocked (and timeout). New operations on this
	// resource will use a new mutex and proceed normally.
	//
	// - In case of Distributed setup (using dsync), there is no need to call
	// ForceUnlock on the server where the lock was acquired and is presumably
	// 'stuck'. Instead dsync.ForceUnlock() will release the underlying locks
	// that participated in granting the lock. Any pending dsync locks that
	// are blocking can now proceed as normal and any new locks will also
	// participate normally.
	if n.isDistXL { // For distributed mode, broadcast ForceUnlock message.
		dsync.NewDRWMutex(pathJoin(volume, path)).ForceUnlock()
	}
	param := nsParam{volume, path}
	if _, found := n.lockMap[param]; found {
		// Remove lock from the map.
		delete(n.lockMap, param)
		// delete the lock state entry for given
		// <volume, path> pair.
		err := n.deleteLockInfoEntryForVolumePath(param)
		if err != nil {
			errorIf(err, "Failed to delete lock info entry")
		}
	}
}
// lockInstance - frontend/top-level interface for namespace locks.
type lockInstance struct {
	n                   *nsLockMap // Backing namespace lock map.
	volume, path, opsID string     // Resource identity and operation ID bound at creation.
}
  230. // NewNSLock - returns a lock instance for a given volume and
  231. // path. The returned lockInstance object encapsulates the nsLockMap,
  232. // volume, path and operation ID.
  233. func (n *nsLockMap) NewNSLock(volume, path string) *lockInstance {
  234. return &lockInstance{n, volume, path, getOpsID()}
  235. }
  236. // Lock - block until write lock is taken.
  237. func (li *lockInstance) Lock() {
  238. lockSource := callerSource()
  239. readLock := false
  240. li.n.lock(li.volume, li.path, lockSource, li.opsID, readLock)
  241. }
  242. // Unlock - block until write lock is released.
  243. func (li *lockInstance) Unlock() {
  244. readLock := false
  245. li.n.unlock(li.volume, li.path, li.opsID, readLock)
  246. }
  247. // RLock - block until read lock is taken.
  248. func (li *lockInstance) RLock() {
  249. lockSource := callerSource()
  250. readLock := true
  251. li.n.lock(li.volume, li.path, lockSource, li.opsID, readLock)
  252. }
  253. // RUnlock - block until read lock is released.
  254. func (li *lockInstance) RUnlock() {
  255. readLock := true
  256. li.n.unlock(li.volume, li.path, li.opsID, readLock)
  257. }