
/distribution/xfer/transfer.go

https://gitlab.com/vectorci/docker-1

package xfer

import (
    "runtime"
    "sync"

    "github.com/docker/docker/pkg/progress"
    "golang.org/x/net/context"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
    Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
    return e.Err.Error()
}

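// Hypothetical usage sketch (added commentary, not part of the original
// file): a transfer function can wrap a permanent failure in DoNotRetry so
// that the retry loop in the download/upload manager gives up immediately
// instead of backing off and retrying. isFatal is a hypothetical predicate.
//
//    if isFatal(err) {
//        return DoNotRetry{Err: err}
//    }
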
// Watcher is returned by Watch and can be passed to Release to stop watching.
type Watcher struct {
    // signalChan is used to signal to the watcher goroutine that
    // new progress information is available, or that the transfer
    // has finished.
    signalChan chan struct{}
    // releaseChan signals to the watcher goroutine that the watcher
    // should be detached.
    releaseChan chan struct{}
    // running remains open as long as the watcher is watching the
    // transfer. It gets closed if the transfer finishes or the
    // watcher is detached.
    running chan struct{}
}

// Transfer represents an in-progress transfer.
type Transfer interface {
    Watch(progressOutput progress.Output) *Watcher
    Release(*Watcher)
    Context() context.Context
    Close()
    Done() <-chan struct{}
    Released() <-chan struct{}
    Broadcast(masterProgressChan <-chan progress.Progress)
}

type transfer struct {
    mu sync.Mutex

    ctx    context.Context
    cancel context.CancelFunc

    // watchers keeps track of the goroutines monitoring progress output,
    // indexed by the channels that release them.
    watchers map[chan struct{}]*Watcher

    // lastProgress is the most recently received progress event.
    lastProgress progress.Progress
    // hasLastProgress is true when lastProgress has been set.
    hasLastProgress bool

    // running remains open as long as the transfer is in progress.
    running chan struct{}
    // released stays open until all watchers release the transfer and
    // the transfer is no longer tracked by the transfer manager.
    released chan struct{}

    // broadcastDone is true if the master progress channel has closed.
    broadcastDone bool
    // closed is true if Close has been called.
    closed bool
    // broadcastSyncChan allows watchers to "ping" the broadcasting
    // goroutine to wait for it to deplete its input channel. This ensures
    // a detaching watcher won't miss an event that was sent before it
    // started detaching.
    broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
    t := &transfer{
        watchers:          make(map[chan struct{}]*Watcher),
        running:           make(chan struct{}),
        released:          make(chan struct{}),
        broadcastSyncChan: make(chan struct{}),
    }

    // This uses context.Background instead of a caller-supplied context
    // so that a transfer won't be cancelled automatically if the client
    // which requested it is ^C'd (there could be other viewers).
    t.ctx, t.cancel = context.WithCancel(context.Background())

    return t
}

// Broadcast copies the progress and error output to all viewers.
func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
    for {
        var (
            p  progress.Progress
            ok bool
        )
        select {
        case p, ok = <-masterProgressChan:
        default:
            // We've depleted the channel, so now we can handle
            // reads on broadcastSyncChan to let detaching watchers
            // know we're caught up.
            select {
            case <-t.broadcastSyncChan:
                continue
            case p, ok = <-masterProgressChan:
            }
        }

        t.mu.Lock()
        if ok {
            t.lastProgress = p
            t.hasLastProgress = true
            for _, w := range t.watchers {
                select {
                case w.signalChan <- struct{}{}:
                default:
                }
            }
        } else {
            t.broadcastDone = true
        }
        t.mu.Unlock()
        if !ok {
            close(t.running)
            return
        }
    }
}

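// Hypothetical usage sketch (added commentary, not part of the original
// file): Broadcast runs in its own goroutine and consumes the master
// progress channel until the producer closes it, which closes Done().
// progress.ChanOutput and progress.Update are assumed helpers from
// pkg/progress that adapt a channel into an Output and emit an event.
//
//    masterProgressChan := make(chan progress.Progress)
//    go t.Broadcast(masterProgressChan)
//    out := progress.ChanOutput(masterProgressChan)
//    progress.Update(out, "layer", "Downloading")
//    close(masterProgressChan) // marks the transfer finished
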
// Watch adds a watcher to the transfer. The supplied progress.Output
// receives progress updates until the transfer finishes.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
    t.mu.Lock()
    defer t.mu.Unlock()

    w := &Watcher{
        releaseChan: make(chan struct{}),
        signalChan:  make(chan struct{}),
        running:     make(chan struct{}),
    }

    t.watchers[w.releaseChan] = w

    if t.broadcastDone {
        close(w.running)
        return w
    }

    go func() {
        defer func() {
            close(w.running)
        }()
        var (
            done           bool
            lastWritten    progress.Progress
            hasLastWritten bool
        )
        for {
            t.mu.Lock()
            hasLastProgress := t.hasLastProgress
            lastProgress := t.lastProgress
            t.mu.Unlock()

            // Make sure we don't write the last progress item
            // twice.
            if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
                progressOutput.WriteProgress(lastProgress)
                lastWritten = lastProgress
                hasLastWritten = true
            }

            if done {
                return
            }

            select {
            case <-w.signalChan:
            case <-w.releaseChan:
                done = true
                // Since the watcher is going to detach, make
                // sure the broadcaster is caught up so we
                // don't miss anything.
                select {
                case t.broadcastSyncChan <- struct{}{}:
                case <-t.running:
                }
            case <-t.running:
                done = true
            }
        }
    }()

    return w
}

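// Hypothetical usage sketch (added commentary, not part of the original
// file): every Watch must be paired with a Release once the caller is done
// observing, even if the transfer has already finished.
//
//    w := xfer.Watch(progressOutput)
//    <-xfer.Done()   // wait for completion or cancellation
//    xfer.Release(w) // detach; blocks until the watcher goroutine exits
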
// Release is the inverse of Watch; it indicates that the watcher no longer
// wants to be notified about the progress of the transfer. All calls to Watch
// must be paired with later calls to Release so that the lifecycle of the
// transfer is properly managed.
func (t *transfer) Release(watcher *Watcher) {
    t.mu.Lock()
    delete(t.watchers, watcher.releaseChan)

    if len(t.watchers) == 0 {
        if t.closed {
            // released may have been closed already if all
            // watchers were released, then another one was added
            // while waiting for a previous watcher goroutine to
            // finish.
            select {
            case <-t.released:
            default:
                close(t.released)
            }
        } else {
            t.cancel()
        }
    }
    t.mu.Unlock()

    close(watcher.releaseChan)
    // Block until the watcher goroutine completes
    <-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
    // Note that this doesn't return t.ctx.Done() because that channel will
    // be closed the moment Cancel is called, and we need to return a
    // channel that blocks until a cancellation is actually acknowledged by
    // the transfer function.
    return t.running
}

// Released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transfer manager.
func (t *transfer) Released() <-chan struct{} {
    return t.released
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
    return t.ctx
}

// Close is called by the transfer manager when the transfer is no longer
// being tracked.
func (t *transfer) Close() {
    t.mu.Lock()
    t.closed = true
    if len(t.watchers) == 0 {
        close(t.released)
    }
    t.mu.Unlock()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It must not block; the goroutine it starts should wait until
// the start channel is closed before transferring any data. If the function
// closes inactive, that signals to the transfer manager that the job is no
// longer actively moving data - for example, it may be waiting for a
// dependent transfer to finish. This prevents it from taking up a slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer

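// A minimal DoFunc sketch (hypothetical; the real implementations live in
// the download and upload managers). The function returns immediately; the
// goroutine it spawns waits for the start signal, emits progress, and
// closes progressChan when done, which ends Broadcast and closes Done().
// progress.ChanOutput and progress.Update are assumed pkg/progress helpers.
//
//    func exampleDoFunc(id string) DoFunc {
//        return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
//            t := NewTransfer()
//            go func() {
//                defer close(progressChan)
//                <-start // wait for a concurrency slot
//                progress.Update(progress.ChanOutput(progressChan), id, "Transferring")
//            }()
//            return t
//        }
//    }
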
// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
    // Transfer checks if a transfer with the given key is in progress. If
    // so, it returns progress and error output from that transfer.
    // Otherwise, it will call xferFunc to initiate the transfer.
    Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
    // SetConcurrency sets the concurrencyLimit so that it can be adjusted
    // on daemon reload.
    SetConcurrency(concurrency int)
}

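// Hypothetical usage sketch (added commentary, not part of the original
// file): callers asking for the same key share one underlying transfer and
// each get their own watcher. exampleDoFunc is the hypothetical DoFunc
// sketched above.
//
//    tm := NewTransferManager(3) // at most 3 transfers run at once
//    xfer, w := tm.Transfer("sha256:abc", exampleDoFunc("sha256:abc"), progressOutput)
//    <-xfer.Done()
//    xfer.Release(w)
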
type transferManager struct {
    mu sync.Mutex

    concurrencyLimit int
    activeTransfers  int
    transfers        map[string]Transfer
    waitingTransfers []chan struct{}
}

// NewTransferManager returns a new TransferManager.
func NewTransferManager(concurrencyLimit int) TransferManager {
    return &transferManager{
        concurrencyLimit: concurrencyLimit,
        transfers:        make(map[string]Transfer),
    }
}

// SetConcurrency sets the concurrencyLimit.
func (tm *transferManager) SetConcurrency(concurrency int) {
    tm.mu.Lock()
    tm.concurrencyLimit = concurrency
    tm.mu.Unlock()
}

// Transfer checks if a transfer matching the given key is in progress. If not,
// it starts one by calling xferFunc. The caller supplies a progress.Output
// which receives progress output from the transfer.
func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
    tm.mu.Lock()
    defer tm.mu.Unlock()

    for {
        xfer, present := tm.transfers[key]
        if !present {
            break
        }
        // Transfer is already in progress.
        watcher := xfer.Watch(progressOutput)

        select {
        case <-xfer.Context().Done():
            // We don't want to watch a transfer that has been cancelled.
            // Wait for it to be removed from the map and try again.
            xfer.Release(watcher)
            tm.mu.Unlock()
            // The goroutine that removes this transfer from the
            // map is also waiting for xfer.Done(), so yield to it.
            // This could be avoided by adding a Closed method
            // to Transfer to allow explicitly waiting for it to be
            // removed from the map, but forcing a scheduling round
            // in this very rare case seems better than bloating the
            // interface definition.
            runtime.Gosched()
            <-xfer.Done()
            tm.mu.Lock()
        default:
            return xfer, watcher
        }
    }

    start := make(chan struct{})
    inactive := make(chan struct{})

    if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
        close(start)
        tm.activeTransfers++
    } else {
        tm.waitingTransfers = append(tm.waitingTransfers, start)
    }

    masterProgressChan := make(chan progress.Progress)
    xfer := xferFunc(masterProgressChan, start, inactive)
    watcher := xfer.Watch(progressOutput)
    go xfer.Broadcast(masterProgressChan)
    tm.transfers[key] = xfer

    // When the transfer is finished, remove from the map.
    go func() {
        for {
            select {
            case <-inactive:
                tm.mu.Lock()
                tm.inactivate(start)
                tm.mu.Unlock()
                inactive = nil
            case <-xfer.Done():
                tm.mu.Lock()
                if inactive != nil {
                    tm.inactivate(start)
                }
                delete(tm.transfers, key)
                tm.mu.Unlock()
                xfer.Close()
                return
            }
        }
    }()

    return xfer, watcher
}

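// Hypothetical illustration (added commentary, not part of the original
// file): two callers requesting the same key share the same Transfer, so
// the underlying data moves only once while both receive progress output.
//
//    x1, w1 := tm.Transfer("sha256:abc", exampleDoFunc("sha256:abc"), out1)
//    x2, w2 := tm.Transfer("sha256:abc", exampleDoFunc("sha256:abc"), out2)
//    // x1 == x2: the second call attached a watcher to the existing
//    // transfer instead of starting a new one.
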
func (tm *transferManager) inactivate(start chan struct{}) {
    // If the transfer was started, remove it from the activeTransfers
    // count.
    select {
    case <-start:
        // Start next transfer if any are waiting
        if len(tm.waitingTransfers) != 0 {
            close(tm.waitingTransfers[0])
            tm.waitingTransfers = tm.waitingTransfers[1:]
        } else {
            tm.activeTransfers--
        }
    default:
    }
}
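
// Illustration (added commentary, not part of the original file): with a
// concurrency limit of 1 and one queued transfer, finishing the active
// transfer hands the freed slot directly to the queued one by closing its
// start channel, rather than decrementing activeTransfers:
//
//    // activeTransfers == 1, waitingTransfers == [startB]
//    tm.inactivate(startA) // closes startB; activeTransfers stays 1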