
/vendor/github.com/coreos/etcd/clientv3/lease.go

https://gitlab.com/unofficial-mirrors/kubernetes
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
    "sync"
    "time"

    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"
)

type (
    LeaseRevokeResponse pb.LeaseRevokeResponse
    LeaseID             int64
)

// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
    *pb.ResponseHeader
    ID    LeaseID
    TTL   int64
    Error string
}

// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
    *pb.ResponseHeader
    ID  LeaseID
    TTL int64
}

// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
    *pb.ResponseHeader
    ID LeaseID `json:"id"`

    // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
    TTL int64 `json:"ttl"`

    // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
    GrantedTTL int64 `json:"granted-ttl"`

    // Keys is the list of keys attached to this lease.
    Keys [][]byte `json:"keys"`
}

// LeaseStatus represents a lease status.
type LeaseStatus struct {
    ID LeaseID `json:"id"`
    // TODO: TTL int64
}
const (
    // defaultTTL is the assumed lease TTL used for the first keepalive
    // deadline before the actual TTL is known to the client.
    defaultTTL = 5 * time.Second
    // leaseResponseChSize is a small buffer to store unsent lease responses.
    leaseResponseChSize = 16
    // NoLease is a lease ID for the absence of a lease.
    NoLease LeaseID = 0
    // retryConnWait is how long to wait before retrying a request after an error.
    retryConnWait = 500 * time.Millisecond
)

// ErrKeepAliveHalted is returned if the client keep alive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
    Reason error
}

func (e ErrKeepAliveHalted) Error() string {
    s := "etcdclient: leases keep alive halted"
    if e.Reason != nil {
        s += ": " + e.Reason.Error()
    }
    return s
}
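// Illustrative sketch (not from this file; `cli` is an assumed
// *clientv3.Client): because KeepAlive returns ErrKeepAliveHalted by value,
// a caller can detect a halted keepalive loop with a type assertion and fall
// back to manual renewal via KeepAliveOnce.
//
//    ch, err := cli.KeepAlive(ctx, leaseID)
//    if _, halted := err.(ErrKeepAliveHalted); halted {
//        // the lessor's receive loop has stopped; KeepAliveOnce still works
//        _, err = cli.KeepAliveOnce(ctx, leaseID)
//    }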
type Lease interface {
    // Grant creates a new lease.
    Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

    // Revoke revokes the given lease.
    Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

    // TimeToLive retrieves the lease information of the given lease ID.
    TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

    // KeepAlive keeps the given lease alive forever.
    KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

    // KeepAliveOnce renews the lease once. In most cases, KeepAlive
    // should be used instead of KeepAliveOnce.
    KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

    // Close releases all resources Lease keeps for efficient communication
    // with the etcd server.
    Close() error
}
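// A minimal usage sketch of the Lease interface (assumed names, not from
// this file: `cli` is a *clientv3.Client, `ctx` a context.Context): grant a
// 10-second lease, attach a key to it, then keep it alive until the channel
// closes on revoke, expiration, or context cancelation.
//
//    grant, err := cli.Grant(ctx, 10)
//    if err != nil {
//        return err
//    }
//    if _, err = cli.Put(ctx, "service/instance", "addr", WithLease(grant.ID)); err != nil {
//        return err
//    }
//    ch, kaerr := cli.KeepAlive(ctx, grant.ID)
//    if kaerr != nil {
//        return kaerr
//    }
//    for ka := range ch {
//        _ = ka.TTL // refreshed remaining TTL in seconds
//    }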
type lessor struct {
    mu sync.Mutex // guards all fields

    // donec is closed and loopErr is set when recvKeepAliveLoop stops
    donec   chan struct{}
    loopErr error

    remote pb.LeaseClient

    stream       pb.Lease_LeaseKeepAliveClient
    streamCancel context.CancelFunc

    stopCtx    context.Context
    stopCancel context.CancelFunc

    keepAlives map[LeaseID]*keepAlive

    // firstKeepAliveTimeout is the timeout for the first keepalive request
    // before the actual TTL is known to the lease client
    firstKeepAliveTimeout time.Duration

    // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
    firstKeepAliveOnce sync.Once

    callOpts []grpc.CallOption
}

// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
    chs  []chan<- *LeaseKeepAliveResponse
    ctxs []context.Context
    // deadline is the time the keep alive channels close if no response arrives
    deadline time.Time
    // nextKeepAlive is when to send the next keep alive message
    nextKeepAlive time.Time
    // donec is closed on lease revoke, expiration, or cancel.
    donec chan struct{}
}
func NewLease(c *Client) Lease {
    return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
}

func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
    l := &lessor{
        donec:                 make(chan struct{}),
        keepAlives:            make(map[LeaseID]*keepAlive),
        remote:                remote,
        firstKeepAliveTimeout: keepAliveTimeout,
    }
    // a timeout of exactly time.Second means NewLease was called with a zero
    // DialTimeout; fall back to the assumed default TTL
    if l.firstKeepAliveTimeout == time.Second {
        l.firstKeepAliveTimeout = defaultTTL
    }
    if c != nil {
        l.callOpts = c.callOpts
    }
    reqLeaderCtx := WithRequireLeader(context.Background())
    l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
    return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
    r := &pb.LeaseGrantRequest{TTL: ttl}
    resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
    if err == nil {
        gresp := &LeaseGrantResponse{
            ResponseHeader: resp.GetHeader(),
            ID:             LeaseID(resp.ID),
            TTL:            resp.TTL,
            Error:          resp.Error,
        }
        return gresp, nil
    }
    return nil, toErr(ctx, err)
}

func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
    r := &pb.LeaseRevokeRequest{ID: int64(id)}
    resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
    if err == nil {
        return (*LeaseRevokeResponse)(resp), nil
    }
    return nil, toErr(ctx, err)
}

func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
    r := toLeaseTimeToLiveRequest(id, opts...)
    resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
    if err == nil {
        gresp := &LeaseTimeToLiveResponse{
            ResponseHeader: resp.GetHeader(),
            ID:             LeaseID(resp.ID),
            TTL:            resp.TTL,
            GrantedTTL:     resp.GrantedTTL,
            Keys:           resp.Keys,
        }
        return gresp, nil
    }
    return nil, toErr(ctx, err)
}

func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
    ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

    l.mu.Lock()
    // ensure that recvKeepAliveLoop is still running
    select {
    case <-l.donec:
        err := l.loopErr
        l.mu.Unlock()
        close(ch)
        return ch, ErrKeepAliveHalted{Reason: err}
    default:
    }
    ka, ok := l.keepAlives[id]
    if !ok {
        // create fresh keep alive
        ka = &keepAlive{
            chs:           []chan<- *LeaseKeepAliveResponse{ch},
            ctxs:          []context.Context{ctx},
            deadline:      time.Now().Add(l.firstKeepAliveTimeout),
            nextKeepAlive: time.Now(),
            donec:         make(chan struct{}),
        }
        l.keepAlives[id] = ka
    } else {
        // add channel and context to existing keep alive
        ka.ctxs = append(ka.ctxs, ctx)
        ka.chs = append(ka.chs, ch)
    }
    l.mu.Unlock()

    go l.keepAliveCtxCloser(id, ctx, ka.donec)
    l.firstKeepAliveOnce.Do(func() {
        go l.recvKeepAliveLoop()
        go l.deadlineLoop()
    })

    return ch, nil
}

func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
    for {
        resp, err := l.keepAliveOnce(ctx, id)
        if err == nil {
            if resp.TTL <= 0 {
                err = rpctypes.ErrLeaseNotFound
            }
            return resp, err
        }
        if isHaltErr(ctx, err) {
            return nil, toErr(ctx, err)
        }
    }
}
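// A manual-renewal sketch with KeepAliveOnce (assumed names, not from this
// file: `cli` is a *clientv3.Client): renew at roughly one-third of the
// returned TTL, stopping once the lease is gone.
//
//    for {
//        ka, err := cli.KeepAliveOnce(ctx, leaseID)
//        if err == rpctypes.ErrLeaseNotFound {
//            return err // lease expired or was revoked
//        } else if err != nil {
//            return err
//        }
//        time.Sleep(time.Duration(ka.TTL/3) * time.Second)
//    }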
func (l *lessor) Close() error {
    l.stopCancel()
    // close for synchronous teardown if stream goroutines never launched
    l.firstKeepAliveOnce.Do(func() { close(l.donec) })
    <-l.donec
    return nil
}
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
    select {
    case <-donec:
        return
    case <-l.donec:
        return
    case <-ctx.Done():
    }

    l.mu.Lock()
    defer l.mu.Unlock()

    ka, ok := l.keepAlives[id]
    if !ok {
        return
    }

    // close channel and remove context if still associated with keep alive
    for i, c := range ka.ctxs {
        if c == ctx {
            close(ka.chs[i])
            ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
            ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
            break
        }
    }
    // remove the keepalive if there are no more listeners
    if len(ka.chs) == 0 {
        delete(l.keepAlives, id)
    }
}

// closeRequireLeader scans keepAlives for ctxs that have require leader
// and closes the associated channels.
func (l *lessor) closeRequireLeader() {
    l.mu.Lock()
    defer l.mu.Unlock()
    for _, ka := range l.keepAlives {
        reqIdxs := 0
        // find all required leader channels, close, mark as nil
        for i, ctx := range ka.ctxs {
            md, ok := metadata.FromOutgoingContext(ctx)
            if !ok {
                continue
            }
            ks := md[rpctypes.MetadataRequireLeaderKey]
            if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
                continue
            }
            close(ka.chs[i])
            ka.chs[i] = nil
            reqIdxs++
        }
        if reqIdxs == 0 {
            continue
        }
        // remove all channels that required a leader from keepalive
        newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
        newCtxs := make([]context.Context, len(newChs))
        newIdx := 0
        for i := range ka.chs {
            if ka.chs[i] == nil {
                continue
            }
            // index both slices with i so each surviving channel keeps its own context
            newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
            newIdx++
        }
        ka.chs, ka.ctxs = newChs, newCtxs
    }
}
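// Opting in happens on the caller's side (a sketch with assumed names, not
// from this file): a keepalive registered under a WithRequireLeader context
// is among the channels closeRequireLeader closes when the cluster reports
// no leader.
//
//    ctx := WithRequireLeader(context.Background())
//    ch, err := cli.KeepAlive(ctx, leaseID)
//    // ch is closed if the etcd cluster loses its leader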
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
    cctx, cancel := context.WithCancel(ctx)
    defer cancel()

    stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
    if err != nil {
        return nil, toErr(ctx, err)
    }

    err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
    if err != nil {
        return nil, toErr(ctx, err)
    }

    resp, rerr := stream.Recv()
    if rerr != nil {
        return nil, toErr(ctx, rerr)
    }

    karesp := &LeaseKeepAliveResponse{
        ResponseHeader: resp.GetHeader(),
        ID:             LeaseID(resp.ID),
        TTL:            resp.TTL,
    }
    return karesp, nil
}

func (l *lessor) recvKeepAliveLoop() (gerr error) {
    defer func() {
        l.mu.Lock()
        close(l.donec)
        l.loopErr = gerr
        for _, ka := range l.keepAlives {
            ka.close()
        }
        l.keepAlives = make(map[LeaseID]*keepAlive)
        l.mu.Unlock()
    }()

    for {
        stream, err := l.resetRecv()
        if err != nil {
            if canceledByCaller(l.stopCtx, err) {
                return err
            }
        } else {
            for {
                resp, err := stream.Recv()
                if err != nil {
                    if canceledByCaller(l.stopCtx, err) {
                        return err
                    }
                    if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
                        l.closeRequireLeader()
                    }
                    break
                }
                l.recvKeepAlive(resp)
            }
        }

        select {
        case <-time.After(retryConnWait):
            continue
        case <-l.stopCtx.Done():
            return l.stopCtx.Err()
        }
    }
}

// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
    sctx, cancel := context.WithCancel(l.stopCtx)
    stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
    if err != nil {
        cancel()
        return nil, err
    }

    l.mu.Lock()
    defer l.mu.Unlock()
    if l.stream != nil && l.streamCancel != nil {
        l.streamCancel()
    }

    l.streamCancel = cancel
    l.stream = stream

    go l.sendKeepAliveLoop(stream)
    return stream, nil
}

// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
    karesp := &LeaseKeepAliveResponse{
        ResponseHeader: resp.GetHeader(),
        ID:             LeaseID(resp.ID),
        TTL:            resp.TTL,
    }

    l.mu.Lock()
    defer l.mu.Unlock()

    ka, ok := l.keepAlives[karesp.ID]
    if !ok {
        return
    }

    if karesp.TTL <= 0 {
        // lease expired; close all keep alive channels
        delete(l.keepAlives, karesp.ID)
        ka.close()
        return
    }

    // send update to all channels
    nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
    ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
    for _, ch := range ka.chs {
        select {
        case ch <- karesp:
            ka.nextKeepAlive = nextKeepAlive
        default:
        }
    }
}
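// Worked example for the scheduling above: a response with TTL=60 sets the
// expiry deadline 60s out and the next keepalive send 20s (TTL/3) out, so
// roughly two more send attempts fit in before deadlineLoop would reap the
// channels.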
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
    for {
        select {
        case <-time.After(time.Second):
        case <-l.donec:
            return
        }
        now := time.Now()
        l.mu.Lock()
        for id, ka := range l.keepAlives {
            if ka.deadline.Before(now) {
                // waited too long for response; lease may be expired
                ka.close()
                delete(l.keepAlives, id)
            }
        }
        l.mu.Unlock()
    }
}

// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
    for {
        var tosend []LeaseID

        now := time.Now()
        l.mu.Lock()
        for id, ka := range l.keepAlives {
            if ka.nextKeepAlive.Before(now) {
                tosend = append(tosend, id)
            }
        }
        l.mu.Unlock()

        for _, id := range tosend {
            r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
            if err := stream.Send(r); err != nil {
                // TODO do something with this error?
                return
            }
        }

        select {
        case <-time.After(500 * time.Millisecond):
        case <-stream.Context().Done():
            return
        case <-l.donec:
            return
        case <-l.stopCtx.Done():
            return
        }
    }
}

func (ka *keepAlive) close() {
    close(ka.donec)
    for _, ch := range ka.chs {
        close(ch)
    }
}