
/scheduler/client/client_test.go

https://gitlab.com/suo/tinykv

// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
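
// client_test.go exercises the scheduler client against an in-process test
// server: TSO allocation, region lookups (GetRegion, GetPrevRegion,
// ScanRegions, GetRegionByID), store queries, and GC safe point updates.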

package pd

import (
	"context"
	"math"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
	"github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb"
	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid"
	"github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil"
	"github.com/pingcap-incubator/tinykv/scheduler/server"
	"github.com/pingcap-incubator/tinykv/scheduler/server/core"
	. "github.com/pingcap/check"
)

func TestClient(t *testing.T) {
	server.EnableZap = true
	TestingT(t)
}

var _ = Suite(&testClientSuite{})

type idAllocator struct {
	allocator *mockid.IDAllocator
}

func (i *idAllocator) alloc() uint64 {
	id, _ := i.allocator.Alloc()
	return id
}

var (
	regionIDAllocator = &idAllocator{allocator: &mockid.IDAllocator{}}

	// Note: IDs below are entirely arbitrary. They are only for checking
	// whether GetRegion/GetStore works.
	// If we alloc ID in client in the future, these IDs must be updated.
	stores = []*metapb.Store{
		{Id: 1,
			Address: "localhost:1",
		},
		{Id: 2,
			Address: "localhost:2",
		},
		{Id: 3,
			Address: "localhost:3",
		},
		{Id: 4,
			Address: "localhost:4",
		},
	}

	peers = []*metapb.Peer{
		{Id: regionIDAllocator.alloc(),
			StoreId: stores[0].GetId(),
		},
		{Id: regionIDAllocator.alloc(),
			StoreId: stores[1].GetId(),
		},
		{Id: regionIDAllocator.alloc(),
			StoreId: stores[2].GetId(),
		},
	}
)

type testClientSuite struct {
	cleanup             server.CleanupFunc
	srv                 *server.Server
	client              Client
	grpcSchedulerClient schedulerpb.SchedulerClient
	regionHeartbeat     schedulerpb.Scheduler_RegionHeartbeatClient
}

func (s *testClientSuite) SetUpSuite(c *C) {
	var err error
	s.srv, s.cleanup, err = server.NewTestServer(c)
	c.Assert(err, IsNil)
	s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.srv.GetAddr())

	mustWaitLeader(c, map[string]*server.Server{s.srv.GetAddr(): s.srv})
	bootstrapServer(c, newHeader(s.srv), s.grpcSchedulerClient)

	s.client, err = NewClient(s.srv.GetEndpoints(), SecurityOption{})
	c.Assert(err, IsNil)
	s.regionHeartbeat, err = s.grpcSchedulerClient.RegionHeartbeat(context.Background())
	c.Assert(err, IsNil)

	cluster := s.srv.GetRaftCluster()
	c.Assert(cluster, NotNil)
	for _, store := range stores {
		s.srv.PutStore(context.Background(), &schedulerpb.PutStoreRequest{Header: newHeader(s.srv), Store: store})
	}
}

func (s *testClientSuite) TearDownSuite(c *C) {
	s.client.Close()
	s.cleanup()
}
func mustWaitLeader(c *C, svrs map[string]*server.Server) *server.Server {
	for i := 0; i < 500; i++ {
		for _, s := range svrs {
			if !s.IsClosed() && s.GetMember().IsLeader() {
				return s
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	c.Fatal("no leader")
	return nil
}

func newHeader(srv *server.Server) *schedulerpb.RequestHeader {
	return &schedulerpb.RequestHeader{
		ClusterId: srv.ClusterID(),
	}
}

func bootstrapServer(c *C, header *schedulerpb.RequestHeader, client schedulerpb.SchedulerClient) {
	req := &schedulerpb.BootstrapRequest{
		Header: header,
		Store:  stores[0],
	}
	_, err := client.Bootstrap(context.Background(), req)
	c.Assert(err, IsNil)
}

func (s *testClientSuite) TestTSO(c *C) {
	var tss []int64
	for i := 0; i < 100; i++ {
		p, l, err := s.client.GetTS(context.Background())
		c.Assert(err, IsNil)
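		// Pack the physical (high bits) and logical (low 18 bits) parts into a
		// single int64 so consecutive timestamps can be compared directly.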
		tss = append(tss, p<<18+l)
	}

	var last int64
	for _, ts := range tss {
		c.Assert(ts, Greater, last)
		last = ts
	}
}

func (s *testClientSuite) TestTSORace(c *C) {
	var wg sync.WaitGroup
	begin := make(chan struct{})
	count := 10
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func() {
			<-begin
			for i := 0; i < 100; i++ {
				_, _, err := s.client.GetTS(context.Background())
				c.Assert(err, IsNil)
			}
			wg.Done()
		}()
	}
	close(begin)
	wg.Wait()
}

func (s *testClientSuite) TestGetRegion(c *C) {
	regionID := regionIDAllocator.alloc()
	region := &metapb.Region{
		Id: regionID,
		RegionEpoch: &metapb.RegionEpoch{
			ConfVer: 1,
			Version: 1,
		},
		Peers: peers,
	}
	req := &schedulerpb.RegionHeartbeatRequest{
		Header: newHeader(s.srv),
		Region: region,
		Leader: peers[0],
	}
	err := s.regionHeartbeat.Send(req)
	c.Assert(err, IsNil)

	testutil.WaitUntil(c, func(c *C) bool {
		r, leader, err := s.client.GetRegion(context.Background(), []byte("a"))
		c.Assert(err, IsNil)
		return c.Check(r, DeepEquals, region) &&
			c.Check(leader, DeepEquals, peers[0])
	})
	c.Succeed()
}

func (s *testClientSuite) TestGetPrevRegion(c *C) {
	regionLen := 10
	regions := make([]*metapb.Region, 0, regionLen)
	for i := 0; i < regionLen; i++ {
		regionID := regionIDAllocator.alloc()
		r := &metapb.Region{
			Id: regionID,
			RegionEpoch: &metapb.RegionEpoch{
				ConfVer: 1,
				Version: 1,
			},
			StartKey: []byte{byte(i)},
			EndKey:   []byte{byte(i + 1)},
			Peers:    peers,
		}
		regions = append(regions, r)
		req := &schedulerpb.RegionHeartbeatRequest{
			Header: newHeader(s.srv),
			Region: r,
			Leader: peers[0],
		}
		err := s.regionHeartbeat.Send(req)
		c.Assert(err, IsNil)
	}

	for i := 0; i < 20; i++ {
		testutil.WaitUntil(c, func(c *C) bool {
			r, leader, err := s.client.GetPrevRegion(context.Background(), []byte{byte(i)})
			c.Assert(err, IsNil)
			if i > 0 && i < regionLen {
				return c.Check(leader, DeepEquals, peers[0]) &&
					c.Check(r, DeepEquals, regions[i-1])
			}
			return c.Check(leader, IsNil) &&
				c.Check(r, IsNil)
		})
	}
	c.Succeed()
}

func (s *testClientSuite) TestScanRegions(c *C) {
	regionLen := 10
	regions := make([]*metapb.Region, 0, regionLen)
	for i := 0; i < regionLen; i++ {
		regionID := regionIDAllocator.alloc()
		r := &metapb.Region{
			Id: regionID,
			RegionEpoch: &metapb.RegionEpoch{
				ConfVer: 1,
				Version: 1,
			},
			StartKey: []byte{byte(i)},
			EndKey:   []byte{byte(i + 1)},
			Peers:    peers,
		}
		regions = append(regions, r)
		req := &schedulerpb.RegionHeartbeatRequest{
			Header: newHeader(s.srv),
			Region: r,
			Leader: peers[0],
		}
		err := s.regionHeartbeat.Send(req)
		c.Assert(err, IsNil)
	}

	// Wait for region heartbeats.
	testutil.WaitUntil(c, func(c *C) bool {
		scanRegions, _, err := s.client.ScanRegions(context.Background(), []byte{0}, nil, 10)
		return err == nil && len(scanRegions) == 10
	})

	// Set leader of region3 to nil.
	region3 := core.NewRegionInfo(regions[3], nil)
	s.srv.GetRaftCluster().HandleRegionHeartbeat(region3)

	check := func(start, end []byte, limit int, expect []*metapb.Region) {
		scanRegions, leaders, err := s.client.ScanRegions(context.Background(), start, end, limit)
		c.Assert(err, IsNil)
		c.Assert(scanRegions, HasLen, len(expect))
		c.Assert(leaders, HasLen, len(expect))
		c.Log("scanRegions", scanRegions)
		c.Log("expect", expect)
		c.Log("scanLeaders", leaders)
		for i := range expect {
			c.Assert(scanRegions[i], DeepEquals, expect[i])
			if scanRegions[i].GetId() == region3.GetID() {
				c.Assert(leaders[i], DeepEquals, &metapb.Peer{})
			} else {
				c.Assert(leaders[i], DeepEquals, expect[i].Peers[0])
			}
		}
	}
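
	// Each region i covers the one-byte key range [{i}, {i+1}). The calls
	// below also exercise limit handling: a limit of 0 is expected to return
	// the full range, while a positive limit truncates the result.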
	check([]byte{0}, nil, 10, regions)
	check([]byte{1}, nil, 5, regions[1:6])
	check([]byte{100}, nil, 1, nil)
	check([]byte{1}, []byte{6}, 0, regions[1:6])
	check([]byte{1}, []byte{6}, 2, regions[1:3])
}

func (s *testClientSuite) TestGetRegionByID(c *C) {
	regionID := regionIDAllocator.alloc()
	region := &metapb.Region{
		Id: regionID,
		RegionEpoch: &metapb.RegionEpoch{
			ConfVer: 1,
			Version: 1,
		},
		Peers: peers,
	}
	req := &schedulerpb.RegionHeartbeatRequest{
		Header: newHeader(s.srv),
		Region: region,
		Leader: peers[0],
	}
	err := s.regionHeartbeat.Send(req)
	c.Assert(err, IsNil)

	testutil.WaitUntil(c, func(c *C) bool {
		r, leader, err := s.client.GetRegionByID(context.Background(), regionID)
		c.Assert(err, IsNil)
		return c.Check(r, DeepEquals, region) &&
			c.Check(leader, DeepEquals, peers[0])
	})
	c.Succeed()
}

func (s *testClientSuite) TestGetStore(c *C) {
	cluster := s.srv.GetRaftCluster()
	c.Assert(cluster, NotNil)

	store := stores[0]

	// Get an up store should be OK.
	n, err := s.client.GetStore(context.Background(), store.GetId())
	c.Assert(err, IsNil)
	c.Assert(n, DeepEquals, store)

	// GetAllStores should return every store registered in SetUpSuite. The
	// expected stores are captured by ID first, because the local `stores`
	// declared below shadows the package-level slice and the response order
	// may not match the registration order.
	expected := make(map[uint64]*metapb.Store, len(stores))
	for _, store := range stores {
		expected[store.GetId()] = store
	}
	stores, err := s.client.GetAllStores(context.Background())
	c.Assert(err, IsNil)
	c.Assert(stores, HasLen, len(expected))
	for _, store := range stores {
		c.Assert(store, DeepEquals, expected[store.GetId()])
	}

	// Mark the store as offline.
	err = cluster.RemoveStore(store.GetId())
	c.Assert(err, IsNil)
	offlineStore := proto.Clone(store).(*metapb.Store)
	offlineStore.State = metapb.StoreState_Offline

	// Get an offline store should be OK.
	n, err = s.client.GetStore(context.Background(), store.GetId())
	c.Assert(err, IsNil)
	c.Assert(n, DeepEquals, offlineStore)

	// Should return offline stores.
	contains := false
	stores, err = s.client.GetAllStores(context.Background())
	c.Assert(err, IsNil)
	for _, store := range stores {
		if store.GetId() == offlineStore.GetId() {
			contains = true
			c.Assert(store, DeepEquals, offlineStore)
		}
	}
	c.Assert(contains, IsTrue)

	// Mark the store as tombstone.
	err = cluster.BuryStore(store.GetId(), true)
	c.Assert(err, IsNil)
	tombstoneStore := proto.Clone(store).(*metapb.Store)
	tombstoneStore.State = metapb.StoreState_Tombstone

	// Get a tombstone store should fail.
	n, err = s.client.GetStore(context.Background(), store.GetId())
	c.Assert(err, IsNil)
	c.Assert(n, IsNil)

	// Should return tombstone stores.
	contains = false
	stores, err = s.client.GetAllStores(context.Background())
	c.Assert(err, IsNil)
	for _, store := range stores {
		if store.GetId() == tombstoneStore.GetId() {
			contains = true
			c.Assert(store, DeepEquals, tombstoneStore)
		}
	}
	c.Assert(contains, IsTrue)

	// Should not return tombstone stores.
	stores, err = s.client.GetAllStores(context.Background(), WithExcludeTombstone())
	c.Assert(err, IsNil)
	for _, store := range stores {
		c.Assert(store, Not(Equals), tombstoneStore)
	}
}
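
// checkGCSafePoint asserts that the safe point currently stored on the
// server equals expectedSafePoint.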
func (s *testClientSuite) checkGCSafePoint(c *C, expectedSafePoint uint64) {
	req := &schedulerpb.GetGCSafePointRequest{
		Header: newHeader(s.srv),
	}
	resp, err := s.srv.GetGCSafePoint(context.Background(), req)
	c.Assert(err, IsNil)
	c.Assert(resp.SafePoint, Equals, expectedSafePoint)
}

func (s *testClientSuite) TestUpdateGCSafePoint(c *C) {
	s.checkGCSafePoint(c, 0)
	for _, safePoint := range []uint64{0, 1, 2, 3, 233, 23333, 233333333333, math.MaxUint64} {
		newSafePoint, err := s.client.UpdateGCSafePoint(context.Background(), safePoint)
		c.Assert(err, IsNil)
		c.Assert(newSafePoint, Equals, safePoint)
		s.checkGCSafePoint(c, safePoint)
	}
	// If the new safe point is less than the old one, it should not be updated.
	newSafePoint, err := s.client.UpdateGCSafePoint(context.Background(), 1)
	c.Assert(newSafePoint, Equals, uint64(math.MaxUint64))
	c.Assert(err, IsNil)
	s.checkGCSafePoint(c, math.MaxUint64)
}