PageRenderTime 70ms CodeModel.GetById 28ms RepoModel.GetById 1ms app.codeStats 0ms

/pytests/upgrade/newupgradebasetest.py

https://github.com/membase/testrunner
Python | 784 lines | 761 code | 18 blank | 5 comment | 91 complexity | f96a8cc766d8bcfd3a5af7b970b816be MD5 | raw file
  1. import re
  2. import testconstants
  3. import gc
  4. import sys
  5. import traceback
  6. import Queue
  7. from threading import Thread
  8. from basetestcase import BaseTestCase
  9. from mc_bin_client import MemcachedError
  10. from memcached.helper.data_helper import VBucketAwareMemcached, MemcachedClientHelper
  11. from membase.helper.bucket_helper import BucketOperationHelper
  12. from membase.api.rest_client import RestConnection, RestHelper
  13. from membase.helper.cluster_helper import ClusterOperationHelper
  14. from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
  15. from couchbase_helper.document import DesignDocument, View
  16. from couchbase_helper.documentgenerator import BlobGenerator
  17. from scripts.install import InstallerJob
  18. from builds.build_query import BuildQuery
  19. from couchbase_helper.tuq_generators import JsonGenerator
  20. from pprint import pprint
  21. from testconstants import CB_REPO
  22. from testconstants import MV_LATESTBUILD_REPO
  23. from testconstants import SHERLOCK_BUILD_REPO
  24. from testconstants import COUCHBASE_VERSION_2
  25. from testconstants import COUCHBASE_VERSION_3
  26. from testconstants import COUCHBASE_VERSIONS
  27. from testconstants import SHERLOCK_VERSION
  28. from testconstants import CB_VERSION_NAME
  29. from testconstants import COUCHBASE_FROM_VERSION_3
  30. from testconstants import COUCHBASE_MP_VERSION
  31. from testconstants import CE_EE_ON_SAME_FOLDER
  32. class NewUpgradeBaseTest(BaseTestCase):
  33. def setUp(self):
  34. super(NewUpgradeBaseTest, self).setUp()
  35. self.released_versions = ["2.0.0-1976-rel", "2.0.1", "2.5.0", "2.5.1",
  36. "2.5.2", "3.0.0", "3.0.1",
  37. "3.0.1-1444", "3.0.2", "3.0.2-1603", "3.0.3",
  38. "3.1.0", "3.1.0-1776", "3.1.1", "3.1.1-1807",
  39. "3.1.2", "3.1.2-1815", "3.1.3", "3.1.3-1823",
  40. "4.0.0", "4.0.0-4051", "4.1.0", "4.1.0-5005",
  41. "4.5.0", "4.5.0-2601", "4.5.1", "4.5.1-2817",
  42. "4.6.0", "4.6.0-3573", '4.6.2', "4.6.2-3905"]
  43. self.use_hostnames = self.input.param("use_hostnames", False)
  44. self.product = self.input.param('product', 'couchbase-server')
  45. self.initial_version = self.input.param('initial_version', '2.5.1-1083')
  46. self.initial_vbuckets = self.input.param('initial_vbuckets', 1024)
  47. self.upgrade_versions = self.input.param('upgrade_version', '2.0.1-170-rel')
  48. self.upgrade_versions = self.upgrade_versions.split(";")
  49. self.skip_cleanup = self.input.param("skip_cleanup", False)
  50. self.init_nodes = self.input.param('init_nodes', True)
  51. self.is_downgrade = self.input.param('downgrade', False)
  52. if self.is_downgrade:
  53. self.initial_version, self.upgrade_versions = self.upgrade_versions[0], [self.initial_version]
  54. upgrade_path = self.input.param('upgrade_path', [])
  55. if upgrade_path:
  56. upgrade_path = upgrade_path.split(",")
  57. self.upgrade_versions = upgrade_path + self.upgrade_versions
  58. if self.input.param('released_upgrade_version', None) is not None:
  59. self.upgrade_versions = [self.input.param('released_upgrade_version', None)]
  60. self.initial_build_type = self.input.param('initial_build_type', None)
  61. self.stop_persistence = self.input.param('stop_persistence', False)
  62. self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
  63. self.rest_settings = self.input.membase_settings
  64. self.rest = None
  65. self.rest_helper = None
  66. self.is_ubuntu = False
  67. self.sleep_time = 15
  68. self.ddocs = []
  69. self.item_flag = self.input.param('item_flag', 0)
  70. self.expire_time = self.input.param('expire_time', 0)
  71. self.wait_expire = self.input.param('wait_expire', False)
  72. self.default_view_name = "upgrade-test-view"
  73. self.ddocs_num = self.input.param("ddocs_num", 1)
  74. self.view_num = self.input.param("view_per_ddoc", 2)
  75. self.is_dev_ddoc = self.input.param("is-dev-ddoc", False)
  76. self.during_ops = None
  77. if "during-ops" in self.input.test_params:
  78. self.during_ops = self.input.param("during-ops", None).split(",")
  79. if self.initial_version.startswith("1.6") or self.initial_version.startswith("1.7"):
  80. self.product = 'membase-server'
  81. else:
  82. self.product = 'couchbase-server'
  83. self.index_replicas = self.input.param("index_replicas", None)
  84. self.index_kv_store = self.input.param("kvstore", None)
  85. self.partitions_per_pindex = \
  86. self.input.param("max_partitions_pindex", 32)
  87. self.dataset = self.input.param("dataset", "emp")
  88. self.expiry = self.input.param("expiry", 0)
  89. self.create_ops_per = self.input.param("create_ops_per", 0)
  90. self.expiry_ops_per = self.input.param("expiry_ops_per", 0)
  91. self.delete_ops_per = self.input.param("delete_ops_per", 0)
  92. self.update_ops_per = self.input.param("update_ops_per", 0)
  93. self.docs_per_day = self.input.param("doc-per-day", 49)
  94. self.doc_ops = self.input.param("doc_ops", False)
  95. if self.doc_ops:
  96. self.ops_dist_map = self.calculate_data_change_distribution(
  97. create_per=self.create_ops_per , update_per=self.update_ops_per ,
  98. delete_per=self.delete_ops_per, expiry_per=self.expiry_ops_per,
  99. start=0, end=self.docs_per_day)
  100. self.log.info(self.ops_dist_map)
  101. self.docs_gen_map = self.generate_ops_docs(self.docs_per_day, 0)
  102. #self.full_docs_list_after_ops = self.generate_full_docs_list_after_ops(self.docs_gen_map)
  103. if self.max_verify is None:
  104. self.max_verify = min(self.num_items, 100000)
  105. shell = RemoteMachineShellConnection(self.master)
  106. type = shell.extract_remote_info().distribution_type
  107. shell.disconnect()
  108. if type.lower() == 'windows':
  109. self.is_linux = False
  110. else:
  111. self.is_linux = True
  112. if type.lower() == "ubuntu":
  113. self.is_ubuntu = True
  114. self.queue = Queue.Queue()
  115. self.upgrade_servers = []
  116. self.index_replicas = self.input.param("index_replicas", None)
  117. self.partitions_per_pindex = \
  118. self.input.param("max_partitions_pindex", 32)
  119. self.index_kv_store = self.input.param("kvstore", None)
    def tearDown(self):
        """Clean up the cluster after an upgrade test.

        On failure with skip_cleanup set, leave the cluster untouched for
        debugging.  Otherwise trim self.servers down to the nodes that
        actually joined the cluster, run the base teardown, then reinstall
        the initial build on any upgraded servers so the next test starts
        from a known version.
        """
        # detect failure via unittest internals (attribute names differ
        # across unittest versions, hence the hasattr probes)
        test_failed = (hasattr(self, '_resultForDoCleanups') and \
                       len(self._resultForDoCleanups.failures or \
                           self._resultForDoCleanups.errors)) or \
                      (hasattr(self, '_exc_info') and \
                       self._exc_info()[1] is not None)
        if test_failed and self.skip_cleanup:
            self.log.warn("CLEANUP WAS SKIPPED DUE TO FAILURES IN UPGRADE TEST")
            self.cluster.shutdown(force=True)
            self.log.info("Test Input params were:")
            pprint(self.input.test_params)
            if self.input.param('BUGS', False):
                self.log.warn("Test failed. Possible reason is: {0}"
                              .format(self.input.param('BUGS', False)))
        else:
            if not hasattr(self, 'rest'):
                return
            try:
                # cleanup only nodes that are in cluster
                # not all servers have been installed
                if self.rest is None:
                    self._new_master(self.master)
                nodes = self.rest.get_nodes()
                temp = []
                for server in self.servers:
                    if server.ip in [node.ip for node in nodes]:
                        temp.append(server)
                self.servers = temp
            except Exception, e:
                if e:
                    print "Exception ", e
                    self.cluster.shutdown(force=True)
                    self.fail(e)
            super(NewUpgradeBaseTest, self).tearDown()
            if self.upgrade_servers:
                # restore the original build on nodes that were upgraded
                # NOTE(review): passes a 'version' kwarg — confirm the
                # _install signature accepts it.
                self._install(self.upgrade_servers, version=self.initial_version)
        self.sleep(20, "sleep 20 seconds before run next test")
  157. def _install(self, servers):
  158. params = {}
  159. params['num_nodes'] = len(servers)
  160. params['product'] = self.product
  161. params['version'] = self.initial_version
  162. params['vbuckets'] = [self.initial_vbuckets]
  163. params['init_nodes'] = self.init_nodes
  164. if self.initial_build_type is not None:
  165. params['type'] = self.initial_build_type
  166. self.log.info("will install {0} on {1}".format(self.initial_version, [s.ip for s in servers]))
  167. InstallerJob().parallel_install(servers, params)
  168. if self.product in ["couchbase", "couchbase-server", "cb"]:
  169. success = True
  170. for server in servers:
  171. success &= RemoteMachineShellConnection(server).is_couchbase_installed()
  172. self.sleep(5, "sleep 5 seconds to let cb up completely")
  173. if not success:
  174. sys.exit("some nodes were not install successfully!")
  175. if self.rest is None:
  176. self._new_master(self.master)
  177. if self.use_hostnames:
  178. for server in self.servers[:self.nodes_init]:
  179. hostname = RemoteUtilHelper.use_hostname_for_server_settings(server)
  180. server.hostname = hostname
    def operations(self, servers):
        """Provision the cluster and seed it with data before an upgrade.

        Initializes nodes, rebalances extra servers in, creates buckets,
        loads items (optionally with disk persistence stopped so the
        upgrade happens with a non-empty write queue) and finally applies
        the optional settings from change_settings().
        """
        self.quota = self._initialize_nodes(self.cluster, servers, self.disabled_consistent_view,
                                            self.rebalanceIndexWaitingDisabled, self.rebalanceIndexPausingDisabled,
                                            self.maxParallelIndexers, self.maxParallelReplicaIndexers, self.port)
        if self.port and self.port != '8091':
            # cluster runs on a non-default port; rebuild the REST handles
            self.rest = RestConnection(self.master)
            self.rest_helper = RestHelper(self.rest)
        self.sleep(7, "wait to make sure node is ready")
        if len(servers) > 1:
            self.cluster.rebalance([servers[0]], servers[1:], [],
                                   use_hostnames=self.use_hostnames)
        self.buckets = []
        gc.collect()
        if self.input.param('extra_verification', False):
            self.total_buckets += 2
            print self.total_buckets
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        self._bucket_creation()
        if self.stop_persistence:
            for server in servers:
                for bucket in self.buckets:
                    client = MemcachedClientHelper.direct_client(server, bucket)
                    client.stop_persistence()
            self.sleep(10)
        gen_load = BlobGenerator('upgrade', 'upgrade-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", self.expire_time, flag=self.item_flag)
        if not self.stop_persistence:
            self._wait_for_stats_all_buckets(servers)
        else:
            # persistence is off: every item must still be queued on disk
            # write queues across all copies (active + replicas)
            for bucket in self.buckets:
                drain_rate = 0
                for server in servers:
                    client = MemcachedClientHelper.direct_client(server, bucket)
                    drain_rate += int(client.stats()["ep_queue_size"])
                self.sleep(3, "Pause to load all items")
                self.assertEqual(self.num_items * (self.num_replicas + 1), drain_rate,
                                 "Persistence is stopped, drain rate is incorrect %s. Expected %s" % (
                                 drain_rate, self.num_items * (self.num_replicas + 1)))
        self.change_settings()
  220. def _get_build(self, server, version, remote, is_amazon=False, info=None):
  221. if info is None:
  222. info = remote.extract_remote_info()
  223. build_repo = CB_REPO
  224. if version[:5] in COUCHBASE_VERSIONS:
  225. if version[:3] in CB_VERSION_NAME:
  226. build_repo = CB_REPO + CB_VERSION_NAME[version[:3]] + "/"
  227. elif version[:5] in COUCHBASE_MP_VERSION:
  228. build_repo = MV_LATESTBUILD_REPO
  229. builds, changes = BuildQuery().get_all_builds(version=version, timeout=self.wait_timeout * 5, \
  230. deliverable_type=info.deliverable_type, architecture_type=info.architecture_type, \
  231. edition_type="couchbase-server-enterprise", repo=build_repo, \
  232. distribution_version=info.distribution_version.lower())
  233. self.log.info("finding build %s for machine %s" % (version, server))
  234. if re.match(r'[1-9].[0-9].[0-9]-[0-9]+$', version):
  235. version = version + "-rel"
  236. if version[:5] in self.released_versions:
  237. appropriate_build = BuildQuery().\
  238. find_couchbase_release_build('%s-enterprise' % (self.product),
  239. info.deliverable_type,
  240. info.architecture_type,
  241. version.strip(),
  242. is_amazon=is_amazon,
  243. os_version=info.distribution_version)
  244. else:
  245. appropriate_build = BuildQuery().\
  246. find_build(builds, '%s-enterprise' % (self.product), info.deliverable_type,
  247. info.architecture_type, version.strip())
  248. if appropriate_build is None:
  249. self.log.info("builds are: %s \n. Remote is %s, %s. Result is: %s" % (builds, remote.ip, remote.username, version))
  250. raise Exception("Build %s for machine %s is not found" % (version, server))
  251. return appropriate_build
    def _upgrade(self, upgrade_version, server, queue=None, skip_init=False, info=None):
        """Download and run an in-place upgrade of one node to upgrade_version.

        Returns (stdout, stderr) of the upgrade command on success.  When
        ``queue`` is supplied, False is reported on failure before the
        exception is re-raised (used by _async_update worker threads).
        """
        try:
            remote = RemoteMachineShellConnection(server)
            appropriate_build = self._get_build(server, upgrade_version, remote, info=info)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(upgrade_version))
            self.assertTrue(remote.download_build(appropriate_build), "Build wasn't downloaded!")
            o, e = remote.couchbase_upgrade(appropriate_build, save_upgrade_config=False, forcefully=self.is_downgrade)
            self.log.info("upgrade {0} to version {1} is completed".format(server.ip, upgrade_version))
            """ remove this line when bug MB-11807 fixed """
            if self.is_ubuntu:
                remote.start_server()
            """ remove end here """
            remote.disconnect()
            self.sleep(10)
            if self.is_linux:
                self.wait_node_restarted(server, wait_time=testconstants.NS_SERVER_TIMEOUT * 4, wait_if_warmup=True)
            else:
                # windows restarts take longer; also verify the service state
                self.wait_node_restarted(server, wait_time=testconstants.NS_SERVER_TIMEOUT * 10, wait_if_warmup=True, check_service=True)
            if not skip_init:
                self.rest.init_cluster(self.rest_settings.rest_username, self.rest_settings.rest_password)
            self.sleep(self.sleep_time)
            return o, e
        except Exception, e:
            print traceback.extract_stack()
            if queue is not None:
                queue.put(False)
            if not self.is_linux:
                # dump the windows scheduled-task state for debugging
                remote = RemoteMachineShellConnection(server)
                output, error = remote.execute_command("cmd /c schtasks /Query /FO LIST /TN removeme /V")
                remote.log_command_output(output, error)
                output, error = remote.execute_command("cmd /c schtasks /Query /FO LIST /TN installme /V")
                remote.log_command_output(output, error)
                output, error = remote.execute_command("cmd /c schtasks /Query /FO LIST /TN upgrademe /V")
                remote.log_command_output(output, error)
                remote.disconnect()
            raise e
        # NOTE(review): the try block returns on success, so this success
        # report is unreachable — only failures ever land on the queue.
        if queue is not None:
            queue.put(True)
  290. def _async_update(self, upgrade_version, servers, queue=None, skip_init=False):
  291. self.log.info("\n\n*** servers {0} will be upgraded to {1} version ***\n".
  292. format([server.ip for server in servers], upgrade_version))
  293. q = queue or self.queue
  294. upgrade_threads = []
  295. for server in servers:
  296. upgrade_thread = Thread(target=self._upgrade,
  297. name="upgrade_thread" + server.ip,
  298. args=(upgrade_version, server, q, skip_init))
  299. upgrade_threads.append(upgrade_thread)
  300. upgrade_thread.start()
  301. return upgrade_threads
  302. def _new_master(self, server):
  303. self.master = server
  304. self.rest = RestConnection(self.master)
  305. self.rest_helper = RestHelper(self.rest)
    def verification(self, servers, check_items=True):
        """Post-upgrade sanity checks.

        Verifies hostnames (when use_hostnames is set), the cluster REST
        port, bucket existence, item counts/views, and that every setting
        applied by change_settings() survived the upgrade.
        """
        if self.use_hostnames:
            for server in servers:
                node_info = RestConnection(server).get_nodes_self()
                new_hostname = node_info.hostname
                self.assertEqual("%s:%s" % (server.hostname, server.port), new_hostname,
                                 "Hostname is incorrect for server %s. Settings are %s" % (server.ip, new_hostname))
        # re-point self.rest at the current master if it drifted
        if self.master.ip != self.rest.ip or \
           self.master.ip == self.rest.ip and str(self.master.port) != str(self.rest.port):
            if self.port:
                self.master.port = self.port
            self.rest = RestConnection(self.master)
            self.rest_helper = RestHelper(self.rest)
        if self.port and self.port != '8091':
            settings = self.rest.get_cluster_settings()
            if settings and 'port' in settings:
                self.assertTrue(self.port == str(settings['port']),
                                'Expected cluster port is %s, but is %s' % (self.port, settings['port']))
        for bucket in self.buckets:
            if not self.rest_helper.bucket_exists(bucket.name):
                raise Exception("bucket: %s not found" % bucket.name)
        self.verify_cluster_stats(servers, max_verify=self.max_verify, \
                                  timeout=self.wait_timeout * 20, check_items=check_items)
        if self.ddocs:
            self.verify_all_queries()
        # the remaining checks mirror what change_settings() may have set
        if "update_notifications" in self.input.test_params:
            if self.rest.get_notifications() != self.input.param("update_notifications", True):
                self.fail("update notifications settings wasn't saved")
        if "autofailover_timeout" in self.input.test_params:
            if self.rest.get_autofailover_settings().timeout != self.input.param("autofailover_timeout", None):
                self.fail("autofailover settings wasn't saved")
        if "autofailover_alerts" in self.input.test_params:
            alerts = self.rest.get_alerts_settings()
            if alerts["recipients"] != ['couchbase@localhost']:
                self.fail("recipients value wasn't saved")
            if alerts["sender"] != 'root@localhost':
                self.fail("sender value wasn't saved")
            if alerts["emailServer"]["user"] != 'user':
                self.fail("email_username value wasn't saved")
            if alerts["emailServer"]["pass"] != '':
                self.fail("email_password should be empty for security")
        if "autocompaction" in self.input.test_params:
            cluster_status = self.rest.cluster_status()
            if cluster_status["autoCompactionSettings"]["viewFragmentationThreshold"]\
                    ["percentage"] != self.input.param("autocompaction", 50):
                self.log.info("Cluster status: {0}".format(cluster_status))
                self.fail("autocompaction settings weren't saved")
  353. def verify_all_queries(self, queue=None):
  354. query = {"connectionTimeout" : 60000}
  355. expected_rows = self.num_items
  356. if self.max_verify:
  357. expected_rows = self.max_verify
  358. query["limit"] = expected_rows
  359. if self.input.param("wait_expiration", False):
  360. expected_rows = 0
  361. self.log.info("Start verify queries...")
  362. for bucket in self.buckets:
  363. for ddoc in self.ddocs:
  364. prefix = ("", "dev_")[ddoc.views[0].dev_view]
  365. try:
  366. self.perform_verify_queries(len(ddoc.views), prefix, ddoc.name,
  367. query, bucket=bucket,
  368. wait_time=self.wait_timeout * 5,
  369. expected_rows=expected_rows,
  370. retry_time=10)
  371. except Exception, e:
  372. print e
  373. if queue is not None:
  374. queue.put(False)
  375. if queue is not None:
  376. queue.put(True)
  377. def change_settings(self):
  378. status = True
  379. if "update_notifications" in self.input.test_params:
  380. status &= self.rest.update_notifications(str(self.input.param("update_notifications", 'true')).lower())
  381. if "autofailover_timeout" in self.input.test_params:
  382. status &= self.rest.update_autofailover_settings(True, self.input.param("autofailover_timeout", None))
  383. if "autofailover_alerts" in self.input.test_params:
  384. status &= self.rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
  385. if "autocompaction" in self.input.test_params:
  386. tmp, _, _ = self.rest.set_auto_compaction(viewFragmntThresholdPercentage=
  387. self.input.param("autocompaction", 50))
  388. status &= tmp
  389. if not status:
  390. self.fail("some settings were not set correctly!")
  391. def warm_up_node(self, warmup_nodes=None):
  392. if not warmup_nodes:
  393. warmup_nodes = [self.servers[:self.nodes_init][-1], ]
  394. for warmup_node in warmup_nodes:
  395. shell = RemoteMachineShellConnection(warmup_node)
  396. shell.stop_couchbase()
  397. shell.disconnect()
  398. self.sleep(20)
  399. for warmup_node in warmup_nodes:
  400. shell = RemoteMachineShellConnection(warmup_node)
  401. shell.start_couchbase()
  402. shell.disconnect()
  403. ClusterOperationHelper.wait_for_ns_servers_or_assert(warmup_nodes, self)
  404. def start_index(self):
  405. if self.ddocs:
  406. query = {"connectionTimeout" : 60000}
  407. for bucket in self.buckets:
  408. for ddoc in self.ddocs:
  409. prefix = ("", "dev_")[ddoc.views[0].dev_view]
  410. self.perform_verify_queries(len(ddoc.views), prefix, ddoc.name, query, bucket=bucket)
  411. def failover(self):
  412. rest = RestConnection(self.master)
  413. nodes = rest.node_statuses()
  414. nodes = [node for node in nodes
  415. if node.ip != self.master.ip or str(node.port) != self.master.port]
  416. self.failover_node = nodes[0]
  417. rest.fail_over(self.failover_node.id)
  418. def add_back_failover(self):
  419. rest = RestConnection(self.master)
  420. rest.add_back_node(self.failover_node.id)
    def create_ddocs_and_views(self, queue=None):
        """Create ddocs_num design documents (view_num views each) on every
        bucket, recording them in self.ddocs.

        When ``queue`` is given, a result is put per view: False when
        create_view raised, True otherwise.  Fails the test when
        ddocs_num is not positive.
        """
        self.default_view = View(self.default_view_name, None, None)
        for bucket in self.buckets:
            if int(self.ddocs_num) > 0:
                for i in xrange(int(self.ddocs_num)):
                    views = self.make_default_views(self.default_view_name,
                                                    self.view_num, self.is_dev_ddoc, different_map=True)
                    ddoc = DesignDocument(self.default_view_name + str(i), views)
                    self.ddocs.append(ddoc)
                    for view in views:
                        try:
                            self.cluster.create_view(self.master, ddoc.name, view,
                                                     bucket=bucket)
                        except Exception, e:
                            print e
                            if queue is not None:
                                queue.put(False)
                        # NOTE(review): a True is reported per view even
                        # after a failure put False — confirm intended.
                        if queue is not None:
                            queue.put(True)
            else:
                self.fail("Check param ddocs_num value")
  442. def delete_data(self, servers, paths_to_delete):
  443. for server in servers:
  444. shell = RemoteMachineShellConnection(server)
  445. for path in paths_to_delete:
  446. output, error = shell.execute_command("rm -rf {0}".format(path))
  447. shell.log_command_output(output, error)
  448. # shell._ssh_client.open_sftp().rmdir(path)
  449. shell.disconnect()
    def check_seqno(self, seqno_expected, comparator='=='):
        """Verify the stored seqno of every valid key in every couchbase
        bucket against ``seqno_expected`` using ``comparator`` ('==' or '>=').

        A MemcachedError triggers one vbucket-map reset and retry per key.
        Memcached-type buckets are skipped (no getMeta support there).
        """
        for bucket in self.buckets:
            if bucket.type == 'memcached':
                continue
            ready = BucketOperationHelper.wait_for_memcached(self.master,
                                                             bucket.name)
            self.assertTrue(ready, "wait_for_memcached failed")
            client = VBucketAwareMemcached(RestConnection(self.master), bucket)
            valid_keys, deleted_keys = bucket.kvs[1].key_set()
            for valid_key in valid_keys:
                try:
                    _, flags, exp, seqno, cas = client.memcached(valid_key).getMeta(valid_key)
                except MemcachedError, e:
                    # likely a stale vbucket map after the upgrade; refresh
                    # the client and retry once
                    print e
                    client.reset(RestConnection(self.master))
                    _, flags, exp, seqno, cas = client.memcached(valid_key).getMeta(valid_key)
                self.assertTrue((comparator == '==' and seqno == seqno_expected) or
                                (comparator == '>=' and seqno >= seqno_expected),
                                msg="seqno {0} !{1} {2} for key:{3}".
                                format(seqno, comparator, seqno_expected, valid_key))
            client.done()
  471. def force_reinstall(self, servers):
  472. for server in servers:
  473. try:
  474. remote = RemoteMachineShellConnection(server)
  475. appropriate_build = self._get_build(server, self.initial_version, remote)
  476. self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(self.initial_version))
  477. remote.download_build(appropriate_build)
  478. remote.install_server(appropriate_build, force=True)
  479. self.log.info("upgrade {0} to version {1} is completed".format(server.ip, self.initial_version))
  480. remote.disconnect()
  481. self.sleep(10)
  482. if self.is_linux:
  483. self.wait_node_restarted(server, wait_time=testconstants.NS_SERVER_TIMEOUT * 4, wait_if_warmup=True)
  484. else:
  485. self.wait_node_restarted(server, wait_time=testconstants.NS_SERVER_TIMEOUT * 10, wait_if_warmup=True, check_service=True)
  486. except Exception, e:
  487. print traceback.extract_stack()
  488. if queue is not None:
  489. queue.put(False)
  490. if not self.is_linux:
  491. remote = RemoteMachineShellConnection(server)
  492. output, error = remote.execute_command("cmd /c schtasks /Query /FO LIST /TN installme /V")
  493. remote.log_command_output(output, error)
  494. remote.disconnect()
  495. raise e
  496. def _verify_vbucket_nums_for_swap(self, old_vbs, new_vbs):
  497. out_servers = set(old_vbs) - set(new_vbs)
  498. in_servers = set(new_vbs) - set(old_vbs)
  499. self.assertEqual(len(out_servers), len(in_servers),
  500. "Seems like it wasn't swap rebalance. Out %s, in %s" % (
  501. len(out_servers),len(in_servers)))
  502. for vb_type in ["active_vb", "replica_vb"]:
  503. self.log.info("Checking %s on nodes that remain in cluster..." % vb_type)
  504. for server, stats in old_vbs.iteritems():
  505. if server in new_vbs:
  506. self.assertTrue(sorted(stats[vb_type]) == sorted(new_vbs[server][vb_type]),
  507. "Server %s Seems like %s vbuckets were shuffled, old vbs is %s, new are %s" %(
  508. server.ip, vb_type, stats[vb_type], new_vbs[server][vb_type]))
  509. self.log.info("%s vbuckets were not suffled" % vb_type)
  510. self.log.info("Checking in-out nodes...")
  511. vbs_servs_out = vbs_servs_in = []
  512. for srv, stat in old_vbs.iteritems():
  513. if srv in out_servers:
  514. vbs_servs_out.extend(stat[vb_type])
  515. for srv, stat in new_vbs.iteritems():
  516. if srv in in_servers:
  517. vbs_servs_in.extend(stat[vb_type])
  518. self.assertTrue(sorted(vbs_servs_out) == sorted(vbs_servs_in),
  519. "%s vbuckets seem to be suffled" % vb_type)
  520. def monitor_dcp_rebalance(self):
  521. """ released_upgrade_version """
  522. upgrade_version = ""
  523. if self.input.param('released_upgrade_version', None) is not None:
  524. upgrade_version = self.input.param('released_upgrade_version', None)[:5]
  525. else:
  526. upgrade_version = self.input.param('upgrade_version', '')[:5]
  527. if self.input.param('initial_version', '')[:5] in COUCHBASE_VERSION_2 and \
  528. (upgrade_version in COUCHBASE_VERSION_3 or \
  529. upgrade_version in SHERLOCK_VERSION):
  530. if int(self.initial_vbuckets) >= 512:
  531. if self.master.ip != self.rest.ip or \
  532. self.master.ip == self.rest.ip and \
  533. str(self.master.port) != str(self.rest.port):
  534. if self.port:
  535. self.master.port = self.port
  536. self.rest = RestConnection(self.master)
  537. self.rest_helper = RestHelper(self.rest)
  538. if self.rest._rebalance_progress_status() == 'running':
  539. self.log.info("Start monitoring DCP upgrade from {0} to {1}"\
  540. .format(self.input.param('initial_version', '')[:5], \
  541. upgrade_version))
  542. status = self.rest.monitorRebalance()
  543. if status:
  544. self.log.info("Done DCP rebalance upgrade!")
  545. else:
  546. self.fail("Failed DCP rebalance upgrade")
  547. elif any ("DCP upgrade completed successfully.\n" \
  548. in d.values() for d in self.rest.get_logs(10)):
  549. self.log.info("DCP upgrade is completed")
  550. else:
  551. self.fail("DCP reabalance upgrade is not running")
  552. else:
  553. self.fail("Need vbuckets setting >= 256 for upgrade from 2.x.x to 3+")
  554. else:
  555. if self.master.ip != self.rest.ip:
  556. self.rest = RestConnection(self.master)
  557. self.rest_helper = RestHelper(self.rest)
  558. self.log.info("No need to do DCP rebalance upgrade")
  559. def _offline_upgrade(self, skip_init=False):
  560. try:
  561. self.log.info("offline_upgrade")
  562. stoped_nodes = self.servers[:self.nodes_init]
  563. for upgrade_version in self.upgrade_versions:
  564. self.sleep(self.sleep_time, "Pre-setup of old version is done. "
  565. " Wait for upgrade to {0} version".format(upgrade_version))
  566. for server in stoped_nodes:
  567. remote = RemoteMachineShellConnection(server)
  568. remote.stop_server()
  569. remote.disconnect()
  570. self.sleep(self.sleep_time)
  571. upgrade_threads = self._async_update(upgrade_version, stoped_nodes,
  572. None, skip_init)
  573. for upgrade_thread in upgrade_threads:
  574. upgrade_thread.join()
  575. success_upgrade = True
  576. while not self.queue.empty():
  577. success_upgrade &= self.queue.get()
  578. if not success_upgrade:
  579. self.fail("Upgrade failed!")
  580. self.dcp_rebalance_in_offline_upgrade_from_version2()
  581. """ set install cb version to upgrade version after done upgrade """
  582. self.initial_version = self.upgrade_versions[0]
  583. except Exception, ex:
  584. self.log.info(ex)
  585. raise
  586. def dcp_rebalance_in_offline_upgrade_from_version2(self):
  587. if self.input.param('initial_version', '')[:5] in COUCHBASE_VERSION_2 and \
  588. (self.input.param('upgrade_version', '')[:5] in COUCHBASE_VERSION_3 or \
  589. self.input.param('upgrade_version', '')[:5] in SHERLOCK_VERSION) and \
  590. self.input.param('num_stoped_nodes', self.nodes_init) >= self.nodes_init:
  591. otpNodes = []
  592. nodes = self.rest.node_statuses()
  593. for node in nodes:
  594. otpNodes.append(node.id)
  595. self.log.info("Start DCP rebalance after complete offline upgrade from {0} to {1}"\
  596. .format(self.input.param('initial_version', '')[:5], \
  597. self.input.param('upgrade_version', '')[:5]))
  598. self.rest.rebalance(otpNodes, [])
  599. """ verify DCP upgrade in 3.0.0 version """
  600. self.monitor_dcp_rebalance()
  601. else:
  602. self.log.info("No need to do DCP rebalance upgrade")
  603. def generate_map_nodes_out_dist_upgrade(self, nodes_out_dist):
  604. self.nodes_out_dist = nodes_out_dist
  605. self.generate_map_nodes_out_dist()
  606. """ subdoc base test starts here """
  607. def generate_json_for_nesting(self):
  608. json = {
  609. "not_tested_integer_zero":0,
  610. "not_tested_integer_big":1038383839293939383938393,
  611. "not_tested_double_zero":0.0,
  612. "not_tested_integer":1,
  613. "not_tested_integer_negative":-1,
  614. "not_tested_double":1.1,
  615. "not_tested_double_negative":-1.1,
  616. "not_tested_float":2.99792458e8,
  617. "not_tested_float_negative":-2.99792458e8,
  618. "not_tested_array_numbers_integer" : [1,2,3,4,5],
  619. "not_tested_array_numbers_double" : [1.1,2.2,3.3,4.4,5.5],
  620. "not_tested_array_numbers_float" : [2.99792458e8,2.99792458e8,2.99792458e8],
  621. "not_tested_array_numbers_mix" : [0,2.99792458e8,1.1],
  622. "not_tested_array_array_mix" : [[2.99792458e8,2.99792458e8,2.99792458e8],[0,2.99792458e8,1.1],[],[0, 0, 0]],
  623. "not_tested_simple_string_lower_case":"abcdefghijklmnoprestuvxyz",
  624. "not_tested_simple_string_upper_case":"ABCDEFGHIJKLMNOPQRSTUVWXZYZ",
  625. "not_tested_simple_string_empty":"",
  626. "not_tested_simple_string_datetime":"2012-10-03 15:35:46.461491",
  627. "not_tested_simple_string_special_chars":"_-+!#@$%&*(){}\][;.,<>?/",
  628. "not_test_json" : { "not_to_bes_tested_string_field1": "not_to_bes_tested_string"}
  629. }
  630. return json
  631. def generate_simple_data_null(self):
  632. json = {
  633. "null":None,
  634. "null_array":[None, None]
  635. }
  636. return json
  637. def generate_simple_data_boolean(self):
  638. json = {
  639. "true":True,
  640. "false":False,
  641. "array":[True, False, True, False]
  642. }
  643. return json
  644. def generate_nested_json(self):
  645. json_data = self.generate_json_for_nesting()
  646. json = {
  647. "json_1": { "json_2": {"json_3":json_data}}
  648. }
  649. return json
  650. def generate_simple_data_numbers(self):
  651. json = {
  652. "integer_zero":0,
  653. "integer_big":1038383839293939383938393,
  654. "double_zero":0.0,
  655. "integer":1,
  656. "integer_negative":-1,
  657. "double":1.1,
  658. "double_negative":-1.1,
  659. "float":2.99792458e8,
  660. "float_negative":-2.99792458e8,
  661. }
  662. return json
  663. def subdoc_direct_client(self, server, bucket, timeout=30):
  664. # CREATE SDK CLIENT
  665. if self.use_sdk_client:
  666. try:
  667. from sdk_client import SDKClient
  668. scheme = "couchbase"
  669. host=self.master.ip
  670. if self.master.ip == "127.0.0.1":
  671. scheme = "http"
  672. host="{0}:{1}".format(self.master.ip,self.master.port)
  673. return SDKClient(scheme=scheme,hosts = [host], bucket = bucket.name)
  674. except Exception, ex:
  675. self.log.info("cannot load sdk client due to error {0}".format(str(ex)))
  676. # USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
  677. return self.direct_mc_bin_client(server, bucket, timeout= timeout)
  678. """ subdoc base test ends here """
  679. def construct_plan_params(self):
  680. plan_params = {}
  681. plan_params['numReplicas'] = 0
  682. if self.index_replicas:
  683. plan_params['numReplicas'] = self.index_replicas
  684. if self.partitions_per_pindex:
  685. plan_params['maxPartitionsPerPIndex'] = self.partitions_per_pindex
  686. return plan_params
    def construct_cbft_query_json(self, query, fields=None, timeout=None):
        """Assemble a cbft (fts) query JSON body around ``query``.

        NOTE(review): TestInputSingleton and QUERY are not imported anywhere
        in this file, so this method raises NameError when called — confirm
        the intended fts query-helper imports before relying on it.  Also
        note QUERY.JSON is mutated in place, not copied.
        """
        max_matches = TestInputSingleton.input.param("query_max_matches", 10000000)
        query_json = QUERY.JSON
        # query is a unicode dict
        query_json['query'] = query
        query_json['indexName'] = self.name
        if max_matches:
            query_json['size'] = int(max_matches)
        if timeout:
            query_json['timeout'] = int(timeout)
        if fields:
            query_json['fields'] = fields
        return query_json
  700. def generate_ops_docs(self, num_items, start=0):
  701. try:
  702. json_generator = JsonGenerator()
  703. if self.dataset == "simple":
  704. return self.generate_ops(num_items, start, json_generator.generate_docs_simple)
  705. if self.dataset == "sales":
  706. return self.generate_ops(num_items, start, json_generator.generate_docs_sales)
  707. if self.dataset == "employee" or self.dataset == "default":
  708. return self.generate_ops(num_items, start, json_generator.generate_docs_employee)
  709. if self.dataset == "sabre":
  710. return self.generate_ops(num_items, start, json_generator.generate_docs_sabre)
  711. if self.dataset == "bigdata":
  712. return self.generate_ops(num_items, start, json_generator.generate_docs_bigdata)
  713. if self.dataset == "array":
  714. return self.generate_ops(num_items, start, json_generator.generate_docs_array)
  715. except Exception, ex:
  716. self.log.info(ex)
  717. self.fail("There is no dataset %s, please enter a valid one" % self.dataset)
  718. def generate_ops(self, docs_per_day, start=0, method=None):
  719. gen_docs_map = {}
  720. for key in self.ops_dist_map.keys():
  721. isShuffle = False
  722. if key == "update":
  723. isShuffle = True
  724. if self.dataset != "bigdata":
  725. gen_docs_map[key] = method(docs_per_day=self.ops_dist_map[key]["end"],
  726. start=self.ops_dist_map[key]["start"])
  727. else:
  728. gen_docs_map[key] = method(value_size=self.value_size,
  729. end=self.ops_dist_map[key]["end"],
  730. start=self.ops_dist_map[key]["start"])
  731. return gen_docs_map
  732. def _convert_server_map(self, servers):
  733. map = {}
  734. for server in servers:
  735. key = self._gen_server_key(server)
  736. map[key] = server
  737. return map
  738. def _gen_server_key(self, server):
  739. return "{0}:{1}".format(server.ip, server.port)