Source file: client/virt/virt_utils.py (autotest project)
Repository: https://gitlab.com/libvirt/autotest
Python | 3718 lines total | Possible License(s): LGPL-3.0, GPL-2.0
  1. """
  2. KVM test utility functions.
  3. @copyright: 2008-2009 Red Hat Inc.
  4. """
import time, string, random, socket, os, signal, re, logging, commands, cPickle
import fcntl, shelve, ConfigParser, threading, sys, UserDict, inspect, tarfile
import struct, shutil, glob, errno
from autotest_lib.client.bin import utils, os_dep
from autotest_lib.client.common_lib import error, logging_config
from autotest_lib.client.common_lib import logging_manager, git
import rss_client, aexpect
import platform
  13. try:
  14. import koji
  15. KOJI_INSTALLED = True
  16. except ImportError:
  17. KOJI_INSTALLED = False
  18. ARCH = platform.machine()
  19. if ARCH == "ppc64":
  20. # From include/linux/sockios.h
  21. SIOCSIFHWADDR = 0x8924
  22. SIOCGIFHWADDR = 0x8927
  23. SIOCSIFFLAGS = 0x8914
  24. SIOCGIFINDEX = 0x8933
  25. SIOCBRADDIF = 0x89a2
  26. # From linux/include/linux/if_tun.h
  27. TUNSETIFF = 0x800454ca
  28. TUNGETIFF = 0x400454d2
  29. TUNGETFEATURES = 0x400454cf
  30. IFF_TAP = 0x2
  31. IFF_NO_PI = 0x1000
  32. IFF_VNET_HDR = 0x4000
  33. # From linux/include/linux/if.h
  34. IFF_UP = 0x1
  35. else:
  36. # From include/linux/sockios.h
  37. SIOCSIFHWADDR = 0x8924
  38. SIOCGIFHWADDR = 0x8927
  39. SIOCSIFFLAGS = 0x8914
  40. SIOCGIFINDEX = 0x8933
  41. SIOCBRADDIF = 0x89a2
  42. # From linux/include/linux/if_tun.h
  43. TUNSETIFF = 0x400454ca
  44. TUNGETIFF = 0x800454d2
  45. TUNGETFEATURES = 0x800454cf
  46. IFF_TAP = 0x0002
  47. IFF_NO_PI = 0x1000
  48. IFF_VNET_HDR = 0x4000
  49. # From linux/include/linux/if.h
  50. IFF_UP = 0x1
  51. def _lock_file(filename):
  52. f = open(filename, "w")
  53. fcntl.lockf(f, fcntl.LOCK_EX)
  54. return f
  55. def _unlock_file(f):
  56. fcntl.lockf(f, fcntl.LOCK_UN)
  57. f.close()
  58. def is_vm(obj):
  59. """
  60. Tests whether a given object is a VM object.
  61. @param obj: Python object.
  62. """
  63. return obj.__class__.__name__ == "VM"
  64. class NetError(Exception):
  65. pass
  66. class TAPModuleError(NetError):
  67. def __init__(self, devname, action="open", details=None):
  68. NetError.__init__(self, devname)
  69. self.devname = devname
  70. self.details = details
  71. def __str__(self):
  72. e_msg = "Can't %s %s" % (self.action, self.devname)
  73. if self.details is not None:
  74. e_msg += " : %s" % self.details
  75. return e_msg
  76. class TAPNotExistError(NetError):
  77. def __init__(self, ifname):
  78. NetError.__init__(self, ifname)
  79. self.ifname = ifname
  80. def __str__(self):
  81. return "Interface %s does not exist" % self.ifname
  82. class TAPCreationError(NetError):
  83. def __init__(self, ifname, details=None):
  84. NetError.__init__(self, ifname, details)
  85. self.ifname = ifname
  86. self.details = details
  87. def __str__(self):
  88. e_msg = "Cannot create TAP device %s" % self.ifname
  89. if self.details is not None:
  90. e_msg += ": %s" % self.details
  91. return e_msg
  92. class TAPBringUpError(NetError):
  93. def __init__(self, ifname):
  94. NetError.__init__(self, ifname)
  95. self.ifname = ifname
  96. def __str__(self):
  97. return "Cannot bring up TAP %s" % self.ifname
  98. class BRAddIfError(NetError):
  99. def __init__(self, ifname, brname, details):
  100. NetError.__init__(self, ifname, brname, details)
  101. self.ifname = ifname
  102. self.brname = brname
  103. self.details = details
  104. def __str__(self):
  105. return ("Can not add if %s to bridge %s: %s" %
  106. (self.ifname, self.brname, self.details))
  107. class HwAddrSetError(NetError):
  108. def __init__(self, ifname, mac):
  109. NetError.__init__(self, ifname, mac)
  110. self.ifname = ifname
  111. self.mac = mac
  112. def __str__(self):
  113. return "Can not set mac %s to interface %s" % (self.mac, self.ifname)
  114. class HwAddrGetError(NetError):
  115. def __init__(self, ifname):
  116. NetError.__init__(self, ifname)
  117. self.ifname = ifname
  118. def __str__(self):
  119. return "Can not get mac of interface %s" % self.ifname
class Env(UserDict.IterableUserDict):
    """
    A dict-like object containing global objects used by tests.

    The contents can be persisted to disk with cPickle (see save()), so
    state survives between test processes.
    """
    def __init__(self, filename=None, version=0):
        """
        Create an empty Env object or load an existing one from a file.

        If the version recorded in the file is lower than version, or if some
        error occurs during unpickling, or if filename is not supplied,
        create an empty Env object.

        @param filename: Path to an env file.
        @param version: Required env version (int).
        """
        UserDict.IterableUserDict.__init__(self)
        empty = {"version": version}
        if filename:
            # Remember the path so save() can default to it later.
            self._filename = filename
            try:
                if os.path.isfile(filename):
                    f = open(filename, "r")
                    env = cPickle.load(f)
                    f.close()
                    # Only accept env files at least as new as the requested
                    # version; otherwise start from a fresh dict.
                    if env.get("version", 0) >= version:
                        self.data = env
                    else:
                        logging.warn("Incompatible env file found. Not using it.")
                        self.data = empty
                else:
                    # No previous env file found, proceed...
                    self.data = empty
            # Almost any exception can be raised during unpickling, so let's
            # catch them all
            except Exception, e:
                logging.warn(e)
                self.data = empty
        else:
            self.data = empty

    def save(self, filename=None):
        """
        Pickle the contents of the Env object into a file.

        @param filename: Filename to pickle the dict into.  If not supplied,
                use the filename from which the dict was loaded.
        """
        filename = filename or self._filename
        f = open(filename, "w")
        cPickle.dump(self.data, f)
        f.close()

    def get_all_vms(self):
        """
        Return a list of all VM objects in this Env object.
        """
        return [o for o in self.values() if is_vm(o)]

    def get_vm(self, name):
        """
        Return a VM object by its name.

        @param name: VM name.
        """
        # VMs are stored under keys of the form 'vm__<name>'.
        return self.get("vm__%s" % name)

    def register_vm(self, name, vm):
        """
        Register a VM in this Env object.

        @param name: VM name.
        @param vm: VM object.
        """
        self["vm__%s" % name] = vm

    def unregister_vm(self, name):
        """
        Remove a given VM.

        @param name: VM name.
        """
        del self["vm__%s" % name]

    def register_installer(self, installer):
        """
        Register a installer that was just run

        The installer will be available for other tests, so that
        information about the installed KVM modules and qemu-kvm can be used by
        them.
        """
        self['last_installer'] = installer

    def previous_installer(self):
        """
        Return the last installer that was registered
        """
        return self.get('last_installer')
  204. class Params(UserDict.IterableUserDict):
  205. """
  206. A dict-like object passed to every test.
  207. """
  208. def objects(self, key):
  209. """
  210. Return the names of objects defined using a given key.
  211. @param key: The name of the key whose value lists the objects
  212. (e.g. 'nics').
  213. """
  214. return self.get(key, "").split()
  215. def object_params(self, obj_name):
  216. """
  217. Return a dict-like object containing the parameters of an individual
  218. object.
  219. This method behaves as follows: the suffix '_' + obj_name is removed
  220. from all key names that have it. Other key names are left unchanged.
  221. The values of keys with the suffix overwrite the values of their
  222. suffixless versions.
  223. @param obj_name: The name of the object (objects are listed by the
  224. objects() method).
  225. """
  226. suffix = "_" + obj_name
  227. new_dict = self.copy()
  228. for key in self:
  229. if key.endswith(suffix):
  230. new_key = key.split(suffix)[0]
  231. new_dict[new_key] = self[key]
  232. return new_dict
  233. # Functions related to MAC/IP addresses
  234. def _open_mac_pool(lock_mode):
  235. lock_file = open("/tmp/mac_lock", "w+")
  236. fcntl.lockf(lock_file, lock_mode)
  237. pool = shelve.open("/tmp/address_pool")
  238. return pool, lock_file
  239. def _close_mac_pool(pool, lock_file):
  240. pool.close()
  241. fcntl.lockf(lock_file, fcntl.LOCK_UN)
  242. lock_file.close()
  243. def _generate_mac_address_prefix(mac_pool):
  244. """
  245. Generate a random MAC address prefix and add it to the MAC pool dictionary.
  246. If there's a MAC prefix there already, do not update the MAC pool and just
  247. return what's in there. By convention we will set KVM autotest MAC
  248. addresses to start with 0x9a.
  249. @param mac_pool: The MAC address pool object.
  250. @return: The MAC address prefix.
  251. """
  252. if "prefix" in mac_pool:
  253. prefix = mac_pool["prefix"]
  254. else:
  255. r = random.SystemRandom()
  256. prefix = "9a:%02x:%02x:%02x:" % (r.randint(0x00, 0xff),
  257. r.randint(0x00, 0xff),
  258. r.randint(0x00, 0xff))
  259. mac_pool["prefix"] = prefix
  260. return prefix
  261. def generate_mac_address(vm_instance, nic_index):
  262. """
  263. Randomly generate a MAC address and add it to the MAC address pool.
  264. Try to generate a MAC address based on a randomly generated MAC address
  265. prefix and add it to a persistent dictionary.
  266. key = VM instance + NIC index, value = MAC address
  267. e.g. {'20100310-165222-Wt7l:0': '9a:5d:94:6a:9b:f9'}
  268. @param vm_instance: The instance attribute of a VM.
  269. @param nic_index: The index of the NIC.
  270. @return: MAC address string.
  271. """
  272. mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
  273. key = "%s:%s" % (vm_instance, nic_index)
  274. if key in mac_pool:
  275. mac = mac_pool[key]
  276. else:
  277. prefix = _generate_mac_address_prefix(mac_pool)
  278. r = random.SystemRandom()
  279. while key not in mac_pool:
  280. mac = prefix + "%02x:%02x" % (r.randint(0x00, 0xff),
  281. r.randint(0x00, 0xff))
  282. if mac in mac_pool.values():
  283. continue
  284. mac_pool[key] = mac
  285. _close_mac_pool(mac_pool, lock_file)
  286. return mac
  287. def free_mac_address(vm_instance, nic_index):
  288. """
  289. Remove a MAC address from the address pool.
  290. @param vm_instance: The instance attribute of a VM.
  291. @param nic_index: The index of the NIC.
  292. """
  293. mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
  294. key = "%s:%s" % (vm_instance, nic_index)
  295. if key in mac_pool:
  296. del mac_pool[key]
  297. _close_mac_pool(mac_pool, lock_file)
  298. def set_mac_address(vm_instance, nic_index, mac):
  299. """
  300. Set a MAC address in the pool.
  301. @param vm_instance: The instance attribute of a VM.
  302. @param nic_index: The index of the NIC.
  303. """
  304. mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
  305. mac_pool["%s:%s" % (vm_instance, nic_index)] = mac
  306. _close_mac_pool(mac_pool, lock_file)
  307. def get_mac_address(vm_instance, nic_index):
  308. """
  309. Return a MAC address from the pool.
  310. @param vm_instance: The instance attribute of a VM.
  311. @param nic_index: The index of the NIC.
  312. @return: MAC address string.
  313. """
  314. mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_SH)
  315. mac = mac_pool.get("%s:%s" % (vm_instance, nic_index))
  316. _close_mac_pool(mac_pool, lock_file)
  317. return mac
  318. def verify_ip_address_ownership(ip, macs, timeout=10.0):
  319. """
  320. Use arping and the ARP cache to make sure a given IP address belongs to one
  321. of the given MAC addresses.
  322. @param ip: An IP address.
  323. @param macs: A list or tuple of MAC addresses.
  324. @return: True iff ip is assigned to a MAC address in macs.
  325. """
  326. # Compile a regex that matches the given IP address and any of the given
  327. # MAC addresses
  328. mac_regex = "|".join("(%s)" % mac for mac in macs)
  329. regex = re.compile(r"\b%s\b.*\b(%s)\b" % (ip, mac_regex), re.IGNORECASE)
  330. # Check the ARP cache
  331. o = commands.getoutput("%s -n" % find_command("arp"))
  332. if regex.search(o):
  333. return True
  334. # Get the name of the bridge device for arping
  335. o = commands.getoutput("%s route get %s" % (find_command("ip"), ip))
  336. dev = re.findall("dev\s+\S+", o, re.IGNORECASE)
  337. if not dev:
  338. return False
  339. dev = dev[0].split()[-1]
  340. # Send an ARP request
  341. o = commands.getoutput("%s -f -c 3 -I %s %s" %
  342. (find_command("arping"), dev, ip))
  343. return bool(regex.search(o))
  344. # Utility functions for dealing with external processes
  345. def find_command(cmd):
  346. for dir in ["/usr/local/sbin", "/usr/local/bin",
  347. "/usr/sbin", "/usr/bin", "/sbin", "/bin"]:
  348. file = os.path.join(dir, cmd)
  349. if os.path.exists(file):
  350. return file
  351. raise ValueError('Missing command: %s' % cmd)
  352. def pid_exists(pid):
  353. """
  354. Return True if a given PID exists.
  355. @param pid: Process ID number.
  356. """
  357. try:
  358. os.kill(pid, 0)
  359. return True
  360. except Exception:
  361. return False
  362. def safe_kill(pid, signal):
  363. """
  364. Attempt to send a signal to a given process that may or may not exist.
  365. @param signal: Signal number.
  366. """
  367. try:
  368. os.kill(pid, signal)
  369. return True
  370. except Exception:
  371. return False
  372. def kill_process_tree(pid, sig=signal.SIGKILL):
  373. """Signal a process and all of its children.
  374. If the process does not exist -- return.
  375. @param pid: The pid of the process to signal.
  376. @param sig: The signal to send to the processes.
  377. """
  378. if not safe_kill(pid, signal.SIGSTOP):
  379. return
  380. children = commands.getoutput("ps --ppid=%d -o pid=" % pid).split()
  381. for child in children:
  382. kill_process_tree(int(child), sig)
  383. safe_kill(pid, sig)
  384. safe_kill(pid, signal.SIGCONT)
  385. def check_kvm_source_dir(source_dir):
  386. """
  387. Inspects the kvm source directory and verifies its disposition. In some
  388. occasions build may be dependant on the source directory disposition.
  389. The reason why the return codes are numbers is that we might have more
  390. changes on the source directory layout, so it's not scalable to just use
  391. strings like 'old_repo', 'new_repo' and such.
  392. @param source_dir: Source code path that will be inspected.
  393. """
  394. os.chdir(source_dir)
  395. has_qemu_dir = os.path.isdir('qemu')
  396. has_kvm_dir = os.path.isdir('kvm')
  397. if has_qemu_dir:
  398. logging.debug("qemu directory detected, source dir layout 1")
  399. return 1
  400. if has_kvm_dir and not has_qemu_dir:
  401. logging.debug("kvm directory detected, source dir layout 2")
  402. return 2
  403. else:
  404. raise error.TestError("Unknown source dir layout, cannot proceed.")
  405. # Functions and classes used for logging into guests and transferring files
  406. class LoginError(Exception):
  407. def __init__(self, msg, output):
  408. Exception.__init__(self, msg, output)
  409. self.msg = msg
  410. self.output = output
  411. def __str__(self):
  412. return "%s (output: %r)" % (self.msg, self.output)
  413. class LoginAuthenticationError(LoginError):
  414. pass
  415. class LoginTimeoutError(LoginError):
  416. def __init__(self, output):
  417. LoginError.__init__(self, "Login timeout expired", output)
  418. class LoginProcessTerminatedError(LoginError):
  419. def __init__(self, status, output):
  420. LoginError.__init__(self, None, output)
  421. self.status = status
  422. def __str__(self):
  423. return ("Client process terminated (status: %s, output: %r)" %
  424. (self.status, self.output))
  425. class LoginBadClientError(LoginError):
  426. def __init__(self, client):
  427. LoginError.__init__(self, None, None)
  428. self.client = client
  429. def __str__(self):
  430. return "Unknown remote shell client: %r" % self.client
  431. class SCPError(Exception):
  432. def __init__(self, msg, output):
  433. Exception.__init__(self, msg, output)
  434. self.msg = msg
  435. self.output = output
  436. def __str__(self):
  437. return "%s (output: %r)" % (self.msg, self.output)
  438. class SCPAuthenticationError(SCPError):
  439. pass
  440. class SCPAuthenticationTimeoutError(SCPAuthenticationError):
  441. def __init__(self, output):
  442. SCPAuthenticationError.__init__(self, "Authentication timeout expired",
  443. output)
  444. class SCPTransferTimeoutError(SCPError):
  445. def __init__(self, output):
  446. SCPError.__init__(self, "Transfer timeout expired", output)
  447. class SCPTransferFailedError(SCPError):
  448. def __init__(self, status, output):
  449. SCPError.__init__(self, None, output)
  450. self.status = status
  451. def __str__(self):
  452. return ("SCP transfer failed (status: %s, output: %r)" %
  453. (self.status, self.output))
def _remote_login(session, username, password, prompt, timeout=10, debug=False):
    """
    Log into a remote host (guest) using SSH or Telnet.  Wait for questions
    and provide answers.  If timeout expires while waiting for output from the
    child (e.g. a password prompt or a shell prompt) -- fail.

    @brief: Log into a remote host (guest) using SSH or Telnet.

    @param session: An Expect or ShellSession instance to operate on
    @param username: The username to send in reply to a login prompt
    @param password: The password to send in reply to a password prompt
    @param prompt: The shell prompt that indicates a successful login
    @param timeout: The maximal time duration (in seconds) to wait for each
            step of the login procedure (i.e. the "Are you sure" prompt, the
            password prompt, the shell prompt, etc)
    @param debug: If True, log each matched prompt with logging.debug
    @raise LoginTimeoutError: If timeout expires
    @raise LoginAuthenticationError: If authentication fails
    @raise LoginProcessTerminatedError: If the client terminates during login
    @raise LoginError: If some other error occurs
    """
    # Each prompt must be answered at most once; seeing a second password
    # or login prompt means the credentials were rejected.
    password_prompt_count = 0
    login_prompt_count = 0
    while True:
        try:
            match, text = session.read_until_last_line_matches(
                [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"[Ll]ogin:\s*$",
                 r"[Cc]onnection.*closed", r"[Cc]onnection.*refused",
                 r"[Pp]lease wait", r"[Ww]arning", prompt],
                timeout=timeout, internal_timeout=0.5)
            if match == 0:  # "Are you sure you want to continue connecting"
                if debug:
                    logging.debug("Got 'Are you sure...', sending 'yes'")
                session.sendline("yes")
                continue
            elif match == 1:  # "password:"
                if password_prompt_count == 0:
                    if debug:
                        logging.debug("Got password prompt, sending '%s'", password)
                    session.sendline(password)
                    password_prompt_count += 1
                    continue
                else:
                    raise LoginAuthenticationError("Got password prompt twice",
                                                   text)
            elif match == 2:  # "login:"
                # A username prompt is only legitimate before any password
                # was sent.
                if login_prompt_count == 0 and password_prompt_count == 0:
                    if debug:
                        logging.debug("Got username prompt; sending '%s'", username)
                    session.sendline(username)
                    login_prompt_count += 1
                    continue
                else:
                    if login_prompt_count > 0:
                        msg = "Got username prompt twice"
                    else:
                        msg = "Got username prompt after password prompt"
                    raise LoginAuthenticationError(msg, text)
            elif match == 3:  # "Connection closed"
                raise LoginError("Client said 'connection closed'", text)
            elif match == 4:  # "Connection refused"
                raise LoginError("Client said 'connection refused'", text)
            elif match == 5:  # "Please wait"
                if debug:
                    logging.debug("Got 'Please wait'")
                # Allow the remote end extra time before the next prompt.
                timeout = 30
                continue
            elif match == 6:  # "Warning added RSA"
                if debug:
                    logging.debug("Got 'Warning added RSA to known host list")
                continue
            elif match == 7:  # prompt
                if debug:
                    logging.debug("Got shell prompt -- logged in")
                break
        except aexpect.ExpectTimeoutError, e:
            raise LoginTimeoutError(e.output)
        except aexpect.ExpectProcessTerminatedError, e:
            raise LoginProcessTerminatedError(e.status, e.output)
  530. def remote_login(client, host, port, username, password, prompt, linesep="\n",
  531. log_filename=None, timeout=10):
  532. """
  533. Log into a remote host (guest) using SSH/Telnet/Netcat.
  534. @param client: The client to use ('ssh', 'telnet' or 'nc')
  535. @param host: Hostname or IP address
  536. @param port: Port to connect to
  537. @param username: Username (if required)
  538. @param password: Password (if required)
  539. @param prompt: Shell prompt (regular expression)
  540. @param linesep: The line separator to use when sending lines
  541. (e.g. '\\n' or '\\r\\n')
  542. @param log_filename: If specified, log all output to this file
  543. @param timeout: The maximal time duration (in seconds) to wait for
  544. each step of the login procedure (i.e. the "Are you sure" prompt
  545. or the password prompt)
  546. @raise LoginBadClientError: If an unknown client is requested
  547. @raise: Whatever _remote_login() raises
  548. @return: A ShellSession object.
  549. """
  550. if client == "ssh":
  551. cmd = ("ssh -o UserKnownHostsFile=/dev/null "
  552. "-o PreferredAuthentications=password -p %s %s@%s" %
  553. (port, username, host))
  554. elif client == "telnet":
  555. cmd = "telnet -l %s %s %s" % (username, host, port)
  556. elif client == "nc":
  557. cmd = "nc %s %s" % (host, port)
  558. else:
  559. raise LoginBadClientError(client)
  560. logging.debug("Login command: '%s'", cmd)
  561. session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
  562. try:
  563. _remote_login(session, username, password, prompt, timeout)
  564. except Exception:
  565. session.close()
  566. raise
  567. if log_filename:
  568. session.set_output_func(log_line)
  569. session.set_output_params((log_filename,))
  570. return session
  571. def wait_for_login(client, host, port, username, password, prompt, linesep="\n",
  572. log_filename=None, timeout=240, internal_timeout=10):
  573. """
  574. Make multiple attempts to log into a remote host (guest) until one succeeds
  575. or timeout expires.
  576. @param timeout: Total time duration to wait for a successful login
  577. @param internal_timeout: The maximal time duration (in seconds) to wait for
  578. each step of the login procedure (e.g. the "Are you sure" prompt
  579. or the password prompt)
  580. @see: remote_login()
  581. @raise: Whatever remote_login() raises
  582. @return: A ShellSession object.
  583. """
  584. logging.debug("Attempting to log into %s:%s using %s (timeout %ds)",
  585. host, port, client, timeout)
  586. end_time = time.time() + timeout
  587. while time.time() < end_time:
  588. try:
  589. return remote_login(client, host, port, username, password, prompt,
  590. linesep, log_filename, internal_timeout)
  591. except LoginError, e:
  592. logging.debug(e)
  593. time.sleep(2)
  594. # Timeout expired; try one more time but don't catch exceptions
  595. return remote_login(client, host, port, username, password, prompt,
  596. linesep, log_filename, internal_timeout)
def _remote_scp(session, password_list, transfer_timeout=600, login_timeout=20):
    """
    Transfer file(s) to a remote host (guest) using SCP.  Wait for questions
    and provide answers.  If login_timeout expires while waiting for output
    from the child (e.g. a password prompt), fail.  If transfer_timeout expires
    while waiting for the transfer to complete, fail.

    @brief: Transfer files using SCP, given a command line.

    @param session: An Expect or ShellSession instance to operate on
    @param password_list: Password list to send in reply to the password prompt
    @param transfer_timeout: The time duration (in seconds) to wait for the
            transfer to complete.
    @param login_timeout: The maximal time duration (in seconds) to wait for
            each step of the login procedure (i.e. the "Are you sure" prompt or
            the password prompt)
    @raise SCPAuthenticationError: If authentication fails
    @raise SCPTransferTimeoutError: If the transfer fails to complete in time
    @raise SCPTransferFailedError: If the process terminates with a nonzero
            exit code
    @raise SCPError: If some other error occurs
    """
    password_prompt_count = 0
    # Start with the short login timeout; once a password has been sent the
    # timeout is raised to transfer_timeout.
    timeout = login_timeout
    authentication_done = False
    # One password expected per endpoint: len == 1 for a local<->remote
    # copy, len == 2 for a remote-to-remote copy.
    scp_type = len(password_list)
    while True:
        try:
            match, text = session.read_until_last_line_matches(
                [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"],
                timeout=timeout, internal_timeout=0.5)
            if match == 0:  # "Are you sure you want to continue connecting"
                logging.debug("Got 'Are you sure...', sending 'yes'")
                session.sendline("yes")
                continue
            elif match == 1:  # "password:"
                if password_prompt_count == 0:
                    # First prompt: send the first password.
                    logging.debug("Got password prompt, sending '%s'" %
                                  password_list[password_prompt_count])
                    session.sendline(password_list[password_prompt_count])
                    password_prompt_count += 1
                    timeout = transfer_timeout
                    if scp_type == 1:
                        authentication_done = True
                    continue
                elif password_prompt_count == 1 and scp_type == 2:
                    # Second prompt of a remote-to-remote copy.
                    logging.debug("Got password prompt, sending '%s'" %
                                  password_list[password_prompt_count])
                    session.sendline(password_list[password_prompt_count])
                    password_prompt_count += 1
                    timeout = transfer_timeout
                    authentication_done = True
                    continue
                else:
                    raise SCPAuthenticationError("Got password prompt twice",
                                                 text)
            elif match == 2:  # "lost connection"
                raise SCPError("SCP client said 'lost connection'", text)
        except aexpect.ExpectTimeoutError, e:
            # After authentication a timeout means the transfer stalled;
            # before it, the login itself timed out.
            if authentication_done:
                raise SCPTransferTimeoutError(e.output)
            else:
                raise SCPAuthenticationTimeoutError(e.output)
        except aexpect.ExpectProcessTerminatedError, e:
            if e.status == 0:
                logging.debug("SCP process terminated with status 0")
                break
            else:
                raise SCPTransferFailedError(e.status, e.output)
  664. def remote_scp(command, password_list, log_filename=None, transfer_timeout=600,
  665. login_timeout=20):
  666. """
  667. Transfer file(s) to a remote host (guest) using SCP.
  668. @brief: Transfer files using SCP, given a command line.
  669. @param command: The command to execute
  670. (e.g. "scp -r foobar root@localhost:/tmp/").
  671. @param password_list: Password list to send in reply to a password prompt.
  672. @param log_filename: If specified, log all output to this file
  673. @param transfer_timeout: The time duration (in seconds) to wait for the
  674. transfer to complete.
  675. @param login_timeout: The maximal time duration (in seconds) to wait for
  676. each step of the login procedure (i.e. the "Are you sure" prompt
  677. or the password prompt)
  678. @raise: Whatever _remote_scp() raises
  679. """
  680. logging.debug("Trying to SCP with command '%s', timeout %ss",
  681. command, transfer_timeout)
  682. if log_filename:
  683. output_func = log_line
  684. output_params = (log_filename,)
  685. else:
  686. output_func = None
  687. output_params = ()
  688. session = aexpect.Expect(command,
  689. output_func=output_func,
  690. output_params=output_params)
  691. try:
  692. _remote_scp(session, password_list, transfer_timeout, login_timeout)
  693. finally:
  694. session.close()
  695. def scp_to_remote(host, port, username, password, local_path, remote_path,
  696. log_filename=None, timeout=600):
  697. """
  698. Copy files to a remote host (guest) through scp.
  699. @param host: Hostname or IP address
  700. @param username: Username (if required)
  701. @param password: Password (if required)
  702. @param local_path: Path on the local machine where we are copying from
  703. @param remote_path: Path on the remote machine where we are copying to
  704. @param log_filename: If specified, log all output to this file
  705. @param timeout: The time duration (in seconds) to wait for the transfer
  706. to complete.
  707. @raise: Whatever remote_scp() raises
  708. """
  709. command = ("scp -v -o UserKnownHostsFile=/dev/null "
  710. "-o PreferredAuthentications=password -r -P %s %s %s@%s:%s" %
  711. (port, local_path, username, host, remote_path))
  712. password_list = []
  713. password_list.append(password)
  714. return remote_scp(command, password_list, log_filename, timeout)
  715. def scp_from_remote(host, port, username, password, remote_path, local_path,
  716. log_filename=None, timeout=600):
  717. """
  718. Copy files from a remote host (guest).
  719. @param host: Hostname or IP address
  720. @param username: Username (if required)
  721. @param password: Password (if required)
  722. @param local_path: Path on the local machine where we are copying from
  723. @param remote_path: Path on the remote machine where we are copying to
  724. @param log_filename: If specified, log all output to this file
  725. @param timeout: The time duration (in seconds) to wait for the transfer
  726. to complete.
  727. @raise: Whatever remote_scp() raises
  728. """
  729. command = ("scp -v -o UserKnownHostsFile=/dev/null "
  730. "-o PreferredAuthentications=password -r -P %s %s@%s:%s %s" %
  731. (port, username, host, remote_path, local_path))
  732. password_list = []
  733. password_list.append(password)
  734. remote_scp(command, password_list, log_filename, timeout)
  735. def scp_between_remotes(src, dst, port, s_passwd, d_passwd, s_name, d_name,
  736. s_path, d_path, log_filename=None, timeout=600):
  737. """
  738. Copy files from a remote host (guest) to another remote host (guest).
  739. @param src/dst: Hostname or IP address of src and dst
  740. @param s_name/d_name: Username (if required)
  741. @param s_passwd/d_passwd: Password (if required)
  742. @param s_path/d_path: Path on the remote machine where we are copying
  743. from/to
  744. @param log_filename: If specified, log all output to this file
  745. @param timeout: The time duration (in seconds) to wait for the transfer
  746. to complete.
  747. @return: True on success and False on failure.
  748. """
  749. command = ("scp -v -o UserKnownHostsFile=/dev/null -o "
  750. "PreferredAuthentications=password -r -P %s %s@%s:%s %s@%s:%s" %
  751. (port, s_name, src, s_path, d_name, dst, d_path))
  752. password_list = []
  753. password_list.append(s_passwd)
  754. password_list.append(d_passwd)
  755. return remote_scp(command, password_list, log_filename, timeout)
  756. def copy_files_to(address, client, username, password, port, local_path,
  757. remote_path, log_filename=None, verbose=False, timeout=600):
  758. """
  759. Copy files to a remote host (guest) using the selected client.
  760. @param client: Type of transfer client
  761. @param username: Username (if required)
  762. @param password: Password (if requried)
  763. @param local_path: Path on the local machine where we are copying from
  764. @param remote_path: Path on the remote machine where we are copying to
  765. @param address: Address of remote host(guest)
  766. @param log_filename: If specified, log all output to this file (SCP only)
  767. @param verbose: If True, log some stats using logging.debug (RSS only)
  768. @param timeout: The time duration (in seconds) to wait for the transfer to
  769. complete.
  770. @raise: Whatever remote_scp() raises
  771. """
  772. if client == "scp":
  773. scp_to_remote(address, port, username, password, local_path,
  774. remote_path, log_filename, timeout)
  775. elif client == "rss":
  776. log_func = None
  777. if verbose:
  778. log_func = logging.debug
  779. c = rss_client.FileUploadClient(address, port, log_func)
  780. c.upload(local_path, remote_path, timeout)
  781. c.close()
  782. def copy_files_from(address, client, username, password, port, remote_path,
  783. local_path, log_filename=None, verbose=False, timeout=600):
  784. """
  785. Copy files from a remote host (guest) using the selected client.
  786. @param client: Type of transfer client
  787. @param username: Username (if required)
  788. @param password: Password (if requried)
  789. @param remote_path: Path on the remote machine where we are copying from
  790. @param local_path: Path on the local machine where we are copying to
  791. @param address: Address of remote host(guest)
  792. @param log_filename: If specified, log all output to this file (SCP only)
  793. @param verbose: If True, log some stats using logging.debug (RSS only)
  794. @param timeout: The time duration (in seconds) to wait for the transfer to
  795. complete.
  796. @raise: Whatever remote_scp() raises
  797. """
  798. if client == "scp":
  799. scp_from_remote(address, port, username, password, remote_path,
  800. local_path, log_filename, timeout)
  801. elif client == "rss":
  802. log_func = None
  803. if verbose:
  804. log_func = logging.debug
  805. c = rss_client.FileDownloadClient(address, port, log_func)
  806. c.download(remote_path, local_path, timeout)
  807. c.close()
  808. # The following are utility functions related to ports.
  809. def is_port_free(port, address):
  810. """
  811. Return True if the given port is available for use.
  812. @param port: Port number
  813. """
  814. try:
  815. s = socket.socket()
  816. #s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
  817. if address == "localhost":
  818. s.bind(("localhost", port))
  819. free = True
  820. else:
  821. s.connect((address, port))
  822. free = False
  823. except socket.error:
  824. if address == "localhost":
  825. free = False
  826. else:
  827. free = True
  828. s.close()
  829. return free
  830. def find_free_port(start_port, end_port, address="localhost"):
  831. """
  832. Return a host free port in the range [start_port, end_port].
  833. @param start_port: First port that will be checked.
  834. @param end_port: Port immediately after the last one that will be checked.
  835. """
  836. for i in range(start_port, end_port):
  837. if is_port_free(i, address):
  838. return i
  839. return None
  840. def find_free_ports(start_port, end_port, count, address="localhost"):
  841. """
  842. Return count of host free ports in the range [start_port, end_port].
  843. @count: Initial number of ports known to be free in the range.
  844. @param start_port: First port that will be checked.
  845. @param end_port: Port immediately after the last one that will be checked.
  846. """
  847. ports = []
  848. i = start_port
  849. while i < end_port and count > 0:
  850. if is_port_free(i, address):
  851. ports.append(i)
  852. count -= 1
  853. i += 1
  854. return ports
# An easy way to log lines to files when the logging system can't be used
# Cache of open file objects created by log_line(), keyed by filename.
_open_log_files = {}
# Base directory against which relative filenames passed to log_line()
# are resolved; changed via set_log_file_dir().
_log_file_dir = "/tmp"
  858. def log_line(filename, line):
  859. """
  860. Write a line to a file. '\n' is appended to the line.
  861. @param filename: Path of file to write to, either absolute or relative to
  862. the dir set by set_log_file_dir().
  863. @param line: Line to write.
  864. """
  865. global _open_log_files, _log_file_dir
  866. if filename not in _open_log_files:
  867. path = get_path(_log_file_dir, filename)
  868. try:
  869. os.makedirs(os.path.dirname(path))
  870. except OSError:
  871. pass
  872. _open_log_files[filename] = open(path, "w")
  873. timestr = time.strftime("%Y-%m-%d %H:%M:%S")
  874. _open_log_files[filename].write("%s: %s\n" % (timestr, line))
  875. _open_log_files[filename].flush()
def set_log_file_dir(dir):
    """
    Set the base directory for log files created by log_line().

    Relative filenames passed to log_line() are resolved against this
    directory (defaults to "/tmp").

    @param dir: Directory for log files.
    """
    global _log_file_dir
    _log_file_dir = dir
  883. # The following are miscellaneous utility functions.
  884. def get_path(base_path, user_path):
  885. """
  886. Translate a user specified path to a real path.
  887. If user_path is relative, append it to base_path.
  888. If user_path is absolute, return it as is.
  889. @param base_path: The base path of relative user specified paths.
  890. @param user_path: The user specified path.
  891. """
  892. if os.path.isabs(user_path):
  893. return user_path
  894. else:
  895. return os.path.join(base_path, user_path)
  896. def generate_random_string(length):
  897. """
  898. Return a random string using alphanumeric characters.
  899. @length: length of the string that will be generated.
  900. """
  901. r = random.SystemRandom()
  902. str = ""
  903. chars = string.letters + string.digits
  904. while length > 0:
  905. str += r.choice(chars)
  906. length -= 1
  907. return str
  908. def generate_random_id():
  909. """
  910. Return a random string suitable for use as a qemu id.
  911. """
  912. return "id" + generate_random_string(6)
  913. def generate_tmp_file_name(file, ext=None, dir='/tmp/'):
  914. """
  915. Returns a temporary file name. The file is not created.
  916. """
  917. while True:
  918. file_name = (file + '-' + time.strftime("%Y%m%d-%H%M%S-") +
  919. generate_random_string(4))
  920. if ext:
  921. file_name += '.' + ext
  922. file_name = os.path.join(dir, file_name)
  923. if not os.path.exists(file_name):
  924. break
  925. return file_name
  926. def format_str_for_message(str):
  927. """
  928. Format str so that it can be appended to a message.
  929. If str consists of one line, prefix it with a space.
  930. If str consists of multiple lines, prefix it with a newline.
  931. @param str: string that will be formatted.
  932. """
  933. lines = str.splitlines()
  934. num_lines = len(lines)
  935. str = "\n".join(lines)
  936. if num_lines == 0:
  937. return ""
  938. elif num_lines == 1:
  939. return " " + str
  940. else:
  941. return "\n" + str
  942. def wait_for(func, timeout, first=0.0, step=1.0, text=None):
  943. """
  944. If func() evaluates to True before timeout expires, return the
  945. value of func(). Otherwise return None.
  946. @brief: Wait until func() evaluates to True.
  947. @param timeout: Timeout in seconds
  948. @param first: Time to sleep before first attempt
  949. @param steps: Time to sleep between attempts in seconds
  950. @param text: Text to print while waiting, for debug purposes
  951. """
  952. start_time = time.time()
  953. end_time = time.time() + timeout
  954. time.sleep(first)
  955. while time.time() < end_time:
  956. if text:
  957. logging.debug("%s (%f secs)", text, (time.time() - start_time))
  958. output = func()
  959. if output:
  960. return output
  961. time.sleep(step)
  962. return None
  963. def get_hash_from_file(hash_path, dvd_basename):
  964. """
  965. Get the a hash from a given DVD image from a hash file
  966. (Hash files are usually named MD5SUM or SHA1SUM and are located inside the
  967. download directories of the DVDs)
  968. @param hash_path: Local path to a hash file.
  969. @param cd_image: Basename of a CD image
  970. """
  971. hash_file = open(hash_path, 'r')
  972. for line in hash_file.readlines():
  973. if dvd_basename in line:
  974. return line.split()[0]
  975. def run_tests(parser, job):
  976. """
  977. Runs the sequence of KVM tests based on the list of dictionaries
  978. generated by the configuration system, handling dependencies.
  979. @param parser: Config parser object.
  980. @param job: Autotest job object.
  981. @return: True, if all tests ran passed, False if any of them failed.
  982. """
  983. for i, d in enumerate(parser.get_dicts()):
  984. logging.info("Test %4d: %s" % (i + 1, d["shortname"]))
  985. status_dict = {}
  986. failed = False
  987. for dict in parser.get_dicts():
  988. if dict.get("skip") == "yes":
  989. continue
  990. dependencies_satisfied = True
  991. for dep in dict.get("dep"):
  992. for test_name in status_dict.keys():
  993. if not dep in test_name:
  994. continue
  995. # So the only really non-fatal state is WARN,
  996. # All the others make it not safe to proceed with dependency
  997. # execution
  998. if status_dict[test_name] not in ['GOOD', 'WARN']:
  999. dependencies_satisfied = False
  1000. break
  1001. test_iterations = int(dict.get("iterations", 1))
  1002. test_tag = dict.get("shortname")
  1003. if dependencies_satisfied:
  1004. # Setting up profilers during test execution.
  1005. profilers = dict.get("profilers", "").split()
  1006. for profiler in profilers:
  1007. job.profilers.add(profiler)
  1008. # We need only one execution, profiled, hence we're passing
  1009. # the profile_only parameter to job.run_test().
  1010. profile_only = bool(profilers) or None
  1011. current_status = job.run_test_detail(dict.get("vm_type"),
  1012. params=dict,
  1013. tag=test_tag,
  1014. iterations=test_iterations,
  1015. profile_only=profile_only)
  1016. for profiler in profilers:
  1017. job.profilers.delete(profiler)
  1018. else:
  1019. # We will force the test to fail as TestNA during preprocessing
  1020. dict['dependency_failed'] = 'yes'
  1021. current_status = job.run_test_detail(dict.get("vm_type"),
  1022. params=dict,
  1023. tag=test_tag,
  1024. iterations=test_iterations)
  1025. if not current_status:
  1026. failed = True
  1027. status_dict[dict.get("name")] = current_status
  1028. return not failed
  1029. def display_attributes(instance):
  1030. """
  1031. Inspects a given class instance attributes and displays them, convenient
  1032. for debugging.
  1033. """
  1034. logging.debug("Attributes set:")
  1035. for member in inspect.getmembers(instance):
  1036. name, value = member
  1037. attribute = getattr(instance, name)
  1038. if not (name.startswith("__") or callable(attribute) or not value):
  1039. logging.debug(" %s: %s", name, value)
  1040. def get_full_pci_id(pci_id):
  1041. """
  1042. Get full PCI ID of pci_id.
  1043. @param pci_id: PCI ID of a device.
  1044. """
  1045. cmd = "lspci -D | awk '/%s/ {print $1}'" % pci_id
  1046. status, full_id = commands.getstatusoutput(cmd)
  1047. if status != 0:
  1048. return None
  1049. return full_id
  1050. def get_vendor_from_pci_id(pci_id):
  1051. """
  1052. Check out the device vendor ID according to pci_id.
  1053. @param pci_id: PCI ID of a device.
  1054. """
  1055. cmd = "lspci -n | awk '/%s/ {print $3}'" % pci_id
  1056. return re.sub(":", " ", commands.getoutput(cmd))
  1057. class Flag(str):
  1058. """
  1059. Class for easy merge cpuflags.
  1060. """
  1061. aliases = {}
  1062. def __new__(cls, flag):
  1063. if flag in Flag.aliases:
  1064. flag = Flag.aliases[flag]
  1065. return str.__new__(cls, flag)
  1066. def __eq__(self, other):
  1067. s = set(self.split("|"))
  1068. o = set(other.split("|"))
  1069. if s & o:
  1070. return True
  1071. else:
  1072. return False
  1073. def __hash__(self, *args, **kwargs):
  1074. return 0
# Maps CPU flags (as Flag instances, so '|'-joined alternative spellings
# compare equal) to the set of cpuflags stress test names exercising them.
kvm_map_flags_to_test = {
    Flag('avx')                        :set(['avx']),
    Flag('sse3')                       :set(['sse3']),
    Flag('ssse3')                      :set(['ssse3']),
    Flag('sse4.1|sse4_1|sse4.2|sse4_2'):set(['sse4']),
    Flag('aes')                        :set(['aes','pclmul']),
    Flag('pclmuldq')                   :set(['pclmul']),
    Flag('pclmulqdq')                  :set(['pclmul']),
    Flag('rdrand')                     :set(['rdrand']),
    Flag('sse4a')                      :set(['sse4a']),
    Flag('fma4')                       :set(['fma4']),
    Flag('xop')                        :set(['xop']),
    }
# Alternate spellings of the same CPU flag (qemu vs. /proc/cpuinfo names).
kvm_map_flags_aliases = {
    'sse4.1'                :'sse4_1',
    'sse4.2'                :'sse4_2',
    'pclmulqdq'             :'pclmuldq',
    }
  1093. def kvm_flags_to_stresstests(flags):
  1094. """
  1095. Covert [cpu flags] to [tests]
  1096. @param cpuflags: list of cpuflags
  1097. @return: Return tests like string.
  1098. """
  1099. tests = set([])
  1100. for f in flags:
  1101. tests |= kvm_map_flags_to_test[f]
  1102. param = ""
  1103. for f in tests:
  1104. param += ","+f
  1105. return param
  1106. def get_cpu_flags():
  1107. """
  1108. Returns a list of the CPU flags
  1109. """
  1110. flags_re = re.compile(r'^flags\s*:(.*)')
  1111. for line in open('/proc/cpuinfo').readlines():
  1112. match = flags_re.match(line)
  1113. if match:
  1114. return match.groups()[0].split()
  1115. return []
  1116. def get_cpu_vendor(cpu_flags=[], verbose=True):
  1117. """
  1118. Returns the name of the CPU vendor, either intel, amd or unknown
  1119. """
  1120. if not cpu_flags:
  1121. cpu_flags = get_cpu_flags()
  1122. if 'vmx' in cpu_flags:
  1123. vendor = 'intel'
  1124. elif 'svm' in cpu_flags:
  1125. vendor = 'amd'
  1126. else:
  1127. vendor = 'unknown'
  1128. if verbose:
  1129. logging.debug("Detected CPU vendor as '%s'", vendor)
  1130. return vendor
  1131. def get_archive_tarball_name(source_dir, tarball_name, compression):
  1132. '''
  1133. Get the name for a tarball file, based on source, name and compression
  1134. '''
  1135. if tarball_name is None:
  1136. tarball_name = os.path.basename(source_dir)
  1137. if not tarball_name.endswith('.tar'):
  1138. tarball_name = '%s.tar' % tarball_name
  1139. if compression and not tarball_name.endswith('.%s' % compression):
  1140. tarball_name = '%s.%s' % (tarball_name, compression)
  1141. return tarball_name
  1142. def archive_as_tarball(source_dir, dest_dir, tarball_name=None,
  1143. compression='bz2', verbose=True):
  1144. '''
  1145. Saves the given source directory to the given destination as a tarball
  1146. If the name of the archive is omitted, it will be taken from the
  1147. source_dir. If it is an absolute path, dest_dir will be ignored. But,
  1148. if both the destination directory and tarball anem is given, and the
  1149. latter is not an absolute path, they will be combined.
  1150. For archiving directory '/tmp' in '/net/server/backup' as file
  1151. 'tmp.tar.bz2', simply use:
  1152. >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup')
  1153. To save the file it with a different name, say 'host1-tmp.tar.bz2'
  1154. and save it under '/net/server/backup', use:
  1155. >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup',
  1156. 'host1-tmp')
  1157. To save with gzip compression instead (resulting in the file
  1158. '/net/server/backup/host1-tmp.tar.gz'), use:
  1159. >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup',
  1160. 'host1-tmp', 'gz')
  1161. '''
  1162. tarball_name = get_archive_tarball_name(source_dir,
  1163. tarball_name,
  1164. compression)
  1165. if not os.path.isabs(tarball_name):
  1166. tarball_path = os.path.join(dest_dir, tarball_name)
  1167. else:
  1168. tarball_path = tarball_name
  1169. if verbose:
  1170. logging.debug('Archiving %s as %s' % (source_dir,
  1171. tarball_path))
  1172. os.chdir(os.path.dirname(source_dir))
  1173. tarball = tarfile.TarFile(name=tarball_path, mode='w')
  1174. tarball = tarball.open(name=tarball_path, mode='w:%s' % compression)
  1175. tarball.add(os.path.basename(source_dir))
  1176. tarball.close()
class Thread(threading.Thread):
    """
    Run a function in a background thread.

    Unlike threading.Thread, join() returns the target's return value and
    re-raises (with context) any exception the target raised.
    """
    def __init__(self, target, args=(), kwargs={}):
        """
        Initialize the instance.

        @param target: Function to run in the thread.
        @param args: Arguments to pass to target.
        @param kwargs: Keyword arguments to pass to target.
        """
        threading.Thread.__init__(self)
        self._target = target
        self._args = args
        self._kwargs = kwargs

    def run(self):
        """
        Run target (passed to the constructor).  No point in calling this
        function directly.  Call start() to make this function run in a new
        thread.

        Stores the target's return value in self._retval and, if the target
        raises, the sys.exc_info() triple in self._e for join() to re-raise.
        """
        self._e = None
        self._retval = None
        try:
            try:
                self._retval = self._target(*self._args, **self._kwargs)
            except Exception:
                # Remember the exception so join() can re-raise it in the
                # joining thread.
                self._e = sys.exc_info()
                raise
        finally:
            # Avoid circular references (start() may be called only once so
            # it's OK to delete these)
            del self._target, self._args, self._kwargs

    def join(self, timeout=None, suppress_exception=False):
        """
        Join the thread.  If target raised an exception, re-raise it.
        Otherwise, return the value returned by target.

        @param timeout: Timeout value to pass to threading.Thread.join().
        @param suppress_exception: If True, don't re-raise the exception.
        """
        threading.Thread.join(self, timeout)
        try:
            if self._e:
                if not suppress_exception:
                    # Because the exception was raised in another thread, we
                    # need to explicitly insert the current context into it
                    s = error.exception_context(self._e[1])
                    s = error.join_contexts(error.get_context(), s)
                    error.set_exception_context(self._e[1], s)
                    raise self._e[0], self._e[1], self._e[2]
            else:
                return self._retval
        finally:
            # Avoid circular references (join() may be called multiple times
            # so we can't delete these)
            self._e = None
            self._retval = None
  1234. def parallel(targets):
  1235. """
  1236. Run multiple functions in parallel.
  1237. @param targets: A sequence of tuples or functions. If it's a sequence of
  1238. tuples, each tuple will be interpreted as (target, args, kwargs) or
  1239. (target, args) or (target,) depending on its length. If it's a
  1240. sequence of functions, the functions will be called without
  1241. arguments.
  1242. @return: A list of the values returned by the functions called.
  1243. """
  1244. threads = []
  1245. for target in targets:
  1246. if isinstance(target, tuple) or isinstance(target, list):
  1247. t = Thread(*target)
  1248. else:
  1249. t = Thread(target)
  1250. threads.append(t)
  1251. t.start()
  1252. return [t.join() for t in threads]
class VirtLoggingConfig(logging_config.LoggingConfig):
    """
    Used with the sole purpose of providing convenient logging setup
    for the KVM test auxiliary programs.
    """
    def configure_logging(self, results_dir=None, verbose=False):
        # Console logging is always enabled; results_dir is accepted for
        # interface compatibility with the base class but not used here.
        super(VirtLoggingConfig, self).configure_logging(use_console=True,
                                                         verbose=verbose)
  1261. class PciAssignable(object):
  1262. """
  1263. Request PCI assignable devices on host. It will check whether to request
  1264. PF (physical Functions) or VF (Virtual Functions).
  1265. """
  1266. def __init__(self, type="vf", driver=None, driver_option=None,
  1267. names=None, devices_requested=None):
  1268. """
  1269. Initialize parameter 'type' which could be:
  1270. vf: Virtual Functions
  1271. pf: Physical Function (actual hardware)
  1272. mixed: Both includes VFs and PFs
  1273. If pass through Physical NIC cards, we need to specify which devices
  1274. to be assigned, e.g. 'eth1 eth2'.
  1275. If pass through Virtual Functions, we need to specify how many vfs
  1276. are going to be assigned, e.g. passthrough_count = 8 and max_vfs in
  1277. config file.
  1278. @param type: PCI device type.
  1279. @param driver: Kernel module for the PCI assignable device.
  1280. @param driver_option: Module option to specify the maximum number of
  1281. VFs (eg 'max_vfs=7')
  1282. @param names: Physical NIC cards correspondent network interfaces,
  1283. e.g.'eth1 eth2 ...'
  1284. @param devices_requested: Number of devices being requested.
  1285. """
  1286. self.type = type
  1287. self.driver = driver
  1288. self.driver_option = driver_option
  1289. if names:
  1290. self.name_list = names.split()
  1291. if devices_requested:
  1292. self.devices_requested = int(devices_requested)
  1293. else:
  1294. self.devices_requested = None
  1295. def _get_pf_pci_id(self, name, search_str):
  1296. """
  1297. Get the PF PCI ID according to name.
  1298. @param name: Name of the PCI device.
  1299. @param search_str: Search string to be used on lspci.
  1300. """
  1301. cmd = "ethtool -i %s | awk '/bus-info/ {print $2}'" % name
  1302. s, pci_id = commands.getstatusoutput(cmd)
  1303. if not (s or "Cannot get driver information" in pci_id):
  1304. return pci_id[5:]
  1305. cmd = "lspci | awk '/%s/ {print $1}'" % search_str
  1306. pci_ids = [id for id in commands.getoutput(cmd).splitlines()]
  1307. nic_id = int(re.search('[0-9]+', name).group(0))
  1308. if (len(pci_ids) - 1) < nic_id:
  1309. return None
  1310. return pci_ids[nic_id]
  1311. def _release_dev(self, pci_id):
  1312. """
  1313. Release a single PCI device.
  1314. @param pci_id: PCI ID of a given PCI device.
  1315. """
  1316. base_dir = "/sys/bus/pci"
  1317. full_id = get_full_pci_id(pci_id)
  1318. vendor_id = get_vendor_from_pci_id(pci_id)
  1319. drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id)
  1320. if 'pci-stub' in os.readlink(drv_path):
  1321. cmd = "echo '%s' > %s/new_id" % (vendor_id, drv_path)
  1322. if os.system(cmd):
  1323. return False
  1324. stub_path = os.path.join(base_dir, "drivers/pci-stub")
  1325. cmd = "echo '%s' > %s/unbind" % (full_id, stub_path)
  1326. if os.system(cmd):
  1327. return False
  1328. driver = self.dev_drivers[pci_id]
  1329. cmd = "echo '%s' > %s/bind" % (full_id, driver)
  1330. if os.system(cmd):
  1331. return False
  1332. return True
  1333. def get_vf_devs(self):
  1334. """
  1335. Catch all VFs PCI IDs.
  1336. @return: List with all PCI IDs for the Virtual Functions avaliable
  1337. """
  1338. if not self.sr_iov_setup():
  1339. return []
  1340. cmd = "lspci | awk '/Virtual Function/ {print $1}'"
  1341. return commands.getoutput(cmd).split()
  1342. def get_pf_devs(self):
  1343. """
  1344. Catch all PFs PCI IDs.
  1345. @return: List with all PCI IDs for the physical hardware requested
  1346. """
  1347. pf_ids = []
  1348. for name in self.name_list:
  1349. pf_id = self._get_pf_pci_id(name, "Ethernet")
  1350. if not pf_id:
  1351. continue
  1352. pf_ids.append(pf_id)
  1353. return pf_ids
  1354. def get_devs(self, count):
  1355. """
  1356. Check out all devices' PCI IDs according to their name.
  1357. @param count: count number of PCI devices needed for pass through
  1358. @return: a list of all devices' PCI IDs
  1359. """
  1360. if self.type == "vf":
  1361. vf_ids = self.get_vf_devs()
  1362. elif self.type == "pf":
  1363. vf_ids = self.get_pf_devs()
  1364. elif self.type == "mixed":
  1365. vf_ids = self.get_vf_devs()
  1366. vf_ids.extend(self.get_pf_devs())
  1367. return vf_ids[0:count]
  1368. def get_vfs_count(self):
  1369. """
  1370. Get VFs count number according to lspci.
  1371. """
  1372. # FIXME: Need to think out a method of identify which
  1373. # 'virtual function' belongs to which physical card considering
  1374. # that if the host has more than one 82576 card. PCI_ID?
  1375. cmd = "lspci | grep 'Virtual Function' | wc -l"
  1376. return int(commands.getoutput(cmd))
  1377. def check_vfs_count(self):
  1378. """
  1379. Check VFs count number according to the parameter driver_options.
  1380. """
  1381. # Network card 82576 has two network interfaces and each can be
  1382. # virtualized up to 7 virtual functions, therefore we multiply
  1383. # two for the value of driver_option 'max_vfs'.
  1384. expected_count = int((re.findall("(\d)", self.driver_option)[0])) * 2
  1385. return (self.get_vfs_count == expected_count)
  1386. def is_binded_to_stub(self, full_id):
  1387. """
  1388. Verify whether the device with full_id is already binded to pci-stub.
  1389. @param full_id: Full ID for the given PCI device
  1390. """
  1391. base_dir = "/sys/bus/pci"
  1392. stub_path = os.path.join(base_dir, "drivers/pci-stub")
  1393. if os.path.exists(os.path.join(stub_path, full_id)):
  1394. return True
  1395. return False
  1396. def sr_iov_setup(self):
  1397. """
  1398. Ensure the PCI device is working in sr_iov mode.
  1399. Check if the PCI hardware device drive is loaded with the appropriate,
  1400. parameters (number of VFs), and if it's not, perform setup.
  1401. @return: True, if the setup was completed successfuly, False otherwise.
  1402. """
  1403. re_probe = False
  1404. s, o = commands.getstatusoutput('lsmod | grep %s' % self.driver)
  1405. if s:
  1406. re_probe = True
  1407. elif not self.check_vfs_count():
  1408. os.system("modprobe -r %s" % self.driver)
  1409. re_probe = True
  1410. else:
  1411. return True
  1412. # Re-probe driver with proper number of VFs
  1413. if re_probe:
  1414. cmd = "modprobe %s %s" % (self.driver, self.driver_option)
  1415. logging.info("Loading the driver '%s' with option '%s'",
  1416. self.driver, self.driver_option)
  1417. s, o = commands.getstatusoutput(cmd)
  1418. if s:
  1419. return False
  1420. return True
  1421. def request_devs(self):
  1422. """
  1423. Implement setup process: unbind the PCI device and then bind it
  1424. to the pci-stub driver.
  1425. @return: a list of successfully requested devices' PCI IDs.
  1426. """
  1427. base_dir = "/sys/bus/pci"
  1428. stub_path = os.path.join(base_dir, "drivers/pci-stub")
  1429. self.pci_ids = self.get_devs(self.devices_requested)
  1430. logging.debug("The following pci_ids were found: %s", self.pci_ids)
  1431. requested_pci_ids = []
  1432. self.dev_drivers = {}
  1433. # Setup all devices specified for assignment to guest
  1434. for pci_id in self.pci_ids:
  1435. full_id = get_full_pci_id(pci_id)
  1436. if not full_id:
  1437. continue
  1438. drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id)
  1439. dev_prev_driver = os.path.realpath(os.path.join(drv_path,
  1440. os.readlink(drv_path)))
  1441. self.dev_drivers[pci_id] = dev_prev_driver
  1442. # Judge whether the device driver has been binded to stub
  1443. if not self.is_binded_to_stub(full_id):
  1444. logging.debug("Binding device %s to stub", full_id)
  1445. vendor_id = get_vendor_from_pci_id(pci_id)
  1446. stub_new_id = os.path.join(stub_path, 'new_id')
  1447. unbind_dev = os.path.join(drv_path, 'unbind')
  1448. stub_bind = os.path.join(stub_path, 'bind')
  1449. info_write_to_files = [(vendor_id, stub_new_id),
  1450. (full_id, unbind_dev),
  1451. (full_id, stub_bind)]
  1452. for content, file in info_write_to_files:
  1453. try:
  1454. utils.open_write_close(file, content)
  1455. except IOError:
  1456. logging.debug("Failed to write %s to file %s", content,
  1457. file)
  1458. continue
  1459. if not self.is_binded_to_stub(full_id):
  1460. logging.error("Binding device %s to stub failed", pci_id)
  1461. continue
  1462. else:
  1463. logging.debug("Device %s already binded to stub", pci_id)
  1464. requested_pci_ids.append(pci_id)
  1465. self.pci_ids = requested_pci_ids
  1466. return self.pci_ids
  1467. def release_devs(self):
  1468. """
  1469. Release all PCI devices currently assigned to VMs back to the
  1470. virtualization host.
  1471. """
  1472. try:
  1473. for pci_id in self.dev_drivers:
  1474. if not self._release_dev(pci_id):
  1475. logging.error("Failed to release device %s to host", pci_id)
  1476. else:
  1477. logging.info("Released device %s successfully", pci_id)
  1478. except Exception:
  1479. return
  1480. class KojiClient(object):
  1481. """
  1482. Stablishes a connection with the build system, either koji or brew.
  1483. This class provides convenience methods to retrieve information on packages
  1484. and the packages themselves hosted on the build system. Packages should be
  1485. specified in the KojiPgkSpec syntax.
  1486. """
  1487. CMD_LOOKUP_ORDER = ['/usr/bin/brew', '/usr/bin/koji' ]
  1488. CONFIG_MAP = {'/usr/bin/brew': '/etc/brewkoji.conf',
  1489. '/usr/bin/koji': '/etc/koji.conf'}
  1490. def __init__(self, cmd=None):
  1491. """
  1492. Verifies whether the system has koji or brew installed, then loads
  1493. the configuration file that will be used to download the files.
  1494. @type cmd: string
  1495. @param cmd: Optional command name, either 'brew' or 'koji'. If not
  1496. set, get_default_command() is used and to look for
  1497. one of them.
  1498. @raise: ValueError
  1499. """
  1500. if not KOJI_INSTALLED:
  1501. raise ValueError('No koji/brew installed on the machine')
  1502. # Instance variables used by many methods
  1503. self.command = None
  1504. self.config = None
  1505. self.config_options = {}
  1506. self.session = None
  1507. # Set koji command or get default
  1508. if cmd is None:
  1509. self.command = self.get_default_command()
  1510. else:
  1511. self.command = cmd
  1512. # Check koji command
  1513. if not self.is_command_valid():
  1514. raise ValueError('Koji command "%s" is not valid' % self.command)
  1515. # Assuming command is valid, set configuration file and read it
  1516. self.config = self.CONFIG_MAP[self.command]
  1517. self.read_config()
  1518. # Setup koji session
  1519. server_url = self.config_options['server']
  1520. session_options = self.get_session_options()
  1521. self.session = koji.ClientSession(server_url,
  1522. session_options)
  1523. def read_config(self, check_is_valid=True):
  1524. '''
  1525. Reads options from the Koji configuration file
  1526. By default it checks if the koji configuration is valid
  1527. @type check_valid: boolean
  1528. @param check_valid: whether to include a check on the configuration
  1529. @raises: ValueError
  1530. @returns: None
  1531. '''
  1532. if check_is_valid:
  1533. if not self.is_config_valid():
  1534. raise ValueError('Koji config "%s" is not valid' % self.config)
  1535. config = ConfigParser.ConfigParser()
  1536. config.read(self.config)
  1537. basename = os.path.basename(self.command)
  1538. for name, value in config.items(basename):
  1539. self.config_options[name] = value
  1540. def get_session_options(self):
  1541. '''
  1542. Filter only options necessary for setting up a cobbler client session
  1543. @returns: only the options used for session setup
  1544. '''
  1545. session_options = {}
  1546. for name, value in self.config_options.items():
  1547. if name in ('user', 'password', 'debug_xmlrpc', 'debug'):
  1548. session_options[name] = value
  1549. return session_options
  1550. def is_command_valid(self):
  1551. '''
  1552. Checks if the currently set koji command is valid
  1553. @returns: True or False
  1554. '''
  1555. koji_command_ok = True
  1556. if not os.path.isfile(self.command):
  1557. logging.error('Koji command "%s" is not a regular file',
  1558. self.command)
  1559. koji_command_ok = False
  1560. if not os.access(self.command, os.X_OK):
  1561. logging.warn('Koji command "%s" is not executable: this is '
  1562. 'not fatal but indicates an unexpected situation',
  1563. self.command)
  1564. if not self.command in self.CONFIG_MAP.keys():
  1565. logging.error('Koji command "%s" does not have a configuration '
  1566. 'file associated to it', self.command)
  1567. koji_command_ok = False
  1568. return koji_command_ok
  1569. def is_config_valid(self):
  1570. '''
  1571. Checks if the currently set koji configuration is valid
  1572. @returns: True or False
  1573. '''
  1574. koji_config_ok = True
  1575. if not os.path.isfile(self.config):
  1576. logging.error('Koji config "%s" is not a regular file', self.config)
  1577. koji_config_ok = False
  1578. if not os.access(self.config, os.R_OK):
  1579. logging.error('Koji config "%s" is not readable', self.config)
  1580. koji_config_ok = False
  1581. config = ConfigParser.ConfigParser()
  1582. config.read(self.config)
  1583. basename = os.path.basename(self.command)
  1584. if not config.has_section(basename):
  1585. logging.error('Koji configuration file "%s" does not have a '
  1586. 'section "%s", named after the base name of the '
  1587. 'currently set koji command "%s"', self.config,
  1588. basename, self.command)
  1589. koji_config_ok = False
  1590. return koji_config_ok
def get_default_command(self):
    '''
    Looks up for koji or brew "binaries" on the system

    Systems with plain koji usually don't have a brew cmd, while systems
    with koji, have *both* koji and brew utilities. So we look for brew
    first, and if found, we consider that the system is configured for
    brew. If not, we consider this is a system with plain koji.

    @returns: either koji or brew command line executable path, or None
    '''
    koji_command = None
    for command in self.CMD_LOOKUP_ORDER:
        if os.path.isfile(command):
            # Found at its well known absolute path
            koji_command = command
            break
        else:
            # Not at the well known path: try a $PATH lookup using just
            # the base name (os_dep.command raises ValueError when the
            # command cannot be found, in which case we try the next one)
            koji_command_basename = os.path.basename(command)
            try:
                koji_command = os_dep.command(koji_command_basename)
                break
            except ValueError:
                pass
    return koji_command
  1613. def get_pkg_info(self, pkg):
  1614. '''
  1615. Returns information from Koji on the package
  1616. @type pkg: KojiPkgSpec
  1617. @param pkg: information about the package, as a KojiPkgSpec instance
  1618. @returns: information from Koji about the specified package
  1619. '''
  1620. info = {}
  1621. if pkg.build is not None:
  1622. info = self.session.getBuild(int(pkg.build))
  1623. elif pkg.tag is not None and pkg.package is not None:
  1624. builds = self.session.listTagged(pkg.tag,
  1625. latest=True,
  1626. inherit=True,
  1627. package=pkg.package)
  1628. if builds:
  1629. info = builds[0]
  1630. return info
  1631. def is_pkg_valid(self, pkg):
  1632. '''
  1633. Checks if this package is altogether valid on Koji
  1634. This verifies if the build or tag specified in the package
  1635. specification actually exist on the Koji server
  1636. @returns: True or False
  1637. '''
  1638. valid = True
  1639. if pkg.build:
  1640. if not self.is_pkg_spec_build_valid(pkg):
  1641. valid = False
  1642. elif pkg.tag:
  1643. if not self.is_pkg_spec_tag_valid(pkg):
  1644. valid = False
  1645. else:
  1646. valid = False
  1647. return valid
  1648. def is_pkg_spec_build_valid(self, pkg):
  1649. '''
  1650. Checks if build is valid on Koji
  1651. @param pkg: a Pkg instance
  1652. '''
  1653. if pkg.build is not None:
  1654. info = self.session.getBuild(int(pkg.build))
  1655. if info:
  1656. return True
  1657. return False
  1658. def is_pkg_spec_tag_valid(self, pkg):
  1659. '''
  1660. Checks if tag is valid on Koji
  1661. @type pkg: KojiPkgSpec
  1662. @param pkg: a package specification
  1663. '''
  1664. if pkg.tag is not None:
  1665. tag = self.session.getTag(pkg.tag)
  1666. if tag:
  1667. return True
  1668. return False
  1669. def get_pkg_rpm_info(self, pkg, arch=None):
  1670. '''
  1671. Returns a list of infomation on the RPM packages found on koji
  1672. @type pkg: KojiPkgSpec
  1673. @param pkg: a package specification
  1674. @type arch: string
  1675. @param arch: packages built for this architecture, but also including
  1676. architecture independent (noarch) packages
  1677. '''
  1678. if arch is None:
  1679. arch = utils.get_arch()
  1680. rpms = []
  1681. info = self.get_pkg_info(pkg)
  1682. if info:
  1683. rpms = self.session.listRPMs(buildID=info['id'],
  1684. arches=[arch, 'noarch'])
  1685. if pkg.subpackages:
  1686. rpms = [d for d in rpms if d['name'] in pkg.subpackages]
  1687. return rpms
  1688. def get_pkg_rpm_names(self, pkg, arch=None):
  1689. '''
  1690. Gets the names for the RPM packages specified in pkg
  1691. @type pkg: KojiPkgSpec
  1692. @param pkg: a package specification
  1693. @type arch: string
  1694. @param arch: packages built for this architecture, but also including
  1695. architecture independent (noarch) packages
  1696. '''
  1697. if arch is None:
  1698. arch = utils.get_arch()
  1699. rpms = self.get_pkg_rpm_info(pkg, arch)
  1700. return [rpm['name'] for rpm in rpms]
  1701. def get_pkg_rpm_file_names(self, pkg, arch=None):
  1702. '''
  1703. Gets the file names for the RPM packages specified in pkg
  1704. @type pkg: KojiPkgSpec
  1705. @param pkg: a package specification
  1706. @type arch: string
  1707. @param arch: packages built for this architecture, but also including
  1708. architecture independent (noarch) packages
  1709. '''
  1710. if arch is None:
  1711. arch = utils.get_arch()
  1712. rpm_names = []
  1713. rpms = self.get_pkg_rpm_info(pkg, arch)
  1714. for rpm in rpms:
  1715. arch_rpm_name = koji.pathinfo.rpm(rpm)
  1716. rpm_name = os.path.basename(arch_rpm_name)
  1717. rpm_names.append(rpm_name)
  1718. return rpm_names
  1719. def get_pkg_urls(self, pkg, arch=None):
  1720. '''
  1721. Gets the urls for the packages specified in pkg
  1722. @type pkg: KojiPkgSpec
  1723. @param pkg: a package specification
  1724. @type arch: string
  1725. @param arch: packages built for this architecture, but also including
  1726. architecture independent (noarch) packages
  1727. '''
  1728. info = self.get_pkg_info(pkg)
  1729. rpms = self.get_pkg_rpm_info(pkg, arch)
  1730. rpm_urls = []
  1731. if self.config_options.has_key('pkgurl'):
  1732. base_url = self.config_options['pkgurl']
  1733. else:
  1734. base_url = "%s/%s" % (self.config_options['topurl'],
  1735. 'packages')
  1736. for rpm in rpms:
  1737. rpm_name = koji.pathinfo.rpm(rpm)
  1738. url = ("%s/%s/%s/%s/%s" % (base_url,
  1739. info['package_name'],
  1740. info['version'], info['release'],
  1741. rpm_name))
  1742. rpm_urls.append(url)
  1743. return rpm_urls
  1744. def get_pkgs(self, pkg, dst_dir, arch=None):
  1745. '''
  1746. Download the packages
  1747. @type pkg: KojiPkgSpec
  1748. @param pkg: a package specification
  1749. @type dst_dir: string
  1750. @param dst_dir: the destination directory, where the downloaded
  1751. packages will be saved on
  1752. @type arch: string
  1753. @param arch: packages built for this architecture, but also including
  1754. architecture independent (noarch) packages
  1755. '''
  1756. rpm_urls = self.get_pkg_urls(pkg, arch)
  1757. for url in rpm_urls:
  1758. utils.get_file(url,
  1759. os.path.join(dst_dir, os.path.basename(url)))
# Module-wide fallback tag, consumed by KojiPkgSpec when neither a build
# nor an explicit tag is given (see set/get_default_koji_tag below)
DEFAULT_KOJI_TAG = None


def set_default_koji_tag(tag):
    '''
    Sets the default tag that will be used
    '''
    global DEFAULT_KOJI_TAG
    DEFAULT_KOJI_TAG = tag


def get_default_koji_tag():
    '''
    Returns the currently set default koji tag (None when unset)
    '''
    return DEFAULT_KOJI_TAG
  1769. class KojiPkgSpec(object):
  1770. '''
  1771. A package specification syntax parser for Koji
  1772. This holds information on either tag or build, and packages to be fetched
  1773. from koji and possibly installed (features external do this class).
  1774. New objects can be created either by providing information in the textual
  1775. format or by using the actual parameters for tag, build, package and sub-
  1776. packages. The textual format is useful for command line interfaces and
  1777. configuration files, while using parameters is better for using this in
  1778. a programatic fashion.
  1779. The following sets of examples are interchangeable. Specifying all packages
  1780. part of build number 1000:
  1781. >>> from kvm_utils import KojiPkgSpec
  1782. >>> pkg = KojiPkgSpec('1000')
  1783. >>> pkg = KojiPkgSpec(build=1000)
  1784. Specifying only a subset of packages of build number 1000:
  1785. >>> pkg = KojiPkgSpec('1000:kernel,kernel-devel')
  1786. >>> pkg = KojiPkgSpec(build=1000,
  1787. subpackages=['kernel', 'kernel-devel'])
  1788. Specifying the latest build for the 'kernel' package tagged with 'dist-f14':
  1789. >>> pkg = KojiPkgSpec('dist-f14:kernel')
  1790. >>> pkg = KojiPkgSpec(tag='dist-f14', package='kernel')
  1791. Specifying the 'kernel' package using the default tag:
  1792. >>> kvm_utils.set_default_koji_tag('dist-f14')
  1793. >>> pkg = KojiPkgSpec('kernel')
  1794. >>> pkg = KojiPkgSpec(package='kernel')
  1795. Specifying the 'kernel' package using the default tag:
  1796. >>> kvm_utils.set_default_koji_tag('dist-f14')
  1797. >>> pkg = KojiPkgSpec('kernel')
  1798. >>> pkg = KojiPkgSpec(package='kernel')
  1799. If you do not specify a default tag, and give a package name without an
  1800. explicit tag, your package specification is considered invalid:
  1801. >>> print kvm_utils.get_default_koji_tag()
  1802. None
  1803. >>> print kvm_utils.KojiPkgSpec('kernel').is_valid()
  1804. False
  1805. >>> print kvm_utils.KojiPkgSpec(package='kernel').is_valid()
  1806. False
  1807. '''
  1808. SEP = ':'
  1809. def __init__(self, text='', tag=None, build=None,
  1810. package=None, subpackages=[]):
  1811. '''
  1812. Instantiates a new KojiPkgSpec object
  1813. @type text: string
  1814. @param text: a textual representation of a package on Koji that
  1815. will be parsed
  1816. @type tag: string
  1817. @param tag: a koji tag, example: Fedora-14-RELEASE
  1818. (see U{http://fedoraproject.org/wiki/Koji#Tags_and_Targets})
  1819. @type build: number
  1820. @param build: a koji build, example: 1001
  1821. (see U{http://fedoraproject.org/wiki/Koji#Koji_Architecture})
  1822. @type package: string
  1823. @param package: a koji package, example: python
  1824. (see U{http://fedoraproject.org/wiki/Koji#Koji_Architecture})
  1825. @type subpackages: list of strings
  1826. @param subpackages: a list of package names, usually a subset of
  1827. the RPM packages generated by a given build
  1828. '''
  1829. # Set to None to indicate 'not set' (and be able to use 'is')
  1830. self.tag = None
  1831. self.build = None
  1832. self.package = None
  1833. self.subpackages = []
  1834. self.default_tag = None
  1835. # Textual representation takes precedence (most common use case)
  1836. if text:
  1837. self.parse(text)
  1838. else:
  1839. self.tag = tag
  1840. self.build = build
  1841. self.package = package
  1842. self.subpackages = subpackages
  1843. # Set the default tag, if set, as a fallback
  1844. if not self.build and not self.tag:
  1845. default_tag = get_default_koji_tag()
  1846. if default_tag is not None:
  1847. self.tag = default_tag
  1848. def parse(self, text):
  1849. '''
  1850. Parses a textual representation of a package specification
  1851. @type text: string
  1852. @param text: textual representation of a package in koji
  1853. '''
  1854. parts = text.count(self.SEP) + 1
  1855. if parts == 1:
  1856. if text.isdigit():
  1857. self.build = text
  1858. else:
  1859. self.package = text
  1860. elif parts == 2:
  1861. part1, part2 = text.split(self.SEP)
  1862. if part1.isdigit():
  1863. self.build = part1
  1864. self.subpackages = part2.split(',')
  1865. else:
  1866. self.tag = part1
  1867. self.package = part2
  1868. elif parts >= 3:
  1869. # Instead of erroring on more arguments, we simply ignore them
  1870. # This makes the parser suitable for future syntax additions, such
  1871. # as specifying the package architecture
  1872. part1, part2, part3 = text.split(self.SEP)[0:3]
  1873. self.tag = part1
  1874. self.package = part2
  1875. self.subpackages = part3.split(',')
  1876. def _is_invalid_neither_tag_or_build(self):
  1877. '''
  1878. Checks if this package is invalid due to not having either a valid
  1879. tag or build set, that is, both are empty.
  1880. @returns: True if this is invalid and False if it's valid
  1881. '''
  1882. return (self.tag is None and self.build is None)
  1883. def _is_invalid_package_but_no_tag(self):
  1884. '''
  1885. Checks if this package is invalid due to having a package name set
  1886. but tag or build set, that is, both are empty.
  1887. @returns: True if this is invalid and False if it's valid
  1888. '''
  1889. return (self.package and not self.tag)
  1890. def _is_invalid_subpackages_but_no_main_package(self):
  1891. '''
  1892. Checks if this package is invalid due to having a tag set (this is Ok)
  1893. but specifying subpackage names without specifying the main package
  1894. name.
  1895. Specifying subpackages without a main package name is only valid when
  1896. a build is used instead of a tag.
  1897. @returns: True if this is invalid and False if it's valid
  1898. '''
  1899. return (self.tag and self.subpackages and not self.package)
  1900. def is_valid(self):
  1901. '''
  1902. Checks if this package specification is valid.
  1903. Being valid means that it has enough and not conflicting information.
  1904. It does not validate that the packages specified actually existe on
  1905. the Koji server.
  1906. @returns: True or False
  1907. '''
  1908. if self._is_invalid_neither_tag_or_build():
  1909. return False
  1910. elif self._is_invalid_package_but_no_tag():
  1911. return False
  1912. elif self._is_invalid_subpackages_but_no_main_package():
  1913. return False
  1914. return True
  1915. def describe_invalid(self):
  1916. '''
  1917. Describes why this is not valid, in a human friendly way
  1918. '''
  1919. if self._is_invalid_neither_tag_or_build():
  1920. return 'neither a tag or build are set, and of them should be set'
  1921. elif self._is_invalid_package_but_no_tag():
  1922. return 'package name specified but no tag is set'
  1923. elif self._is_invalid_subpackages_but_no_main_package():
  1924. return 'subpackages specified but no main package is set'
  1925. return 'unkwown reason, seems to be valid'
  1926. def describe(self):
  1927. '''
  1928. Describe this package specification, in a human friendly way
  1929. @returns: package specification description
  1930. '''
  1931. if self.is_valid():
  1932. description = ''
  1933. if not self.subpackages:
  1934. description += 'all subpackages from %s ' % self.package
  1935. else:
  1936. description += ('only subpackage(s) %s from package %s ' %
  1937. (', '.join(self.subpackages), self.package))
  1938. if self.build:
  1939. description += 'from build %s' % self.build
  1940. elif self.tag:
  1941. description += 'tagged with %s' % self.tag
  1942. else:
  1943. raise ValueError, 'neither build or tag is set'
  1944. return description
  1945. else:
  1946. return ('Invalid package specification: %s' %
  1947. self.describe_invalid())
  1948. def to_text(self):
  1949. '''
  1950. Return the textual representation of this package spec
  1951. The output should be consumable by parse() and produce the same
  1952. package specification.
  1953. We find that it's acceptable to put the currently set default tag
  1954. as the package explicit tag in the textual definition for completeness.
  1955. @returns: package specification in a textual representation
  1956. '''
  1957. default_tag = get_default_koji_tag()
  1958. if self.build:
  1959. if self.subpackages:
  1960. return "%s:%s" % (self.build, ",".join(self.subpackages))
  1961. else:
  1962. return "%s" % self.build
  1963. elif self.tag:
  1964. if self.subpackages:
  1965. return "%s:%s:%s" % (self.tag, self.package,
  1966. ",".join(self.subpackages))
  1967. else:
  1968. return "%s:%s" % (self.tag, self.package)
  1969. elif default_tag is not None:
  1970. # neither build or tag is set, try default_tag as a fallback
  1971. if self.subpackages:
  1972. return "%s:%s:%s" % (default_tag, self.package,
  1973. ",".join(self.subpackages))
  1974. else:
  1975. return "%s:%s" % (default_tag, self.package)
  1976. else:
  1977. raise ValueError, 'neither build or tag is set'
  1978. def __repr__(self):
  1979. return ("<KojiPkgSpec tag=%s build=%s pkg=%s subpkgs=%s>" %
  1980. (self.tag, self.build, self.package,
  1981. ", ".join(self.subpackages)))
  1982. def umount(src, mount_point, type):
  1983. """
  1984. Umount the src mounted in mount_point.
  1985. @src: mount source
  1986. @mount_point: mount point
  1987. @type: file system type
  1988. """
  1989. mount_string = "%s %s %s" % (src, mount_point, type)
  1990. if mount_string in file("/etc/mtab").read():
  1991. umount_cmd = "umount %s" % mount_point
  1992. try:
  1993. utils.system(umount_cmd)
  1994. return True
  1995. except error.CmdError:
  1996. return False
  1997. else:
  1998. logging.debug("%s is not mounted under %s", src, mount_point)
  1999. return True
  2000. def mount(src, mount_point, type, perm="rw"):
  2001. """
  2002. Mount the src into mount_point of the host.
  2003. @src: mount source
  2004. @mount_point: mount point
  2005. @type: file system type
  2006. @perm: mount premission
  2007. """
  2008. umount(src, mount_point, type)
  2009. mount_string = "%s %s %s %s" % (src, mount_point, type, perm)
  2010. if mount_string in file("/etc/mtab").read():
  2011. logging.debug("%s is already mounted in %s with %s",
  2012. src, mount_point, perm)
  2013. return True
  2014. mount_cmd = "mount -t %s %s %s -o %s" % (type, src, mount_point, perm)
  2015. try:
  2016. utils.system(mount_cmd)
  2017. except error.CmdError:
  2018. return False
  2019. logging.debug("Verify the mount through /etc/mtab")
  2020. if mount_string in file("/etc/mtab").read():
  2021. logging.debug("%s is successfully mounted", src)
  2022. return True
  2023. else:
  2024. logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s",
  2025. file("/etc/mtab").read())
  2026. return False
  2027. class GitRepoParamHelper(git.GitRepoHelper):
  2028. '''
  2029. Helps to deal with git repos specified in cartersian config files
  2030. This class attempts to make it simple to manage a git repo, by using a
  2031. naming standard that follows this basic syntax:
  2032. <prefix>_name_<suffix>
  2033. <prefix> is always 'git_repo' and <suffix> sets options for this git repo.
  2034. Example for repo named foo:
  2035. git_repo_foo_uri = git://git.foo.org/foo.git
  2036. git_repo_foo_base_uri = /home/user/code/foo
  2037. git_repo_foo_branch = master
  2038. git_repo_foo_lbranch = master
  2039. git_repo_foo_commit = bb5fb8e678aabe286e74c4f2993dc2a9e550b627
  2040. '''
  2041. def __init__(self, params, name, destination_dir):
  2042. '''
  2043. Instantiates a new GitRepoParamHelper
  2044. '''
  2045. self.params = params
  2046. self.name = name
  2047. self.destination_dir = destination_dir
  2048. self._parse_params()
  2049. def _parse_params(self):
  2050. '''
  2051. Parses the params items for entries related to this repo
  2052. This method currently does everything that the parent class __init__()
  2053. method does, that is, sets all instance variables needed by other
  2054. methods. That means it's not strictly necessary to call parent's
  2055. __init__().
  2056. '''
  2057. config_prefix = 'git_repo_%s' % self.name
  2058. logging.debug('Parsing parameters for git repo %s, configuration '
  2059. 'prefix is %s' % (self.name, config_prefix))
  2060. self.base_uri = self.params.get('%s_base_uri' % config_prefix)
  2061. if self.base_uri is None:
  2062. logging.debug('Git repo %s base uri is not set' % self.name)
  2063. else:
  2064. logging.debug('Git repo %s base uri: %s' % (self.name,
  2065. self.base_uri))
  2066. self.uri = self.params.get('%s_uri' % config_prefix)
  2067. logging.debug('Git repo %s uri: %s' % (self.name, self.uri))
  2068. self.branch = self.params.get('%s_branch' % config_prefix, 'master')
  2069. logging.debug('Git repo %s branch: %s' % (self.name, self.branch))
  2070. self.lbranch = self.params.get('%s_lbranch' % config_prefix)
  2071. if self.lbranch is None:
  2072. self.lbranch = self.branch
  2073. logging.debug('Git repo %s lbranch: %s' % (self.name, self.lbranch))
  2074. self.commit = self.params.get('%s_commit' % config_prefix)
  2075. if self.commit is None:
  2076. logging.debug('Git repo %s commit is not set' % self.name)
  2077. else:
  2078. logging.debug('Git repo %s commit: %s' % (self.name, self.commit))
  2079. self.cmd = os_dep.command('git')
  2080. class LocalSourceDirHelper(object):
  2081. '''
  2082. Helper class to deal with source code sitting somewhere in the filesystem
  2083. '''
  2084. def __init__(self, source_dir, destination_dir):
  2085. '''
  2086. @param source_dir:
  2087. @param destination_dir:
  2088. @return: new LocalSourceDirHelper instance
  2089. '''
  2090. self.source = source_dir
  2091. self.destination = destination_dir
  2092. def execute(self):
  2093. '''
  2094. Copies the source directory to the destination directory
  2095. '''
  2096. if os.path.isdir(self.destination):
  2097. shutil.rmtree(self.destination)
  2098. if os.path.isdir(self.source):
  2099. shutil.copytree(self.source, self.destination)
  2100. class LocalSourceDirParamHelper(LocalSourceDirHelper):
  2101. '''
  2102. Helps to deal with source dirs specified in cartersian config files
  2103. This class attempts to make it simple to manage a source dir, by using a
  2104. naming standard that follows this basic syntax:
  2105. <prefix>_name_<suffix>
  2106. <prefix> is always 'local_src' and <suffix> sets options for this source
  2107. dir. Example for source dir named foo:
  2108. local_src_foo_path = /home/user/foo
  2109. '''
  2110. def __init__(self, params, name, destination_dir):
  2111. '''
  2112. Instantiate a new LocalSourceDirParamHelper
  2113. '''
  2114. self.params = params
  2115. self.name = name
  2116. self.destination_dir = destination_dir
  2117. self._parse_params()
  2118. def _parse_params(self):
  2119. '''
  2120. Parses the params items for entries related to source dir
  2121. '''
  2122. config_prefix = 'local_src_%s' % self.name
  2123. logging.debug('Parsing parameters for local source %s, configuration '
  2124. 'prefix is %s' % (self.name, config_prefix))
  2125. self.path = self.params.get('%s_path' % config_prefix)
  2126. logging.debug('Local source directory %s path: %s' % (self.name,
  2127. self.path))
  2128. self.source = self.path
  2129. self.destination = self.destination_dir
  2130. class LocalTarHelper(object):
  2131. '''
  2132. Helper class to deal with source code in a local tarball
  2133. '''
  2134. def __init__(self, source, destination_dir):
  2135. self.source = source
  2136. self.destination = destination_dir
  2137. def extract(self):
  2138. '''
  2139. Extracts the tarball into the destination directory
  2140. '''
  2141. if os.path.isdir(self.destination):
  2142. shutil.rmtree(self.destination)
  2143. if os.path.isfile(self.source) and tarfile.is_tarfile(self.source):
  2144. name = os.path.basename(self.destination)
  2145. temp_dir = os.path.join(os.path.dirname(self.destination),
  2146. '%s.tmp' % name)
  2147. logging.debug('Temporary directory for extracting tarball is %s' %
  2148. temp_dir)
  2149. if not os.path.isdir(temp_dir):
  2150. os.makedirs(temp_dir)
  2151. tarball = tarfile.open(self.source)
  2152. tarball.extractall(temp_dir)
  2153. #
  2154. # If there's a directory at the toplevel of the tarfile, assume
  2155. # it's the root for the contents, usually source code
  2156. #
  2157. tarball_info = tarball.members[0]
  2158. if tarball_info.isdir():
  2159. content_path = os.path.join(temp_dir,
  2160. tarball_info.name)
  2161. else:
  2162. content_path = temp_dir
  2163. #
  2164. # Now move the content directory to the final destination
  2165. #
  2166. shutil.move(content_path, self.destination)
  2167. else:
  2168. raise OSError("%s is not a file or tar file" % self.source)
  2169. def execute(self):
  2170. '''
  2171. Executes all action this helper is suposed to perform
  2172. This is the main entry point method for this class, and all other
  2173. helper classes.
  2174. '''
  2175. self.extract()
  2176. class LocalTarParamHelper(LocalTarHelper):
  2177. '''
  2178. Helps to deal with source tarballs specified in cartersian config files
  2179. This class attempts to make it simple to manage a tarball with source code,
  2180. by using a naming standard that follows this basic syntax:
  2181. <prefix>_name_<suffix>
  2182. <prefix> is always 'local_tar' and <suffix> sets options for this source
  2183. tarball. Example for source tarball named foo:
  2184. local_tar_foo_path = /tmp/foo-1.0.tar.gz
  2185. '''
  2186. def __init__(self, params, name, destination_dir):
  2187. '''
  2188. Instantiates a new LocalTarParamHelper
  2189. '''
  2190. self.params = params
  2191. self.name = name
  2192. self.destination_dir = destination_dir
  2193. self._parse_params()
  2194. def _parse_params(self):
  2195. '''
  2196. Parses the params items for entries related to this local tar helper
  2197. '''
  2198. config_prefix = 'local_tar_%s' % self.name
  2199. logging.debug('Parsing parameters for local tar %s, configuration '
  2200. 'prefix is %s' % (self.name, config_prefix))
  2201. self.path = self.params.get('%s_path' % config_prefix)
  2202. logging.debug('Local source tar %s path: %s' % (self.name,
  2203. self.path))
  2204. self.source = self.path
  2205. self.destination = self.destination_dir
  2206. class RemoteTarHelper(LocalTarHelper):
  2207. '''
  2208. Helper that fetches a tarball and extracts it locally
  2209. '''
  2210. def __init__(self, source_uri, destination_dir):
  2211. self.source = source_uri
  2212. self.destination = destination_dir
  2213. def execute(self):
  2214. '''
  2215. Executes all action this helper class is suposed to perform
  2216. This is the main entry point method for this class, and all other
  2217. helper classes.
  2218. This implementation fetches the remote tar file and then extracts
  2219. it using the functionality present in the parent class.
  2220. '''
  2221. name = os.path.basename(self.source)
  2222. base_dest = os.path.dirname(self.destination_dir)
  2223. dest = os.path.join(base_dest, name)
  2224. utils.get_file(self.source, dest)
  2225. self.source = dest
  2226. self.extract()
  2227. class RemoteTarParamHelper(RemoteTarHelper):
  2228. '''
  2229. Helps to deal with remote source tarballs specified in cartersian config
  2230. This class attempts to make it simple to manage a tarball with source code,
  2231. by using a naming standard that follows this basic syntax:
  2232. <prefix>_name_<suffix>
  2233. <prefix> is always 'local_tar' and <suffix> sets options for this source
  2234. tarball. Example for source tarball named foo:
  2235. remote_tar_foo_uri = http://foo.org/foo-1.0.tar.gz
  2236. '''
  2237. def __init__(self, params, name, destination_dir):
  2238. '''
  2239. Instantiates a new RemoteTarParamHelper instance
  2240. '''
  2241. self.params = params
  2242. self.name = name
  2243. self.destination_dir = destination_dir
  2244. self._parse_params()
  2245. def _parse_params(self):
  2246. '''
  2247. Parses the params items for entries related to this remote tar helper
  2248. '''
  2249. config_prefix = 'remote_tar_%s' % self.name
  2250. logging.debug('Parsing parameters for remote tar %s, configuration '
  2251. 'prefix is %s' % (self.name, config_prefix))
  2252. self.uri = self.params.get('%s_uri' % config_prefix)
  2253. logging.debug('Remote source tar %s uri: %s' % (self.name,
  2254. self.uri))
  2255. self.source = self.uri
  2256. self.destination = self.destination_dir
  2257. class PatchHelper(object):
  2258. '''
  2259. Helper that encapsulates the patching of source code with patch files
  2260. '''
  2261. def __init__(self, source_dir, patches):
  2262. '''
  2263. Initializes a new PatchHelper
  2264. '''
  2265. self.source_dir = source_dir
  2266. self.patches = patches
  2267. def download(self):
  2268. '''
  2269. Copies patch files from remote locations to the source directory
  2270. '''
  2271. for patch in self.patches:
  2272. utils.get_file(patch, os.path.join(self.source_dir,
  2273. os.path.basename(patch)))
  2274. def patch(self):
  2275. '''
  2276. Patches the source dir with all patch files
  2277. '''
  2278. os.chdir(self.source_dir)
  2279. for patch in self.patches:
  2280. patch_file = os.path.join(self.source_dir,
  2281. os.path.basename(patch))
  2282. utils.system('patch -p1 < %s' % os.path.basename(patch))
  2283. def execute(self):
  2284. '''
  2285. Performs all steps necessary to download patches and apply them
  2286. '''
  2287. self.download()
  2288. self.patch()
  2289. class PatchParamHelper(PatchHelper):
  2290. '''
  2291. Helps to deal with patches specified in cartersian config files
  2292. This class attempts to make it simple to patch source coude, by using a
  2293. naming standard that follows this basic syntax:
  2294. [<git_repo>|<local_src>|<local_tar>|<remote_tar>]_<name>_patches
  2295. <prefix> is either a 'local_src' or 'git_repo', that, together with <name>
  2296. specify a directory containing source code to receive the patches. That is,
  2297. for source code coming from git repo foo, patches would be specified as:
  2298. git_repo_foo_patches = ['http://foo/bar.patch', 'http://foo/baz.patch']
  2299. And for for patches to be applied on local source code named also foo:
  2300. local_src_foo_patches = ['http://foo/bar.patch', 'http://foo/baz.patch']
  2301. '''
  2302. def __init__(self, params, prefix, source_dir):
  2303. '''
  2304. Initializes a new PatchParamHelper instance
  2305. '''
  2306. self.params = params
  2307. self.prefix = prefix
  2308. self.source_dir = source_dir
  2309. self._parse_params()
  2310. def _parse_params(self):
  2311. '''
  2312. Parses the params items for entries related to this set of patches
  2313. This method currently does everything that the parent class __init__()
  2314. method does, that is, sets all instance variables needed by other
  2315. methods. That means it's not strictly necessary to call parent's
  2316. __init__().
  2317. '''
  2318. logging.debug('Parsing patch parameters for prefix %s' % self.prefix)
  2319. patches_param_key = '%s_patches' % self.prefix
  2320. self.patches_str = self.params.get(patches_param_key, '[]')
  2321. logging.debug('Patches config for prefix %s: %s' % (self.prefix,
  2322. self.patches_str))
  2323. self.patches = eval(self.patches_str)
  2324. logging.debug('Patches for prefix %s: %s' % (self.prefix,
  2325. ", ".join(self.patches)))
  2326. class GnuSourceBuildInvalidSource(Exception):
  2327. '''
  2328. Exception raised when build source dir/file is not valid
  2329. '''
  2330. pass
  2331. class SourceBuildFailed(Exception):
  2332. '''
  2333. Exception raised when building with parallel jobs fails
  2334. This serves as feedback for code using *BuildHelper
  2335. '''
  2336. pass
  2337. class SourceBuildParallelFailed(Exception):
  2338. '''
  2339. Exception raised when building with parallel jobs fails
  2340. This serves as feedback for code using *BuildHelper
  2341. '''
  2342. pass
  2343. class GnuSourceBuildHelper(object):
  2344. '''
  2345. Handles software installation of GNU-like source code
  2346. This basically means that the build will go though the classic GNU
  2347. autotools steps: ./configure, make, make install
  2348. '''
  2349. def __init__(self, source, build_dir, prefix,
  2350. configure_options=[]):
  2351. '''
  2352. @type source: string
  2353. @param source: source directory or tarball
  2354. @type prefix: string
  2355. @param prefix: installation prefix
  2356. @type build_dir: string
  2357. @param build_dir: temporary directory used for building the source code
  2358. @type configure_options: list
  2359. @param configure_options: options to pass to configure
  2360. @throws: GnuSourceBuildInvalidSource
  2361. '''
  2362. self.source = source
  2363. self.build_dir = build_dir
  2364. self.prefix = prefix
  2365. self.configure_options = configure_options
  2366. self.install_debug_info = True
  2367. self.include_pkg_config_path()
  2368. def include_pkg_config_path(self):
  2369. '''
  2370. Adds the current prefix to the list of paths that pkg-config searches
  2371. This is currently not optional as there is no observed adverse side
  2372. effects of enabling this. As the "prefix" is usually only valid during
  2373. a test run, we believe that having other pkg-config files (*.pc) in
  2374. either '<prefix>/share/pkgconfig' or '<prefix>/lib/pkgconfig' is
  2375. exactly for the purpose of using them.
  2376. @returns: None
  2377. '''
  2378. env_var = 'PKG_CONFIG_PATH'
  2379. include_paths = [os.path.join(self.prefix, 'share', 'pkgconfig'),
  2380. os.path.join(self.prefix, 'lib', 'pkgconfig')]
  2381. if os.environ.has_key(env_var):
  2382. paths = os.environ[env_var].split(':')
  2383. for include_path in include_paths:
  2384. if include_path not in paths:
  2385. paths.append(include_path)
  2386. os.environ[env_var] = ':'.join(paths)
  2387. else:
  2388. os.environ[env_var] = ':'.join(include_paths)
  2389. logging.debug('PKG_CONFIG_PATH is: %s' % os.environ['PKG_CONFIG_PATH'])
  2390. def get_configure_path(self):
  2391. '''
  2392. Checks if 'configure' exists, if not, return 'autogen.sh' as a fallback
  2393. '''
  2394. configure_path = os.path.abspath(os.path.join(self.source,
  2395. "configure"))
  2396. autogen_path = os.path.abspath(os.path.join(self.source,
  2397. "autogen.sh"))
  2398. if os.path.exists(configure_path):
  2399. return configure_path
  2400. elif os.path.exists(autogen_path):
  2401. return autogen_path
  2402. else:
  2403. raise GnuSourceBuildInvalidSource('configure script does not exist')
  2404. def get_available_configure_options(self):
  2405. '''
  2406. Return the list of available options of a GNU like configure script
  2407. This will run the "configure" script at the source directory
  2408. @returns: list of options accepted by configure script
  2409. '''
  2410. help_raw = utils.system_output('%s --help' % self.get_configure_path(),
  2411. ignore_status=True)
  2412. help_output = help_raw.split("\n")
  2413. option_list = []
  2414. for line in help_output:
  2415. cleaned_line = line.lstrip()
  2416. if cleaned_line.startswith("--"):
  2417. option = cleaned_line.split()[0]
  2418. option = option.split("=")[0]
  2419. option_list.append(option)
  2420. return option_list
  2421. def enable_debug_symbols(self):
  2422. '''
  2423. Enables option that leaves debug symbols on compiled software
  2424. This makes debugging a lot easier.
  2425. '''
  2426. enable_debug_option = "--disable-strip"
  2427. if enable_debug_option in self.get_available_configure_options():
  2428. self.configure_options.append(enable_debug_option)
  2429. logging.debug('Enabling debug symbols with option: %s' %
  2430. enable_debug_option)
  2431. def get_configure_command(self):
  2432. '''
  2433. Formats configure script with all options set
  2434. @returns: string with all configure options, including prefix
  2435. '''
  2436. prefix_option = "--prefix=%s" % self.prefix
  2437. options = self.configure_options
  2438. options.append(prefix_option)
  2439. return "%s %s" % (self.get_configure_path(),
  2440. " ".join(options))
  2441. def configure(self):
  2442. '''
  2443. Runs the "configure" script passing apropriate command line options
  2444. '''
  2445. configure_command = self.get_configure_command()
  2446. logging.info('Running configure on build dir')
  2447. os.chdir(self.build_dir)
  2448. utils.system(configure_command)
  2449. def make_parallel(self):
  2450. '''
  2451. Runs "make" using the correct number of parallel jobs
  2452. '''
  2453. parallel_make_jobs = utils.count_cpus()
  2454. make_command = "make -j %s" % parallel_make_jobs
  2455. logging.info("Running parallel make on build dir")
  2456. os.chdir(self.build_dir)
  2457. utils.system(make_command)
  2458. def make_non_parallel(self):
  2459. '''
  2460. Runs "make", using a single job
  2461. '''
  2462. os.chdir(self.build_dir)
  2463. utils.system("make")
  2464. def make_clean(self):
  2465. '''
  2466. Runs "make clean"
  2467. '''
  2468. os.chdir(self.build_dir)
  2469. utils.system("make clean")
  2470. def make(self, failure_feedback=True):
  2471. '''
  2472. Runs a parallel make, falling back to a single job in failure
  2473. @param failure_feedback: return information on build failure by raising
  2474. the appropriate exceptions
  2475. @raise: SourceBuildParallelFailed if parallel build fails, or
  2476. SourceBuildFailed if single job build fails
  2477. '''
  2478. try:
  2479. self.make_parallel()
  2480. except error.CmdError:
  2481. try:
  2482. self.make_clean()
  2483. self.make_non_parallel()
  2484. except error.CmdError:
  2485. if failure_feedback:
  2486. raise SourceBuildFailed
  2487. if failure_feedback:
  2488. raise SourceBuildParallelFailed
  2489. def make_install(self):
  2490. '''
  2491. Runs "make install"
  2492. '''
  2493. os.chdir(self.build_dir)
  2494. utils.system("make install")
  2495. install = make_install
  2496. def execute(self):
  2497. '''
  2498. Runs appropriate steps for *building* this source code tree
  2499. '''
  2500. if self.install_debug_info:
  2501. self.enable_debug_symbols()
  2502. self.configure()
  2503. self.make()
  2504. class LinuxKernelBuildHelper(object):
  2505. '''
  2506. Handles Building Linux Kernel.
  2507. '''
  2508. def __init__(self, params, prefix, source):
  2509. '''
  2510. @type params: dict
  2511. @param params: dictionary containing the test parameters
  2512. @type source: string
  2513. @param source: source directory or tarball
  2514. @type prefix: string
  2515. @param prefix: installation prefix
  2516. '''
  2517. self.params = params
  2518. self.prefix = prefix
  2519. self.source = source
  2520. self._parse_params()
  2521. def _parse_params(self):
  2522. '''
  2523. Parses the params items for entries related to guest kernel
  2524. '''
  2525. configure_opt_key = '%s_config' % self.prefix
  2526. self.config = self.params.get(configure_opt_key, '')
  2527. build_image_key = '%s_build_image' % self.prefix
  2528. self.build_image = self.params.get(build_image_key,
  2529. 'arch/x86/boot/bzImage')
  2530. build_target_key = '%s_build_target' % self.prefix
  2531. self.build_target = self.params.get(build_target_key, 'bzImage')
  2532. kernel_path_key = '%s_kernel_path' % self.prefix
  2533. default_kernel_path = os.path.join('/tmp/kvm_autotest_root/images',
  2534. self.build_target)
  2535. self.kernel_path = self.params.get(kernel_path_key,
  2536. default_kernel_path)
  2537. logging.info('Parsing Linux kernel build parameters for %s',
  2538. self.prefix)
  2539. def make_guest_kernel(self):
  2540. '''
  2541. Runs "make", using a single job
  2542. '''
  2543. os.chdir(self.source)
  2544. logging.info("Building guest kernel")
  2545. logging.debug("Kernel config is %s" % self.config)
  2546. utils.get_file(self.config, '.config')
  2547. # FIXME currently no support for builddir
  2548. # run old config
  2549. utils.system('yes "" | make oldconfig > /dev/null')
  2550. parallel_make_jobs = utils.count_cpus()
  2551. make_command = "make -j %s %s" % (parallel_make_jobs, self.build_target)
  2552. logging.info("Running parallel make on src dir")
  2553. utils.system(make_command)
  2554. def make_clean(self):
  2555. '''
  2556. Runs "make clean"
  2557. '''
  2558. os.chdir(self.source)
  2559. utils.system("make clean")
  2560. def make(self, failure_feedback=True):
  2561. '''
  2562. Runs a parallel make
  2563. @param failure_feedback: return information on build failure by raising
  2564. the appropriate exceptions
  2565. @raise: SourceBuildParallelFailed if parallel build fails, or
  2566. '''
  2567. try:
  2568. self.make_clean()
  2569. self.make_guest_kernel()
  2570. except error.CmdError:
  2571. if failure_feedback:
  2572. raise SourceBuildParallelFailed
  2573. def cp_linux_kernel(self):
  2574. '''
  2575. Copying Linux kernel to target path
  2576. '''
  2577. os.chdir(self.source)
  2578. utils.force_copy(self.build_image, self.kernel_path)
  2579. install = cp_linux_kernel
  2580. def execute(self):
  2581. '''
  2582. Runs appropriate steps for *building* this source code tree
  2583. '''
  2584. self.make()
  2585. class GnuSourceBuildParamHelper(GnuSourceBuildHelper):
  2586. '''
  2587. Helps to deal with gnu_autotools build helper in cartersian config files
  2588. This class attempts to make it simple to build source coude, by using a
  2589. naming standard that follows this basic syntax:
  2590. [<git_repo>|<local_src>]_<name>_<option> = value
  2591. To pass extra options to the configure script, while building foo from a
  2592. git repo, set the following variable:
  2593. git_repo_foo_configure_options = --enable-feature
  2594. '''
  2595. def __init__(self, params, name, destination_dir, install_prefix):
  2596. '''
  2597. Instantiates a new GnuSourceBuildParamHelper
  2598. '''
  2599. self.params = params
  2600. self.name = name
  2601. self.destination_dir = destination_dir
  2602. self.install_prefix = install_prefix
  2603. self._parse_params()
  2604. def _parse_params(self):
  2605. '''
  2606. Parses the params items for entries related to source directory
  2607. This method currently does everything that the parent class __init__()
  2608. method does, that is, sets all instance variables needed by other
  2609. methods. That means it's not strictly necessary to call parent's
  2610. __init__().
  2611. '''
  2612. logging.debug('Parsing gnu_autotools build parameters for %s' %
  2613. self.name)
  2614. configure_opt_key = '%s_configure_options' % self.name
  2615. configure_options = self.params.get(configure_opt_key, '').split()
  2616. logging.debug('Configure options for %s: %s' % (self.name,
  2617. configure_options))
  2618. self.source = self.destination_dir
  2619. self.build_dir = self.destination_dir
  2620. self.prefix = self.install_prefix
  2621. self.configure_options = configure_options
  2622. self.include_pkg_config_path()
  2623. # Support the install_debug_info feature, that automatically
  2624. # adds/keeps debug information on generated libraries/binaries
  2625. install_debug_info_cfg = self.params.get("install_debug_info", "yes")
  2626. self.install_debug_info = install_debug_info_cfg != "no"
def install_host_kernel(job, params):
    """
    Install a host kernel, given the appropriate params.

    Supported 'host_kernel_install_type' values: 'rpm' (install a kernel
    RPM from a URL), 'koji'/'brew' (fetch and install a kernel built on a
    koji/brew instance), 'git' (clone, build and install from a git tree).
    Any other value leaves the currently running kernel in place. The
    'rpm', 'koji'/'brew' and 'git' paths all end by booting the newly
    installed kernel.

    @param job: Job object.
    @param params: Dict with host kernel install params.
    """
    install_type = params.get('host_kernel_install_type')

    if install_type == 'rpm':
        # Download the kernel RPM to /tmp and hand it to the job's
        # kernel handler for install + reboot.
        logging.info('Installing host kernel through rpm')
        rpm_url = params.get('host_kernel_rpm_url')
        dst = os.path.join("/tmp", os.path.basename(rpm_url))
        k = utils.get_file(rpm_url, dst)
        host_kernel = job.kernel(k)
        host_kernel.install(install_vmlinux=False)
        host_kernel.boot()

    elif install_type in ['koji', 'brew']:
        # Fetch the -devel and -firmware subpackages first, then the
        # kernel package itself, all from the given koji/brew tag.
        logging.info('Installing host kernel through koji/brew')
        koji_cmd = params.get('host_kernel_koji_cmd')
        # NOTE(review): koji_build is read here but never used below --
        # confirm whether it should be passed to KojiPkgSpec.
        koji_build = params.get('host_kernel_koji_build')
        koji_tag = params.get('host_kernel_koji_tag')

        k_deps = KojiPkgSpec(tag=koji_tag, package='kernel',
                             subpackages=['kernel-devel', 'kernel-firmware'])
        k = KojiPkgSpec(tag=koji_tag, package='kernel',
                        subpackages=['kernel'])

        c = KojiClient(koji_cmd)
        logging.info('Fetching kernel dependencies (-devel, -firmware)')
        c.get_pkgs(k_deps, job.tmpdir)
        logging.info('Installing kernel dependencies (-devel, -firmware) '
                     'through %s', install_type)
        # --force so an already-installed version of the deps does not
        # abort the run.
        k_deps_rpm_file_names = [os.path.join(job.tmpdir, rpm_file_name) for
                                 rpm_file_name in c.get_pkg_rpm_file_names(k_deps)]
        utils.run('rpm -U --force %s' % " ".join(k_deps_rpm_file_names))

        c.get_pkgs(k, job.tmpdir)
        k_rpm = os.path.join(job.tmpdir,
                             c.get_pkg_rpm_file_names(k)[0])
        host_kernel = job.kernel(k_rpm)
        host_kernel.install(install_vmlinux=False)
        host_kernel.boot()

    elif install_type == 'git':
        # Clone the tree, optionally apply patches, then configure,
        # build, install and boot.
        logging.info('Chose to install host kernel through git, proceeding')
        repo = params.get('host_kernel_git_repo')
        repo_base = params.get('host_kernel_git_repo_base', None)
        branch = params.get('host_kernel_git_branch')
        commit = params.get('host_kernel_git_commit')
        patch_list = params.get('host_kernel_patch_list')
        if patch_list:
            # Space-separated list of patch locations
            patch_list = patch_list.split()
        kernel_config = params.get('host_kernel_config')

        repodir = os.path.join("/tmp", 'kernel_src')
        r = git.get_repo(uri=repo, branch=branch, destination_dir=repodir,
                         commit=commit, base_uri=repo_base)
        host_kernel = job.kernel(r)
        if patch_list:
            host_kernel.patch(patch_list)
        host_kernel.config(kernel_config)
        host_kernel.build()
        host_kernel.install()
        host_kernel.boot()

    else:
        # Unknown or unset install type: keep the running kernel.
        logging.info('Chose %s, using the current kernel for the host',
                     install_type)
  2688. def if_nametoindex(ifname):
  2689. """
  2690. Map an interface name into its corresponding index.
  2691. Returns 0 on error, as 0 is not a valid index
  2692. @param ifname: interface name
  2693. """
  2694. index = 0
  2695. ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
  2696. ifr = struct.pack("16si", ifname, 0)
  2697. r = fcntl.ioctl(ctrl_sock, SIOCGIFINDEX, ifr)
  2698. index = struct.unpack("16si", r)[1]
  2699. ctrl_sock.close()
  2700. return index
  2701. def vnet_hdr_probe(tapfd):
  2702. """
  2703. Check if the IFF_VNET_HDR is support by tun.
  2704. @param tapfd: the file descriptor of /dev/net/tun
  2705. """
  2706. u = struct.pack("I", 0)
  2707. try:
  2708. r = fcntl.ioctl(tapfd, TUNGETFEATURES, u)
  2709. except OverflowError:
  2710. return False
  2711. flags = struct.unpack("I", r)[0]
  2712. if flags & IFF_VNET_HDR:
  2713. return True
  2714. else:
  2715. return False
  2716. def open_tap(devname, ifname, vnet_hdr=True):
  2717. """
  2718. Open a tap device and returns its file descriptor which is used by
  2719. fd=<fd> parameter of qemu-kvm.
  2720. @param ifname: TAP interface name
  2721. @param vnet_hdr: Whether enable the vnet header
  2722. """
  2723. try:
  2724. tapfd = os.open(devname, os.O_RDWR)
  2725. except OSError, e:
  2726. raise TAPModuleError(devname, "open", e)
  2727. flags = IFF_TAP | IFF_NO_PI
  2728. if vnet_hdr and vnet_hdr_probe(tapfd):
  2729. flags |= IFF_VNET_HDR
  2730. ifr = struct.pack("16sh", ifname, flags)
  2731. try:
  2732. r = fcntl.ioctl(tapfd, TUNSETIFF, ifr)
  2733. except IOError, details:
  2734. raise TAPCreationError(ifname, details)
  2735. ifname = struct.unpack("16sh", r)[0].strip("\x00")
  2736. return tapfd
  2737. def add_to_bridge(ifname, brname):
  2738. """
  2739. Add a TAP device to bridge
  2740. @param ifname: Name of TAP device
  2741. @param brname: Name of the bridge
  2742. """
  2743. ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
  2744. index = if_nametoindex(ifname)
  2745. if index == 0:
  2746. raise TAPNotExistError(ifname)
  2747. ifr = struct.pack("16si", brname, index)
  2748. try:
  2749. r = fcntl.ioctl(ctrl_sock, SIOCBRADDIF, ifr)
  2750. except IOError, details:
  2751. raise BRAddIfError(ifname, brname, details)
  2752. ctrl_sock.close()
  2753. def bring_up_ifname(ifname):
  2754. """
  2755. Bring up an interface
  2756. @param ifname: Name of the interface
  2757. """
  2758. ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
  2759. ifr = struct.pack("16si", ifname, IFF_UP)
  2760. try:
  2761. fcntl.ioctl(ctrl_sock, SIOCSIFFLAGS, ifr)
  2762. except IOError:
  2763. raise TAPBringUpError(ifname)
  2764. ctrl_sock.close()
  2765. def if_set_macaddress(ifname, mac):
  2766. """
  2767. Set the mac address for an interface
  2768. @param ifname: Name of the interface
  2769. @mac: Mac address
  2770. """
  2771. ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
  2772. ifr = struct.pack("256s", ifname)
  2773. try:
  2774. mac_dev = fcntl.ioctl(ctrl_sock, SIOCGIFHWADDR, ifr)[18:24]
  2775. mac_dev = ":".join(["%02x" % ord(m) for m in mac_dev])
  2776. except IOError, e:
  2777. raise HwAddrGetError(ifname)
  2778. if mac_dev.lower() == mac.lower():
  2779. return
  2780. ifr = struct.pack("16sH14s", ifname, 1,
  2781. "".join([chr(int(m, 16)) for m in mac.split(":")]))
  2782. try:
  2783. fcntl.ioctl(ctrl_sock, SIOCSIFHWADDR, ifr)
  2784. except IOError, e:
  2785. logging.info(e)
  2786. raise HwAddrSetError(ifname, mac)
  2787. ctrl_sock.close()
  2788. def check_iso(url, destination, iso_sha1):
  2789. """
  2790. Verifies if ISO that can be find on url is on destination with right hash.
  2791. This function will verify the SHA1 hash of the ISO image. If the file
  2792. turns out to be missing or corrupted, let the user know we can download it.
  2793. @param url: URL where the ISO file can be found.
  2794. @param destination: Directory in local disk where we'd like the iso to be.
  2795. @param iso_sha1: SHA1 hash for the ISO image.
  2796. """
  2797. file_ok = False
  2798. if not os.path.isdir(destination):
  2799. os.makedirs(destination)
  2800. iso_path = os.path.join(destination, os.path.basename(url))
  2801. if not os.path.isfile(iso_path):
  2802. logging.warning("File %s not found", iso_path)
  2803. logging.warning("Expected SHA1 sum: %s", iso_sha1)
  2804. answer = utils.ask("Would you like to download it from %s?" % url)
  2805. if answer == 'y':
  2806. utils.interactive_download(url, iso_path, 'ISO Download')
  2807. else:
  2808. logging.warning("Missing file %s", iso_path)
  2809. logging.warning("Please download it or put an existing copy on the "
  2810. "appropriate location")
  2811. return
  2812. else:
  2813. logging.info("Found %s", iso_path)
  2814. logging.info("Expected SHA1 sum: %s", iso_sha1)
  2815. answer = utils.ask("Would you like to check %s? It might take a while" %
  2816. iso_path)
  2817. if answer == 'y':
  2818. actual_iso_sha1 = utils.hash_file(iso_path, method='sha1')
  2819. if actual_iso_sha1 != iso_sha1:
  2820. logging.error("Actual SHA1 sum: %s", actual_iso_sha1)
  2821. else:
  2822. logging.info("SHA1 sum check OK")
  2823. else:
  2824. logging.info("File %s present, but chose to not verify it",
  2825. iso_path)
  2826. return
  2827. if file_ok:
  2828. logging.info("%s present, with proper checksum", iso_path)
  2829. def virt_test_assistant(test_name, test_dir, base_dir, default_userspace_paths,
  2830. check_modules, online_docs_url):
  2831. """
  2832. Common virt test assistant module.
  2833. @param test_name: Test name, such as "kvm".
  2834. @param test_dir: Path with the test directory.
  2835. @param base_dir: Base directory used to hold images and isos.
  2836. @param default_userspace_paths: Important programs for a successful test
  2837. execution.
  2838. @param check_modules: Whether we want to verify if a given list of modules
  2839. is loaded in the system.
  2840. @param online_docs_url: URL to an online documentation system, such as an
  2841. wiki page.
  2842. """
  2843. logging_manager.configure_logging(VirtLoggingConfig(), verbose=True)
  2844. logging.info("%s test config helper", test_name)
  2845. step = 0
  2846. common_dir = os.path.dirname(sys.modules[__name__].__file__)
  2847. logging.info("")
  2848. step += 1
  2849. logging.info("%d - Verifying directories (check if the directory structure "
  2850. "expected by the default test config is there)", step)
  2851. sub_dir_list = ["images", "isos", "steps_data"]
  2852. for sub_dir in sub_dir_list:
  2853. sub_dir_path = os.path.join(base_dir, sub_dir)
  2854. if not os.path.isdir(sub_dir_path):
  2855. logging.debug("Creating %s", sub_dir_path)
  2856. os.makedirs(sub_dir_path)
  2857. else:
  2858. logging.debug("Dir %s exists, not creating" %
  2859. sub_dir_path)
  2860. logging.info("")
  2861. step += 1
  2862. logging.info("%d - Creating config files from samples (copy the default "
  2863. "config samples to actual config files)", step)
  2864. config_file_list = glob.glob(os.path.join(test_dir, "*.cfg.sample"))
  2865. config_file_list += glob.glob(os.path.join(common_dir, "*.cfg.sample"))
  2866. for config_file in config_file_list:
  2867. src_file = config_file
  2868. dst_file = os.path.join(test_dir, os.path.basename(config_file))
  2869. dst_file = dst_file.rstrip(".sample")
  2870. if not os.path.isfile(dst_file):
  2871. logging.debug("Creating config file %s from sample", dst_file)
  2872. shutil.copyfile(src_file, dst_file)
  2873. else:
  2874. logging.debug("Config file %s exists, not touching" % dst_file)
  2875. logging.info("")
  2876. step += 1
  2877. logging.info("%s - Verifying iso (make sure we have the OS ISO needed for "
  2878. "the default test set)", step)
  2879. iso_name = "Fedora-16-x86_64-DVD.iso"
  2880. fedora_dir = "pub/fedora/linux/releases/16/Fedora/x86_64/iso"
  2881. url = os.path.join("http://download.fedoraproject.org/", fedora_dir,
  2882. iso_name)
  2883. iso_sha1 = "76dd59c37e9a0ec2af56263fa892ff571c92c89a"
  2884. destination = os.path.join(base_dir, 'isos', 'linux')
  2885. check_iso(url, destination, iso_sha1)
  2886. logging.info("")
  2887. step += 1
  2888. logging.info("%d - Verifying winutils.iso (make sure we have the utility "
  2889. "ISO needed for Windows testing)", step)
  2890. logging.info("In order to run the KVM autotests in Windows guests, we "
  2891. "provide you an ISO that this script can download")
  2892. url = "http://people.redhat.com/mrodrigu/kvm/winutils.iso"
  2893. iso_sha1 = "02930224756510e383c44c49bffb760e35d6f892"
  2894. destination = os.path.join(base_dir, 'isos', 'windows')
  2895. path = os.path.join(destination, iso_name)
  2896. check_iso(url, destination, iso_sha1)
  2897. logging.info("")
  2898. step += 1
  2899. logging.info("%d - Checking if the appropriate userspace programs are "
  2900. "installed", step)
  2901. for path in default_userspace_paths:
  2902. if not os.path.isfile(path):
  2903. logging.warning("No %s found. You might need to install %s.",
  2904. path, os.path.basename(path))
  2905. else:
  2906. logging.debug("%s present", path)
  2907. logging.info("If you wish to change any userspace program path, "
  2908. "you will have to modify tests.cfg")
  2909. if check_modules:
  2910. logging.info("")
  2911. step += 1
  2912. logging.info("%d - Checking for modules %s", step,
  2913. ",".join(check_modules))
  2914. for module in check_modules:
  2915. if not utils.module_is_loaded(module):
  2916. logging.warning("Module %s is not loaded. You might want to "
  2917. "load it", module)
  2918. else:
  2919. logging.debug("Module %s loaded", module)
  2920. if online_docs_url:
  2921. logging.info("")
  2922. step += 1
  2923. logging.info("%d - Verify needed packages to get started", step)
  2924. logging.info("Please take a look at the online documentation: %s",
  2925. online_docs_url)
  2926. client_dir = os.path.abspath(os.path.join(test_dir, "..", ".."))
  2927. autotest_bin = os.path.join(client_dir, 'bin', 'autotest')
  2928. control_file = os.path.join(test_dir, 'control')
  2929. logging.info("")
  2930. logging.info("When you are done fixing eventual warnings found, "
  2931. "you can run the test using this command line AS ROOT:")
  2932. logging.info("%s %s", autotest_bin, control_file)
  2933. logging.info("Autotest prints the results dir, so you can look at DEBUG "
  2934. "logs if something went wrong")
  2935. logging.info("You can also edit the test config files")
  2936. class NumaNode(object):
  2937. """
  2938. Numa node to control processes and shared memory.
  2939. """
  2940. def __init__(self, i=-1):
  2941. self.num = self.get_node_num()
  2942. if i < 0:
  2943. self.cpus = self.get_node_cpus(int(self.num) + i).split()
  2944. else:
  2945. self.cpus = self.get_node_cpus(i - 1).split()
  2946. self.dict = {}
  2947. for i in self.cpus:
  2948. self.dict[i] = "free"
  2949. def get_node_num(self):
  2950. """
  2951. Get the number of nodes of current host.
  2952. """
  2953. cmd = utils.run("numactl --hardware")
  2954. return re.findall("available: (\d+) nodes", cmd.stdout)[0]
  2955. def get_node_cpus(self, i):
  2956. """
  2957. Get cpus of a specific node
  2958. @param i: Index of the CPU inside the node.
  2959. """
  2960. cmd = utils.run("numactl --hardware")
  2961. return re.findall("node %s cpus: (.*)" % i, cmd.stdout)[0]
    def free_cpu(self, i):
        """
        Release pin of one node.

        @param i: Index of the node.
        """
        # Mark the cpu entry as available for future pinning.
        self.dict[i] = "free"
  2968. def _flush_pin(self):
  2969. """
  2970. Flush pin dict, remove the record of exited process.
  2971. """
  2972. cmd = utils.run("ps -eLf | awk '{print $4}'")
  2973. all_pids = cmd.stdout
  2974. for i in self.cpus:
  2975. if self.dict[i] != "free" and self.dict[i] not in all_pids:
  2976. self.free_cpu(i)
  2977. @error.context_aware
  2978. def pin_cpu(self, process):
  2979. """
  2980. Pin one process to a single cpu.
  2981. @param process: Process ID.
  2982. """
  2983. self._flush_pin()
  2984. error.context("Pinning process %s to the CPU" % process)
  2985. for i in self.cpus:
  2986. if self.dict[i] == "free":
  2987. self.dict[i] = str(process)
  2988. cmd = "taskset -p %s %s" % (hex(2 ** int(i)), process)
  2989. logging.debug("NumaNode (%s): " % i + cmd)
  2990. utils.run(cmd)
  2991. return i
  2992. def show(self):
  2993. """
  2994. Display the record dict in a convenient way.
  2995. """
  2996. logging.info("Numa Node record dict:")
  2997. for i in self.cpus:
  2998. logging.info(" %s: %s" % (i, self.dict[i]))