
/client/common_lib/base_utils.py

https://gitlab.com/libvirt/autotest
#
# Copyright 2008 Google Inc. Released under the GPL v2

import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, traceback, urlparse
import warnings, smtplib, logging, urllib2
from threading import Thread, Event, Lock
try:
    import hashlib
except ImportError:
    import md5, sha
from autotest_lib.client.common_lib import error, logging_manager, global_config
from autotest_lib.client.common_lib import progressbar

GLOBAL_CONFIG = global_config.global_config


def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
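
# Example (illustrative sketch, not part of the original module): a function
# wrapped with @deprecated keeps its name and docstring but warns on each use.
#
#     @deprecated
#     def old_sleep(seconds):
#         time.sleep(seconds)
#
#     old_sleep(0)   # emits DeprecationWarning "Call to deprecated function..."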


class _NullStream(object):
    def write(self, data):
        pass

    def flush(self):
        pass


TEE_TO_LOGS = object()
_the_null_stream = _NullStream()

DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '


def get_stream_tee_file(stream, level, prefix=''):
    if stream is None:
        return _the_null_stream
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    return stream


class BgJob(object):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        self.command = command
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                                              prefix=STDOUT_PREFIX)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                                              prefix=STDERR_PREFIX)
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        if verbose:
            logging.debug("Running '%s'" % command)

        # Ok, bash is nice and everything, but we might face occasions where
        # it is not available. Just do the right thing and point to /bin/sh.
        shell = '/bin/bash'
        if not os.path.isfile(shell):
            shell = '/bin/sh'

        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable=shell,
                                   stdin=stdin)

    def output_prepare(self, stdout_file=None, stderr_file=None):
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file

    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this"""
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()

    def _reset_sigpipe(self):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
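
# Example (illustrative sketch, not part of the original module): BgJob starts
# the process immediately; join_bg_jobs() (defined below) drives the I/O loop
# and fills in the CmdResult.
#
#     job = BgJob('echo hello; echo oops >&2')
#     join_bg_jobs([job], timeout=10)
#     print job.result.exit_status   # 0
#     print job.result.stdout        # 'hello\n'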


class AsyncJob(BgJob):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL, kill_func=None):
        super(AsyncJob, self).__init__(command, stdout_tee=stdout_tee,
                stderr_tee=stderr_tee, verbose=verbose, stdin=stdin,
                stderr_level=stderr_level)

        # start time for CmdResult
        self.start_time = time.time()

        if kill_func is None:
            self.kill_func = self._kill_self_process
        else:
            self.kill_func = kill_func

        # we're going to make some threads to drain the stdout and stderr
        def drainer(input, output, lock):
            """
            input is a pipe and output is file-like. if lock is non-None, then
            we assume output isn't threadsafe
            """
            try:
                while True:
                    tmp = os.read(input.fileno(), 1024)
                    if tmp == '':
                        break
                    if lock is not None:
                        lock.acquire()
                    for f in filter(lambda x: x is not None, output):
                        f.write(tmp)
                    if lock is not None:
                        lock.release()
            except:
                pass

        self.stdout_lock = Lock()
        self.stdout_file = StringIO.StringIO()
        self.stdout_thread = Thread(target=drainer, name=("%s-stdout" % command),
                args=(self.sp.stdout, (self.stdout_file, self.stdout_tee),
                      self.stdout_lock))
        self.stdout_thread.daemon = True

        self.stderr_lock = Lock()
        self.stderr_file = StringIO.StringIO()
        self.stderr_thread = Thread(target=drainer, name=("%s-stderr" % command),
                args=(self.sp.stderr, (self.stderr_file, self.stderr_tee),
                      self.stderr_lock))
        self.stderr_thread.daemon = True

        self.stdout_thread.start()
        self.stderr_thread.start()

    def output_prepare(self, stdout_file=None, stderr_file=None):
        raise NotImplementedError("This object automatically prepares its own "
                                  "output")

    def process_output(self, stdout=True, final_read=False):
        raise NotImplementedError("This object has background threads "
                                  "automatically polling the process. Use the "
                                  "locked accessors")

    def get_stdout(self):
        self.stdout_lock.acquire()
        tmp = self.stdout_file.getvalue()
        self.stdout_lock.release()
        return tmp

    def get_stderr(self):
        self.stderr_lock.acquire()
        tmp = self.stderr_file.getvalue()
        self.stderr_lock.release()
        return tmp

    def cleanup(self):
        raise NotImplementedError("This must be waited for to get a result")

    def _kill_self_process(self):
        try:
            os.kill(self.sp.pid, signal.SIGTERM)
        except OSError:
            pass # don't care if the process is already gone, since that was the goal

    def wait_for(self, timeout=None):
        if timeout is None:
            self.sp.wait()

        if timeout > 0:
            start_time = time.time()
            while time.time() - start_time < timeout:
                self.result.exit_status = self.sp.poll()
                if self.result.exit_status is not None:
                    break

        # first need to kill the threads and process, then no more locking
        # issues for superclass's cleanup function
        self.kill_func()

        # we need to fill in parts of the result that aren't done automatically
        try:
            pid, self.result.exit_status = os.waitpid(self.sp.pid, 0)
        except OSError:
            self.result.exit_status = self.sp.poll()
        self.result.duration = time.time() - self.start_time
        assert self.result.exit_status is not None

        # make sure we've got stdout and stderr
        self.stdout_thread.join(1)
        self.stderr_thread.join(1)
        assert not self.stdout_thread.is_alive()
        assert not self.stderr_thread.is_alive()

        super(AsyncJob, self).cleanup()

        return self.result
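
# Example (illustrative sketch, not part of the original module): AsyncJob
# drains stdout/stderr in background threads, so a long-running command can
# be sampled while it runs and then reaped with wait_for(), which kills the
# process if it is still alive when the timeout expires.
#
#     job = AsyncJob('sleep 1; echo done')
#     partial = job.get_stdout()         # safe to call while still running
#     result = job.wait_for(timeout=30)
#     print result.stdout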


def ip_to_long(ip):
    # !L is a long in network byte order
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def long_to_ip(number):
    # See above comment.
    return socket.inet_ntoa(struct.pack('!L', number))


def create_subnet_mask(bits):
    return (1 << 32) - (1 << 32 - bits)


def format_ip_with_mask(ip, mask_bits):
    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)


def normalize_hostname(alias):
    ip = socket.gethostbyname(alias)
    return socket.gethostbyaddr(ip)[0]


def get_ip_local_port_range():
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
    return (int(match.group(1)), int(match.group(2)))


def set_ip_local_port_range(lower, upper):
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))


def send_email(mail_from, mail_to, subject, body,
               smtp_info={'server': 'localhost', 'user': '', 'password': '',
                          'port': ''}):
    """
    Sends an email via SMTP.

    @param mail_from: string with email address of sender
    @param mail_to: string or list with email address(es) of recipients
    @param subject: string with subject of email
    @param body: (multi-line) string with body of email
    @param smtp_info: dict with SMTP server info
        server: SMTP server
        user: SMTP user (if any)
        password: SMTP password (if any)
        port: SMTP port (if non standard)
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        # If we pass an empty string as the port, the default (25) will be
        # used: http://docs.python.org/library/smtplib.html
        mailer = smtplib.SMTP(smtp_info['server'], smtp_info['port'])
        if smtp_info['user']:
            mailer.login(smtp_info['user'], smtp_info['password'])
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    except Exception, e:
        # Emails are non-critical, so log the error instead of raising it
        logging.error("Sending email failed. Reason: %s", repr(e))


def read_one_line(filename):
    return open(filename, 'r').readline().rstrip('\n')


def read_file(filename):
    f = open(filename)
    try:
        return f.read()
    finally:
        f.close()


def get_field(data, param, linestart="", sep=" "):
    """
    Parse a field out of string data.

    example:
        data:
            cpu 324 345 34 5 345
            cpu0 34 11 34 34 33
            ^^^^
            start of line
            params 0 1 2 3 4

    @param data: Data to parse.
    @param param: Position of the parameter after the linestart marker.
    @param linestart: String with which the parameter line starts.
    @param sep: Regular expression that separates the parameters.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find is not None:
        return re.split("%s" % sep, find.group(1))[param]
    else:
        print "There is no line which starts with %s in data." % linestart
        return None
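
# Example (illustrative, not part of the original module): pulling the user
# time column for cpu0 out of /proc/stat-style data.
#
#     data = "cpu  324 345 34 5 345\ncpu0 34 11 34 34 33\n"
#     get_field(data, 0, linestart="cpu0", sep=" +")   # returns '34'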


def write_one_line(filename, line):
    open_write_close(filename, line.rstrip('\n') + '\n')


def open_write_close(filename, data):
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)
    lengths = []
    if header:
        for column in header:
            lengths.append(len(column))
    for row in matrix:
        for i, column in enumerate(row):
            column = unicode(column).encode("utf-8")
            cl = len(column)
            try:
                ml = lengths[i]
                if cl > ml:
                    lengths[i] = cl
            except IndexError:
                lengths.append(cl)
    lengths = tuple(lengths)

    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
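
# Example (illustrative, not part of the original module):
#
#     print matrix_to_string([['foo', 1], ['barbaz', 22]],
#                            header=('NAME', 'COUNT'))
#
# prints an aligned table (column widths grow to fit the widest cell):
#
#     NAME   COUNT
#     foo    1
#     barbaz 22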


def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        for line in open(path):
            line = re.sub('#.*', '', line).rstrip()
            if not re.search(r'^[-\.\w]+=', line):
                raise ValueError('Invalid format line: %s' % line)
            key, value = line.split('=', 1)
            if re.search('^\d+$', value):
                value = int(value)
            elif re.search('^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval


def write_keyval(path, dictionary, type_tag=None, tap_report=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the keys must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()

    # same for tap
    if tap_report is not None and tap_report.do_tap_report:
        tap_report.record_keyval(path, dictionary, type_tag=type_tag)
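
# Example (illustrative round trip, not part of the original module):
#
#     write_keyval('/tmp/demo_keyval', {'iterations': 3, 'rate': 1.5})
#     read_keyval('/tmp/demo_keyval')
#     # -> {'iterations': 3, 'rate': 1.5}  (ints and floats are re-typed)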


class FileFieldMonitor(object):
    """
    Monitors the information from the file and reports its values.

    It gathers the information at the start and stop of the measurement, or
    continuously during the measurement.
    """
    class Monitor(Thread):
        """
        Internal monitor class to ensure continuous monitoring of the
        monitored file.
        """
        def __init__(self, master):
            """
            @param master: Master class which controls the Monitor
            """
            Thread.__init__(self)
            self.master = master

        def run(self):
            """
            Start monitor in thread mode
            """
            while not self.master.end_event.isSet():
                self.master._get_value(self.master.logging)
                time.sleep(self.master.time_step)

    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
                 contlogging=False, separator=" +", time_step=0.1):
        """
        Initialize variables.

        @param status_file: File containing the status.
        @param data_to_read: List of tuples with data position.
            format: [(start_of_line, position in params)]
            example:
                data:
                    cpu 324 345 34 5 345
                    cpu0 34 11 34 34 33
                    ^^^^
                    start of line
                    params 0 1 2 3 4
        @param mode_diff: If True, subtract the old value from the new one;
            if False, average the values.
        @param continuously: Start the monitoring thread using the time_step
            as the measurement period.
        @param contlogging: Log data in continuous run.
        @param separator: Regular expression of separator.
        @param time_step: Time period of the monitoring value.
        """
        self.end_event = Event()
        self.start_time = 0
        self.end_time = 0
        self.test_time = 0

        self.status_file = status_file
        self.separator = separator
        self.data_to_read = data_to_read
        self.num_of_params = len(self.data_to_read)
        self.mode_diff = mode_diff
        self.continuously = continuously
        self.time_step = time_step

        self.value = [0 for i in range(self.num_of_params)]
        self.old_value = [0 for i in range(self.num_of_params)]
        self.log = []
        self.logging = contlogging

        self.started = False
        self.num_of_get_value = 0
        self.monitor = None

    def _get_value(self, logging=True):
        """
        Return current values.

        @param logging: If True, log the value in memory. This can be a
            problem on a long run.
        """
        data = read_file(self.status_file)
        value = []
        for i in range(self.num_of_params):
            value.append(int(get_field(data,
                                       self.data_to_read[i][1],
                                       self.data_to_read[i][0],
                                       self.separator)))

        if logging:
            self.log.append(value)
        if not self.mode_diff:
            value = map(lambda x, y: x + y, value, self.old_value)

        self.old_value = value
        self.num_of_get_value += 1
        return value

    def start(self):
        """
        Start value monitor.
        """
        if self.started:
            self.stop()
        self.old_value = [0 for i in range(self.num_of_params)]
        self.num_of_get_value = 0
        self.log = []
        self.end_event.clear()
        self.start_time = time.time()
        self._get_value()
        self.started = True
        if (self.continuously):
            self.monitor = FileFieldMonitor.Monitor(self)
            self.monitor.start()

    def stop(self):
        """
        Stop value monitor.
        """
        if self.started:
            self.started = False
            self.end_time = time.time()
            self.test_time = self.end_time - self.start_time
            self.value = self._get_value()

            if (self.continuously):
                self.end_event.set()
                self.monitor.join()

            if (self.mode_diff):
                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
            else:
                self.value = map(lambda x: x / self.num_of_get_value,
                                 self.value)

    def get_status(self):
        """
        @return: Status of the monitored process: average value,
            test time, array of monitored values, and time step of the
            continuous run.
        """
        if self.started:
            self.stop()
        if self.mode_diff:
            for i in range(len(self.log) - 1):
                self.log[i] = (map(lambda x, y: x - y,
                                   self.log[i + 1], self.log[i]))
            self.log.pop()
        return (self.value, self.test_time, self.log, self.time_step)
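
# Example (illustrative sketch, not part of the original module): measuring
# the growth of the user and system time columns of the "cpu" line in
# /proc/stat over a test run (mode_diff=True subtracts start from stop).
#
#     mon = FileFieldMonitor("/proc/stat", [("cpu", 0), ("cpu", 2)],
#                            mode_diff=True)
#     mon.start()
#     # ... run the workload ...
#     mon.stop()
#     user_delta, sys_delta = mon.get_status()[0]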


def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    url_parts = urlparse.urlparse(path)
    return (url_parts[0] in ('http', 'ftp'))


def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)


def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        dest_file = open(filename, 'wb')
        try:
            shutil.copyfileobj(src_file, dest_file)
        finally:
            dest_file.close()
    finally:
        src_file.close()


def hash(type, input=None):
    """
    Returns a hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        hash = hashlib.new(type)
    except NameError:
        if type == 'md5':
            hash = md5.new()
        elif type == 'sha1':
            hash = sha.new()

    if input:
        hash.update(input)

    return hash
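
# Example (illustrative, not part of the original module): the wrapper uses
# hashlib where available and falls back to the md5/sha modules on python 2.4.
#
#     digest = hash('md5', 'some data').hexdigest()
#     # incremental use also works:
#     h = hash('sha1')
#     h.update('some data')
#     print h.hexdigest()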


def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest


def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    Returns either the path to the local file, or the fetched URL.

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if is_url(src):
        url_parts = urlparse.urlparse(src)
        filename = os.path.basename(url_parts[2])
        dest = os.path.join(destdir, filename)
        return get_file(src, dest)
    else:
        return os.path.join(srcdir, src)


def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version.

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        old_version = pickle.load(open(versionfile))
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)

    if os.path.exists(srcdir):
        pickle.dump(new_version, open(versionfile, 'w'))


def get_stderr_level(stderr_is_expected):
    if stderr_is_expected:
        return DEFAULT_STDOUT_LEVEL
    return DEFAULT_STDERR_LEVEL


def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=()):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a CmdResult object

    @raise CmdError: the exit code of the command execution was not 0
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    for arg in args:
        command += ' "%s"' % sh_escape(arg)

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
               stderr_level=get_stderr_level(stderr_is_expected)),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
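
# Example (illustrative, not part of the original module): the args sequence
# is sh-escaped and appended to the command line, so spaces are safe.
#
#     result = run('ls', args=('/tmp', '/nonexistent'), ignore_status=True)
#     print result.exit_status   # non-zero, but no CmdError raised
#     print result.stdout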


def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]


@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result


def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    ret, timeout_error = 0, False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs


def _wait_for_commands(bg_jobs, start_time, timeout):
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    reverse_dict = {}

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin or when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated).
        read_ready, write_ready, _ = select.select(read_list, write_list, [],
                                                   SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]

            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warn('run process timeout (%s) fired on: %s', timeout,
                     bg_job.command)
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True


def get_children_pids(ppid):
    """
    Get all PIDs of children/threads of parent ppid.

    @param ppid: parent PID
    @return: list of PIDs of all children/threads of ppid
    """
    return (system_output("ps -L --ppid=%d -o lwp" % ppid).split('\n')[1:])


def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    path = '/proc/%s/stat' % pid

    try:
        stat = read_one_line(path)
    except IOError:
        if not os.path.exists(path):
            # file went away
            return False
        raise

    return stat.split()[2] != 'Z'


def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    for i in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False


def nuke_subprocess(subproc):
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        signal_pid(subproc.pid, sig)
        if subproc.poll() is not None:
            return subproc.poll()


def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)


def system(command, timeout=None, ignore_status=False):
    """
    Run a command.

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status


def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    return [bg_jobs.exit_status for bg_jobs in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]


def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                  args=args).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  args=args).stdout
    if out[-1:] == '\n':
        out = out[:-1]
    return out


def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    if retain_output:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
    else:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)]
    # strip the trailing newline from each output, as system_output() does
    out = [x[:-1] if x[-1:] == '\n' else x for x in out]
    return out


def etraceback(prep, exc_info):
    """
    Enhanced Traceback formats traceback into lines "prep: line\nname: line"

    @param prep: desired line prefix
    @param exc_info: sys.exc_info of the exception
    @return: string which contains a beautifully formatted exception
    """
    out = ""
    for line in traceback.format_exception(exc_info[0], exc_info[1],
                                           exc_info[2]):
        out += "%s: %s" % (prep, line)
    return out


def strip_unicode(input):
    if type(input) == list:
        return [strip_unicode(i) for i in input]
    elif type(input) == dict:
        output = {}
        for key in input.keys():
            output[str(key)] = strip_unicode(input[key])
        return output
    elif type(input) == unicode:
        return str(input)
    else:
        return input


def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return


class SystemLoad(object):
    """
    Get system and/or process values and return average value of load.
    """
    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
                 use_log=False):
        """
        @param pids: List of pids to be monitored. pid == 0 means the whole
               system will be monitored.
        @param advanced: Additionally monitor the system IRQ and soft IRQ
               counts, and the process minor and major page faults.
        @param time_step: Time step for continuous monitoring.
        @param cpu_cont: If True, monitor CPU load continuously.
        @param use_log: If True, every measurement is logged for dump.
        """
        self.pids = []
        self.stats = {}
        for pid in pids:
            if pid == 0:
                cpu = FileFieldMonitor("/proc/stat",
                                       [("cpu", 0), # User Time
                                        ("cpu", 2), # System Time
                                        ("intr", 0), # IRQ Count
                                        ("softirq", 0)], # Soft IRQ Count
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/meminfo",
                                       [("MemTotal:", 0), # Mem Total
                                        ("MemFree:", 0), # Mem Free
                                        ("Buffers:", 0), # Buffers
                                        ("Cached:", 0)], # Cached
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[pid] = ["TOTAL", cpu, mem]
                self.pids.append(pid)
            else:
                name = ""
                if (type(pid) is int):
                    self.pids.append(pid)
                    name = get_process_name(pid)
                else:
                    self.pids.append(pid[0])
                    name = pid[1]

                cpu = FileFieldMonitor("/proc/%d/stat" %
                                       self.pids[-1],
                                       [("", 13), # User Time
                                        ("", 14), # System Time
                                        ("", 9), # Minor Page Faults
                                        ("", 11)], # Major Page Faults
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/%d/status" %
                                       self.pids[-1],
                                       [("VmSize:", 0), # Virtual Memory Size
                                        ("VmRSS:", 0), # Resident Set Size
                                        ("VmPeak:", 0), # Peak VM Size
                                        ("VmSwap:", 0)], # VM in Swap
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[self.pids[-1]] = [name, cpu, mem]

        self.advanced = advanced

    def __str__(self):
        """
        Define how to print the status.
        """
        out = ""
        for pid in self.pids:
            for stat in self.stats[pid][1:]:
                out += str(stat.get_status()) + "\n"
        return out

    def start(self, pids=[]):
        """
        Start monitoring of the process system usage.

        @param pids: List of PIDs you intend to control. Use pids=[] to
             control all defined PIDs.
        """
        if pids == []:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.start()

    def stop(self, pids=[]):
        """
        Stop monitoring of the process system usage.

        @param pids: List of PIDs you intend to control. Use pids=[] to
             control all defined PIDs.
        """
        if pids == []:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.stop()

    def dump(self, pids=[]):
        """
        Get the status of monitoring.

        @param pids: List of PIDs you intend to control. Use pids=[] to
             control all defined PIDs.
        @return:
            tuple([cpu load], [memory load]):
                ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
                 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])

            PID1_cpu_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            PID1_mem_meas:
                average_values[], test_time, cont_meas_values[[]], time_step

            where average_values[] are the measured values (mem_free, swap, ...)
            which are described in SystemLoad.__init__()-FileFieldMonitor.
            cont_meas_values[[]] is a list of average_values in the sampling
            times.
        """
        if pids == []:
            pids = self.pids

        cpus = []
        memory = []
        for pid in pids:
            stat = (pid, self.stats[pid][1].get_status())
            cpus.append(stat)
        for pid in pids:
            stat = (pid, self.stats[pid][2].get_status())
            memory.append(stat)

        return (cpus, memory)

    def get_cpu_status_string(self, pids=[]):
        """
        Convert status to string array.

        @param pids: List of PIDs you intend to control. Use pids=[] to
             control all defined PIDs.
        @return: String formatted as a table.
        """
        if pids == []:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%5s") % "USER",
                   ("%5s") % "SYS",
                   ("%5s") % "SUM"]
        if self.advanced:
            headers.extend(["MINFLT/IRQC",
                            "MAJFLT/SOFTIRQ"])
        headers.append(("%11s") % "TIME")
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][1].get_status()
            time = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%4.0f%%" % (stat[0] / time),
                               "%4.0f%%" % (stat[1] / time),
                               "%4.0f%%" % ((stat[0] + stat[1]) / time),
                               "%10.3fs" % time])
            if self.advanced:
                textstatus[-1].insert(-1, "%11d" % stat[2])
                textstatus[-1].insert(-1, "%14d" % stat[3])

        return matrix_to_string(textstatus, tuple(headers))

    def get_mem_status_string(self, pids=[]):
        """
        Convert status to string array.

        @param pids: List of PIDs you intend to control. Use pids=[] to
             control all defined PIDs.
        @return: String formatted as a table.
        """
        if pids == []:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%8s") % "TOTAL/VMSIZE",
                   ("%8s") % "FREE/VMRSS",
                   ("%8s") % "BUFFERS/VMPEAK",
                   ("%8s") % "CACHED/VMSWAP",
                   ("%11s") % "TIME"]
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][2].get_status()
            time = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%10dMB" % (stat[0] / 1024),
                               "%8dMB" % (stat[1] / 1024),
                               "%12dMB" % (stat[2] / 1024),
                               "%11dMB" % (stat[3] / 1024),
                               "%10.3fs" % time])

        return matrix_to_string(textstatus, tuple(headers))


def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    arch = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', arch):
        arch = 'i386'
    return arch


def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    num_siblings = map(int,
                       re.findall(r'^siblings\s*:\s*(\d+)\s*$',
                                  siblings, re.M))
    if len(num_siblings) == 0:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    if min(num_siblings) != max(num_siblings):
        raise error.TestError('Number of siblings differ %r' %
                              num_siblings)
    return num_siblings[0]


def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees then the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        return # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest) # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True) # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return


class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """

    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration

    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, stdout, stderr))


class run_randomly:
    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially

    def add(self, *args, **dargs):
        test = (args, dargs)
        self.test_list.append(test)

    def run(self, fn):
        while self.test_list:
            test_index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                test_index = 0
            (args, dargs) = self.test_list.pop(test_index)
            fn(*args, **dargs)


def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
        return __import__(module, {}, {}, [short_module])
    return dummy


def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    module = import_site_module(path, module, modulefile=modulefile)
    if not module:
        return dummy

    # special unique value to tell us if the symbol can't be imported
    cant_import = object()

    obj = getattr(module, name, cant_import)
    if obj is cant_import:
        logging.debug("unable to import site symbol '%s', using non-site "
                      "implementation", name)
        return dummy

    return obj


def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    res = import_site_symbol(path, module, classname, None, modulefile)
    if res:
        if not issubclass(res, baseclass):
            # if not a subclass of baseclass then mix in baseclass with the
            # site specific class object and return the result
            res = type(classname, (res, baseclass), {})
    else:
        res = baseclass

    return res


def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    return import_site_symbol(path, module, funcname, dummy, modulefile)


def _get_pid_path(program_name):
    pid_files_dir = GLOBAL_CONFIG.get_config_value("SERVER", 'pid_files_dir',
                                                   default="")
    if not pid_files_dir:
        base_dir = os.path.dirname(__file__)
        pid_path = os.path.abspath(os.path.join(base_dir, "..", "..",
                                                "%s.pid" % program_name))
    else:
        pid_path = os.path.join(pid_files_dir, "%s.pid" % program_name)

    return pid_path


def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
        program_name: prefix for file name
    """
    pidfile = open(_get_pid_path(program_name), "w")
    try:
        pidfile.write("%s\n" % os.getpid())
    finally:
        pidfile.close()


def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)

    try:
        os.remove(pidfile_path)
    except OSError:
        if not os.path.exists(pidfile_path):
            return
        raise


def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    pidfile = open(_get_pid_path(program_name), 'r')

    try:
        try:
            pid = int(pidfile.readline())
        except IOError:
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()

    return pid


def get_process_name(pid):
    """
    Get process name from PID.

    @param pid: PID of process.
    """
    return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]


def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    if pid is None:
        return False
    return pid_is_alive(pid)


def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if pid:
        signal_pid(pid, sig)


def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the longest leading common path
    for i in xrange(min(len(path_list), len(ref_list))):
        if path_list[i] != ref_list[i]:
            # decrement i so when exiting this loop either by no match or by
            # end of range we are one step behind
            i -= 1
            break
    i += 1
    # drop the common part of the paths, not interested in that anymore
    del path_list[:i]

    # for each uncommon component in the reference prepend a ".."
    path_list[:0] = ['..'] * (len(ref_list) - i)

    return os.path.join(*path_list)
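
# Example (worked values, not part of the original module): with
# path='/usr/local/bin/tool' and reference='/usr/share/doc', the common
# prefix is '/usr', so the two remaining reference components each
# contribute a '..':
#
#     get_relative_path('/usr/local/bin/tool', '/usr/share/doc')
#     # -> '../../local/bin/tool'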


def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
        command: the command string to escape.

    Returns:
        The escaped command string. The required enclosing double
        quotes are NOT added and so should be added at some point by
        the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    command = command.replace("\\", "\\\\")
    command = command.replace("$", r'\$')
    command = command.replace('"', r'\"')
    command = command.replace('`', r'\`')
    return command
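
# Example (illustrative, not part of the original module): the caller adds
# the surrounding double quotes, as run() does for its args sequence.
#
#     cmd = 'echo "%s"' % sh_escape('price: $100')
#     # the shell now prints the literal string, without expanding $1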


def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    if 'CHOST' in os.environ:
        args.append('--host=' + os.environ['CHOST'])
    if 'CBUILD' in os.environ:
        args.append('--build=' + os.environ['CBUILD'])
    if 'CTARGET' in os.environ:
        args.append('--target=' + os.environ['CTARGET'])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))


def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    cmd = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
    return system(cmd, timeout=timeout, ignore_status=ignore_status)


def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while len(ax) > 0 and len(ay) > 0:
        cx = ax.pop(0)
        cy = ay.pop(0)
        maxlen = max(len(cx), len(cy))
        c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    return cmp(len(ax), len(ay))


def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary. Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            dict[match.group(1).lower()] = match.group(2)
        else:
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored." % (arg, arg_re.pattern))
    return dict
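
# Example (illustrative, not part of the original module):
#
#     args_to_dict(['IMAGE=foo.img', 'debug:1', 'bogus'])
#     # -> {'image': 'foo.img', 'debug': '1'}  ('bogus' is logged and ignored)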


def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            try:
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                s.bind(('', port))
                return s.getsockname()[1]
            except socket.error:
                return None
        finally:
            s.close()

    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    while True:
        # Ask the OS for an unused port.
        port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        # Check if this port is unused on the other protocol.
        if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
            return port
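
# Example (illustrative, not part of the original module): binding to port 0
# lets the kernel pick a free TCP port, which is then re-checked on UDP.
#
#     port = get_unused_port()
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind(('127.0.0.1', port))   # may still race with other processes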


def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y" % question)
        return "y"
    return raw_input("%s INFO | %s (y/n) " %
                     (time.strftime("%H:%M:%S", time.localtime()), question))


def display_data_size(size):
    '''
    Display data size in human readable units.

    @type size: int
    @param size: Data size, in Bytes.
    @return: Human readable string with data size.
    '''
    prefixes = ['B', 'kB', 'MB', 'GB', 'TB']
    i = 0
    while size > 1000.0:
        size /= 1000.0
        i += 1

    return '%.2f %s' % (size, prefixes[i])


def interactive_download(url, output_file, title='', chunk_size=100 * 1024):
    '''
    Interactively downloads a given file url to a given output file.

    @type url: string
    @param url: URL for the file to be downloaded
    @type output_file: string
    @param output_file: file name or absolute path on which to save the file to
    @type title: string
    @param title: optional title to go along the progress bar
    @type chunk_size: integer
    @param chunk_size: amount of data to read at a time
    '''
    output_dir = os.path.dirname(output_file)
    output_file = open(output_file, 'w+b')
    input_file = urllib2.urlopen(url)

    try:
        file_size = int(input_file.headers['Content-Length'])
    except KeyError:
        raise ValueError('Could not find file size in HTTP headers')

    logging.info('Downloading %s, %s to %s', os.path.basename(url),
                 display_data_size(file_size), output_dir)

    # Calculate progress bar size based on title size
    if title:
        width = progressbar.ProgressBar.DEFAULT_WIDTH - len(title)
        progress_bar = progressbar.ProgressBar(maximum=file_size,
                                               width=width, title=title)
    else:
        progress_bar = progressbar.ProgressBar(maximum=file_size)

    # Download the file, while interactively updating the progress
    progress_bar.update_screen()
    while True:
        data = input_file.read(chunk_size)
        if data:
            progress_bar.increment(len(data))
            output_file.write(data)
        else:
            progress_bar.update(file_size)
            print
            break

    output_file.close()