/client/common_lib/base_utils.py
- #
- # Copyright 2008 Google Inc. Released under the GPL v2
- import os, pickle, random, re, resource, select, shutil, signal, StringIO
- import socket, struct, subprocess, sys, time, textwrap, traceback, urlparse
- import warnings, smtplib, logging, urllib2
- from threading import Thread, Event, Lock
- try:
- import hashlib
- except ImportError:
- import md5, sha
- from autotest_lib.client.common_lib import error, logging_manager, global_config
- from autotest_lib.client.common_lib import progressbar
- GLOBAL_CONFIG = global_config.global_config
- def deprecated(func):
- """This is a decorator which can be used to mark functions as deprecated.
- It will result in a warning being emitted when the function is used."""
- def new_func(*args, **dargs):
- warnings.warn("Call to deprecated function %s." % func.__name__,
- category=DeprecationWarning)
- return func(*args, **dargs)
- new_func.__name__ = func.__name__
- new_func.__doc__ = func.__doc__
- new_func.__dict__.update(func.__dict__)
- return new_func
- class _NullStream(object):
- def write(self, data):
- pass
- def flush(self):
- pass
- TEE_TO_LOGS = object()
- _the_null_stream = _NullStream()
- DEFAULT_STDOUT_LEVEL = logging.DEBUG
- DEFAULT_STDERR_LEVEL = logging.ERROR
- # prefixes for logging stdout/stderr of commands
- STDOUT_PREFIX = '[stdout] '
- STDERR_PREFIX = '[stderr] '
- def get_stream_tee_file(stream, level, prefix=''):
- if stream is None:
- return _the_null_stream
- if stream is TEE_TO_LOGS:
- return logging_manager.LoggingFile(level=level, prefix=prefix)
- return stream
- class BgJob(object):
- def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
- stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
- self.command = command
- self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
- prefix=STDOUT_PREFIX)
- self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
- prefix=STDERR_PREFIX)
- self.result = CmdResult(command)
- # allow for easy stdin input by string, we'll let subprocess create
- # a pipe for stdin input and we'll write to it in the wait loop
- if isinstance(stdin, basestring):
- self.string_stdin = stdin
- stdin = subprocess.PIPE
- else:
- self.string_stdin = None
- if verbose:
- logging.debug("Running '%s'" % command)
- # Ok, bash is nice and everything, but we might face occasions where
- # it is not available. Just do the right thing and point to /bin/sh.
- shell = '/bin/bash'
- if not os.path.isfile(shell):
- shell = '/bin/sh'
- self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- preexec_fn=self._reset_sigpipe, shell=True,
- executable=shell,
- stdin=stdin)
- def output_prepare(self, stdout_file=None, stderr_file=None):
- self.stdout_file = stdout_file
- self.stderr_file = stderr_file
- def process_output(self, stdout=True, final_read=False):
- """output_prepare must be called prior to calling this"""
- if stdout:
- pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
- else:
- pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee
- if final_read:
- # read in all the data we can from pipe and then stop
- data = []
- while select.select([pipe], [], [], 0)[0]:
- data.append(os.read(pipe.fileno(), 1024))
- if len(data[-1]) == 0:
- break
- data = "".join(data)
- else:
- # perform a single read
- data = os.read(pipe.fileno(), 1024)
- buf.write(data)
- tee.write(data)
- def cleanup(self):
- self.stdout_tee.flush()
- self.stderr_tee.flush()
- self.sp.stdout.close()
- self.sp.stderr.close()
- self.result.stdout = self.stdout_file.getvalue()
- self.result.stderr = self.stderr_file.getvalue()
- def _reset_sigpipe(self):
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
- class AsyncJob(BgJob):
- def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
- stdin=None, stderr_level=DEFAULT_STDERR_LEVEL, kill_func=None):
- super(AsyncJob, self).__init__(command, stdout_tee=stdout_tee,
- stderr_tee=stderr_tee, verbose=verbose, stdin=stdin,
- stderr_level=stderr_level)
- #start time for CmdResult
- self.start_time = time.time()
- if kill_func is None:
- self.kill_func = self._kill_self_process
- else:
- self.kill_func = kill_func
- #we're going to make some threads to drain the stdout and stderr
- def drainer(input, output, lock):
- """
- input is a pipe and output is file-like. if lock is non-None, then
- we assume output isn't threadsafe
- """
- try:
- while True:
- tmp = os.read(input.fileno(), 1024)
- if tmp == '':
- break
- if lock is not None:
- lock.acquire()
- for f in filter(lambda x: x is not None, output):
- f.write(tmp)
- if lock is not None:
- lock.release()
- except:
- pass
- self.stdout_lock = Lock()
- self.stdout_file = StringIO.StringIO()
- self.stdout_thread = Thread(target=drainer, name=("%s-stdout"%command),
- args=(self.sp.stdout, (self.stdout_file, self.stdout_tee),
- self.stdout_lock))
- self.stdout_thread.daemon = True
- self.stderr_lock = Lock()
- self.stderr_file = StringIO.StringIO()
- self.stderr_thread = Thread(target=drainer, name=("%s-stderr"%command),
- args=(self.sp.stderr, (self.stderr_file, self.stderr_tee),
- self.stderr_lock))
- self.stderr_thread.daemon = True
- self.stdout_thread.start()
- self.stderr_thread.start()
- def output_prepare(self, stdout_file=None, stderr_file=None):
- raise NotImplementedError("This object automatically prepares its own "
- "output")
- def process_output(self, stdout=True, final_read=False):
- raise NotImplementedError("This object has background threads "
- "automatically polling the process. Use the locked accessors")
- def get_stdout(self):
- self.stdout_lock.acquire()
- tmp = self.stdout_file.getvalue()
- self.stdout_lock.release()
- return tmp
- def get_stderr(self):
- self.stderr_lock.acquire()
- tmp = self.stderr_file.getvalue()
- self.stderr_lock.release()
- return tmp
- def cleanup(self):
- raise NotImplementedError("This must be waited for to get a result")
- def _kill_self_process(self):
- try:
- os.kill(self.sp.pid, signal.SIGTERM)
- except OSError:
- pass #don't care if the process is already gone, since that was the goal
- def wait_for(self, timeout=None):
- if timeout is None:
- self.sp.wait()
- elif timeout > 0:
- start_time = time.time()
- while time.time() - start_time < timeout:
- self.result.exit_status = self.sp.poll()
- if self.result.exit_status is not None:
- break
- #first need to kill the threads and process, then no more locking
- #issues for superclass's cleanup function
- self.kill_func()
- #we need to fill in parts of the result that aren't done automatically
- try:
- pid, self.result.exit_status = os.waitpid(self.sp.pid, 0)
- except OSError:
- self.result.exit_status = self.sp.poll()
- self.result.duration = time.time() - self.start_time
- assert self.result.exit_status is not None
- #make sure we've got stdout and stderr
- self.stdout_thread.join(1)
- self.stderr_thread.join(1)
- assert not self.stdout_thread.is_alive()
- assert not self.stderr_thread.is_alive()
- super(AsyncJob, self).cleanup()
- return self.result
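- # Example usage (editor's illustrative sketch, not part of the original
- # module): run a command in the background and let wait_for() fill in
- # the CmdResult; the drainer threads collect stdout/stderr meanwhile.
- #
- # >>> job = AsyncJob('echo hello')
- # >>> result = job.wait_for(timeout=10)
- # >>> result.stdout
- # 'hello\n'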
- def ip_to_long(ip):
- # !L is a long in network byte order
- return struct.unpack('!L', socket.inet_aton(ip))[0]
- def long_to_ip(number):
- # See above comment.
- return socket.inet_ntoa(struct.pack('!L', number))
- def create_subnet_mask(bits):
- return (1 << 32) - (1 << 32-bits)
- def format_ip_with_mask(ip, mask_bits):
- masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
- return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
- def normalize_hostname(alias):
- ip = socket.gethostbyname(alias)
- return socket.gethostbyaddr(ip)[0]
- def get_ip_local_port_range():
- match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
- read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
- return (int(match.group(1)), int(match.group(2)))
- def set_ip_local_port_range(lower, upper):
- write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
- '%d %d\n' % (lower, upper))
- def send_email(mail_from, mail_to, subject, body,
- smtp_info={'server':'localhost', 'user':'', 'password':'',
- 'port':''}):
- """
- Sends an email via SMTP.
- @param mail_from: string with email address of sender
- @param mail_to: string or list with email address(es) of recipients
- @param subject: string with subject of email
- @param body: (multi-line) string with body of email
- @param smtp_info: Dict with smtp server info
- server: SMTP server
- user: SMTP user (if any)
- password: SMTP password (if any)
- port: SMTP port (if non standard)
- """
- if isinstance(mail_to, str):
- mail_to = [mail_to]
- msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
- subject, body)
- try:
- # Here if we pass an empty string as port, the default (25) will be
- # used http://docs.python.org/library/smtplib.html
- mailer = smtplib.SMTP(smtp_info['server'], smtp_info['port'])
- if smtp_info['user']:
- mailer.login(smtp_info['user'], smtp_info['password'])
- try:
- mailer.sendmail(mail_from, mail_to, msg)
- finally:
- mailer.quit()
- except Exception, e:
- # Emails are non-critical, log the error, don't raise it
- logging.error("Sending email failed. Reason: %s", repr(e))
- def read_one_line(filename):
- return open(filename, 'r').readline().rstrip('\n')
- def read_file(filename):
- f = open(filename)
- try:
- return f.read()
- finally:
- f.close()
- def get_field(data, param, linestart="", sep=" "):
- """
- Parse data from string.
- @param data: Data to parse.
- example:
- data:
- cpu 324 345 34 5 345
- cpu0 34 11 34 34 33
- ^^^^
- start of line
- params 0 1 2 3 4
- @param param: Position of the parameter after the linestart marker.
- @param linestart: String with which the parameter line starts.
- @param sep: Regular expression that separates the parameters.
- """
- search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
- find = search.search(data)
- if find != None:
- return re.split("%s" % sep, find.group(1))[param]
- else:
- print "There is no line which starts with %s in data." % linestart
- return None
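- # Example (editor's illustration): extract parameter 2 of the "cpu0"
- # line from /proc/stat-style data, splitting fields on runs of spaces.
- #
- # >>> data = "cpu  324 345 34 5 345\ncpu0 34 11 34 34 33"
- # >>> get_field(data, 2, linestart="cpu0", sep=" +")
- # '34'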
- def write_one_line(filename, line):
- open_write_close(filename, line.rstrip('\n') + '\n')
- def open_write_close(filename, data):
- f = open(filename, 'w')
- try:
- f.write(data)
- finally:
- f.close()
- def matrix_to_string(matrix, header=None):
- """
- Return a pretty, aligned string representation of an nxm matrix.
- This representation can be used to print any tabular data, such as
- database results. It works by scanning the lengths of each element
- in each column, and determining the format string dynamically.
- @param matrix: Matrix representation (list with n rows of m elements).
- @param header: Optional tuple or list with header elements to be displayed.
- """
- if type(header) is list:
- header = tuple(header)
- lengths = []
- if header:
- for column in header:
- lengths.append(len(column))
- for row in matrix:
- for i, column in enumerate(row):
- column = unicode(column).encode("utf-8")
- cl = len(column)
- try:
- ml = lengths[i]
- if cl > ml:
- lengths[i] = cl
- except IndexError:
- lengths.append(cl)
- lengths = tuple(lengths)
- format_string = ""
- for length in lengths:
- format_string += "%-" + str(length) + "s "
- format_string += "\n"
- matrix_str = ""
- if header:
- matrix_str += format_string % header
- for row in matrix:
- matrix_str += format_string % tuple(row)
- return matrix_str
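- # Example (editor's illustration): column widths come from the longest
- # cell in each column, header included, so the rows line up:
- #
- # >>> print matrix_to_string([['sda', 512], ['nvme0n1', 4096]],
- # ...                        header=('DEV', 'BLKSIZE'))
- # DEV     BLKSIZE
- # sda     512
- # nvme0n1 4096
- # (cells are left-padded to the column width; trailing spaces omitted
- # here)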
- def read_keyval(path):
- """
- Read a key-value pair format file into a dictionary, and return it.
- Takes either a filename or directory name as input. If it's a
- directory name, we assume you want the file to be called keyval.
- """
- if os.path.isdir(path):
- path = os.path.join(path, 'keyval')
- keyval = {}
- if os.path.exists(path):
- for line in open(path):
- line = re.sub('#.*', '', line).rstrip()
- if not re.search(r'^[-\.\w]+=', line):
- raise ValueError('Invalid format line: %s' % line)
- key, value = line.split('=', 1)
- if re.search(r'^\d+$', value):
- value = int(value)
- elif re.search(r'^(\d+\.)?\d+$', value):
- value = float(value)
- keyval[key] = value
- return keyval
- def write_keyval(path, dictionary, type_tag=None, tap_report=None):
- """
- Write a key-value pair format file out to a file. This uses append
- mode to open the file, so existing text will not be overwritten or
- reparsed.
- If type_tag is None, then the key must be composed of alphanumeric
- characters (or dashes+underscores). However, if type_tag is not
- None then the keys must also have "{type_tag}" as a suffix. At
- the moment the only valid values of type_tag are "attr" and "perf".
- @param path: full path of the file to be written
- @param dictionary: the items to write
- @param type_tag: see text above
- """
- if os.path.isdir(path):
- path = os.path.join(path, 'keyval')
- keyval = open(path, 'a')
- if type_tag is None:
- key_regex = re.compile(r'^[-\.\w]+$')
- else:
- if type_tag not in ('attr', 'perf'):
- raise ValueError('Invalid type tag: %s' % type_tag)
- escaped_tag = re.escape(type_tag)
- key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
- try:
- for key in sorted(dictionary.keys()):
- if not key_regex.search(key):
- raise ValueError('Invalid key: %s' % key)
- keyval.write('%s=%s\n' % (key, dictionary[key]))
- finally:
- keyval.close()
- # same for tap
- if tap_report is not None and tap_report.do_tap_report:
- tap_report.record_keyval(path, dictionary, type_tag=type_tag)
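- # Example (editor's illustration; assumes /tmp/results already exists
- # as a directory, so the file written is /tmp/results/keyval):
- #
- # >>> write_keyval('/tmp/results', {'iterations': 3, 'mean': 2.5})
- # >>> d = read_keyval('/tmp/results')
- # >>> d['iterations'], d['mean']
- # (3, 2.5)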
- class FileFieldMonitor(object):
- """
- Monitors the information from the file and reports its values.
- It gathers the information at the start and stop of the measurement, or
- continuously during the measurement.
- """
- class Monitor(Thread):
- """
- Internal monitor class that continuously samples the monitored file.
- """
- def __init__(self, master):
- """
- @param master: Master class which control Monitor
- """
- Thread.__init__(self)
- self.master = master
- def run(self):
- """
- Start monitor in thread mode
- """
- while not self.master.end_event.isSet():
- self.master._get_value(self.master.logging)
- time.sleep(self.master.time_step)
- def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
- contlogging=False, separator=" +", time_step=0.1):
- """
- Initialize variables.
- @param status_file: File containing the status.
- @param data_to_read: List of tuples with data position.
- format: [(start_of_line,position in params)]
- example:
- data:
- cpu 324 345 34 5 345
- cpu0 34 11 34 34 33
- ^^^^
- start of line
- params 0 1 2 3 4
- @param mode_diff: True to subtract the old value from the new value,
- False to average the values.
- @param continuously: Start the monitoring thread using the time_step
- as the measurement period.
- @param contlogging: Log data during a continuous run.
- @param separator: Regular expression separating the fields.
- @param time_step: Period of the monitoring measurements.
- """
- self.end_event = Event()
- self.start_time = 0
- self.end_time = 0
- self.test_time = 0
- self.status_file = status_file
- self.separator = separator
- self.data_to_read = data_to_read
- self.num_of_params = len(self.data_to_read)
- self.mode_diff = mode_diff
- self.continuously = continuously
- self.time_step = time_step
- self.value = [0 for i in range(self.num_of_params)]
- self.old_value = [0 for i in range(self.num_of_params)]
- self.log = []
- self.logging = contlogging
- self.started = False
- self.num_of_get_value = 0
- self.monitor = None
- def _get_value(self, logging=True):
- """
- Return current values.
- @param logging: If True, log the values in memory; this can be a
- problem for long runs.
- """
- data = read_file(self.status_file)
- value = []
- for i in range(self.num_of_params):
- value.append(int(get_field(data,
- self.data_to_read[i][1],
- self.data_to_read[i][0],
- self.separator)))
- if logging:
- self.log.append(value)
- if not self.mode_diff:
- value = map(lambda x, y: x + y, value, self.old_value)
- self.old_value = value
- self.num_of_get_value += 1
- return value
- def start(self):
- """
- Start value monitor.
- """
- if self.started:
- self.stop()
- self.old_value = [0 for i in range(self.num_of_params)]
- self.num_of_get_value = 0
- self.log = []
- self.end_event.clear()
- self.start_time = time.time()
- self._get_value()
- self.started = True
- if (self.continuously):
- self.monitor = FileFieldMonitor.Monitor(self)
- self.monitor.start()
- def stop(self):
- """
- Stop value monitor.
- """
- if self.started:
- self.started = False
- self.end_time = time.time()
- self.test_time = self.end_time - self.start_time
- self.value = self._get_value()
- if (self.continuously):
- self.end_event.set()
- self.monitor.join()
- if (self.mode_diff):
- self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
- else:
- self.value = map(lambda x: x / self.num_of_get_value,
- self.value)
- def get_status(self):
- """
- @return: Tuple of (average values, test time, list of monitored
- values, time step of the continuous run).
- """
- if self.started:
- self.stop()
- if self.mode_diff:
- for i in range(len(self.log) - 1):
- self.log[i] = (map(lambda x, y: x - y,
- self.log[i + 1], self.log[i]))
- self.log.pop()
- return (self.value, self.test_time, self.log, self.time_step)
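- # Example usage (editor's sketch): sample the user and system time
- # fields of the "cpu" line in /proc/stat in diff mode, polling every
- # 0.1 s on a background thread.
- #
- # monitor = FileFieldMonitor("/proc/stat", [("cpu", 0), ("cpu", 2)],
- #                            True, continuously=True, time_step=0.1)
- # monitor.start()
- # ...                      # run the workload being measured
- # monitor.stop()
- # value, test_time, log, step = monitor.get_status()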
- def is_url(path):
- """Return true if path looks like a URL"""
- # for now, just handle http and ftp
- url_parts = urlparse.urlparse(path)
- return (url_parts[0] in ('http', 'ftp'))
- def urlopen(url, data=None, timeout=5):
- """Wrapper to urllib2.urlopen with timeout addition."""
- # Save old timeout
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return urllib2.urlopen(url, data=data)
- finally:
- socket.setdefaulttimeout(old_timeout)
- def urlretrieve(url, filename, data=None, timeout=300):
- """Retrieve a file from given url."""
- logging.debug('Fetching %s -> %s', url, filename)
- src_file = urlopen(url, data=data, timeout=timeout)
- try:
- dest_file = open(filename, 'wb')
- try:
- shutil.copyfileobj(src_file, dest_file)
- finally:
- dest_file.close()
- finally:
- src_file.close()
- def hash(type, input=None):
- """
- Returns a hash object of type md5 or sha1. This function is implemented in
- order to encapsulate hash objects in a way that is compatible with python
- 2.4 and python 2.6 without warnings.
- Note that even though python 2.6 hashlib supports hash types other than
- md5 and sha1, we are artificially limiting the input values in order to
- make the function to behave exactly the same among both python
- implementations.
- @param input: Optional input string that will be used to update the hash.
- """
- if type not in ['md5', 'sha1']:
- raise ValueError("Unsupported hash type: %s" % type)
- try:
- hash = hashlib.new(type)
- except NameError:
- if type == 'md5':
- hash = md5.new()
- elif type == 'sha1':
- hash = sha.new()
- if input:
- hash.update(input)
- return hash
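- # Example (editor's illustration): with no input, the digest is that of
- # the empty string, the well-known md5 constant below, regardless of
- # which backend module (hashlib or md5) provides it.
- #
- # >>> hash('md5').hexdigest()
- # 'd41d8cd98f00b204e9800998ecf8427e'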
- def get_file(src, dest, permissions=None):
- """Get a file from src, which can be local or a remote URL"""
- if src == dest:
- return
- if is_url(src):
- urlretrieve(src, dest)
- else:
- shutil.copyfile(src, dest)
- if permissions:
- os.chmod(dest, permissions)
- return dest
- def unmap_url(srcdir, src, destdir='.'):
- """
- Receives either a path to a local file or a URL.
- Returns either the path to the local file, or the fetched URL
- unmap_url('/usr/src', 'foo.tar', '/tmp')
- = '/usr/src/foo.tar'
- unmap_url('/usr/src', 'http://site/file', '/tmp')
- = '/tmp/file'
- (after retrieving it)
- """
- if is_url(src):
- url_parts = urlparse.urlparse(src)
- filename = os.path.basename(url_parts[2])
- dest = os.path.join(destdir, filename)
- return get_file(src, dest)
- else:
- return os.path.join(srcdir, src)
- def update_version(srcdir, preserve_srcdir, new_version, install,
- *args, **dargs):
- """
- Make sure srcdir is version new_version
- If not, delete it and install() the new version.
- In the preserve_srcdir case, we just check it's up to date,
- and if not, we rerun install, without removing srcdir
- """
- versionfile = os.path.join(srcdir, '.version')
- install_needed = True
- if os.path.exists(versionfile):
- old_version = pickle.load(open(versionfile))
- if old_version == new_version:
- install_needed = False
- if install_needed:
- if not preserve_srcdir and os.path.exists(srcdir):
- shutil.rmtree(srcdir)
- install(*args, **dargs)
- if os.path.exists(srcdir):
- pickle.dump(new_version, open(versionfile, 'w'))
- def get_stderr_level(stderr_is_expected):
- if stderr_is_expected:
- return DEFAULT_STDOUT_LEVEL
- return DEFAULT_STDERR_LEVEL
- def run(command, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
- stderr_is_expected=None, args=()):
- """
- Run a command on the host.
- @param command: the command line string.
- @param timeout: time limit in seconds before attempting to kill the
- running process. The run() function will take a few seconds
- longer than 'timeout' to complete if it has to kill the process.
- @param ignore_status: do not raise an exception, no matter what the exit
- code of the command is.
- @param stdout_tee: optional file-like object to which stdout data
- will be written as it is generated (data will still be stored
- in result.stdout).
- @param stderr_tee: likewise for stderr.
- @param verbose: if True, log the command being run.
- @param stdin: stdin to pass to the executed process (can be a file
- descriptor, a file object of a real file or a string).
- @param args: sequence of strings of arguments to be given to the command
- inside " quotes after they have been escaped for that; each
- element in the sequence will be given as a separate command
- argument
- @return a CmdResult object
- @raise CmdError: the exit code of the command execution was not 0
- """
- if isinstance(args, basestring):
- raise TypeError('Got a string for the "args" keyword argument, '
- 'need a sequence.')
- for arg in args:
- command += ' "%s"' % sh_escape(arg)
- if stderr_is_expected is None:
- stderr_is_expected = ignore_status
- bg_job = join_bg_jobs(
- (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
- stderr_level=get_stderr_level(stderr_is_expected)),),
- timeout)[0]
- if not ignore_status and bg_job.result.exit_status:
- raise error.CmdError(command, bg_job.result,
- "Command returned non-zero exit status")
- return bg_job.result
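- # Example usage (editor's sketch): entries in args are escaped and
- # appended to the command line as quoted arguments.
- #
- # >>> result = run('echo', args=('hello world',), timeout=10)
- # >>> result.exit_status, result.stdout
- # (0, 'hello world\n')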
- def run_parallel(commands, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None):
- """
- Behaves the same as run() with the following exceptions:
- - commands is a list of commands to run in parallel.
- - ignore_status toggles whether or not an exception should be raised
- on any error.
- @return: a list of CmdResult objects
- """
- bg_jobs = []
- for command in commands:
- bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
- stderr_level=get_stderr_level(ignore_status)))
- # Updates objects in bg_jobs list with their process information
- join_bg_jobs(bg_jobs, timeout)
- for bg_job in bg_jobs:
- if not ignore_status and bg_job.result.exit_status:
- raise error.CmdError(bg_job.command, bg_job.result,
- "Command returned non-zero exit status")
- return [bg_job.result for bg_job in bg_jobs]
- @deprecated
- def run_bg(command):
- """Function deprecated. Please use BgJob class instead."""
- bg_job = BgJob(command)
- return bg_job.sp, bg_job.result
- def join_bg_jobs(bg_jobs, timeout=None):
- """Joins the bg_jobs with the current thread.
- Returns the same list of bg_jobs objects that was passed in.
- """
- ret, timeout_error = 0, False
- for bg_job in bg_jobs:
- bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())
- try:
- # We are holding ends to stdin, stdout pipes
- # hence we need to be sure to close those fds no matter what
- start_time = time.time()
- timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)
- for bg_job in bg_jobs:
- # Process stdout and stderr
- bg_job.process_output(stdout=True, final_read=True)
- bg_job.process_output(stdout=False, final_read=True)
- finally:
- # close our ends of the pipes to the sp no matter what
- for bg_job in bg_jobs:
- bg_job.cleanup()
- if timeout_error:
- # TODO: This needs to be fixed to better represent what happens when
- # running in parallel. However this is backward compatible, so it will
- # do for the time being.
- raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
- "Command(s) did not complete within %d seconds"
- % timeout)
- return bg_jobs
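- # Example (editor's sketch): start two jobs, then join them under a
- # shared timeout; the join fills in each job's CmdResult.
- #
- # jobs = [BgJob('sleep 1'), BgJob('echo done')]
- # join_bg_jobs(jobs, timeout=30)
- # print jobs[1].result.stdout          # 'done\n'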
- def _wait_for_commands(bg_jobs, start_time, timeout):
- # This returns True if it must return due to a timeout, otherwise False.
- # To check for processes which terminate without producing any output
- # a 1 second timeout is used in select.
- SELECT_TIMEOUT = 1
- read_list = []
- write_list = []
- reverse_dict = {}
- for bg_job in bg_jobs:
- read_list.append(bg_job.sp.stdout)
- read_list.append(bg_job.sp.stderr)
- reverse_dict[bg_job.sp.stdout] = (bg_job, True)
- reverse_dict[bg_job.sp.stderr] = (bg_job, False)
- if bg_job.string_stdin is not None:
- write_list.append(bg_job.sp.stdin)
- reverse_dict[bg_job.sp.stdin] = bg_job
- if timeout:
- stop_time = start_time + timeout
- time_left = stop_time - time.time()
- else:
- time_left = None # so that select never times out
- while not timeout or time_left > 0:
- # select will return when we may write to stdin or when there is
- # stdout/stderr output we can read (including when it is
- # EOF, that is the process has terminated).
- read_ready, write_ready, _ = select.select(read_list, write_list, [],
- SELECT_TIMEOUT)
- # os.read() has to be used instead of
- # subproc.stdout.read() which will otherwise block
- for file_obj in read_ready:
- bg_job, is_stdout = reverse_dict[file_obj]
- bg_job.process_output(is_stdout)
- for file_obj in write_ready:
- # we can write PIPE_BUF bytes without blocking
- # POSIX requires PIPE_BUF is >= 512
- bg_job = reverse_dict[file_obj]
- file_obj.write(bg_job.string_stdin[:512])
- bg_job.string_stdin = bg_job.string_stdin[512:]
- # no more input data, close stdin, remove it from the select set
- if not bg_job.string_stdin:
- file_obj.close()
- write_list.remove(file_obj)
- del reverse_dict[file_obj]
- all_jobs_finished = True
- for bg_job in bg_jobs:
- if bg_job.result.exit_status is not None:
- continue
- bg_job.result.exit_status = bg_job.sp.poll()
- if bg_job.result.exit_status is not None:
- # process exited, remove its stdout/stdin from the select set
- bg_job.result.duration = time.time() - start_time
- read_list.remove(bg_job.sp.stdout)
- read_list.remove(bg_job.sp.stderr)
- del reverse_dict[bg_job.sp.stdout]
- del reverse_dict[bg_job.sp.stderr]
- else:
- all_jobs_finished = False
- if all_jobs_finished:
- return False
- if timeout:
- time_left = stop_time - time.time()
- # Kill all processes which did not complete prior to timeout
- for bg_job in bg_jobs:
- if bg_job.result.exit_status is not None:
- continue
- logging.warn('run process timeout (%s) fired on: %s', timeout,
- bg_job.command)
- nuke_subprocess(bg_job.sp)
- bg_job.result.exit_status = bg_job.sp.poll()
- bg_job.result.duration = time.time() - start_time
- return True
- def get_children_pids(ppid):
- """
- Get all PIDs of children/threads of parent ppid
- @param ppid: parent PID
- @return: list of PIDs of all children/threads of ppid
- """
- return (system_output("ps -L --ppid=%d -o lwp" % ppid).split('\n')[1:])
- def pid_is_alive(pid):
- """
- True if process pid exists and is not yet stuck in Zombie state.
- Zombies are impossible to move between cgroups, etc.
- pid can be integer, or text of integer.
- """
- path = '/proc/%s/stat' % pid
- try:
- stat = read_one_line(path)
- except IOError:
- if not os.path.exists(path):
- # file went away
- return False
- raise
- return stat.split()[2] != 'Z'
- def signal_pid(pid, sig):
- """
- Sends a signal to a process id. Returns True if the process terminated
- within roughly five seconds of the signal, False otherwise.
- """
- try:
- os.kill(pid, sig)
- except OSError:
- # The process may have died before we could kill it.
- pass
- for i in range(5):
- if not pid_is_alive(pid):
- return True
- time.sleep(1)
- # The process is still alive
- return False
- def nuke_subprocess(subproc):
- # check if the subprocess is still alive, first
- if subproc.poll() is not None:
- return subproc.poll()
- # the process is still alive; kill it via an escalating series
- # of signals.
- signal_queue = [signal.SIGTERM, signal.SIGKILL]
- for sig in signal_queue:
- signal_pid(subproc.pid, sig)
- if subproc.poll() is not None:
- return subproc.poll()
- def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
- # try to kill the process via an escalating series of signals
- for sig in signal_queue:
- if signal_pid(pid, sig):
- return
- # no signal successfully terminated the process
- raise error.AutoservRunError('Could not kill %d' % pid, None)
- def system(command, timeout=None, ignore_status=False):
- """
- Run a command
- @param timeout: timeout in seconds
- @param ignore_status: if ignore_status=False, throw an exception if the
- command's exit code is non-zero
- if ignore_status=True, return the exit code.
- @return exit status of command
- (note, this will always be zero unless ignore_status=True)
- """
- return run(command, timeout=timeout, ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status
- def system_parallel(commands, timeout=None, ignore_status=False):
- """This function returns a list of exit statuses for the respective
- list of commands."""
- return [result.exit_status for result in
- run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
- def system_output(command, timeout=None, ignore_status=False,
- retain_output=False, args=()):
- """
- Run a command and return the stdout output.
- @param command: command string to execute.
- @param timeout: time limit in seconds before attempting to kill the
- running process. The function will take a few seconds longer
- than 'timeout' to complete if it has to kill the process.
- @param ignore_status: do not raise an exception, no matter what the exit
- code of the command is.
- @param retain_output: set to True to make stdout/stderr of the command
- output to be also sent to the logging system
- @param args: sequence of strings of arguments to be given to the command
- inside " quotes after they have been escaped for that; each
- element in the sequence will be given as a separate command
- argument
- @return a string with the stdout output of the command.
- """
- if retain_output:
- out = run(command, timeout=timeout, ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
- args=args).stdout
- else:
- out = run(command, timeout=timeout, ignore_status=ignore_status,
- args=args).stdout
- if out[-1:] == '\n':
- out = out[:-1]
- return out
- def system_output_parallel(commands, timeout=None, ignore_status=False,
- retain_output=False):
- if retain_output:
- out = [bg_job.stdout for bg_job
- in run_parallel(commands, timeout=timeout,
- ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
- else:
- out = [bg_job.stdout for bg_job in run_parallel(commands,
- timeout=timeout, ignore_status=ignore_status)]
- for i, x in enumerate(out):
- if x[-1:] == '\n':
- out[i] = x[:-1]
- return out
- def etraceback(prep, exc_info):
- """
- Enhanced traceback: formats the traceback so that each line is
- prefixed with prep ("prep: line\nprep: line\n...")
- @param prep: desired line prefix
- @param exc_info: sys.exc_info of the exception
- @return: string containing the formatted exception
- """
- out = ""
- for line in traceback.format_exception(exc_info[0], exc_info[1],
- exc_info[2]):
- out += "%s: %s" % (prep, line)
- return out
- def strip_unicode(input):
- if type(input) == list:
- return [strip_unicode(i) for i in input]
- elif type(input) == dict:
- output = {}
- for key in input.keys():
- output[str(key)] = strip_unicode(input[key])
- return output
- elif type(input) == unicode:
- return str(input)
- else:
- return input
- def get_cpu_percentage(function, *args, **dargs):
- """Returns a tuple containing the CPU% and return value from function call.
- This function calculates the usage time by taking the difference of
- the user and system times both before and after the function call.
- """
- child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
- self_pre = resource.getrusage(resource.RUSAGE_SELF)
- start = time.time()
- to_return = function(*args, **dargs)
- elapsed = time.time() - start
- self_post = resource.getrusage(resource.RUSAGE_SELF)
- child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
- # Calculate CPU Percentage
- s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
- c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
- cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
- return cpu_percent, to_return
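- # Example (editor's illustration): a pure-CPU workload should report a
- # fraction close to 1.0, i.e. one fully busy core for the whole call.
- #
- # >>> percent, total = get_cpu_percentage(sum, xrange(10 ** 7))
- # >>> 0.5 < percent <= 1.5
- # True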
- class SystemLoad(object):
- """
- Get system and/or process values and return average value of load.
- """
- def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
- use_log=False):
- """
- @param pids: List of pids to be monitored. pid == 0 means the whole
- system.
- @param advanced: If True, also monitor the system IRQ and soft IRQ
- counts (for pid 0) and the per-process minor and major page faults.
- @param time_step: Time step for continuous monitoring.
- @param cpu_cont: If True monitor CPU load continuously.
- @param use_log: If true every monitoring is logged for dump.
- """
- self.pids = []
- self.stats = {}
- for pid in pids:
- if pid == 0:
- cpu = FileFieldMonitor("/proc/stat",
- [("cpu", 0), # User Time
- ("cpu", 2), # System Time
- ("intr", 0), # IRQ Count
- ("softirq", 0)], # Soft IRQ Count
- True,
- cpu_cont,
- use_log,
- " +",
- time_step)
- mem = FileFieldMonitor("/proc/meminfo",
- [("MemTotal:", 0), # Mem Total
- ("MemFree:", 0), # Mem Free
- ("Buffers:", 0), # Buffers
- ("Cached:", 0)], # Cached
- False,
- True,
- use_log,
- " +",
- time_step)
- self.stats[pid] = ["TOTAL", cpu, mem]
- self.pids.append(pid)
- else:
- name = ""
- if (type(pid) is int):
- self.pids.append(pid)
- name = get_process_name(pid)
- else:
- self.pids.append(pid[0])
- name = pid[1]
- cpu = FileFieldMonitor("/proc/%d/stat" %
- self.pids[-1],
- [("", 13), # User Time
- ("", 14), # System Time
- ("", 9), # Minority Page Fault
- ("", 11)], # Majority Page Fault
- True,
- cpu_cont,
- use_log,
- " +",
- time_step)
- mem = FileFieldMonitor("/proc/%d/status" %
- self.pids[-1],
- [("VmSize:", 0), # Virtual Memory Size
- ("VmRSS:", 0), # Resident Set Size
- ("VmPeak:", 0), # Peak VM Size
- ("VmSwap:", 0)], # VM in Swap
- False,
- True,
- use_log,
- " +",
- time_step)
- self.stats[self.pids[-1]] = [name, cpu, mem]
- self.advanced = advanced
- def __str__(self):
- """
- Define how the collected status is printed.
- """
- out = ""
- for pid in self.pids:
- for stat in self.stats[pid][1:]:
- out += str(stat.get_status()) + "\n"
- return out
- def start(self, pids=[]):
- """
- Start monitoring of the process system usage.
- @param pids: List of PIDs you intend to control. Use pids=[] to control
- all defined PIDs.
- """
- if pids == []:
- pids = self.pids
- for pid in pids:
- for stat in self.stats[pid][1:]:
- stat.start()
- def stop(self, pids=[]):
- """
- Stop monitoring of the process system usage.
- @param pids: List of PIDs you intend to control. Use pids=[] to control
- all defined PIDs.
- """
- if pids == []:
- pids = self.pids
- for pid in pids:
- for stat in self.stats[pid][1:]:
- stat.stop()
- def dump(self, pids=[]):
- """
- Get the status of monitoring.
- @param pids: List of PIDs you intend to control. Use pids=[] to control
- all defined PIDs.
- @return:
- tuple([cpu load], [memory load]):
- ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
- [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
- PID1_cpu_meas:
- average_values[], test_time, cont_meas_values[[]], time_step
- PID1_mem_meas:
- average_values[], test_time, cont_meas_values[[]], time_step
- where average_values[] are the measured values (mem_free,swap,...)
- which are described in SystemLoad.__init__()-FileFieldMonitor.
- cont_meas_values[[]] is a list of average_values in the sampling
- times.
- """
- if pids == []:
- pids = self.pids
- cpus = []
- memory = []
- for pid in pids:
- stat = (pid, self.stats[pid][1].get_status())
- cpus.append(stat)
- for pid in pids:
- stat = (pid, self.stats[pid][2].get_status())
- memory.append(stat)
- return (cpus, memory)
- def get_cpu_status_string(self, pids=[]):
- """
- Convert status to string array.
- @param pids: List of PIDs you intend to control. Use pids=[] to control
- all defined PIDs.
- @return: Table formatted as a string.
- """
- if pids == []:
- pids = self.pids
- headers = ["NAME",
- ("%7s") % "PID",
- ("%5s") % "USER",
- ("%5s") % "SYS",
- ("%5s") % "SUM"]
- if self.advanced:
- headers.extend(["MINFLT/IRQC",
- "MAJFLT/SOFTIRQ"])
- headers.append(("%11s") % "TIME")
- textstatus = []
- for pid in pids:
- stat = self.stats[pid][1].get_status()
- time = stat[1]
- stat = stat[0]
- textstatus.append(["%s" % self.stats[pid][0],
- "%7s" % pid,
- "%4.0f%%" % (stat[0] / time),
- "%4.0f%%" % (stat[1] / time),
- "%4.0f%%" % ((stat[0] + stat[1]) / time),
- "%10.3fs" % time])
- if self.advanced:
- textstatus[-1].insert(-1, "%11d" % stat[2])
- textstatus[-1].insert(-1, "%14d" % stat[3])
- return matrix_to_string(textstatus, tuple(headers))
- def get_mem_status_string(self, pids=[]):
- """
- Convert status to string array.
- @param pids: List of PIDs you intend to control. Use pids=[] to control
- all defined PIDs.
- @return: Table formatted as a string.
- """
- if pids == []:
- pids = self.pids
- headers = ["NAME",
- ("%7s") % "PID",
- ("%8s") % "TOTAL/VMSIZE",
- ("%8s") % "FREE/VMRSS",
- ("%8s") % "BUFFERS/VMPEAK",
- ("%8s") % "CACHED/VMSWAP",
- ("%11s") % "TIME"]
- textstatus = []
- for pid in pids:
- stat = self.stats[pid][2].get_status()
- time = stat[1]
- stat = stat[0]
- textstatus.append(["%s" % self.stats[pid][0],
- "%7s" % pid,
- "%10dMB" % (stat[0] / 1024),
- "%8dMB" % (stat[1] / 1024),
- "%12dMB" % (stat[2] / 1024),
- "%11dMB" % (stat[3] / 1024),
- "%10.3fs" % time])
- return matrix_to_string(textstatus, tuple(headers))
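- # Example usage (editor's sketch): monitor the whole system (pid 0)
- # and the current process, then print the collected tables.
- #
- # load = SystemLoad([0, os.getpid()])
- # load.start()
- # ...                      # workload under test
- # load.stop()
- # print load.get_cpu_status_string()
- # print load.get_mem_status_string()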
- def get_arch(run_function=run):
- """
- Get the hardware architecture of the machine.
- run_function is used to execute the commands. It defaults to
- utils.run() but a custom method (if provided) should be of the
- same schema as utils.run. It should return a CmdResult object and
- throw a CmdError exception.
- """
- arch = run_function('/bin/uname -m').stdout.rstrip()
- if re.match(r'i\d86$', arch):
- arch = 'i386'
- return arch
- def get_num_logical_cpus_per_socket(run_function=run):
- """
- Get the number of cores (including hyperthreading) per cpu.
- run_function is used to execute the commands. It defaults to
- utils.run() but a custom method (if provided) should be of the
- same schema as utils.run. It should return a CmdResult object and
- throw a CmdError exception.
- """
- siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
- num_siblings = map(int,
- re.findall(r'^siblings\s*:\s*(\d+)\s*$',
- siblings, re.M))
- if len(num_siblings) == 0:
- raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
- if min(num_siblings) != max(num_siblings):
- raise error.TestError('Number of siblings differ %r' %
- num_siblings)
- return num_siblings[0]
- def merge_trees(src, dest):
- """
- Merges a source directory tree at 'src' into a destination tree at
- 'dest'. If a path is a file in both trees then the file in the source
- tree is APPENDED to the one in the destination tree. If a path is
- a directory in both trees then the directories are recursively merged
- with this function. In any other case, the function will skip the
- paths that cannot be merged (instead of failing).
- """
- if not os.path.exists(src):
- return # exists only in dest
- elif not os.path.exists(dest):
- if os.path.isfile(src):
- shutil.copy2(src, dest) # file only in src
- else:
- shutil.copytree(src, dest, symlinks=True) # dir only in src
- return
- elif os.path.isfile(src) and os.path.isfile(dest):
- # src & dest are files in both trees, append src to dest
- destfile = open(dest, "a")
- try:
- srcfile = open(src)
- try:
- destfile.write(srcfile.read())
- finally:
- srcfile.close()
- finally:
- destfile.close()
- elif os.path.isdir(src) and os.path.isdir(dest):
- # src & dest are directories in both trees, so recursively merge
- for name in os.listdir(src):
- merge_trees(os.path.join(src, name), os.path.join(dest, name))
- else:
- # src & dest both exist, but are incompatible
- return
- class CmdResu…