PageRenderTime 36ms CodeModel.GetById 18ms app.highlight 394ms RepoModel.GetById 1ms app.codeStats 3ms

/components/openstack/nova/files/solariszones/driver.py

https://bitbucket.org/dilos/userland-gate
Python | 4830 lines | 4402 code | 149 blank | 279 comment | 207 complexity | 25104b04cf6add9d01b73ed8ea618321 MD5 | raw file
   1# Copyright 2011 Justin Santa Barbara
   2# All Rights Reserved.
   3#
   4# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
   5#
   6#    Licensed under the Apache License, Version 2.0 (the "License"); you may
   7#    not use this file except in compliance with the License. You may obtain
   8#    a copy of the License at
   9#
  10#         http://www.apache.org/licenses/LICENSE-2.0
  11#
  12#    Unless required by applicable law or agreed to in writing, software
  13#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  14#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  15#    License for the specific language governing permissions and limitations
  16#    under the License.
  17
  18"""
  19Driver for Solaris Zones (nee Containers):
  20"""
  21
  22import base64
  23import glob
  24import os
  25import platform
  26import shutil
  27import tempfile
  28import uuid
  29
  30from collections import defaultdict
  31from openstack_common import get_ovsdb_info
  32import rad.bindings.com.oracle.solaris.rad.archivemgr_1 as archivemgr
  33import rad.bindings.com.oracle.solaris.rad.kstat_2 as kstat
  34import rad.bindings.com.oracle.solaris.rad.zonemgr_1 as zonemgr
  35import rad.client
  36import rad.connect
  37from solaris_install.target.size import Size
  38
  39from cinderclient import exceptions as cinder_exception
  40from cinderclient.v1 import client as v1_client
  41from eventlet import greenthread
  42from keystoneclient import exceptions as keystone_exception
  43from lxml import etree
  44from oslo_concurrency import lockutils, processutils
  45from oslo_config import cfg
  46from oslo_log import log as logging
  47from oslo_serialization import jsonutils
  48from oslo_utils import excutils
  49from oslo_utils import fileutils
  50from oslo_utils import strutils
  51from oslo_utils import versionutils
  52from passlib.hash import sha256_crypt
  53
  54from nova.api.metadata import password
  55from nova.compute import arch
  56from nova.compute import hv_type
  57from nova.compute import power_state
  58from nova.compute import task_states
  59from nova.compute import vm_mode
  60from nova.compute import vm_states
  61from nova import conductor
  62import nova.conf
  63from nova.console import type as ctype
  64from nova import context as nova_context
  65from nova import crypto
  66from nova import exception
  67from nova.i18n import _, _LE, _LI
  68from nova.image import API as glance_api
  69from nova.image import glance
  70from nova.network.neutronv2 import api as neutronv2_api
  71from nova import objects
  72from nova.objects import flavor as flavor_obj
  73from nova.objects import migrate_data as migrate_data_obj
  74from nova import utils
  75from nova.virt import driver
  76from nova.virt import event as virtevent
  77from nova.virt import hardware
  78from nova.virt import images
  79from nova.virt.solariszones import sysconfig
  80from nova.volume.cinder import API
  81from nova.volume.cinder import cinderclient
  82from nova.volume.cinder import translate_volume_exception
  83from nova.volume.cinder import _untranslate_volume_summary_view
  84
  85solariszones_opts = [
  86    cfg.StrOpt('boot_volume_type',
  87               default=None,
  88               help='Cinder volume type to use for boot volumes'),
  89    cfg.StrOpt('boot_volume_az',
  90               default=None,
  91               help='Cinder availability zone to use for boot volumes'),
  92    cfg.StrOpt('glancecache_dirname',
  93               default='/var/share/nova/images',
  94               help='Default path to Glance cache for Solaris Zones.'),
  95    cfg.StrOpt('live_migration_cipher',
  96               help='Cipher to use for encryption of memory traffic during '
  97                    'live migration. If not specified, a common encryption '
  98                    'algorithm will be negotiated. Options include: none or '
  99                    'the name of a supported OpenSSL cipher algorithm.'),
 100    cfg.StrOpt('solariszones_snapshots_directory',
 101               default='$instances_path/snapshots',
 102               help='Location to store snapshots before uploading them to the '
 103                    'Glance image service.'),
 104    cfg.StrOpt('zones_suspend_path',
 105               default='/var/share/zones/SYSsuspend',
 106               help='Default path for suspend images for Solaris Zones.'),
 107    cfg.BoolOpt('solariszones_boot_options',
 108                default=True,
 109                help='Allow kernel boot options to be set in instance '
 110                     'metadata.'),
 111]
 112
 113CONF = nova.conf.CONF
 114CONF.register_opts(solariszones_opts, 'solariszones')
 115LOG = logging.getLogger(__name__)
 116
 117# These should match the strings returned by the zone_state_str()
 118# function in the (private) libzonecfg library. These values are in turn
 119# returned in the 'state' string of the Solaris Zones' RAD interface by
 120# the zonemgr(3RAD) provider.
 121ZONE_STATE_CONFIGURED = 'configured'
 122ZONE_STATE_INCOMPLETE = 'incomplete'
 123ZONE_STATE_UNAVAILABLE = 'unavailable'
 124ZONE_STATE_INSTALLED = 'installed'
 125ZONE_STATE_READY = 'ready'
 126ZONE_STATE_RUNNING = 'running'
 127ZONE_STATE_SHUTTING_DOWN = 'shutting_down'
 128ZONE_STATE_DOWN = 'down'
 129ZONE_STATE_MOUNTED = 'mounted'
 130
 131# Mapping between zone state and Nova power_state.
 132SOLARISZONES_POWER_STATE = {
 133    ZONE_STATE_CONFIGURED:      power_state.NOSTATE,
 134    ZONE_STATE_INCOMPLETE:      power_state.NOSTATE,
 135    ZONE_STATE_UNAVAILABLE:     power_state.NOSTATE,
 136    ZONE_STATE_INSTALLED:       power_state.SHUTDOWN,
 137    ZONE_STATE_READY:           power_state.RUNNING,
 138    ZONE_STATE_RUNNING:         power_state.RUNNING,
 139    ZONE_STATE_SHUTTING_DOWN:   power_state.RUNNING,
 140    ZONE_STATE_DOWN:            power_state.RUNNING,
 141    ZONE_STATE_MOUNTED:         power_state.NOSTATE
 142}
 143
 144# Solaris Zones brands as defined in brands(5).
 145ZONE_BRAND_LABELED = 'labeled'
 146ZONE_BRAND_SOLARIS = 'solaris'
 147ZONE_BRAND_SOLARIS_KZ = 'solaris-kz'
 148ZONE_BRAND_SOLARIS10 = 'solaris10'
 149
 150# Mapping between supported zone brands and the name of the corresponding
 151# brand template.
 152ZONE_BRAND_TEMPLATE = {
 153    ZONE_BRAND_SOLARIS:         'SYSdefault',
 154    ZONE_BRAND_SOLARIS_KZ:      'SYSsolaris-kz',
 155}
 156
 157MAX_CONSOLE_BYTES = 102400
 158
 159VNC_CONSOLE_BASE_FMRI = 'svc:/application/openstack/nova/zone-vnc-console'
 160# Required in order to create a zone VNC console SMF service instance
 161VNC_SERVER_PATH = '/usr/bin/vncserver'
 162XTERM_PATH = '/usr/bin/xterm'
 163
 164ROOTZPOOL_RESOURCE = 'rootzpool'
 165
 166# The underlying Solaris Zones framework does not expose a specific
 167# version number, instead relying on feature tests to identify what is
 168# and what is not supported. A HYPERVISOR_VERSION is defined here for
 169# Nova's use but it generally should not be changed unless there is a
 170# incompatible change such as concerning kernel zone live migration.
 171HYPERVISOR_VERSION = '5.11'
 172
 173shared_storage = ['iscsi', 'fibre_channel']
 174
 175KSTAT_TYPE = {
 176    'NVVT_STR': 'string',
 177    'NVVT_STRS': 'strings',
 178    'NVVT_INT': 'integer',
 179    'NVVT_INTS': 'integers',
 180    'NVVT_KSTAT': 'kstat',
 181}
 182
 183
 184def lookup_resource(zone, resource):
 185    """Lookup specified resource from specified Solaris Zone."""
 186    try:
 187        val = zone.getResources(zonemgr.Resource(resource))
 188    except rad.client.ObjectError:
 189        return None
 190    except Exception:
 191        raise
 192    return val[0] if val else None
 193
 194
 195def lookup_resource_property(zone, resource, prop, filter=None):
 196    """Lookup specified property from specified Solaris Zone resource."""
 197    try:
 198        val = zone.getResourceProperties(zonemgr.Resource(resource, filter),
 199                                         [prop])
 200    except rad.client.ObjectError:
 201        return None
 202    except Exception:
 203        raise
 204    return val[0].value if val else None
 205
 206
 207def lookup_resource_property_value(zone, resource, prop, value):
 208    """Lookup specified property with value from specified Solaris Zone
 209    resource. Returns resource object if matching value is found, else None
 210    """
 211    try:
 212        resources = zone.getResources(zonemgr.Resource(resource))
 213        for resource in resources:
 214            for propertee in resource.properties:
 215                if propertee.name == prop and propertee.value == value:
 216                    return resource
 217        else:
 218            return None
 219    except rad.client.ObjectError:
 220        return None
 221    except Exception:
 222        raise
 223
 224
 225def zonemgr_strerror(ex):
 226    """Format the payload from a zonemgr(3RAD) rad.client.ObjectError
 227    exception into a sensible error string that can be logged. Newlines
 228    are converted to a colon-space string to create a single line.
 229
 230    If the exception was something other than rad.client.ObjectError,
 231    just return it as a string.
 232    """
 233    if not isinstance(ex, rad.client.ObjectError):
 234        return str(ex)
 235    payload = ex.get_payload()
 236    if payload.code == zonemgr.ErrorCode.NONE:
 237        return str(ex)
 238    error = [str(payload.code)]
 239    if payload.str is not None and payload.str != '':
 240        error.append(payload.str)
 241    if payload.stderr is not None and payload.stderr != '':
 242        stderr = payload.stderr.rstrip()
 243        error.append(stderr.replace('\n', ': '))
 244    result = ': '.join(error)
 245    return result
 246
 247
 248class MemoryAlignmentIncorrect(exception.FlavorMemoryTooSmall):
 249    msg_fmt = _("Requested flavor, %(flavor)s, memory size %(memsize)s does "
 250                "not align on %(align)s boundary.")
 251
 252
 253class SolarisVolumeAPI(API):
 254    """ Extending the volume api to support additional cinder sub-commands
 255    """
 256    @translate_volume_exception
 257    def create(self, context, size, name, description, snapshot=None,
 258               image_id=None, volume_type=None, metadata=None,
 259               availability_zone=None, source_volume=None):
 260        """Clone the source volume by calling the cinderclient version of
 261        create with a source_volid argument
 262
 263        :param context: the context for the clone
 264        :param size: size of the new volume, must be the same as the source
 265            volume
 266        :param name: display_name of the new volume
 267        :param description: display_description of the new volume
 268        :param snapshot: Snapshot object
 269        :param image_id: image_id to create the volume from
 270        :param volume_type: type of volume
 271        :param metadata: Additional metadata for the volume
 272        :param availability_zone: zone:host where the volume is to be created
 273        :param source_volume: Volume object
 274
 275        Returns a volume object
 276        """
 277        client = cinderclient(context)
 278
 279        if snapshot is not None:
 280            snapshot_id = snapshot['id']
 281        else:
 282            snapshot_id = None
 283
 284        if source_volume is not None:
 285            source_volid = source_volume['id']
 286        else:
 287            source_volid = None
 288
 289        kwargs = dict(snapshot_id=snapshot_id,
 290                      volume_type=volume_type,
 291                      user_id=context.user_id,
 292                      project_id=context.project_id,
 293                      availability_zone=availability_zone,
 294                      metadata=metadata,
 295                      imageRef=image_id,
 296                      source_volid=source_volid)
 297
 298        if isinstance(client, v1_client.Client):
 299            kwargs['display_name'] = name
 300            kwargs['display_description'] = description
 301        else:
 302            kwargs['name'] = name
 303            kwargs['description'] = description
 304
 305        try:
 306            item = cinderclient(context).volumes.create(size, **kwargs)
 307            return _untranslate_volume_summary_view(context, item)
 308        except cinder_exception.OverLimit:
 309            raise exception.OverQuota(overs='volumes')
 310        except (cinder_exception.BadRequest,
 311                keystone_exception.BadRequest) as reason:
 312            raise exception.InvalidInput(reason=reason)
 313
 314    @translate_volume_exception
 315    def update(self, context, volume_id, fields):
 316        """Update the fields of a volume for example used to rename a volume
 317        via a call to cinderclient
 318
 319        :param context: the context for the update
 320        :param volume_id: the id of the volume to update
 321        :param fields: a dictionary of of the name/value pairs to update
 322        """
 323        cinderclient(context).volumes.update(volume_id, **fields)
 324
 325    @translate_volume_exception
 326    def extend(self, context, volume, newsize):
 327        """Extend the size of a cinder volume by calling the cinderclient
 328
 329        :param context: the context for the extend
 330        :param volume: the volume object to extend
 331        :param newsize: the new size of the volume in GB
 332        """
 333        cinderclient(context).volumes.extend(volume, newsize)
 334
 335
 336class ZoneConfig(object):
 337    """ZoneConfig - context manager for access zone configurations.
 338    Automatically opens the configuration for a zone and commits any changes
 339    before exiting
 340    """
 341    def __init__(self, zone):
 342        """zone is a zonemgr object representing either a kernel zone or
 343        non-global zone.
 344        """
 345        self.zone = zone
 346        self.editing = False
 347
 348    def __enter__(self):
 349        """enables the editing of the zone."""
 350        try:
 351            self.zone.editConfig()
 352            self.editing = True
 353            return self
 354        except Exception as ex:
 355            reason = zonemgr_strerror(ex)
 356            LOG.exception(_("Unable to initialize editing of instance '%s' "
 357                            "via zonemgr(3RAD): %s")
 358                          % (self.zone.name, reason))
 359            raise
 360
 361    def __exit__(self, exc_type, exc_val, exc_tb):
 362        """looks for any kind of exception before exiting.  If one is found,
 363        cancel any configuration changes and reraise the exception.  If not,
 364        commit the new configuration.
 365        """
 366        if exc_type is not None and self.editing:
 367            # We received some kind of exception.  Cancel the config and raise.
 368            self.zone.cancelConfig()
 369            raise
 370        else:
 371            # commit the config
 372            try:
 373                self.zone.commitConfig()
 374            except Exception as ex:
 375                reason = zonemgr_strerror(ex)
 376                LOG.exception(_("Unable to commit the new configuration for "
 377                                "instance '%s' via zonemgr(3RAD): %s")
 378                              % (self.zone.name, reason))
 379
 380                # Last ditch effort to cleanup.
 381                self.zone.cancelConfig()
 382                raise
 383
 384    def setprop(self, resource, prop, value):
 385        """sets a property for an existing resource OR creates a new resource
 386        with the given property(s).
 387        """
 388        current = lookup_resource_property(self.zone, resource, prop)
 389        if current is not None and current == value:
 390            # the value is already set
 391            return
 392
 393        try:
 394            if current is None:
 395                self.zone.addResource(zonemgr.Resource(
 396                    resource, [zonemgr.Property(prop, value)]))
 397            else:
 398                self.zone.setResourceProperties(
 399                    zonemgr.Resource(resource),
 400                    [zonemgr.Property(prop, value)])
 401        except Exception as ex:
 402            reason = zonemgr_strerror(ex)
 403            LOG.exception(_("Unable to set '%s' property on '%s' resource for "
 404                            "instance '%s' via zonemgr(3RAD): %s")
 405                          % (prop, resource, self.zone.name, reason))
 406            raise
 407
 408    def addresource(self, resource, props=None, ignore_exists=False):
 409        """creates a new resource with an optional property list, or set the
 410        property if the resource exists and ignore_exists is true.
 411
 412        :param ignore_exists: If the resource exists, set the property for the
 413            resource.
 414        """
 415        if props is None:
 416            props = []
 417
 418        try:
 419            self.zone.addResource(zonemgr.Resource(resource, props))
 420        except Exception as ex:
 421            if isinstance(ex, rad.client.ObjectError):
 422                code = ex.get_payload().code
 423                if (ignore_exists and
 424                        code == zonemgr.ErrorCode.RESOURCE_ALREADY_EXISTS):
 425                    self.zone.setResourceProperties(
 426                        zonemgr.Resource(resource, None), props)
 427                    return
 428            reason = zonemgr_strerror(ex)
 429            LOG.exception(_("Unable to create new resource '%s' for instance "
 430                            "'%s' via zonemgr(3RAD): %s")
 431                          % (resource, self.zone.name, reason))
 432            raise
 433
 434    def removeresources(self, resource, props=None):
 435        """removes resources whose properties include the optional property
 436        list specified in props.
 437        """
 438        if props is None:
 439            props = []
 440
 441        try:
 442            self.zone.removeResources(zonemgr.Resource(resource, props))
 443        except Exception as ex:
 444            reason = zonemgr_strerror(ex)
 445            LOG.exception(_("Unable to remove resource '%s' for instance '%s' "
 446                            "via zonemgr(3RAD): %s")
 447                          % (resource, self.zone.name, reason))
 448            raise
 449
 450    def clear_resource_props(self, resource, props):
 451        """Clear property values of a given resource
 452        """
 453        try:
 454            self.zone.clearResourceProperties(zonemgr.Resource(resource, None),
 455                                              props)
 456        except rad.client.ObjectError as ex:
 457            reason = zonemgr_strerror(ex)
 458            LOG.exception(_("Unable to clear '%s' property on '%s' resource "
 459                            "for instance '%s' via zonemgr(3RAD): %s")
 460                          % (props, resource, self.zone.name, reason))
 461            raise
 462
 463
 464class SolarisZonesDriver(driver.ComputeDriver):
 465    """Solaris Zones Driver using the zonemgr(3RAD) and kstat(3RAD) providers.
 466
 467    The interface to this class talks in terms of 'instances' (Amazon EC2 and
 468    internal Nova terminology), by which we mean 'running virtual machine'
 469    (XenAPI terminology) or domain (Xen or libvirt terminology).
 470
 471    An instance has an ID, which is the identifier chosen by Nova to represent
 472    the instance further up the stack.  This is unfortunately also called a
 473    'name' elsewhere.  As far as this layer is concerned, 'instance ID' and
 474    'instance name' are synonyms.
 475
 476    Note that the instance ID or name is not human-readable or
 477    customer-controlled -- it's an internal ID chosen by Nova.  At the
 478    nova.virt layer, instances do not have human-readable names at all -- such
 479    things are only known higher up the stack.
 480
 481    Most virtualization platforms will also have their own identity schemes,
 482    to uniquely identify a VM or domain.  These IDs must stay internal to the
 483    platform-specific layer, and never escape the connection interface.  The
 484    platform-specific layer is responsible for keeping track of which instance
 485    ID maps to which platform-specific ID, and vice versa.
 486
 487    Some methods here take an instance of nova.compute.service.Instance.  This
 488    is the data structure used by nova.compute to store details regarding an
 489    instance, and pass them into this layer.  This layer is responsible for
 490    translating that generic data structure into terms that are specific to the
 491    virtualization platform.
 492
 493    """
 494
 495    capabilities = {
 496        "has_imagecache": False,
 497        "supports_recreate": True,
 498        "supports_migrate_to_same_host": False
 499    }
 500
 501    def __init__(self, virtapi):
 502        self.virtapi = virtapi
 503        self._archive_manager = None
 504        self._compute_event_callback = None
 505        self._conductor_api = conductor.API()
 506        self._fc_hbas = None
 507        self._fc_wwnns = None
 508        self._fc_wwpns = None
 509        self._host_stats = {}
 510        self._initiator = None
 511        self._install_engine = None
 512        self._kstat_control = None
 513        self._pagesize = os.sysconf('SC_PAGESIZE')
 514        self._rad_connection = None
 515        self._rootzpool_suffix = ROOTZPOOL_RESOURCE
 516        self._uname = os.uname()
 517        self._validated_archives = list()
 518        self._volume_api = SolarisVolumeAPI()
 519        self._zone_manager = None
 520
 521    @property
 522    def rad_connection(self):
 523        if self._rad_connection is None:
 524            self._rad_connection = rad.connect.connect_unix()
 525        else:
 526            # taken from rad.connect.RadConnection.__repr__ to look for a
 527            # closed connection
 528            if self._rad_connection._closed is not None:
 529                # the RAD connection has been lost.  Reconnect to RAD
 530                self._rad_connection = rad.connect.connect_unix()
 531
 532        return self._rad_connection
 533
 534    @property
 535    def zone_manager(self):
 536        try:
 537            if (self._zone_manager is None or
 538                    self._zone_manager._conn._closed is not None):
 539                self._zone_manager = self.rad_connection.get_object(
 540                    zonemgr.ZoneManager())
 541        except Exception as ex:
 542            reason = _("Unable to obtain RAD object: %s") % ex
 543            raise exception.NovaException(reason)
 544
 545        return self._zone_manager
 546
 547    @property
 548    def kstat_control(self):
 549        try:
 550            if (self._kstat_control is None or
 551                    self._kstat_control._conn._closed is not None):
 552                self._kstat_control = self.rad_connection.get_object(
 553                    kstat.Control())
 554        except Exception as ex:
 555            reason = _("Unable to obtain RAD object: %s") % ex
 556            raise exception.NovaException(reason)
 557
 558        return self._kstat_control
 559
 560    @property
 561    def archive_manager(self):
 562        try:
 563            if (self._archive_manager is None or
 564                    self._archive_manager._conn._closed is not None):
 565                self._archive_manager = self.rad_connection.get_object(
 566                    archivemgr.ArchiveManager())
 567        except Exception as ex:
 568            reason = _("Unable to obtain RAD object: %s") % ex
 569            raise exception.NovaException(reason)
 570
 571        return self._archive_manager
 572
 573    def init_host(self, host):
 574        """Initialize anything that is necessary for the driver to function,
 575        including catching up with currently running VM's on the given host.
 576        """
 577        # TODO(Vek): Need to pass context in for access to auth_token
 578        pass
 579
 580    def cleanup_host(self, host):
 581        """Clean up anything that is necessary for the driver gracefully stop,
 582        including ending remote sessions. This is optional.
 583        """
 584        pass
 585
 586    def _get_fc_hbas(self):
 587        """Get Fibre Channel HBA information."""
 588        if self._fc_hbas:
 589            return self._fc_hbas
 590
 591        out = None
 592        try:
 593            out, err = utils.execute('/usr/sbin/fcinfo', 'hba-port')
 594        except processutils.ProcessExecutionError:
 595            return []
 596
 597        if out is None:
 598            raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
 599
 600        hbas = []
 601        hba = {}
 602        for line in out.splitlines():
 603            line = line.strip()
 604            # Collect the following hba-port data:
 605            # 1: Port WWN
 606            # 2: State (online|offline)
 607            # 3: Node WWN
 608            if line.startswith("HBA Port WWN:"):
 609                # New HBA port entry
 610                hba = {}
 611                wwpn = line.split()[-1]
 612                hba['port_name'] = wwpn
 613                continue
 614            elif line.startswith("Port Mode:"):
 615                mode = line.split()[-1]
 616                # Skip Target mode ports
 617                if mode != 'Initiator':
 618                    break
 619            elif line.startswith("State:"):
 620                state = line.split()[-1]
 621                hba['port_state'] = state
 622                continue
 623            elif line.startswith("Node WWN:"):
 624                wwnn = line.split()[-1]
 625                hba['node_name'] = wwnn
 626                continue
 627            if len(hba) == 3:
 628                hbas.append(hba)
 629                hba = {}
 630        self._fc_hbas = hbas
 631        return self._fc_hbas
 632
 633    def _get_fc_wwnns(self):
 634        """Get Fibre Channel WWNNs from the system, if any."""
 635        hbas = self._get_fc_hbas()
 636
 637        wwnns = []
 638        for hba in hbas:
 639            if hba['port_state'] == 'online':
 640                wwnn = hba['node_name']
 641                wwnns.append(wwnn)
 642        return wwnns
 643
 644    def _get_fc_wwpns(self):
 645        """Get Fibre Channel WWPNs from the system, if any."""
 646        hbas = self._get_fc_hbas()
 647
 648        wwpns = []
 649        for hba in hbas:
 650            if hba['port_state'] == 'online':
 651                wwpn = hba['port_name']
 652                wwpns.append(wwpn)
 653        return wwpns
 654
 655    def _get_iscsi_initiator(self):
 656        """ Return the iSCSI initiator node name IQN for this host """
 657        try:
 658            out, err = utils.execute('/usr/sbin/iscsiadm', 'list',
 659                                     'initiator-node')
 660            # Sample first line of command output:
 661            # Initiator node name: iqn.1986-03.com.sun:01:e00000000000.4f757217
 662            initiator_name_line = out.splitlines()[0]
 663            initiator_iqn = initiator_name_line.rsplit(' ', 1)[1]
 664            return initiator_iqn
 665        except processutils.ProcessExecutionError as ex:
 666            LOG.info(_("Failed to get the initiator-node info: %s") % (ex))
 667            return None
 668
 669    def _get_zone_by_name(self, name):
 670        """Return a Solaris Zones object via RAD by name."""
 671        try:
 672            zone = self.rad_connection.get_object(
 673                zonemgr.Zone(), rad.client.ADRGlobPattern({'name': name}))
 674        except rad.client.NotFoundError:
 675            return None
 676        except Exception:
 677            raise
 678        return zone
 679
 680    def _get_state(self, zone):
 681        """Return the running state, one of the power_state codes."""
 682        return SOLARISZONES_POWER_STATE[zone.state]
 683
 684    def _pages_to_kb(self, pages):
 685        """Convert a number of pages of memory into a total size in KBytes."""
 686        return (pages * self._pagesize) / 1024
 687
 688    def _get_max_mem(self, zone):
 689        """Return the maximum memory in KBytes allowed."""
 690        if zone.brand == ZONE_BRAND_SOLARIS:
 691            mem_resource = 'swap'
 692        else:
 693            mem_resource = 'physical'
 694
 695        max_mem = lookup_resource_property(zone, 'capped-memory', mem_resource)
 696        if max_mem is not None:
 697            return strutils.string_to_bytes("%sB" % max_mem) / 1024
 698
 699        # If physical property in capped-memory doesn't exist, this may
 700        # represent a non-global zone so just return the system's total
 701        # memory.
 702        return self._pages_to_kb(os.sysconf('SC_PHYS_PAGES'))
 703
 704    def _get_mem(self, zone):
 705        """Return the memory in KBytes used by the domain."""
 706
 707        # There isn't any way of determining this from the hypervisor
 708        # perspective in Solaris, so just return the _get_max_mem() value
 709        # for now.
 710        return self._get_max_mem(zone)
 711
 712    def _get_num_cpu(self, zone):
 713        """Return the number of virtual CPUs for the domain.
 714
 715        In the case of kernel zones, the number of virtual CPUs a zone
 716        ends up with depends on whether or not there were 'virtual-cpu'
 717        or 'dedicated-cpu' resources in the configuration or whether
 718        there was an assigned pool in the configuration. This algorithm
 719        attempts to emulate what the virtual platform code does to
 720        determine a number of virtual CPUs to use.
 721        """
 722        # If a 'virtual-cpu' resource exists, use the minimum number of
 723        # CPUs defined there.
 724        ncpus = lookup_resource_property(zone, 'virtual-cpu', 'ncpus')
 725        if ncpus is not None:
 726            min = ncpus.split('-', 1)[0]
 727            if min.isdigit():
 728                return int(min)
 729
 730        # Otherwise if a 'dedicated-cpu' resource exists, use the maximum
 731        # number of CPUs defined there.
 732        ncpus = lookup_resource_property(zone, 'dedicated-cpu', 'ncpus')
 733        if ncpus is not None:
 734            max = ncpus.split('-', 1)[-1]
 735            if max.isdigit():
 736                return int(max)
 737
 738        # Finally if neither resource exists but the zone was assigned a
 739        # pool in the configuration, the number of CPUs would be the size
 740        # of the processor set. Currently there's no way of easily
 741        # determining this so use the system's notion of the total number
 742        # of online CPUs.
 743        return os.sysconf('SC_NPROCESSORS_ONLN')
 744
 745    def _kstat_data(self, uri):
 746        """Return Kstat snapshot data via RAD as a dictionary."""
 747        if not isinstance(uri, str):
 748            raise exception.NovaException("kstat URI must be string type: "
 749                                          "%s is %s" % (uri, type(uri)))
 750
 751        if not uri.startswith("kstat:/"):
 752            uri = "kstat:/" + uri
 753
 754        try:
 755            self.kstat_control.update()
 756            kstat_obj = self.rad_connection.get_object(
 757                kstat.Kstat(), rad.client.ADRGlobPattern({"uri": uri}))
 758
 759        except Exception as reason:
 760            LOG.info(_("Unable to retrieve kstat object '%s' via kstat(3RAD): "
 761                       "%s") % (uri, reason))
 762            return None
 763
 764        ks_data = {}
 765        for name, data in kstat_obj.getMap().items():
 766            ks_data[name] = getattr(data, KSTAT_TYPE[str(data.type)])
 767
 768        return ks_data
 769
 770    def _sum_kstat_statistic(self, kstat_data, statistic):
 771        total = 0
 772        for ks in kstat_data.values():
 773            data = ks.getMap()[statistic]
 774            value = getattr(data, KSTAT_TYPE[str(data.type)])
 775            try:
 776                total += value
 777            except TypeError:
 778                LOG.error(_("Unable to aggregate non-summable kstat %s;%s "
 779                            " of type %s") % (ks.getParent().uri, statistic,
 780                                              type(value)))
 781                return None
 782
 783        return total
 784
 785    def _get_kstat_statistic(self, ks, statistic):
 786        if not isinstance(ks, kstat.Kstat):
 787            reason = (_("Attempted to get a kstat from %s type.") % (type(ks)))
 788            raise TypeError(reason)
 789
 790        try:
 791            data = ks.getMap()[statistic]
 792            value = getattr(data, KSTAT_TYPE[str(data.type)])
 793        except TypeError:
 794            value = None
 795
 796        return value
 797
 798    def _get_cpu_time(self, zone):
 799        """Return the CPU time used in nanoseconds."""
 800        if zone.id == -1:
 801            return 0
 802
 803        # The retry value of 3 was determined by the "we shouldn't hit this
 804        # often, but if we do it should resolve quickly so try again"+1
 805        # algorithm.
 806        for _attempt in range(3):
 807            total = 0
 808
 809            accum_uri = "kstat:/zones/cpu/sys_zone_accum/%d" % zone.id
 810            uri = "kstat:/zones/cpu/sys_zone_%d" % zone.id
 811
 812            initial = self._kstat_data(accum_uri)
 813            cpus = self._kstat_data(uri)
 814
 815            total += self._sum_kstat_statistic(cpus, 'cpu_nsec_kernel_cur')
 816            total += self._sum_kstat_statistic(cpus, 'cpu_nsec_user_cur')
 817
 818            final = self._kstat_data(accum_uri)
 819
 820            if initial['gen_num'] == final['gen_num']:
 821                total += initial['cpu_nsec_user'] + initial['cpu_nsec_kernel']
 822                return total
 823
 824        LOG.error(_("Unable to get accurate cpu usage beacuse cpu list "
 825                    "keeps changing"))
 826        return 0
 827
 828    def get_info(self, instance):
 829        """Get the current status of an instance, by name (not ID!)
 830
 831        :param instance: nova.objects.instance.Instance object
 832
 833        Returns a InstanceInfo object
 834        """
 835        # TODO(Vek): Need to pass context in for access to auth_token
 836        name = instance['name']
 837        zone = self._get_zone_by_name(name)
 838        if zone is None:
 839            raise exception.InstanceNotFound(instance_id=name)
 840        return hardware.InstanceInfo(state=self._get_state(zone),
 841                                     max_mem_kb=self._get_max_mem(zone),
 842                                     mem_kb=self._get_mem(zone),
 843                                     num_cpu=self._get_num_cpu(zone),
 844                                     cpu_time_ns=self._get_cpu_time(zone))
 845
 846    def get_num_instances(self):
 847        """Return the total number of virtual machines.
 848
 849        Return the number of virtual machines that the hypervisor knows
 850        about.
 851
 852        .. note::
 853
 854            This implementation works for all drivers, but it is
 855            not particularly efficient. Maintainers of the virt drivers are
 856            encouraged to override this method with something more
 857            efficient.
 858        """
 859        return len(self.list_instances())
 860
 861    def instance_exists(self, instance):
 862        """Checks existence of an instance on the host.
 863
 864        :param instance: The instance to lookup
 865
 866        Returns True if an instance with the supplied ID exists on
 867        the host, False otherwise.
 868
 869        .. note::
 870
 871            This implementation works for all drivers, but it is
 872            not particularly efficient. Maintainers of the virt drivers are
 873            encouraged to override this method with something more
 874            efficient.
 875        """
 876        try:
 877            return instance.uuid in self.list_instance_uuids()
 878        except NotImplementedError:
 879            return instance.name in self.list_instances()
 880
 881    def estimate_instance_overhead(self, instance_info):
 882        """Estimate the virtualization overhead required to build an instance
 883        of the given flavor.
 884
 885        Defaults to zero, drivers should override if per-instance overhead
 886        calculations are desired.
 887
 888        :param instance_info: Instance/flavor to calculate overhead for.
 889        :returns: Dict of estimated overhead values.
 890        """
 891        return {'memory_mb': 0}
 892
 893    def _get_list_zone_object(self):
 894        """Return a list of all Solaris Zones objects via RAD."""
 895        return self.rad_connection.list_objects(zonemgr.Zone())
 896
 897    def list_instances(self):
 898        """Return the names of all the instances known to the virtualization
 899        layer, as a list.
 900        """
 901        # TODO(Vek): Need to pass context in for access to auth_token
 902        instances_list = []
 903        for zone in self._get_list_zone_object():
 904            instances_list.append(self.rad_connection.get_object(zone).name)
 905        return instances_list
 906
 907    def list_instance_uuids(self):
 908        """Return the UUIDS of all the instances known to the virtualization
 909        layer, as a list.
 910        """
 911        raise NotImplementedError()
 912
 913    def _rebuild_block_devices(self, context, instance, bdms, recreate):
 914        root_ci = None
 915        rootmp = instance['root_device_name']
 916        for entry in bdms:
 917            if entry['connection_info'] is None:
 918                continue
 919
 920            if entry['device_name'] == rootmp:
 921                root_ci = jsonutils.loads(entry['connection_info'])
 922                # Let's make sure this is a well formed connection_info, by
 923                # checking if it has a serial key that represents the
 924                # volume_id. If not check to see if the block device has a
 925                # volume_id, if so then assign this to the root_ci.serial.
 926                #
 927                # If we cannot repair the connection_info then simply do not
 928                # return a root_ci and let the caller decide if they want to
 929                # fail or not.
 930                if root_ci.get('serial') is None:
 931                    if entry.get('volume_id') is not None:
 932                        root_ci['serial'] = entry['volume_id']
 933                    else:
 934                        LOG.debug(_("Unable to determine the volume id for "
 935                                    "the connection info for the root device "
 936                                    "for instance '%s'") % instance['name'])
 937                        root_ci = None
 938
 939                continue
 940
 941            if not recreate:
 942                ci = jsonutils.loads(entry['connection_info'])
 943                self.detach_volume(ci, instance, entry['device_name'])
 944
 945        if root_ci is None and recreate:
 946            msg = (_("Unable to find the root device for instance '%s'.")
 947                   % instance['name'])
 948            raise exception.NovaException(msg)
 949
 950        return root_ci
 951
 952    def _set_instance_metahostid(self, instance):
 953        """Attempt to get the hostid from the current configured zone and
 954        return the hostid.  Otherwise return None, and do not set the hostid in
 955        the instance
 956        """
 957        hostid = instance.system_metadata.get('hostid')
 958        if hostid is not None:
 959            return hostid
 960
 961        zone = self._get_zone_by_name(instance['name'])
 962        if zone is None:
 963            return None
 964
 965        hostid = lookup_resource_property(zone, 'global', 'hostid')
 966        if hostid:
 967            instance.system_metadata['hostid'] = hostid
 968
 969        return hostid
 970
 971    def rebuild(self, context, instance, image_meta, injected_files,
 972                admin_password, bdms, detach_block_devices,
 973                attach_block_devices, network_info=None,
 974                recreate=False, block_device_info=None,
 975                preserve_ephemeral=False):
 976        """Destroy and re-make this instance.
 977
 978        A 'rebuild' effectively purges all existing data from the system and
 979        remakes the VM with given 'metadata' and 'personalities'.
 980
 981        This base class method shuts down the VM, detaches all block devices,
 982        then spins up the new VM afterwards. It may be overridden by
 983        hypervisors that need to - e.g. for optimisations, or when the 'VM'
 984        is actually proxied and needs to be held across the shutdown + spin
 985        up steps.
 986
 987        :param context: security context
 988        :param instance: nova.objects.instance.Instance
 989                         This function should use the data there to guide
 990                         the creation of the new instance.
 991        :param nova.objects.ImageMeta image_meta:
 992            The metadata of the image of the instance.
 993        :param injected_files: User files to inject into instance.
 994        :param admin_password: Administrator password to set in instance.
 995        :param bdms: block-device-mappings to use for rebuild
 996        :param detach_block_devices: function to detach block devices. See
 997            nova.compute.manager.ComputeManager:_rebuild_default_impl for
 998            usage.
 999        :param attach_block_devices: function to attach block devices. See
1000            nova.compute.manager.ComputeManager:_rebuild_default_impl for
1001            usage.
1002        :param network_info:
1003           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
1004        :param recreate: True if the instance is being recreated on a new
1005            hypervisor - all the cleanup of old state is skipped.
1006        :param block_device_info: Information about block devices to be
1007                                  attached to the instance.
1008        :param preserve_ephemeral: True if the default ephemeral storage
1009                                   partition must be preserved on rebuild
1010        """
1011        if recreate:
1012            instance.system_metadata['evac_from'] = instance['launched_on']
1013            instance.save()
1014            extra_specs = self._get_flavor(instance)['extra_specs'].copy()
1015            brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
1016            if brand == ZONE_BRAND_SOLARIS:
1017                msg = (_("'%s' branded zones do not currently support "
1018                         "evacuation.") % brand)
1019                raise exception.NovaException(msg)
1020        else:
1021            self._power_off(instance, "HALT")
1022
1023        instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
1024        instance.save(expected_task_state=[task_states.REBUILDING])
1025        root_ci = self._rebuild_block_devices(context, instance, bdms,
1026                                              recreate)
1027
1028        if recreate:
1029            if root_ci is not None:
1030                driver_type = root_ci['driver_volume_type']
1031            else:
1032                driver_type = 'local'
1033            if driver_type not in shared_storage:
1034                msg = (_("Root device is not on shared storage for instance "
1035                         "'%s'.") % instance['name'])
1036                raise exception.NovaException(msg)
1037
1038        if not recreate:
1039            self.destroy(context, instance, network_info, block_device_info)
1040            if root_ci is not None:
1041                self._volume_api.detach(context, root_ci['serial'])
1042                self._volume_api.delete(context, root_ci['serial'])
1043
1044                # Go ahead and remove the root bdm from the bdms so that we do
1045                # not trip up spawn either checking against the use of c1d0 or
1046                # attempting to re-attach the root device.
1047                bdms.objects.remove(bdms.root_bdm())
1048                rootdevname = block_device_info.get('root_device_name')
1049                if rootdevname is not None:
1050                    bdi_bdms = block_device_info.get('block_device_mapping')
1051                    for entry in bdi_bdms:
1052                        if entry['mount_device'] == rootdevname:
1053                            bdi_bdms.remove(entry)
1054                            break
1055
1056        instance.task_state = task_states.REBUILD_SPAWNING
1057        instance.save(
1058            expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
1059
1060        # Instead of using a boolean for 'rebuilding' scratch data, use a
1061        # string because the object will translate it to a string anyways.
1062        if recreate:
1063            extra_specs = self._get_flavor(instance)['extra_specs'].copy()
1064
1065            instance.system_metadata['rebuilding'] = 'false'
1066            self._create_config(context, instance, network_info, root_ci, None)
1067            del instance.system_metadata['evac_from']
1068            instance.save()
1069        else:
1070            instance.system_metadata['rebuilding'] = 'true'
1071            self.spawn(context, instance, image_meta, injected_files,
1072                       admin_password, network_info, block_device_info)
1073
1074        del instance.system_metadata['rebuilding']
1075        name = instance['name']
1076        zone = self._get_zone_by_name(name)
1077        if zone is None:
1078            raise exception.InstanceNotFound(instance_id=name)
1079
1080        if recreate:
1081            zone.attach(['-x', 'initialize-hostdata'])
1082
1083            rootmp = instance['root_device_name']
1084            for entry in bdms:
1085                if (entry['connection_info'] is None or
1086                        rootmp == entry['device_name']):
1087                    continue
1088
1089                connection_info = jsonutils.loads(entry['connection_info'])
1090                mount = entry['device_name']
1091                self.attach_volume(context, connection_info, instance, mount)
1092
1093            self._power_on(instance, network_info)
1094
1095        if admin_password is not None:
1096            # Because there is no way to make sure a zone is ready upon
1097            # returning from a boot request. We must give the zone a few
1098            # seconds to boot before attempting to set the admin password.
1099            greenthread.sleep(15)
1100            self.set_admin_password(instance, admin_password)
1101
1102    def _get_flavor(self, instance):
1103        """Retrieve the flavor object as specified in the instance object"""
1104        return flavor_obj.Flavor.get_by_id(
1105            nova_context.get_admin_context(read_deleted='yes'),
1106            instance['instance_type_id'])
1107
1108    def _fetch_image(self, context, instance):
1109        """Fetch an image using Glance given the instance's image_ref."""
1110        glancecache_dirname = CONF.solariszones.glancecache_dirname
1111        fileutils.ensure_tree(glancecache_dirname)
1112        iref = instance['image_ref']
1113        image = os.path.join(glancecache_dirname, iref)
1114        downloading = image + '.downloading'
1115
1116        with lockutils.lock('glance-image-%s' % iref):
1117            if os.path.isfile(downloading):
1118                LOG.debug(_('Cleaning partial download of %s' % iref))
1119                os.unlink(image)
1120                os.unlink(downloading)
1121
1122            elif os.path.exists(image):
1123                LOG.debug(_("Using existing, cached Glance image: id %s")
1124                          % iref)
1125                return image
1126
1127            LOG.debug(_("Fetching new Glance image: id %s") % iref)
1128            try:
1129                # touch the empty .downloading file
1130                with open(downloading, 'w'):
1131                    pass
1132                images.fetch(context, iref, image, instance['user_id'],
1133                             instance['project_id'])
1134                os.unlink(downloading)
1135                return image
1136            except Exception as reason:
1137                LOG.exception(_("Unable to fetch Glance image: id %s: %s")
1138                              % (iref, reason))
1139                raise
1140
1141    @lockutils.synchronized('validate_image')
1142    def _validate_image(self, context, image, instance):
1143        """Validate a glance image for compatibility with the instance."""
1144        # Skip if the image was already checked and confirmed as valid.
1145        if instance['image_ref'] in self._validated_archives:
1146            return
1147
1148        try:
1149            ua = self.archive_manager.getArchive(image)
1150        except Exception as ex:
1151            if isinstance(ex, rad.client.ObjectError):
1152                reason = ex.get_payload().info
1153            else:
1154                reason = str(ex)
1155            raise exception.ImageUnacceptable(image_id=instance['image_ref'],
1156                                              reason=reason)
1157
1158        # Validate the image at this point to ensure:
1159        # - contains one deployable system
1160        deployables = ua.getArchivedSystems()
1161        if len(deployables) != 1:
1162            reason = _("Image must contain only a single deployable system.")
1163            raise exception.ImageUnacceptable(image_id=instance['image_ref'],
1164                                              reason=reason)
1165        # - matching architecture
1166        deployable_arch = str(ua.isa)
1167        compute_arch = platform.processor()
1168        if deployable_arch.lower() != compute_arch:
1169            reason = (_("Unified Archive architecture '%s' is incompatible "
1170                      "with this compute host's architecture, '%s'.")
1171                      % (deployable_arch, compute_arch))
1172
1173            # For some reason we have gotten the wrong architecture image,
1174            # which should have been filtered by the scheduler. One reason this
1175            # could happen is because the images architecture type is
1176            # incorrectly set. Check for this and report a better reason.
1177            glanceapi = glance_api()
1178            image_meta = glanceapi.get(context, instance['image_ref'])
1179            image_properties = image_meta.get('properties')
1180            if image_properties.get('architecture') is None:
1181                reason = reason + (_(" The 'architecture' property is not set "
1182                                     "on the Glance image."))
1183
1184            raise exception.ImageUnacceptable(image_id=instance['image_ref'],
1185                                              reason=reason)
1186        # - single root pool only
1187        if not deployables[0].rootOnly:
1188            reason = _("Image contains more than one ZFS pool.")
1189            raise exception.ImageUnacceptable(image_id=instance['image_ref'],
1190                                              reason=reason)
1191        # - looks like it's OK
1192        self._validated_archives.append(instance['image_ref'])
1193
1194    def _validate_flavor(self, instance):
1195        """Validate the flavor for compatibility with zone brands"""
1196        flavor = self._get_flavor(instance)
1197        extra_specs = flavor['extra_specs'].copy()
1198        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
1199
1200        if brand == ZONE_BRAND_SOLARIS_KZ:
1201            # verify the memory is 256mb aligned
1202            test_size = Size('256MB')
1203            instance_size = Size('%sMB' % instance['memory_mb'])
1204
1205            if instance_size.byte_value % test_size.byte_value:
1206                # non-zero result so it doesn't align
1207                raise MemoryAlignmentIncorrect(
1208                    flavor=flavor['name'],
1209                    memsize=str(instance['memory_mb']),
1210                    align='256')
1211
1212    def _suri_from_volume_info(self, connection_info):
1213        """Returns a suri(5) formatted string based on connection_info.
1214        Currently supports local ZFS volume, NFS, Fibre Channel and iSCSI
1215        driver types.
1216        """
1217        driver_type = connection_info['driver_volume_type']
1218        if driver_type not in ['iscsi', 'fibre_channel', 'local', 'nfs']:
1219            raise exception.VolumeDriverNotFound(driver_type=driver_type)
1220        if driver_type == 'local':
1221            suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path']
1222        elif driver_type == 'iscsi':
1223            data = connection_info['data']
1224            # suri(5) format:
1225            #       iscsi://<host>[:<port>]/target.<IQN>,lun.<LUN>
1226            # luname-only URI format for the multipathing:
1227            #       iscsi://<host>[:<port>]/luname.naa.<ID>
1228            # Sample iSCSI connection data values:
1229            # target_portal: 192.168.1.244:3260
1230            # target_iqn: iqn.2010-10.org.openstack:volume-a89c.....
1231            # target_lun: 1
1232            suri = None
1233            if 'target_iqns' in data:
1234                target = data['target_iqns'][0]
1235                target_lun = data['target_luns'][0]
1236                try:
1237                    utils.execute('/usr/sbin/iscsiadm', 'list', 'target',
1238                                  '-vS', target)
1239                    out, err = utils.execute('/usr/sbin/suriadm', 'lookup-uri',
1240                                             '-t', 'iscsi',
1241                                             '-p', 'target=%s' % target,
1242                                             '-p', 'lun=%s' % target_lun)
1243                    for line in [l.strip() for l in out.splitlines()]:
1244                        if "luname.naa." in line:
1245                            LOG.debug(_("The found luname-only URI for the "
1246                                      "LUN '%s' is '%s'.") %
1247                                      (target_lun, line))
1248                            suri = line
1249                except processutils.ProcessExecutionError as ex:
1250                    reason = ex.stderr
1251                    LOG.debug(_("Failed to lookup-uri for volume '%s', lun "
1252                              "'%s': '%s'.") % (target, target_lun, reason))
1253
1254            if suri is None:
1255                suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'],
1256                                                        data['target_iqn'],
1257                                                        data['target_lun'])
1258            # TODO(npower): need to handle CHAP authentication also
1259        elif driver_type == 'nfs':
1260            data = connection_info['data']
1261            suri = (
1262                'nfs://cinder:cinder@%s/%s' %
1263                (data['export'].replace(':', ''), data['name'])
1264            )
1265
1266        elif driver_type == 'fibre_channel':
1267            data = connection_info['data']
1268            target_wwn = data['target_wwn']
1269            # Ensure there's a fibre channel HBA.
1270            hbas = self._get_fc_hbas()
1271            if not hbas:
1272                LOG.error(_("Cannot attach Fibre Channel volume because "
1273                          "no Fibre Channel HBA initiators were found"))
1274                raise exception.InvalidVolume(
1275                    reason="No host Fibre Channel initiator found")
1276
1277            target_lun = data['target_lun']
1278            # If the volume was exported just a few seconds previously then
1279            # it will probably not be visible to the local adapter yet.
1280            # Invoke 'fcinfo remote-port' on all local HBA ports to trigger
1281            # a refresh.
1282            for wwpn in self._get_fc_wwpns():
1283                utils.execute('/usr/sbin/fcinfo', 'remote-port', '-p', wwpn)
1284
1285            suri = self._lookup_fc_volume_suri(target_wwn, target_lun)
1286        return suri
1287
1288    def _lookup_fc_volume_suri(self, target_wwn, target_lun):
1289        """Searching the LU based URI for the FC LU. """
1290        wwns = []
1291        if isinstance(target_wwn, list):
1292            wwns = target_wwn
1293        else:
1294            wwns.append(target_wwn)
1295
1296        for _none in range(3):
1297            for wwn in wwns:
1298                try:
1299                    out, err = utils.execute('/usr/sbin/suriadm', 'lookup-uri',
1300                                             '-p', 'target=naa.%s' % wwn,
1301                                             '-p', 'lun=%s' % target_lun)
1302                    for line in [l.strip() for l in out.splitlines()]:
1303                        if line.startswith("lu:luname.naa."):
1304                            return line
1305                except processutils.ProcessExecutionError as ex:
1306                    reason = ex.stderr
1307                    LOG.debug(_("Failed to lookup-uri for volume '%s', lun "
1308                              "%s: %s") % (wwn, target_lun, reason))
1309            greenthread.sleep(2)
1310        else:
1311            msg = _("Unable to lookup URI of Fibre Channel volume "
1312                    "with lun '%s'." % target_lun)
1313            raise exception.InvalidVolume(reason=msg)
1314
1315    def _set_global_properties(self, name, extra_specs, brand):
1316        """Set Solaris Zone's global properties if supplied via flavor."""
1317        zone = self._get_zone_by_name(name)
1318        if zone is None:
1319            raise exception.InstanceNotFound(instance_id=name)
1320
1321        # TODO(dcomay): Should figure this out via the brands themselves.
1322        zonecfg_items = [
1323            'bootargs',
1324            'brand',
1325            'hostid'
1326        ]
1327        if brand == ZONE_BRAND_SOLARIS:
1328            zonecfg_items.extend(
1329                ['file-mac-profile', 'fs-allowed', 'limitpriv'])
1330        else:
1331            zonecfg_items.extend(['cpu-arch'])
1332
1333        with ZoneConfig(zone) as zc:
1334            for key, value in extra_specs.iteritems():
1335                # Ignore not-zonecfg-scoped brand properties.
1336                if not key.startswith('zonecfg:'):
1337                    continue
1338                _scope, prop = key.split(':', 1)
1339                # Ignore the 'brand' property if present.
1340                if prop == 'brand':
1341                    continue
1342                # Ignore but warn about unsupported zonecfg-scoped properties.
1343                if prop not in zonecfg_items:
1344                    LOG.warning(_("Ignoring unsupported zone property '%s' "
1345                                  "set on flavor for instance '%s'")
1346                                % (prop, name))
1347                    continue
1348                zc.setprop('global', prop, value)
1349
1350    def _create_boot_volume(self, context, instance):
1351        """Create a (Cinder) volume service backed boot volume"""
1352        boot_vol_az = CONF.solariszones.boot_volume_az
1353        boot_vol_type = CONF.solariszones.boot_volume_type
1354        try:
1355            vol = self._volume_api.create(
1356                context, instance['root_gb'],
1357                instance['hostname'] + "-" + self._rootzpool_suffix,
1358                "Boot volume for instance '%s' (%s)"
1359                % (instance['name'], instance['uuid']),
1360                volume_type=boot_vol_type, availability_zone=boot_vol_az)
1361            # TODO(npower): Polling is what nova/compute/manager also does when
1362            # creating a new volume, so we do likewise here.
1363            while True:
1364                volume = self._volume_api.get(context, vol['id'])
1365                if volume['status'] != 'creating':
1366                    return volume
1367                greenthread.sleep(1)
1368
1369        except Exception as reason:
1370            LOG.exception(_("Unable to create root zpool volume for instance "
1371                            "'%s': %s") % (instance['name'], reason))
1372            raise
1373
1374    def _connect_boot_volume(self, volume, mountpoint, context, instance):
1375        """Connect a (Cinder) volume service backed boot volume"""
1376        instance_uuid = instance['uuid']
1377        volume_id = volume['id']
1378
1379        connector = self.get_volume_connector(instance)
1380        connection_info = self._volume_api.initialize_connection(context,
1381                                                                 volume_id,
1382                                                                 connector)
1383        connection_info['serial'] = volume_id
1384
1385        # Check connection_info to determine if the provided volume is
1386        # local to this compute node. If it is, then don't use it for
1387        # Solaris branded zones in order to avoid a known ZFS deadlock issue
1388        # when using a zpool within another zpool on the same system.
1389        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
1390        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
1391        if brand == ZONE_BRAND_SOLARIS:
1392            driver_type = connection_info['driver_volume_type']
1393            if driver_type == 'local':
1394                msg = _("Detected 'local' zvol driver volume type "
1395                        "from volume service, which should not be "
1396                        "used as a boot device for 'solaris' "
1397                        "branded zones.")
1398                raise exception.InvalidVolume(reason=msg)
1399            elif driver_type == 'iscsi':
1400                # Check for a potential loopback iSCSI situation
1401                data = connection_info['data']
1402                target_portal = data['target_portal']
1403                # Strip off the port number (eg. 127.0.0.1:3260)
1404                host = target_portal.rsplit(':', 1)
1405                # Strip any enclosing '[' and ']' brackets for
1406                # IPv6 addresses.
1407                target_host = host[0].strip('[]')
1408
1409                # Check if target_host is an IP or hostname matching the
1410                # connector host or IP, which would mean the provisioned
1411                # iSCSI LUN is on the same host as the instance.
1412                if target_host in [connector['ip'], connector['host']]:
1413                    msg = _("iSCSI connection info from volume "
1414                            "service indicates that the target is a "
1415                            "local volume, which should not be used "
1416                            "as a boot device for 'solaris' branded "
1417                            "zones.")
1418                    raise exception.InvalidVolume(reason=msg)
1419            # Assuming that fibre_channel is non-local
1420            elif driver_type != 'fibre_channel':
1421                # Some other connection type that we don't understand
1422                # Let zone use some local fallback instead.
1423                msg = _("Unsupported volume driver type '%s' can not be used "
1424                        "as a boot device for zones." % driver_type)
1425                raise exception.InvalidVolume(reason=msg)
1426
1427        # Volume looks OK to use. Notify Cinder of the attachment.
1428        self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)
1429        return connection_info
1430
1431    def _set_boot_device(self, name, connection_info, brand):
1432        """Set the boot device specified by connection_info"""
1433        zone = self._get_zone_by_name(name)
1434        if zone is None:
1435            raise exception.InstanceNotFound(instance_id=name)
1436
1437        suri = self._suri_from_volume_info(connection_info)
1438
1439        with ZoneConfig(zone) as zc:
1440            # ZOSS device configuration is different for the solaris-kz brand
1441            if brand == ZONE_BRAND_SOLARIS_KZ:
1442                zc.zone.setResourceProperties(
1443                    zonemgr.Resource("device",
1444                                     [zonemgr.Property("bootpri", "0")]),
1445                    [zonemgr.Property("storage", suri)])
1446            else:
1447                zc.addresource(ROOTZPOOL_RESOURCE,
1448                               [zonemgr.Property("storage", listvalue=[suri])],
1449                               ignore_exists=True)
1450
1451    def _set_num_cpu(self, name, vcpus, brand):
1452        """Set number of VCPUs in a Solaris Zone configuration."""
1453        zone = self._get_zone_by_name(name)
1454        if zone is None:
1455            raise exception.InstanceNotFound(instance_id=name)
1456
1457        # The Solaris Zone brand type is used to specify the type of
1458        # 'cpu' resource set in the Solaris Zone configuration.
1459        if brand == ZONE_BRAND_SOLARIS:
1460            vcpu_resource = 'capped-cpu'
1461        else:
1462            vcpu_resource = 'virtual-cpu'
1463
1464        # TODO(dcomay): Until 17881862 is resolved, this should be turned into
1465        # an appropriate 'rctl' resource for the 'capped-cpu' case.
1466        with ZoneConfig(zone) as zc:
1467            zc.setprop(vcpu_resource, 'ncpus', str(vcpus))
1468
1469    def _set_memory_cap(self, name, memory_mb, brand):
1470        """Set memory cap in a Solaris Zone configuration."""
1471        zone = self._get_zone_by_name(name)
1472        if zone is None:
1473            raise exception.InstanceNotFound(instance_id=name)
1474
1475        # The Solaris Zone brand type is used to specify the type of
1476        # 'memory' cap set in the Solaris Zone configuration.
1477        if brand == ZONE_BRAND_SOLARIS:
1478            mem_resource = 'swap'
1479        else:
1480            mem_resource = 'physical'
1481
1482        with ZoneConfig(zone) as zc:
1483            zc.setprop('capped-memory', mem_resource, '%dM' % memory_mb)
1484
1485    def _ovs_add_port(self, instance, vif, port):
1486        if vif['type'] == 'binding_failed':
1487            LOG.error(_('Port binding has failed for VIF %s. Ensure that '
1488                        'OVS agent is running and/or bridge_mappings are '
1489                        'correctly configured. VM will not have network '
1490                        'connectivity') % vif)
1491
1492        ovs_bridge = CONF.neutron.ovs_bridge
1493        cmd = ['/usr/sbin/ovs-vsctl',
1494               '--timeout=%s' % CONF.ovs_vsctl_timeout,
1495               '--', '--if-exists', 'del-port', ovs_bridge, port,
1496               '--', 'add-port', ovs_bridge, port,
1497               '--', 'set', 'Interface', port,
1498               'external-ids:iface-id=%s' % vif['id'],
1499               'external-ids:iface-status=active',
1500               'external-ids:attached-mac=%s' % vif['address'],
1501               'external-ids:vm-uuid=%s' % instance['uuid']
1502               ]
1503        try:
1504            out, err = utils.execute(*cmd)
1505        except Exception as reason:
1506            msg = (_("Failed to add port '%s' with MAC address '%s' to "
1507                     "OVS Bridge '%s': %s")
1508                   % (port, vif['address'], ovs_bridge, reason))
1509            raise exception.NovaException(msg)
1510        LOG.debug(_('Successfully added port %s with MAC adddress %s') %
1511                  (port, vif['address']))
1512
1513    def _ovs_delete_port(self, port, log_warnings=False):
1514        ovs_bridge = CONF.neutron.ovs_bridge
1515        cmd = ['/usr/sbin/ovs-vsctl',
1516               '--timeout=%s' % CONF.ovs_vsctl_timeout,
1517               '--', '--if-exists', 'del-port', ovs_bridge, port]
1518        try:
1519            out, err = utils.execute(*cmd)
1520            LOG.debug(_('Removed port %s from the OVS bridge %s') %
1521                      (port, ovs_bridge))
1522        except Exception as reason:
1523            msg = (_("Unable to remove port '%s' from the OVS "
1524                     "bridge '%s': %s") % (port, ovs_bridge, reason))
1525            if log_warnings:
1526                LOG.warning(msg)
1527            else:
1528                raise nova.exception.NovaException(msg)
1529
1530    def _plug_vifs(self, instance, network_info):
1531        if not network_info:
1532            LOG.debug(_("Instance has no VIF. Nothing to plug."))
1533            return
1534
1535        # first find out all the anets for a given instance
1536        try:
1537            out, err = utils.execute('/usr/sbin/dladm', 'show-vnic',
1538                                     '-z', instance['name'],
1539                                     '-po', 'link,macaddress')
1540        except Exception as reason:
1541            msg = (_("Unable to get interfaces for instance '%s': %s")
1542                   % (instance['name'], reason))
1543            raise exception.NovaException(msg)
1544
1545        anetdict = {}
1546        for anet_maddr in out.strip().splitlines():
1547            anet, maddr = anet_maddr.strip().split(':', 1)
1548            maddr = maddr.replace('\\', '')
1549            maddr = ''.join(['%02x' % int(b, 16) for b in maddr.split(':')])
1550            anetdict[maddr] = anet
1551
1552        LOG.debug(_("List of instance %s's anets: %s")
1553                  % (instance['name'], anetdict))
1554        # we now have a list of VNICs that belong to the VM
1555        # we need to map the VNIC to the bridge
1556        for vif in network_info:
1557            vif_maddr = ''.join(['%02x' % int(b, 16) for b in
1558                                 vif['address'].split(':')])
1559            anet = anetdict.get(vif_maddr)
1560            if anet is None:
1561                LOG.error(_('Failed to add port %s connected to network %s '
1562                            'to instance %s')
1563                          % (vif['ovs_interfaceid'], vif['network']['id'],
1564                             instance['name']))
1565                continue
1566            self._ovs_add_port(instance, vif, anet)
1567
1568    def _unplug_vifs(self, instance):
1569        ovs_bridge = CONF.neutron.ovs_bridge
1570        # remove the anets from the OVS bridge
1571        cmd = ['/usr/sbin/ovs-vsctl', '--timeout=%s' % CONF.ovs_vsctl_timeout,
1572               'list-ports', ovs_bridge]
1573        try:
1574            out, err = utils.execute(*cmd)
1575        except Exception as reason:
1576            msg = (_("Unable to get interfaces for instance '%s': %s")
1577                   % (instance['name'], reason))
1578            raise exception.NovaException(msg)
1579
1580        for port in out.strip().splitlines():
1581            if port.split('/')[0] != instance['name']:
1582                continue
1583            self._ovs_delete_port(port, log_warnings=True)
1584
1585    def _set_ovs_info(self, context, zone, brand, first_anet, vif):
1586        # Need to be admin to retrieve provider:network_type attribute
1587        network_plugin = neutronv2_api.get_client(context, admin=True)
1588        network = network_plugin.show_network(
1589            vif['network']['id'])['network']
1590        network_type = network['provider:network_type']
1591        lower_link = None
1592        if network_type == 'vxlan':
1593            lower_link = 'ovs.vxlan1'
1594        elif network_type in ['vlan', 'flat']:
1595            physical_network = network['provider:physical_network']
1596            # retrieve the other_config information from Open_vSwitch table
1597            try:
1598                results = get_ovsdb_info('Open_vSwitch', ['other_config'])
1599            except Exception as err:
1600                LOG.exception(_("Failed to retrieve other_config: %s"), err)
1601                raise
1602
1603            other_config = results[0]['other_config']
1604            if not other_config:
1605                msg = (_("'other_config' column in 'Open_vSwitch' OVSDB table "
1606                         "is not configured. Please configure it so that the "
1607                         "lower-link can be determined for the instance's "
1608                         "interface."))
1609                LOG.error(msg)
1610                raise exception.NovaException(msg)
1611            bridge_mappings = other_config.get('bridge_mappings')
1612            if not bridge_mappings:
1613                msg = (_("'bridge_mappings' info is not set in the "
1614                         "'other_config' column of 'Open_vSwitch' OVSDB "
1615                         "table. Please configure it so that the lower-link "
1616                         "can be determined for the instance's interface."))
1617                LOG.error(msg)
1618                raise exception.NovaException(msg)
1619            for bridge_mapping in bridge_mappings.split(','):
1620                if physical_network in bridge_mapping:
1621                    lower_link = bridge_mapping.split(':')[1]
1622                    break
1623            if not lower_link:
1624                msg = (_("Failed to determine the lower_link for vif '%s'.") %
1625                       (vif))
1626                LOG.error(msg)
1627                raise exception.NovaException(msg)
1628        else:
1629            # TYPE_GRE and TYPE_LOCAL
1630            msg = (_("Unsupported network type: %s") % network_type)
1631            LOG.error(msg)
1632            raise exception.NovaException(msg)
1633
1634        mtu = network['mtu']
1635        with ZoneConfig(zone) as zc:
1636            if first_anet:
1637                zc.setprop('anet', 'lower-link', lower_link)
1638                zc.setprop('anet', 'configure-allowed-address', 'false')
1639                zc.setprop('anet', 'mac-address', vif['address'])
1640                if mtu > 0:
1641                    zc.setprop('anet', 'mtu', str(mtu))
1642            else:
1643                props = [zonemgr.Property('lower-link', lower_link),
1644                         zonemgr.Property('configure-allowed-address',
1645                                          'false'),
1646                         zonemgr.Property('mac-address', vif['address'])]
1647                if mtu > 0:
1648                    props.append(zonemgr.Property('mtu', str(mtu)))
1649                zc.addresource('anet', props)
1650
1651            prop_filter = [zonemgr.Property('mac-address', vif['address'])]
1652            if brand == ZONE_BRAND_SOLARIS:
1653                anetname = lookup_resource_property(zc.zone, 'anet',
1654                                                    'linkname', prop_filter)
1655            else:
1656                anetid = lookup_resource_property(zc.zone, 'anet', 'id',
1657                                                  prop_filter)
1658                anetname = 'net%s' % anetid
1659        return anetname
1660
1661    def _set_network(self, context, name, instance, network_info, brand,
1662                     sc_dir):
1663        """add networking information to the zone."""
1664        zone = self._get_zone_by_name(name)
1665        if zone is None:
1666            raise exception.InstanceNotFound(instance_id=name)
1667
1668        if not network_info:
1669            with ZoneConfig(zone) as zc:
1670                if brand == ZONE_BRAND_SOLARIS:
1671                    zc.removeresources("anet",
1672                                       [zonemgr.Property("linkname", "net0")])
1673                else:
1674                    zc.removeresources("anet", [zonemgr.Property("id", "0")])
1675                return
1676
1677        for vifid, vif in enumerate(network_info):
1678            LOG.debug("%s", jsonutils.dumps(vif, indent=5))
1679
1680            ip = vif['network']['subnets'][0]['ips'][0]['address']
1681            cidr = vif['network']['subnets'][0]['cidr']
1682            ip_cidr = "%s/%s" % (ip, cidr.split('/')[1])
1683            ip_version = vif['network']['subnets'][0]['version']
1684            dhcp_server = \
1685                vif['network']['subnets'][0]['meta'].get('dhcp_server')
1686            enable_dhcp = dhcp_server is not None
1687            route = vif['network']['subnets'][0]['gateway']['address']
1688            dns_list = vif['network']['subnets'][0]['dns']
1689            nameservers = []
1690            for dns in dns_list:
1691                if dns['type'] == 'dns':
1692                    nameservers.append(dns['address'])
1693
1694            anetname = self._set_ovs_info(context, zone, brand, vifid == 0,
1695                                          vif)
1696
1697            # create the required sysconfig file (or skip if this is part of a
1698            # resize or evacuate process)
1699            tstate = instance['task_state']
1700            if tstate not in [task_states.RESIZE_FINISH,
1701                              task_states.RESIZE_REVERTING,
1702                              task_states.RESIZE_MIGRATING,
1703                              task_states.REBUILD_SPAWNING] or \
1704                (tstate == task_states.REBUILD_SPAWNING and
1705                 instance.system_metadata['rebuilding'] == 'true'):
1706                if enable_dhcp:
1707                    tree = sysconfig.create_ncp_defaultfixed('dhcp',
1708                                                             anetname, vifid,
1709                                                             ip_version)
1710                else:
1711                    host_routes = vif['network']['subnets'][0]['routes']
1712                    tree = sysconfig.create_ncp_defaultfixed('static',
1713                                                             anetname, vifid,
1714                                                             ip_version,
1715                                                             ip_cidr, route,
1716                                                             nameservers,
1717                                                             host_routes)
1718
1719                fp = os.path.join(sc_dir, 'zone-network-%d.xml' % vifid)
1720                sysconfig.create_sc_profile(fp, tree)
1721
1722    def _set_suspend(self, instance):
1723        """Use the instance name to specify the pathname for the suspend image.
1724        """
1725        name = instance['name']
1726        zone = self._get_zone_by_name(name)
1727        if zone is None:
1728            raise exception.InstanceNotFound(instance_id=name)
1729
1730        path = os.path.join(CONF.solariszones.zones_suspend_path,
1731                            '%{zonename}')
1732        with ZoneConfig(zone) as zc:
1733            zc.addresource('suspend', [zonemgr.Property('path', path)])
1734
1735    def _verify_sysconfig(self, sc_dir, instance, admin_password=None):
1736        """verify the SC profile(s) passed in contain an entry for
1737        system/config-user to configure the root account.  If an SSH key is
1738        specified, configure root's profile to use it.
1739        """
1740        usercheck = lambda e: e.attrib.get('name') == 'system/config-user'
1741        hostcheck = lambda e: e.attrib.get('name') == 'system/identity'
1742
1743        root_account_needed = True
1744        hostname_needed = True
1745        sshkey = instance.get('key_data')
1746        name = instance.get('hostname')
1747        encrypted_password = None
1748
1749        # encrypt admin password, using SHA-256 as default
1750        if admin_password is not None:
1751            encrypted_password = sha256_crypt.encrypt(admin_password)
1752
1753        # find all XML files in sc_dir
1754        for root, dirs, files in os.walk(sc_dir):
1755            for fname in [f for f in files if f.endswith(".xml")]:
1756                fileroot = etree.parse(os.path.join(root, fname))
1757
1758                # look for config-user properties
1759                if filter(usercheck, fileroot.findall('service')):
1760                    # a service element was found for config-user.  Verify
1761                    # root's password is set, the admin account name is set and
1762                    # the admin's password is set
1763                    pgs = fileroot.iter('property_group')
1764                    for pg in pgs:
1765                        if pg.attrib.get('name') == 'root_account':
1766                            root_account_needed = False
1767
1768                # look for identity properties
1769                if filter(hostcheck, fileroot.findall('service')):
1770                    for props in fileroot.iter('propval'):
1771                        if props.attrib.get('name') == 'nodename':
1772                            hostname_needed = False
1773
1774        # Verify all of the requirements were met.  Create the required SMF
1775        # profile(s) if needed.
1776        if root_account_needed:
1777            fp = os.path.join(sc_dir, 'config-root.xml')
1778
1779            if admin_password is not None and sshkey is not None:
1780                # store password for horizon retrieval
1781                ctxt = nova_context.get_admin_context()
1782                enc = crypto.ssh_encrypt_text(sshkey, admin_password)
1783                instance.system_metadata.update(
1784                    password.convert_password(ctxt, base64.b64encode(enc)))
1785                instance.save()
1786
1787            if encrypted_password is not None or sshkey is not None:
1788                # set up the root account as 'normal' with no expiration,
1789                # an ssh key, and a root password
1790                tree = sysconfig.create_default_root_account(
1791                    sshkey=sshkey, password=encrypted_password)
1792            else:
1793                # sets up root account with expiration if sshkey is None
1794                # and password is none
1795                tree = sysconfig.create_default_root_account(expire='0')
1796
1797            sysconfig.create_sc_profile(fp, tree)
1798
1799        elif sshkey is not None:
1800            fp = os.path.join(sc_dir, 'config-root-ssh-keys.xml')
1801            tree = sysconfig.create_root_ssh_keys(sshkey)
1802            sysconfig.create_sc_profile(fp, tree)
1803
1804        if hostname_needed and name is not None:
1805            fp = os.path.join(sc_dir, 'hostname.xml')
1806            sysconfig.create_sc_profile(fp, sysconfig.create_hostname(name))
1807
1808    def _create_config(self, context, instance, network_info, connection_info,
1809                       sc_dir, admin_password=None):
1810        """Create a new Solaris Zone configuration."""
1811        name = instance['name']
1812        if self._get_zone_by_name(name) is not None:
1813            raise exception.InstanceExists(name=name)
1814
1815        flavor = self._get_flavor(instance)
1816        extra_specs = flavor['extra_specs'].copy()
1817
1818        # If unspecified, default zone brand is ZONE_BRAND_SOLARIS
1819        brand = extra_specs.get('zonecfg:brand')
1820        if brand is None:
1821            LOG.warning(_("'zonecfg:brand' key not found in extra specs for "
1822                          "flavor '%s'.  Defaulting to 'solaris'"
1823                        % flavor['name']))
1824
1825            brand = ZONE_BRAND_SOLARIS
1826
1827        template = ZONE_BRAND_TEMPLATE.get(brand)
1828        # TODO(dcomay): Detect capability via libv12n(3LIB) or virtinfo(1M).
1829        if template is None:
1830            msg = (_("Invalid brand '%s' specified for instance '%s'"
1831                   % (brand, name)))
1832            raise exception.NovaException(msg)
1833
1834        tstate = instance['task_state']
1835        if tstate not in [task_states.RESIZE_FINISH,
1836                          task_states.RESIZE_REVERTING,
1837                          task_states.RESIZE_MIGRATING,
1838                          task_states.REBUILD_SPAWNING] or \
1839            (tstate == task_states.REBUILD_SPAWNING and
1840             instance.system_metadata['rebuilding'] == 'true'):
1841            sc_profile = extra_specs.get('install:sc_profile')
1842            if sc_profile is not None:
1843                if os.path.isfile(sc_profile):
1844                    shutil.copy(sc_profile, sc_dir)
1845                elif os.path.isdir(sc_profile):
1846                    shutil.copytree(sc_profile,
1847                                    os.path.join(sc_dir, 'sysconfig'))
1848
1849            self._verify_sysconfig(sc_dir, instance, admin_password)
1850
1851        LOG.debug(_("Creating zone configuration for '%s' (%s)")
1852                  % (name, instance['display_name']))
1853        try:
1854            self.zone_manager.create(name, None, template)
1855            self._set_global_properties(name, extra_specs, brand)
1856            hostid = instance.system_metadata.get('hostid')
1857            if hostid:
1858                zone = self._get_zone_by_name(name)
1859                with ZoneConfig(zone) as zc:
1860                    zc.setprop('global', 'hostid', hostid)
1861
1862            if connection_info is not None:
1863                self._set_boot_device(name, connection_info, brand)
1864            self._set_num_cpu(name, instance['vcpus'], brand)
1865            self._set_memory_cap(name, instance['memory_mb'], brand)
1866            self._set_network(context, name, instance, network_info, brand,
1867                              sc_dir)
1868        except Exception as ex:
1869            reason = zonemgr_strerror(ex)
1870            LOG.exception(_("Unable to create configuration for instance '%s' "
1871                            "via zonemgr(3RAD): %s") % (name, reason))
1872            raise
1873
1874    def _create_vnc_console_service(self, instance):
1875        """Create a VNC console SMF service for a Solaris Zone"""
1876        # Basic environment checks first: vncserver and xterm
1877        if not os.path.exists(VNC_SERVER_PATH):
1878            LOG.warning(_("Zone VNC console SMF service not available on this "
1879                          "compute node. %s is missing. Run 'pkg install "
1880                          "x11/server/xvnc'") % VNC_SERVER_PATH)
1881            raise exception.ConsoleTypeUnavailable(console_type='vnc')
1882
1883        if not os.path.exists(XTERM_PATH):
1884            LOG.warning(_("Zone VNC console SMF service not available on this "
1885                          "compute node. %s is missing. Run 'pkg install "
1886                          "terminal/xterm'") % XTERM_PATH)
1887            raise exception.ConsoleTypeUnavailable(console_type='vnc')
1888
1889        name = instance['name']
1890        # TODO(npower): investigate using RAD instead of CLI invocation
1891        try:
1892            out, err = utils.execute('/usr/sbin/svccfg',
1893                                     '-s', VNC_CONSOLE_BASE_FMRI, 'add', name)
1894        except processutils.ProcessExecutionError as ex:
1895            if self._has_vnc_console_service(instance):
1896                LOG.debug(_("Ignoring attempt to create existing zone VNC "
1897                            "console SMF service for instance '%s'") % name)
1898                return
1899            reason = ex.stderr
1900            LOG.exception(_("Unable to create zone VNC console SMF service "
1901                            "'{0}': {1}").format(VNC_CONSOLE_BASE_FMRI + ':' +
1902                                                 name, reason))
1903            raise
1904
1905    def _delete_vnc_console_service(self, instance):
1906        """Delete a VNC console SMF service for a Solaris Zone"""
1907        name = instance['name']
1908        self._disable_vnc_console_service(instance)
1909        # TODO(npower): investigate using RAD instead of CLI invocation
1910        try:
1911            out, err = utils.execute('/usr/sbin/svccfg',
1912                                     '-s', VNC_CONSOLE_BASE_FMRI, 'delete',
1913                                     name)
1914        except processutils.ProcessExecutionError as ex:
1915            if not self._has_vnc_console_service(instance):
1916                LOG.debug(_("Ignoring attempt to delete a non-existent zone "
1917                            "VNC console SMF service for instance '%s'")
1918                          % name)
1919                return
1920            reason = ex.stderr
1921            LOG.exception(_("Unable to delete zone VNC console SMF service "
1922                            "'%s': %s")
1923                          % (VNC_CONSOLE_BASE_FMRI + ':' + name, reason))
1924            raise
1925
1926    def _enable_vnc_console_service(self, instance):
1927        """Enable a zone VNC console SMF service"""
1928        name = instance['name']
1929
1930        console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
1931        # TODO(npower): investigate using RAD instead of CLI invocation
1932        try:
1933            # The console SMF service exits with SMF_TEMP_DISABLE to prevent
1934            # unnecessarily coming online at boot. Tell it to really bring
1935            # it online.
1936            out, err = utils.execute('/usr/sbin/svccfg', '-s', console_fmri,
1937                                     'setprop', 'vnc/nova-enabled=true')
1938            out, err = utils.execute('/usr/sbin/svccfg', '-s', console_fmri,
1939                                     'refresh')
1940            out, err = utils.execute('/usr/sbin/svcadm', 'enable',
1941                                     console_fmri)
1942        except processutils.ProcessExecutionError as ex:
1943            if not self._has_vnc_console_service(instance):
1944                LOG.debug(_("Ignoring attempt to enable a non-existent zone "
1945                            "VNC console SMF service for instance '%s'")
1946                          % name)
1947                return
1948            reason = ex.stderr
1949            LOG.exception(_("Unable to start zone VNC console SMF service "
1950                            "'%s': %s") % (console_fmri, reason))
1951            raise
1952
1953        # Allow some time for the console service to come online.
1954        greenthread.sleep(2)
1955        while True:
1956            try:
1957                out, err = utils.execute('/usr/bin/svcs', '-H', '-o', 'state',
1958                                         console_fmri)
1959                state = out.strip()
1960                if state == 'online':
1961                    break
1962                elif state in ['maintenance', 'offline']:
1963                    LOG.error(_("Zone VNC console SMF service '%s' is in the "
1964                                "'%s' state. Run 'svcs -x %s' for details.")
1965                              % (console_fmri, state, console_fmri))
1966                    raise exception.ConsoleNotFoundForInstance(
1967                        instance_uuid=instance['uuid'])
1968                # Wait for service state to transition to (hopefully) online
1969                # state or offline/maintenance states.
1970                greenthread.sleep(2)
1971            except processutils.ProcessExecutionError as ex:
1972                reason = ex.stderr
1973                LOG.exception(_("Error querying state of zone VNC console SMF "
1974                                "service '%s': %s") % (console_fmri, reason))
1975                raise
1976        # TODO(npower): investigate using RAD instead of CLI invocation
1977        try:
1978            # The console SMF service exits with SMF_TEMP_DISABLE to prevent
1979            # unnecessarily coming online at boot. Make that happen.
1980            out, err = utils.execute('/usr/sbin/svccfg', '-s', console_fmri,
1981                                     'setprop', 'vnc/nova-enabled=false')
1982            out, err = utils.execute('/usr/sbin/svccfg', '-s', console_fmri,
1983                                     'refresh')
1984        except processutils.ProcessExecutionError as ex:
1985            reason = ex.stderr
1986            LOG.exception(_("Unable to update 'vnc/nova-enabled' property for "
1987                            "zone VNC console SMF service '%s': %s")
1988                          % (console_fmri, reason))
1989            raise
1990
1991    def _disable_vnc_console_service(self, instance):
1992        """Disable a zone VNC console SMF service"""
1993        name = instance['name']
1994        if not self._has_vnc_console_service(instance):
1995            LOG.debug(_("Ignoring attempt to disable a non-existent zone VNC "
1996                        "console SMF service for instance '%s'") % name)
1997            return
1998        console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
1999        # TODO(npower): investigate using RAD instead of CLI invocation
2000        try:
2001            out, err = utils.execute('/usr/sbin/svcadm', 'disable',
2002                                     '-s', console_fmri)
2003        except processutils.ProcessExecutionError as ex:
2004            reason = ex.stderr
2005            LOG.exception(_("Unable to disable zone VNC console SMF service "
2006                            "'%s': %s") % (console_fmri, reason))
2007        # The console service sets a SMF instance property for the port
2008        # on which the VNC service is listening. The service needs to be
2009        # refreshed to reset the property value
2010        try:
2011            out, err = utils.execute('/usr/sbin/svccfg', '-s', console_fmri,
2012                                     'refresh')
2013        except processutils.ProcessExecutionError as ex:
2014            reason = ex.stderr
2015            LOG.exception(_("Unable to refresh zone VNC console SMF service "
2016                            "'%s': %s") % (console_fmri, reason))
2017
2018    def _get_vnc_console_service_state(self, instance):
2019        """Returns state of the instance zone VNC console SMF service"""
2020        name = instance['name']
2021        if not self._has_vnc_console_service(instance):
2022            LOG.warning(_("Console state requested for a non-existent zone "
2023                          "VNC console SMF service for instance '%s'")
2024                        % name)
2025            return None
2026        console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
2027        # TODO(npower): investigate using RAD instead of CLI invocation
2028        try:
2029            state, err = utils.execute('/usr/sbin/svcs', '-H', '-o', 'state',
2030                                       console_fmri)
2031            return state.strip()
2032        except processutils.ProcessExecutionError as ex:
2033            reason = ex.stderr
2034            LOG.exception(_("Console state request failed for zone VNC "
2035                            "console SMF service for instance '%s': %s")
2036                          % (name, reason))
2037            raise
2038
2039    def _has_vnc_console_service(self, instance):
2040        """Returns True if the instance has a zone VNC console SMF service"""
2041        name = instance['name']
2042        console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
2043        # TODO(npower): investigate using RAD instead of CLI invocation
2044        try:
2045            utils.execute('/usr/bin/svcs', '-H', '-o', 'state', console_fmri)
2046            return True
2047        except Exception:
2048            return False
2049
2050    def _install(self, instance, image, sc_dir):
2051        """Install a new Solaris Zone root file system."""
2052        name = instance['name']
2053        zone = self._get_zone_by_name(name)
2054        if zone is None:
2055            raise exception.InstanceNotFound(instance_id=name)
2056
2057        # log the zone's configuration
2058        with ZoneConfig(zone) as zc:
2059            LOG.debug("-" * 80)
2060            LOG.debug(zc.zone.exportConfig(True))
2061            LOG.debug("-" * 80)
2062
2063        options = ['-a', image]
2064
2065        if os.listdir(sc_dir):
2066            # the directory isn't empty so pass it along to install
2067            options.extend(['-c', sc_dir])
2068
2069        try:
2070            LOG.debug(_("Installing instance '%s' (%s)") %
2071                      (name, instance['display_name']))
2072            zone.install(options=options)
2073        except Exception as ex:
2074            reason = zonemgr_strerror(ex)
2075            LOG.exception(_("Unable to install root file system for instance "
2076                            "'%s' via zonemgr(3RAD): %s") % (name, reason))
2077            raise
2078
2079        self._set_instance_metahostid(instance)
2080
2081        LOG.debug(_("Installation of instance '%s' (%s) complete") %
2082                  (name, instance['display_name']))
2083
2084    def _power_on(self, instance, network_info):
2085        """Power on a Solaris Zone."""
2086        name = instance['name']
2087        zone = self._get_zone_by_name(name)
2088        if zone is None:
2089            raise exception.InstanceNotFound(instance_id=name)
2090
2091        # Attempt to update the zones hostid in the instance data, to catch
2092        # those instances that might have been created without a hostid stored.
2093        self._set_instance_metahostid(instance)
2094
2095        bootargs = []
2096        if CONF.solariszones.solariszones_boot_options:
2097            reset_bootargs = False
2098            persistent = 'False'
2099
2100            # Get any bootargs already set in the zone
2101            cur_bootargs = lookup_resource_property(zone, 'global', 'bootargs')
2102
2103            # Get any bootargs set in the instance metadata by the user
2104            meta_bootargs = instance.metadata.get('bootargs')
2105
2106            if meta_bootargs:
2107                bootargs = ['--', str(meta_bootargs)]
2108                persistent = str(
2109                    instance.metadata.get('bootargs_persist', 'False'))
2110                if cur_bootargs is not None and meta_bootargs != cur_bootargs:
2111                    with ZoneConfig(zone) as zc:
2112                        reset_bootargs = True
2113                        # Temporarily clear bootargs in zone config
2114                        zc.clear_resource_props('global', ['bootargs'])
2115
2116        try:
2117            zone.boot(bootargs)
2118            self._plug_vifs(instance, network_info)
2119        except Exception as ex:
2120            reason = zonemgr_strerror(ex)
2121            LOG.exception(_("Unable to power on instance '%s' via "
2122                            "zonemgr(3RAD): %s") % (name, reason))
2123            raise exception.InstancePowerOnFailure(reason=reason)
2124        finally:
2125            if CONF.solariszones.solariszones_boot_options:
2126                if meta_bootargs and persistent.lower() == 'false':
2127                    # We have consumed the metadata bootargs and
2128                    # the user asked for them not to be persistent so
2129                    # clear them out now.
2130                    instance.metadata.pop('bootargs', None)
2131                    instance.metadata.pop('bootargs_persist', None)
2132
2133                if reset_bootargs:
2134                    with ZoneConfig(zone) as zc:
2135                        # restore original boot args in zone config
2136                        zc.setprop('global', 'bootargs', cur_bootargs)
2137
2138    def _uninstall(self, instance):
2139        """Uninstall an existing Solaris Zone root file system."""
2140        name = instance['name']
2141        zone = self._get_zone_by_name(name)
2142        if zone is None:
2143            raise exception.InstanceNotFound(instance_id=name)
2144
2145        if zone.state == ZONE_STATE_CONFIGURED:
2146            LOG.debug(_("Uninstall not required for zone '%s' in state '%s'")
2147                      % (name, zone.state))
2148            return
2149        try:
2150            zone.uninstall(['-F'])
2151        except Exception as ex:
2152            reason = zonemgr_strerror(ex)
2153            LOG.exception(_("Unable to uninstall root file system for "
2154                            "instance '%s' via zonemgr(3RAD): %s")
2155                          % (name, reason))
2156            raise
2157
2158    def _delete_config(self, instance):
2159        """Delete an existing Solaris Zone configuration."""
2160        name = instance['name']
2161        if self._get_zone_by_name(name) is None:
2162            raise exception.InstanceNotFound(instance_id=name)
2163
2164        try:
2165            self.zone_manager.delete(name)
2166        except Exception as ex:
2167            reason = zonemgr_strerror(ex)
2168            LOG.exception(_("Unable to delete configuration for instance '%s' "
2169                            "via zonemgr(3RAD): %s") % (name, reason))
2170            raise
2171
2172    def spawn(self, context, instance, image_meta, injected_files,
2173              admin_password, network_info=None, block_device_info=None):
2174        """Create a new instance/VM/domain on the virtualization platform.
2175
2176        Once this successfully completes, the instance should be
2177        running (power_state.RUNNING).
2178
2179        If this fails, any partial instance should be completely
2180        cleaned up, and the virtualization platform should be in the state
2181        that it was before this call began.
2182
2183        :param context: security context
2184        :param instance: nova.objects.instance.Instance
2185                         This function should use the data there to guide
2186                         the creation of the new instance.
2187        :param nova.objects.ImageMeta image_meta:
2188            The metadata of the image of the instance.
2189        :param injected_files: User files to inject into instance.
2190        :param admin_password: Administrator password to set in instance.
2191        :param network_info:
2192           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
2193        :param block_device_info: Information about block devices to be
2194                                  attached to the instance.
2195        """
2196        image = self._fetch_image(context, instance)
2197        self._validate_image(context, image, instance)
2198        self._validate_flavor(instance)
2199
2200        # c1d0 is the standard dev for the default boot device.
2201        # Irrelevant value for ZFS, but Cinder gets stroppy without it.
2202        mountpoint = "c1d0"
2203
2204        # Ensure no block device mappings attempt to use the reserved boot
2205        # device (c1d0).
2206        for entry in block_device_info.get('block_device_mapping'):
2207            if entry['connection_info'] is None:
2208                continue
2209
2210            mount_device = entry['mount_device']
2211            if mount_device == '/dev/' + mountpoint:
2212                msg = (_("Unable to assign '%s' to block device as it is"
2213                         "reserved for the root file system") % mount_device)
2214                raise exception.InvalidDiskInfo(msg)
2215
2216        # Attempt to provision a (Cinder) volume service backed boot volume
2217        volume = self._create_boot_volume(context, instance)
2218        volume_id = volume['id']
2219        name = instance['name']
2220        try:
2221            connection_info = self._connect_boot_volume(volume, mountpoint,
2222                                                        context, instance)
2223        except exception.InvalidVolume as reason:
2224            # This Cinder volume is not usable for ZOSS so discard it.
2225            # zonecfg will apply default zonepath dataset configuration
2226            # instead. Carry on
2227            LOG.warning(_("Volume '%s' is being discarded: %s")
2228                        % (volume_id, reason))
2229            self._volume_api.delete(context, volume_id)
2230            connection_info = None
2231        except Exception as reason:
2232            # Something really bad happened. Don't pass Go.
2233            LOG.exception(_("Unable to attach root zpool volume '%s' to "
2234                            "instance %s: %s") % (volume['id'], name, reason))
2235            self._volume_api.delete(context, volume_id)
2236            raise
2237
2238        # create a new directory for SC profiles
2239        sc_dir = tempfile.mkdtemp(prefix="nova-sysconfig-",
2240                                  dir=CONF.state_path)
2241        os.chmod(sc_dir, 0755)
2242
2243        try:
2244            self._create_config(context, instance, network_info,
2245                                connection_info, sc_dir, admin_password)
2246            self._install(instance, image, sc_dir)
2247
2248            for entry in block_device_info.get('block_device_mapping'):
2249                if entry['connection_info'] is not None:
2250                    self.attach_volume(context, entry['connection_info'],
2251                                       instance, entry['mount_device'])
2252
2253            self._power_on(instance, network_info)
2254        except Exception as ex:
2255            reason = zonemgr_strerror(ex)
2256            LOG.exception(_("Unable to spawn instance '%s' via zonemgr(3RAD): "
2257                            "'%s'") % (name, reason))
2258            # At least attempt to uninstall the instance, depending on where
2259            # the installation got to there could be things left behind that
2260            # need to be cleaned up, e.g a root zpool etc.
2261            try:
2262                self._uninstall(instance)
2263            except Exception as ex:
2264                reason = zonemgr_strerror(ex)
2265                LOG.debug(_("Unable to uninstall instance '%s' via "
2266                            "zonemgr(3RAD): %s") % (name, reason))
2267            try:
2268                self._delete_config(instance)
2269            except Exception as ex:
2270                reason = zonemgr_strerror(ex)
2271                LOG.debug(_("Unable to unconfigure instance '%s' via "
2272                            "zonemgr(3RAD): %s") % (name, reason))
2273
2274            if connection_info is not None:
2275                self._volume_api.detach(context, volume_id)
2276                self._volume_api.delete(context, volume_id)
2277            raise
2278        finally:
2279            # remove the sc_profile temp directory
2280            shutil.rmtree(sc_dir)
2281
2282        if connection_info is not None:
2283            bdm_obj = objects.BlockDeviceMappingList()
2284            # there's only one bdm for this instance at this point
2285            bdm = bdm_obj.get_by_instance_uuid(
2286                context, instance.uuid).objects[0]
2287
2288            # update the required attributes
2289            bdm['connection_info'] = jsonutils.dumps(connection_info)
2290            bdm['source_type'] = 'volume'
2291            bdm['destination_type'] = 'volume'
2292            bdm['device_name'] = mountpoint
2293            bdm['delete_on_termination'] = True
2294            bdm['volume_id'] = volume_id
2295            bdm['volume_size'] = instance['root_gb']
2296            bdm.save()
2297
2298    def _power_off(self, instance, halt_type):
2299        """Power off a Solaris Zone."""
2300        name = instance['name']
2301        zone = self._get_zone_by_name(name)
2302        if zone is None:
2303            raise exception.InstanceNotFound(instance_id=name)
2304
2305        # Attempt to update the zones hostid in the instance data, to catch
2306        # those instances that might have been created without a hostid stored.
2307        self._set_instance_metahostid(instance)
2308
2309        try:
2310            self._unplug_vifs(instance)
2311            if halt_type == 'SOFT':
2312                zone.shutdown()
2313            else:
2314                # 'HARD'
2315                zone.halt()
2316        except Exception as ex:
2317            reason = zonemgr_strerror(ex)
2318            # A shutdown state could still be reached if the error was
2319            # informational and ignorable.
2320            if self._get_state(zone) == power_state.SHUTDOWN:
2321                LOG.warning(_("Ignoring command error returned while "
2322                              "trying to power off instance '%s' via "
2323                              "zonemgr(3RAD): %s" % (name, reason)))
2324                return
2325
2326            LOG.exception(_("Unable to power off instance '%s' "
2327                            "via zonemgr(3RAD): %s") % (name, reason))
2328            raise exception.InstancePowerOffFailure(reason=reason)
2329
2330    def _samehost_revert_resize(self, context, instance, network_info,
2331                                block_device_info):
2332        """Reverts the zones configuration to pre-resize config
2333        """
2334        self.power_off(instance)
2335
2336        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
2337        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
2338
2339        name = instance['name']
2340
2341        self._set_num_cpu(name, instance.vcpus, brand)
2342        self._set_memory_cap(name, instance.memory_mb, brand)
2343
2344        rgb = instance.root_gb
2345        old_rvid = instance.system_metadata.get('old_instance_volid')
2346        if old_rvid:
2347            new_rvid = instance.system_metadata.get('new_instance_volid')
2348            mount_dev = instance['root_device_name']
2349            del instance.system_metadata['old_instance_volid']
2350
2351            self._resize_disk_migration(context, instance, new_rvid, old_rvid,
2352                                        rgb, mount_dev)
2353
2354    def destroy(self, context, instance, network_info, block_device_info=None,
2355                destroy_disks=True, migrate_data=None):
2356        """Destroy the specified instance from the Hypervisor.
2357
2358        If the instance is not found (for example if networking failed), this
2359        function should still succeed.  It's probably a good idea to log a
2360        warning in that case.
2361
2362        :param context: security context
2363        :param instance: Instance object as returned by DB layer.
2364        :param network_info:
2365           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
2366        :param block_device_info: Information about block devices that should
2367                                  be detached from the instance.
2368        :param destroy_disks: Indicates if disks should be destroyed
2369        :param migrate_data: implementation specific params
2370        """
2371        if (instance['task_state'] == task_states.RESIZE_REVERTING and
2372                instance.system_metadata['old_vm_state'] == vm_states.RESIZED):
2373            return
2374
2375        # A destroy is issued for the original zone for an evac case.  If
2376        # the evac fails we need to protect the zone from deletion when
2377        # power comes back on.
2378        evac_from = instance.system_metadata.get('evac_from')
2379        if evac_from is not None and instance['task_state'] is None:
2380            instance.host = evac_from
2381            instance.node = evac_from
2382            del instance.system_metadata['evac_from']
2383            instance.save()
2384
2385            return
2386
2387        try:
2388            # These methods log if problems occur so no need to double log
2389            # here. Just catch any stray exceptions and allow destroy to
2390            # proceed.
2391            if self._has_vnc_console_service(instance):
2392                self._disable_vnc_console_service(instance)
2393                self._delete_vnc_console_service(instance)
2394        except Exception:
2395            pass
2396
2397        name = instance['name']
2398        zone = self._get_zone_by_name(name)
2399        # If instance cannot be found, just return.
2400        if zone is None:
2401            LOG.warning(_("Unable to find instance '%s' via zonemgr(3RAD)")
2402                        % name)
2403            return
2404
2405        try:
2406            if self._get_state(zone) == power_state.RUNNING:
2407                self._power_off(instance, 'HARD')
2408            if self._get_state(zone) == power_state.SHUTDOWN:
2409                self._uninstall(instance)
2410            if self._get_state(zone) == power_state.NOSTATE:
2411                self._delete_config(instance)
2412        except Exception as ex:
2413            reason = zonemgr_strerror(ex)
2414            LOG.warning(_("Unable to destroy instance '%s' via zonemgr(3RAD): "
2415                          "%s") % (name, reason))
2416
2417        # One last point of house keeping. If we are deleting the instance
2418        # during a resize operation we want to make sure the cinder volumes are
2419        # properly cleaned up. We need to do this here, because the periodic
2420        # task that comes along and cleans these things up isn't nice enough to
2421        # pass a context in so that we could simply do the work there.  But
2422        # because we have access to a context, we can handle the work here and
2423        # let the periodic task simply clean up the left over zone
2424        # configuration that might be left around.  Note that the left over
2425        # zone will only show up in zoneadm list, not nova list.
2426        #
2427        # If the task state is RESIZE_REVERTING do not process these because
2428        # the cinder volume cleanup is taken care of in
2429        # finish_revert_migration.
2430        if instance['task_state'] == task_states.RESIZE_REVERTING:
2431            return
2432
2433        tags = ['old_instance_volid', 'new_instance_volid']
2434        for tag in tags:
2435            volid = instance.system_metadata.get(tag)
2436            if volid:
2437                try:
2438                    LOG.debug(_("Deleting volume %s"), volid)
2439                    self._volume_api.delete(context, volid)
2440                    del instance.system_metadata[tag]
2441                except Exception:
2442                    pass
2443
2444    def cleanup(self, context, instance, network_info, block_device_info=None,
2445                destroy_disks=True, migrate_data=None, destroy_vifs=True):
2446        """Cleanup the instance resources .
2447
2448        Instance should have been destroyed from the Hypervisor before calling
2449        this method.
2450
2451        :param context: security context
2452        :param instance: Instance object as returned by DB layer.
2453        :param network_info:
2454           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
2455        :param block_device_info: Information about block devices that should
2456                                  be detached from the instance.
2457        :param destroy_disks: Indicates if disks should be destroyed
2458        :param migrate_data: implementation specific params
2459        """
2460        raise NotImplementedError()
2461
2462    def reboot(self, context, instance, network_info, reboot_type,
2463               block_device_info=None, bad_volumes_callback=None):
2464        """Reboot the specified instance.
2465
2466        After this is called successfully, the instance's state
2467        goes back to power_state.RUNNING. The virtualization
2468        platform should ensure that the reboot action has completed
2469        successfully even in cases in which the underlying domain/vm
2470        is paused or halted/stopped.
2471
2472        :param instance: nova.objects.instance.Instance
2473        :param network_info:
2474           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
2475        :param reboot_type: Either a HARD or SOFT reboot
2476        :param block_device_info: Info pertaining to attached volumes
2477        :param bad_volumes_callback: Function to handle any bad volumes
2478            encountered
2479        """
2480        name = instance['name']
2481        zone = self._get_zone_by_name(name)
2482        if zone is None:
2483            raise exception.InstanceNotFound(instance_id=name)
2484
2485        if self._get_state(zone) == power_state.SHUTDOWN:
2486            self._power_on(instance, network_info)
2487            return
2488
2489        bootargs = []
2490        if CONF.solariszones.solariszones_boot_options:
2491            reset_bootargs = False
2492            persistent = 'False'
2493
2494            # Get any bootargs already set in the zone
2495            cur_bootargs = lookup_resource_property(zone, 'global', 'bootargs')
2496
2497            # Get any bootargs set in the instance metadata by the user
2498            meta_bootargs = instance.metadata.get('bootargs')
2499
2500            if meta_bootargs:
2501                bootargs = ['--', str(meta_bootargs)]
2502                persistent = str(
2503                    instance.metadata.get('bootargs_persist', 'False'))
2504                if cur_bootargs is not None and meta_bootargs != cur_bootargs:
2505                    with ZoneConfig(zone) as zc:
2506                        reset_bootargs = True
2507                        # Temporarily clear bootargs in zone config
2508                        zc.clear_resource_props('global', ['bootargs'])
2509
2510        try:
2511            self._unplug_vifs(instance)
2512            if reboot_type == 'SOFT':
2513                bootargs.insert(0, '-r')
2514                zone.shutdown(bootargs)
2515            else:
2516                zone.reboot(bootargs)
2517            self._plug_vifs(instance, network_info)
2518        except Exception as ex:
2519            reason = zonemgr_strerror(ex)
2520            LOG.exception(_("Unable to reboot instance '%s' via "
2521                            "zonemgr(3RAD): %s") % (name, reason))
2522            raise exception.InstanceRebootFailure(reason=reason)
2523        finally:
2524            if CONF.solariszones.solariszones_boot_options:
2525                if meta_bootargs and persistent.lower() == 'false':
2526                    # We have consumed the metadata bootargs and
2527                    # the user asked for them not to be persistent so
2528                    # clear them out now.
2529                    instance.metadata.pop('bootargs', None)
2530                    instance.metadata.pop('bootargs_persist', None)
2531
2532                if reset_bootargs:
2533                    with ZoneConfig(zone) as zc:
2534                        # restore original boot args in zone config
2535                        zc.setprop('global', 'bootargs', cur_bootargs)
2536
2537    def get_console_pool_info(self, console_type):
2538        # TODO(Vek): Need to pass context in for access to auth_token
2539        raise NotImplementedError()
2540
2541    def _get_console_output(self, instance):
2542        """Builds a string containing the console output (capped at
2543        MAX_CONSOLE_BYTES characters) by reassembling the log files
2544        that Solaris Zones framework maintains for each zone.
2545        """
2546        console_str = ""
2547        avail = MAX_CONSOLE_BYTES
2548
2549        # Examine the log files in most-recently modified order, keeping
2550        # track of the size of each file and of how many characters have
2551        # been seen. If there are still characters left to incorporate,
2552        # then the contents of the log file in question are prepended to
2553        # the console string built so far. When the number of characters
2554        # available has run out, the last fragment under consideration
2555        # will likely begin within the middle of a line. As such, the
2556        # start of the fragment up to the next newline is thrown away.
2557        # The remainder constitutes the start of the resulting console
2558        # output which is then prepended to the console string built so
2559        # far and the result returned.
2560        logfile_pattern = '/var/log/zones/%s.console*' % instance['name']
2561        logfiles = sorted(glob.glob(logfile_pattern), key=os.path.getmtime,
2562                          reverse=True)
2563        for file in logfiles:
2564            size = os.path.getsize(file)
2565            if size == 0:
2566                continue
2567            avail -= size
2568            with open(file, 'r') as log:
2569                if avail < 0:
2570                    (fragment, _) = utils.last_bytes(log, avail + size)
2571                    remainder = fragment.find('\n') + 1
2572                    console_str = fragment[remainder:] + console_str
2573                    break
2574                fragment = ''
2575                for line in log.readlines():
2576                    fragment += line
2577                console_str = fragment + console_str
2578        return console_str
2579
2580    def get_console_output(self, context, instance):
2581        """Get console output for an instance
2582
2583        :param context: security context
2584        :param instance: nova.objects.instance.Instance
2585        """
2586        return self._get_console_output(instance)
2587
2588    def get_vnc_console(self, context, instance):
2589        """Get connection info for a vnc console.
2590
2591        :param context: security context
2592        :param instance: nova.objects.instance.Instance
2593
2594        :returns an instance of console.type.ConsoleVNC
2595        """
2596        # Do not provide console access prematurely. Zone console access is
2597        # exclusive and zones that are still installing require their console.
2598        # Grabbing the zone console will break installation.
2599        name = instance['name']
2600        if instance['vm_state'] == vm_states.BUILDING:
2601            LOG.info(_("VNC console not available until zone '%s' has "
2602                     "completed installation. Try again later.") % name)
2603            raise exception.InstanceNotReady(instance_id=instance['uuid'])
2604
2605        if not self._has_vnc_console_service(instance):
2606            LOG.debug(_("Creating zone VNC console SMF service for "
2607                      "instance '%s'") % name)
2608            self._create_vnc_console_service(instance)
2609
2610        self._enable_vnc_console_service(instance)
2611        console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
2612
2613        # The console service sets an SMF instance property for the port
2614        # on which the VNC service is listening. The service needs to be
2615        # refreshed to reflect the current property value
2616        # TODO(npower): investigate using RAD instead of CLI invocation
2617        try:
2618            out, err = utils.execute('/usr/sbin/svccfg', '-s', console_fmri,
2619                                     'refresh')
2620        except processutils.ProcessExecutionError as ex:
2621            reason = ex.stderr
2622            LOG.exception(_("Unable to refresh zone VNC console SMF service "
2623                            "'%s': %s" % (console_fmri, reason)))
2624            raise
2625
2626        host = CONF.vnc.vncserver_proxyclient_address
2627        try:
2628            out, err = utils.execute('/usr/bin/svcprop', '-p', 'vnc/port',
2629                                     console_fmri)
2630            port = int(out.strip())
2631            return ctype.ConsoleVNC(host=host, port=port,
2632                                    internal_access_path=None)
2633        except processutils.ProcessExecutionError as ex:
2634            reason = ex.stderr
2635            LOG.exception(_("Unable to read VNC console port from zone VNC "
2636                            "console SMF service '%s': %s"
2637                          % (console_fmri, reason)))
2638
2639    def get_spice_console(self, context, instance):
2640        """Get connection info for a spice console.
2641
2642        :param context: security context
2643        :param instance: nova.objects.instance.Instance
2644
2645        :returns an instance of console.type.ConsoleSpice
2646        """
2647        raise NotImplementedError()
2648
2649    def get_rdp_console(self, context, instance):
2650        """Get connection info for a rdp console.
2651
2652        :param context: security context
2653        :param instance: nova.objects.instance.Instance
2654
2655        :returns an instance of console.type.ConsoleRDP
2656        """
2657        raise NotImplementedError()
2658
2659    def get_serial_console(self, context, instance):
2660        """Get connection info for a serial console.
2661
2662        :param context: security context
2663        :param instance: nova.objects.instance.Instance
2664
2665        :returns an instance of console.type.ConsoleSerial
2666        """
2667        raise NotImplementedError()
2668
2669    def get_mks_console(self, context, instance):
2670        """Get connection info for a MKS console.
2671
2672        :param context: security context
2673        :param instance: nova.objects.instance.Instance
2674
2675        :returns an instance of console.type.ConsoleMKS
2676        """
2677        raise NotImplementedError()
2678
2679    def _get_zone_diagnostics(self, zone):
2680        """Return data about Solaris Zone diagnostics."""
2681        if zone.id == -1:
2682            return None
2683
2684        diagnostics = defaultdict(lambda: 0)
2685
2686        for stat in ['lockedmem', 'nprocs', 'swapresv']:
2687            uri = "kstat:/zone_caps/caps/%s_zone_%d/%d" % (stat, zone.id,
2688                                                           zone.id)
2689            diagnostics[stat] = self._kstat_data(uri)['usage']
2690
2691        # Get the inital accumulated data kstat, then get the sys_zone kstat
2692        # and sum all the "*_cur" statistics in it. Then re-get the accumulated
2693        # kstat, and if the generation number hasn't changed, add its values.
2694        # If it has changed, try again a few times then give up because
2695        # something keeps pulling cpus out from under us.
2696
2697        accum_uri = "kstat:/zones/cpu/sys_zone_accum/%d" % zone.id
2698        uri = "kstat:/zones/cpu/sys_zone_%d" % zone.id
2699
2700        for _attempt in range(3):
2701            initial = self._kstat_data(accum_uri)
2702            data = self._kstat_data(uri)
2703            # The list of cpu kstats in data must contain at least one element
2704            # and all elements have the same map of statistics, since they're
2705            # all the same kstat type. This gets a list of all the statistics
2706            # which end in "_cur" from the first (guaranteed) kstat element.
2707            stats = [k for k in data[data.keys()[0]].getMap().keys() if
2708                     k.endswith("_cur")]
2709
2710            for stat in stats:
2711                diagnostics[stat[:-4]] += self._sum_kstat_statistic(data, stat)
2712
2713            final = self._kstat_data(accum_uri)
2714
2715            if initial['gen_num'] == final['gen_num']:
2716                for stat in stats:
2717                    # Remove the '_cur' from the statistic
2718                    diagnostics[stat[:-4]] += initial[stat[:-4]]
2719                break
2720        else:
2721            reason = (_("Could not get diagnostic info for instance '%s' "
2722                        "because the cpu list keeps changing.") % zone.name)
2723            raise nova.exception.MaxRetriesExceeded(reason)
2724
2725        # Remove any None valued elements from diagnostics and return it
2726        return {k: v for k, v in diagnostics.items() if v is not None}
2727
2728    def get_diagnostics(self, instance):
2729        """Return diagnostics data about the given instance.
2730
2731        :param nova.objects.instance.Instance instance:
2732            The instance to which the diagnostic data should be returned.
2733
2734        :return: Has a big overlap to the return value of the newer interface
2735            :func:`get_instance_diagnostics`
2736        :rtype: dict
2737        """
2738        # TODO(Vek): Need to pass context in for access to auth_token
2739        name = instance['name']
2740        zone = self._get_zone_by_name(name)
2741        if zone is None:
2742            raise exception.InstanceNotFound(instance_id=name)
2743        return self._get_zone_diagnostics(zone)
2744
2745    def get_instance_diagnostics(self, instance):
2746        """Return diagnostics data about the given instance.
2747
2748        :param nova.objects.instance.Instance instance:
2749            The instance to which the diagnostic data should be returned.
2750
2751        :return: Has a big overlap to the return value of the older interface
2752            :func:`get_diagnostics`
2753        :rtype: nova.virt.diagnostics.Diagnostics
2754        """
2755        raise NotImplementedError()
2756
2757    def get_all_bw_counters(self, instances):
2758        """Return bandwidth usage counters for each interface on each
2759           running VM.
2760
2761        :param instances: nova.objects.instance.InstanceList
2762        """
2763        raise NotImplementedError()
2764
2765    def get_all_volume_usage(self, context, compute_host_bdms):
2766        """Return usage info for volumes attached to vms on
2767           a given host.-
2768        """
2769        raise NotImplementedError()
2770
2771    def get_host_ip_addr(self):
2772        """Retrieves the IP address of the dom0
2773        """
2774        # TODO(Vek): Need to pass context in for access to auth_token
2775        return CONF.my_ip
2776
2777    def attach_volume(self, context, connection_info, instance, mountpoint,
2778                      disk_bus=None, device_type=None, encryption=None):
2779        """Attach the disk to the instance at mountpoint using info."""
2780        # TODO(npower): Apply mountpoint in a meaningful way to the zone
2781        # For security reasons this is not permitted in a Solaris branded zone.
2782        name = instance['name']
2783        zone = self._get_zone_by_name(name)
2784        if zone is None:
2785            raise exception.InstanceNotFound(instance_id=name)
2786
2787        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
2788        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
2789        if brand != ZONE_BRAND_SOLARIS_KZ:
2790            # Only Solaris kernel zones are currently supported.
2791            reason = (_("'%s' branded zones are not currently supported")
2792                      % brand)
2793            raise NotImplementedError(reason)
2794
2795        suri = self._suri_from_volume_info(connection_info)
2796
2797        resource_scope = [zonemgr.Property("storage", suri)]
2798        if connection_info.get('serial') is not None:
2799            volume = self._volume_api.get(context, connection_info['serial'])
2800            if volume['bootable']:
2801                resource_scope.append(zonemgr.Property("bootpri", "1"))
2802
2803        with ZoneConfig(zone) as zc:
2804            zc.addresource("device", resource_scope)
2805
2806        # apply the configuration to the running zone
2807        if zone.state == ZONE_STATE_RUNNING:
2808            try:
2809                zone.apply()
2810            except Exception as ex:
2811                reason = zonemgr_strerror(ex)
2812                LOG.exception(_("Unable to attach '%s' to instance '%s' via "
2813                                "zonemgr(3RAD): %s") % (suri, name, reason))
2814                with ZoneConfig(zone) as zc:
2815                    zc.removeresources("device", resource_scope)
2816                raise
2817
2818    def detach_volume(self, connection_info, instance, mountpoint,
2819                      encryption=None):
2820        """Detach the disk attached to the instance."""
2821        name = instance['name']
2822        zone = self._get_zone_by_name(name)
2823        if zone is None:
2824            raise exception.InstanceNotFound(instance_id=name)
2825
2826        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
2827        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
2828        if brand != ZONE_BRAND_SOLARIS_KZ:
2829            # Only Solaris kernel zones are currently supported.
2830            reason = (_("'%s' branded zones are not currently supported")
2831                      % brand)
2832            raise NotImplementedError(reason)
2833
2834        suri = self._suri_from_volume_info(connection_info)
2835
2836        # Check if the specific property value exists before attempting removal
2837        resource = lookup_resource_property_value(zone, "device", "storage",
2838                                                  suri)
2839        if not resource:
2840            LOG.warning(_("Storage resource '%s' is not attached to instance "
2841                        "'%s'") % (suri, name))
2842            return
2843
2844        with ZoneConfig(zone) as zc:
2845            zc.removeresources("device", [zonemgr.Property("storage", suri)])
2846
2847        # apply the configuration to the running zone
2848        if zone.state == ZONE_STATE_RUNNING:
2849            try:
2850                zone.apply()
2851            except:
2852                LOG.exception(_("Unable to apply the detach of resource '%s' "
2853                                "to running instance '%s' because the "
2854                                "resource is most likely in use.")
2855                              % (suri, name))
2856
2857                # re-add the entry to the zone configuration so that the
2858                # configuration will reflect what is in cinder before we raise
2859                # the exception, therefore failing the detach and leaving the
2860                # volume in-use.
2861                needed_props = ["storage", "bootpri"]
2862                props = filter(lambda prop: prop.name in needed_props,
2863                               resource.properties)
2864                with ZoneConfig(zone) as zc:
2865                    zc.addresource("device", props)
2866
2867                raise
2868
2869    def swap_volume(self, old_connection_info, new_connection_info,
2870                    instance, mountpoint, resize_to):
2871        """Replace the volume attached to the given `instance`.
2872
2873        :param dict old_connection_info:
2874            The volume for this connection gets detached from the given
2875            `instance`.
2876        :param dict new_connection_info:
2877            The volume for this connection gets attached to the given
2878            'instance'.
2879        :param nova.objects.instance.Instance instance:
2880            The instance whose volume gets replaced by another one.
2881        :param str mountpoint:
2882            The mountpoint in the instance where the volume for
2883            `old_connection_info` is attached to.
2884        :param int resize_to:
2885            If the new volume is larger than the old volume, it gets resized
2886            to the given size (in Gigabyte) of `resize_to`.
2887
2888        :return: None
2889        """
2890        raise NotImplementedError()
2891
2892    def attach_interface(self, instance, image_meta, vif):
2893        """Use hotplug to add a network interface to a running instance.
2894
2895        The counter action to this is :func:`detach_interface`.
2896
2897        :param nova.objects.instance.Instance instance:
2898            The instance which will get an additional network interface.
2899        :param nova.objects.ImageMeta image_meta:
2900            The metadata of the image of the instance.
2901        :param nova.network.model.NetworkInfo vif:
2902            The object which has the information about the interface to attach.
2903
2904        :raise nova.exception.NovaException: If the attach fails.
2905
2906        :return: None
2907        """
2908        name = instance['name']
2909        zone = self._get_zone_by_name(name)
2910        if zone is None:
2911            raise exception.InstanceNotFound(instance_id=name)
2912
2913        ctxt = nova_context.get_admin_context()
2914        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
2915        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
2916        anetname = self._set_ovs_info(ctxt, zone, brand, False, vif)
2917
2918        # apply the configuration if the vm is ACTIVE
2919        if instance['vm_state'] == vm_states.ACTIVE:
2920            try:
2921                zone.apply()
2922            except Exception as ex:
2923                reason = zonemgr_strerror(ex)
2924                msg = (_("Unable to attach interface to instance '%s' via "
2925                         "zonemgr(3RAD): %s") % (name, reason))
2926                with ZoneConfig(zone) as zc:
2927                    prop_filter = [zonemgr.Property('mac-address',
2928                                                    vif['address'])]
2929                    zc.removeresources('anet', prop_filter)
2930                raise nova.exception.NovaException(msg)
2931
2932            # add port to ovs bridge
2933            anet = ''.join([name, '/', anetname])
2934            self._ovs_add_port(instance, vif, anet)
2935
2936    def detach_interface(self, instance, vif):
2937        """Use hotunplug to remove a network interface from a running instance.
2938
2939        The counter action to this is :func:`attach_interface`.
2940
2941        :param nova.objects.instance.Instance instance:
2942            The instance which gets a network interface removed.
2943        :param nova.network.model.NetworkInfo vif:
2944            The object which has the information about the interface to detach.
2945
2946        :raise nova.exception.NovaException: If the detach fails.
2947
2948        :return: None
2949        """
2950        name = instance['name']
2951        zone = self._get_zone_by_name(name)
2952        if zone is None:
2953            raise exception.InstanceNotFound(instance_id=name)
2954
2955        # Check if the specific property value exists before attempting removal
2956        resource = lookup_resource_property_value(zone, 'anet',
2957                                                  'mac-address',
2958                                                  vif['address'])
2959        if not resource:
2960            msg = (_("Interface with MAC address '%s' is not attached to "
2961                     "instance '%s'.") % (vif['address'], name))
2962            raise nova.exception.NovaException(msg)
2963
2964        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
2965        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
2966        for prop in resource.properties:
2967            if brand == ZONE_BRAND_SOLARIS and prop.name == 'linkname':
2968                anetname = prop.value
2969                break
2970            elif brand != ZONE_BRAND_SOLARIS and prop.name == 'id':
2971                anetname = 'net%s' % prop.value
2972                break
2973
2974        with ZoneConfig(zone) as zc:
2975            zc.removeresources('anet', [zonemgr.Property('mac-address',
2976                                                         vif['address'])])
2977
2978        # apply the configuration if the vm is ACTIVE
2979        if instance['vm_state'] == vm_states.ACTIVE:
2980            try:
2981                zone.apply()
2982            except:
2983                msg = (_("Unable to detach interface '%s' from running "
2984                         "instance '%s' because the resource is most likely "
2985                         "in use.") % (anetname, name))
2986                needed_props = ["lower-link", "configure-allowed-address",
2987                                "mac-address", "mtu"]
2988                if brand == ZONE_BRAND_SOLARIS:
2989                    needed_props.append("linkname")
2990                else:
2991                    needed_props.append("id")
2992
2993                props = filter(lambda prop: prop.name in needed_props,
2994                               resource.properties)
2995                with ZoneConfig(zone) as zc:
2996                    zc.addresource('anet', props)
2997                raise nova.exception.NovaException(msg)
2998
2999            # remove anet from OVS bridge
3000            port = ''.join([name, '/', anetname])
3001            self._ovs_delete_port(port)
3002
3003    def _cleanup_migrate_disk(self, context, instance, volume):
3004        """Make a best effort at cleaning up the volume that was created to
3005        hold the new root disk
3006
3007        :param context: the context for the migration/resize
3008        :param instance: nova.objects.instance.Instance being migrated/resized
3009        :param volume: new volume created by the call to cinder create
3010        """
3011        try:
3012            self._volume_api.delete(context, volume['id'])
3013        except Exception as err:
3014            LOG.exception(_("Unable to cleanup the resized volume: %s" % err))
3015
3016    def migrate_disk_and_power_off(self, context, instance, dest,
3017                                   flavor, network_info,
3018                                   block_device_info=None,
3019                                   timeout=0, retry_interval=0):
3020        """Transfers the disk of a running instance in multiple phases, turning
3021        off the instance before the end.
3022
3023        :param nova.objects.instance.Instance instance:
3024            The instance whose disk should be migrated.
3025        :param str dest:
3026            The IP address of the destination host.
3027        :param nova.objects.flavor.Flavor flavor:
3028            The flavor of the instance whose disk get migrated.
3029        :param nova.network.model.NetworkInfo network_info:
3030            The network information of the given `instance`.
3031        :param dict block_device_info:
3032            Information about the block devices.
3033        :param int timeout:
3034            The time in seconds to wait for the guest OS to shutdown.
3035        :param int retry_interval:
3036            How often to signal guest while waiting for it to shutdown.
3037
3038        :return: A list of disk information dicts in JSON format.
3039        :rtype: str
3040        """
3041        LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
3042
3043        samehost = (dest == self.get_host_ip_addr())
3044        if samehost:
3045            instance.system_metadata['resize_samehost'] = samehost
3046
3047        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
3048        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
3049        if brand != ZONE_BRAND_SOLARIS_KZ and not samehost:
3050            reason = (_("'%s' branded zones do not currently support resize "
3051                        "to a different host.") % brand)
3052            raise exception.MigrationPreCheckError(reason=reason)
3053
3054        if brand != flavor['extra_specs'].get('zonecfg:brand'):
3055            reason = (_("Unable to change brand of zone during resize."))
3056            raise exception.MigrationPreCheckError(reason=reason)
3057
3058        orgb = instance['root_gb']
3059        nrgb = flavor.root_gb
3060        if orgb > nrgb:
3061            msg = (_("Unable to resize to a smaller boot volume."))
3062            raise exception.ResizeError(reason=msg)
3063
3064        self.power_off(instance, timeout, retry_interval)
3065
3066        disk_info = None
3067        if nrgb > orgb or not samehost:
3068            bmap = block_device_info.get('block_device_mapping')
3069            rootmp = instance.root_device_name
3070            for entry in bmap:
3071                mountdev = entry['mount_device'].rpartition('/')[2]
3072                if mountdev == rootmp:
3073                    root_ci = entry['connection_info']
3074                    break
3075            else:
3076                # If this is a non-global zone that is on the same host and is
3077                # simply using a dataset, the disk size is purely an OpenStack
3078                # quota.  We can continue without doing any disk work.
3079                if samehost and brand == ZONE_BRAND_SOLARIS:
3080                    return disk_info
3081                else:
3082                    msg = (_("Cannot find an attached root device."))
3083                    raise exception.ResizeError(reason=msg)
3084
3085            if root_ci['driver_volume_type'] == 'iscsi':
3086                volume_id = root_ci['data']['volume_id']
3087            else:
3088                volume_id = root_ci['serial']
3089
3090            if volume_id is None:
3091                msg = (_("Cannot find an attached root device."))
3092                raise exception.ResizeError(reason=msg)
3093
3094            vinfo = self._volume_api.get(context, volume_id)
3095            newvolume = self._volume_api.create(
3096                context, orgb, vinfo['display_name'] + '-resized',
3097                vinfo['display_description'], source_volume=vinfo)
3098
3099            instance.system_metadata['old_instance_volid'] = volume_id
3100            instance.system_metadata['new_instance_volid'] = newvolume['id']
3101
3102            # TODO(npower): Polling is what nova/compute/manager also does when
3103            # creating a new volume, so we do likewise here.
3104            while True:
3105                volume = self._volume_api.get(context, newvolume['id'])
3106                if volume['status'] != 'creating':
3107                    break
3108                greenthread.sleep(1)
3109
3110            if nrgb > orgb:
3111                try:
3112                    self._volume_api.extend(context, newvolume['id'], nrgb)
3113                except Exception:
3114                    LOG.exception(_("Failed to extend the new volume"))
3115                    self._cleanup_migrate_disk(context, instance, newvolume)
3116                    raise
3117
3118            disk_info = newvolume
3119
3120        return disk_info
3121
3122    def snapshot(self, context, instance, image_id, update_task_state):
3123        """Snapshots the specified instance.
3124
3125        :param context: security context
3126        :param instance: nova.objects.instance.Instance
3127        :param image_id: Reference to a pre-created image that will
3128                         hold the snapshot.
3129        """
3130        name = instance['name']
3131        zone = self._get_zone_by_name(name)
3132        if zone is None:
3133            raise exception.InstanceNotFound(instance_id=name)
3134
3135        # look to see if the zone is a kernel zone and is powered off.  If it
3136        # is raise an exception before trying to archive it
3137        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
3138        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
3139        if zone.state != ZONE_STATE_RUNNING and \
3140                brand == ZONE_BRAND_SOLARIS_KZ:
3141            raise exception.InstanceNotRunning(instance_id=name)
3142
3143        # Get original base image info
3144        (base_service, base_id) = glance.get_remote_image_service(
3145            context, instance['image_ref'])
3146        try:
3147            base = base_service.show(context, base_id)
3148        except exception.ImageNotFound:
3149            base = {}
3150
3151        snapshot_service, snapshot_id = glance.get_remote_image_service(
3152            context, image_id)
3153
3154        # Build updated snapshot image metadata
3155        snapshot = snapshot_service.show(context, snapshot_id)
3156        metadata = {
3157            'is_public': False,
3158            'status': 'active',
3159            'name': snapshot['name'],
3160            'properties': {
3161                'image_location': 'snapshot',
3162                'image_state': 'available',
3163                'owner_id': instance['project_id'],
3164                'instance_uuid': instance['uuid'],
3165                'image_type': snapshot['properties']['image_type'],
3166            }
3167        }
3168        # Match architecture, hypervisor_type and vm_mode properties to base
3169        # image.
3170        for prop in ['architecture', 'hypervisor_type', 'vm_mode']:
3171            if prop in base.get('properties', {}):
3172                base_prop = base['properties'][prop]
3173                metadata['properties'][prop] = base_prop
3174
3175        # Set generic container and disk formats initially in case the glance
3176        # service rejects Unified Archives (uar) and ZFS in metadata.
3177        metadata['container_format'] = 'ovf'
3178        metadata['disk_format'] = 'raw'
3179
3180        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
3181        snapshot_directory = CONF.solariszones.solariszones_snapshots_directory
3182        fileutils.ensure_tree(snapshot_directory)
3183        snapshot_name = uuid.uuid4().hex
3184
3185        with utils.tempdir(dir=snapshot_directory) as tmpdir:
3186            out_path = os.path.join(tmpdir, snapshot_name)
3187            zone_name = instance['name']
3188            utils.execute('/usr/sbin/archiveadm', 'create', '--root-only',
3189                          '-z', zone_name, out_path)
3190
3191            LOG.info(_("Snapshot extracted, beginning image upload"),
3192                     instance=instance)
3193            try:
3194                # Upload the archive image to the image service
3195                update_task_state(
3196                    task_state=task_states.IMAGE_UPLOADING,
3197                    expected_state=task_states.IMAGE_PENDING_UPLOAD)
3198                with open(out_path, 'r') as image_file:
3199                    snapshot_service.update(context, image_id, metadata,
3200                                            image_file)
3201                    LOG.info(_("Snapshot image upload complete"),
3202                             instance=instance)
3203                try:
3204                    # Try to update the image metadata container and disk
3205                    # formats more suitably for a unified archive if the
3206                    # glance server recognises them.
3207                    metadata['container_format'] = 'uar'
3208                    metadata['disk_format'] = 'zfs'
3209                    snapshot_service.update(context, image_id, metadata, None)
3210                except exception.Invalid:
3211                    LOG.warning(_("Image service rejected image metadata "
3212                                  "container and disk formats 'uar' and "
3213                                  "'zfs'. Using generic values 'ovf' and "
3214                                  "'raw' as fallbacks."))
3215            finally:
3216                # Delete the snapshot image file source
3217                os.unlink(out_path)
3218
3219    def post_interrupted_snapshot_cleanup(self, context, instance):
3220        """Cleans up any resources left after an interrupted snapshot.
3221
3222        :param context: security context
3223        :param instance: nova.objects.instance.Instance
3224        """
3225        pass
3226
3227    def _cleanup_finish_migration(self, context, instance, disk_info,
3228                                  network_info, samehost):
3229        """Best effort attempt at cleaning up any additional resources that are
3230        not directly managed by Nova or Cinder so as not to leak these
3231        resources.
3232        """
3233        if disk_info:
3234            self._volume_api.detach(context, disk_info['id'])
3235            self._volume_api.delete(context, disk_info['id'])
3236
3237            old_rvid = instance.system_metadata.get('old_instance_volid')
3238            if old_rvid:
3239                connector = self.get_volume_connector(instance)
3240                connection_info = self._volume_api.initialize_connection(
3241                    context, old_rvid, connector)
3242
3243                new_rvid = instance.system_metadata['new_instance_volid']
3244
3245                rootmp = instance.root_device_name
3246                self._volume_api.attach(context, old_rvid, instance['uuid'],
3247                                        rootmp)
3248
3249                bdmobj = objects.BlockDeviceMapping()
3250                bdm = bdmobj.get_by_volume_id(context, new_rvid)
3251                bdm['connection_info'] = jsonutils.dumps(connection_info)
3252                bdm['volume_id'] = old_rvid
3253                bdm.save()
3254
3255                del instance.system_metadata['new_instance_volid']
3256                del instance.system_metadata['old_instance_volid']
3257
3258        if not samehost:
3259            self.destroy(context, instance, network_info)
3260            instance['host'] = instance['launched_on']
3261            instance['node'] = instance['launched_on']
3262
3263    def finish_migration(self, context, migration, instance, disk_info,
3264                         network_info, image_meta, resize_instance,
3265                         block_device_info=None, power_on=True):
3266        """Completes a resize/migration.
3267
3268        :param context: the context for the migration/resize
3269        :param migration: the migrate/resize information
3270        :param instance: nova.objects.instance.Instance being migrated/resized
3271        :param disk_info: the newly transferred disk information
3272        :param network_info:
3273           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
3274        :param nova.objects.ImageMeta image_meta:
3275            The metadata of the image of the instance.
3276        :param resize_instance: True if the instance is being resized,
3277                                False otherwise
3278        :param block_device_info: instance volume block device info
3279        :param power_on: True if the instance should be powered on, False
3280                         otherwise
3281        """
3282        samehost = (migration['dest_node'] == migration['source_node'])
3283        if samehost:
3284            instance.system_metadata['old_vm_state'] = vm_states.RESIZED
3285
3286        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
3287        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
3288        name = instance['name']
3289
3290        if disk_info:
3291            bmap = block_device_info.get('block_device_mapping')
3292            rootmp = instance['root_device_name']
3293            for entry in bmap:
3294                if entry['mount_device'] == rootmp:
3295                    mount_dev = entry['mount_device']
3296                    root_ci = entry['connection_info']
3297                    break
3298
3299        try:
3300            if samehost:
3301                cpu = instance.vcpus
3302                mem = instance.memory_mb
3303                self._set_num_cpu(name, cpu, brand)
3304                self._set_memory_cap(name, mem, brand)
3305
3306                # Add the new disk to the volume if the size of the disk
3307                # changed
3308                if disk_info:
3309                    rgb = instance.root_gb
3310                    self._resize_disk_migration(context, instance,
3311                                                root_ci['serial'],
3312                                                disk_info['id'], rgb,
3313                                                mount_dev)
3314
3315            else:
3316                # No need to check disk_info here, because when not on the
3317                # same host a disk_info is always passed in.
3318                mount_dev = 'c1d0'
3319                root_serial = root_ci['serial']
3320                connection_info = self._resize_disk_migration(context,
3321                                                              instance,
3322                                                              root_serial,
3323                                                              disk_info['id'],
3324                                                              0, mount_dev,
3325                                                              samehost)
3326
3327                self._create_config(context, instance, network_info,
3328                                    connection_info, None)
3329
3330                zone = self._get_zone_by_name(name)
3331                if zone is None:
3332                    raise exception.InstanceNotFound(instance_id=name)
3333
3334                zone.attach(['-x', 'initialize-hostdata'])
3335
3336                bmap = block_device_info.get('block_device_mapping')
3337                for entry in bmap:
3338                    if entry['mount_device'] != rootmp:
3339                        self.attach_volume(context, entry['connection_info'],
3340                                           instance, entry['mount_device'])
3341
3342            if power_on:
3343                self._power_on(instance, network_info)
3344
3345                if brand == ZONE_BRAND_SOLARIS:
3346                    return
3347
3348                # Toggle the autoexpand to extend the size of the rpool.
3349                # We need to sleep for a few seconds to make sure the zone
3350                # is in a state to accept the toggle.  Once bugs are fixed
3351                # around the autoexpand and the toggle is no longer needed
3352                # or zone.boot() returns only after the zone is ready we
3353                # can remove this hack.
3354                greenthread.sleep(15)
3355                out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
3356                                         '/usr/sbin/zpool', 'set',
3357                                         'autoexpand=off', 'rpool')
3358                out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
3359                                         '/usr/sbin/zpool', 'set',
3360                                         'autoexpand=on', 'rpool')
3361        except Exception:
3362            # Attempt to cleanup the new zone and new volume to at least
3363            # give the user a chance to recover without too many hoops
3364            self._cleanup_finish_migration(context, instance, disk_info,
3365                                           network_info, samehost)
3366            raise
3367
3368    def confirm_migration(self, context, migration, instance, network_info):
3369        """Confirms a resize/migration, destroying the source VM.
3370
3371        :param instance: nova.objects.instance.Instance
3372        """
3373        samehost = (migration['dest_host'] == self.get_host_ip_addr())
3374        old_rvid = instance.system_metadata.get('old_instance_volid')
3375        new_rvid = instance.system_metadata.get('new_instance_volid')
3376        if new_rvid and old_rvid:
3377            new_vname = instance['display_name'] + "-" + self._rootzpool_suffix
3378            del instance.system_metadata['old_instance_volid']
3379            del instance.system_metadata['new_instance_volid']
3380
3381            self._volume_api.delete(context, old_rvid)
3382            self._volume_api.update(context, new_rvid,
3383                                    {'display_name': new_vname})
3384
3385        if not samehost:
3386            self.destroy(context, instance, network_info)
3387        else:
3388            del instance.system_metadata['resize_samehost']
3389
3390    def _resize_disk_migration(self, context, instance, configured,
3391                               replacement, newvolumesz, mountdev,
3392                               samehost=True):
3393        """Handles the zone root volume switch-over or simply
3394        initializing the connection for the new zone if not resizing to the
3395        same host
3396
3397        :param context: the context for the _resize_disk_migration
3398        :param instance: nova.objects.instance.Instance being resized
3399        :param configured: id of the current configured volume
3400        :param replacement: id of the new volume
3401        :param newvolumesz: size of the new volume
3402        :param mountdev: the mount point of the device
3403        :param samehost: is the resize happening on the same host
3404        """
3405        connector = self.get_volume_connector(instance)
3406        connection_info = self._volume_api.initialize_connection(context,
3407                                                                 replacement,
3408                                                                 connector)
3409        connection_info['serial'] = replacement
3410        rootmp = instance.root_device_name
3411
3412        if samehost:
3413            name = instance['name']
3414            zone = self._get_zone_by_name(name)
3415            if zone is None:
3416                raise exception.InstanceNotFound(instance_id=name)
3417
3418            # Need to detach the zone and re-attach the zone if this is a
3419            # non-global zone so that the update of the rootzpool resource does
3420            # not fail.
3421            if zone.brand == ZONE_BRAND_SOLARIS:
3422                zone.detach()
3423
3424            try:
3425                self._set_boot_device(name, connection_info, zone.brand)
3426            finally:
3427                if zone.brand == ZONE_BRAND_SOLARIS:
3428                    zone.attach()
3429
3430        try:
3431            self._volume_api.detach(context, configured)
3432        except Exception:
3433            LOG.exception(_("Failed to detach the volume"))
3434            raise
3435
3436        try:
3437            self._volume_api.attach(context, replacement, instance['uuid'],
3438                                    rootmp)
3439        except Exception:
3440            LOG.exception(_("Failed to attach the volume"))
3441            raise
3442
3443        bdmobj = objects.BlockDeviceMapping()
3444        bdm = bdmobj.get_by_volume_id(context, configured)
3445        bdm['connection_info'] = jsonutils.dumps(connection_info)
3446        bdm['volume_id'] = replacement
3447        bdm.save()
3448
3449        if not samehost:
3450            return connection_info
3451
3452    def finish_revert_migration(self, context, instance, network_info,
3453                                block_device_info=None, power_on=True):
3454        """Finish reverting a resize/migration.
3455
3456        :param context: the context for the finish_revert_migration
3457        :param instance: nova.objects.instance.Instance being migrated/resized
3458        :param network_info:
3459           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
3460        :param block_device_info: instance volume block device info
3461        :param power_on: True if the instance should be powered on, False
3462                         otherwise
3463        """
3464        # If this is not a samehost migration then we need to re-attach the
3465        # original volume to the instance. Otherwise we need to update the
3466        # original zone configuration.
3467        samehost = instance.system_metadata.get('resize_samehost')
3468        if samehost:
3469            self._samehost_revert_resize(context, instance, network_info,
3470                                         block_device_info)
3471            del instance.system_metadata['resize_samehost']
3472
3473        old_rvid = instance.system_metadata.get('old_instance_volid')
3474        if old_rvid:
3475            connector = self.get_volume_connector(instance)
3476            connection_info = self._volume_api.initialize_connection(context,
3477                                                                     old_rvid,
3478                                                                     connector)
3479
3480            new_rvid = instance.system_metadata['new_instance_volid']
3481            self._volume_api.detach(context, new_rvid)
3482            self._volume_api.delete(context, new_rvid)
3483
3484            rootmp = instance.root_device_name
3485            self._volume_api.attach(context, old_rvid, instance['uuid'],
3486                                    rootmp)
3487
3488            bdmobj = objects.BlockDeviceMapping()
3489            bdm = bdmobj.get_by_volume_id(context, new_rvid)
3490            bdm['connection_info'] = jsonutils.dumps(connection_info)
3491            bdm['volume_id'] = old_rvid
3492            bdm.save()
3493
3494            del instance.system_metadata['new_instance_volid']
3495            del instance.system_metadata['old_instance_volid']
3496        else:
3497            new_rvid = instance.system_metadata.get('new_instance_volid')
3498            if new_rvid:
3499                del instance.system_metadata['new_instance_volid']
3500                self._volume_api.delete(context, new_rvid)
3501
3502        self._power_on(instance, network_info)
3503
3504    def pause(self, instance):
3505        """Pause the given instance.
3506
3507        A paused instance doesn't use CPU cycles of the host anymore. The
3508        state of the VM could be stored in the memory or storage space of the
3509        host, depending on the underlying hypervisor technology.
3510        A "stronger" version of `pause` is :func:'suspend'.
3511        The counter action for `pause` is :func:`unpause`.
3512
3513        :param nova.objects.instance.Instance instance:
3514            The instance which should be paused.
3515
3516        :return: None
3517        """
3518        # TODO(Vek): Need to pass context in for access to auth_token
3519        raise NotImplementedError()
3520
3521    def unpause(self, instance):
3522        """Unpause the given paused instance.
3523
3524        The paused instance gets unpaused and will use CPU cycles of the
3525        host again. The counter action for 'unpause' is :func:`pause`.
3526        Depending on the underlying hypervisor technology, the guest has the
3527        same state as before the 'pause'.
3528
3529        :param nova.objects.instance.Instance instance:
3530            The instance which should be unpaused.
3531
3532        :return: None
3533        """
3534        # TODO(Vek): Need to pass context in for access to auth_token
3535        raise NotImplementedError()
3536
3537    def suspend(self, context, instance):
3538        """Suspend the specified instance.
3539
3540        A suspended instance doesn't use CPU cycles or memory of the host
3541        anymore. The state of the instance could be persisted on the host
3542        and allocate storage space this way. A "softer" way of `suspend`
3543        is :func:`pause`. The counter action for `suspend` is :func:`resume`.
3544
3545        :param nova.context.RequestContext context:
3546            The context for the suspend.
3547        :param nova.objects.instance.Instance instance:
3548            The instance to suspend.
3549
3550        :return: None
3551        """
3552        name = instance['name']
3553        zone = self._get_zone_by_name(name)
3554        if zone is None:
3555            raise exception.InstanceNotFound(instance_id=name)
3556
3557        if zone.brand != ZONE_BRAND_SOLARIS_KZ:
3558            # Only Solaris kernel zones are currently supported.
3559            reason = (_("'%s' branded zones do not currently support "
3560                        "suspend. Use 'nova reset-state --active %s' "
3561                        "to reset instance state back to 'active'.")
3562                      % (zone.brand, instance['display_name']))
3563            raise exception.InstanceSuspendFailure(reason=reason)
3564
3565        if self._get_state(zone) != power_state.RUNNING:
3566            reason = (_("Instance '%s' is not running.") % name)
3567            raise exception.InstanceSuspendFailure(reason=reason)
3568
3569        try:
3570            new_path = os.path.join(CONF.solariszones.zones_suspend_path,
3571                                    '%{zonename}')
3572            if not lookup_resource(zone, 'suspend'):
3573                # add suspend if not configured
3574                self._set_suspend(instance)
3575            elif lookup_resource_property(zone, 'suspend', 'path') != new_path:
3576                # replace the old suspend resource with the new one
3577                with ZoneConfig(zone) as zc:
3578                    zc.removeresources('suspend')
3579                self._set_suspend(instance)
3580
3581            zone.suspend()
3582            self._unplug_vifs(instance)
3583        except Exception as ex:
3584            reason = zonemgr_strerror(ex)
3585            LOG.exception(_("Unable to suspend instance '%s' via "
3586                            "zonemgr(3RAD): %s") % (name, reason))
3587            raise exception.InstanceSuspendFailure(reason=reason)
3588
3589    def resume(self, context, instance, network_info, block_device_info=None):
3590        """resume the specified suspended instance.
3591
3592        The suspended instance gets resumed and will use CPU cycles and memory
3593        of the host again. The counter action for 'resume' is :func:`suspend`.
3594        Depending on the underlying hypervisor technology, the guest has the
3595        same state as before the 'suspend'.
3596
3597        :param nova.context.RequestContext context:
3598            The context for the resume.
3599        :param nova.objects.instance.Instance instance:
3600            The suspended instance to resume.
3601        :param nova.network.model.NetworkInfo network_info:
3602            Necessary network information for the resume.
3603        :param dict block_device_info:
3604            Instance volume block device info.
3605
3606        :return: None
3607        """
3608        name = instance['name']
3609        zone = self._get_zone_by_name(name)
3610        if zone is None:
3611            raise exception.InstanceNotFound(instance_id=name)
3612
3613        if zone.brand != ZONE_BRAND_SOLARIS_KZ:
3614            # Only Solaris kernel zones are currently supported.
3615            reason = (_("'%s' branded zones do not currently support "
3616                      "resume.") % zone.brand)
3617            raise exception.InstanceResumeFailure(reason=reason)
3618
3619        # check that the instance is suspended
3620        if self._get_state(zone) != power_state.SHUTDOWN:
3621            reason = (_("Instance '%s' is not suspended.") % name)
3622            raise exception.InstanceResumeFailure(reason=reason)
3623
3624        try:
3625            zone.boot()
3626            self._plug_vifs(instance, network_info)
3627        except Exception as ex:
3628            reason = zonemgr_strerror(ex)
3629            LOG.exception(_("Unable to resume instance '%s' via "
3630                            "zonemgr(3RAD): %s") % (name, reason))
3631            raise exception.InstanceResumeFailure(reason=reason)
3632
3633    def resume_state_on_host_boot(self, context, instance, network_info,
3634                                  block_device_info=None):
3635        """resume guest state when a host is booted.
3636
3637        :param instance: nova.objects.instance.Instance
3638        """
3639        name = instance['name']
3640        zone = self._get_zone_by_name(name)
3641        if zone is None:
3642            raise exception.InstanceNotFound(instance_id=name)
3643
3644        # TODO(dcomay): Should reconcile with value of zone's autoboot
3645        # property.
3646        if self._get_state(zone) not in (power_state.CRASHED,
3647                                         power_state.SHUTDOWN):
3648            return
3649
3650        self._power_on(instance, network_info)
3651
3652    def rescue(self, context, instance, network_info, image_meta,
3653               rescue_password):
3654        """Rescue the specified instance.
3655
3656        :param nova.context.RequestContext context:
3657            The context for the rescue.
3658        :param nova.objects.instance.Instance instance:
3659            The instance being rescued.
3660        :param nova.network.model.NetworkInfo network_info:
3661            Necessary network information for the resume.
3662        :param nova.objects.ImageMeta image_meta:
3663            The metadata of the image of the instance.
3664        :param rescue_password: new root password to set for rescue.
3665        """
3666        raise NotImplementedError()
3667
3668    def set_bootable(self, instance, is_bootable):
3669        """Set the ability to power on/off an instance.
3670
3671        :param instance: nova.objects.instance.Instance
3672        """
3673        raise NotImplementedError()
3674
3675    def unrescue(self, instance, network_info):
3676        """Unrescue the specified instance.
3677
3678        :param instance: nova.objects.instance.Instance
3679        """
3680        # TODO(Vek): Need to pass context in for access to auth_token
3681        raise NotImplementedError()
3682
3683    def power_off(self, instance, timeout=0, retry_interval=0):
3684        """Power off the specified instance.
3685
3686        :param instance: nova.objects.instance.Instance
3687        :param timeout: time to wait for GuestOS to shutdown
3688        :param retry_interval: How often to signal guest while
3689                               waiting for it to shutdown
3690        """
3691        self._power_off(instance, 'SOFT')
3692
3693    def power_on(self, context, instance, network_info,
3694                 block_device_info=None):
3695        """Power on the specified instance.
3696
3697        :param instance: nova.objects.instance.Instance
3698        """
3699        self._power_on(instance, network_info)
3700
3701    def trigger_crash_dump(self, instance):
3702        """Trigger crash dump mechanism on the given instance.
3703
3704        Stalling instances can be triggered to dump the crash data. How the
3705        guest OS reacts in details, depends on the configuration of it.
3706
3707        :param nova.objects.instance.Instance instance:
3708            The instance where the crash dump should be triggered.
3709
3710        :return: None
3711        """
3712        raise NotImplementedError()
3713
3714    def soft_delete(self, instance):
3715        """Soft delete the specified instance.
3716
3717        A soft-deleted instance doesn't allocate any resources anymore, but is
3718        still available as a database entry. The counter action :func:`restore`
3719        uses the database entry to create a new instance based on that.
3720
3721        :param nova.objects.instance.Instance instance:
3722            The instance to soft-delete.
3723
3724        :return: None
3725        """
3726        raise NotImplementedError()
3727
3728    def restore(self, instance):
3729        """Restore the specified soft-deleted instance.
3730
3731        The restored instance will be automatically booted. The counter action
3732        for `restore` is :func:`soft_delete`.
3733
3734        :param nova.objects.instance.Instance instance:
3735            The soft-deleted instance which should be restored from the
3736            soft-deleted data.
3737
3738        :return: None
3739        """
3740        raise NotImplementedError()
3741
3742    def _get_zpool_property(self, prop, zpool):
3743        """Get the value of property from the zpool."""
3744        try:
3745            value = None
3746            (out, _err) = utils.execute('/usr/sbin/zpool', 'get', prop, zpool)
3747        except processutils.ProcessExecutionError as ex:
3748            reason = ex.stderr
3749            LOG.exception(_("Failed to get property '%s' from zpool '%s': %s")
3750                          % (prop, zpool, reason))
3751            return value
3752
3753        zpool_prop = out.splitlines()[1].split()
3754        if zpool_prop[1] == prop:
3755            value = zpool_prop[2]
3756        return value
3757
3758    def _update_host_stats(self):
3759        """Update currently known host stats."""
3760        host_stats = {}
3761
3762        host_stats['vcpus'] = os.sysconf('SC_NPROCESSORS_ONLN')
3763
3764        pages = os.sysconf('SC_PHYS_PAGES')
3765        host_stats['memory_mb'] = self._pages_to_kb(pages) / 1024
3766
3767        out, err = utils.execute('/usr/sbin/zfs', 'list', '-Ho', 'name', '/')
3768        root_zpool = out.split('/')[0]
3769        size = self._get_zpool_property('size', root_zpool)
3770        if size is not None:
3771            host_stats['local_gb'] = Size(size).get(Size.gb_units)
3772        else:
3773            host_stats['local_gb'] = 0
3774
3775        # Account for any existing processor sets by looking at the the number
3776        # of CPUs not assigned to any processor sets.
3777        uri = "kstat:/misc/unix/pset/0"
3778        data = self._kstat_data(uri)
3779
3780        if data is not None:
3781            host_stats['vcpus_used'] = host_stats['vcpus'] - data['ncpus']
3782        else:
3783            host_stats['vcpus_used'] = 0
3784
3785        # Subtract the number of free pages from the total to get the used.
3786        uri = "kstat:/pages/unix/system_pages"
3787        data = self._kstat_data(uri)
3788        if data is not None:
3789            free_ram = data['freemem']
3790            free_ram = self._pages_to_kb(free_ram) / 1024
3791            host_stats['memory_mb_used'] = host_stats['memory_mb'] - free_ram
3792        else:
3793            host_stats['memory_mb_used'] = 0
3794
3795        free = self._get_zpool_property('free', root_zpool)
3796        if free is not None:
3797            free_disk_gb = Size(free).get(Size.gb_units)
3798        else:
3799            free_disk_gb = 0
3800        host_stats['local_gb_used'] = host_stats['local_gb'] - free_disk_gb
3801
3802        host_stats['hypervisor_type'] = 'solariszones'
3803        host_stats['hypervisor_version'] = \
3804            versionutils.convert_version_to_int(HYPERVISOR_VERSION)
3805        host_stats['hypervisor_hostname'] = self._uname[1]
3806
3807        if self._uname[4] == 'i86pc':
3808            architecture = arch.X86_64
3809        else:
3810            architecture = arch.SPARC64
3811        cpu_info = {
3812            'arch': architecture
3813        }
3814        host_stats['cpu_info'] = jsonutils.dumps(cpu_info)
3815
3816        host_stats['disk_available_least'] = free_disk_gb
3817        host_stats['supported_instances'] = [
3818            (architecture, hv_type.SOLARISZONES, vm_mode.SOL)
3819        ]
3820        host_stats['numa_topology'] = None
3821
3822        self._host_stats = host_stats
3823
3824    def get_available_resource(self, nodename):
3825        """Retrieve resource information.
3826
3827        This method is called when nova-compute launches, and
3828        as part of a periodic task that records the results in the DB.
3829
3830        :param nodename:
3831            node which the caller want to get resources from
3832            a driver that manages only one node can safely ignore this
3833        :returns: Dictionary describing resources
3834        """
3835        self._update_host_stats()
3836        host_stats = self._host_stats
3837
3838        resources = {}
3839        resources['vcpus'] = host_stats['vcpus']
3840        resources['memory_mb'] = host_stats['memory_mb']
3841        resources['local_gb'] = host_stats['local_gb']
3842        resources['vcpus_used'] = host_stats['vcpus_used']
3843        resources['memory_mb_used'] = host_stats['memory_mb_used']
3844        resources['local_gb_used'] = host_stats['local_gb_used']
3845        resources['hypervisor_type'] = host_stats['hypervisor_type']
3846        resources['hypervisor_version'] = host_stats['hypervisor_version']
3847        resources['hypervisor_hostname'] = host_stats['hypervisor_hostname']
3848        resources['cpu_info'] = host_stats['cpu_info']
3849        resources['disk_available_least'] = host_stats['disk_available_least']
3850        resources['supported_instances'] = host_stats['supported_instances']
3851        resources['numa_topology'] = host_stats['numa_topology']
3852        return resources
3853
3854    def pre_live_migration(self, context, instance, block_device_info,
3855                           network_info, disk_info, migrate_data=None):
3856        """Prepare an instance for live migration
3857
3858        :param context: security context
3859        :param instance: nova.objects.instance.Instance object
3860        :param block_device_info: instance block device information
3861        :param network_info: instance network information
3862        :param disk_info: instance disk information
3863        :param migrate_data: a LiveMigrateData object
3864        """
3865        return migrate_data
3866
3867    def _live_migration(self, name, dest, dry_run=False):
3868        """Live migration of a Solaris kernel zone to another host."""
3869        zone = self._get_zone_by_name(name)
3870        if zone is None:
3871            raise exception.InstanceNotFound(instance_id=name)
3872
3873        options = []
3874        live_migration_cipher = CONF.solariszones.live_migration_cipher
3875        if live_migration_cipher is not None:
3876            options.extend(['-c', live_migration_cipher])
3877        if dry_run:
3878            options.append('-nq')
3879        options.append('ssh://nova@' + dest)
3880        zone.migrate(options)
3881
3882    def live_migration(self, context, instance, dest,
3883                       post_method, recover_method, block_migration=False,
3884                       migrate_data=None):
3885        """Live migration of an instance to another host.
3886
3887        :param context: security context
3888        :param instance:
3889            nova.db.sqlalchemy.models.Instance object
3890            instance object that is migrated.
3891        :param dest: destination host
3892        :param post_method:
3893            post operation method.
3894            expected nova.compute.manager._post_live_migration.
3895        :param recover_method:
3896            recovery method when any exception occurs.
3897            expected nova.compute.manager._rollback_live_migration.
3898        :param block_migration: if true, migrate VM disk.
3899        :param migrate_data: a LiveMigrateData object
3900
3901        """
3902        name = instance['name']
3903        try:
3904            self._live_migration(name, dest, dry_run=False)
3905        except Exception as ex:
3906            with excutils.save_and_reraise_exception():
3907                reason = zonemgr_strerror(ex)
3908                LOG.exception(_("Unable to live migrate instance '%s' to host "
3909                                "'%s' via zonemgr(3RAD): %s")
3910                              % (name, dest, reason))
3911                recover_method(context, instance, dest, block_migration)
3912
3913        post_method(context, instance, dest, block_migration, migrate_data)
3914
3915    def live_migration_force_complete(self, instance):
3916        """Force live migration to complete
3917
3918        :param instance: Instance being live migrated
3919
3920        """
3921        raise NotImplementedError()
3922
3923    def live_migration_abort(self, instance):
3924        """Abort an in-progress live migration.
3925
3926        :param instance: instance that is live migrating
3927
3928        """
3929        raise NotImplementedError()
3930
3931    def rollback_live_migration_at_destination(self, context, instance,
3932                                               network_info,
3933                                               block_device_info,
3934                                               destroy_disks=True,
3935                                               migrate_data=None):
3936        """Clean up destination node after a failed live migration.
3937
3938        :param context: security context
3939        :param instance: instance object that was being migrated
3940        :param network_info: instance network information
3941        :param block_device_info: instance block device information
3942        :param destroy_disks:
3943            if true, destroy disks at destination during cleanup
3944        :param migrate_data: a LiveMigrateData object
3945
3946        """
3947        pass
3948
3949    def post_live_migration(self, context, instance, block_device_info,
3950                            migrate_data=None):
3951        """Post operation of live migration at source host.
3952
3953        :param context: security context
3954        :instance: instance object that was migrated
3955        :block_device_info: instance block device information
3956        :param migrate_data: a LiveMigrateData object
3957        """
3958        try:
3959            # These methods log if problems occur so no need to double log
3960            # here. Just catch any stray exceptions and allow destroy to
3961            # proceed.
3962            if self._has_vnc_console_service(instance):
3963                self._disable_vnc_console_service(instance)
3964                self._delete_vnc_console_service(instance)
3965        except Exception:
3966            pass
3967
3968        name = instance['name']
3969        zone = self._get_zone_by_name(name)
3970        # If instance cannot be found, just return.
3971        if zone is None:
3972            LOG.warning(_("Unable to find instance '%s' via zonemgr(3RAD)")
3973                        % name)
3974            return
3975
3976        try:
3977            self._delete_config(instance)
3978        except Exception as ex:
3979            reason = zonemgr_strerror(ex)
3980            LOG.exception(_("Unable to delete configuration for instance '%s' "
3981                            "via zonemgr(3RAD): %s") % (name, reason))
3982            raise
3983
3984    def post_live_migration_at_source(self, context, instance, network_info):
3985        """Unplug VIFs from networks at source.
3986
3987        :param context: security context
3988        :param instance: instance object reference
3989        :param network_info: instance network information
3990        """
3991        self._unplug_vifs(instance)
3992
3993    def post_live_migration_at_destination(self, context, instance,
3994                                           network_info,
3995                                           block_migration=False,
3996                                           block_device_info=None):
3997        """Post operation of live migration at destination host.
3998
3999        :param context: security context
4000        :param instance: instance object that is migrated
4001        :param network_info: instance network information
4002        :param block_migration: if true, post operation of block_migration.
4003        """
4004        self._plug_vifs(instance, network_info)
4005
4006    def check_instance_shared_storage_local(self, context, instance):
4007        """Check if instance files located on shared storage.
4008
4009        This runs check on the destination host, and then calls
4010        back to the source host to check the results.
4011
4012        :param context: security context
4013        :param instance: nova.objects.instance.Instance object
4014        """
4015        raise NotImplementedError()
4016
4017    def check_instance_shared_storage_remote(self, context, data):
4018        """Check if instance files located on shared storage.
4019
4020        :param context: security context
4021        :param data: result of check_instance_shared_storage_local
4022        """
4023        raise NotImplementedError()
4024
4025    def check_instance_shared_storage_cleanup(self, context, data):
4026        """Do cleanup on host after check_instance_shared_storage calls
4027
4028        :param context: security context
4029        :param data: result of check_instance_shared_storage_local
4030        """
4031        pass
4032
4033    def check_can_live_migrate_destination(self, context, instance,
4034                                           src_compute_info, dst_compute_info,
4035                                           block_migration=False,
4036                                           disk_over_commit=False):
4037        """Check if it is possible to execute live migration.
4038
4039        This runs checks on the destination host, and then calls
4040        back to the source host to check the results.
4041
4042        :param context: security context
4043        :param instance: nova.db.sqlalchemy.models.Instance
4044        :param src_compute_info: Info about the sending machine
4045        :param dst_compute_info: Info about the receiving machine
4046        :param block_migration: if true, prepare for block migration
4047        :param disk_over_commit: if true, allow disk over commit
4048        :returns: a LiveMigrateData object (hypervisor-dependent)
4049        """
4050        src_cpu_info = jsonutils.loads(src_compute_info['cpu_info'])
4051        src_cpu_arch = src_cpu_info['arch']
4052        dst_cpu_info = jsonutils.loads(dst_compute_info['cpu_info'])
4053        dst_cpu_arch = dst_cpu_info['arch']
4054        if src_cpu_arch != dst_cpu_arch:
4055            reason = (_("CPU architectures between source host '%s' (%s) and "
4056                        "destination host '%s' (%s) are incompatible.")
4057                      % (src_compute_info['hypervisor_hostname'], src_cpu_arch,
4058                         dst_compute_info['hypervisor_hostname'],
4059                         dst_cpu_arch))
4060            raise exception.MigrationPreCheckError(reason=reason)
4061
4062        extra_specs = self._get_flavor(instance)['extra_specs'].copy()
4063        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
4064        if brand != ZONE_BRAND_SOLARIS_KZ:
4065            # Only Solaris kernel zones are currently supported.
4066            reason = (_("'%s' branded zones do not currently support live "
4067                        "migration.") % brand)
4068            raise exception.MigrationPreCheckError(reason=reason)
4069
4070        if block_migration:
4071            reason = (_('Block migration is not currently supported.'))
4072            raise exception.MigrationPreCheckError(reason=reason)
4073        if disk_over_commit:
4074            reason = (_('Disk overcommit is not currently supported.'))
4075            raise exception.MigrationPreCheckError(reason=reason)
4076
4077        dest_check_data = objects.SolarisZonesLiveMigrateData()
4078        dest_check_data.hypervisor_hostname = \
4079            dst_compute_info['hypervisor_hostname']
4080        return dest_check_data
4081
4082    def check_can_live_migrate_destination_cleanup(self, context,
4083                                                   dest_check_data):
4084        """Do required cleanup on dest host after check_can_live_migrate calls
4085
4086        :param context: security context
4087        :param dest_check_data: result of check_can_live_migrate_destination
4088        """
4089        pass
4090
4091    def _check_local_volumes_present(self, block_device_info):
4092        """Check if local volumes are attached to the instance."""
4093        bmap = block_device_info.get('block_device_mapping')
4094        for entry in bmap:
4095            connection_info = entry['connection_info']
4096            driver_type = connection_info['driver_volume_type']
4097            if driver_type == 'local':
4098                reason = (_("Instances with attached '%s' volumes are not "
4099                            "currently supported.") % driver_type)
4100                raise exception.MigrationPreCheckError(reason=reason)
4101
4102    def check_can_live_migrate_source(self, context, instance,
4103                                      dest_check_data, block_device_info=None):
4104        """Check if it is possible to execute live migration.
4105
4106        This checks if the live migration can succeed, based on the
4107        results from check_can_live_migrate_destination.
4108
4109        :param context: security context
4110        :param instance: nova.db.sqlalchemy.models.Instance
4111        :param dest_check_data: result of check_can_live_migrate_destination
4112        :param block_device_info: result of _get_instance_block_device_info
4113        :returns: a LiveMigrateData object
4114        """
4115        if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
4116            obj = objects.SolarisZonesLiveMigrateData()
4117            obj.from_legacy_dict(dest_check_data)
4118            dest_check_data = obj
4119
4120        self._check_local_volumes_present(block_device_info)
4121        name = instance['name']
4122        dest = dest_check_data.hypervisor_hostname
4123        try:
4124            self._live_migration(name, dest, dry_run=True)
4125        except Exception as ex:
4126            reason = zonemgr_strerror(ex)
4127            raise exception.MigrationPreCheckError(reason=reason)
4128        return dest_check_data
4129
4130    def get_instance_disk_info(self, instance,
4131                               block_device_info=None):
4132        """Retrieve information about actual disk sizes of an instance.
4133
4134        :param instance: nova.objects.Instance
4135        :param block_device_info:
4136            Optional; Can be used to filter out devices which are
4137            actually volumes.
4138        :return:
4139            json strings with below format::
4140
4141                "[{'path':'disk',
4142                   'type':'raw',
4143                   'virt_disk_size':'10737418240',
4144                   'backing_file':'backing_file',
4145                   'disk_size':'83886080'
4146                   'over_committed_disk_size':'10737418240'},
4147                   ...]"
4148        """
4149        raise NotImplementedError()
4150
4151    def refresh_security_group_rules(self, security_group_id):
4152        """This method is called after a change to security groups.
4153
4154        All security groups and their associated rules live in the datastore,
4155        and calling this method should apply the updated rules to instances
4156        running the specified security group.
4157
4158        An error should be raised if the operation cannot complete.
4159
4160        """
4161        # TODO(Vek): Need to pass context in for access to auth_token
4162        raise NotImplementedError()
4163
4164    def refresh_instance_security_rules(self, instance):
4165        """Refresh security group rules
4166
4167        Gets called when an instance gets added to or removed from
4168        the security group the instance is a member of or if the
4169        group gains or loses a rule.
4170        """
4171        raise NotImplementedError()
4172
4173    def reset_network(self, instance):
4174        """reset networking for specified instance."""
4175        # TODO(Vek): Need to pass context in for access to auth_token
4176        pass
4177
4178    def ensure_filtering_rules_for_instance(self, instance, network_info):
4179        """Setting up filtering rules and waiting for its completion.
4180
4181        To migrate an instance, filtering rules to hypervisors
4182        and firewalls are inevitable on destination host.
4183        ( Waiting only for filtering rules to hypervisor,
4184        since filtering rules to firewall rules can be set faster).
4185
4186        Concretely, the below method must be called.
4187        - setup_basic_filtering (for nova-basic, etc.)
4188        - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
4189
4190        to_xml may have to be called since it defines PROJNET, PROJMASK.
4191        but libvirt migrates those value through migrateToURI(),
4192        so , no need to be called.
4193
4194        Don't use thread for this method since migration should
4195        not be started when setting-up filtering rules operations
4196        are not completed.
4197
4198        :param instance: nova.objects.instance.Instance object
4199
4200        """
4201        # TODO(Vek): Need to pass context in for access to auth_token
4202        pass
4203
4204    def filter_defer_apply_on(self):
4205        """Defer application of IPTables rules."""
4206        pass
4207
4208    def filter_defer_apply_off(self):
4209        """Turn off deferral of IPTables rules and apply the rules now."""
4210        pass
4211
4212    def unfilter_instance(self, instance, network_info):
4213        """Stop filtering instance."""
4214        # TODO(Vek): Need to pass context in for access to auth_token
4215        pass
4216
4217    def set_admin_password(self, instance, new_pass):
4218        """Set the root password on the specified instance.
4219
4220        :param instance: nova.objects.instance.Instance
4221        :param new_pass: the new password
4222        """
4223        name = instance['name']
4224        zone = self._get_zone_by_name(name)
4225        if zone is None:
4226            raise exception.InstanceNotFound(instance_id=name)
4227
4228        if zone.state == ZONE_STATE_RUNNING:
4229            out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
4230                                     '/usr/bin/passwd', '-p',
4231                                     "'%s'" % sha256_crypt.encrypt(new_pass))
4232        else:
4233            raise exception.InstanceNotRunning(instance_id=name)
4234
4235    def inject_file(self, instance, b64_path, b64_contents):
4236        """Writes a file on the specified instance.
4237
4238        The first parameter is an instance of nova.compute.service.Instance,
4239        and so the instance is being specified as instance.name. The second
4240        parameter is the base64-encoded path to which the file is to be
4241        written on the instance; the third is the contents of the file, also
4242        base64-encoded.
4243
4244        NOTE(russellb) This method is deprecated and will be removed once it
4245        can be removed from nova.compute.manager.
4246        """
4247        # TODO(Vek): Need to pass context in for access to auth_token
4248        raise NotImplementedError()
4249
4250    def change_instance_metadata(self, context, instance, diff):
4251        """Applies a diff to the instance metadata.
4252
4253        This is an optional driver method which is used to publish
4254        changes to the instance's metadata to the hypervisor.  If the
4255        hypervisor has no means of publishing the instance metadata to
4256        the instance, then this method should not be implemented.
4257
4258        :param context: security context
4259        :param instance: nova.objects.instance.Instance
4260        """
4261        pass
4262
4263    def inject_network_info(self, instance, nw_info):
4264        """inject network info for specified instance."""
4265        # TODO(Vek): Need to pass context in for access to auth_token
4266        pass
4267
4268    def poll_rebooting_instances(self, timeout, instances):
4269        """Perform a reboot on all given 'instances'.
4270
4271        Reboots the given `instances` which are longer in the rebooting state
4272        than `timeout` seconds.
4273
4274        :param int timeout:
4275            The timeout (in seconds) for considering rebooting instances
4276            to be stuck.
4277        :param list instances:
4278            A list of nova.objects.instance.Instance objects that have been
4279            in rebooting state longer than the configured timeout.
4280
4281        :return: None
4282        """
4283        # TODO(Vek): Need to pass context in for access to auth_token
4284        raise NotImplementedError()
4285
4286    def host_power_action(self, action):
4287        """Reboots, shuts down or powers up the host.
4288
4289        :param str action:
4290            The action the host should perform. The valid actions are:
4291            ""startup", "shutdown" and "reboot".
4292
4293        :return: The result of the power action
4294        :rtype: : str
4295        """
4296
4297        raise NotImplementedError()
4298
4299    def host_maintenance_mode(self, host, mode):
4300        """Start/Stop host maintenance window.
4301
4302        On start, it triggers the migration of all instances to other hosts.
4303        Consider the combination with :func:`set_host_enabled`.
4304
4305        :param str host:
4306            The name of the host whose maintenance mode should be changed.
4307        :param bool mode:
4308            If `True`, go into maintenance mode. If `False`, leave the
4309            maintenance mode.
4310
4311        :return: "on_maintenance" if switched to maintenance mode or
4312                 "off_maintenance" if maintenance mode got left.
4313        :rtype: str
4314        """
4315
4316        raise NotImplementedError()
4317
4318    def set_host_enabled(self, enabled):
4319        """Sets the ability of this host to accept new instances.
4320
4321        :param bool enabled:
4322            If this is `True`, the host will accept new instances. If it is
4323            `False`, the host won't accept new instances.
4324
4325        :return: If the host can accept further instances, return "enabled",
4326                 if further instances shouldn't be scheduled to this host,
4327                 return "disabled".
4328        :rtype: str
4329        """
4330        # TODO(Vek): Need to pass context in for access to auth_token
4331        raise NotImplementedError()
4332
4333    def get_host_uptime(self):
4334        """Returns the result of calling the Linux command `uptime` on this
4335        host.
4336
4337        :return: A text which contains the uptime of this host since the
4338                 last boot.
4339        :rtype: str
4340        """
4341        # TODO(Vek): Need to pass context in for access to auth_token
4342        return utils.execute('/usr/bin/uptime')[0]
4343
4344    def plug_vifs(self, instance, network_info):
4345        """Plug virtual interfaces (VIFs) into the given `instance` at
4346        instance boot time.
4347
4348        The counter action is :func:`unplug_vifs`.
4349
4350        :param nova.objects.instance.Instance instance:
4351            The instance which gets VIFs plugged.
4352        :param nova.network.model.NetworkInfo network_info:
4353            The object which contains information about the VIFs to plug.
4354
4355        :return: None
4356        """
4357        # TODO(Vek): Need to pass context in for access to auth_token
4358        pass
4359
4360    def unplug_vifs(self, instance, network_info):
4361        # NOTE(markus_z): 2015-08-18
4362        # The compute manager doesn't use this interface, which seems odd
4363        # since the manager should be the controlling thing here.
4364        """Unplug virtual interfaces (VIFs) from networks.
4365
4366        The counter action is :func:`plug_vifs`.
4367
4368        :param nova.objects.instance.Instance instance:
4369            The instance which gets VIFs unplugged.
4370        :param nova.network.model.NetworkInfo network_info:
4371            The object which contains information about the VIFs to unplug.
4372
4373        :return: None
4374        """
4375        raise NotImplementedError()
4376
4377    def get_host_cpu_stats(self):
4378        """Get the currently known host CPU stats.
4379
4380        :returns: a dict containing the CPU stat info, eg:
4381
4382            | {'kernel': kern,
4383            |  'idle': idle,
4384            |  'user': user,
4385            |  'iowait': wait,
4386            |   'frequency': freq},
4387
4388                  where kern and user indicate the cumulative CPU time
4389                  (nanoseconds) spent by kernel and user processes
4390                  respectively, idle indicates the cumulative idle CPU time
4391                  (nanoseconds), wait indicates the cumulative I/O wait CPU
4392                  time (nanoseconds), since the host is booting up; freq
4393                  indicates the current CPU frequency (MHz). All values are
4394                  long integers.
4395
4396        """
4397        raise NotImplementedError()
4398
4399    def block_stats(self, instance, disk_id):
4400        """Return performance counters associated with the given disk_id on the
4401        given instance.  These are returned as [rd_req, rd_bytes, wr_req,
4402        wr_bytes, errs], where rd indicates read, wr indicates write, req is
4403        the total number of I/O requests made, bytes is the total number of
4404        bytes transferred, and errs is the number of requests held up due to a
4405        full pipeline.
4406
4407        All counters are long integers.
4408
4409        This method is optional.  On some platforms (e.g. XenAPI) performance
4410        statistics can be retrieved directly in aggregate form, without Nova
4411        having to do the aggregation.  On those platforms, this method is
4412        unused.
4413
4414        Note that this function takes an instance ID.
4415        """
4416        raise NotImplementedError()
4417
4418    def deallocate_networks_on_reschedule(self, instance):
4419        """Does the driver want networks deallocated on reschedule?"""
4420        return False
4421
4422    def macs_for_instance(self, instance):
4423        """What MAC addresses must this instance have?
4424
4425        Some hypervisors (such as bare metal) cannot do freeform virtualisation
4426        of MAC addresses. This method allows drivers to return a set of MAC
4427        addresses that the instance is to have. allocate_for_instance will take
4428        this into consideration when provisioning networking for the instance.
4429
4430        Mapping of MAC addresses to actual networks (or permitting them to be
4431        freeform) is up to the network implementation layer. For instance,
4432        with openflow switches, fixed MAC addresses can still be virtualised
4433        onto any L2 domain, with arbitrary VLANs etc, but regular switches
4434        require pre-configured MAC->network mappings that will match the
4435        actual configuration.
4436
4437        Most hypervisors can use the default implementation which returns None.
4438        Hypervisors with MAC limits should return a set of MAC addresses, which
4439        will be supplied to the allocate_for_instance call by the compute
4440        manager, and it is up to that call to ensure that all assigned network
4441        details are compatible with the set of MAC addresses.
4442
4443        This is called during spawn_instance by the compute manager.
4444
4445        :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
4446            None means 'no constraints', a set means 'these and only these
4447            MAC addresses'.
4448        """
4449        return None
4450
4451    def dhcp_options_for_instance(self, instance):
4452        """Get DHCP options for this instance.
4453
4454        Some hypervisors (such as bare metal) require that instances boot from
4455        the network, and manage their own TFTP service. This requires passing
4456        the appropriate options out to the DHCP service. Most hypervisors can
4457        use the default implementation which returns None.
4458
4459        This is called during spawn_instance by the compute manager.
4460
4461        Note that the format of the return value is specific to the Neutron
4462        client API.
4463
4464        :return: None, or a set of DHCP options, eg:
4465
4466             |    [{'opt_name': 'bootfile-name',
4467             |      'opt_value': '/tftpboot/path/to/config'},
4468             |     {'opt_name': 'server-ip-address',
4469             |      'opt_value': '1.2.3.4'},
4470             |     {'opt_name': 'tftp-server',
4471             |      'opt_value': '1.2.3.4'}
4472             |    ]
4473
4474        """
4475        return None
4476
4477    def manage_image_cache(self, context, all_instances):
4478        """Manage the driver's local image cache.
4479
4480        Some drivers chose to cache images for instances on disk. This method
4481        is an opportunity to do management of that cache which isn't directly
4482        related to other calls into the driver. The prime example is to clean
4483        the cache and remove images which are no longer of interest.
4484
4485        :param all_instances: nova.objects.instance.InstanceList
4486        """
4487        pass
4488
4489    def add_to_aggregate(self, context, aggregate, host, **kwargs):
4490        """Add a compute host to an aggregate.
4491
4492        The counter action to this is :func:`remove_from_aggregate`
4493
4494        :param nova.context.RequestContext context:
4495            The security context.
4496        :param nova.objects.aggregate.Aggregate aggregate:
4497            The aggregate which should add the given `host`
4498        :param str host:
4499            The name of the host to add to the given `aggregate`.
4500        :param dict kwargs:
4501            A free-form thingy...
4502
4503        :return: None
4504        """
4505        # NOTE(jogo) Currently only used for XenAPI-Pool
4506        raise NotImplementedError()
4507
4508    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
4509        """Remove a compute host from an aggregate.
4510
4511        The counter action to this is :func:`add_to_aggregate`
4512
4513        :param nova.context.RequestContext context:
4514            The security context.
4515        :param nova.objects.aggregate.Aggregate aggregate:
4516            The aggregate which should remove the given `host`
4517        :param str host:
4518            The name of the host to remove from the given `aggregate`.
4519        :param dict kwargs:
4520            A free-form thingy...
4521
4522        :return: None
4523        """
4524        raise NotImplementedError()
4525
4526    def undo_aggregate_operation(self, context, op, aggregate,
4527                                 host, set_error=True):
4528        """Undo for Resource Pools."""
4529        raise NotImplementedError()
4530
4531    def get_volume_connector(self, instance):
4532        """Get connector information for the instance for attaching to volumes.
4533
4534        Connector information is a dictionary representing the ip of the
4535        machine that will be making the connection, the name of the iscsi
4536        initiator, the WWPN and WWNN values of the Fibre Channel initiator,
4537        and the hostname of the machine as follows::
4538
4539            {
4540                'ip': ip,
4541                'initiator': initiator,
4542                'wwnns': wwnns,
4543                'wwpns': wwpns,
4544                'host': hostname
4545            }
4546
4547        """
4548        connector = {
4549            'ip': self.get_host_ip_addr(),
4550            'host': CONF.host
4551        }
4552        if not self._initiator:
4553            self._initiator = self._get_iscsi_initiator()
4554
4555        if self._initiator:
4556            connector['initiator'] = self._initiator
4557        else:
4558            LOG.debug(_("Could not determine iSCSI initiator name"),
4559                      instance=instance)
4560
4561        if not self._fc_wwnns:
4562            self._fc_wwnns = self._get_fc_wwnns()
4563            if not self._fc_wwnns or len(self._fc_wwnns) == 0:
4564                LOG.debug(_('Could not determine Fibre Channel '
4565                          'World Wide Node Names'),
4566                          instance=instance)
4567
4568        if not self._fc_wwpns:
4569            self._fc_wwpns = self._get_fc_wwpns()
4570            if not self._fc_wwpns or len(self._fc_wwpns) == 0:
4571                LOG.debug(_('Could not determine Fibre Channel '
4572                          'World Wide Port Names'),
4573                          instance=instance)
4574
4575        if self._fc_wwnns and self._fc_wwpns:
4576            connector["wwnns"] = self._fc_wwnns
4577            connector["wwpns"] = self._fc_wwpns
4578        return connector
4579
4580    def get_available_nodes(self, refresh=False):
4581        """Returns nodenames of all nodes managed by the compute service.
4582
4583        This method is for multi compute-nodes support. If a driver supports
4584        multi compute-nodes, this method returns a list of nodenames managed
4585        by the service. Otherwise, this method should return
4586        [hypervisor_hostname].
4587        """
4588        if refresh or not self._host_stats:
4589            self._update_host_stats()
4590        stats = self._host_stats
4591        if not isinstance(stats, list):
4592            stats = [stats]
4593        return [s['hypervisor_hostname'] for s in stats]
4594
4595    def node_is_available(self, nodename):
4596        """Return whether this compute service manages a particular node."""
4597        if nodename in self.get_available_nodes():
4598            return True
4599        # Refresh and check again.
4600        return nodename in self.get_available_nodes(refresh=True)
4601
4602    def get_per_instance_usage(self):
4603        """Get information about instance resource usage.
4604
4605        :returns: dict of  nova uuid => dict of usage info
4606        """
4607        return {}
4608
4609    def instance_on_disk(self, instance):
4610        """Checks access of instance files on the host.
4611
4612        :param instance: nova.objects.instance.Instance to lookup
4613
4614        Returns True if files of an instance with the supplied ID accessible on
4615        the host, False otherwise.
4616
4617        .. note::
4618            Used in rebuild for HA implementation and required for validation
4619            of access to instance shared disk files
4620        """
4621        bdmobj = objects.BlockDeviceMappingList
4622        bdms = bdmobj.get_by_instance_uuid(nova_context.get_admin_context(),
4623                                           instance['uuid'])
4624
4625        root_ci = None
4626        rootmp = instance['root_device_name']
4627        for entry in bdms:
4628            if entry['connection_info'] is None:
4629                continue
4630
4631            if entry['device_name'] == rootmp:
4632                root_ci = jsonutils.loads(entry['connection_info'])
4633                break
4634
4635        if root_ci is None:
4636            msg = (_("Unable to find the root device for instance '%s'.")
4637                   % instance['name'])
4638            raise exception.NovaException(msg)
4639
4640        driver_type = root_ci['driver_volume_type']
4641        return driver_type in shared_storage
4642
4643    def register_event_listener(self, callback):
4644        """Register a callback to receive events.
4645
4646        Register a callback to receive asynchronous event
4647        notifications from hypervisors. The callback will
4648        be invoked with a single parameter, which will be
4649        an instance of the nova.virt.event.Event class.
4650        """
4651
4652        self._compute_event_callback = callback
4653
4654    def emit_event(self, event):
4655        """Dispatches an event to the compute manager.
4656
4657        Invokes the event callback registered by the
4658        compute manager to dispatch the event. This
4659        must only be invoked from a green thread.
4660        """
4661
4662        if not self._compute_event_callback:
4663            LOG.debug("Discarding event %s", str(event))
4664            return
4665
4666        if not isinstance(event, virtevent.Event):
4667            raise ValueError(
4668                _("Event must be an instance of nova.virt.event.Event"))
4669
4670        try:
4671            LOG.debug("Emitting event %s", str(event))
4672            self._compute_event_callback(event)
4673        except Exception as ex:
4674            LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
4675                      {'event': event, 'ex': ex})
4676
4677    def delete_instance_files(self, instance):
4678        """Delete any lingering instance files for an instance.
4679
4680        :param instance: nova.objects.instance.Instance
4681        :returns: True if the instance was deleted from disk, False otherwise.
4682        """
4683        # Delete the zone configuration for the instance using destroy, because
4684        # it will simply take care of the work, and we don't need to duplicate
4685        # the code here.
4686        LOG.debug(_("Cleaning up for instance %s"), instance['name'])
4687        try:
4688            self.destroy(None, instance, None)
4689        except Exception:
4690            return False
4691        return True
4692
4693    @property
4694    def need_legacy_block_device_info(self):
4695        """Tell the caller if the driver requires legacy block device info.
4696
4697        Tell the caller whether we expect the legacy format of block
4698        device info to be passed in to methods that expect it.
4699        """
4700        return True
4701
4702    def volume_snapshot_create(self, context, instance, volume_id,
4703                               create_info):
4704        """Snapshots volumes attached to a specified instance.
4705
4706        The counter action to this is :func:`volume_snapshot_delete`
4707
4708        :param nova.context.RequestContext context:
4709            The security context.
4710        :param nova.objects.instance.Instance  instance:
4711            The instance that has the volume attached
4712        :param uuid volume_id:
4713            Volume to be snapshotted
4714        :param create_info: The data needed for nova to be able to attach
4715               to the volume.  This is the same data format returned by
4716               Cinder's initialize_connection() API call.  In the case of
4717               doing a snapshot, it is the image file Cinder expects to be
4718               used as the active disk after the snapshot operation has
4719               completed.  There may be other data included as well that is
4720               needed for creating the snapshot.
4721        """
4722        raise NotImplementedError()
4723
4724    def volume_snapshot_delete(self, context, instance, volume_id,
4725                               snapshot_id, delete_info):
4726        """Deletes a snapshot of a volume attached to a specified instance.
4727
4728        The counter action to this is :func:`volume_snapshot_create`
4729
4730        :param nova.context.RequestContext context:
4731            The security context.
4732        :param nova.objects.instance.Instance instance:
4733            The instance that has the volume attached.
4734        :param uuid volume_id:
4735            Attached volume associated with the snapshot
4736        :param uuid snapshot_id:
4737            The snapshot to delete.
4738        :param dict delete_info:
4739            Volume backend technology specific data needed to be able to
4740            complete the snapshot.  For example, in the case of qcow2 backed
4741            snapshots, this would include the file being merged, and the file
4742            being merged into (if appropriate).
4743
4744        :return: None
4745        """
4746        raise NotImplementedError()
4747
4748    def default_root_device_name(self, instance, image_meta, root_bdm):
4749        """Provide a default root device name for the driver.
4750
4751        :param nova.objects.instance.Instance instance:
4752            The instance to get the root device for.
4753        :param nova.objects.ImageMeta image_meta:
4754            The metadata of the image of the instance.
4755        :param nova.objects.BlockDeviceMapping root_bdm:
4756            The description of the root device.
4757        """
4758        raise NotImplementedError()
4759
4760    def default_device_names_for_instance(self, instance, root_device_name,
4761                                          *block_device_lists):
4762        """Default the missing device names in the block device mapping."""
4763        raise NotImplementedError()
4764
4765    def get_device_name_for_instance(self, instance,
4766                                     bdms, block_device_obj):
4767        """Get the next device name based on the block device mapping.
4768
4769        :param instance: nova.objects.instance.Instance that volume is
4770                         requesting a device name
4771        :param bdms: a nova.objects.BlockDeviceMappingList for the instance
4772        :param block_device_obj: A nova.objects.BlockDeviceMapping instance
4773                                 with all info about the requested block
4774                                 device. device_name does not need to be set,
4775                                 and should be decided by the driver
4776                                 implementation if not set.
4777
4778        :returns: The chosen device name.
4779        """
4780        raise NotImplementedError()
4781
4782    def is_supported_fs_format(self, fs_type):
4783        """Check whether the file format is supported by this driver
4784
4785        :param fs_type: the file system type to be checked,
4786                        the validate values are defined at disk API module.
4787        """
4788        # NOTE(jichenjc): Return False here so that every hypervisor
4789        #                 need to define their supported file system
4790        #                 type and implement this function at their
4791        #                 virt layer.
4792        return False
4793
4794    def quiesce(self, context, instance, image_meta):
4795        """Quiesce the specified instance to prepare for snapshots.
4796
4797        If the specified instance doesn't support quiescing,
4798        InstanceQuiesceNotSupported is raised. When it fails to quiesce by
4799        other errors (e.g. agent timeout), NovaException is raised.
4800
4801        :param context:  request context
4802        :param instance: nova.objects.instance.Instance to be quiesced
4803        :param nova.objects.ImageMeta image_meta:
4804            The metadata of the image of the instance.
4805        """
4806        raise NotImplementedError()
4807
4808    def unquiesce(self, context, instance, image_meta):
4809        """Unquiesce the specified instance after snapshots.
4810
4811        If the specified instance doesn't support quiescing,
4812        InstanceQuiesceNotSupported is raised. When it fails to quiesce by
4813        other errors (e.g. agent timeout), NovaException is raised.
4814
4815        :param context:  request context
4816        :param instance: nova.objects.instance.Instance to be unquiesced
4817        :param nova.objects.ImageMeta image_meta:
4818            The metadata of the image of the instance.
4819        """
4820        raise NotImplementedError()
4821
4822    def network_binding_host_id(self, context, instance):
4823        """Get host ID to associate with network ports.
4824
4825        :param context:  request context
4826        :param instance: nova.objects.instance.Instance that the network
4827                         ports will be associated with
4828        :returns: a string representing the host ID
4829        """
4830        return instance.get('host')