# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
A connection to a hypervisor through libvirt.

Supports KVM, LXC, QEMU, UML, XEN and Parallels.

"""

import collections
from collections import deque
import contextlib
import errno
import functools
import glob
import itertools
import mmap
import operator
import os
import shutil
import tempfile
import time
import uuid

import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range

from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import remotefs
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova.volume import cinder
from nova.volume import encryptors

libvirt = None

uefi_logged = False

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
    libvirt_firewall.__name__,
    libvirt_firewall.IptablesFirewallDriver.__name__)
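# i.e. "nova.virt.libvirt.firewall.IptablesFirewallDriver" once the module
# and class names above are joined.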

DEFAULT_UEFI_LOADER_PATH = {
    "x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
    "aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
}

MAX_CONSOLE_BYTES = 100 * units.Ki

# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason used when the service was enabled or disabled without a reason
DISABLE_REASON_UNDEFINED = None

# Guest config console string
CONSOLE = "console=tty0 console=ttyS0"

GuestNumaConfig = collections.namedtuple(
    'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
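# A simple container for the NUMA-related pieces of a guest's configuration:
# the host cpuset the guest may run on plus the cputune, numaconfig and
# numatune config objects (any of which may be None).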

libvirt_volume_drivers = [
    'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
    'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
    'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
    'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
    'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
    'sheepdog=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
    'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
    'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
    'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
    'glusterfs='
        'nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver',
    'fibre_channel='
        'nova.virt.libvirt.volume.fibrechannel.'
        'LibvirtFibreChannelVolumeDriver',
    'scality=nova.virt.libvirt.volume.scality.LibvirtScalityVolumeDriver',
    'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
    'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
    'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver',
    'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
    'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver',
    'vzstorage='
        'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',
]
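# Each entry above uses the "<driver_volume_type>=<python.path.to.Class>"
# form expected by driver.driver_dict_from_config() (called from __init__
# below); the key is matched against connection_info['driver_volume_type']
# when a volume driver is looked up in _get_volume_driver().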


def patch_tpool_proxy():
    """eventlet.tpool.Proxy doesn't work with old-style class in __str__()
    or __repr__() calls. See bug #962840 for details.
    We perform a monkey patch to replace those two instance methods.
    """
    def str_method(self):
        return str(self._obj)

    def repr_method(self):
        return repr(self._obj)

    tpool.Proxy.__str__ = str_method
    tpool.Proxy.__repr__ = repr_method


patch_tpool_proxy()
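# Illustrative note: without the patch above, str()/repr() on a tpool.Proxy
# could describe the proxy wrapper itself rather than delegating to the
# wrapped object (see bug #962840 referenced above).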

# For information about when MIN_LIBVIRT_VERSION and
# NEXT_MIN_LIBVIRT_VERSION can be changed, consult
#
#   https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix
#
# Currently this is effectively the min version for i686/x86_64
# + KVM/QEMU, as other architectures/hypervisors require newer
# versions. Over time, this will become a common min version
# for all architectures/hypervisors, as this value rises to
# meet them.
MIN_LIBVIRT_VERSION = (1, 2, 1)
MIN_QEMU_VERSION = (1, 5, 3)
# TODO(berrange): Re-evaluate this at start of each release cycle
# to decide if we want to plan a future min version bump.
# MIN_LIBVIRT_VERSION can be updated to match this after
# NEXT_MIN_LIBVIRT_VERSION has been at a higher value for
# one cycle.
NEXT_MIN_LIBVIRT_VERSION = (1, 2, 1)
NEXT_MIN_QEMU_VERSION = (1, 5, 3)

# When the above version matches/exceeds this version,
# delete it and the corresponding code using it.
# Relative block commit & rebase (the feature is detected at runtime;
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
# Libvirt version 1.2.17 is required for successful block live migration
# of a VM booted from an image with attached devices
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
# libvirt discard feature
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
# While earlier versions could support NUMA reporting and
# NUMA placement, not until 1.2.7 was there the ability
# to pin guest nodes to host nodes, so mandate that. Without
# this the scheduler cannot make guaranteed decisions, as the
# guest placement may not match what was requested
MIN_LIBVIRT_NUMA_VERSION = (1, 2, 7)
# PowerPC based hosts that support NUMA using libvirt
MIN_LIBVIRT_NUMA_VERSION_PPC = (1, 2, 19)
# Versions of libvirt with known NUMA topology issues
# See bug #1449028
BAD_LIBVIRT_NUMA_VERSIONS = [(1, 2, 9, 2)]
# While earlier versions could support hugepage backed
# guests, not until 1.2.8 was there the ability to request
# a particular huge page size. Without this the scheduler
# cannot make guaranteed decisions, as the huge page size
# used by the guest may not match what was requested
MIN_LIBVIRT_HUGEPAGE_VERSION = (1, 2, 8)
# Versions of libvirt with broken cpu pinning support. This excludes
# versions of libvirt with broken NUMA support since pinning needs
# NUMA
# See bug #1438226
BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)]
# qemu 2.1 introduces support for pinning memory on host
# NUMA nodes, along with the ability to specify hugepage
# sizes per guest NUMA node
MIN_QEMU_NUMA_HUGEPAGE_VERSION = (2, 1, 0)
# fsFreeze/fsThaw requirement
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)

# UEFI booting support
MIN_LIBVIRT_UEFI_VERSION = (1, 2, 9)

# Hyper-V paravirtualized time source
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)

# parallels driver support
MIN_LIBVIRT_PARALLELS_VERSION = (1, 2, 12)

# Ability to set the user guest password with Qemu
MIN_LIBVIRT_SET_ADMIN_PASSWD = (1, 2, 16)

# s/390 & s/390x architectures with KVM
MIN_LIBVIRT_KVM_S390_VERSION = (1, 2, 13)
MIN_QEMU_S390_VERSION = (2, 3, 0)

# libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled.
# libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)

# ppc64/ppc64le architectures with KVM
# NOTE(rfolco): The same minimum libvirt/QEMU levels apply on Big Endian
# and Little Endian, regardless of guest vs host architecture nuances
MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12)
MIN_QEMU_PPC64_VERSION = (2, 1, 0)

# Auto converge support
MIN_LIBVIRT_AUTO_CONVERGE_VERSION = (1, 2, 3)
MIN_QEMU_AUTO_CONVERGE = (1, 6, 0)

# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)


# Limit on the number of serial consoles
QEMU_MAX_SERIAL_PORTS = 4
# QEMU supports 4 serial consoles; we reserve 1 for the PTY console it defines
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1

# realtime support
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)

# libvirt postcopy support
MIN_LIBVIRT_POSTCOPY_VERSION = (1, 3, 3)

# qemu postcopy support
MIN_QEMU_POSTCOPY_VERSION = (2, 5, 0)

MIN_LIBVIRT_OTHER_ARCH = {arch.S390: MIN_LIBVIRT_KVM_S390_VERSION,
                          arch.S390X: MIN_LIBVIRT_KVM_S390_VERSION,
                          arch.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION,
                          arch.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION,
                          arch.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION,
                         }
MIN_QEMU_OTHER_ARCH = {arch.S390: MIN_QEMU_S390_VERSION,
                       arch.S390X: MIN_QEMU_S390_VERSION,
                       arch.PPC: MIN_QEMU_PPC64_VERSION,
                       arch.PPC64: MIN_QEMU_PPC64_VERSION,
                       arch.PPC64LE: MIN_QEMU_PPC64_VERSION,
                      }

# perf events support
MIN_LIBVIRT_PERF_VERSION = (2, 0, 0)
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'

PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
                                'mbml': 'mbm_local',
                                'mbmt': 'mbm_total',
                               }


class LibvirtDriver(driver.ComputeDriver):
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        "supports_migrate_to_same_host": False,
        "supports_attach_interface": True,
        "supports_device_tagging": True,
    }

    def __init__(self, virtapi, read_only=False):
        super(LibvirtDriver, self).__init__(virtapi)

        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
            libvirt_migrate.libvirt = libvirt

        self._host = host.Host(self._uri(), read_only,
                               lifecycle_event_handler=self.emit_event,
                               conn_event_handler=self._handle_conn_event)
        self._initiator = None
        self._fc_wwnns = None
        self._fc_wwpns = None
        self._caps = None
        self._supported_perf_events = []
        self.firewall_driver = firewall.load_driver(
            DEFAULT_FIREWALL_DRIVER,
            host=self._host)

        self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()

        self.volume_drivers = driver.driver_dict_from_config(
            self._get_volume_drivers(), self)

        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)

        self.disk_cachemodes = {}

        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                ]
        self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
                                                                      'qemu')

        for mode_str in CONF.libvirt.disk_cachemodes:
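            # Each entry takes the form "<disk_type>=<cache_mode>", e.g. a
            # hypothetical nova.conf value of "file=writeback,block=none";
            # entries with an unknown cache mode are skipped with a warning.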
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in self.valid_cachemodes:
                LOG.warning(_LW('Invalid cachemode %(cache_mode)s specified '
                             'for disk type %(disk_type)s.'),
                         {'cache_mode': cache_mode, 'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode

        self._volume_api = cinder.API()
        self._image_api = image.API()

        sysinfo_serial_funcs = {
            'none': lambda: None,
            'hardware': self._get_host_sysinfo_serial_hardware,
            'os': self._get_host_sysinfo_serial_os,
            'auto': self._get_host_sysinfo_serial_auto,
        }

        self._sysinfo_serial_func = sysinfo_serial_funcs.get(
            CONF.libvirt.sysinfo_serial)

        self.job_tracker = instancejobtracker.InstanceJobTracker()
        self._remotefs = remotefs.RemoteFilesystem()

        self._live_migration_flags = self._block_migration_flags = 0
        self.active_migrations = {}

        # Compute reserved hugepages from conf file at the very
        # beginning to ensure any syntax error will be reported and
        # avoid any re-calculation when computing resources.
        self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()

    def _get_volume_drivers(self):
        return libvirt_volume_drivers

    @property
    def disk_cachemode(self):
        if self._disk_cachemode is None:
            # We prefer 'none' for consistent performance, host crash
            # safety & migration correctness by avoiding host page cache.
            # Some filesystems (eg GlusterFS via FUSE) don't support
            # O_DIRECT though. For those we fall back to 'writethrough'
            # which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
            # typically are, but things like NFS are not).
            self._disk_cachemode = "none"
            if not self._supports_direct_io(CONF.instances_path):
                self._disk_cachemode = "writethrough"
        return self._disk_cachemode

    def _set_cache_mode(self, conf):
        """Set cache mode on LibvirtConfigGuestDisk object."""
        try:
            source_type = conf.source_type
            driver_cache = conf.driver_cache
        except AttributeError:
            return

        cache_mode = self.disk_cachemodes.get(source_type,
                                              driver_cache)
        conf.driver_cache = cache_mode

    def _do_quality_warnings(self):
        """Warn about untested driver configurations.

        This will log a warning message about untested driver or host arch
        configurations to indicate to administrators that the quality is
        unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
        is tested upstream.
        """
        caps = self._host.get_capabilities()
        hostarch = caps.host.cpu.arch
        if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
            hostarch not in (arch.I686, arch.X86_64)):
            LOG.warning(_LW('The libvirt driver is not tested on '
                         '%(type)s/%(arch)s by the OpenStack project and '
                         'thus its quality can not be ensured. For more '
                         'information, see: http://docs.openstack.org/'
                         'developer/nova/support-matrix.html'),
                        {'type': CONF.libvirt.virt_type, 'arch': hostarch})

    def _handle_conn_event(self, enabled, reason):
        LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
                 {'enabled': enabled, 'reason': reason})
        self._set_host_enabled(enabled, reason)

    def _version_to_string(self, version):
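        # Joins a version tuple into a dotted string,
        # e.g. (1, 2, 1) -> "1.2.1".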
        return '.'.join([str(x) for x in version])

    def init_host(self, host):
        self._host.initialize()

        self._do_quality_warnings()

        self._parse_migration_flags()

        self._supported_perf_events = self._get_supported_perf_events()

        if (CONF.libvirt.virt_type == 'lxc' and
                not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
            LOG.warning(_LW("Running libvirt-lxc without user namespaces is "
                         "dangerous. Containers spawned by Nova will be run "
                         "as the host's root user. It is highly suggested "
                         "that user namespaces be used in a public or "
                         "multi-tenant environment."))

        # Stop libguestfs from using KVM unless we're also configured
        # to use it. This solves the problem where people need to
        # stop Nova from using KVM because nested-virt is broken
        if CONF.libvirt.virt_type != "kvm":
            guestfs.force_tcg()

        if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
            raise exception.NovaException(
                _('Nova requires libvirt version %s or greater.') %
                self._version_to_string(MIN_LIBVIRT_VERSION))

        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
            not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION)):
            raise exception.NovaException(
                _('Nova requires QEMU version %s or greater.') %
                self._version_to_string(MIN_QEMU_VERSION))

        if (CONF.libvirt.virt_type == 'parallels' and
            not self._host.has_min_version(MIN_LIBVIRT_PARALLELS_VERSION)):
            raise exception.NovaException(
                _('Running Nova with parallels virt_type requires '
                  'libvirt version %s') %
                self._version_to_string(MIN_LIBVIRT_PARALLELS_VERSION))

        # Give the cloud admin a heads up if we are intending to
        # change the MIN_LIBVIRT_VERSION in the next release.
        if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
            LOG.warning(_LW('Running Nova with a libvirt version less than '
                            '%(version)s is deprecated. The required minimum '
                            'version of libvirt will be raised to %(version)s '
                            'in the next release.'),
                        {'version': self._version_to_string(
                            NEXT_MIN_LIBVIRT_VERSION)})
        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
            not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
            LOG.warning(_LW('Running Nova with a QEMU version less than '
                            '%(version)s is deprecated. The required minimum '
                            'version of QEMU will be raised to %(version)s '
                            'in the next release.'),
                        {'version': self._version_to_string(
                            NEXT_MIN_QEMU_VERSION)})

        kvm_arch = arch.from_host()
        if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
            kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
                not self._host.has_min_version(
                                        MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch),
                                        MIN_QEMU_OTHER_ARCH.get(kvm_arch))):
            raise exception.NovaException(
                _('Running Nova with qemu/kvm virt_type on %(arch)s '
                  'requires libvirt version %(libvirt_ver)s and '
                  'qemu version %(qemu_ver)s, or greater') %
                {'arch': kvm_arch,
                 'libvirt_ver': self._version_to_string(
                     MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)),
                 'qemu_ver': self._version_to_string(
                     MIN_QEMU_OTHER_ARCH.get(kvm_arch))})

    def _prepare_migration_flags(self):
        migration_flags = 0

        migration_flags |= libvirt.VIR_MIGRATE_LIVE

        # Adding p2p flag only if xen is not in use, because xen does not
        # support p2p migrations
        if CONF.libvirt.virt_type != 'xen':
            migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER

        # Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, the
        # migrated instance will remain defined on the source host
        migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE

        live_migration_flags = block_migration_flags = migration_flags

        # Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
        # will be live-migrations instead
        block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC

        return (live_migration_flags, block_migration_flags)

    def _handle_live_migration_tunnelled(self, migration_flags):
        if (CONF.libvirt.live_migration_tunnelled is None or
                CONF.libvirt.live_migration_tunnelled):
            migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
        return migration_flags

    def _is_post_copy_available(self):
        if self._host.has_min_version(lv_ver=MIN_LIBVIRT_POSTCOPY_VERSION,
                                      hv_ver=MIN_QEMU_POSTCOPY_VERSION):
            return True
        return False

    def _handle_live_migration_post_copy(self, migration_flags):
        if CONF.libvirt.live_migration_permit_post_copy:
            if self._is_post_copy_available():
                migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
            else:
                LOG.info(_LI('live_migration_permit_post_copy is set to '
                             'True, but post-copy live migration is not '
                             'supported by this libvirt/QEMU version.'))
        return migration_flags

    def _handle_live_migration_auto_converge(self, migration_flags):
        if self._host.has_min_version(lv_ver=MIN_LIBVIRT_AUTO_CONVERGE_VERSION,
                                      hv_ver=MIN_QEMU_AUTO_CONVERGE):
            if (self._is_post_copy_available() and
                    (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
                LOG.info(_LI('live_migration_permit_post_copy is set to True '
                             'and post-copy live migration is available, so '
                             'auto-converge will not be used.'))
            elif CONF.libvirt.live_migration_permit_auto_converge:
                migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
        elif CONF.libvirt.live_migration_permit_auto_converge:
            LOG.info(_LI('live_migration_permit_auto_converge is set to '
                         'True, but auto-converge is not supported by this '
                         'libvirt/QEMU version.'))
        return migration_flags

    def _parse_migration_flags(self):
        (live_migration_flags,
            block_migration_flags) = self._prepare_migration_flags()

        live_migration_flags = self._handle_live_migration_tunnelled(
            live_migration_flags)
        block_migration_flags = self._handle_live_migration_tunnelled(
            block_migration_flags)

        live_migration_flags = self._handle_live_migration_post_copy(
            live_migration_flags)
        block_migration_flags = self._handle_live_migration_post_copy(
            block_migration_flags)

        live_migration_flags = self._handle_live_migration_auto_converge(
            live_migration_flags)
        block_migration_flags = self._handle_live_migration_auto_converge(
            block_migration_flags)

        self._live_migration_flags = live_migration_flags
        self._block_migration_flags = block_migration_flags

    # TODO(sahid): This method is targeted for removal when the tests
    # have been updated to avoid its use
    #
    # All libvirt API calls on the libvirt.Connect object should be
    # encapsulated by methods on the nova.virt.libvirt.host.Host
    # object, rather than directly invoking the libvirt APIs. The goal
    # is to avoid a direct dependency on the libvirt API from the
    # driver.py file.
    def _get_connection(self):
        return self._host.get_connection()

    _conn = property(_get_connection)

    @staticmethod
    def _uri():
        if CONF.libvirt.virt_type == 'uml':
            uri = CONF.libvirt.connection_uri or 'uml:///system'
        elif CONF.libvirt.virt_type == 'xen':
            uri = CONF.libvirt.connection_uri or 'xen:///'
        elif CONF.libvirt.virt_type == 'lxc':
            uri = CONF.libvirt.connection_uri or 'lxc:///'
        elif CONF.libvirt.virt_type == 'parallels':
            uri = CONF.libvirt.connection_uri or 'parallels:///system'
        else:
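            # e.g. for the default 'kvm' (or 'qemu') virt_type with no
            # explicit connection_uri this yields 'qemu:///system'.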
            uri = CONF.libvirt.connection_uri or 'qemu:///system'
        return uri

    @staticmethod
    def _live_migration_uri(dest):
        # Only Xen and QEMU support live migration, see
        # https://libvirt.org/migration.html#scenarios for reference
        uris = {
            'kvm': 'qemu+tcp://%s/system',
            'qemu': 'qemu+tcp://%s/system',
            'xen': 'xenmigr://%s/system',
        }
        virt_type = CONF.libvirt.virt_type
        uri = CONF.libvirt.live_migration_uri or uris.get(virt_type)
        if uri is None:
            raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
        return uri % dest

    @staticmethod
    def _migrate_uri(dest):
        uri = None
        # Only QEMU live migration supports the migrate-uri parameter
        virt_type = CONF.libvirt.virt_type
        if virt_type in ('qemu', 'kvm'):
            # QEMU accepts two schemes: tcp and rdma. By default
            # libvirt builds the URI using the remote hostname and the
            # tcp scheme.
            uri = 'tcp://%s' % dest
        # Because dest might be of type unicode, here we might return a value
        # of type unicode as well, which is not acceptable to the libvirt
        # Python binding when Python 2.7 is in use, so let's convert it
        # explicitly back to str. When Python 3.x is in use, the libvirt
        # Python binding accepts unicode, so it is completely fine to do a
        # no-op str(uri) conversion.
        return uri and str(uri)

    def instance_exists(self, instance):
        """Efficient override of base instance_exists method."""
        try:
            self._host.get_guest(instance)
            return True
        except exception.NovaException:
            return False

    def list_instances(self):
        names = []
        for guest in self._host.list_guests(only_running=False):
            names.append(guest.name)

        return names

    def list_instance_uuids(self):
        uuids = []
        for guest in self._host.list_guests(only_running=False):
            uuids.append(guest.uuid)

        return uuids

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        for vif in network_info:
            self.vif_driver.plug(instance, vif)

    def _unplug_vifs(self, instance, network_info, ignore_errors):
        """Unplug VIFs from networks."""
        for vif in network_info:
            try:
                self.vif_driver.unplug(instance, vif)
            except exception.NovaException:
                if not ignore_errors:
                    raise

    def unplug_vifs(self, instance, network_info):
        self._unplug_vifs(instance, network_info, False)

    def _teardown_container(self, instance):
        inst_path = libvirt_utils.get_instance_path(instance)
        container_dir = os.path.join(inst_path, 'rootfs')
        rootfs_dev = instance.system_metadata.get('rootfs_device_name')
        LOG.debug('Attempting to teardown container at path %(dir)s with '
                  'root device: %(rootfs_dev)s',
                  {'dir': container_dir, 'rootfs_dev': rootfs_dev},
                  instance=instance)
        disk_api.teardown_container(container_dir, rootfs_dev)

    def _destroy(self, instance, attempt=1):
        try:
            guest = self._host.get_guest(instance)
            if CONF.serial_console.enabled:
                # This method is called for several events: destroy,
                # rebuild, hard-reboot, power-off. For all of these
                # events we want to release the serial ports acquired
                # for the guest before destroying it.
                serials = self._get_serial_ports_from_guest(guest)
                for hostname, port in serials:
                    serial_console.release_port(host=hostname, port=port)
        except exception.InstanceNotFound:
            guest = None

        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        old_domid = -1
        if guest is not None:
            try:
                old_domid = guest.id
                guest.poweroff()

            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_NO_DOMAIN:
                    # Domain already gone. This can safely be ignored.
                    is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running

                    state = guest.get_power_state(self._host)
                    if state == power_state.SHUTDOWN:
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
                    errmsg = e.get_error_message()
                    if (CONF.libvirt.virt_type == 'lxc' and
                        errmsg == 'internal error: '
                                  'Some processes refused to die'):
                        # Some processes in the container didn't die
                        # fast enough for libvirt. The container will
                        # eventually die. For now, move on and let
                        # the wait_for_destroy logic take over.
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
                    LOG.warning(_LW("Cannot destroy instance, operation time "
                                 "out"),
                             instance=instance)
                    reason = _("operation time out")
                    raise exception.InstancePowerOffFailure(reason=reason)
                elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
                    if e.get_int1() == errno.EBUSY:
                        # NOTE(danpb): When libvirt kills a process it sends it
                        # SIGTERM first and waits 10 seconds. If it hasn't gone
                        # it sends SIGKILL and waits another 5 seconds. If it
                        # still hasn't gone then you get this EBUSY error.
                        # Usually when a QEMU process fails to go away upon
                        # SIGKILL it is because it is stuck in an
                        # uninterruptible kernel sleep waiting on I/O from
                        # some non-responsive server.
                        # Given the CPU load of the gate tests though, it is
                        # conceivable that the 15 second timeout is too short,
                        # particularly if the VM running tempest has a high
                        # steal time from the cloud host. i.e. 15 wallclock
                        # seconds may have passed, but the VM might only have
                        # had a few seconds of scheduled run time.
                        LOG.warning(_LW('Error from libvirt during destroy. '
                                     'Code=%(errcode)s Error=%(e)s; '
                                     'attempt %(attempt)d of 3'),
                                 {'errcode': errcode, 'e': e,
                                  'attempt': attempt},
                                 instance=instance)
                        with excutils.save_and_reraise_exception() as ctxt:
                            # Try up to 3 times before giving up.
                            if attempt < 3:
                                ctxt.reraise = False
                                self._destroy(instance, attempt + 1)
                                return

                if not is_okay:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Error from libvirt during destroy. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)

        def _wait_for_destroy(expected_domid):
            """Called at an interval until the VM is gone."""
            # NOTE(vish): If the instance disappears during the destroy
            #             we ignore it so the cleanup can still be
            #             attempted because we would prefer destroy to
            #             never fail.
            try:
                dom_info = self.get_info(instance)
                state = dom_info.state
                new_domid = dom_info.id
            except exception.InstanceNotFound:
                LOG.info(_LI("During wait destroy, instance disappeared."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

            if state == power_state.SHUTDOWN:
                LOG.info(_LI("Instance destroyed successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

            # NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the domain
            #                id here; if it changed and the instance is
            #                still running, we should destroy it again.
            # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
            if new_domid != expected_domid:
                LOG.info(_LI("Instance may be started again."),
                         instance=instance)
                kwargs['is_running'] = True
                raise loopingcall.LoopingCallDone()

        kwargs = {'is_running': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
                                                     old_domid)
        timer.start(interval=0.5).wait()
        if kwargs['is_running']:
            LOG.info(_LI("Going to destroy instance again."),
                     instance=instance)
            self._destroy(instance)
        else:
            # NOTE(GuanQiang): teardown container to avoid resource leak
            if CONF.libvirt.virt_type == 'lxc':
                self._teardown_container(instance)

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        self._destroy(instance)
        self.cleanup(context, instance, network_info, block_device_info,
                     destroy_disks, migrate_data)

    def _undefine_domain(self, instance):
        try:
            guest = self._host.get_guest(instance)
            try:
                guest.delete_configuration()
            except libvirt.libvirtError as e:
                with excutils.save_and_reraise_exception():
                    errcode = e.get_error_code()
                    LOG.error(_LE('Error from libvirt during undefine. '
                                  'Code=%(errcode)s Error=%(e)s'),
                              {'errcode': errcode, 'e': e}, instance=instance)
        except exception.InstanceNotFound:
            pass

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        if destroy_vifs:
            self._unplug_vifs(instance, network_info, True)

        retry = True
        while retry:
            try:
                self.unfilter_instance(instance, network_info)
            except libvirt.libvirtError as e:
                try:
                    state = self.get_info(instance).state
                except exception.InstanceNotFound:
                    state = power_state.SHUTDOWN

                if state != power_state.SHUTDOWN:
                    LOG.warning(_LW("Instance may be still running, destroy "
                                 "it again."), instance=instance)
                    self._destroy(instance)
                else:
                    retry = False
                    errcode = e.get_error_code()
                    LOG.exception(_LE('Error from libvirt during unfilter. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)
                    reason = "Error unfiltering instance."
                    raise exception.InstanceTerminationFailure(reason=reason)
            except Exception:
                retry = False
                raise
            else:
                retry = False

        # FIXME(wangpan): if the instance is booted again here, for example
        #                 because a soft reboot operation boots it, it will
        #                 become "running deleted"; should we check for and
        #                 destroy it at the end of this method?

        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device']
            if disk_dev is not None:
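                # e.g. a mount_device of "/dev/vdb" becomes "vdb"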
                disk_dev = disk_dev.rpartition("/")[2]

            if ('data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)

                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)

            try:
                self._disconnect_volume(connection_info, disk_dev)
            except Exception as exc:
                with excutils.save_and_reraise_exception() as ctxt:
                    if destroy_disks:
                        # Don't block on volume errors if we're trying to
                        # delete the instance, as it may be partially created
                        # or deleted
                        ctxt.reraise = False
                        LOG.warning(
                            _LW("Ignoring Volume Error on vol %(vol_id)s "
                                "during delete %(exc)s"),
                            {'vol_id': vol.get('volume_id'), 'exc': exc},
                            instance=instance)

        if destroy_disks:
            # NOTE(haomai): destroy volumes if needed
            if CONF.libvirt.images_type == 'lvm':
                self._cleanup_lvm(instance, block_device_info)
            if CONF.libvirt.images_type == 'rbd':
                self._cleanup_rbd(instance)

        is_shared_block_storage = False
        if migrate_data and 'is_shared_block_storage' in migrate_data:
            is_shared_block_storage = migrate_data.is_shared_block_storage
        if destroy_disks or is_shared_block_storage:
            attempts = int(instance.system_metadata.get('clean_attempts',
                                                        '0'))
            success = self.delete_instance_files(instance)
            # NOTE(mriedem): This is used in the _run_pending_deletes periodic
            # task in the compute manager. The tight coupling is not great...
            instance.system_metadata['clean_attempts'] = str(attempts + 1)
            if success:
                instance.cleaned = True
            instance.save()

        self._undefine_domain(instance)

    def _detach_encrypted_volumes(self, instance, block_device_info):
        """Detaches encrypted volumes attached to instance."""
        disks = jsonutils.loads(self.get_instance_disk_info(instance,
                                                            block_device_info))
        encrypted_volumes = filter(dmcrypt.is_encrypted,
                                   [disk['path'] for disk in disks])
        for path in encrypted_volumes:
            dmcrypt.delete_volume(path)

    def _get_serial_ports_from_guest(self, guest, mode=None):
        """Returns an iterator over serial port(s) configured on guest.

        :param mode: Should be a value in (None, bind, connect)
        """
        xml = guest.get_xml_desc()
        tree = etree.fromstring(xml)

        # The 'serial' device is the base for x86 platforms. Other platforms
        # (e.g. kvm on system z = arch.S390X) can only use 'console' devices.
        xpath_mode = "[@mode='%s']" % mode if mode else ""
        serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
        console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
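        # For reference, a matching element in the domain XML looks roughly
        # like (illustrative example):
        #   <serial type='tcp'>
        #     <source mode='bind' host='127.0.0.1' service='10000'/>
        #   </serial>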

        tcp_devices = tree.findall(serial_tcp)
        if len(tcp_devices) == 0:
            tcp_devices = tree.findall(console_tcp)
        for source in tcp_devices:
            yield (source.get("host"), int(source.get("service")))

    @staticmethod
    def _get_rbd_driver():
        return rbd_utils.RBDDriver(
                pool=CONF.libvirt.images_rbd_pool,
                ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
                rbd_user=CONF.libvirt.rbd_user)

    def _cleanup_rbd(self, instance):
        # NOTE(nic): On revert_resize, the cleanup steps for the root
        # volume are handled with an "rbd snap rollback" command,
        # and none of this is needed (and is, in fact, harmful) so
        # filter out non-ephemerals from the list
        if instance.task_state == task_states.RESIZE_REVERTING:
            filter_fn = lambda disk: (disk.startswith(instance.uuid) and
                                      disk.endswith('disk.local'))
        else:
            filter_fn = lambda disk: disk.startswith(instance.uuid)
        LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)

    def _cleanup_lvm(self, instance, block_device_info):
        """Delete all LVM disks for given instance object."""
        if instance.get('ephemeral_key_uuid') is not None:
            self._detach_encrypted_volumes(instance, block_device_info)

        disks = self._lvm_disks(instance)
        if disks:
            lvm.remove_volumes(disks)

    def _lvm_disks(self, instance):
        """Returns all LVM disks for given instance object."""
        if CONF.libvirt.images_volume_group:
            vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
            if not os.path.exists(vg):
                return []
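            # The lvm image backend names logical volumes after the instance
            # uuid, e.g. "<uuid>_disk" or "<uuid>_disk.local" (illustrative),
            # so a simple prefix match is sufficient here.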
            pattern = '%s_' % instance.uuid

            def belongs_to_instance(disk):
                return disk.startswith(pattern)

            def fullpath(name):
                return os.path.join(vg, name)

            logical_volumes = lvm.list_volumes(vg)

            disk_names = filter(belongs_to_instance, logical_volumes)
            disks = map(fullpath, disk_names)
            return disks
        return []

    def get_volume_connector(self, instance):
        root_helper = utils.get_root_helper()
        return connector.get_connector_properties(
            root_helper, CONF.my_block_storage_ip,
            CONF.libvirt.volume_use_multipath,
            enforce_multipath=True,
            host=CONF.host)

    def _cleanup_resize(self, instance, network_info):
        target = libvirt_utils.get_instance_path(instance) + '_resize'

        if os.path.exists(target):
            # Deletion can fail over NFS, so retry the deletion as required.
            # Set the maximum number of attempts to 5; in most tests the
            # directory is removed by the second attempt.
            utils.execute('rm', '-rf', target, delay_on_retry=True,
                          attempts=5)

        root_disk = self.image_backend.image(instance, 'disk')
        # TODO(nic): Set ignore_errors=False in a future release.
        # It is set to True here to avoid any upgrade issues surrounding
        # instances being in pending resize state when the software is updated;
        # in that case there will be no snapshot to remove.  Once it can be
        # reasonably assumed that no such instances exist in the wild
        # anymore, it should be set back to False (the default) so it will
        # throw errors, like it should.
        if root_disk.exists():
            root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
                                  ignore_errors=True)

        if instance.host != CONF.host:
            self._undefine_domain(instance)
            self.unplug_vifs(instance, network_info)
            self.unfilter_instance(instance, network_info)

    def _get_volume_driver(self, connection_info):
        driver_type = connection_info.get('driver_volume_type')
        if driver_type not in self.volume_drivers:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        return self.volume_drivers[driver_type]

    def _connect_volume(self, connection_info, disk_info):
        vol_driver = self._get_volume_driver(connection_info)
        vol_driver.connect_volume(connection_info, disk_info)

    def _disconnect_volume(self, connection_info, disk_dev):
        vol_driver = self._get_volume_driver(connection_info)
        vol_driver.disconnect_volume(connection_info, disk_dev)

    def _get_volume_config(self, connection_info, disk_info):
        vol_driver = self._get_volume_driver(connection_info)
        return vol_driver.get_config(connection_info, disk_info)

    def _get_volume_encryptor(self, connection_info, encryption):
        encryptor = encryptors.get_volume_encryptor(connection_info,
                                                    **encryption)
        return encryptor

    def _check_discard_for_attach_volume(self, conf, instance):
        """Perform some checks for volumes configured for discard support.

        If discard is configured for the volume, and the guest is using a
        configuration known to not work, we will log a message explaining
        the reason why.
        """
        if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
            LOG.debug('Attempting to attach volume %(id)s with discard '
                      'support enabled to an instance using an '
                      'unsupported configuration. target_bus = '
                      '%(bus)s. Trim commands will not be issued to '
                      'the storage device.',
                      {'bus': conf.target_bus,
                       'id': conf.serial},
                      instance=instance)

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        guest = self._host.get_guest(instance)

        disk_dev = mountpoint.rpartition("/")[2]
        bdm = {
            'device_name': disk_dev,
            'disk_bus': disk_bus,
            'device_type': device_type}

        # Note(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and libvirt >= 0.10.2. The
        #            presence of a block size is considered mandatory by
        #            cinder so we fail if we can't honor the request.
        data = {}
        if ('data' in connection_info):
            data = connection_info['data']
        if ('logical_block_size' in data or 'physical_block_size' in data):
            if ((CONF.libvirt.virt_type != "kvm" and
                 CONF.libvirt.virt_type != "qemu")):
                msg = _("Volume sets block size, but the current "
                        "libvirt hypervisor '%s' does not support custom "
                        "block size") % CONF.libvirt.virt_type
                raise exception.InvalidHypervisorType(msg)

        disk_info = blockinfo.get_info_from_bdm(
            instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
        self._connect_volume(connection_info, disk_info)
        conf = self._get_volume_config(connection_info, disk_info)
        self._set_cache_mode(conf)

        self._check_discard_for_attach_volume(conf, instance)

        try:
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)

            if encryption:
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)

            guest.attach_device(conf, persistent=True, live=live)
        except Exception as ex:
            LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
                          mountpoint, instance=instance)
            if isinstance(ex, libvirt.libvirtError):
                errcode = ex.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                    self._disconnect_volume(connection_info, disk_dev)
                    raise exception.DeviceIsBusy(device=disk_dev)

            with excutils.save_and_reraise_exception():
                self._disconnect_volume(connection_info, disk_dev)

    def _swap_volume(self, guest, disk_path, new_path, resize_to):
        """Swap existing disk with a new block device."""
        dev = guest.get_block_device(disk_path)

        # Save a copy of the domain's persistent XML file
        xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)

        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            dev.abort_job()
        except Exception:
            pass

        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            #             domains, so we need to temporarily undefine it.
            #             If any part of this block fails, the domain is
            #             re-defined regardless.
            if guest.has_persistent_configuration():
                guest.delete_configuration()

            # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
            # allow writing to existing external volume file
            dev.rebase(new_path, copy=True, reuse_ext=True)

            while dev.wait_for_job():
                time.sleep(0.5)

            dev.abort_job(pivot=True)
            if resize_to:
                # NOTE(alex_xu): domain.blockJobAbort isn't a synchronous
                # call. This is a bug in libvirt, so we need to wait for the
                # pivot to finish. See libvirt bug #1119173.
                while dev.wait_for_job(wait_for_job_clean=True):
                    time.sleep(0.5)
                dev.resize(resize_to * units.Gi / units.Ki)
        finally:
            self._host.write_instance_config(xml)

    def swap_volume(self, old_connection_info,
                    new_connection_info, instance, mountpoint, resize_to):

        guest = self._host.get_guest(instance)

        disk_dev = mountpoint.rpartition("/")[2]
        if not guest.get_disk(disk_dev):
            raise exception.DiskNotFound(location=disk_dev)
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
            }
        self._connect_volume(new_connection_info, disk_info)
        conf = self._get_volume_config(new_connection_info, disk_info)
        if not conf.source_path:
            self._disconnect_volume(new_connection_info, disk_dev)
            raise NotImplementedError(_("Swap only supports host devices"))

        # Save updates made in connection_info when connect_volume was called
        volume_id = new_connection_info.get('serial')
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
            nova_context.get_admin_context(), volume_id, instance.uuid)
        driver_bdm = driver_block_device.convert_volume(bdm)
        driver_bdm['connection_info'] = new_connection_info
        driver_bdm.save()

        self._swap_volume(guest, disk_dev, conf.source_path, resize_to)
        self._disconnect_volume(old_connection_info, disk_dev)

    def _get_existing_domain_xml(self, instance, network_info,
                                 block_device_info=None):
        try:
            guest = self._host.get_guest(instance)
            xml = guest.get_xml_desc()
        except exception.InstanceNotFound:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                instance.image_meta,
                                                block_device_info)
            xml = self._get_guest_xml(nova_context.get_admin_context(),
                                      instance, network_info, disk_info,
                                      instance.image_meta,
                                      block_device_info=block_device_info)
        return xml

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            guest = self._host.get_guest(instance)

            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)

            wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
                                                             disk_dev,
                                                             persistent=True,
                                                             live=live)

            if encryption:
                # The volume must be detached from the VM before
                # disconnecting it from its encryptor. Otherwise, the
                # encryptor may report that the volume is still in use.
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.detach_volume(**encryption)

            wait_for_detach()
        except exception.InstanceNotFound:
            # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
            #                will throw InstanceNotFound exception. Need to
            #                disconnect volume under this circumstance.
            LOG.warning(_LW("During detach_volume, instance disappeared."),
                     instance=instance)
        except exception.DeviceNotFound:
            raise exception.DiskNotFound(location=disk_dev)
        except libvirt.libvirtError as ex:
            # NOTE(vish): This is called to cleanup volumes after live
            #             migration, so we should still disconnect even if
            #             the instance doesn't exist here anymore.
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                # NOTE(vish):
                LOG.warning(_LW("During detach_volume, instance disappeared."),
                         instance=instance)
            else:
                raise

        self._disconnect_volume(connection_info, disk_dev)

    def attach_interface(self, instance, image_meta, vif):
        guest = self._host.get_guest(instance)

        self.vif_driver.plug(instance, vif)
        self.firewall_driver.setup_basic_filtering(instance, [vif])
        cfg = self.vif_driver.get_config(instance, vif, image_meta,
                                         instance.flavor,
                                         CONF.libvirt.virt_type,
                                         self._host)
        try:
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            guest.attach_device(cfg, persistent=True, live=live)
        except libvirt.libvirtError:
            LOG.error(_LE('attaching network adapter failed.'),
                     instance=instance, exc_info=True)
            self.vif_driver.unplug(instance, vif)
            raise exception.InterfaceAttachFailed(
                    instance_uuid=instance.uuid)

    def detach_interface(self, instance, vif):
        guest = self._host.get_guest(instance)
        cfg = self.vif_driver.get_config(instance, vif,
                                         instance.image_meta,
                                         instance.flavor,
                                         CONF.libvirt.virt_type, self._host)
        try:
            self.vif_driver.unplug(instance, vif)
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            guest.detach_device(cfg, persistent=True, live=live)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warning(_LW("During detach_interface, "
                             "instance disappeared."),
                         instance=instance)
            else:
                # NOTE(mriedem): When deleting an instance and using Neutron,
                # we can be racing against Neutron deleting the port and
                # sending the vif-deleted event which then triggers a call to
                # detach the interface, so we might have failed because the
                # network device no longer exists. Libvirt will fail with
                # "operation failed: no matching network device was found"
                # which unfortunately does not have a unique error code so we
                # need to look up the interface by MAC and if it's not found
                # then we can just log it as a warning rather than tracing an
                # error.
                mac = vif.get('address')
                interface = guest.get_interface_by_mac(mac)
                if interface:
                    LOG.error(_LE('detaching network adapter failed.'),
                             instance=instance, exc_info=True)
                    raise exception.InterfaceDetachFailed(
                            instance_uuid=instance.uuid)

                # The interface is gone so just log it as a warning.
                LOG.warning(_LW('Detaching interface %(mac)s failed because '
                                'the device is no longer found on the guest.'),
                            {'mac': mac}, instance=instance)

    def _create_snapshot_metadata(self, image_meta, instance,
                                  img_fmt, snp_name):
        metadata = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                                   'kernel_id': instance.kernel_id,
                                   'image_location': 'snapshot',
                                   'image_state': 'available',
                                   'owner_id': instance.project_id,
                                   'ramdisk_id': instance.ramdisk_id,
                                   }
                    }
        if instance.os_type:
            metadata['properties']['os_type'] = instance.os_type

        # NOTE(vish): glance forces ami disk format to be ami
        if image_meta.disk_format == 'ami':
            metadata['disk_format'] = 'ami'
        else:
            metadata['disk_format'] = img_fmt

        if image_meta.obj_attr_is_set("container_format"):
            metadata['container_format'] = image_meta.container_format
        else:
            metadata['container_format'] = "bare"

        return metadata

    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+
        """
        try:
            guest = self._host.get_guest(instance)

            # TODO(sahid): We are converting all calls from a
            # virDomain object to use nova.virt.libvirt.Guest.