Why Gemfury? Push, build, and install  RubyGems npm packages Python packages Maven artifacts PHP packages Go Modules Debian packages RPM packages NuGet packages

Repository URL to install this package:

Details    
idna / lib / python2.7 / site-packages / contego / nova / extension / driver / backends / rdb_backend.py
Size: Mime:
# Copyright 2015 TrilioData Inc.
# All Rights Reserved.

import copy
import os
import uuid
import json
import re
import rbd
import rados
import time
from tempfile import mkstemp

try:
    from oslo_log import log as logging
except ImportError:
    from nova.openstack.common import log as logging

try:
    from oslo_utils import encodeutils
except ImportError:
    from nova.openstack.common import strutils as encodeutils

try:
    from oslo_config import cfg
except ImportError:
    from oslo.config import cfg

try:
    from oslo_utils import fileutils
except ImportError:
    from nova.openstack.common import fileutils

from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient import session

from nova import utils

try:
    import nova.virt.libvirt.rbd_utils as rbd_utils
except BaseException:
    from nova.virt.libvirt.storage import rbd_utils

import nova.virt.libvirt.utils as libvirt_utils
from nova.virt.libvirt.imagebackend import Rbd
from contego.nova.extension.driver import qemuimages

from contego.nova.extension.driver import vault
from contego import utils as contego_utils

import rbd_base

# Global oslo.config handle; vault_storage_type is read from here below.
CONF = cfg.CONF
# Module-level logger, per OpenStack logging convention.
LOG = logging.getLogger(__name__)


class RBDBackend(rbd_base.RBDBaseBackend):
    """Ceph/RBD backup backend.

    Uploads RBD snapshots as (optionally incremental) qcow2 images to
    the TrilioVault store, and restores backup images back into RBD
    volumes via ``qemu-img convert``.  Low-level helpers
    (``self.driver``, ``rbd_keyring_search_and_execute``,
    ``_export_diff``, ``_size``, ``_get_temp_file_path``,
    ``_create_qcow2_from_extents``,
    ``_execute_qemu_img_and_track_progress``, ``self.key_user``) come
    from :class:`rbd_base.RBDBaseBackend`.
    """

    def upload_snapshot(self, disk_info, **kwargs):
        """Export an RBD snapshot to a qcow2 file and place it in the vault.

        :param disk_info: dict with ``path`` (``pool/volume``),
            ``backings`` (snapshot ids, index 0 is the current one) and
            ``prev_disk_info`` (previous backup's disk_info, or falsy
            for a full backup).
        :param kwargs: must contain ``context`` and ``params`` (whose
            ``metadata`` identifies the snapshot resource in the vault).
        :raises Exception: re-raises any export/conversion failure after
            logging and best-effort cleanup.
        """
        context = kwargs['context']
        params = kwargs['params']

        snapshot_full_name = disk_info['path'] + '@snapshot-' + \
            disk_info['backings'][0]['path']
        pool_name = disk_info['path'].split('/')[0]
        volume_name = disk_info['path'].split('/')[1]
        volume_name = encodeutils.safe_encode(volume_name)
        progress_tracker_path = \
            vault.get_progress_tracker_path(params['metadata'])

        # Incremental backup: diff against the previous snapshot, but
        # only if that snapshot still exists on the cluster; otherwise
        # silently fall back to a full export.
        prev_snapshot_name = None
        if disk_info['prev_disk_info']:
            prev_snapshot_name = 'snapshot-' + encodeutils.safe_encode(
                disk_info['prev_disk_info']['backings'][0]['path'])
            with rbd_utils.RBDVolumeProxy(self.driver,
                                          volume_name,
                                          pool=pool_name) as volume:
                allsnaps = set(s['name'] for s in volume.list_snaps())
                if prev_snapshot_name not in allsnaps:
                    prev_snapshot_name = None

        vsize = self._size(disk_info['path'])

        extentsfile = None
        qcow2file = None
        # True when qcow2file already IS the final vault location, so no
        # separate upload pass is needed at the end.
        qcow2_at_vault_path = False

        try:
            extentsfile = self._get_temp_file_path(True)
            if CONF.vault_storage_type.lower() in ('nfs', 'swift-s', 's3'):
                # Write the qcow2 straight to its final vault location.
                qcow2file = vault.get_snapshot_vm_disk_resource_path(
                    params['metadata'])
                qcow2_at_vault_path = True
                head, tail = os.path.split(qcow2file)
                fileutils.ensure_tree(head)
            else:
                qcow2file = self._get_temp_file_path(True)

            self._export_diff(snapshot_full_name, extentsfile,
                              from_snap=prev_snapshot_name)
            try:
                libvirt_utils.create_cow_image(None, qcow2file, vsize)
            except Exception:
                # S3 is eventually consistent and create_cow_image may
                # transiently fail; give the store time to settle, then
                # probe the image (raises if it is still unusable).
                time.sleep(30)
                qemuimages.qemu_img_info(qcow2file)

            if prev_snapshot_name:
                backing_image = disk_info['prev_disk_info']['path'] + '@' + \
                    prev_snapshot_name
            else:
                backing_image = pool_name + '/' + volume_name
            self._create_qcow2_from_extents(qcow2file, extentsfile,
                                            backing_image, snapshot_full_name,
                                            progress_tracker_path)
        except Exception as ex:
            LOG.exception(ex)
            time.sleep(30)
            # Guard cleanup: qcow2file may be None / never created, and
            # a failing remove here would mask the original exception.
            if qcow2file and os.path.exists(qcow2file):
                os.remove(qcow2file)
            raise
        finally:
            if extentsfile and os.path.exists(extentsfile):
                os.remove(extentsfile)

        # Temp-file path: upload with a freshly minted token, since the
        # original one may have expired during a long export.
        if not qcow2_at_vault_path:
            newcontext = rbd_base.get_new_context(context)
            vault.upload_snapshot_vm_disk_resource(
                newcontext, params['metadata'], qcow2file)
            os.remove(qcow2file)

    def ceph_rename_volume(self, source, target):
        """Rename an RBD volume via ``rbd mv``.

        :raises Exception: if the rbd command reports anything on stderr.
        """
        args = [source, target]
        kwargs = {'run_as_root': True}
        out, err = self.rbd_keyring_search_and_execute(
            'mv', *args, **kwargs)

        if err != '':
            raise Exception("Cannot run rbd rename %s to %s. Error %s" %
                            (source, target, err))

        return

    def ceph_delete_volume(self, volume_name):
        """Best-effort delete of an RBD volume via ``rbd rm``.

        Errors are deliberately ignored: this is used for cleanup of
        volumes that may not exist.
        """
        args = [volume_name]
        kwargs = {'run_as_root': True}
        self.rbd_keyring_search_and_execute('rm', *args, **kwargs)

        return

    def ceph_volume_info(self, volume_name):
        """Return ``rbd info --format=json`` for *volume_name* as a dict.

        :raises Exception: if the rbd command reports anything on stderr.
        """
        args = [volume_name, '--format=json']
        kwargs = {'run_as_root': True}
        out, err = self.rbd_keyring_search_and_execute(
            'info', *args, **kwargs)

        if err != '':
            raise Exception("Cannot run rbd info on %s. Error %s" %
                            (volume_name, err))

        return json.loads(out)

    def create_volume_from_file(self, volume_name, volume_path,
                                backup_image_file_path, ceph_conf_file,
                                progress_tracking_file_path,
                                use_existing=False):
        """Convert a backup image into an RBD volume with ``qemu-img``.

        :param volume_name: target ``pool/volume`` name.
        :param volume_path: original volume path (kept for interface
            compatibility; not used directly here).
        :param backup_image_file_path: source image on the backup store.
        :param ceph_conf_file: ceph.conf to hand to qemu's rbd driver.
        :param progress_tracking_file_path: file used to report progress.
        :param use_existing: pass ``-n`` so qemu-img writes into the
            pre-existing target volume instead of creating a new one.
        """
        volume_endpoint = 'rbd:' + volume_name
        # key_user is of the form "client.<id>"; qemu wants only <id>.
        volume_endpoint += ':id=' + self.key_user.split('.')[1]
        volume_endpoint += ':conf=' + ceph_conf_file

        cmdspec = ['qemu-img', 'convert', '-p', ]
        if use_existing:
            cmdspec += ['-n']
        # Cache mode 'none' avoids double-caching through the host.
        cmdspec += ['-t', 'none', '-O',
                    'raw', backup_image_file_path, volume_endpoint]

        cmd = " ".join(cmdspec)
        LOG.debug(('create_volume_from_file cmd %s ' % cmd))

        self._execute_qemu_img_and_track_progress(
                     cmdspec, volume_name, backup_image_file_path,
                     progress_tracking_file_path)

    def transfer_qemu_image_to_volume_by_new_volume(
            self,
            volume_path,
            backup_image_file_path,
            progress_tracking_file_path):
        """Restore by writing into a fresh temp volume, then swapping it in.

        Used when the target volume has no snapshots, so it can simply
        be deleted and replaced by the restored copy.
        """
        # 'rbd ls' side effect: populates self.key_user with a usable key.
        pool_name = volume_path.split('/')[0]
        args = [pool_name]
        kwargs = {}
        self.rbd_keyring_search_and_execute('ls', *args, **kwargs)

        if not self.key_user:
            raise Exception(
                "Could not find any valid ceph key to use. "
                "Please make sure 'rbd ls -l' can be run successfully")

        temp_volume = pool_name + '/' + "volume-" + \
            str(uuid.uuid4().hex) + '-TrilioVault'

        with rbd_base.make_copy_ceph_conf(self) as temp_path:
            try:
                self.create_volume_from_file(temp_volume, volume_path,
                                             backup_image_file_path, temp_path,
                                             progress_tracking_file_path)
            except Exception as ex:
                LOG.exception(ex)
                time.sleep(30)
                # Clean up the partially-written temp volume before
                # propagating the failure.
                self.ceph_delete_volume(temp_volume)
                raise

        # Replace the cinder-created volume with the restored one.
        cinder_volume_name = volume_path
        self.ceph_delete_volume(cinder_volume_name)
        self.ceph_rename_volume(temp_volume, cinder_volume_name)

        # Log stats of the newly restored volume.
        statinfo = self.ceph_volume_info(cinder_volume_name)

        LOG.debug("Transferred backup image to ceph volume %s. "
                  "Transferred size %s" %
                  (volume_path, statinfo.get('object_size', "Not Found")))

        return

    def transfer_qemu_image_to_volume_by_overwrite(
            self,
            volume_path,
            backup_image_file_path,
            progress_tracking_file_path):
        """Restore by overwriting the existing volume in place.

        Used when the target volume has snapshots and therefore cannot
        be deleted and recreated.
        """
        # 'rbd ls' side effect: populates self.key_user with a usable key.
        pool_name = volume_path.split('/')[0]
        args = [pool_name]
        kwargs = {}
        self.rbd_keyring_search_and_execute('ls', *args, **kwargs)

        if not self.key_user:
            raise Exception(
                "Could not find any valid ceph key to use. "
                "Please make sure 'rbd ls -l' can be run successfully")

        with rbd_base.make_copy_ceph_conf(self) as temp_path:
            try:
                self.create_volume_from_file(volume_path, volume_path,
                                             backup_image_file_path, temp_path,
                                             progress_tracking_file_path,
                                             use_existing=True)
            except Exception as ex:
                LOG.exception(ex)
                time.sleep(30)
                raise

        # Log stats of the newly restored volume.
        statinfo = self.ceph_volume_info(volume_path)

        LOG.debug("Transferred backup image to ceph volume %s. "
                  "Transferred size %s" %
                  (volume_path, statinfo.get('object_size', "Not Found")))

        return

    def volume_has_snapshots(self, volume_path):
        """Return True iff the RBD volume at *volume_path* has snapshots."""
        pool_name = volume_path.split('/')[0]
        volume_name = volume_path.split('/')[1]
        volume_name = encodeutils.safe_encode(volume_name)

        with rbd_utils.RBDVolumeProxy(
             self.driver, volume_name, pool=pool_name) as volume:  # noqa
            return bool(list(volume.list_snaps()))

    def transfer_qemu_image_to_volume(self, volume_path,
                                      backup_image_file_path,
                                      progress_tracking_file_path):
        """Restore a backup image into *volume_path*.

        Dispatches to in-place overwrite when the volume has snapshots
        (and so cannot be recreated), or to restore-into-new-volume
        otherwise.
        """
        try:
            if self.volume_has_snapshots(volume_path):
                self.transfer_qemu_image_to_volume_by_overwrite(
                    volume_path, backup_image_file_path,
                    progress_tracking_file_path)
            else:
                self.transfer_qemu_image_to_volume_by_new_volume(
                    volume_path, backup_image_file_path,
                    progress_tracking_file_path)
        except Exception as ex:
            LOG.exception(ex)
            raise


"""
driver = ContegoRBDDriver("volumes", "/etc/ceph/ceph.conf", "cinder")
driver.create_snapshot(snapshot)

fileh, extentsfile = mkstemp()
close(fileh)
remove(extentsfile)

fileh, qcow2file = mkstemp()
close(fileh)
remove(qcow2file)

driver.export_diff("volumes/"+snapshot['volume_name']+"@"+snapshot['name'],
                   extentsfile)

vsize = driver.volume_size(snapshot['volume_name'])
libutils_utils.create_image("qcow2", qcow2file, vsize)
driver.create_qcow2_from_extents(qcow2file, extentsfile)
self.delete_snapshot(snapshot)
"""