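"""Utility helpers for workloadmgr workloads (workloadmgr/workloads/workload_utils.py).

Serializes workload, snapshot, and configuration records to the backup
target, applies retention policies, and purges snapshot/restore data from
the staging area.
"""
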
import copy
import os
import sys
import json
import shutil
import subprocess
from tempfile import mkstemp

from oslo_config import cfg

from workloadmgr.openstack.common import log as logging
from workloadmgr import autolog
from workloadmgr import flags
from workloadmgr import settings
from workloadmgr.vault import vault
from workloadmgr.compute import nova
from workloadmgr.db.workloadmgrdb import WorkloadMgrDB
from workloadmgr.openstack.common import jsonutils
from workloadmgr.openstack.common import timeutils
# NOTE: `_` used in log messages below is assumed to come from
# oslo-incubator's gettextutils, like the other common imports above.
from workloadmgr.openstack.common.gettextutils import _
from workloadmgr import exception
from workloadmgr.common.workloadmgr_keystoneclient import KeystoneClientBase
import pickle
from workloadmgr.datamover import contego
from workloadmgr.common import clients

workloads_manager_opts = [
    cfg.StrOpt('cloud_unique_id',
               default='test-cloud-id',
               help='cloud unique id.'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(workloads_manager_opts)

CONF = cfg.CONF

LOG = logging.getLogger(__name__)
Logger = autolog.Logger(LOG)

db = WorkloadMgrDB().db


def process_trust_values(name_in_backend, backend_settings_by_name, db_settings_by_name):
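    """Drop the backend setting named `name_in_backend` when the DB already
    holds a setting with the same user_id and project_id, so that the DB
    value takes precedence during the merge."""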
    setting_values = backend_settings_by_name.get(name_in_backend)
    if setting_values:
        for db_value in db_settings_by_name.values():
            if db_value['user_id'] == setting_values['user_id'] and \
                    db_value['project_id'] == setting_values['project_id']:
                backend_settings_by_name.pop(name_in_backend, None)
                break


@autolog.log_method(logger=Logger)
def upload_settings_db_entry(cntx):
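    """Merge the settings stored on the backup target with the current DB
    settings (DB values win on conflict) and upload the result as the
    settings_db object. Password values are masked before upload."""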
    # Use a None context since we want the settings of all users/tenants.
    # TODO: implement settings persistence per user/tenant
    (backup_target, path) = vault.get_settings_backup_target()
    settings_db = db.setting_get_all(
        None, read_deleted='no', backup_settings=True)
    for setting in settings_db:
        if 'password' in setting.name.lower() and 'smtp_server_password' not in setting.name:
            setting.value = '******'
        for kvpair in setting.metadata:
            if 'Password' in kvpair['key'] or 'password' in kvpair['key']:
                kvpair['value'] = '******'

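    # Round-trip through JSON to turn the DB model objects into plain dicts.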
    settings_json = jsonutils.dumps(settings_db)
    settings_path = os.path.join(str(CONF.cloud_unique_id), "settings_db")
    db_settings = json.loads(settings_json)

    try:
        backend_settings = json.loads(backup_target.get_object(settings_path))
    except Exception:
        backend_settings = []

    # If the backend holds settings that are not in the DB, they have not
    # been imported yet; in that case merge the DB settings with the
    # backend settings. Both sets are persisted, and duplicates are
    # overridden with the DB values (matched on user_id and project_id for
    # trust settings, or on setting type otherwise).
    backend_settings_types = [setting['type'] for setting in backend_settings]
    backend_settings_by_name = {backend_setting['name']: backend_setting for backend_setting in backend_settings}
    db_settings_types = [setting['type'] for setting in db_settings]
    db_settings_by_name = {db_setting['name']: db_setting for db_setting in db_settings}
    common_types = list(set(db_settings_types).intersection(set(backend_settings_types)))
    new_backend_settings_by_name = copy.deepcopy(backend_settings_by_name)
    if common_types:
        for key, value in backend_settings_by_name.items():
            if value['type'] in common_types:
                if value['type'] == 'trust_id':
                    process_trust_values(key, new_backend_settings_by_name, db_settings_by_name)
                else:
                    new_backend_settings_by_name.pop(key, None)

    new_backend_settings_by_name.update(db_settings_by_name)
    new_backend_settings = list(new_backend_settings_by_name.values())
    new_settings = json.dumps(new_backend_settings)
    backup_target.put_object(settings_path, new_settings)


@autolog.log_method(logger=Logger)
def upload_allowed_quota_db_entry(cntx, project_id):
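    """Merge the allowed-quota records from the DB with those stored on the
    backup target (grouped by project, DB values win) and upload the result
    as the allowed_quota_db object."""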
    (backup_target, path) = vault.get_allowed_quota_backup_target()
    # use an admin context since we want to fetch the allowed_quota of all projects
    admin_context = nova._get_tenant_context(cntx)
    admin_context.is_admin = True
    allowed_quota_db = db.get_allowed_quotas(
        admin_context, project_id=project_id
    )

    allowed_quota_json = jsonutils.dumps(allowed_quota_db)
    allowed_quota_path = os.path.join(str(CONF.cloud_unique_id), "allowed_quota_db")
    db_allowed_quotas = json.loads(allowed_quota_json)

    try:
        backend_allowed_quotas = json.loads(
            backup_target.get_object(allowed_quota_path)
        )
    except Exception:
        backend_allowed_quotas = []

    backend_allowed_quota_by_project = {}
    for backend_allowed_quota in backend_allowed_quotas:
        backend_allowed_quota_by_project.setdefault(
            backend_allowed_quota['project_id'], []).append(backend_allowed_quota)

    db_allowed_quota_by_project = {}
    if not db_allowed_quotas:
        db_allowed_quota_by_project[project_id] = []
    for db_allowed_quota in db_allowed_quotas:
        db_allowed_quota_by_project.setdefault(
            db_allowed_quota['project_id'], []).append(db_allowed_quota)

    new_backend_allowed_quota = []
    backend_allowed_quota_by_project.update(db_allowed_quota_by_project)
    for values in backend_allowed_quota_by_project.values():
        new_backend_allowed_quota.extend(values)
    new_allowed_quota = json.dumps(new_backend_allowed_quota)
    backup_target.put_object(allowed_quota_path, new_allowed_quota)


@autolog.log_method(logger=Logger)
def upload_workload_db_entry(cntx, workload_id):
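    """Upload the workload record (passwords masked) and its VM list as the
    workload_db and workload_vms_db objects on the workload's backup
    target."""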
    workload_db = db.workload_get(cntx, workload_id)
    backup_endpoint = db.get_metadata_value(workload_db.metadata,
                                            'backup_media_target')

    backup_target = vault.get_backup_target(backup_endpoint)
    parent = backup_target.get_workload_path({'workload_id': workload_id})

    for kvpair in workload_db.metadata:
        if 'Password' in kvpair['key'] or 'password' in kvpair['key']:
            kvpair['value'] = '******'
    workload_json = jsonutils.dumps(workload_db)
    path = os.path.join(parent, "workload_db")
    backup_target.put_object(path, workload_json)

    workload_vms_db = db.workload_vms_get(cntx, workload_id)
    workload_vms_json = jsonutils.dumps(workload_vms_db)
    path = os.path.join(parent, "workload_vms_db")
    backup_target.put_object(path, workload_vms_json)


def _async_put_object(backup_target, path, workload_json):
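    # NOTE: despite its name, this helper does not upload anything itself.
    # It stages the JSON payload in a temporary file and returns a
    # [tmpfile, destination_path] pair for pcopy.py to copy in bulk later.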
    fd, tmpfile = mkstemp(text=True)
    os.close(fd)
    with open(tmpfile, "w") as f:
        f.write(workload_json)

    return [tmpfile, path]


@autolog.log_method(logger=Logger)
def upload_snapshot_db_entry(cntx, snapshot_id, snapshot_status=None):
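    """Stage JSON dumps of the snapshot, its workload, VMs, network
    topology, and per-resource records in temp files, then copy them all to
    the backup target in a single pcopy.py pass. Does nothing if the
    snapshot's data has already been deleted."""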
    put_object_threads = []
    snapshot = db.snapshot_get(cntx, snapshot_id, read_deleted='yes')
    if snapshot['data_deleted']:
        return

    workload_id = snapshot['workload_id']
    workload_db = db.workload_get(cntx, workload_id)
    backup_endpoint = db.get_metadata_value(workload_db.metadata,
                                            'backup_media_target')

    backup_target = vault.get_backup_target(backup_endpoint)

    parent = backup_target.get_workload_path({'workload_id': workload_id})

    for kvpair in workload_db.metadata:
        if 'Password' in kvpair['key'] or 'password' in kvpair['key']:
            kvpair['value'] = '******'
    workload_json = jsonutils.dumps(workload_db)

    path = os.path.join(parent, "workload_db")
    put_object_threads.append(_async_put_object(backup_target, path, workload_json))

    workload_vms_db = db.workload_vms_get(cntx, snapshot['workload_id'])
    workload_vms_json = jsonutils.dumps(workload_vms_db)
    path = os.path.join(parent, "workload_vms_db")
    put_object_threads.append(_async_put_object(backup_target, path, workload_vms_json))

    snapshot_db = db.snapshot_get(cntx, snapshot['id'], read_deleted='yes')
    if snapshot_status:
        snapshot_db.status = snapshot_status
    snapshot_json = jsonutils.dumps(snapshot_db)
    parent = backup_target.get_snapshot_path({'workload_id': workload_id,
                                              'snapshot_id': snapshot['id']})
    path = os.path.join(parent, "snapshot_db")
    # Add to the vault
    put_object_threads.append(_async_put_object(backup_target, path, snapshot_json))

    # Dump network topology related data
    snap_network_resources = db.snapshot_network_resources_get(
        cntx, snapshot_id)

    net_topology_json = jsonutils.dumps(snap_network_resources)
    path = os.path.join(parent, "network_topology_db")
    put_object_threads.append(_async_put_object(backup_target, path, net_topology_json))

    snapvms = db.snapshot_vms_get(cntx, snapshot['id'])
    snapvms_json = jsonutils.dumps(snapvms)
    path = os.path.join(parent, "snapshot_vms_db")

    # Add to the vault
    put_object_threads.append(_async_put_object(backup_target, path, snapvms_json))

    resources = db.snapshot_resources_get(cntx, snapshot['id'])
    resources_json = jsonutils.dumps(resources)
    path = os.path.join(parent, "resources_db")
    put_object_threads.append(_async_put_object(backup_target, path, resources_json))

    for res in resources:
        res_json = jsonutils.dumps(res, sort_keys=True, indent=2)

        vm_res_id = 'vm_res_id_%s' % (res['id'])
        for meta in res.metadata:
            if meta.key == "label":
                vm_res_id = 'vm_res_id_%s_%s' % (res['id'], meta.value)
                break
        if res.resource_type == "network" or \
                res.resource_type == "subnet" or \
                res.resource_type == "router" or \
                res.resource_type == "nic":
            path = os.path.join(parent, "network", vm_res_id, "network_db")
            network = db.vm_network_resource_snaps_get(cntx, res.id)
            network_json = jsonutils.dumps(network)
            put_object_threads.append(_async_put_object(backup_target, path, network_json))
        elif res.resource_type == "disk":
            path = os.path.join(parent, "vm_id_" + res.vm_id,
                                vm_res_id.replace(' ', ''), "disk_db")
            disk = db.vm_disk_resource_snaps_get(cntx, res.id)
            disk_json = jsonutils.dumps(disk)
            put_object_threads.append(_async_put_object(backup_target, path, disk_json))
        elif res.resource_type == "security_group":
            path = os.path.join(
                parent,
                "security_group",
                vm_res_id,
                "security_group_db")
            security_group = db.vm_security_group_rule_snaps_get(cntx, res.id)
            security_group_json = jsonutils.dumps(security_group)
            put_object_threads.append(_async_put_object(backup_target, path, security_group_json))

    # Write the manifest of [tmpfile, path] pairs and hand it to pcopy.py,
    # which copies all staged objects to the backup target in one pass.
    fd, tmpfile = mkstemp(text=True)
    os.close(fd)

    with open(tmpfile, "w") as f:
        json.dump(put_object_threads, f)

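    # Append the manifest itself so it, too, is removed in the finally block.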
    put_object_threads.append([tmpfile, tmpfile])
    try:
        if subprocess.call([sys.executable, os.path.join(os.path.dirname(__file__), "pcopy.py"), tmpfile]):
            raise Exception("Cannot write snapshot record to backup target")
    finally:
        for tmpfile, x in put_object_threads:
            try:
                os.remove(tmpfile)
            except Exception as ex:
                LOG.exception(ex)


@autolog.log_method(logger=Logger)
def upload_config_workload_db_entry(cntx):
    try:
        config_workload_db = db.config_workload_get(cntx)
        backup_endpoint = config_workload_db['backup_media_target']
        backup_target = vault.get_backup_target(backup_endpoint)
        config_workload_storage_path = backup_target.get_config_workload_path()

        config_workload_json = jsonutils.dumps(config_workload_db)

        path = os.path.join(config_workload_storage_path, "config_workload_db")
        backup_target.put_object(path, config_workload_json)
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def upload_config_backup_db_entry(cntx, backup_id):
    try:
        config_db = db.config_backup_get(cntx, backup_id)
        config_workload_db = db.config_workload_get(cntx)
        backup_endpoint = config_workload_db['backup_media_target']

        backup_target = vault.get_backup_target(backup_endpoint)
        parent = config_db['vault_storage_path']

        config_json = jsonutils.dumps(config_db)
        path = os.path.join(parent, "config_backup_db")
        backup_target.put_object(path, config_json)
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def upload_policy_db_entry(cntx, policy_id):
    try:
        policy = db.policy_get(cntx, policy_id)
        backup_target, path = vault.get_settings_backup_target()
        policy_path = backup_target.get_policy_path()
        policy_path = os.path.join(policy_path, 'policy_' + str(policy_id))
        policy_json = jsonutils.dumps(policy)
        backup_target.put_object(policy_path, policy_json)
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def _remove_data(context, snapshot_id):
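    """Delete a snapshot's data from the backup target once the snapshot is
    marked 'deleted' in the DB and its data is still present, then set
    data_deleted on the DB record."""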
    snapshot_with_data = db.snapshot_get(
        context, snapshot_id, read_deleted='yes')
    if snapshot_with_data.status != 'deleted':
        return
    if snapshot_with_data.data_deleted:
        return
    try:
        LOG.info(_('Deleting the data of snapshot %s of workload %s') %
                 (snapshot_with_data.id, snapshot_with_data.workload_id))
        workload_obj = db.workload_get(context, snapshot_with_data.workload_id)
        backup_endpoint = db.get_metadata_value(workload_obj.metadata,
                                                'backup_media_target')

        backup_target = vault.get_backup_target(backup_endpoint)
        backup_target.snapshot_delete(context,
                                      {'workload_id': snapshot_with_data.workload_id,
                                       'workload_name': workload_obj.display_name,
                                       'snapshot_id': snapshot_with_data.id})
        db.snapshot_update(
            context, snapshot_with_data.id, {
                'data_deleted': True})
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def _snapshot_delete(context, snapshot_id, database_only=False):
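    """Mark the snapshot as deleted in the DB. Unless database_only is set,
    remove its data as well when every child snapshot is in the 'error' or
    'deleted' state, and re-upload the snapshot DB entry."""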
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    db.snapshot_delete(context, snapshot.id)

    child_snapshots = db.get_snapshot_children(context, snapshot_id)
    all_child_snapshots_deleted = True
    for child_snapshot_id in child_snapshots:
        try:
            child_snapshot = db.snapshot_get(
                context, child_snapshot_id, read_deleted='yes')
            if child_snapshot.status in ('error', 'deleted'):
                continue
            all_child_snapshots_deleted = False
            break
        except Exception as ex:
            LOG.exception(ex)
    if all_child_snapshots_deleted and database_only is False:
        _remove_data(context, snapshot_id)
    if database_only is False:
        upload_snapshot_db_entry(context, snapshot_id)


@autolog.log_method(logger=Logger)
def snapshot_delete(context, snapshot_id, database_only=False):
    """
    Delete an existing snapshot
    """
    _snapshot_delete(context, snapshot_id, database_only)

    child_snapshots = db.get_snapshot_children(context, snapshot_id)
    for child_snapshot_id in child_snapshots:
        try:
            child_snapshot = db.snapshot_get(
                context, child_snapshot_id, read_deleted='yes')
            if child_snapshot.status == 'deleted' and not child_snapshot.data_deleted:
                # now see if the data can be deleted
                _snapshot_delete(context, child_snapshot_id, database_only)
        except Exception as ex:
            LOG.exception(ex)

    parent_snapshots = db.get_snapshot_parents(context, snapshot_id)
    for parent_snapshot_id in parent_snapshots:
        try:
            parent_snapshot = db.snapshot_get(
                context, parent_snapshot_id, read_deleted='yes')
            if parent_snapshot.status == 'deleted' and not parent_snapshot.data_deleted:
                # now see if the data can be deleted
                _snapshot_delete(context, parent_snapshot_id, database_only)
        except Exception as ex:
            LOG.exception(ex)


@autolog.log_method(logger=Logger)
def delete_if_chain(context, snapshot, snapshots_to_delete):
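    """Group the workload's snapshots into full+incremental chains and, for
    every chain fully contained in snapshots_to_delete, delete each member
    from the DB and remove its data."""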
    try:
        snapshots_to_delete_ids = {
            snapshot_to_delete.id for snapshot_to_delete in snapshots_to_delete}

        snapshot_obj = db.snapshot_type_time_size_update(
            context, snapshot['id'])
        workload_obj = db.workload_get(context, snapshot_obj.workload_id)
        snapshots_all = db.snapshot_get_all_by_project_workload(
            context, context.project_id, workload_obj.id, read_deleted='yes')

        snap_chains = []
        snap_chain = set()
        snap_chains.append(snap_chain)
        for snap in reversed(snapshots_all):
            if snap.snapshot_type == 'full':
                snap_chain = set()
                snap_chains.append(snap_chain)
            snap_chain.add(snap.id)

        for snap_chain in snap_chains:
            if snap_chain.issubset(snapshots_to_delete_ids):
                for snap in snap_chain:
                    db.snapshot_delete(context, snap)
                for snap in snap_chain:
                    _remove_data(context, snap)

    except Exception as ex:
        LOG.exception(ex)


def download_snapshot_vm_from_object_store(
        context, restore_id, snapshot_id, snapshot_vm_id):
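    """Download a snapshot VM's data from the object store, including its
    data in all parent snapshots; returns the cumulative download time."""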
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    workload = db.workload_get(context, snapshot.workload_id)

    object_store_download_time = 0
    object_store_download_time += vault.download_snapshot_vm_from_object_store(
        context,
        {
            'restore_id': restore_id,
            'workload_id': snapshot.workload_id,
            'snapshot_id': snapshot.id,
            'snapshot_vm_id': snapshot_vm_id})

    parent_snapshots = db.get_snapshot_parents(context, snapshot_id)

    for parent_snapshot_id in parent_snapshots:
        parent_snapshot_vms = db.snapshot_vms_get(context, parent_snapshot_id)
        for parent_snapshot_vm in parent_snapshot_vms:
            if parent_snapshot_vm.vm_id == snapshot_vm_id:
                object_store_download_time += vault.download_snapshot_vm_from_object_store(
                    context,
                    {
                        'restore_id': restore_id,
                        'workload_id': snapshot.workload_id,
                        'workload_name': workload.display_name,
                        'snapshot_id': parent_snapshot_id,
                        'snapshot_vm_id': snapshot_vm_id})
    return object_store_download_time


def download_snapshot_vm_resource_from_object_store(
        context, restore_id, snapshot_id, snapshot_vm_resource_id):
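    """Download a snapshot VM resource from the object store, following the
    disk-snapshot backing chain through parent resources; returns the
    cumulative download time."""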
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    workload = db.workload_get(context, snapshot.workload_id)
    snapshot_vm_resource = db.snapshot_vm_resource_get(
        context, snapshot_vm_resource_id)
    snapshot_vm = db.snapshot_vm_get(
        context, snapshot_vm_resource.vm_id, snapshot.id)

    object_store_download_time = 0
    while snapshot_vm_resource:
        object_store_download_time += vault.download_snapshot_vm_resource_from_object_store(
            context,
            {
                'restore_id': restore_id,
                'workload_id': snapshot.workload_id,
                'workload_name': workload.display_name,
                'snapshot_id': snapshot_vm_resource.snapshot_id,
                'snapshot_vm_id': snapshot_vm_resource.vm_id,
                'snapshot_vm_name': snapshot_vm.vm_name,
                'snapshot_vm_resource_id': snapshot_vm_resource.id,
                'snapshot_vm_resource_name': snapshot_vm_resource.resource_name})
        vm_disk_resource_snaps = db.vm_disk_resource_snaps_get(
            context, snapshot_vm_resource.id)
        for vm_disk_resource_snap in vm_disk_resource_snaps:
            if vm_disk_resource_snap.vm_disk_resource_snap_backing_id:
                vm_disk_resource_snap_backing = db.vm_disk_resource_snap_get(
                    context, vm_disk_resource_snap.vm_disk_resource_snap_backing_id)
                if vm_disk_resource_snap_backing.snapshot_vm_resource_id != snapshot_vm_resource.id:
                    snapshot_vm_resource = db.snapshot_vm_resource_get(
                        context, vm_disk_resource_snap_backing.snapshot_vm_resource_id)
                    break
            else:
                snapshot_vm_resource = None
                break
    return object_store_download_time


def purge_snapshot_vm_from_staging_area(context, snapshot_id, snapshot_vm_id):
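    """Remove a snapshot VM's data (and its data in parent snapshots) from
    the backup target's staging area."""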
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    workload = db.workload_get(context, snapshot.workload_id)
    backup_endpoint = db.get_metadata_value(workload.metadata,
                                            'backup_media_target')

    backup_target = vault.get_backup_target(backup_endpoint)

    backup_target.purge_snapshot_vm_from_staging_area(
        context,
        {
            'workload_id': snapshot.workload_id,
            'snapshot_id': snapshot_id,
            'snapshot_vm_id': snapshot_vm_id})

    parent_snapshots = db.get_snapshot_parents(context, snapshot_id)

    for parent_snapshot_id in parent_snapshots:
        parent_snapshot_vms = db.snapshot_vms_get(context, parent_snapshot_id)
        for parent_snapshot_vm in parent_snapshot_vms:
            if parent_snapshot_vm.vm_id == snapshot_vm_id:
                backup_target.purge_snapshot_vm_from_staging_area(
                    context,
                    {
                        'workload_id': snapshot.workload_id,
                        'snapshot_id': parent_snapshot_id,
                        'snapshot_vm_id': snapshot_vm_id})


def purge_snapshot_vm_resource_from_staging_area(
        context, snapshot_id, snapshot_vm_resource_id):
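    """Remove a snapshot VM resource from the staging area, following the
    disk-snapshot backing chain through parent resources."""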
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    workload = db.workload_get(context, snapshot.workload_id)
    backup_endpoint = db.get_metadata_value(workload.metadata,
                                            'backup_media_target')

    backup_target = vault.get_backup_target(backup_endpoint)
    snapshot_vm_resource = db.snapshot_vm_resource_get(
        context, snapshot_vm_resource_id)
    snapshot_vm = db.snapshot_vm_get(
        context, snapshot_vm_resource.vm_id, snapshot.id)

    while snapshot_vm_resource:
        backup_target.purge_snapshot_vm_resource_from_staging_area(
            context,
            {
                'workload_id': snapshot.workload_id,
                'snapshot_id': snapshot_vm_resource.snapshot_id,
                'snapshot_vm_id': snapshot_vm_resource.vm_id,
                'snapshot_vm_name': snapshot_vm.vm_name,
                'snapshot_vm_resource_id': snapshot_vm_resource.id,
                'snapshot_vm_resource_name': snapshot_vm_resource.resource_name})
        vm_disk_resource_snap = db.vm_disk_resource_snap_get_top(
            context, snapshot_vm_resource.id)
        if vm_disk_resource_snap.vm_disk_resource_snap_backing_id:
            vm_disk_resource_snap = db.vm_disk_resource_snap_get(
                context, vm_disk_resource_snap.vm_disk_resource_snap_backing_id)
            snapshot_vm_resource = db.snapshot_vm_resource_get(
                context, vm_disk_resource_snap.snapshot_vm_resource_id)
        else:
            snapshot_vm_resource = None


def purge_restore_vm_from_staging_area(
        context, restore_id, snapshot_id, snapshot_vm_id):
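    """Remove a restored VM's data from the backup target's staging area.
    (Purging the parent snapshots is currently disabled; see the block
    below.)"""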
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    workload = db.workload_get(context, snapshot.workload_id)
    backup_endpoint = db.get_metadata_value(workload.metadata,
                                            'backup_media_target')

    backup_target = vault.get_backup_target(backup_endpoint)
    backup_target.purge_restore_vm_from_staging_area(
        context,
        {
            'restore_id': restore_id,
            'workload_id': snapshot.workload_id,
            'snapshot_id': snapshot_id,
            'snapshot_vm_id': snapshot_vm_id})
    """
    parent_snapshots = db.get_snapshot_parents(context, snapshot_id)
    for parent_snapshot_id in parent_snapshots:
        parent_snapshot_vms = db.snapshot_vms_get(context, parent_snapshot_id)
        for parent_snapshot_vm in parent_snapshot_vms:
            if  parent_snapshot_vm.vm_id == snapshot_vm_id:
                vault.purge_restore_vm_from_staging_area(context, { 'restore_id': restore_id,
                                                                    'workload_id': snapshot.workload_id,
                                                                    'snapshot_id': parent_snapshot_id,
                                                                    'snapshot_vm_id': snapshot_vm_id})
    """


def purge_restore_vm_resource_from_staging_area(
        context, restore_id, snapshot_id, snapshot_vm_resource_id):
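    """Remove a restored VM resource from the staging area, following the
    disk-snapshot backing chain through parent resources."""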
    snapshot = db.snapshot_get(context, snapshot_id, read_deleted='yes')
    workload = db.workload_get(context, snapshot.workload_id)
    backup_endpoint = db.get_metadata_value(workload.metadata,
                                            'backup_media_target')

    backup_target = vault.get_backup_target(backup_endpoint)
    snapshot_vm_resource = db.snapshot_vm_resource_get(
        context, snapshot_vm_resource_id)
    snapshot_vm = db.snapshot_vm_get(
        context, snapshot_vm_resource.vm_id, snapshot.id)

    while snapshot_vm_resource:
        backup_target.purge_restore_vm_resource_from_staging_area(
            context,
            {
                'restore_id': restore_id,
                'workload_id': snapshot.workload_id,
                'snapshot_id': snapshot_vm_resource.snapshot_id,
                'snapshot_vm_id': snapshot_vm_resource.vm_id,
                'snapshot_vm_name': snapshot_vm.vm_name,
                'snapshot_vm_resource_id': snapshot_vm_resource.id,
                'snapshot_vm_resource_name': snapshot_vm_resource.resource_name})
        vm_disk_resource_snap = db.vm_disk_resource_snap_get_top(
            context, snapshot_vm_resource.id)
        if vm_disk_resource_snap.vm_disk_resource_snap_backing_id:
            vm_disk_resource_snap = db.vm_disk_resource_snap_get(
                context, vm_disk_resource_snap.vm_disk_resource_snap_backing_id)
            snapshot_vm_resource = db.snapshot_vm_resource_get(
                context, vm_disk_resource_snap.snapshot_vm_resource_id)
        else:
            snapshot_vm_resource = None


def common_apply_retention_policy(cntx, instances, snapshot):
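    """Apply the workload's retention policy ('Number of Snapshots to Keep'
    or 'Number of days to retain Snapshots'). Returns (snapshot_to_commit,
    snapshots_to_delete, affected_snapshots, workload_obj, snapshot_obj,
    commit_flag); when the backup target does not support commit, eligible
    chains are deleted here and commit_flag is 0."""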
    def _delete_deleted_snap_chains(cntx, snapshot):
        try:
            snapshot_obj = db.snapshot_type_time_size_update(
                cntx, snapshot['id'])
            workload_obj = db.workload_get(cntx, snapshot_obj.workload_id)

            backup_endpoint = db.get_metadata_value(workload_obj.metadata,
                                                    'backup_media_target')
            backup_target = vault.get_backup_target(backup_endpoint)

            snapshots_all = db.snapshot_get_all_by_project_workload(
                cntx, cntx.project_id, workload_obj.id, read_deleted='yes')

            snap_chains = []
            snap_chain = []
            snap_chains.append(snap_chain)
            for snap in reversed(snapshots_all):
                if snap.snapshot_type == 'full':
                    snap_chain = []
                    snap_chains.append(snap_chain)
                snap_chain.append(snap)

            deleted_snap_chains = []
            for snap_chain in snap_chains:
                deleted_chain = True
                for snap in snap_chain:
                    if snap.status != 'deleted':
                        deleted_chain = False
                        break
                if deleted_chain:
                    deleted_snap_chains.append(snap_chain)

            for snap_chain in deleted_snap_chains:
                for snap in snap_chain:
                    if snap.deleted and not snap.data_deleted:
                        LOG.info(
                            _('Deleting the data of snapshot %s %s %s of workload %s') %
                            (snap.display_name,
                             snap.id,
                             snap.created_at.strftime("%d-%m-%Y %H:%M:%S"),
                             workload_obj.display_name))
                        db.snapshot_update(
                            cntx, snap.id, {
                                'data_deleted': True})
                        backup_target.snapshot_delete(cntx,
                                                      {'workload_id': snap.workload_id,
                                                       'workload_name': workload_obj.display_name,
                                                       'snapshot_id': snap.id})
        except Exception as ex:
            LOG.exception(ex)

    try:
        db.snapshot_update(
            cntx, snapshot['id'], {
                'progress_msg': 'Applying retention policy', 'status': 'executing'})
        _delete_deleted_snap_chains(cntx, snapshot)
        affected_snapshots = []
        snapshot_obj = db.snapshot_get(cntx, snapshot['id'])
        workload_obj = db.workload_get(cntx, snapshot_obj.workload_id)
        backup_endpoint = db.get_metadata_value(workload_obj.metadata,
                                                'backup_media_target')
        backup_target = vault.get_backup_target(backup_endpoint)

        jobschedule = pickle.loads(bytes(workload_obj.jobschedule, 'utf-8'))
        retention_policy_type = jobschedule['retention_policy_type']
        retention_policy_value = jobschedule['retention_policy_value']
        snapshots_to_keep = {'number': -1, 'days': -1}
        if retention_policy_type == 'Number of Snapshots to Keep':
            snapshots_to_keep['number'] = int(retention_policy_value)
            if snapshots_to_keep['number'] <= 0:
                snapshots_to_keep['number'] = 1
        elif retention_policy_type == 'Number of days to retain Snapshots':
            snapshots_to_keep['days'] = int(retention_policy_value)
            if snapshots_to_keep['days'] <= 0:
                snapshots_to_keep['days'] = 1

        snapshots_all = db.snapshot_get_all_by_project_workload(
            cntx, cntx.project_id, workload_obj.id, read_deleted='yes')
        snapshots_valid = []
        snapshots_valid.append(snapshot_obj)
        for snap in snapshots_all:
            if snapshots_valid[0].id == snap.id:
                continue
            if snap.status == 'available':
                snapshots_valid.append(snap)
            elif snap.status == 'deleted' and not snap.data_deleted:
                snapshots_valid.append(snap)

        snapshot_to_commit = None
        snapshots_to_delete = set()
        retained_snap_count = 0
        for idx, snap in enumerate(snapshots_valid):
            if snapshots_to_keep['number'] == -1:
                age_in_days = (timeutils.utcnow() - snap.created_at).days
                if age_in_days < snapshots_to_keep['days']:
                    retained_snap_count = retained_snap_count + 1
                else:
                    if snapshot_to_commit is None:
                        snapshot_to_commit = snapshots_valid[idx - 1]
                    snapshots_to_delete.add(snap)
            else:
                if retained_snap_count < snapshots_to_keep['number']:
                    if snap.status == 'deleted':
                        continue
                    else:
                        retained_snap_count = retained_snap_count + 1
                else:
                    if snapshot_to_commit is None:
                        snapshot_to_commit = snapshots_valid[idx - 1]
                    snapshots_to_delete.add(snap)

        if not backup_target.commit_supported():
            delete_if_chain(cntx, snapshot, snapshots_to_delete)
            return (snapshot_to_commit, snapshots_to_delete,
                    affected_snapshots, workload_obj, snapshot_obj, 0)

        return (snapshot_to_commit, snapshots_to_delete,
                affected_snapshots, workload_obj, snapshot_obj, 1)

    except Exception as ex:
        LOG.exception(ex)
        raise


def common_apply_retention_disk_check(
        cntx, snapshot_to_commit, snap, workload_obj):
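    """After committing snapshot_to_commit, check how many of snap's disk
    resources are deleted; drop the snapshot record accordingly and, once
    all disks are gone, delete the snapshot data from the backup target."""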
    def _snapshot_disks_deleted(snap):
        try:
            all_disks_deleted = True
            some_disks_deleted = False
            snapshot_vm_resources = db.snapshot_resources_get(cntx, snap.id)
            for snapshot_vm_resource in snapshot_vm_resources:
                if snapshot_vm_resource.resource_type != 'disk':
                    continue
                if snapshot_vm_resource.snapshot_type == 'full' and \
                        snapshot_vm_resource.status != 'deleted' and all_disks_deleted:
                    db.snapshot_vm_resource_delete(
                        cntx, snapshot_vm_resource.id)
                    continue
                if snapshot_vm_resource.status != 'deleted':
                    all_disks_deleted = False
                else:
                    some_disks_deleted = True
            return all_disks_deleted, some_disks_deleted
        except exception.SnapshotVMResourcesNotFound as ex:
            LOG.exception(ex)
            return False, True

    db.snapshot_type_time_size_update(cntx, snapshot_to_commit.id)
    backup_endpoint = db.get_metadata_value(workload_obj.metadata,
                                            'backup_media_target')
    backup_target = vault.get_backup_target(backup_endpoint)

    all_disks_deleted, some_disks_deleted = _snapshot_disks_deleted(snap)
    if some_disks_deleted:
        db.snapshot_delete(cntx, snap.id)
    if all_disks_deleted:
        db.snapshot_delete(cntx, snap.id)
        db.snapshot_update(cntx, snap.id, {'data_deleted': True})
        try:
            LOG.info(
                _('Deleting the data of snapshot %s %s %s of workload %s') %
                (snap.display_name,
                 snap.id,
                 snap.created_at.strftime("%d-%m-%Y %H:%M:%S"),
                 workload_obj.display_name))
            backup_target.snapshot_delete(cntx,
                                          {'workload_id': snap.workload_id,
                                           'workload_name': workload_obj.display_name,
                                           'snapshot_id': snap.id})
        except Exception as ex:
            LOG.exception(ex)


def common_apply_retention_snap_delete(cntx, snap, workload_obj):
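    """Delete a snapshot record during retention processing and, if not
    already done, its data on the backup target."""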
    db.snapshot_delete(cntx, snap.id)
    backup_endpoint = db.get_metadata_value(workload_obj.metadata,
                                            'backup_media_target')
    backup_target = vault.get_backup_target(backup_endpoint)
    if not snap.data_deleted:
        db.snapshot_update(cntx, snap.id, {'data_deleted': True})
        try:
            LOG.info(
                _('Deleting the data of snapshot %s %s %s of workload %s') %
                (snap.display_name,
                 snap.id,
                 snap.created_at.strftime("%d-%m-%Y %H:%M:%S"),
                 workload_obj.display_name))
            backup_target.snapshot_delete(cntx,
                                          {'workload_id': snap.workload_id,
                                           'workload_name': workload_obj.display_name,
                                           'snapshot_id': snap.id})
        except Exception as ex:
            LOG.exception(ex)


def common_apply_retention_db_backing_update(cntx, snapshot_vm_resource,
                                             vm_disk_resource_snap,
                                             vm_disk_resource_snap_backing,
                                             affected_snapshots):
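    """Collapse a committed backing disk snapshot into its dependent one:
    re-point vm_disk_resource_snap at the backing's own backing, copy the
    backing's size/snapshot_type/time_taken onto the resource, delete the
    backing records, and record the affected snapshot ids."""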
    vm_disk_resource_snap_values = {
        'size': vm_disk_resource_snap_backing.size,
        'vm_disk_resource_snap_backing_id': vm_disk_resource_snap_backing.vm_disk_resource_snap_backing_id}
    db.vm_disk_resource_snap_update(
        cntx,
        vm_disk_resource_snap.id,
        vm_disk_resource_snap_values)

    snapshot_vm_resource_backing = db.snapshot_vm_resource_get(
        cntx, vm_disk_resource_snap_backing.snapshot_vm_resource_id)
    snapshot_vm_resource_values = {
        'size': snapshot_vm_resource_backing.size,
        'snapshot_type': snapshot_vm_resource_backing.snapshot_type,
        'time_taken': snapshot_vm_resource_backing.time_taken}

    db.snapshot_vm_resource_update(
        cntx,
        snapshot_vm_resource.id,
        snapshot_vm_resource_values)
    db.vm_disk_resource_snap_delete(cntx, vm_disk_resource_snap_backing.id)
    db.snapshot_vm_resource_delete(
        cntx, vm_disk_resource_snap_backing.snapshot_vm_resource_id)
    snapshot_vm_resource_backing = db.snapshot_vm_resource_get(
        cntx, vm_disk_resource_snap_backing.snapshot_vm_resource_id)
    if snapshot_vm_resource_backing.snapshot_id not in affected_snapshots:
        affected_snapshots.append(snapshot_vm_resource_backing.snapshot_id)

    return affected_snapshots


@autolog.log_method(logger=Logger)
def _remove_config_backup_data(context, backup_id):
    try:
        LOG.info(_('Deleting the data of config backup %s ') % (backup_id))
        config_workload_obj = db.config_workload_get(context)
        backup_endpoint = config_workload_obj['backup_media_target']
        backup_target = vault.get_backup_target(backup_endpoint)
        backup_target.config_backup_delete(context, backup_id)
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def config_backup_delete(context, backup_id):
    """
    Delete an existing config backup
    """
    try:
        db.config_backup_delete(context, backup_id)
        _remove_config_backup_data(context, backup_id)
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def policy_delete(context, policy_id):
    """
    Delete an existing policy.
    """
    try:
        backup_target, path = vault.get_settings_backup_target()
        backup_target.policy_delete(context, policy_id)
    except Exception as ex:
        LOG.exception(ex)


@autolog.log_method(logger=Logger)
def get_compute_host(context):
    try:
        # Look for a contego node that is up
        compute_nodes = get_compute_nodes(context, up_only=True)
        if compute_nodes:
            # Service entries are dicts (see get_compute_nodes), so index by key.
            return compute_nodes[0]['host']
        else:
            message = "No compute node is up to validate database credentials."
            raise exception.ErrorOccurred(reason=message)
    except Exception as ex:
        raise ex


@autolog.log_method(logger=Logger)
def validate_database_creds(context, databases, trust_creds):
    try:
        host = get_compute_host(context)
        contego_service = contego.API(production=True)
        params = {'databases': databases,
                  'host': host, 'trust_creds': trust_creds}

        status = contego_service.validate_database_creds(context, params)
        if status['result'] != "success":
            message = "Please verify given database credentials."
            raise exception.ErrorOccurred(reason=message)
        else:
            return True
    except Exception as ex:
        raise ex


@autolog.log_method(logger=Logger)
def validate_trusted_user_and_key(context, trust_creds):
    try:
        host = get_compute_host(context)
        contego_service = contego.API(production=True)
        params = {'host': host, 'trust_creds': trust_creds}

        status = contego_service.validate_trusted_user_and_key(context, params)
        if status['result'] != "success":
            message = "Please verify, given trusted user should have passwordless sudo access using given private key."
            raise exception.ErrorOccurred(reason=message)
        else:
            return True
    except Exception as ex:
        raise ex


@autolog.log_method(logger=Logger)
def get_controller_nodes(context):
    try:
        contego_service = contego.API(production=True)
        result = contego_service.get_controller_nodes(context)
        return result['controller_nodes']
    except Exception as ex:
        raise ex


@autolog.log_method(logger=Logger)
def get_compute_nodes(context, host=None, up_only=False):
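    """Return the tvault-contego services registered with the datamover
    API; with up_only=True, only those whose state is 'up'."""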
    try:
        contego_nodes = []
        resp = {}
        admin_cntx = nova._get_tenant_context(context, cloud_admin=True)
        clients.initialise()
        contego_client = clients.Clients(admin_cntx).client("contego")
        service_info = contego_client.contego.get_service_list()
        if service_info and isinstance(service_info, tuple) and service_info[1].get('services'):
            resp['services'] = service_info[1]['services']
        else:
            resp['services'] = []
        for service in resp['services']:
            if service['binary'] != 'tvault-contego':
                continue
            if not up_only or service['state'] == 'up':
                contego_nodes.append(service)
        return contego_nodes
    except Exception as ex:
        raise ex


@autolog.log_method(logger=Logger)
def get_restore_options(
        name, desc, snapshot_obj, restore_type='selective',
        restore_topology=False):
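    """Build a restore-options dict (selective restore by default) covering
    every instance in the snapshot, carrying over flavor, availability
    zone, NICs, and volume types."""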
    try:
        restore_options = {
            'name': name,
            'description': desc,
            'oneclickrestore': False,
            'restore_type': restore_type,
            'type': 'openstack',
            'openstack': {
                'restore_topology': restore_topology,
                'instances': [],
                # TODO: Add support to populate 'networks_mapping' if needed.
                'networks_mapping': {
                    'networks': [],
                },
            },
        }

        for instance in snapshot_obj['instances']:
            # Find the flavor for this vm
            instance_name = instance['name']

            flavor = {
                "disk": instance['flavor']['disk'],
                "vcpus": instance['flavor']['vcpus'],
                "ram": instance['flavor']['ram'],
                "ephemeral": instance['flavor']['ephemeral'],
                "swap": ""
            }

            # TODO: check what should be availability_zone
            az = instance['metadata'].get('availability_zone', 'nova')
            vm_options = {
                'id': instance['id'],
                'include': True,
                'name': instance_name,
                'flavor': flavor,
                'availability_zone': az,
                'nics': list(instance.get('nics', [])),
                "vdisks": [],
            }

            # get new volume types
            for vdisk in instance['vdisks']:
                if not vdisk.get('volume_id', None):
                    continue

                # TODO: check the default value
                new_type = {
                    'id': vdisk['volume_id'],
                    'new_volume_type': vdisk.get('volume_type', None),
                    'availability_zone': vdisk.get('availability_zone', ''),
                }
                vm_options['vdisks'].append(new_type)
            restore_options['openstack']['instances'].append(vm_options)
        return restore_options
    except Exception as ex:
        raise ex