# vim: tabstop=4 shiftwidth=4 softtabstop=4


# Copyright (c) 2014 TrilioData, Inc.
# All Rights Reserved.

"""
Handles all requests relating to the workloadmgr service.
"""
import base64
import pickle
import importlib
import json
import os
import socket
import threading
import time
import uuid
import zlib
import functools
import re
import six
from pkg_resources import packaging, get_distribution

from cryptography.hazmat.primitives.asymmetric import utils as crypto_utils
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.primitives import hashes

from datetime import datetime
from munch import munchify
from oslo_messaging import exceptions as oslo_exceptions
from concurrent.futures import ThreadPoolExecutor
from oslo_config import cfg

from pyVmomi import vim, vmodl

from operator import itemgetter
from workloadmgr.apscheduler.triggers import WorkloadMgrTrigger
from workloadmgr.common import clients
from workloadmgr.common import context as wlm_context
from workloadmgr.openstack.common import log as logging
from workloadmgr.openstack.common import fileutils

from workloadmgr import utils
from workloadmgr.utils import (
    encrypt_password,
    tvault_key_file_name,
    get_vcenter_service_instance,
    calculate_difference_in_time,
    calculate_file_md5sum,
)
from workloadmgr.workloads import rpcapi as workloads_rpcapi
from workloadmgr.scheduler import rpcapi as scheduler_rpcapi
from workloadmgr.wlm_cron import rpcapi as cron_rpcapi
from workloadmgr.api.validation_models import (
    workload_quota_check, storage_quota_check,
    vms_quota_check, snapshot_quota_check
)
from workloadmgr.db import base
from workloadmgr import exception as wlm_exceptions
from workloadmgr import flags
from workloadmgr.compute import nova
from workloadmgr.volume import cinder
from workloadmgr.keymanager import barbican
from workloadmgr.image import glance
from workloadmgr.vault import vault
from workloadmgr.openstack.common import timeutils
from workloadmgr.workloads import workload_utils
from workloadmgr import auditlog
from workloadmgr import autolog
from workloadmgr import policy
from workloadmgr.db.sqlalchemy import models
from workloadmgr.db.sqlalchemy.session import get_session
from workloadmgr.common.workloadmgr_keystoneclient import KeystoneClient
from workloadmgr.api.validation_models import workloads as workload_validator
from keystoneauth1.exceptions.http import NotFound as KsNotFound
from keystoneauth1.exceptions.http import Unauthorized as KsUnauthorized
from keystoneauth1.exceptions.http import Forbidden as KsForbidden
from workloadmgr.openstack.common.gettextutils import _


workload_lock = threading.Lock()
migration_lock = threading.Lock()

migration_plan_vcenter_opts = [
    cfg.StrOpt('vcenter_url',
               default='vcenter',
               help='vCenter URL'),
    cfg.StrOpt('vcenter_username',
               default='administrator',
               help='vCenter administrator username.'),
    cfg.StrOpt('vcenter_password',
               default='password',
               help='vCenter password.'),
    cfg.BoolOpt('vcenter_nossl',
                default=False,
                help='Set to true to disable client side ssl '
                     'certificate verification.'),
    cfg.StrOpt('vcenter_cert_path',
               default="/etc/ssl/certs/ca-bundle.crt",
               help='vCenter SSL certs, required for connection.'),
    cfg.StrOpt('vcenter_thumbprint',
               default='00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00',
               help='vCenter thumbprint, required for the virt-v2v tool.'),
    cfg.IntOpt('max_retries',
               default=10,
               help='Max number of retries for any operation '
                    'related to the VMware VM migration feature.'),
    cfg.IntOpt('retry_interval',
               default=30,
               help='Seconds to wait before retrying.'),
    cfg.IntOpt('max_vast_retries',
               default=10,
               help='Max number of VAST retries for any operation '
                    'related to the VMware VM migration feature.'),
]
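
# Illustrative only: these options would normally be set in the service
# configuration file under the [vcenter_migration] group, e.g. (hostname
# and credentials below are placeholders, not real values):
#
#   [vcenter_migration]
#   vcenter_url = vcenter.example.com
#   vcenter_username = administrator@vsphere.local
#   vcenter_password = secret
#   vcenter_nossl = False
#   vcenter_cert_path = /etc/ssl/certs/ca-bundle.crt
#   max_retries = 10
#   retry_interval = 30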

FLAGS = flags.FLAGS
FLAGS.register_opts(migration_plan_vcenter_opts, group='vcenter_migration')

LOG = logging.getLogger(__name__)
Logger = autolog.Logger(LOG)
AUDITLOG = auditlog.getAuditLogger()

CONF = cfg.CONF

EULA_FILE_PATH = 'workloadmgrclient/EULA.txt'
total_storage_usage = None
total_storage_usage_refresh = datetime.now()

_job_scheduler = None


# do not decorate this function with autolog

def create_trust(cloud_admin=False):
    def create_trust_dec(func):
        def trust_create_wrapper(*args, **kwargs):
            # Clean up trust if the role is changed
            try:
                context = args[1]
                API_obj = args[0]
                trustee_roles = [role.strip() \
                    for role in vault.CONF.trustee_role.split(',')]

                trusts = API_obj.trust_list(context)
                for t in trusts:
                    for meta in t.metadata:
                        if meta.key == "role_name":
                            trust_details = args[0].trust_show(context, t.name)
                            trust_roles = [tr.get('name') \
                                for tr in trust_details.roles]
                            meta_val_list = [m.strip() \
                                for m in meta.value.split(',')]
                            if not set(trustee_roles).issubset(
                                    set(meta_val_list)) or \
                                    not set(trustee_roles).issubset(
                                    set(trust_roles)):
                                if set([vault.CONF.cloud_admin_role]).issubset(
                                    set(trust_roles)):
                                    continue
                                args[0].trust_delete(context, t.name)

                trusts = API_obj.trust_list(context)
                if context.roles and \
                        len(set(trustee_roles) & set(context.roles)) < 1:
                    for t in trusts:
                        API_obj.trust_delete(context, t.name)
                # create new trust if the trust is not created
                trusts = API_obj.trust_list(context)
                # if a trust does not exist in the DB for the given user_id & project_id, create it
                if not trusts:
                    if cloud_admin is True:
                        API_obj.trust_create(context, vault.CONF.cloud_admin_role, is_cloud_trust=True)
                    else:
                        API_obj.trust_create(context, vault.CONF.trustee_role)
                else:
                    if cloud_admin is True:
                        cntx = wlm_context.RequestContext(
                            trustor_user_id=context.user_id,
                            auth_token=context.auth_token,
                            tenant_id=context.project_id,
                            is_admin=True)
                    else:
                        cntx = wlm_context.RequestContext(
                            trustor_user_id=context.user_id,
                            auth_token=context.auth_token,
                            tenant_id=context.project_id,
                            is_admin=False)
                    clients.initialise()
                    keystoneclient = clients.Clients(cntx).client("keystone")
                    try:
                        os_trust_list = keystoneclient.client.trusts.get(trusts[0].value)
                    except KsNotFound:
                        # if the trust does not exist in OpenStack, remove it from the DB and create a new trust for the given user_id & project_id
                        for db_trust in trusts:
                            API_obj.trust_delete(context, db_trust.name)
                        if cloud_admin is True:
                            API_obj.trust_create(context, vault.CONF.cloud_admin_role, is_cloud_trust=True)
                        else:
                            API_obj.trust_create(context, vault.CONF.trustee_role)
            except Exception as ex:
                LOG.exception(ex)
                LOG.error(_("Trust is broken; assign a valid trustee role"))
                args[1].trust_failed = 'yes'

            return func(*args, **kwargs)
        return trust_create_wrapper
    return create_trust_dec
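
# Usage sketch (matches how create_trust is applied further down in this
# module): the decorator wraps an API method whose first two positional
# arguments are (self, context), e.g.
#
#   @create_trust(cloud_admin=True)
#   @wrap_check_policy
#   def workload_type_create(self, context, ...):
#       ...
#
# If trust repair fails, the wrapper sets context.trust_failed = 'yes' and
# still calls the wrapped method, which is expected to check for that flag.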


def parse_license_text(licensetext,
                       public_key=vault.CONF.triliovault_public_key):

    with open(public_key, 'rb') as f:
        key = load_pem_public_key(f.read(), backend=default_backend())

    if not isinstance(key, dsa.DSAPublicKey):
        raise wlm_exceptions.InternalError(
            "Invalid TrilioVault public key ",
            "Cannot validate license")

    if "License Key" not in licensetext:
        raise wlm_exceptions.InvalidLicense(
            message="Cannot find License Key in license file")

    try:
        licensekey = licensetext[licensetext.find(
            "License Key") + len("License Key"):].strip()
        license_pair_base64 = licensekey[0:licensekey.find('X02')]
        license_pair = base64.b64decode(license_pair_base64)
        ord_len = license_pair.find(b'\r')
        license_text_len = ord(license_pair[0:ord_len].decode('UTF-32BE'))
        license_text = license_pair[ord_len:license_text_len + ord_len]
        license_signature = license_pair[ord_len + license_text_len:]

        chosen_hash = hashes.SHA1()
        hasher = hashes.Hash(chosen_hash, default_backend())
        hasher.update(license_text)
        digest = hasher.finalize()
        try:
            key.verify(license_signature, digest, crypto_utils.Prehashed(chosen_hash))
            properties_text = zlib.decompress(license_text[5:]).decode('utf-8')
            license = {}
            for line in properties_text.split('\n'):
                if len(line.split("=")) != 2:
                    continue
                prop_name, prop_value = line.split("=")
                license[prop_name.strip()] = prop_value.strip()

            return license
        except Exception as ex:
            raise wlm_exceptions.InvalidLicense(
                message="Cannot verify the license signature: {}".format(ex))
    except BaseException:
        raise wlm_exceptions.InvalidLicense(
            message="Cannot verify the license signature")

# Note: Adding *args and **kwargs to provide backward compatibility
def validate_license_key(licensekey, *args, **kwargs):
    if not licensekey:
        raise wlm_exceptions.InvalidLicense(
            message="License does not exists")
    if datetime.now() > datetime.strptime(
            licensekey['LicenseExpiryDate'], "%Y-%m-%d"):
        raise wlm_exceptions.InvalidLicense(
            message="License expired. License expriration date '%s'. "
                    "Today is '%s'" % (licensekey['LicenseExpiryDate'],
                                       datetime.now().strftime("%Y-%m-%d")))
    message = "License is valid till {}".format(datetime.strptime(
        licensekey['LicenseExpiryDate'], "%Y-%m-%d"))

    return message
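
# Usage sketch (illustrative; the license file path is a placeholder):
# parse the license text against the configured TrilioVault public key and
# then validate the expiry date.
#
#   with open('/tmp/triliovault_license.txt') as lic:
#       license_props = parse_license_text(lic.read())
#   print(validate_license_key(license_props))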


class check_license(object):

    def __init__(self, f):
        """
        If there are no decorator arguments, the function
        to be decorated is passed to the constructor.
        """
        self.f = f

    def __call__(self, *args, **kwargs):
        """
        The __call__ method is not called until the
        decorated function is called.
        """
        apiclass = args[0]
        context = args[1]
        try:
            apiclass.get_usage_and_validate_against_license(
                context, method=self.f.__name__)
            return self.f(*args, **kwargs)
        except Exception as ex:
            LOG.exception(ex)
            raise


def upload_settings(func):
    def upload_settings_wrapper(*args, **kwargs):
        # Upload the settings DB entry after the wrapped call completes
        context = args[1]

        ret_val = func(*args, **kwargs)
        workload_utils.upload_settings_db_entry(context)
        return ret_val

    return upload_settings_wrapper


def import_backend_settings(func):
    def import_backend_settings_wrapper(*args, **kwargs):
        # import backend settings into DB
        context = args[1]
        workload_utils.import_backend_settings_to_db(context)
        ret_val = func(*args, **kwargs)
        return ret_val

    return import_backend_settings_wrapper


def wrap_check_policy(func):
    """
    Check policy corresponding to the wrapped methods prior to execution
    This decorator requires the first 2 args of the wrapped function
    to be (self, context)
    """

    @functools.wraps(func)
    def wrapped(self, context, *args, **kwargs):
        check_policy(context, func.__name__)
        context.project_only = 'yes'
        if 'snapshot' in func.__name__:
            context.project_only = 'no'
        return func(self, context, *args, **kwargs)

    return wrapped


def check_policy(context, action):
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    if 'workload' in action:
        _action = 'workload:%s' % action
    elif 'snapshot' in action:
        _action = 'snapshot:%s' % action
    elif 'restore' in action:
        _action = 'restore:%s' % action
    elif 'filesearch' in action:
        _action = 'filesearch:%s' % action
    else:
        _action = 'workload:%s' % action
    policy.enforce(context, _action, target)
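
# Example of the action-to-rule mapping above (illustrative):
#
#   check_policy(context, 'workload_create')
#   # -> policy.enforce(context, 'workload:workload_create',
#   #                   {'project_id': context.project_id,
#   #                    'user_id': context.user_id})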


class API(base.Base):

    """API for interacting with the Workload Manager."""

    # This singleton implementation is not thread safe
    # REVISIT: should the singleton be threadsafe

    __single = None  # the one, true Singleton

    @autolog.log_method(logger=Logger)
    def __new__(classtype, *args, **kwargs):
        # Check to see if a __single exists already for this class
        # Compare class types instead of just looking for None so
        # that subclasses will create their own __single objects
        if not isinstance(classtype.__single, classtype):
            classtype.__single = object.__new__(classtype)
        return classtype.__single
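
    # Singleton behaviour sketch (illustrative):
    #
    #   api_a = API()
    #   api_b = API()
    #   assert api_a is api_b   # both names refer to the same instance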

    @autolog.log_method(logger=Logger)
    def __init__(self, db_driver=None):
        if not hasattr(self, "workloads_rpcapi"):
            self.workloads_rpcapi = workloads_rpcapi.WorkloadMgrAPI()

        if not hasattr(self, "scheduler_rpcapi"):
            self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()

        if not hasattr(self, "cron_rpcapi"):
            self.cron_rpcapi = cron_rpcapi.CronAPI()

        super(API, self).__init__(db_driver)


    @autolog.log_method(logger=Logger)
    def _apply_workload_policy(self, context, policy_id, jobschedule, backup_target_types):
        try:
            policy = self.policy_get(context, policy_id)
            assignments = self.get_assigned_policies(
                context, context.project_id)

            available_policies = [
                assignment.policy_id for assignment in assignments]
            if len(available_policies) == 0:
                message = "No policy is assigned to project: %s" % (
                    context.project_id)
                raise wlm_exceptions.ErrorOccurred(message)
            if policy_id not in available_policies:
                message = "Given policy: %s not availabe for project: %s" % (
                    policy_id, context.project_id)
                raise wlm_exceptions.ErrorOccurred(message)

            exclude_fields = ['start_date', 'end_date', 'start_time']
            immutable_btt = self.is_btt_immutable(context, backup_target_types)
            for field_value in policy.field_values:
                if field_value['policy_field_name'] in exclude_fields:
                    continue
                jobschedule[field_value['policy_field_name']
                            ] = field_value['value']
            if immutable_btt:
                if jobschedule.get('hourly') and jobschedule['hourly'].get('interval'):
                    jobschedule['hourly']['interval'] = '24'
                for sch_field in ['daily', 'weekly', 'monthly', 'yearly']:
                    jobschedule.pop(sch_field, None)
               
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def search(self, context, data):
        if not re.match("^(/[^/ ]*)+/?$", data['filepath']):
            msg = _('Provide a valid Linux file path to search')
            raise wlm_exceptions.Invalid(reason=msg)
        vm_found = self.db.workload_vm_get_by_id(
            context, data['vm_id'], read_deleted='yes', workloads_filter='deleted')
        if len(vm_found) == 0:
            # Check in snapshot vms
            vm_found = self.db.snapshot_vm_get(context, data['vm_id'], None)
            if vm_found is None:
                msg = _('vm_id does not exist for this tenant')
                raise wlm_exceptions.Invalid(reason=msg)
            snapshot = self.db.snapshot_get(context, vm_found.snapshot_id)
            workload_id = snapshot.workload_id
        else:
            workload_id = vm_found[0].workload_id
        workload = self.db.workload_get(context, workload_id)
        if workload['status'] != 'available':
            msg = _('VM workload is not in the available state to perform a search')
            raise wlm_exceptions.Invalid(reason=msg)
        kwargs = {'vm_id': data['vm_id'], 'status': 'completed'}
        search_list = self.db.file_search_get_all(context, **kwargs)
        if len(search_list) > 0:
            msg = _('A search with this vm_id is already in execution')
            raise wlm_exceptions.Invalid(reason=msg)
        if data['date_from'] != '':
            try:
                datetime.strptime(data['date_from'], '%Y-%m-%dT%H:%M:%S')
            except BaseException:
                msg = _("Please provide "
                        "valid date_from in Format YYYY-MM-DDTHH:MM:SS")
                raise wlm_exceptions.Invalid(reason=msg)

            if data['date_to'] != '':
                try:
                    datetime.strptime(data['date_to'], '%Y-%m-%dT%H:%M:%S')
                except BaseException:
                    msg = _("Please provide "
                            "valid date_to in Format YYYY-MM-DDTHH:MM:SS")
                    raise wlm_exceptions.Invalid(reason=msg)
        if isinstance(data['snapshot_ids'], list):
            data['snapshot_ids'] = ",".join(data['snapshot_ids'])
        options = {'vm_id': data['vm_id'],
                   'project_id': context.project_id,
                   'user_id': context.user_id,
                   'filepath': data['filepath'],
                   'snapshot_ids': data['snapshot_ids'],
                   'start': data['start'],
                   'end': data['end'],
                   'date_from': data['date_from'],
                   'date_to': data['date_to'],
                   'status': 'executing', }
        search = self.db.file_search_create(context, options)
        self.scheduler_rpcapi.file_search(
            context, FLAGS.scheduler_topic, search['id'])
        return search
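
    # Example 'data' payload accepted by search() (values are illustrative):
    #
    #   {'vm_id': '<vm uuid>', 'filepath': '/var/log/*.log',
    #    'snapshot_ids': [], 'start': 0, 'end': 0,
    #    'date_from': '', 'date_to': ''}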

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def search_show(self, context, search_id):
        search = self.db.file_search_get(context, search_id)
        return search

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_get(self, context, workload_type_id):
        workload_type = self.db.workload_type_get(context, workload_type_id)
        workload_type_dict = dict(workload_type)
        metadata = {}
        for kvpair in workload_type.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        workload_type_dict['metadata'] = metadata
        return workload_type_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_show(self, context, workload_type_id):
        workload_type = self.db.workload_type_get(context, workload_type_id)
        workload_type_dict = dict(workload_type)
        metadata = {}
        for kvpair in workload_type.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        workload_type_dict['metadata'] = metadata
        return workload_type_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_get_all(self, context, search_opts={}):
        workload_types = self.db.workload_types_get(context)
        return workload_types

    @autolog.log_method(logger=Logger)
    @create_trust(cloud_admin=True)
    @wrap_check_policy
    def workload_type_create(self, context, id, name,
                             description, is_public, metadata):
        """
        Create a workload_type. No RPC call is made
        """
        if hasattr(context, 'trust_failed'):
            raise wlm_exceptions.Invalid(
                 reason=_('Trust broken: Cannot create cloud admin trust. '
                          'Please make sure the admin user has the %s role assigned' %
                          vault.CONF.cloud_admin_role))

        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            name +
            '\' Create Requested',
            None)
        options = {'user_id': context.user_id,
                   'project_id': context.project_id,
                   'id': id,
                   'display_name': name,
                   'display_description': description,
                   'is_public': is_public,
                   'status': 'available',
                   'metadata': metadata, }

        workload_type = self.db.workload_type_create(context, options)
        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            name +
            '\' Create Submitted',
            workload_type)
        return workload_type

    @autolog.log_method(logger=Logger)
    def workload_type_update_all(self, context, data):
        """
        update all workload_types. No RPC call is made
        """
        self.db.workload_type_update_all(context, data)
        workload_types = self.db.workload_types_get(context)
        return workload_types

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_delete(self, context, workload_type_id):
        """
        Delete a workload_type. No RPC call is made
        """
        workload_type = self.workload_type_get(context, workload_type_id)
        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            workload_type['display_name'] +
            '\' Delete Requested',
            workload_type)
        if workload_type['status'] not in ['available', 'error']:
            msg = _("WorkloadType status must be 'available' or 'error'")
            raise wlm_exceptions.InvalidState(reason=msg)

        # TODO(giri): check if this workload_type is referenced by other
        # workloads

        self.db.workload_type_delete(context, workload_type_id)
        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            workload_type['display_name'] +
            '\' Delete Submitted',
            workload_type)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_discover_instances(
            self, context, workload_type_id, metadata):
        """
        Discover Instances of a workload_type. RPC call is made
        """
        if not metadata:
            msg = _(
                'metadata field is null. Pass valid metadata to discover the workload')
            raise wlm_exceptions.Invalid(reason=msg)
        try:
            return self.workloads_rpcapi.workload_type_discover_instances(
                context, socket.gethostname(), workload_type_id, metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_topology(self, context, workload_type_id, metadata):
        """
        Topology of a workload_type. RPC call is made
        """
        if not metadata:
            msg = _(
                'metadata field is null. Pass valid metadata to discover the workload')
            raise wlm_exceptions.Invalid(reason=msg)

        try:
            return self.workloads_rpcapi.workload_type_topology(
                context, socket.gethostname(), workload_type_id, metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get(self, context, workload_id):
        kwargs = {}
        workload = self.db.workload_get(context, workload_id)
        workload_dict = dict(workload)

        workload_dict['storage_usage'] = {
            'usage': 0, 'full': {
                'snap_count': 0, 'usage': 0}, 'incremental': {
                'snap_count': 0, 'usage': 0}}
        for workload_snapshot in self.db.snapshot_get_all_by_workload(
                context, workload_id, **kwargs):
            if not workload_snapshot.data_deleted:
                if workload_snapshot.snapshot_type == 'incremental':
                    workload_dict['storage_usage']['incremental']['snap_count'] = \
                        workload_dict['storage_usage'][
                            'incremental']['snap_count'] + 1
                    workload_dict['storage_usage']['incremental']['usage'] = \
                        workload_dict['storage_usage']['incremental']['usage'] + \
                        workload_snapshot.size
                else:
                    workload_dict['storage_usage']['full']['snap_count'] = workload_dict['storage_usage']['full'][
                        'snap_count'] + 1
                    workload_dict['storage_usage']['full']['usage'] = workload_dict[
                        'storage_usage']['full']['usage'] + workload_snapshot.size
        workload_dict['storage_usage']['usage'] = workload_dict['storage_usage']['full']['usage'] + \
            workload_dict['storage_usage']['incremental']['usage']

        workload_vms = []
        for workload_vm_obj in self.db.workload_vms_get(context, workload.id):
            workload_vm = {
                'id': workload_vm_obj.vm_id,
                'name': workload_vm_obj.vm_name}
            metadata = {}
            for kvpair in workload_vm_obj.metadata:
                metadata.setdefault(kvpair['key'], kvpair['value'])
            workload_vm['metadata'] = metadata
            workload_vms.append(workload_vm)
        workload_dict['instances'] = workload_vms

        metadata = {}
        for kvpair in workload.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        metadata['backup_media_target'] = metadata.get(
            "backup_media_target", "NA")
        if context.is_admin is False:
            metadata.get("backup_media_target", None) and \
                metadata.pop("backup_media_target")

        workload_dict['metadata'] = metadata
        workload_dict['jobschedule'] = pickle.loads(bytes(workload.jobschedule, 'utf-8'))
        workload_dict['jobschedule']['enabled'] = False
        workload_dict['jobschedule'][
            'global_jobscheduler'] = self.workload_get_global_job_scheduler(context)
        # find the job object based on workload_id
        jobs = self.db.job_get_all(context)
        for job in jobs:
            if job.workload_id == workload_id:
                workload_dict['jobschedule']['enabled'] = True
                break

        return workload_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_show(self, context, workload_id):
        kwargs = {}
        workload = self.db.workload_get(context, workload_id, **kwargs)
        workload_dict = dict(workload)
        workload_dict['storage_usage'] = {
            'usage': 0, 'full': {
                'snap_count': 0, 'usage': 0}, 'incremental': {
                'snap_count': 0, 'usage': 0}}

        for workload_snapshot in self.db.snapshot_get_all_by_workload(
                context, workload_id, **kwargs):
            if workload_snapshot is None:
                msg = _("No snapshots found or the operation is not allowed")
                raise wlm_exceptions.ErrorOccurred(reason=msg)

            if not workload_snapshot.data_deleted:
                if workload_snapshot.snapshot_type == 'incremental':
                    workload_dict['storage_usage']['incremental']['snap_count'] = \
                        workload_dict['storage_usage'][
                            'incremental']['snap_count'] + 1
                    workload_dict['storage_usage']['incremental']['usage'] = \
                        workload_dict['storage_usage']['incremental']['usage'] + \
                        workload_snapshot.size
                else:
                    workload_dict['storage_usage']['full']['snap_count'] = workload_dict['storage_usage']['full'][
                        'snap_count'] + 1
                    workload_dict['storage_usage']['full']['usage'] = workload_dict[
                        'storage_usage']['full']['usage'] + workload_snapshot.size
        workload_dict['storage_usage']['usage'] = workload_dict['storage_usage']['full']['usage'] + \
            workload_dict['storage_usage']['incremental']['usage']

        workload_vms = []
        for workload_vm_obj in self.db.workload_vms_get(context, workload.id):
            workload_vm = {
                'id': workload_vm_obj.vm_id,
                'name': workload_vm_obj.vm_name}
            metadata = {}
            for kvpair in workload_vm_obj.metadata:
                metadata.setdefault(kvpair['key'], kvpair['value'])
            workload_vm['metadata'] = metadata
            workload_vms.append(workload_vm)
        workload_dict['instances'] = workload_vms

        metadata = {}
        metadata_type = self.db.workload_type_get(
            context, workload.workload_type_id).metadata
        for kvpair in workload.metadata:
            mtype = None
            for mtype in metadata_type:
                try:
                    if mtype['key'] == kvpair['key'] and json.loads(
                            mtype.value)['type'] == 'password':
                        break
                except BaseException as bex:
                    LOG.exception(bex)

            try:
                if mtype['key'] == kvpair['key'] and json.loads(
                        mtype.value)['type'] == 'password':
                    metadata.setdefault(kvpair['key'], "**********")
                else:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
            except BaseException:
                metadata.setdefault(kvpair['key'], kvpair['value'])
                pass

        metadata['backup_media_target'] = metadata.get(
            "backup_media_target", "NA")
        metadata['backup_target_type'] = metadata.get(
             "backup_target_types", "NA")
        workload_dict['metadata'] = metadata
        workload_dict['jobschedule'] = pickle.loads(bytes(workload.jobschedule, 'utf-8'))
        workload_dict['jobschedule']['enabled'] = False
        workload_dict['jobschedule'][
            'global_jobscheduler'] = self.workload_get_global_job_scheduler(context)

        # find the job object based on workload_id
        jobs = self.db.job_get_all(context)
        for job in jobs:
            if job.workload_id == workload_id:
                current_time = datetime.utcnow()
                workload_dict['jobschedule']['enabled'] = True
                next_run_time = WorkloadMgrTrigger(
                    workload_dict['jobschedule']
                ).get_next_fire_time(current_time)
                if isinstance(next_run_time, datetime):
                    time_diff = next_run_time - current_time
                    workload_dict['jobschedule'][
                        'nextrun'] = time_diff.total_seconds()
                else:
                    # if no "nextrun" then show that trigger is disabled
                    workload_dict['jobschedule']['enabled'] = False
                break
        return workload_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_all(self, context, search_opts={}):
        workloads = self.db.workload_get_all(context, **search_opts)
        return workloads

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_vms_get_all(self, context, search_opts=None):
        search_opts = search_opts or {}
        if not search_opts.get('project_list'):
            try:
                keystone_client = KeystoneClient(context)
                projects = keystone_client.client.client.projects.list()
                search_opts['project_list'] = [project.id for project in projects]
            except Exception as ex:
                LOG.exception(ex)
                # if an error occurs while listing projects, show protected VMs from the current user context
                search_opts['project_list'] = [context.project_id]
        workload_list_by_project = self.workload_get_all(context, search_opts={'project_list': search_opts['project_list']})
        search_opts['workload_list'] = [workload.id for workload in workload_list_by_project if workload.project_id in search_opts['project_list']]
        vms = []
        if search_opts['workload_list']:
            db_vms = self.db.workload_vms_get(context, None, **search_opts)
            vms = [{'id': vm.vm_id} for vm in db_vms]
        return {'protected_vms': vms}
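
    # Return shape (illustrative): {'protected_vms': [{'id': '<vm uuid>'}, ...]}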

    @autolog.log_method(logger=Logger)
    @create_trust()
    @check_license
    @wrap_check_policy
    def workload_create(self, context, name, description, workload_type_id,
                        source_platform, instances, jobschedule, metadata,
                        is_workload_encrypted=False, secret_uuid=None, availability_zone=None, backup_target_types=None):
        """
        Make the RPC call to create a workload.
        """
        try:
            allowed_quota_obj = workload_quota_check(
                context.project_id, self.db
            )
            if allowed_quota_obj \
                    and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                # TODO: check how to handle this warning at UI side
                AUDITLOG.log(
                    context,
                    "The current project's allowed workload quota has reached "
                    "the high watermark. Contact the admin.",
                    None)
                LOG.warning("The current project's allowed workload quota has "
                            "reached the high watermark. Contact the admin.")
        except ValueError as e:
            raise wlm_exceptions.QuotaLimitExceededError()
        except Exception as ex:
            LOG.exception(ex)
            raise
        try:
            allowed_quota_obj = vms_quota_check(
                context.project_id, self.db, len(instances)
            )
            if allowed_quota_obj \
                    and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                # TODO: check how to handle this warning at UI side
                AUDITLOG.log(
                    context,
                    "The current project's allowed VM quota has reached "
                    "the high watermark. Contact the admin.",
                    None)
                LOG.warning("The current project's allowed VM quota has "
                            "reached the high watermark. Contact the admin.")
        except ValueError as e:
            raise wlm_exceptions.QuotaLimitExceededError()
        except Exception as ex:
            LOG.exception(ex)
            raise

        try:
            AUDITLOG.log(
                context,
                'Workload \'' +
                name +
                '\' Create Requested',
                None)

            policy_id = metadata.get('policy_id', None)
            if policy_id is not None:
                self._apply_workload_policy(context, policy_id, jobschedule, backup_target_types)

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))

            compute_service = nova.API(production=True)
            instances_with_name = compute_service.get_servers(context)
            instance_ids = [x.id for x in instances_with_name]
            # TODO(giri): optimize this lookup

            if len(instances) == 0:
                raise wlm_exceptions.InvalidRequest(
                    reason="No instances found in the workload create request")

            for instance in instances:
                # Check whether given instance id exist or not.
                if not instance_ids or instance['instance-id'] not in instance_ids:
                    raise wlm_exceptions.InstanceNotFound(
                        instance_id=instance['instance-id'])
                for instance_with_name in instances_with_name:
                    if instance_with_name.tenant_id != context.project_id:
                        msg = _(
                            'Invalid instance as ' +
                            instance_with_name.name +
                            ' is not associated with your current tenant')
                        raise wlm_exceptions.Invalid(reason=msg)
                    if instance['instance-id'] == instance_with_name.id:
                        vm_found = self.db.workload_vm_get_by_id(
                            context, instance_with_name.id)
                        if isinstance(vm_found, list):
                            if len(vm_found) > 0:
                                msg = _(
                                    'Invalid instance as ' +
                                    instance_with_name.name +
                                    ' is already attached to another workload')
                                raise wlm_exceptions.Invalid(reason=msg)
                        else:
                            msg = _(
                                'Error processing instance ' +
                                instance_with_name.name)
                            raise wlm_exceptions.Invalid(reason=msg)
                        instance['instance-name'] = instance_with_name.name
                        if instance_with_name.metadata:
                            instance['metadata'] = instance_with_name.metadata
                            if 'imported_from_vcenter' in instance_with_name.metadata and \
                                    instance_with_name.metadata['imported_from_vcenter'] == 'True':
                                source_platform = "vmware"
                        break

            workload_type_id_valid = False
            workload_types = self.workload_type_get_all(context)
            for workload_type in workload_types:
                if workload_type_id is None and workload_type.display_name == 'Serial':
                    workload_type_id = workload_type.id
                    workload_type_id_valid = True
                    break
                if workload_type_id == workload_type.id:
                    workload_type_id_valid = True
                    break

            if not workload_type_id_valid:
                msg = _('Invalid workload type')
                raise wlm_exceptions.Invalid(reason=msg)

            if str(jobschedule['enabled']).lower() in ('true', '1'):
                jobschedule['enabled'] = True
            elif str(jobschedule['enabled']).lower() in ('false', '0'):
                jobschedule['enabled'] = False

            # validate start_time, start_date and end_date of jobscheduler
            jobschedule = workload_validator.WorkloadValidator(context=context, body={})._validate_jobschedule_date_time(jobschedule)
            if 'hostnames' not in metadata:
                metadata['hostnames'] = json.dumps([])

            if 'preferredgroup' not in metadata:
                metadata['preferredgroup'] = json.dumps([])
   
            if 'backup_target_types' not in metadata:
                if not backup_target_types:
                    btt_ref = self.db.get_default_backup_target_type(context)
                    backup_target_types = btt_ref[0].name
                metadata['backup_target_types'] = backup_target_types.strip() if not workload_utils.is_valid_uuid(backup_target_types) \
                                                  else self.db.backup_target_type_show(context, backup_target_types).name.strip()

            if 'backup_media_target' not in metadata:
                metadata['backup_media_target'] = vault.get_backup_target_by_backup_target_type(backup_target_types.strip()).backup_endpoint

            # validate if BT is available
            vault.get_backup_target_by_backup_target_type(metadata['backup_target_types'])

            # check if BT is immutable and update retention_days_to_keep property when it's triggered from CLI
            if self.is_btt_immutable(context, metadata['backup_target_types']):
                if not jobschedule.get("retentionmanual"):
                    if jobschedule["manual"].get("retention_days_to_keep"):
                        jobschedule["retentionmanual"] = {
                                "retentionmanual": jobschedule["manual"]["retention_days_to_keep"]
                                }
                        jobschedule["manual"].pop("retention_days_to_keep", None)
                    else:
                        jobschedule["retentionmanual"] = {
                                "retentionmanual": 30
                                }

            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'display_name': name,
                       'display_description': description,
                       'status': 'creating',
                       'source_platform': source_platform,
                       'workload_type_id': workload_type_id,
                       'metadata': metadata,
                       'encryption': is_workload_encrypted,
                       'secret_uuid': secret_uuid,
                       'jobschedule': str(pickle.dumps(jobschedule, 0), 'utf-8'),
                       'host': socket.gethostname(), }
            workload = self.db.workload_create(context, options)

            for instance in instances:
                values = {'workload_id': workload.id,
                          'vm_id': instance['instance-id'].strip(),
                          'vm_name': instance['instance-name'],
                          'status': 'available',
                          'metadata': instance.get('metadata', {})}
                vm = self.db.workload_vms_create(context, values)

            if is_workload_encrypted:
                barbican_service = barbican.API()
                body = {
                        'metadata':{
                            'workload_id': workload.id,
                            'workload_name': workload.name
                            }
                    }
                res = barbican_service.update_secret_metadata(
                        context,
                        secret_uuid,
                        body)

            self.scheduler_rpcapi.workload_create(context,
                                                  FLAGS.scheduler_topic,
                                                  workload['id'])

            # Now register the job with the job scheduler.
            # Here is the catch: the workload may not have been fully created
            # yet, so the job callback should only start creating snapshots
            # once the workload has been successfully created. If the workload
            # errored during creation, the job should remove itself from the
            # job queue. If we fail to schedule the job, should we fail the
            # workload create request?
            # _snapshot_create_callback([], kwargs={  'workload_id':workload.id,
            #                                        'user_id': workload.user_id,
            #                                        'project_id':workload.project_id})
            #
            #               jobschedule = {'start_date': '06/05/2014',
            #                              'end_date': '07/05/2015',
            #                              'interval': '1 hr',
            #                              'start_time': '2:30 PM',
            #                              'retention_policy_type': 'Number of Snapshots to Keep',
            #                              'retention_policy_value': '30'}

            self.cron_rpcapi.workload_add_scheduler_job(context, jobschedule, workload)

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' Create Submitted',
                workload)
            return workload
        except Exception as ex:
            LOG.exception(ex)
            if 'workload' in locals():
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else {}})
            raise


    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def workload_modify(self, context, workload_id, workload, is_admin_dashboard=False):
        """
        Make the RPC call to modify a workload.
        """
        def _should_revisit_jobschedule(src_jobschedule, dest_jobschedule):
            if src_jobschedule['enabled'] == dest_jobschedule['enabled']:
                check_list = ['interval', 'end_date', 'start_date', 'start_time']
                return any(src_jobschedule.get(check, None) != dest_jobschedule.get(check, None)
                           for check in check_list if check in src_jobschedule)
            return False
        workloadobj = self.workload_get(context, workload_id)

        # make sure the BT is available
        vault.get_backup_target_by_backup_target_type(workloadobj['metadata']['backup_target_types'])

        no_of_new_instances = len(workload.get('instances', [])) - len(workloadobj.get('instances', []))
        if no_of_new_instances and no_of_new_instances > 0:
            try:
                allowed_quota_obj = vms_quota_check(
                    context.project_id, self.db, no_of_new_instances
                )
                if allowed_quota_obj \
                        and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                    # TODO: check how to handle this warning at UI side
                    AUDITLOG.log(
                        context,
                        "The current project's allowed VM quota has reached "
                        "the high watermark. Contact the admin.",
                        None)
                    LOG.warning("The current project's allowed VM quota has "
                                "reached the high watermark. Contact the admin.")
            except ValueError as e:
                raise wlm_exceptions.QuotaLimitExceededError()
            except Exception as ex:
                LOG.exception(ex)
                raise

        AUDITLOG.log(
            context,
            'Workload \'' +
            workloadobj['display_name'] +
            '\' Modify Requested',
            None)

        purge_metadata = False
        pause_workload = False
        unpause_workload = False
        options = {}

        if hasattr(context, 'trust_failed'):
            raise wlm_exceptions.Invalid(
                reason=_('Trust broken: Cannot edit workload. ' +
                         'Expected role "%s" is not found. ' % vault.CONF.trustee_role +
                         'Please make sure user has the trustee role assigned'))
        # Always update the user_id from the context. This handles the case
        # where a user creates the workload, that user gets deleted, and a new
        # user is assigned to take up the role. The old workload job
        # schedulers won't run because they are executed based on the user_id
        # saved in the workload record. By forcing a user_id update, the new
        # user can switch the job scheduler to generate tokens based on their
        # trust. If the call comes from the Admin panel, don't update the
        # user_id, as that would break the scheduler trust.
        if is_admin_dashboard is False:
            options['user_id'] = context.user_id
        if 'name' in workload and workload['name']:
            options['display_name'] = workload['name']

        if 'description' in workload and workload['description']:
            options['display_description'] = workload['description']

        assignments = self.get_assigned_policies(context, context.project_id)
        available_policies = [assignment.policy_id for assignment in assignments]

        if len(available_policies) > 0 and 'policy_id' not in workload.get('metadata', {})\
                                      and len(workload.get('jobschedule', {})) > 0:

            fields = self.db.policy_fields_get_all(context)
            policy_fields = [f.field_name for f in fields]

            if len(set(workload.get('jobschedule', {}).keys()).intersection(set(policy_fields))) > 0:
                msg = "Can not update policy fields settings when policies are "\
                    "applied on project, please use available policies: %s" %(available_policies)
                raise wlm_exceptions.ErrorOccurred(reason=msg)

        if 'metadata' in workload and workload['metadata']:
            purge_metadata = True
            options['metadata'] = workload['metadata']
            if 'policy_id' in workload['metadata'] and workload['metadata']['policy_id'] is not None:
                policy_id = workload['metadata']['policy_id']
                if policy_id not in available_policies:
                    message = "Policy %s is not assigned to project %s" % (
                        policy_id, context.project_id)
                    raise wlm_exceptions.ErrorOccurred(message)

                if 'jobschedule' not in workload:
                    workload['jobschedule'] = {}
                self._apply_workload_policy(
                    context, policy_id, workload['jobschedule'], workloadobj['metadata']['backup_target_types'])

        if 'jobschedule' in workload and workload['jobschedule']:
            # timezone conversion only when user provided start_date or start_time
            flag_timezone_convert = True if (workload['jobschedule'].get('start_date') or workload['jobschedule'].get('start_time')) else False

            if 'start_time' not in workload['jobschedule']:
                workload['jobschedule']['start_time'] = workloadobj[
                    'jobschedule']['start_time']

            if 'enabled' not in workload['jobschedule']:
                workload['jobschedule']['enabled'] = workloadobj[
                    'jobschedule']['enabled']

            if 'start_date' not in workload['jobschedule']:
                workload['jobschedule']['start_date'] = workloadobj[
                    'jobschedule']['start_date']

            if str(workload['jobschedule']['enabled']).lower() in ('true', '1'):
                workload['jobschedule']['enabled'] = True
            elif str(workload['jobschedule']['enabled']).lower() in ('false', '0'):
                workload['jobschedule']['enabled'] = False

            sch_key = {
                    "hourly": [ "interval", "retention", "snapshot_type"],
                    "daily": ["backup_time", "retention", "snapshot_type"],
                    "weekly": [ "backup_day", "retention", "snapshot_type"],
                    "monthly": ["month_backup_day", "retention", "snapshot_type"],
                    "yearly": ["backup_month", "retention", "snapshot_type"],
                    "retentionmanual": ["retentionmanual"],
                    "manual": ["retention"]}

            # in case retention_days_to_keep is provided from CLI
            if workload['jobschedule'].get('manual') and workload['jobschedule']["manual"].get("retention_days_to_keep"):
                workload['jobschedule']["retentionmanual"] = {
                            "retentionmanual": workload['jobschedule']["manual"]["retention_days_to_keep"]
                            }
                workload['jobschedule']["manual"].pop("retention_days_to_keep", None)

            if not workload['jobschedule']['enabled'] and not workloadobj['jobschedule']['enabled']:
                # If the job scheduler is off but retentionmanual exists in the DB (immutable BT)
                # and the user doesn't provide retentionmanual, use the value from the DB
                if "retentionmanual" not in workload['jobschedule'] and workloadobj['jobschedule'].get("retentionmanual"):
                    workload['jobschedule']["retentionmanual"] = workloadobj['jobschedule']["retentionmanual"]
                # If the job scheduler is off and the user doesn't provide manual, use the value from the DB
                if "manual" not in workload['jobschedule'] and workloadobj['jobschedule'].get("manual"):
                    workload['jobschedule']["manual"] = workloadobj['jobschedule']["manual"]
            else:
                for k in sch_key.keys():
                    if k not in workload['jobschedule']:
                        workload['jobschedule'][k] = workloadobj['jobschedule'].get(k)

            if workloadobj['jobschedule']['enabled'] != workload['jobschedule'][
                    'enabled'] and workloadobj['jobschedule']['enabled']:
                pause_workload = True

            if workloadobj['jobschedule']['enabled'] != workload['jobschedule']['enabled'] and workload['jobschedule']['enabled']:
                unpause_workload = True

            # validate start_time, start_date and end_date of the job scheduler only when not triggered from the backups admin panel
            if not is_admin_dashboard:
                workload['jobschedule'] = workload_validator.WorkloadValidator(context=context, body={})._validate_jobschedule_date_time(workload['jobschedule'], workloadobj['jobschedule'], flag_timezone_convert)

            if workload['jobschedule']['enabled'] and \
               _should_revisit_jobschedule(workload['jobschedule'], workloadobj['jobschedule']):
                pause_workload = True
                unpause_workload = True

            options['jobschedule'] = str(pickle.dumps(workload['jobschedule'], 0), 'utf-8')

        if 'instances' in workload and workload['instances']:
            compute_service = nova.API(production=True)
            instances = workload['instances']
            instances_with_name = compute_service.get_servers(context)
            for instance in instances:
                if not isinstance(instance, dict) or \
                        'instance-id' not in instance:
                    msg = _(
                        "Each entry in the workload 'instances' list must be a "
                        "dictionary with an 'instance-id' key")
                    raise wlm_exceptions.Invalid(reason=msg)

                found = False
                for existing_instance in instances_with_name:
                    if existing_instance.tenant_id != context.project_id:
                        msg = _(
                            'Invalid instance as ' +
                            existing_instance.name +
                            ' is not associated with your current tenant')
                        raise wlm_exceptions.Invalid(reason=msg)
                    if instance['instance-id'] == existing_instance.id:
                        vm_found = self.db.workload_vm_get_by_id(
                            context, existing_instance.id)
                        if isinstance(vm_found, list):
                            if len(vm_found) > 0 and \
                                    vm_found[0].workload_id != workload_id:
                                msg = _("Invalid instance as " +
                                        existing_instance.name +
                                        " already part of workload '%s'" %
                                        (vm_found[0].workload_id))
                                raise wlm_exceptions.Invalid(reason=msg)
                        else:
                            msg = _(
                                'Error processing instance ' +
                                existing_instance.name)
                            raise wlm_exceptions.Invalid(reason=msg)

                        instance['instance-name'] = existing_instance.name
                        instance['metadata'] = existing_instance.metadata
                        found = True
                        break

                if not found:
                    msg = _(
                        "Workload definition contains instance id that cannot be "
                        "found in the cloud")
                    raise wlm_exceptions.Invalid(reason=msg)

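            # Detach the VMs currently assigned to this workload: clear their workload metadata in Nova and remove the DB mappings.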
            for vm in self.db.workload_vms_get(context, workload_id):
                try:
                    compute_service.delete_meta(context, vm.vm_id,
                                                ['workload_id', 'workload_name'])
                    self.db.workload_vms_delete(context, vm.vm_id, workload_id)
                except nova.nova_exception.NotFound as ex:
                    LOG.info('Instance ID:{} does not exist. Error: {}'.format(vm.vm_id, ex))
                    self.db.workload_vms_delete(context, vm.vm_id, workload_id)

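            # Re-create the workload VM records and tag each instance in Nova with the workload id and name.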
            for instance in instances:
                values = {'workload_id': workload_id,
                          'vm_id': instance['instance-id'],
                          'metadata': instance['metadata'],
                          'status': 'available',
                          'vm_name': instance['instance-name']}
                vm = self.db.workload_vms_create(context, values)
                compute_service.set_meta_item(
                    context, vm.vm_id, 'workload_id', workload_id)
                compute_service.set_meta_item(
                    context, vm.vm_id, 'workload_name', workloadobj['display_name'])

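        # Persist the update, pausing the scheduler job first if required and resuming it afterwards.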
        try:
            if pause_workload is True:
                self.workload_pause(context, workload_id)
            workload_obj = self.db.workload_update(
                context, workload_id, options, purge_metadata)
            if unpause_workload is True:
                self.workload_resume(context, workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=_("Error Modifying workload"))

        workload_utils.upload_workload_db_entry(context, workload_id)

        AUDITLOG.log(
            context,
            'Workload \'' +
            workload_obj['display_name'] +
            '\' Modify Submitted',
            workload_obj)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_delete(self, context, workload_id, database_only=False):
        """
        Delete a workload. A database-only delete is handled inline;
        otherwise the delete is dispatched to the scheduler service.
        """
        try:
            if context.is_admin is False and database_only is True:
                raise wlm_exceptions.AdminRequired()

            workload = self.workload_get(context, workload_id)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Delete Requested',
                workload)
            if workload['status'] not in ['available', 'error']:
                msg = _("Workload status must be 'available' or 'error'")
                raise wlm_exceptions.InvalidState(reason=msg)

            '''
            workloads = self.db.workload_get_all(context)
            for workload in workloads:
                if workload.deleted:
                    continue
                workload_type = self.db.workload_type_get(context, workload.workload_type_id)
                if (workload_type.display_name == 'Composite'):
                    for kvpair in workload.metadata:
                        if kvpair['key'] == 'workloadgraph':
                            graph = json.loads(kvpair['value'])
                            for flow in graph['children']:
                                for member in flow['children']:
                                    if 'type' in member:
                                        if member['data']['id'] == workload_id:
                                            msg = _(
                                                'Operation not allowed since this workload is a member of a composite workflow')
                                            raise wlm_exceptions.InvalidState(reason=msg)
            '''
            jobs = self.db.job_get_all(context)

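            # A database-only delete removes the workload and its snapshots from the database directly; otherwise the delete is handed off to the scheduler service.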
            if database_only is True:
                self.db.workload_update(
                    context, workload_id, {
                        'status': 'deleting'})

                # Remove workload entry from workload_vm's
                compute_service = nova.API(production=True)
                workload_vms = self.db.workload_vms_get(context, workload_id)
                for vm in workload_vms:
                    try:
                        compute_service.delete_meta(
                            context, vm.vm_id, [
                                "workload_id", 'workload_name'])
                        self.db.workload_vms_delete(context, vm.vm_id, workload_id)
                    except nova.nova_exception.NotFound as ex:
                        LOG.info('Instance ID:{} does not exist. Error: {}'.format(vm.vm_id, ex))
                        self.db.workload_vms_delete(context, vm.vm_id, workload_id)

                # Remove all snapshots from workload
                snapshots = self.db.snapshot_get_all_by_workload(
                    context, workload_id)
                for snapshot in snapshots:
                    workload_utils.snapshot_delete(
                        context, snapshot.id, database_only)

                # remove the scheduled jobs for workload
                for job in jobs:
                    if job.workload_id == workload_id:
                        self.cron_rpcapi.workload_pause(context, workload_id)
                        break

                self.db.workload_delete(context, workload_id)

            else:
                snapshots = self.db.snapshot_get_all_by_project_workload(
                    context, context.project_id, workload_id)
                if len(snapshots) > 0:
                    msg = _(
                        'This workload contains snapshots. Please delete all snapshots and try again.')
                    raise wlm_exceptions.InvalidState(reason=msg)

                self.db.workload_update(
                    context, workload_id, {
                        'status': 'deleting'})

                # remove the scheduled jobs for workload
                for job in jobs:
                    if job.workload_id == workload_id:
                        self.cron_rpcapi.workload_pause(context, workload_id)
                        break

                self.scheduler_rpcapi.workload_delete(
                    context, FLAGS.scheduler_topic, workload_id)

            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Delete Submitted',
                workload)
        except wlm_exceptions.InvalidState as ex:
            LOG.exception(str(ex))
            err_msg = 'Delete operation failed, ' + str(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))
        except Exception as ex:
            LOG.exception(ex)
            err_msg = 'Delete operation failed, ' + str(ex)
            if type(ex) == KsUnauthorized:
                err_msg = err_msg + '\nTry creating wlm trust for user %s ' \
                    'and retry this operation' %(context.user_id)
            self.db.workload_update(
                    context, workload_id, {
                        'error_msg':err_msg,
                        'status': 'error'})
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_reset(self, context, workload_id):
        """
        Reset a workload. When a workload is reset, any overlay files that were
        created as part of the snapshot operation are committed back to the
        original files.
        """
        try:
            workload = self.workload_get(context, workload_id)
            display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Reset Requested',
                workload)
            if workload['status'] not in ['available', 'resetting']:
                msg = _("Workload status must be 'available'")
                raise wlm_exceptions.InvalidState(reason=msg)

            self.db.workload_update(
                context, workload_id, {
                    'status': 'resetting'})
            self.scheduler_rpcapi.workload_reset(
                context, FLAGS.scheduler_topic, workload_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Reset Submitted',
                workload)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_import_workloads_list(self, context, project_id=None, backup_targets=[]):
        try:
            AUDITLOG.log(context, 'Get Import Workloads List Requested', None)
            if not context.is_admin:
                raise wlm_exceptions.AdminRequired()

            if backup_targets:
                # eliminate duplicate backup targets
                backup_targets = list(set(backup_targets))

            # validate all BTs
            for bt_id in backup_targets:
                try:
                    bt_obj = self.db.backup_target_show(context, bt_id)
                    vault.get_backup_target(bt_obj.filesystem_export)
                except (AttributeError, wlm_exceptions.BackupTargetNotFound) as ex:
                    LOG.error(ex)
                    raise wlm_exceptions.BackupTargetNotFound(backup_target_id=bt_id)
            if project_id:
                if not isinstance(project_id, list):
                    project_id_list = [project_id]
                else:
                    project_id_list = project_id
            else:
                keystone_client = KeystoneClient(context)
                projects = keystone_client.client.get_project_list_for_import(
                               context)
                project_id_list = [project.id for project in projects]


            # create job for listing importable wl operation
            values = {"status": "created", "action": "list_importable_workloads"}
            jobid = self.db.import_job_create(context, values)
            values.update({'project_id': project_id_list, 'jobid': jobid, 'backup_targets': backup_targets})
            self.scheduler_rpcapi.get_importable_workload_list(context, FLAGS.scheduler_topic, values)
            self.db.import_job_update(context, jobid, {'status': 'in-progress'})
            LOG.info('Listing Importable Workloads Job Submitted Successfully, job-id: {0}'.format(jobid))
            return jobid
        except Exception as ex:
            LOG.exception(ex)
            raise ex


    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def import_workloads(self, context, workload_ids, source_btt, source_btt_all, target_btt, upgrade):
        """ Workload Import related method
            Args:
                context: RequestContext
                workload_ids: list
                upgrade: bool

            Returns:
                jobid: uuid

            Raises:
                wlm_exceptions.Duplicate: If Duplicate WL Import.
                wlm_exceptions.AdminRequired: If context is non-admin.
        """
        try:
            LOG.info('Import Workloads Requested')
            if context.is_admin is not True and upgrade is True:
                raise wlm_exceptions.AdminRequired()

            if len(workload_ids) != len(set(workload_ids)):
                raise wlm_exceptions.Duplicate()

            if target_btt:
                # check if provided target BTT exists
                self.db.backup_target_type_show(context, target_btt)

            workloads = []
            source_bt_all = None    # holds all BTs belonging to the provided source BTTs
            source_btt_obj = None    # holds the source BTT db model object
            source_backup_targets = {}    # maps BTT ID to BT ID for the provided source BTTs
            target_btt_obj = None    # holds the target BTT db model object
            use_default_target_btt = False    # True when no target BTT was provided and the default BTT is used
            importable_wl = {}    # holds importable workloads
            wl_not_found = []

            if not target_btt:
                target_btt = self.db.get_default_backup_target_type(context)[0].id
                use_default_target_btt = True
            target_btt_obj = self.db.backup_target_type_show(context, target_btt)
            target_backup_target = vault.get_backup_target_by_backup_target_type(target_btt_obj.id)
            target_btt_obj['backup_target_types']['filesystem_export_mount_path'] = target_backup_target.mountpath
            target_btt_obj['backup_target_types']['filesystem_export'] = target_backup_target.backup_endpoint

            # if source-btt is not given, use the default BTT as the source BTT
            if not len(source_btt):
                source_btt = [self.db.get_default_backup_target_type(context)[0].id]
            else:
                # eliminate duplicate source btt
                source_btt = list(set(source_btt))

            # if source_btt_all is provided, consider all available BTTs as source BTTs
            if source_btt_all:
                source_bt_list, source_btt = [], []
                for btt in self.db.backup_target_type_get_all(context):
                    # keeping only one BTT for each BT to avoid duplicate loops
                    if btt.backup_targets_id not in source_bt_list:
                        source_bt_list.append(btt.backup_targets_id)
                        source_btt.append(btt.id)

            # validate all source BTT
            for source_btt_id in source_btt:
                try:
                    vault.get_backup_target_by_backup_target_type(source_btt_id)
                    source_btt_obj = self.db.backup_target_type_show(context, source_btt_id)
                    source_backup_targets[source_btt_id] = source_btt_obj['backup_targets_id']
                except AttributeError as ex:
                    LOG.error(ex)
                    raise wlm_exceptions.BackupTargetTypeNotFound(backup_target_type_id=source_btt_id)

            # if workload_ids is not provided, consider all available workloads from the source BTTs
            if not workload_ids:
                # make an async call that records all importable workloads for the given source BTTs in the DB
                keystone_client = KeystoneClient(context)
                projects = keystone_client.client.get_project_list_for_import(
                               context)
                project_id_list = [project.id for project in projects]


                # create job for the wl import operation
                importable_wl_list_values = {"status": "created", "action": "list_importable_workloads"}
                importable_wl_list_jobid = self.db.import_job_create(context, importable_wl_list_values)

                # put list of importable workloads for each BTT into DB
                importable_wl_list_values.update({'project_id': project_id_list, 'jobid': importable_wl_list_jobid, 'backup_targets': source_backup_targets.values()})
                self.scheduler_rpcapi.get_importable_workload_list(context, FLAGS.scheduler_topic, importable_wl_list_values)
                self.db.import_job_update(context, importable_wl_list_jobid, {'status': 'in-progress'})
            else:
                # when user provides workload ids
                for wl_id in workload_ids:
                    # when workload IDs are provided, only a single source BTT is expected
                    source_backup_target = vault.get_backup_target_by_backup_target_type(list(source_backup_targets.keys())[0])
                    if vault.find_wl_in_backup_target_type(wl_id, source_backup_target.mountpath):
                        importable_wl[wl_id] = list(source_backup_targets.keys())[0]
                    # keeping track of wl not found in provided source BTTs
                    if wl_id not in importable_wl.keys():
                        wl_not_found.append(wl_id)

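            # Import the settings records first, passing the current DB schema version to the importer module.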
            module_name = 'workloadmgr.db.imports.import_workloads'
            import_workload_module = importlib.import_module(module_name)
            import_workload_module.import_settings(context, models.DB_VERSION)

            global_job_scheduler_status = self.workload_get_global_job_scheduler(context)
            if not global_job_scheduler_status:
                # the global job scheduler must be enabled before
                # workload import so that their job triggers persist
                self.workload_enable_global_job_scheduler(context)

            # create job for the wl import operation
            values = {"status": "created", "action": "import_workloads"}
            jobid = self.db.import_job_create(context, values)
            values.update({"jobid": jobid})

            # record error entries for workloads that were not found in the provided source BTTs
            for wl_id in wl_not_found:
                values.update({"workload_id": wl_id, "source_btt": source_btt_obj, "target_btt": target_btt_obj})
                importing_wl_job_data_ref = self.db.workload_import_create(context, values)
                self.db.workload_import_update(context, importing_wl_job_data_ref.id, {'status': 'error', 'message': 'Workload {0} not present in the provided backup target type(s): {1}'.format(wl_id, source_btt_obj.id)})

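            # With explicit workload IDs, schedule an import per workload right away; otherwise a background thread picks up workloads as the listing job discovers them.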
            if workload_ids:
                # schedule wl import for available workloads in case workload IDs provided
                for wl_id, btt_id in importable_wl.items():
                    source_backup_target = vault.get_backup_target_by_backup_target_type(btt_id)
                    source_btt_obj = self.db.backup_target_type_show(context, btt_id)
                    source_btt_obj['backup_target_types']['filesystem_export_mount_path'] = source_backup_target.mountpath
                    source_btt_obj['backup_target_types']['filesystem_export'] = source_backup_target.backup_endpoint
                    values.update({"workload_id": wl_id, "source_btt": source_btt_obj, "target_btt": target_btt_obj})
                    importing_wl_job_data_ref = self.db.workload_import_create(context, values)
                    if use_default_target_btt:
                        # if the target-btt argument was not given, use the source BTT as the target BTT for the import
                        values["target_btt"] = source_btt_obj
                    values.update({'importing_wl_job_data_id': importing_wl_job_data_ref.id})
                    self.scheduler_rpcapi.workload_import(context, FLAGS.scheduler_topic, values, upgrade)
            else:
                # when the user does not provide workload IDs, the importable-workload listing has been scheduled for all source BTTs;
                # this background thread watches for those workloads and triggers an import operation for each one
                def daemon_wl_import_trigger():
                    try:
                        start_time = datetime.utcnow()
                        while (datetime.utcnow() - start_time).total_seconds() < CONF.importable_wl_list_timeout:    # maximum time to wait for a single unprocessed wl from the DB
                            # fetch the importable workloads for the job whose status is 'unprocessed'
                            unprocessed_wl_db_objs = self.db.importable_workloads_get_all(context, jobid=importable_wl_list_jobid, status='unprocessed')
                            # initiating import operation for the unprocessed wls
                            for job_data_ref in unprocessed_wl_db_objs:
                                # get the source BTT for the unprocessed wl based on its BT
                                btt = [btt_id for btt_id, bt_id in source_backup_targets.items() if json.loads(job_data_ref.data)['backup_target'] == bt_id]
                                btt_id = btt[0]
                                LOG.info('initiating workload import for {} workload from {} BTT and importable_wl_job_id: {}'.format(json.loads(job_data_ref.data)['workload_id'], btt_id, importable_wl_list_jobid))
                                # assigning source-btt parameters
                                source_backup_target = vault.get_backup_target_by_backup_target_type(btt_id)
                                source_btt_obj = self.db.backup_target_type_show(context, btt_id)
                                source_btt_obj['backup_target_types']['filesystem_export_mount_path'] = source_backup_target.mountpath
                                source_btt_obj['backup_target_types']['filesystem_export'] = source_backup_target.backup_endpoint
                                values.update({"workload_id": json.loads(job_data_ref.data)['workload_id'], "source_btt": source_btt_obj, "target_btt": target_btt_obj})
                                # creating workload import DB entry
                                importing_wl_job_data_ref = self.db.workload_import_create(context, values)
                                if use_default_target_btt:
                                    # if the target-btt argument was not given, use the source BTT as the target BTT for the import
                                    values["target_btt"] = source_btt_obj
                                # add importable_wl_job_data_id and importing_wl_job_data_id to the values used by wlm-workloads
                                values.update({'importable_wl_job_data_id': job_data_ref.id, 'importing_wl_job_data_id': importing_wl_job_data_ref.id})

                                # scheduling the wl import operation
                                LOG.info('scheduling workload import for: {}'.format(values))
                                self.scheduler_rpcapi.workload_import(context, FLAGS.scheduler_topic, values, upgrade)

                                LOG.info('successfully triggered workload import for {} workload from {} BTT'.format(json.loads(job_data_ref.data)['workload_id'], btt_id))
                                # setting current wl as 'processing' once we trigger wl import operation
                                self.db.importable_workloads_update(context, job_data_ref.id, values={'status': 'processing'})

                                # reset start_time after each processed wl import so the timeout only measures idle time
                                start_time = datetime.utcnow()
                            # when the job is finished then exit
                            importable_wl_job_ref = self.db.import_job_get(context, importable_wl_list_jobid)
                            if importable_wl_job_ref and importable_wl_job_ref.status in ('completed', 'error'):
                                LOG.info('All importable workloads are scheduled')
                                # fetch the importable workloads for the job whose status is 'unprocessed'
                                unprocessed_wl_db_objs = self.db.importable_workloads_get_all(context, jobid=importable_wl_list_jobid, status='unprocessed')
                                if not unprocessed_wl_db_objs:
                                    self.db.import_job_update(context, jobid, {'status': 'completed'})
                                    break
                            time.sleep(3)    # sleeping for 3 seconds before checking unprocessed wl list from DB
                        else:
                            # the wait for unprocessed workloads timed out
                            self.db.import_job_update(context, values.get('jobid'), {'status': 'timeout'})
                    except Exception as ex:
                        LOG.exception('failed to trigger workload import operation due to {}'.format(ex))
                daemon_thread = threading.Thread(target=daemon_wl_import_trigger)
                daemon_thread.start()
            self.db.import_job_update(context, jobid, {'status': 'in-progress'})
            LOG.info('Import Workloads Job Submitted Successfully, job-id: {0}'.format(jobid))
            return jobid
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_import_workloads_progress(self, context, jobid):
        """
        {'jobid':[{
                    "id": "1234567890",
                    "created_at": "22nd Aug 2023",
                    "wllist": [{
                        'id':'123',
                        'name':'Test-WL-01',
                        'progress': 10}, {
                        'id':'124',
                        'name': 'Test-W:-02',
                        'progress': 25}],
                    "completedat": "22nd Aug 2023",
                    "status": 'In-Progress'
                }]}"""
        resp = {
            'jobid': [{
                "wllist": [],
            }],
            'action': 'import_workloads'
        }
        job = self.db.import_job_get(context, jobid)
        wls = self.db.workload_import_get_all(context, job.jobid)
        job_resp = resp['jobid'][0]
        job_resp['id'] = job.jobid
        job_resp['created_at'] = job.created_at
        job_resp['completedat'] = job.updated_at
        job_resp['status'] = job.status
        wllist = []
        for each_wl in wls:
            wl_resp = {}
            wl_data = json.loads(each_wl.data)
            wl_resp['id'] = wl_data['workload_id']
            # wl_resp['created_at'] = wl_data['created_at']
            wl_resp['progress'] = wl_data['progress']
            wl_resp['status'] = wl_data['status']
            wl_resp['message'] = wl_data.get('message', '')
            job_resp['wllist'].append(wl_resp)
        return resp

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_importable_workloads_list(self, context, jobid):
        json_resp = {
                "workload_list": [],
                "action": "list_importable_workloads"
               }
        job = self.db.import_job_get(context, jobid)
        wls = self.db.importable_workloads_get_all(context, jobid=job.jobid)
        json_resp['id'] = job.jobid
        json_resp['created_at'] = job.created_at
        json_resp['updated_at'] = job.updated_at
        json_resp['status'] = job.status
        for each_wl in wls:
            wl_resp = {}
            wl_data = json.loads(each_wl.data)
            wl_resp['workload_id'] = wl_data['workload_id']
            wl_resp['name'] = wl_data['name']
            wl_resp['project_id'] = wl_data['project_id']
            wl_resp['backup_target'] = wl_data['backup_target']
            wl_resp['backup_target_types'] = wl_data['backup_target_types']
            wl_resp['backup_target_types_id'] = wl_data['backup_target_types_id']
            json_resp['workload_list'].append(wl_resp)
        return json_resp

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_job_details(self, context, jobid):
        job = self.db.import_job_get(context, jobid)
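        # Jobs without an 'action' value default to the workload import progress view.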
        if not getattr(job, 'action', None) or job.action == "import_workloads":
            return self.get_import_workloads_progress(context, job.jobid)
        elif job.action == "list_importable_workloads":
            return self.get_importable_workloads_list(context, job.jobid)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def abandon_resources(self, context, workload_ids, policy_ids, all_workloads, all_policies, cloud_wide):
        kwargs = {}
        if not context.is_admin and (policy_ids or all_policies or cloud_wide):
            raise wlm_exceptions.AdminRequired()

        if not cloud_wide:
            kwargs['user_list'] = [context.user_id]
            kwargs['project_list'] = [context.project_id]
        if all_workloads:
            kwargs['all_workloads'] = True
        if all_policies:
            kwargs['all_policies'] = True
        if workload_ids:
            kwargs['workload_ids'] = workload_ids
        if policy_ids:
            kwargs['policy_ids'] = policy_ids
        return self.db.abandon_resources(context, **kwargs)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_nodes(self, context):
        nodes = []
        try:
            for node_record in self.db.service_get_all(context):
                try:
                    if node_record['topic'] != 'workloadmgr-workloads':
                        continue
                    status = 'Up'
                    if not utils.service_is_up(
                            node_record) or node_record['disabled']:
                        status = 'Down'
                    time_since_update = calculate_difference_in_time(node_record.get('updated_at'))
                    node_data = {'node': node_record.host,
                                  'id': node_record.id,
                                  'version': node_record.version,
                                  'is_vip': False,
                                  'status': status,
                                  'time_since_update': time_since_update.seconds}
                    nodes.append(node_data)
                except Exception as ex:
                    LOG.exception(ex)
        except Exception as ex:
            LOG.exception(ex)
        return dict(nodes=nodes)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def remove_node(self, context, ip):
        try:
            for node_record in self.db.service_get_all_by_topic(
                    context, topic='workloadmgr-workloads'):
                try:
                    ipaddress = ''
                    ip_addresses = node_record.ip_addresses.split(';')
                    if len(ip_addresses) > 0 and len(
                            ip_addresses[0]) > 0:
                        ipaddress = ip_addresses[0]

                    if any([ipaddress == ip, node_record.host == ip]
                           ) and socket.gethostname() != node_record.host:
                        if utils.service_is_up(node_record):
                            msg = _(
                                "Node is up. Please shut down or delete the node before it can be removed with this command")
                            raise wlm_exceptions.ErrorOccurred(reason=msg)
                        self.db.service_delete(context, int(node_record.id))

                except Exception as ex:
                    LOG.exception(ex)
                    raise ex
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_contego_status(self, context, host=None, ip=None):
        try:
            compute_service = nova.API(production=True)
            compute_contego_records = compute_service.contego_service_status(
                context, host, ip)
            return compute_contego_records
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def add_node(self, context, ip):
        try:
            for node_record in self.db.service_get_all_by_topic(
                    context, topic='workloadmgr-workloads'):
                try:
                    ipaddress = ''
                    ip_addresses = node_record.ip_addresses.split(';')
                    if len(ip_addresses) > 0 and len(
                            ip_addresses[0]) > 0:
                        ipaddress = ip_addresses[0]
                    if socket.gethostname() == node_record.host:
                        controller_ip = ipaddress
                    if any([ipaddress == ip, node_record.host == ip]):
                        msg = _("Other node with same ip addresss exists")
                        raise wlm_exceptions.ErrorOccurred(reason=msg)
                except Exception as ex:
                    LOG.exception(ex)
                    raise ex
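            # Drive the new node's configurator API over HTTPS with curl, reusing this deployment's stored "Configurator" settings.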
            import subprocess
            file_name = context.user_id + '.txt'
            command = [
                'sudo',
                'curl',
                '-k',
                '--cookie-jar',
                file_name,
                '--data',
                "username=admin&password=password",
                "https://" +
                ip +
                "/login"]
            try:
                res = subprocess.check_output(command)
            except Exception as ex:
                msg = _("Error resolving " + ip)
                raise wlm_exceptions.ErrorOccurred(reason=msg)
            subprocess.call(command, shell=False)
            config_inputs = {}
            for setting in self.db.setting_get_all_by_project(
                    context, "Configurator"):
                config_inputs[setting.name] = setting.value
            if not config_inputs:
                msg = _("No configurations found")
                raise wlm_exceptions.ErrorOccurred(reason=msg)
            command = ['sudo', 'curl', '-k', '--cookie', file_name, '--data',
                       "refresh=1&from=api&tvault-primary-node=" +
                       controller_ip + "&nodetype=additional",
                       "https://" + ip + "/configure_vmware"]
            subprocess.call(command, shell=False)
            urls = [
                'configure_host',
                'authenticate_with_vcenter',
                'authenticate_with_swift',
                'register_service',
                'configure_api',
                'configure_scheduler',
                'configure_service',
                'start_api',
                'start_scheduler',
                'start_service',
                'register_workloadtypes',
                'workloads_import',
                'discover_vcenter',
                'ntp_setup']
            if len(config_inputs['swift_auth_url']) == 0:
                urls.remove('authenticate_with_swift')
            if config_inputs['import_workloads'] == 'off':
                urls.remove('workloads_import')
            if config_inputs['ntp_enabled'] == 'off':
                urls.remove('ntp_setup')
            for url in urls:
                command = [
                    'sudo',
                    'curl',
                    '-k',
                    '--cookie',
                    file_name,
                    "https://" +
                    ip +
                    "/" +
                    url]
                res = subprocess.check_output(command).decode('utf-8').strip()
                if res != '{"status": "Success"}' and url != 'ntp_setup':
                    command = ['sudo', 'rm', '-rf', file_name]
                    subprocess.call(command, shell=False)
                    msg = _(res)
                    raise wlm_exceptions.ErrorOccurred(reason=msg)
            command = ['sudo', 'rm', '-rf', file_name]
            subprocess.call(command, shell=False)

        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_storage_usage(self, context):

        storages_usage = {}
        total_usage = 0
        nfsstats = vault.get_capacities_utilizations(context)

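        # Build per-NFS-share capacity and utilization entries for every enabled backup target backend.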
        for backup_target_section in vault.CONF.enabled_backends:
            for nfsshare in vault.CONF[backup_target_section].vault_storage_filesystem_export.split(','):
                nfsshare = nfsshare.strip()
                stat = nfsstats[nfsshare]

                total_capacity = stat['total_capacity']
                total_utilization = stat['total_utilization']
                nfsstatus = stat['nfsstatus']

                storages_usage[nfsshare] = {
                    'storage_type': vault.CONF.vault_storage_type,
                    'nfs_share(s)': [
                        {
                            "nfsshare": nfsshare,
                            "status": "Online" if nfsstatus else "Offline",
                            "capacity": utils.sizeof_fmt(total_capacity),
                            "utilization": utils.sizeof_fmt(total_utilization),
                        },
                    ],
                    'total_capacity': total_capacity,
                    'total_utilization': total_utilization,
                    'total_capacity_humanized': utils.sizeof_fmt(total_capacity),
                    'total_utilization_humanized': utils.sizeof_fmt(total_utilization),
                    'available_capacity_humanized': utils.sizeof_fmt(
                        float(total_capacity) - float(total_utilization)),
                    'total_utilization_percent': 0.0 if not bool(total_capacity) else round(((float(total_utilization) / float(total_capacity)) * 100), 2),
                }

        storage_usage = {
            'storage_usage': list(storages_usage.values()),
            'count_dict': {}}
        full = 0
        incr = 0
        total = 0
        full_size = 0
        incr_size = 0
        kwargs = {"get_all": True, "read_metadata": False}
        workload_kwargs = {'project_id': '', 'all_workloads': True, 'page_number': '', 'nfs_share': ''}
        all_workload_list = self.db.workload_get_all(context, **workload_kwargs)

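        # Count full and incremental snapshots and accumulate their sizes across all workloads.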
        for workload in all_workload_list:
            kwargs['workload_id'] = workload.id
            for snapshot in self.db.snapshot_get_all(context, **kwargs):
                if snapshot.snapshot_type == 'full':
                    full = full + 1
                    full_size = full_size + float(snapshot.size)
                elif snapshot.snapshot_type == 'incremental':
                    incr = incr + 1
                    incr_size = incr_size + float(snapshot.size)
                total = total + 1

        if (full + incr) > 0:
            full_total_count_percent = \
                round(((float(full) / float((full + incr))) * 100), 2)
            storage_usage['count_dict']['full_total_count_percent'] = \
                str(full_total_count_percent)
            storage_usage['count_dict']['full_total_count'] = str(full)
            storage_usage['count_dict']['incr_total_count'] = str(incr)

        storage_usage['count_dict']['full'] = full_size
        storage_usage['count_dict']['incremental'] = incr_size
        storage_usage['count_dict']['total'] = full_size + incr_size
        total_usage = storage_usage['count_dict']['total']
        if float(total_usage) > 0:
            storage_usage['count_dict']['full_snaps_utilization'] = \
                round(((float(full_size) / float(total_usage)) * 100), 2)
            storage_usage['count_dict']['incremental_snaps_utilization'] = \
                round(((float(incr_size) / float(total_usage)) * 100), 2)
        else:
            storage_usage['count_dict']['full_snaps_utilization'] = '0'
            storage_usage['count_dict']['incremental_snaps_utilization'] = '0'
        return storage_usage

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_recentactivities(self, context, time_in_minutes):
        recentactivites = []
        now = timeutils.utcnow()
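        # time_offset shifts the UTC timestamps stored in the DB into local time for display.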
        time_offset = datetime.now() - datetime.utcnow()
        try:
            for workload in self.db.workload_get_all(
                    context,
                    read_deleted='yes',
                    dashboard_item='activities',
                    time_in_minutes=time_in_minutes
            ):
                recentactivity = {'activity_type': '',
                                  'activity_time': '',
                                  'activity_description': '',
                                  'activity_result': workload.status,
                                  'object_type': 'workload',
                                  'object_name': workload.display_name,
                                  'object_id': workload.id,
                                  'object_user_id': workload.user_id,
                                  'object_project_id': workload.project_id,
                                  }
                description_suffix = \
                    "(Workload: '%s' - '%s')" % \
                    (workload.display_name,
                     (workload.created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"))
                if workload.deleted:
                    recentactivity['activity_type'] = 'delete'
                    recentactivity['activity_time'] = workload.deleted_at
                    recentactivity['activity_description'] = \
                        "Workload deleted. " + description_suffix
                else:
                    recentactivity['activity_type'] = 'create'
                    recentactivity['activity_time'] = workload.created_at
                    if workload.status == 'error':
                        recentactivity['activity_description'] = \
                            "Workload failed. " + description_suffix
                    else:
                        recentactivity['activity_description'] = \
                            "Workload created. " + description_suffix
                recentactivites.append(recentactivity)

            for snapshot in self.db.snapshot_get_all(
                    context,
                    read_deleted='yes',
                    dashboard_item='activities',
                    time_in_minutes=time_in_minutes):
                recentactivity = {'activity_type': '',
                                  'activity_time': '',
                                  'activity_description': '',
                                  'activity_result': snapshot.status,
                                  'object_type': 'snapshot',
                                  'object_name': snapshot.display_name,
                                  'object_id': snapshot.id,
                                  'object_user_id': snapshot.user_id,
                                  'object_project_id': snapshot.project_id,
                                  }
                description_suffix = \
                    "(Snapshot: '%s' - '%s', Workload: '%s' - '%s')" % \
                    (snapshot.display_name,
                     (snapshot.created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"),
                     snapshot.workload_name,
                     (snapshot.workload_created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"))
                if snapshot.deleted:
                    recentactivity['activity_type'] = 'delete'
                    recentactivity['activity_time'] = snapshot.deleted_at
                    recentactivity['activity_description'] = \
                        "Snapshot deleted. " + description_suffix
                else:
                    recentactivity['activity_type'] = 'create'
                    recentactivity['activity_time'] = snapshot.created_at
                    if snapshot.status == 'error':
                        recentactivity['activity_description'] = \
                            "Snapshot failed. " + description_suffix
                    elif snapshot.status == 'available':
                        recentactivity['activity_description'] = \
                            "Snapshot created. " + description_suffix
                    elif snapshot.status == 'cancelled':
                        recentactivity['activity_type'] = 'cancel'
                        recentactivity['activity_description'] = \
                            "Snapshot cancelled. " + description_suffix
                    else:
                        recentactivity['activity_description'] = \
                            "Snapshot is in progress. " + description_suffix
                recentactivites.append(recentactivity)

            for restore in self.db.restore_get_all(
                    context,
                    read_deleted='yes',
                    dashboard_item='activities',
                    time_in_minutes=time_in_minutes):
                recentactivity = {'activity_type': '',
                                  'activity_time': '',
                                  'activity_description': '',
                                  'activity_result': restore.status,
                                  'object_type': 'restore',
                                  'object_name': restore.display_name,
                                  'object_id': restore.id,
                                  'object_user_id': restore.user_id,
                                  'object_project_id': restore.project_id,
                                  }
                description_suffix = \
                    "(Restore: '%s' - '%s', Snapshot: '%s' - '%s', Workload: '%s' - '%s')" % \
                    (restore.display_name,
                     (restore.created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"),
                     restore.snapshot_name,
                     (restore.snapshot_created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"),
                     restore.workload_name,
                     (restore.workload_created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"))
                if restore.deleted:
                    recentactivity['activity_type'] = 'delete'
                    recentactivity['activity_time'] = restore.deleted_at
                    recentactivity['activity_description'] = \
                        "Restore deleted. " + description_suffix
                else:
                    recentactivity['activity_type'] = 'create'
                    recentactivity['activity_time'] = restore.created_at
                    if restore.status == 'error':
                        recentactivity['activity_description'] = \
                            "Restore failed. " + description_suffix
                    elif restore.status == 'available':
                        recentactivity['activity_description'] = \
                            "Restore completed. " + description_suffix
                    elif restore.status == 'cancelled':
                        recentactivity['activity_type'] = 'cancel'
                        recentactivity['activity_description'] = \
                            "Restore Cancelled. " + description_suffix
                    else:
                        recentactivity['activity_description'] = \
                            "Restore is in progress. " + description_suffix
                recentactivites.append(recentactivity)

            recentactivites = sorted(recentactivites,
                                     key=itemgetter('activity_time'),
                                     reverse=True)
        except Exception as ex:
            LOG.exception(ex)
        return dict(recentactivites=recentactivites)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_auditlog(self, context, time_in_minutes, time_from, time_to):
        auditlog = []
        try:
            auditlog = AUDITLOG.get_records(
                time_in_minutes, time_from, time_to)
        except Exception as ex:
            LOG.exception(ex)
        return dict(auditlog=auditlog)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_workflow(self, context, workload_id):
        """
        Get the workflow of the workload. RPC call is made
        """
        try:
            return self.workloads_rpcapi.workload_get_workflow_details(
                context, socket.gethostname(), workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_topology(self, context, workload_id):
        """
        Get the topology of the workload. RPC call is made
        """
        try:
            return self.workloads_rpcapi.workload_get_topology(
                context, socket.gethostname(), workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_discover_instances(self, context, workload_id):
        """
        Discover Instances of a workload_type. RPC call is made
        """
        try:
            return self.workloads_rpcapi.workload_discover_instances(
                context, socket.gethostname(), workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    def _is_workload_paused(self, context, workload_id):
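        # A workload is considered paused when it has no job registered with the scheduler.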
        workload = self.workload_get(context, workload_id)
        jobs = self.db.job_get_all(context)
        for job in jobs:
            if job.workload_id == workload_id:
                return False
        return True

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_pause(self, context, workload_id):
        """
        Pause the workload job schedule via the wlm-cron service.
        """
        workload = self.workload_get(context, workload_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Pause Requested',
            workload)
        job = self.db.job_get(context, workload_id)
        if job:
            assert uuid.UUID(job.workload_id) == uuid.UUID(workload_id)
        self.cron_rpcapi.workload_pause(context, workload_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Pause Submitted',
            workload)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_resume(self, context, workload_id):
        workload = self.db.workload_get(context, workload_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Resume Requested',
            workload)
        job = self.db.job_get(context, workload_id)
        if job:
            msg = _('Workload job scheduler is not paused')
            raise wlm_exceptions.InvalidState(reason=msg)
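        # The job schedule is stored as a pickled string; rebuild it and re-register the cron job.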
        jobschedule = pickle.loads(bytes(workload['jobschedule'], 'utf-8'))
        if len(jobschedule) >= 1:
            self.cron_rpcapi.workload_add_scheduler_job(context, jobschedule, workload)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Resume Submitted',
            workload)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_unlock(self, context, workload_id):
        workload = self.workload_get(context, workload_id)
        display_name = workload['display_name']
        AUDITLOG.log(
            context,
            'Workload \'' +
            display_name +
            '\' Unlock Requested',
            workload)
        if not workload['deleted']:
            self.db.workload_update(
                context, workload_id, {
                    'status': 'available'})
        AUDITLOG.log(
            context,
            'Workload \'' +
            display_name +
            '\' Unlock Submitted',
            workload)

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def workload_disable_global_job_scheduler(self, context):

        try:
            if vault.CONF.global_job_scheduler_override:
                raise Exception("global_job_scheduler_override is set to True"
                                " in triliovault-wlm.conf. Cannot disable global "
                                " job scheduler")

            if self.workload_get_global_job_scheduler(context) is False:
                # scheduler is already stopped. Nothing to do
                return

            self.cron_rpcapi.workload_disable_global_job_scheduler(context)

            setting = {'category': "job_scheduler",
                       'name': "global-job-scheduler",
                       'description': "Controls job scheduler status",
                       'value': False,
                       'user_id': context.user_id,
                       'is_public': False,
                       'is_hidden': True,
                       'metadata': {},
                       'type': "job-scheduler-setting", }

            try:
                self.db.setting_get(context, setting['name'], get_hidden=True, cloud_setting=True)
                self.db.setting_update(context, setting['name'], setting, cloud_setting=True)
            except wlm_exceptions.SettingNotFound:
                self.db.setting_create(context, setting)

        except Exception as ex:
            LOG.exception(ex)
            raise

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def workload_enable_global_job_scheduler(self, context):

        if self.workload_get_global_job_scheduler(context) is True:
            # scheduler is already running. Nothing to do
            return

        self.cron_rpcapi.workload_enable_global_job_scheduler(context)
        setting = {'category': "job_scheduler",
                   'name': "global-job-scheduler",
                   'description': "Controls job scheduler status",
                   'value': True,
                   'user_id': context.user_id,
                   'is_public': False,
                   'is_hidden': True,
                   'metadata': {},
                   'type': "job-scheduler-setting", }
        try:
            try:
                self.db.setting_get(context, setting['name'], get_hidden=True, cloud_setting=True)
                self.db.setting_update(context, setting['name'], setting, cloud_setting=True)
            except wlm_exceptions.SettingNotFound:
                self.db.setting_create(context, setting)

        except Exception as ex:
            LOG.exception(ex)
            raise Exception("Cannot enable job scheduler globally")

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_global_job_scheduler(self, context):

        try:
            cron_status = \
             self.cron_rpcapi.workload_get_global_job_scheduler(context)
            if cron_status is None:
                raise Exception("Error in retrieving cron status, "\
                        "check wlm-cron log")
            return cron_status
        except oslo_exceptions.MessagingTimeout as ex:
            LOG.exception(ex)
            raise Exception("Not able to connect with wlm-cron, "\
                    "Please check service status.")
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @create_trust()
    @check_license
    @wrap_check_policy
    def workload_snapshot(self, context, workload_id,
                          snapshot_type, name, description, is_scheduled=False,
                          retention_tags=None):
        """
        Make the RPC call to snapshot a workload.
        """
        if retention_tags is None:
            retention_tags = ['manual']
        workload = None
        try:
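            # For scheduler-triggered snapshots, rebuild the request context
            # from the job's pickled kwargs (captured when the schedule was
            # created); a tenant context and a cloud-admin context are both
            # derived from it.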
            if is_scheduled:
                # In case of scheduled snapshots need to update the context
                job = self.db.job_get(context, workload_id)
                kwargs = pickle.loads(bytes(job.kwargs, 'utf-8'))
                kwargs['tenant_id'] = kwargs['project_id']
                context = nova._get_tenant_context(munchify(kwargs))
                admin_context = nova._get_tenant_context(munchify(kwargs), cloud_admin=True)
            else:
                admin_context = nova._get_tenant_context(context, cloud_admin=True)
        except Exception as ex:
            LOG.exception(ex)
            raise
        try:
            workload = self.workload_get(context, workload_id)

            if not workload.get("encryption"):
                wv_obj = workload_validator.WorkloadValidator(context=context, body=workload)
                wv_obj._validate_logical_path_for_workload_encryption()
        except Exception as ex:
            LOG.exception(str(ex))
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'workload_id': workload_id,
                'snapshot_type': "full",
                'display_name': name,
                'display_description': description,
                'host': '',
                'status': 'error',
                'error_msg': str(ex),
                'metadata': {}, }
            admin_context = wlm_context.get_admin_context()
            snapshot = self.db.snapshot_create(admin_context, options)
            self.scheduler_rpcapi.schedule_send_email(
                context, FLAGS.scheduler_topic, snapshot.id, 'snapshot')
            return snapshot

        quota_exceeded = False
        error_msg = "Check allowed quotas"
        try:
            allowed_quota_obj = snapshot_quota_check(
                context.project_id, self.db
            )
            if allowed_quota_obj \
                    and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                # TODO: check how to handle this warning at UI side
                AUDITLOG.log(
                    context,
                    "Current project's snapshot allowed quota has reached the "
                    "high watermark. Contact the admin.",
                    None)
                LOG.warning("Current project's snapshot allowed quota has "
                            "reached the high watermark. Contact the admin.")
        except ValueError as e:
            LOG.exception(e)
            quota_exceeded = True
            error_msg = "Current project's snapshot allowed quota has exceeded. Contact to the admin"
        except Exception as ex:
            LOG.exception(ex)
            raise

        if not quota_exceeded:
            try:
                admin_context.is_admin = True
                total_storage = self.get_tenants_usage(admin_context)

                if total_storage and \
                    total_storage.get('tenants_usage', {}).get(context.project_id, {}):
                    allowed_quota_obj = storage_quota_check(
                        context.project_id,
                        self.db, total_storage['tenants_usage'][context.project_id].get('used_capacity',0))
                    if allowed_quota_obj \
                            and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                        # TODO: check how to handle this warning at UI side
                        AUDITLOG.log(
                            context,
                            "Current project's storage allowed quota has reached the "
                            "high watermark. Contact the admin.",
                            None)
                        LOG.warning("Current project's storage allowed quota has "
                                    "reached the high watermark. Contact the admin.")
            except ValueError as e:
                LOG.exception(e)
                quota_exceeded = True
                error_msg =  "Current project's storage allowed quota has exceeded. Contact to the admin"
            except Exception as ex:
                LOG.exception(ex)
                raise

        if quota_exceeded:
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'workload_id': workload_id,
                'snapshot_type': "full",
                'display_name': name,
                'display_description': description,
                'host': '',
                'status': 'error',
                'error_msg': error_msg,
                'metadata': {}, }

            admin_context = wlm_context.get_admin_context()
            snapshot = self.db.snapshot_create(admin_context, options)
            self.scheduler_rpcapi.schedule_send_email(
                context, FLAGS.scheduler_topic, snapshot.id, 'snapshot')
            return snapshot

        instances = [workload_vm_obj.vm_id for workload_vm_obj in
                         self.db.workload_vms_get(context, workload['id'])]
        if len(instances) != len(set(instances)):
            msg = _('Workload has some duplicate vm entries. '
                    'Remove duplicate entries and retry again.\n'
                    'Hint: Open edit workload dialog in horizon dashboard '
                    'and update the workload to remove any duplications')
            raise wlm_exceptions.InvalidState(reason=msg)


        try:
            volume_quota_exceeded = False
            compute_service = nova.API(production=True)
            volume_service = cinder.API()
            cinder_volume_size_list = []
            required_volume_size = 0
            instance_volume_sizes_dict = {instance:[] for instance in instances}
            for cinder_volume in volume_service.get_all(context):
                if cinder_volume.get('attach_status') == 'attached' and \
                        cinder_volume.get('instance_uuid') in instances:
                    instance_volume_sizes_dict[cinder_volume['instance_uuid']].append(cinder_volume['size'])
                    cinder_volume_size_list.append(cinder_volume['size'])

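            # Estimate the Cinder capacity needed while snapshotting: with
            # serial VM backup and a full-only schedule, the largest
            # per-instance footprint (its volumes plus one extra copy of its
            # biggest volume) is used; otherwise all attached volumes plus the
            # single biggest one are counted. The exact headroom rule is
            # inferred from the arithmetic below.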
            if CONF.serial_vm_backup and int(workload['jobschedule'].get('fullbackup_interval', -1)) == 0:
                for instance_uuid, volume_size_list in instance_volume_sizes_dict.items():
                    if volume_size_list and (sum(volume_size_list) + max(volume_size_list)) > required_volume_size:
                        required_volume_size = sum(volume_size_list) + max(volume_size_list)
            elif cinder_volume_size_list:
                required_volume_size = sum(cinder_volume_size_list) + max(cinder_volume_size_list)

            if required_volume_size:
                volume_quota = volume_service.get_volume_quota(context, context.project_id)
                if volume_quota.gigabytes.get('limit') != -1 \
                    and volume_quota.gigabytes.get('limit') < (volume_quota.gigabytes.get('in_use') + required_volume_size):
                    volume_quota_exceeded = True
                    error_msg = "volume allowed quota for project: {} has exceeded. Required Size: {}, Available: {}".format(context.project_id, required_volume_size, volume_quota.gigabytes.get('limit') - volume_quota.gigabytes.get('in_use'))
                    LOG.exception(error_msg)
        except Exception as ex:
            LOG.exception(ex)

        if volume_quota_exceeded:
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'workload_id': workload_id,
                'snapshot_type': "full",
                'display_name': name,
                'display_description': description,
                'host': '',
                'status': 'error',
                'error_msg': error_msg,
                'metadata': {}, }

            context = wlm_context.get_admin_context()
            snapshot = self.db.snapshot_create(context, options)
            return snapshot

        try:
            if not workload:
                workload = self.workload_get(context, workload_id)

            if int(workload['jobschedule'].get('fullbackup_interval', -1)) == 0:
                snapshot_type = "full"

            snapshot_display_name = ''
            if name and len(name) > 0:
                snapshot_display_name = '\'' + name + '\''
            else:
                snapshot_display_name = '\'' + 'Undefined' + '\''

            AUDITLOG.log(context, 'Workload \'' + workload['display_name'] +
                         '\' ' + snapshot_type + ' Snapshot ' +
                         snapshot_display_name + ' Create Requested', workload)

            # Reject the request if this workload participates in any
            # composite workload's workflow graph.
            workloads = self.db.workload_get_all(context)
            for other_workload in workloads:
                if other_workload.deleted:
                    continue
                workload_type = self.db.workload_type_get(
                    context, other_workload.workload_type_id)
                if workload_type.display_name == 'Composite':
                    for kvpair in other_workload.metadata:
                        if kvpair['key'] == 'workloadgraph':
                            graph = json.loads(kvpair['value'])
                            for flow in graph['children']:
                                for member in flow['children']:
                                    if 'type' in member and \
                                            member['data']['id'] == workload_id:
                                        msg = _(
                                            'Operation not allowed since this workload is a member of a composite workflow')
                                        raise wlm_exceptions.InvalidState(
                                            reason=msg)

            try:
                workload_lock.acquire()
                workload = self.workload_get(context, workload_id)
                if workload['status'].lower() != 'available':
                    msg = _(
                        "Workload must be in the 'available' state to take a snapshot")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload_id, {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            instances = [workload_vm_obj.vm_id for workload_vm_obj in
                         self.db.workload_vms_get(context, workload['id'])]
            if len(instances) != len(set(instances)):
                msg = _('Workload has some duplicate vm entries. '
                        'Remove duplicate entries and retry again.\n'
                        'Hint: Open edit workload dialog in horizon dashboard '
                        'and update the workload to remove any duplications')
                raise wlm_exceptions.InvalidState(reason=msg)

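            # Create the snapshot record in the 'creating' state with a
            # cleared cancel flag; the requested retention tags are stored as
            # JSON in the snapshot metadata.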
            metadata = {}
            metadata.setdefault('cancel_requested', '0')
            metadata["retention_tags"] = json.dumps(retention_tags)
            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'workload_id': workload_id,
                       'snapshot_type': snapshot_type,
                       'display_name': name,
                       'display_description': description,
                       'host': '',
                       'status': 'creating',
                       'metadata': metadata, }
            snapshot = self.db.snapshot_create(context, options)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                snapshot['snapshot_type'] +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Create Submitted',
                workload)

            self.db.snapshot_update(context,
                                    snapshot.id,
                                    {'progress_percent': 0,
                                     'progress_msg': 'Snapshot operation is scheduled',
                                     'status': 'executing'})
            self.scheduler_rpcapi.workload_snapshot(
                context, FLAGS.scheduler_topic, snapshot['id'])
            return snapshot
        except Exception as ex:
            if 'snapshot' in locals():
                self.db.workload_update(
                    context, snapshot.workload_id, {
                        'status': 'available'})
                self.db.snapshot_update(
                    context, snapshot.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else str(ex)})
                self.scheduler_rpcapi.schedule_send_email(
                    context, FLAGS.scheduler_topic, snapshot.id, 'snapshot')
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))
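    # Minimal usage sketch for workload_snapshot() above (hypothetical caller;
    # variable names and the way the API object is obtained are illustrative,
    # not part of this module):
    #
    #     wlm_api = API()  # assuming the enclosing API class
    #     snap = wlm_api.workload_snapshot(ctx, workload_id,
    #                                      snapshot_type='incremental',
    #                                      name='nightly', description='',
    #                                      is_scheduled=False)
    #     # On success the snapshot is queued on the scheduler topic with
    #     # status 'executing'; quota or validation failures come back as a
    #     # snapshot record in the 'error' state.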

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_get(self, context, snapshot_id):
        rv = self.db.snapshot_get(
            context,
            snapshot_id,
            project_only=context.project_only)
        snapshot_details = dict(rv)
        snapshot_vms = []
        try:
            for snapshot_vm_obj in self.db.snapshot_vms_get(
                    context, snapshot_id):
                snapshot_vm = {'id': snapshot_vm_obj.vm_id,
                               'name': snapshot_vm_obj.vm_name,
                               'status': snapshot_vm_obj.status, }
                metadata = {}
                for kvpair in snapshot_vm_obj.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                snapshot_vm['metadata'] = metadata
                snapshot_vms.append(snapshot_vm)
        except Exception as ex:
            LOG.debug("Failed to fetch snapshot: {}".format(snapshot_id))
            LOG.debug(ex)
        snapshot_details.setdefault('instances', snapshot_vms)
        return snapshot_details

    @autolog.log_method(logger=Logger)
    def snapshot_set(self, context, snapshot_id, status):
        """
          Args:-
                *snapshot_id--string
                *status--string/None
                TODO:- Need to be set by priviledged user in future.

          Return:-
                *On-Successful:- Snapshot Object
                *OnFailure:- Exception Object
        """
        snapshot = self.db.snapshot_get(context, snapshot_id)
        binary = FLAGS.workloads_topic
        service_disabled_status = None
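        # A snapshot stuck in a non-terminal state may only be reset when the
        # workloads service that owns it is missing or marked down; otherwise
        # the caller is asked to check the service on that host.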
        if snapshot.status.lower() not in ['error', 'available', 'mounted']:
            host = snapshot.host
            try:
                service_info = self.db.service_get_by_args(context, snapshot.host, binary)
            except wlm_exceptions.HostBinaryNotFound as ex:
                self.db.snapshot_update(
                        context, snapshot_id, {
                        'status': 'available' if snapshot.status.lower() == 'mounting' else 'error'})
                snapshot = self.db.snapshot_get(context, snapshot_id)
                return snapshot
            if service_info and hasattr(service_info, 'disabled'):
                service_disabled_status = service_info.disabled
            if service_disabled_status and not utils.service_is_up(service_info):
                self.db.snapshot_update(
                        context, snapshot_id, {
                        'status': 'available' if snapshot.status.lower() == 'mounting' else 'error'})
                snapshot = self.db.snapshot_get(context, snapshot_id)
                return snapshot
            else:
                raise Exception("Please check the service status on the host ({0})".format(snapshot.host))
        else:
            raise Exception("Snapshot ({0}) is in the '{1}' state; snapshots that are already "
                            "in the 'error', 'available' or 'mounted' state cannot be reset.".format(
                                snapshot_id, snapshot.status))

    # @autolog.log_method(logger=Logger, log_args=False, log_retval=False)
    @wrap_check_policy
    def snapshot_show(self, context, snapshot_id):
        def _get_pit_resource_id(metadata, key):
            for metadata_item in metadata:
                if metadata_item['key'] == key:
                    pit_id = metadata_item['value']
                    return pit_id

        def _get_pit_resource(snapshot_vm_common_resources, pit_id):
            for snapshot_vm_resource in snapshot_vm_common_resources:
                if snapshot_vm_resource.resource_pit_id == pit_id:
                    return snapshot_vm_resource

        def _get_vm_network_resource_snap_get(context, snapshot_vm_resource_id,
                                    rv, snapshot_id, models):
            try:
                vm_nic_snapshot = self.db.vm_network_resource_snap_get(
                                context, snapshot_vm_resource_id)
            except wlm_exceptions.VMNetworkResourceSnapNotFound:
                # The network resource may not have been imported yet, so
                # import it from the backend's network_db file and retry
                # the lookup.
                resource_type = 'network_db'
                workload_utils.import_resource(context, rv.workload_id,
                                               snapshot_id,
                                               resource_type,
                                               models.DB_VERSION)
                vm_nic_snapshot = self.db.vm_network_resource_snap_get(
                                context, snapshot_vm_resource_id)

            return vm_nic_snapshot

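        # Assemble a detailed view of the snapshot: for each snapshotted VM,
        # collect the captured flavor, security groups, NICs (with their
        # networks and subnets) and disks from the per-VM and snapshot-level
        # resource records.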
        rv = self.db.snapshot_show(
            context, snapshot_id, project_only=context.project_only)
        if rv is None:
            msg = _("Not found snapshot or operation not allowed")
            wlm_exceptions.ErrorOccurred(reason=msg)

        snapshot_details = dict(rv.items())

        snapshot_vms = []
        try:
            for snapshot_vm_obj in self.db.snapshot_vms_get(
                    context, snapshot_id):
                snapshot_vm = {'id': snapshot_vm_obj.vm_id,
                               'name': snapshot_vm_obj.vm_name,
                               'status': snapshot_vm_obj.status,
                               }
                metadata = {}
                for kvpair in snapshot_vm_obj.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                snapshot_vm['metadata'] = metadata
                snapshot_vm['nics'] = []
                snapshot_vm['vdisks'] = []
                snapshot_vm['security_group'] = []
                snapshot_vm_resources = None
                snapshot_vm_resources = self.db.snapshot_vm_resources_get(
                    context, snapshot_vm_obj.vm_id, snapshot_id)
                snapshot_vm_common_resources = self.db.snapshot_vm_resources_get(
                    context, snapshot_id, snapshot_id)
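                # Resources recorded against the snapshot id itself (rather
                # than a VM id) are shared point-in-time resources such as
                # subnets; NICs reference them through the 'subnet_id'
                # metadata key resolved via _get_pit_resource().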
                for snapshot_vm_resource in snapshot_vm_resources:
                    """ flavor """
                    if snapshot_vm_resource.resource_type == 'flavor':
                        vm_flavor = snapshot_vm_resource
                        snapshot_vm['flavor'] = {
                            'vcpus': self.db.get_metadata_value(
                                vm_flavor.metadata, 'vcpus'), 'ram': self.db.get_metadata_value(
                                vm_flavor.metadata, 'ram'), 'disk': self.db.get_metadata_value(
                                vm_flavor.metadata, 'disk'), 'ephemeral': self.db.get_metadata_value(
                                vm_flavor.metadata, 'ephemeral')}
                    """ security group """
                    if snapshot_vm_resource.resource_type == 'security_group':
                        if self.db.get_metadata_value(
                                snapshot_vm_resource.metadata,
                                'vm_attached') in (
                                True,
                                '1',
                                None):
                            snapshot_vm['security_group'].append(
                                {
                                    'name': self.db.get_metadata_value(
                                        snapshot_vm_resource.metadata,
                                        'name'),
                                    'security_group_type': self.db.get_metadata_value(
                                        snapshot_vm_resource.metadata,
                                        'security_group_type')})

                    """ nics """
                    if snapshot_vm_resource.resource_type == 'nic':
                        vm_nic_snapshot = _get_vm_network_resource_snap_get(
                            context, snapshot_vm_resource.id,
                            rv, snapshot_id, models)
                        nic_data = pickle.loads(bytes(vm_nic_snapshot.pickle, 'utf-8'))
                        nic = {'mac_address': nic_data['mac_address'],
                               'ip_address': nic_data['ip_address'], }
                        nic['network'] = {
                            'id': self.db.get_metadata_value(
                                    vm_nic_snapshot.metadata, 'network_id'), 'name': self.db.get_metadata_value(
                                    vm_nic_snapshot.metadata, 'network_name'), 'cidr': nic_data.get(
                                    'cidr', None), 'network_type': nic_data['network_type']}

                        pit_id = _get_pit_resource_id(
                            vm_nic_snapshot.metadata, 'subnet_id')
                        if pit_id:
                            try:
                                vm_nic_subnet = _get_pit_resource(
                                        snapshot_vm_common_resources, pit_id)
                                vm_nic_subnet_snapshot = self.db.vm_network_resource_snap_get(
                                        context, vm_nic_subnet.id)
                                subnet = pickle.loads(
                                        bytes(vm_nic_subnet_snapshot.pickle, 'utf-8'))
                                nic['network']['subnet'] = {
                                        'id': subnet.get(
                                            'id', None), 'name': subnet.get(
                                            'name', None), 'cidr': subnet.get(
                                            'cidr', None), 'ip_version': subnet.get(
                                            'ip_version', None), 'gateway_ip': subnet.get(
                                            'gateway_ip', None), }
                            except Exception as ex:
                                LOG.exception(ex)
                        snapshot_vm['nics'].append(nic)

                    """ vdisks """
                    if snapshot_vm_resource.resource_type == 'disk':
                        vdisk = {
                            'label': self.db.get_metadata_value(
                                snapshot_vm_resource.metadata,
                                'label'),
                            'resource_id': snapshot_vm_resource.id,
                            'restore_size': snapshot_vm_resource.restore_size,
                            'vm_id': snapshot_vm_resource.vm_id}

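                        # Image-backed disks expose the Glance image details
                        # captured at snapshot time; volume-backed disks
                        # expose the Cinder volume attributes instead.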
                        if self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'image_id'):
                            vdisk['image_id'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'image_id')
                            vdisk['image_name'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'image_name')
                            vdisk['hw_qemu_guest_agent'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'hw_qemu_guest_agent')
                        else:
                            vdisk['volume_id'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_id')
                            vdisk['volume_name'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_name')
                            vdisk['volume_size'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_size')
                            vdisk['volume_type'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_type')
                            vdisk['volume_mountpoint'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_mountpoint')

                            if self.db.get_metadata_value(
                                    snapshot_vm_resource.metadata, 'availability_zone'):
                                vdisk['availability_zone'] = self.db.get_metadata_value(
                                    snapshot_vm_resource.metadata, 'availability_zone')
                            vdisk['metadata'] = json.loads(self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'metadata', "{}"))

                        snapshot_vm['vdisks'].append(vdisk)
                snapshot_vms.append(snapshot_vm)

        except Exception as ex:
            LOG.exception(ex)

        snapshot_details['instances'] = snapshot_vms
        return snapshot_details

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_get_all(self, context, search_opts=None):
        if search_opts is None:
            search_opts = {}
        search_opts["read_metadata"] = False

        snapshots = self.db.snapshot_get_all(context, **search_opts)
        return snapshots

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_next(self, context, snapshot_id, workload_id):
        """
        Args:
            context: HttpRequest Object
            snapshot_id: snapshot_uuid
            workload_id: Workload_uuid

        Returns: list of snapshot ids.

        """
        t = []
        rv = self.db.snapshot_next(
            context, snapshot_id, workload_id, project_only=context.project_only)
        try:
            for each in rv:
                t.append((each.get('id')))
                return t
        except StopIteration as si:
            t.append(None)
        except Exception as e:
            LOG.exception(e)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_prev(self, context, snapshot_id, workload_id):
        """

        Args:
            context: HttpRequest Object
            snapshot_id: Snapshot_uuid
            workload_id: Workload_uuid

        Returns: list of previous snapshot_ids

        """
        t = []
        rv = self.db.snapshot_prev(
            context, snapshot_id, workload_id, project_only=context.project_only)
        try:
            for each in rv:
                t.append((each.get('id')))
                return t
        except StopIteration as si:
            t.append(None)

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def snapshot_delete(self, context, snapshot_id):
        """
        Delete a workload snapshot. No RPC call required
        """
        try:
            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot delete snapshot. ' +
                         'Expected role "%s" is not found. ' % vault.CONF.trustee_role +
                         'Please make sure user has the trustee role assigned'))
            snapshot = self.snapshot_get(context, snapshot_id)
            workload = self.workload_get(context, snapshot['workload_id'])

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            snapshot_snapshot_type = snapshot['snapshot_type']
            workload_display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Delete Requested',
                snapshot)

            if snapshot['status'] not in ['available', 'error', 'cancelled']:
                msg = _(
                    "Snapshot status must be 'available' or 'error' or 'cancelled'")
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available' and workload['status'].lower(
                ) != 'locked_for_delete':
                    msg = _(
                        "Workload must be in the 'available' state to delete a snapshot")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, snapshot['workload_id'], {
                        'status': 'locked_for_delete'})
            finally:
                workload_lock.release()

            restores = self.db.restore_get_all_by_project_snapshot(
                context, context.project_id, snapshot_id)
            for restore in restores:
                if restore.restore_type == 'test':
                    msg = _('This workload snapshot contains testbubbles')
                    raise wlm_exceptions.InvalidState(reason=msg)

            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'deleting'})

            status_messages = {'message': 'Snapshot delete operation starting'}
            options = {
                'display_name': "Snapshot Delete",
                'display_description': "Snapshot delete for snapshot id %s" % snapshot_id,
                'status': "starting",
                'status_messages': status_messages,
            }

            self.scheduler_rpcapi.snapshot_delete(
                context, FLAGS.scheduler_topic, snapshot_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Delete Submitted',
                snapshot)

            return snapshot_id

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    def _validate_restore_options(self, context, snapshot_id, options):
        if options.get('type', "") != "openstack":
            msg = _("'type' field in options is not set to 'openstack'")
            raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if 'openstack' not in options:
            msg = _("'openstack' field is not in options")
            raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options.get("restore_type", None) not in (
                'inplace', 'selective', 'oneclick'):
            msg = _(
                "'restore_type' field must be one of 'inplace', 'selective', 'oneclick'")
            raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options.get("restore_type", None) in ('inplace', 'selective'):
            # If instances is not available should we restore entire snapshot?
            if 'instances' not in options['openstack']:
                msg = _("'instances' field is not in found "
                        "in options['instances']")
                raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options['openstack'].get('restore_topology', False) is True:
            if len(options['openstack'].get('networks_mapping', {}).get('networks', [])) > 0:
                msg = _("Network mapping cannot be passed when restoring "
                        "the network topology.")
                raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options.get("restore_type", None) in ('selective'):
            restore_instances = options['openstack']['instances']
            restore_ins_ids = set([ins['id'] for ins in restore_instances])

            snap_vms = self.db.snapshot_vms_get(context, snapshot_id)
            snap_vm_ids = set([vm.vm_id for vm in snap_vms])

            if len(restore_ins_ids - snap_vm_ids) > 0:
                msg = _("Please provide valid instance id in restore options.")
                raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

            missing_vms = snap_vm_ids - restore_ins_ids
            if len(missing_vms) > 0:
                for missing_vm in missing_vms:
                    restore_instances.append({'include': False, 'id': missing_vm})
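    # Shape of the restore options payload accepted by
    # _validate_restore_options() above (illustrative values; only the keys
    # checked by the validator are required):
    #
    #     {
    #         "type": "openstack",
    #         "restore_type": "selective",   # or 'inplace' / 'oneclick'
    #         "openstack": {
    #             "instances": [{"id": "<vm-uuid>", "include": True}],
    #             "networks_mapping": {"networks": []},
    #             "restore_topology": False,
    #         },
    #     }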

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def snapshot_restore(self, context, snapshot_id, test,
                         name, description, options):
        """
        Make the RPC call to restore a snapshot.
        """
        try:
            self._validate_restore_options(context, snapshot_id, options)

            # If snapshot not found in current project then it throws SnapshotNotFound.
            snapshot = self.db.snapshot_get(
                context, snapshot_id, project_only='yes')

            workload = self.workload_get(context, snapshot['workload_id'])

            workload_display_name = workload['display_name']
            snapshot_display_name = snapshot['display_name']
            snapshot_snapshot_type = snapshot['snapshot_type']

            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            restore_display_name = ''

            if not name or len(name) == 0:
                name = 'Undefined'
            restore_display_name = '\'' + name + '\''
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Create Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available':
                    msg = _(
                        "Workload must be in the 'available' state to restore")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'locked'})
            finally:
                workload_lock.release()
            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'restoring'})

            restore_type = "restore"
            if test:
                restore_type = "test"

            if options.get('name'):
                options['name'] = options['name'].encode(encoding='UTF-8')

            if options.get('description'):
                options['description'] = options['description'].encode(encoding='UTF-8')

            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'snapshot_id': snapshot_id,
                      'restore_type': restore_type,
                      'display_name': name,
                      'display_description': description,
                      'pickle': str(pickle.dumps(options, 0), 'utf-8'),
                      'host': '',
                      'status': 'restoring', }
            restore = self.db.restore_create(context, values)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))
            self.db.restore_update(context,
                                   restore.id,
                                   {'progress_percent': 0,
                                    'progress_msg': 'Restore operation is scheduled',
                                    'status': 'restoring'})
            self.scheduler_rpcapi.snapshot_restore(
                context, FLAGS.scheduler_topic, restore['id'])
            restore_display_name = restore['display_name']
            restore_restore_type = restore['restore_type']
            if restore_display_name == 'One Click Restore':
                local_time = self.get_local_time(
                    context, restore['created_at'])
                restore_display_name = local_time + \
                    ' (' + restore['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Create Submitted',
                restore)
            return restore
        except Exception as ex:
            if 'restore' in locals():
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'available'})
                self.db.snapshot_update(
                    context, snapshot_id, {
                        'status': 'available'})
                self.db.restore_update(
                    context, restore.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else str(ex)})
                self.scheduler_rpcapi.schedule_send_email(
                    context, FLAGS.scheduler_topic, restore.id, 'restore')
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_cancel(self, context, snapshot_id):
        """
        Make the RPC call to cancel snapshot
        """
        try:
            snapshot = self.db.snapshot_get(context, snapshot_id)
            admin_context = wlm_context.get_admin_context()
            workload = self.workload_get(admin_context, snapshot['workload_id'])

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Cancel Requested',
                snapshot)
            if snapshot.status in ['available', 'cancelled', 'error']:
                return

            metadata = {}
            metadata.setdefault('cancel_requested', '1')
            self.db.snapshot_update(context,
                                    snapshot_id,
                                    {
                                        'metadata': metadata,
                                        'status': 'cancelling'
                                    })
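            # Cancellation is signalled twice: through the 'cancel_requested'
            # metadata flag above and through an empty sentinel file on the
            # backup target, which the running snapshot job is expected to
            # poll (inferred from get_cancelled_file_path()).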
            backup_target = vault.get_backup_target(
                workload['metadata']['backup_media_target'])
            cancel_file = backup_target.get_cancelled_file_path({'snapshot_id': snapshot_id})
            fileutils.ensure_tree(os.path.dirname(cancel_file))
            with open(cancel_file, "w") as f:
                 pass

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Cancel Submitted',
                snapshot)

            return True

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_restore_network_topology(
            self, context, snapshot_id, name, description
    ):
        """
        Make the RPC call to restore a snapshot's Network Topology.
        """
        snapshot = self.snapshot_show(context, snapshot_id)
        try:
            restore_options = workload_utils.get_restore_options(name, description, snapshot, restore_type='network_topology', restore_topology=True)
        except Exception as ex:
            LOG.exception('Failed to fetch network options, with error: %s', ex)
            raise ex

        admin_context = wlm_context.get_admin_context()
        workload = self.workload_get(admin_context, snapshot['workload_id'])
        workload_display_name = workload['display_name']
        snapshot_display_name = snapshot['display_name']
        try:
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(context, snapshot['created_at'])
                snapshot_display_name = local_time + ' (' + snapshot['display_name'] + ')'

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' Snapshot \'' +
                snapshot_display_name +
                '\' Network Topology Restore Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available':
                    msg = _("Workload must be in the 'available' state to restore")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'restoring'})

            restore_type = "network-topology"
            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'snapshot_id': snapshot_id,
                      'restore_type': restore_type,
                      'display_name': name,
                      'display_description': description,
                      'pickle': str(pickle.dumps(restore_options, 0), 'utf-8'),
                      'host': '',
                      'status': 'restoring', }
            restore = self.db.restore_create(context, values)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))

            self.db.restore_update(context,
                                   restore.id,
                                   {'progress_percent': 0,
                                    'progress_msg': 'Network Topology Restore operation is scheduled',
                                    'status': 'restoring'})
            self.scheduler_rpcapi.network_topology_restore(
                context, FLAGS.scheduler_topic, restore['id'])

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' Snapshot \'' +
                snapshot_display_name +
                '\' Network Topology Restore Request Submitted',
                restore)
            return restore
        except Exception as ex:
            if 'restore' in locals():
                self.db.workload_update(context, workload['id'], {'status': 'available'})
                self.db.snapshot_update(context, snapshot_id, {'status': 'available'})
                self.db.restore_update(
                    context, restore.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else str(ex)})

            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) % (ex.kwargs if hasattr(ex, 'kwargs') else {})
            )

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_mount(self, context, snapshot_id, mount_vm_id):
        """
        Make the RPC call to Mount the snapshot.
        """
        compute_service = nova.API(production=True)
        try:
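            # The recovery manager VM must have no ephemeral disk and, when
            # booted from an image, that image must carry the
            # 'hw_qemu_guest_agent=yes' property; mounting is also refused if
            # another snapshot is already mounted on the same VM.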
            snapshot = self.snapshot_get(context, snapshot_id)
            server = compute_service.get_server_by_id(context, mount_vm_id)
            flavor_id = server.flavor['id']
            fl = compute_service.get_flavor_by_id(context, flavor_id)
            if fl.ephemeral:
                error_msg = "Recovery manager instance cannot have ephemeral disk"
                raise Exception(error_msg)

            if hasattr(server, 'image') and server.image:
                (image_service, image_id) = glance.get_remote_image_service(
                    context, server.image['id'])
                context = nova._get_tenant_context(context)
                metadata = image_service.show(context, server.image['id'])
                error_msg = "Recovery manager instance needs to be created with glance image property 'hw_qemu_guest_agent=yes'"
                if 'hw_qemu_guest_agent' in metadata['properties'].keys():
                    if metadata['properties']['hw_qemu_guest_agent'] != 'yes':
                        raise Exception(error_msg)
                else:
                    raise Exception(error_msg)

            if not snapshot:
                msg = _('Invalid snapshot id')
                raise wlm_exceptions.Invalid(reason=msg)

            workload = self.workload_get(context, snapshot['workload_id'])
            snapshots_all = self.db.snapshot_get_all(context)
            for snapshot_one in snapshots_all:
                if snapshot_one.status == 'mounted':
                    if workload['source_platform'] == 'openstack':
                        mounted_vm_id = self.db.get_metadata_value(
                            snapshot_one.metadata, 'mount_vm_id')
                        if mounted_vm_id is not None:
                            if mount_vm_id == mounted_vm_id:
                                workload_one = self.db.workload_get(context, snapshot_one.workload_id)
                                msg = _(
                                    'snapshot %s (%s) from workload %s (%s) is already mounted on this VM' %
                                    (snapshot_one.display_name, snapshot_one.id, workload_one.display_name, workload_one.id))
                                raise wlm_exceptions.InvalidParameterValue(
                                    err=msg)

            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Mount Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            self.db.snapshot_update(context, snapshot_id,
                                    {'status': 'mounting'})
            self.scheduler_rpcapi.snapshot_mount(
                context, FLAGS.scheduler_topic, snapshot_id, mount_vm_id)

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Mount Submitted',
                snapshot)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_dismount(self, context, snapshot_id):
        """
        Make the RPC call to Dismount the snapshot.
        """
        try:
            snapshot = self.snapshot_get(context, snapshot_id)

            if not snapshot:
                msg = _('Invalid snapshot id')
                raise wlm_exceptions.Invalid(reason=msg)

            snapshot_db_obj = self.db.snapshot_get(context, snapshot_id)
            mount_vm_id = self.db.get_metadata_value(
                snapshot_db_obj.metadata, 'mount_vm_id')

            if not mount_vm_id:
                msg = _('Could not find recovery manager vm id in the snapshot metadata')
                LOG.error(msg)
                raise wlm_exceptions.Invalid(reason=msg)

            workload = self.workload_get(context, snapshot['workload_id'])
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Dismount Requested',
                snapshot)

            if snapshot['status'] != 'mounted':
                msg = _('Snapshot status must be mounted')
                raise wlm_exceptions.InvalidState(reason=msg)

            self.scheduler_rpcapi.snapshot_dismount(
                context, FLAGS.scheduler_topic, snapshot_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Dismount Submitted',
                snapshot)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_restore_security_groups(
            self, context, snapshot_id, name, description
    ):
        """
        Make the RPC call to restore a snapshot's Security Groups
        """
        snapshot = self.snapshot_show(context, snapshot_id)
        try:
            restore_options = workload_utils.get_restore_options(
                name, description, snapshot,
                restore_type='security_group', restore_topology=True
            )
        except Exception as ex:
            LOG.exception('Failed to fetch security group options, with error: %s', ex)
            raise ex

        admin_context = wlm_context.get_admin_context()
        workload = self.workload_get(admin_context, snapshot['workload_id'])
        workload_display_name = workload['display_name']
        snapshot_display_name = snapshot['display_name']
        try:
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(context, snapshot['created_at'])
                snapshot_display_name = local_time + ' (' + snapshot['display_name'] + ')'

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' Snapshot \'' +
                snapshot_display_name +
                '\' Security Groups Restore Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available':
                    msg = _("Workload must be in the 'available' state to restore")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'restoring'})

            restore_type = "security_group"
            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'snapshot_id': snapshot_id,
                      'restore_type': restore_type,
                      'display_name': name,
                      'display_description': description,
                      'pickle': str(pickle.dumps(restore_options, 0), 'utf-8'),
                      'host': '',
                      'status': 'restoring', }
            restore = self.db.restore_create(context, values)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backup operations. '
                             'Please make sure the user has the trustee role assigned'))

            self.db.restore_update(context,
                                   restore.id,
                                   {'progress_percent': 0,
                                    'progress_msg': 'Security Groups operation is scheduled',
                                    'status': 'restoring'})
            self.scheduler_rpcapi.security_groups_restore(
                context, FLAGS.scheduler_topic, restore['id'])

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' Snapshot \'' +
                snapshot_display_name +
                '\' Security Groups Restore Request Submitted',
                restore)
            return restore
        except Exception as ex:
            if 'restore' in locals():
                self.db.workload_update(context, workload['id'], {'status': 'available'})
                self.db.snapshot_update(context, snapshot_id, {'status': 'available'})
                self.db.restore_update(
                    context, restore.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else str(ex)})

            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) % (ex.kwargs if hasattr(ex, 'kwargs') else {})
            )

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def mounted_list(self, context, workload_id):
        """
        Gets list of mounted snapshots
        """
        try:
            mounted_snapshots = []
            kwargs = {"workload_id": workload_id}
            snapshots = self.db.snapshot_get_all(context, **kwargs)
            if len(snapshots) == 0:
                msg = _("No snapshots found for this workload")
                raise wlm_exceptions.ErrorOccurred(reason=msg)

            for snapshot in snapshots:
                if snapshot.status == 'mounted':
                    mounted = {'snapshot_id': snapshot.id,
                               'snapshot_name': snapshot.display_name,
                               'workload_id': snapshot.workload_id,
                               'status': snapshot.status,
                               }
                    mounted_snapshots.append(mounted)
            return dict(mounted_snapshots=mounted_snapshots)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_get(self, context, restore_id):
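        """
        Get the details of a restore, including its parent snapshot and the
        restored VMs with their metadata.
        """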
        rv = self.db.restore_get(context, restore_id)
        restore_details = dict(rv)

        snapshot = self.db.snapshot_get(
            context,
            rv.snapshot_id,
            read_deleted="yes",
            project_only=context.project_only)
        restore_details.setdefault('workload_id', snapshot.workload_id)

        restore_details['snapshot_details'] = dict(snapshot)

        instances = []
        try:
            vms = self.db.restored_vms_get(context, restore_id)
            for vm in vms:
                restored_vm = {'id': vm.vm_id,
                               'name': vm.vm_name,
                               'time_taken': vm.time_taken,
                               'status': vm.status, }
                metadata = {}
                for kvpair in vm.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                restored_vm['metadata'] = metadata
                instances.append(restored_vm)
        except Exception as ex:
            LOG.exception(ex)
        restore_details.setdefault('instances', instances)
        return restore_details

    # @autolog.log_method(logger=Logger, log_args=False, log_retval=False)
    @wrap_check_policy
    def restore_show(self, context, restore_id):
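        """
        Get the details of a restore, including its parent snapshot, the
        restored VMs and the restored resources (ports, networks, subnets,
        routers, flavors and security groups).
        """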
        rv = self.db.restore_show(context, restore_id)
        restore_details = dict(rv)

        snapshot = self.db.snapshot_get(
            context,
            rv.snapshot_id,
            read_deleted="yes",
            project_only=context.project_only)
        restore_details.setdefault('workload_id', snapshot.workload_id)

        restore_details['snapshot_details'] = dict(snapshot)
        instances = []
        try:
            vms = self.db.restored_vms_get(context, restore_id)
            for vm in vms:
                restored_vm = {'id': vm.vm_id,
                               'name': vm.vm_name,
                               'status': vm.status, }
                metadata = {}
                for kvpair in vm.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                restored_vm['metadata'] = metadata
                instances.append(restored_vm)
        except Exception as ex:
            LOG.exception(ex)
        restore_details.setdefault('instances', instances)

        ports_list = []
        networks_list = []
        subnets_list = []
        routers_list = []
        flavors_list = []
        security_groups_list = []
        try:
            resources = self.db.restored_vm_resources_get(
                context, restore_id, restore_id)
            for resource in resources:
                if resource.resource_type == 'port':
                    ports_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'network':
                    networks_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'subnet':
                    subnets_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'router':
                    routers_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'flavor':
                    flavors_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'security_group':
                    security_groups_list.append(
                        {'id': resource.id, 'name': resource.resource_name})

        except Exception as ex:
            LOG.exception(ex)
        restore_details.setdefault('ports', ports_list)
        restore_details.setdefault('networks', networks_list)
        restore_details.setdefault('subnets', subnets_list)
        restore_details.setdefault('routers', routers_list)
        restore_details.setdefault('flavors', flavors_list)
        restore_details.setdefault('security_groups', security_groups_list)
        return restore_details

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_get_all(self, context, snapshot_id=None):
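        """
        Get all restores, optionally filtered by snapshot id.
        """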
        restores = self.db.restore_get_all(context, snapshot_id)
        return restores

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_delete(self, context, restore_id):
        """
        Delete a workload restore.
        """
        restore_details = self.restore_show(context, restore_id)

        restore = self.db.restore_get(context, restore_id)
        snapshot = self.db.snapshot_get(context, restore['snapshot_id'])
        workload = self.workload_get(context, snapshot['workload_id'])

        # check if BT is available
        vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

        restore_display_name = restore['display_name']
        if restore_display_name == 'One Click Restore':
            local_time = self.get_local_time(context, restore['created_at'])
            restore_display_name = local_time + \
                ' (' + restore['display_name'] + ')'
        snapshot_display_name = snapshot['display_name']
        if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
            local_time = self.get_local_time(context, snapshot['created_at'])
            snapshot_display_name = local_time + \
                ' (' + snapshot['display_name'] + ')'
        workload_display_name = workload['display_name']
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload_display_name +
            '\' ' +
            'Snapshot \'' +
            snapshot_display_name +
            '\' Restore \'' +
            restore_display_name +
            '\' Delete Requested',
            restore)

        if restore_details['target_platform'] == 'vmware':
            self.db.restore_delete(context, restore_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Delete Submitted',
                restore)
            return

        if restore_details['status'] not in [
                'available', 'error', 'cancelled']:
            msg = _(
                "Status of the requested resource must be 'available', 'error' or 'cancelled'")
            raise wlm_exceptions.InvalidState(reason=msg)

        """
        if restore_details['restore_type'] == 'test':
            network_service =  neutron.API(production=False)
            compute_service = nova.API(production=False)
        else:
            network_service =  neutron.API(production=True)
            compute_service = nova.API(production=True)
        image_service = glance.get_default_image_service(production= (restore_details['restore_type'] != 'test'))
        for instance in restore_details['instances']:
            try:
                vm = compute_service.get_server_by_id(context, instance['id'])
                compute_service.delete(context, instance['id'])
                image_service.delete(context, vm.image['id'])
                #TODO(giri): delete the cinder volumes
            except Exception as exception:
                msg = _("Error deleting instance %(instance_id)s with failure: %(exception)s")
                LOG.debug(msg, {'instance_id': instance['id'], 'exception': exception})
                LOG.exception(exception)
        for port in restore_details['ports']:
            try:
                network_service.delete_port(context,port['id'])
            except Exception as exception:
                msg = _("Error deleting port %(port_id)s with failure: %(exception)s")
                LOG.debug(msg, {'port_id': port['id'], 'exception': exception})
                LOG.exception(exception)
        for router in restore_details['routers']:
            try:
                network_service.delete_router(context,router['id'])
            except Exception as exception:
                msg = _("Error deleting router %(router_id)s with failure: %(exception)s")
                LOG.debug(msg, {'router_id': router['id'], 'exception': exception})
                LOG.exception(exception)
        for subnet in restore_details['subnets']:
            try:
                network_service.delete_subnet(context,subnet['id'])
            except Exception as exception:
                msg = _("Error deleting subnet %(subnet_id)s with failure: %(exception)s")
                LOG.debug(msg, {'subnet_id': subnet['id'], 'exception': exception})
                LOG.exception(exception)
        for network in restore_details['networks']:
            try:
                network_service.delete_network(context,network['id'])
            except Exception as exception:
                msg = _("Error deleting network %(network_id)s with failure: %(exception)s")
                LOG.debug(msg, {'network_id': network['id'], 'exception': exception})
                LOG.exception(exception)
        for flavor in restore_details['flavors']:
            try:
                compute_service.delete_flavor(context,flavor['id'])
            except Exception as exception:
                msg = _("Error deleting flavor %(flavor_id)s with failure: %(exception)s")
                LOG.debug(msg, {'flavor_id': flavor['id'], 'exception': exception})
                LOG.exception(exception)
        for security_group in restore_details['security_groups']:
            try:
                network_service.security_group_delete(context,security_group['id'])
            except Exception as exception:
                msg = _("Error deleting security_group %(security_group_id)s with failure: %(exception)s")
                LOG.debug(msg, {'security_group_id': security_group['id'], 'exception': exception})
                LOG.exception(exception)
        """

        self.db.restore_delete(context, restore_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload_display_name +
            '\' ' +
            'Snapshot \'' +
            snapshot_display_name +
            '\' Restore \'' +
            restore_display_name +
            '\' Delete Submitted',
            restore)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_cancel(self, context, restore_id):
        """
        Make the RPC call to cancel restore
        """
        try:
            restore = self.db.restore_get(context, restore_id)
            snapshot = self.db.snapshot_get(context, restore['snapshot_id'])
            admin_context = wlm_context.get_admin_context()
            workload = self.workload_get(admin_context, snapshot['workload_id'])

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(workload['metadata']['backup_target_types'])

            restore_display_name = restore['display_name']
            if restore_display_name == 'One Click Restore':
                local_time = self.get_local_time(
                    context, restore['created_at'])
                restore_display_name = local_time + \
                    ' (' + restore['display_name'] + ')'
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            workload_display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Cancel Requested',
                restore)

            if restore.status in ['available', 'cancelled', 'error']:
                return

            metadata = {}
            metadata.setdefault('cancel_requested', '1')

            self.db.restore_update(context,
                                   restore_id,
                                   {
                                       'metadata': metadata,
                                       'status': 'cancelling'
                                   })
            backup_target = vault.get_backup_target(
                workload['metadata']['backup_media_target'])
            cancel_file = backup_target.get_cancelled_file_path(
                {'restore_id': restore_id})
            fileutils.ensure_tree(os.path.dirname(cancel_file))
            with open(cancel_file, "w"):
                pass

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Cancel Submitted',
                restore)

            return True

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_all_project_quota_types(self, context, quota_type_id=None):
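        """
        Get all project quota types, optionally filtered by quota type id.
        """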
        try:
            quota_types = self.db.get_all_project_quota_types(quota_type_id)
            return quota_types or []
        except Exception as ex:
            LOG.exception(ex)
        return []

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def create_project_quota_type(self, context, project_quota_type_obj):
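        """
        Create a project quota type record in the database.
        """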
        try:
            return self.db.create_project_quota_type(
                project_quota_type_obj.to_dict()
            ) or {}
        except Exception as ex:
            LOG.exception(ex)
        return {}

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def update_project_quota_types(self, context, update_dict):
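        """
        Update project quota types with the given values.
        """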
        try:
            return self.db.update_project_quota_types(update_dict) or {}
        except Exception as ex:
            LOG.exception(ex)
        return {}

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_allowed_quotas(
            self, context, project_id=None,
            allowed_quota_id=None, quota_type_id=None
    ):
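        """
        Get allowed quotas, validating the project against keystone when a
        project id is supplied.
        """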
        try:
            if project_id:
                keystone_client = KeystoneClient(context)
                try:
                    keystone_client.client.client.projects.get(project_id)
                except KsNotFound as ex:
                    LOG.exception(ex)
                    raise wlm_exceptions.ProjectNotFound(project_id=project_id)
                except KsUnauthorized as ex:
                    LOG.exception(ex)
                    raise wlm_exceptions.NotAuthorized()
                except KsForbidden as ex:
                    LOG.exception(ex)
                    raise wlm_exceptions.Forbidden(action='identity:get_project')
            return self.db.get_allowed_quotas(
                context, project_id, allowed_quota_id, quota_type_id
            ) or []
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def create_allowed_quotas(self, context, allowed_quota_objs):
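        """
        Create allowed quota records, validate the affected projects against
        keystone and upload the allowed quota db entry for each project.
        """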
        res = []
        try:
            data = [obj.to_dict() for obj in allowed_quota_objs]
            project_ids = [each_data['project_id'] for each_data in data]
            project_ids = list(set(project_ids))
            res = self.db.create_allowed_quotas(context, data) or []
            keystone_client = KeystoneClient(context)

            if res:
                # Allowed quota creation is only expected for a single
                # project at a time, but loop over project_ids defensively.
                for project_id in project_ids:
                    try:
                        keystone_client.client.client.projects.get(project_id)
                    except KsNotFound as ex:
                        LOG.exception(ex)
                        raise wlm_exceptions.ProjectNotFound(project_id=project_id)
                    except KsUnauthorized as ex:
                        LOG.exception(ex)
                        raise wlm_exceptions.NotAuthorized()
                    except KsForbidden as ex:
                        LOG.exception(ex)
                        raise wlm_exceptions.Forbidden(action='identity:get_project')
                    workload_utils.upload_allowed_quota_db_entry(
                        context, project_id
                    )
        except Exception as ex:
            LOG.exception(ex)
            raise ex
        return res

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def modify_allowed_quotas(self, context, allowed_quota_id, data):
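        """
        Modify an allowed quota record, validate the project against keystone
        and re-upload the project's allowed quota db entry.
        """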
        res = None
        try:
            project_id = data['project_id']
            if project_id:
                keystone_client = KeystoneClient(context)
                try:
                    keystone_client.client.client.projects.get(project_id)
                except KsNotFound as ex:
                    LOG.exception(ex)
                    raise wlm_exceptions.ProjectNotFound(project_id=project_id)
                except KsUnauthorized as ex:
                    LOG.exception(ex)
                    raise wlm_exceptions.NotAuthorized()
                except KsForbidden as ex:
                    LOG.exception(ex)
                    raise wlm_exceptions.Forbidden(action='identity:get_project')
            is_modified = self.db.modify_allowed_quotas(context, allowed_quota_id, data) or []
            if is_modified:
                res = self.db.get_allowed_quotas(
                    context, project_id, allowed_quota_id
                )
                workload_utils.upload_allowed_quota_db_entry(
                    context, project_id
                )
                if res and isinstance(res, list):
                    return res[0]
        except Exception as ex:
            LOG.exception(ex)
            raise ex
        return res

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def delete_allowed_quota(self, context, allowed_quota_id):
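        """
        Delete an allowed quota record and re-upload the allowed quota db
        entry for its project.
        """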
        try:
            quota_obj = self.db.get_allowed_quotas(
                context, allowed_quota_id=allowed_quota_id
            )

            self.db.delete_allowed_quota(context, allowed_quota_id)
            if quota_obj and isinstance(quota_obj, list) and quota_obj[0].get('project_id', None):
                workload_utils.upload_allowed_quota_db_entry(
                    context, quota_obj[0].project_id
                )

        except Exception as ex:
            LOG.exception(ex)

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def settings_create(self, context, settings):
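        """
        Create the given settings; the smtp server password is encrypted
        before it is stored.
        """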
        created_settings = []
        try:
            for setting in settings:
                # If the setting is the smtp server password, encrypt it before storing.
                if "smtp_server_password" in setting.get('name'):
                    setting['value'] = encrypt_password(setting.get('value'), tvault_key_file_name).decode('utf-8')

                created_settings.append(
                    self.db.setting_create(
                        context, setting))
        except Exception as ex:
            LOG.exception(ex)
        return created_settings

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def settings_update(self, context, settings):
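        """
        Update the given settings; the smtp server password is encrypted
        before it is stored.
        """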
        updated_settings = []
        try:
            for setting in settings:
                # If the setting is the smtp server password, encrypt it before storing.
                if "smtp_server_password" in setting.get('name'):
                    setting['value'] = encrypt_password(setting.get('value'), tvault_key_file_name).decode('utf-8')

                updated_settings.append(
                    self.db.setting_update(
                        context, setting['name'], setting))
        except Exception as ex:
            LOG.exception(ex)
        return updated_settings

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def setting_delete(self, context, name):
        self.db.setting_delete(context, name)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def setting_get(self, context, name, get_hidden=False):
        try:
            return self.db.setting_get(context, name, get_hidden=get_hidden)
        except Exception as ex:
            LOG.exception(ex)
        return None

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def settings_get(self, context, get_hidden=False):
        settings = []
        try:
            return self.db.setting_get_all(context, get_hidden=get_hidden)
        except Exception as ex:
            LOG.exception(ex)
        return settings

    @autolog.log_method(logger=Logger)
    def get_local_time(self, context, record_time):
        """
        Convert and return the date and time - from GMT to local time
        """
        try:
            epoch = time.mktime(record_time.timetuple())
            offset = datetime.fromtimestamp(
                epoch) - datetime.utcfromtimestamp(epoch)
            local_time = datetime.strftime(
                (record_time + offset), "%m/%d/%Y %I:%M %p")
            return local_time
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def trust_create(self, context, role_names, is_cloud_trust=False):
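        """
        Create a keystone trust for the calling user (or the cloud admin)
        with the given roles and store the trust id as a hidden setting.
        Older trusts for the same user and project are removed.
        """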

        try:
            provided_roles = [role.strip() for role in role_names.split(',')]

            # Fetch all the roles using admin context and
            # match the provided roles
            clients.initialise()
            if not is_cloud_trust:
                admin_context = nova._get_tenant_context(context,
                        cloud_admin=True)
                user_keystoneclient = clients.Clients(
                        admin_context).client("keystone")
                existing_roles = user_keystoneclient.client.roles.list()
            else:
                user_keystoneclient = clients.Clients(
                        context).client("keystone")
                existing_roles = user_keystoneclient.client.roles.list()

            matched_roles = []
            matched_roles_ids = []
            for e_role in existing_roles:
                for p_role in provided_roles:
                    if p_role in (e_role.name, e_role.id):
                        matched_roles.append(p_role)
                        matched_roles_ids.append(e_role.id)

            matched_roles.sort()
            provided_roles.sort()
            if matched_roles != provided_roles:
                unmatched_roles = list(set(matched_roles).symmetric_difference(
                    set(provided_roles)))
                LOG.debug("Failed to find roles %s for user %s"
                          % (unmatched_roles, context._user_id))

                user_domain_id = user_keystoneclient.client.users.get(
                        context._user_id).domain_id
                LOG.debug("Looking for roles in user domain %s"
                        % user_domain_id)
                roles_in_domain = user_keystoneclient.client.roles.list(
                        domain_id=user_domain_id)
                for e_role in roles_in_domain:
                    for p_role in unmatched_roles:
                        if p_role in (e_role.name, e_role.id):
                            matched_roles.append(p_role)
                            matched_roles_ids.append(e_role.id)

            matched_roles.sort()
            provided_roles.sort()
            if matched_roles != provided_roles:
                unmatched_roles = list(set(matched_roles).symmetric_difference(
                    set(provided_roles)))
                raise wlm_exceptions.MissingCredentialError(
                    message="Invalid roles %s" % unmatched_roles)

            # create trust
            cntx = wlm_context.RequestContext(
                trustor_user_id=context.user_id,
                auth_token=context.auth_token,
                tenant_id=context.project_id,
                roles=matched_roles_ids,
                is_admin=False)
            keystoneclient = clients.Clients(cntx).client("keystone")
            try:
                trust_context = keystoneclient.create_trust_context()
            except Exception as ex:
                LOG.error("Failed to create trust for user %s with roles %s"
                        % (context._user_id, matched_roles))
                raise ex

            setting = {'category': "identity",
                       'name': "trust-%s" % str(uuid.uuid4()),
                       'description': 'token id for user %s project %s' %
                                       (context.user_id, context.project_id),
                       'value': trust_context.trust_id,
                       'user_id': 'cloud_admin' if is_cloud_trust else context.user_id,
                       'is_public': False,
                       'is_hidden': True,
                       'metadata': {'role_name': role_names},
                       'type': "trust_id", }
            created_settings = []
            try:
                created_settings.append(self.db.setting_create(context, setting))
            except Exception as ex:
                LOG.exception(ex)

            # Once the new cloud trust is created, remove the old trusts.
            if is_cloud_trust is True:
                old_trusts = nova._get_trusts('cloud_admin', vault.CONF.cloud_admin_project_id)
            else:
                old_trusts = nova._get_trusts(context.user_id, context.project_id)
            for t in old_trusts:
                if created_settings[0].name != t.name:
                    self.trust_delete(context, t.name)

            return created_settings
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def trust_delete(self, context, name):
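        """
        Delete the keystone trust referenced by the named setting and remove
        the setting itself.
        """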

        trust = self.db.setting_get(context, name, get_hidden=True)
        if trust.type != "trust_id":
            msg = _("No trust record by name %s" % name)
            raise wlm_exceptions.Invalid(reason=msg)

        try:
            cntx = wlm_context.RequestContext(
                trustor_user_id=context.user_id,
                auth_token=context.auth_token,
                tenant_id=context.project_id,
                is_admin=False)

            clients.initialise()
            keystoneclient = clients.Clients(cntx).client("keystone")
            keystoneclient.delete_trust(trust.value)
        except Exception as ex:
            LOG.exception(ex)

        self.db.setting_delete(context, name)

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def trust_show(self, context, name):
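        """
        Show the keystone trust referenced by the named setting.
        """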
        trust = self.db.setting_get(context, name, get_hidden=True)
        if trust.type != "trust_id":
            msg = _("No trust record by name %s" % name)
            raise wlm_exceptions.Invalid(reason=msg)

        try:
            cntx = wlm_context.RequestContext(
                trustor_user_id=context.user_id,
                auth_token=context.auth_token,
                tenant_id=context.project_id,
                is_admin=False)

            clients.initialise()
            keystoneclient = clients.Clients(cntx).client("keystone")
            return keystoneclient.show_trust(trust.value)
        except Exception as ex:
            LOG.exception(ex)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def trust_list(self, context, get_hidden=False, is_cloud_admin=False):
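        """
        List the trust settings owned by the calling user (or by
        'cloud_admin') for the current project.
        """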

        settings = self.db.setting_get_all_by_project(
            context, context.project_id,
            get_hidden=True, is_cloud_admin=is_cloud_admin)

        check_for_user_id = context.user_id
        if is_cloud_admin:
            check_for_user_id = 'cloud_admin'

        trust = [t for t in settings if t.type == "trust_id" and
                 t.user_id == check_for_user_id and
                 t.project_id == context.project_id]
        return trust

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def trust_get(self, context, name):
        try:
            return self.db.setting_get(context, name, get_hidden=True)
        except Exception as ex:
            LOG.exception(ex)
        return None

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def validate_scheduler_trust(self, context, workload_id):
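        """
        Check whether the job scheduler entry for the given workload carries
        a usable trust. Returns the scheduler state, the matching trust
        setting and whether a trust context could be created from it.
        """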
        res = {'scheduler_enabled': False, 'trust': None, 'is_valid': False, 'scheduler_obj': None}
        try:
            jobs = self.db.job_get_all(context)
            scheduler_obj = None
            for job in jobs:
                if job.workload_id == workload_id:
                    scheduler_obj = job
                    res['scheduler_enabled'] = True
                    break
            if not scheduler_obj or not getattr(scheduler_obj, 'kwargs', None):
                return res

            scheduler_obj.kwargs = pickle.loads(bytes(scheduler_obj.kwargs, 'utf-8'))
            res['scheduler_obj'] = scheduler_obj.kwargs
            user_id = scheduler_obj.kwargs.get('user_id', None)
            project_id = scheduler_obj.kwargs.get('project_id', None)
            if user_id and project_id:
                trust_objs = self.db.setting_get_all_by_project(
                    context, project_id,
                    **{'user_id': user_id, 'type': 'trust_id'}
                )
                for obj in trust_objs:
                    cntx = wlm_context.RequestContext(
                        trustor_user_id=user_id,
                        project_id=project_id,
                        trust_id=obj.value)
                    clients.initialise()
                    keystone_client = clients.Clients(cntx).client("keystone")
                    trust_context = keystone_client.create_trust_context()
                    if trust_context and trust_context.trust_id:
                        res['trust'] = obj
                        res['is_valid'] = True
                        return res
        except Exception as ex:
            LOG.exception(ex)
        return res


    @autolog.log_method(logger=Logger)
    @import_backend_settings
    @upload_settings
    @wrap_check_policy
    def license_create(self, context, license_data):
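        """
        Store the supplied license and the accepted EULA as hidden settings,
        replacing any previously stored license and EULA records, and return
        the parsed license.
        """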

        license_json = json.dumps(parse_license_text(license_data['lic_txt']))
        eula_md5sum = calculate_file_md5sum(EULA_FILE_PATH, package_name='workloadmgrclient')
        eula_json = json.dumps({
            "agreed_time": datetime.utcnow().strftime('%m/%d/%Y %I:%M %p') + ' UTC',
            "eula_agreement_value": "accept",
            "md5sum_hash": eula_md5sum})
        setting = {'category': "license",
                   'name': "license-%s" % str(uuid.uuid4()),
                   'description': 'TrilioVault License Key',
                   'value': license_json,
                   'user_id': context.user_id,
                   'is_public': False,
                   'is_hidden': True,
                   'metadata': {'filename': license_data['file_name']},
                   'type': "license_key", }
        eula_setting = {'category': "eula",
                        'name': "eula_agreement-%s" % str(uuid.uuid4()),
                        'description': 'EULA Agreement',
                        'value': eula_json,
                        'user_id': context.user_id,
                        'is_public': False,
                        'is_hidden': True,
                        'metadata': {},
                        'type': "eula_agreement", }
        created_license = []
        try:
            settings = self.db.setting_get_all(context, get_hidden=True)
            created_license_obj = self.db.setting_create(context, setting)
            created_eula_obj = self.db.setting_create(context, eula_setting)
            for setting in settings:
                if setting.type in ["license_key", "eula_agreement"]:
                    try:
                        self.db.setting_delete(context, setting.name)
                    except BaseException as bex:
                        LOG.exception(bex)
        except Exception as ex:
            LOG.exception(ex)
        response = json.loads(dict(created_license_obj)['value'])
        response['EULA'] = dict(created_eula_obj)['value']
        return json.loads(json.dumps(response, default=str))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def license_list(self, context):
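        """
        Return the stored license along with the accepted EULA. Raises if no
        license or EULA record exists, or if the packaged EULA has changed
        since it was accepted.
        """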
        license = self.db.setting_get_all(context, get_hidden=True, type='license_key')
        if not license:
            raise wlm_exceptions.LicenseNotFound()
        eula_agreement = self.db.setting_get_all(context, get_hidden=True, type='eula_agreement')
        if not eula_agreement:
            raise wlm_exceptions.EulaNotFound()
        package_version = get_distribution('workloadmgrclient').version
        db_version = eula_agreement[0].version
        if packaging.version.parse(package_version) > packaging.version.parse(db_version):
            eula_md5sum = calculate_file_md5sum(
                EULA_FILE_PATH, package_name='workloadmgrclient')
            if json.loads(eula_agreement[0].value).get('md5sum_hash') != eula_md5sum:
                settings = [eula_agreement[0].name, license[0].name]
                for setting in settings:
                    try:
                        self.db.setting_delete(context, setting)
                    except Exception as ex:
                        LOG.exception(ex)
                raise wlm_exceptions.EulaChangedNeedsAcceptance()

        lic = json.loads(license[0].value)
        lic['metadata'] = license[0]['metadata']
        lic['EULA'] = eula_agreement[0]['value']
        return lic

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def license_check(self, context, method=None):
        try:
            return self.get_usage_and_validate_against_license(context, method)
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    def get_usage_and_validate_against_license(self, context, method=None):
        admin_context = wlm_context.get_admin_context()
        license_key = self.license_list(admin_context)

        try:
            return validate_license_key(license_key, method)
        except Exception as ex:
            LOG.exception(ex)
            raise

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_orphaned_workloads_list(self, context, migrate_cloud=None):
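        """
        Get workloads whose project or user no longer exists in keystone.
        When migrate_cloud is set, the configured backup targets are scanned
        for workloads instead of the database.
        """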
        AUDITLOG.log(context, 'Get Orphaned Workloads List Requested', None)
        if not context.is_admin:
            raise wlm_exceptions.AdminRequired()
        workloads = []
        keystone_client = KeystoneClient(context)
        projects = keystone_client.client.get_project_list_for_import(context)
        tenant_list = [project.id for project in projects]
        users = keystone_client.get_user_list()
        user_list = [user.id for user in users]

        def _check_workload(context, workload):
            project_id = workload.get('project_id')
            user_id = workload.get('user_id')
            # Check for valid tenant
            if project_id in tenant_list:
                # Check for valid workload user
                if keystone_client.client.user_exist_in_tenant(
                        project_id, user_id):
                    pass
                else:
                    workloads.append(workload)
            else:
                workloads.append(workload)

        if not migrate_cloud:
            kwargs = {
                'project_list': tenant_list,
                'exclude': True,
                'user_list': user_list}
            workloads_in_db = self.db.workload_get_all(context, **kwargs)
            for workload in workloads_in_db:
                workloads.append(workload)
        else:
            for backup_target_section in CONF.enabled_backends:
                for backup_endpoint in vault.CONF[backup_target_section].vault_storage_filesystem_export.split(','):
                    backup_target = None
                    try:
                        backup_target = vault.get_backup_target(backup_endpoint)
                        for workload_url in backup_target.get_workloads(context):
                            try:
                                workload_values = json.loads(backup_target.get_object(
                                    os.path.join(workload_url, 'workload_db')))
                                _check_workload(context, workload_values)
                            except Exception as ex:
                                LOG.exception(ex)
                    except Exception as ex:
                        LOG.exception(ex)
        AUDITLOG.log(context, 'Get Orphaned Workloads List Completed', None)
        return workloads

    def _update_workloads(self, context, workloads_to_update,
                          jobscheduler_map, new_tenant_id, user_id):
        '''
        Update the values for tenant_id and user_id for given list of
        workloads in database.
        '''
        updated_workloads = []
        workload_update_map = []
        snapshot_update_map = []
        topology_update_map = []

        try:
            DBSession = get_session()
            kwargs = {'session': DBSession}
            # Create the list for workloads and snapshots to update in
            # database with new project_id and user_id
            for workload_id in workloads_to_update:
                jobschedule = pickle.loads(bytes(jobscheduler_map[workload_id], 'utf-8'))
                if jobschedule.get('appliance_timezone'):
                    jobschedule['timezone'] = jobschedule['appliance_timezone']
                    jobschedule = utils.convert_jobschedule_date_tz(jobschedule)
                    jobscheduler_map[workload_id] = str(pickle.dumps(jobschedule, 0), 'utf-8')

                if jobschedule.get('timezone') and jobschedule['timezone'] != 'UTC':
                    jobschedule = utils.convert_jobschedule_date_tz(jobschedule)
                    jobscheduler_map[workload_id] = str(pickle.dumps(jobschedule, 0), 'utf-8')

                workload_map = {
                    'id': workload_id,
                    'jobschedule': jobscheduler_map[workload_id],
                    'project_id': new_tenant_id,
                    'user_id': user_id}
                workload_update_map.append(workload_map)
                snapshots = self.db.snapshot_get_all_by_workload(
                    context, workload_id, **kwargs)
                for snapshot in snapshots:
                    snapshot_map = {
                        'id': snapshot.get('id'),
                        'project_id': new_tenant_id,
                        'user_id': user_id}
                    snapshot_update_map.append(snapshot_map)

                    topology_resources = self.db.snapshot_network_resources_get(
                                                 context, snapshot.get('id'), **kwargs)
                    for topology_resource in topology_resources:
                        for metadata in topology_resource.metadata:
                            if metadata['key'] == 'json_data':
                                metadata_json = json.loads(metadata['value'])
                                metadata_json['tenant_id'] = new_tenant_id
                                metadata_json['project_id'] = new_tenant_id
                                resource_map = {
                                    'id': metadata.get('id'),
                                    'value': json.dumps(metadata_json)}
                                topology_update_map.append(resource_map)

                # Removing workloads from job scheduler
                jobs = self.db.job_get_all(context)
                for job in jobs:
                    if job.workload_id == workload_id:
                        self.cron_rpcapi.workload_pause(context, workload_id)
                        break

            # Update all the entries for workloads and snapshots in database
            DBSession.bulk_update_mappings(
                models.Workloads, workload_update_map)
            DBSession.bulk_update_mappings(
                models.Snapshots, snapshot_update_map)
            DBSession.bulk_update_mappings(
                models.SnapNetworkResourceMetadata, topology_update_map)
            for workload_id in workloads_to_update:
                workload = self.db.workload_get(context, workload_id)
                updated_workloads.append(workload)

            return updated_workloads
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workloads_reassign(self, context, tenant_maps):
        '''
        Reassign given list of workloads to new tenant_id and user_id.
        '''
        try:
            AUDITLOG.log(context, 'Reassign workloads Requested', None)
            if not context.is_admin:
                raise wlm_exceptions.AdminRequired()

            keystone_client = KeystoneClient(context)

            reassigned_workloads = []
            failed_workloads = []
            projects = keystone_client.client.get_project_list_for_import(
                context)
            tenant_list = [project.id for project in projects]

            if len(tenant_list) == 0:
                raise Exception(
                    "User %s is not a member of the source tenants "
                    "or the target tenant" % context.user_id)

            jobid_list = []
            for tenant_map in tenant_maps:
                workload_to_update = []
                workload_to_import = []
                workload_ids = tenant_map['workload_ids']
                old_tenant_ids = tenant_map['old_tenant_ids']
                new_tenant_id = tenant_map['new_tenant_id']
                user_id = tenant_map['user_id']
                source_btt = tenant_map['source_btt']
                source_btt_all = tenant_map['source_btt_all']
                migrate_cloud = tenant_map['migrate_cloud']

                if source_btt_all:
                    source_btt = [btt.id for btt in self.db.backup_target_type_get_all(context)]
                elif source_btt:
                    source_btt = list(set(source_btt))
                else:
                    source_btt = [self.db.get_default_backup_target_type(context)[0].id]

                # If workload id's are provided then check whether they
                # exist in current cloud or not.
                workloads_old_tenant_ids = []
                if workload_ids:
                    kwargs = {'workload_list': workload_ids}
                    workloads = self.db.workload_get_all(context, **kwargs)

                    if len(workloads) == len(workload_ids):
                        workload_to_update.extend(workload_ids)

                    if len(workloads) != len(
                            workload_ids) and migrate_cloud is not True:
                        raise Exception(
                            "Workload ids do not belong to current cloud. "
                            "To reassign them from another cloud, set migrate_cloud option to True.")

                    for workload in workloads:
                        workloads_old_tenant_ids.extend([self.db.workload_get(context, workload.id).project_id])

                    old_tenant_ids = list(set(old_tenant_ids))
                    # In case workload id's are provided and some are in DB and
                    # some need to import then filter those workloads here.
                    if len(workloads) != len(
                            workload_ids) and migrate_cloud is True:

                        if len(workloads) == 0:
                            workload_to_import.extend(workload_ids)
                        else:
                            workloads = self.db.workload_get_all(context)
                            workload_ids_in_db = [
                                workload.id for workload in workloads]
                            for workload_id in workload_ids:
                                if workload_id in workload_ids_in_db:
                                    workload_to_update.append(workload_id)
                                else:
                                    workload_to_import.append(workload_id)

                if new_tenant_id in tenant_list:
                    # Check for valid workload user
                    if keystone_client.client.user_exist_in_tenant(
                            new_tenant_id, user_id) is False:
                        raise wlm_exceptions.UserNotFound(user_id=user_id)
                    if keystone_client.client.check_user_role(
                            new_tenant_id, user_id) is False:
                        raise wlm_exceptions.RoleNotFound(
                            user_id=user_id, role_name=vault.CONF.trustee_role, project_id=new_tenant_id)

                    # If old_tenant_ids are provided then look for all
                    # workloads under those tenants
                    if old_tenant_ids:
                        if not migrate_cloud:
                            for old_tenant_id in old_tenant_ids:
                                if old_tenant_id not in tenant_list:
                                    raise wlm_exceptions.ProjectNotFound(
                                        project_id=old_tenant_id)
                            kwargs = {'project_list': old_tenant_ids}
                            workloads_in_db = self.db.workload_get_all(
                                context, **kwargs)
                            for workload in workloads_in_db:
                                workload_to_update.append(workload.id)
                        else:
                            # When migrate_cloud is True, some of the workloads may already
                            # exist in the DB; importing those directly would fail, so
                            # filter out such workloads here.
                            # Fetch the list of importable workloads from the provided
                            # source backup target types.
                            bt_list = []
                            workload_ids = []
                            for btt in source_btt:
                                bt_obj = self.db.get_backup_target_by_btt(context, btt)
                                # skip BT already processed
                                if bt_obj.id in bt_list:
                                    continue
                                workload_ids_list = vault.get_workloads_for_tenant(context, old_tenant_ids, filesystem_export=bt_obj.filesystem_export)
                                bt_list.append(bt_obj.id)
                                if workload_ids_list:
                                    workload_ids.extend(workload_ids_list)
                            workload_ids = list(set(workload_ids))
                            kwargs = {'workload_list': workload_ids}
                            workloads = self.db.workload_get_all(
                                context, **kwargs)
                            if len(workloads) == 0:
                                workload_to_import.extend(workload_ids)
                            else:
                                workloads_in_db = self.db.workload_get_all(
                                    context)
                                workload_ids_in_db = [
                                    workload.id for workload in workloads_in_db]
                                for workload_id in workload_ids:
                                    if workload_id in workload_ids_in_db:
                                        workload_to_update.append(workload_id)
                                    else:
                                        workload_to_import.append(workload_id)

                    total_workloads = len(workload_to_import) + len(workload_to_update)
                    if total_workloads:
                        try:
                            allowed_quota_obj = workload_quota_check(
                                new_tenant_id, self.db,
                                import_count=total_workloads
                            )
                            if allowed_quota_obj \
                                    and allowed_quota_obj.actual_value == allowed_quota_obj.high_watermark:
                                # TODO: check how to handle this warning at UI side
                                AUDITLOG.log(
                                    context,
                                    "Destination project's workload allowed quota has reached "
                                    "the high watermark. Contact the admin.",
                                    None)
                                LOG.warning("Destination project's workload allowed quota has "
                                            "reached the high watermark. Contact the admin.")
                        except ValueError:
                            raise wlm_exceptions.QuotaLimitExceededError()
                        except Exception as ex:
                            LOG.exception(ex)
                            raise
                    if workload_to_import:
                        vault.update_workload_db(
                            context, workload_to_import, new_tenant_id, user_id)
                        jobid = self.import_workloads(
                            context, workload_to_import, source_btt=source_btt, source_btt_all=False, target_btt=None, upgrade=True)
                        jobid_list.append(jobid)

                    if workload_to_update:
                        jobscheduler_map = vault.update_workload_db(
                            context, workload_to_update, new_tenant_id, user_id)
                        for wid in workload_to_update:
                            if wid not in jobscheduler_map:
                                LOG.error("workload %s directory structure is corrupt or missing files" % wid)
                                raise Exception("workload %s directory structure is corrupt or missing files" % wid)
                        updated_workloads = self._update_workloads(
                            context, workload_to_update, jobscheduler_map, new_tenant_id, user_id)
                        reassigned_workloads.extend(updated_workloads)
                else:
                    LOG.error("project %s is not part of either some of source projects or "
                              "target project" % context.project_id)
                    raise wlm_exceptions.ProjectNotFound(
                            project_id=context.project_id)
            return {'workloads': {'reassigned_workloads': reassigned_workloads,
                'failed_workloads': failed_workloads, 'jobid_list': jobid_list}}

        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def config_workload(self, context, jobschedule, config_data):
        """
        Make the RPC call to create/update a config workload.
        """
        try:
            AUDITLOG.log(context, 'Config workload update Requested', None)

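            # Config workload metadata values are stored as pickled strings,
            # except 'authorized_key', which is stored as-is.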
            def _get_metadata(metadata, key):
                for meta in metadata:
                    if meta.key == key:
                        if key == 'authorized_key':
                            return meta.value
                        else:
                            return pickle.loads(bytes(meta.value, 'utf-8'))

            try:
                existing_config_workload = self.db.config_workload_get(context)
                if existing_config_workload['status'].lower() == 'locked':
                    message = "Config workload is not available. " \
                        "Please wait for config backup to complete."
                    raise wlm_exceptions.InvalidState(reason=message)
            except wlm_exceptions.ConfigWorkloadNotFound:
                existing_config_workload = None
                # When user configuring for the first time
                if ('databases' in config_data) is False or len(
                        list(config_data['databases'].keys())) == 0:
                    message = "Database credentials are required to configure config backup."
                    raise wlm_exceptions.ErrorOccurred(reason=message)

                if ('trusted_user' not in config_data or 'authorized_key' not in config_data) or\
                        str(config_data['trusted_user'].get('username', None)).lower() == 'none':
                    message = "To backup controller nodes and database, please provide trusted user and " \
                              "authorized_key, which will be used to connect with controller nodes."
                    raise wlm_exceptions.ErrorOccurred(reason=message)

            metadata = {}
            # Validate trusted_user and authorized_key
            if ('trusted_user' in config_data or 'authorized_key' in config_data):
                trusted_user = config_data.get('trusted_user')
                if trusted_user is None:
                    trusted_user = _get_metadata(
                        existing_config_workload.metadata, 'trusted_user')
                if 'authorized_key' in config_data:
                    authorized_key = vault.get_key_file(
                        config_data['authorized_key'], temp=True)
                else:
                    authorized_key = _get_metadata(
                        existing_config_workload.metadata, 'authorized_key')

                trust_creds = {
                    'trusted_user': trusted_user,
                    'authorized_key': authorized_key}
                workload_utils.validate_trusted_user_and_key(
                    context, trust_creds)
                if 'trusted_user' in config_data:
                    metadata['trusted_user'] = str(pickle.dumps(
                        config_data.pop('trusted_user'), 0), 'utf-8')
                if 'authorized_key' in config_data:
                    os.remove(authorized_key)
                    trust_creds['authorized_key'] = metadata['authorized_key'] = vault.get_key_file(
                        config_data['authorized_key'])

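            # Validate the supplied database credentials using the trusted user
            # and authorized key (newly provided or read back from the existing
            # config workload metadata).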
            if 'databases' in config_data:
                if 'trust_creds' not in locals():
                    trust_creds = {
                        'trusted_user': _get_metadata(
                            existing_config_workload.metadata, 'trusted_user'),
                        'authorized_key': _get_metadata(
                            existing_config_workload.metadata, 'authorized_key'),
                    }
                workload_utils.validate_database_creds(
                    context, config_data['databases'], trust_creds)
                metadata['databases'] = str(pickle.dumps(
                    config_data.pop('databases'), 0), 'utf-8')

            metadata['services_to_backup'] = str(pickle.dumps(
                config_data.pop('services_to_backup'), 0), 'utf-8')

            backup_target, path = vault.get_settings_backup_target()
            # Create new OpenStack workload
            if existing_config_workload is None:
                options = {
                    'id': vault.CONF.cloud_unique_id,
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'host': socket.gethostname(),
                    'status': 'creating',
                    'metadata': metadata,
                    'jobschedule': str(pickle.dumps(jobschedule, 0), 'utf-8'),
                    'backup_media_target': backup_target.backup_endpoint,
                }
                config_workload = self.db.config_workload_update(
                    context, options)
                # Add to scheduler jobs
                self.workload_add_scheduler_job(
                    context, jobschedule, config_workload, is_config_backup=True)
            else:
                # Update existing config workload
                options = {}
                options['metadata'] = metadata
                if len(jobschedule):
                    options['jobschedule'] = str(pickle.dumps(jobschedule, 0), 'utf-8')
                    config_workload = self.db.config_workload_update(
                        context, options)

                    existing_jobschedule = pickle.loads(
                        bytes(existing_config_workload.get('jobschedule'), 'utf-8'))
                    existing_scheduler_status = False
                    if str(existing_jobschedule.get(
                            'enabled')).lower() == 'true':
                        existing_scheduler_status = True

                    if str(jobschedule['enabled']).lower() == 'true':
                        if existing_scheduler_status is True:
                            # The schedule was updated with new values; re-register
                            # the job so the changes take effect immediately.
                            job = self._scheduler.get_config_backup_job()
                            if job is not None:
                                self._scheduler.unschedule_config_backup_job(
                                    job)

                        # Add to scheduler jobs
                        self.workload_add_scheduler_job(
                            context, jobschedule, config_workload, is_config_backup=True)

                    if str(jobschedule['enabled']).lower() == 'false':
                        if existing_scheduler_status is True:
                            # Remove from scheduler jobs
                            job = self._scheduler.get_config_backup_job()
                            if job is not None:
                                self._scheduler.unschedule_config_backup_job(
                                    job)
                        else:
                            LOG.warning("Config workload is already disabled.")
                else:
                    config_workload = self.db.config_workload_update(
                        context, options)

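            # Mark the config workload as available and persist the refreshed
            # DB entry.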
            self.db.config_workload_update(context,
                                           {
                                               'status': 'available',
                                               'error_msg': None,
                                           })
            workload_utils.upload_config_workload_db_entry(context)
            AUDITLOG.log(
                context,
                'Config workload update submitted.',
                config_workload)
            return config_workload
        except Exception as ex:
            LOG.exception(ex)
            if 'config_workload' in locals():
                self.db.config_workload_update(
                    context, {'status': 'error', 'error_msg': str(ex)})
                workload_utils.upload_config_workload_db_entry(context)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_config_workload(self, context):
        try:
            config_workload = self.db.config_workload_get(context)

            config_workload_dict = dict(config_workload)

            config_workload_dict['jobschedule'] = pickle.loads(
                bytes(config_workload.jobschedule, 'utf-8'))
            config_workload_dict['jobschedule']['enabled'] = False
            config_workload_dict['jobschedule'][
                'global_jobscheduler'] = self._scheduler.running
            # find the job object based on config_workload_id
            job = self._scheduler.get_config_backup_job()
            if job is not None:
                config_workload_dict['jobschedule']['enabled'] = True
                timedelta = job.compute_next_run_time(
                    datetime.now()) - datetime.utcnow()
                config_workload_dict['jobschedule']['nextrun'] = \
                    timedelta.total_seconds()
            return config_workload_dict
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    # Workload Policy APIs
    @wrap_check_policy
    def policy_create(self, context, name, description, metadata, field_values):
        """
        Create a policy.
        """
        try:
            AUDITLOG.log(context, 'Policy \'' + name +
                         '\' Create Requested', None)

            # verify field names provided with available policy fields
            fields = self.db.policy_fields_get_all(context)
            policy_fields = [f.field_name for f in fields]
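            # Default the retention values to 30 when they are not supplied.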
            if field_values.get('manual') and field_values['manual'].get('retention') is None:
                field_values.update({'manual': {'retention': '30'}})
            if field_values.get('retentionmanual') and field_values['retentionmanual'].get('retentionmanual') is None:
                field_values.update({'retentionmanual': {'retentionmanual': '30'}})
            if (len(list(field_values.keys())) != len(policy_fields)) or \
                    len(set(field_values.keys()).difference(set(policy_fields))) > 0:
                raise wlm_exceptions.InvalidRequest(
                    reason="Please provide only the policy fields %s; got %s" %
                           (str(policy_fields), str(field_values)))

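            # Field values are persisted as pickled strings.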
            serialized_field_values = {}
            for f, v in field_values.items():
                serialized_field_values[f] = str(pickle.dumps(v, 0), 'utf-8')

            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'display_name': name,
                       'display_description': description,
                       'status': 'available',
                       'metadata': metadata,
                       'field_values': serialized_field_values}
            policy = self.db.policy_create(context, options)

            workload_utils.upload_policy_db_entry(context, policy.id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Create Submitted', policy)
            return self.db.policy_get(context, policy['id'])
        except Exception as ex:
            LOG.exception(ex)
            if 'policy' in locals():
                self.db.policy_update(
                    context, policy['id'],
                    {'status': 'error',
                     'error_msg': ex.kwargs['reason'] if hasattr(ex, 'kwargs') and
                     'reason' in ex.kwargs else str(ex)})
            raise

    @wrap_check_policy
    def policy_update(self, context, policy_id, values):
        """
        Update given policy.
        """
        try:
            policy = self.db.policy_get(context, policy_id)

            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Update Requested', None)

            policy_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id, workloads=True)
            if len(policy_assignments) > 0:
                raise wlm_exceptions.ErrorOccurred(
                    reason="Cannot update policy %s: it is assigned to workloads." % policy_id)

            # verify field names provided with available policy fields
            field_values = values.get('field_values', {})
            if len(field_values) > 0:
                fields = self.db.policy_fields_get_all(context)
                policy_fields = [f.field_name for f in fields]

                if len(set(policy_fields).union(set(field_values.keys()))) > len(policy_fields):
                    raise wlm_exceptions.InvalidRequest(
                        reason="Please provide only %s as part of policy fields" % str(policy_fields))

            serialized_field_values = {}
            for f, v in field_values.items():
                serialized_field_values[f] = str(pickle.dumps(v, 0), 'utf-8')

            values['field_values'] = serialized_field_values

            policy = self.db.policy_update(context, policy_id, values)

            workload_utils.upload_policy_db_entry(context, policy_id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Update Submitted', policy)
            return policy
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_get(self, context, policy_id):
        """
        Get policy object.
        """
        try:
            policy = self.db.policy_get(context, policy_id)
            # deserialize the field values before responding back

            for field_value in policy.field_values:
                field_value.value = pickle.loads(bytes(field_value.value, 'utf-8'))
            return policy
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_list(self, context, search_opts={}):
        """
        List all available policies.
        """
        try:
            policies = self.db.policy_get_all(context, **search_opts)
            for policy in policies:
                for field_value in policy.field_values:
                    field_value.value = pickle.loads(bytes(field_value.value, 'utf-8'))
            return policies
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_delete(self, context, policy_id):
        """
        Delete the given policy.
        """
        try:
            policy = self.db.policy_get(context, policy_id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Delete Requested', None)

            # Check policy is not assigned to any workload
            workload_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id, workloads=True)
            if len(workload_assignments) > 0:
                raise wlm_exceptions.ErrorOccurred(
                    reason="Cannot delete policy %s: it is assigned to workloads." % policy_id)

            # Remove policy assignments from projects.
            policy_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id)

            for pa in policy_assignments:
                self.db.policy_assignment_delete(context, pa.id)

            self.db.policy_delete(context, policy_id)
            workload_utils.policy_delete(context, policy_id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Delete Submitted', None)
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_assign(self, context, policy_id, add_projects, remove_projects):
        """
        Assign the policy to, or remove it from, the given projects.
        """
        try:
            failed_project_ids = []
            project_to_add = {}
            # Validate the requested project IDs; skip (and report) any that
            # cannot be resolved in Keystone.
            clients.initialise()
            keystoneclient = clients.Clients(context).client("keystone")
            for project_id in add_projects[:]:
                try:
                    project = keystoneclient.client.projects.get(project_id)
                    project_to_add[project_id] = project.name
                except Exception:
                    failed_project_ids.append(project_id)

            # Validate policy_id
            policy = self.db.policy_get(context, policy_id)

            policy_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id)

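            # Reconcile existing assignments: drop the ones being removed
            # (unless the policy is used by workloads in that project) and skip
            # projects that are already assigned.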
            for pa in policy_assignments:
                if pa.project_id in remove_projects:
                    # Check policy is not assigned to any workload
                    workload_assignments = self.db.policy_assignments_get_all(
                        context, policy_id=policy_id, project_id=pa.project_id, workloads=True)
                    if len(workload_assignments) > 0:
                        raise wlm_exceptions.ErrorOccurred(
                            reason="Cannot remove policy %s: it is assigned to workloads." % policy_id)
                    self.db.policy_assignment_delete(context, pa.id)
                    remove_projects.remove(pa.project_id)
                elif pa.project_id in list(project_to_add.keys()):
                    project_to_add.pop(pa.project_id)

            if len(remove_projects) > 0:
                failed_project_ids.extend(remove_projects)

            for proj_id, proj_name in project_to_add.items():
                values = {'policy_id': policy_id, 'project_id': proj_id,
                          'policy_name': policy.display_name, 'project_name': proj_name}
                self.db.policy_assignment_create(context, values)

            workload_utils.upload_policy_db_entry(context, policy_id)
            policy = self.db.policy_get(context, policy_id)
            return (policy, failed_project_ids)
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def get_assigned_policies(self, context, project_id):
        """
        List the policies assigned to the given project.
        """
        try:
            assigned_policies = self.db.policy_assignments_get_all(
                context, project_id=project_id)
            return assigned_policies
        except Exception as ex:
            LOG.exception(ex)
            raise

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def policy_field_create(self, context, name, type):
        """
        Create a policy_field. No RPC call is made
        """
        AUDITLOG.log(context, 'Policy Field \'' +
                     name + '\' Create Requested', None)
        options = {'field_name': name,
                   'type': type,
                   }
        policy_field = self.db.policy_field_create(context, options)
        AUDITLOG.log(context, 'Policy Field \'' + name +
                     '\' Create Submitted', policy_field)
        return policy_field

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def policy_field_list(self, context):
        """
        List all policy fields.
        """
        policy_fields = self.db.policy_fields_get_all(context)
        return policy_fields

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_tenants_usage(self, context):
        """
        Get storage used and VMs protected by each tenant.
        """
        try:
            total_usage = 0
            total_capacity = 0
            total_vms_protected = 0
            tenants_usage = self.db.get_tenants_usage(
                context)

            admin_cntx = nova._get_tenant_context(context, cloud_admin=True)
            clients.initialise()
            nova_plugin = clients.Clients(admin_cntx)
            nova_client = nova_plugin.client("nova")

            search_opts = {'all_tenants': 1}
            servers = nova_client.servers.list(search_opts=search_opts)
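            # Group servers by tenant so protected/total VM counts can be
            # derived per project.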
            tenant_wise_servers = {}
            for server in servers:
                if server.tenant_id not in tenant_wise_servers:
                    tenant_wise_servers[server.tenant_id] = [server.id]
                else:
                    tenant_wise_servers[server.tenant_id].append(server.id)
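            # 'vms_protected' counts protected VMs that still exist in Nova;
            # 'passively_protected' counts protected VMs that are no longer
            # present in Nova.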
            for tenant_id in tenant_wise_servers:
                if tenant_id in tenants_usage:
                    protected = len(set(tenants_usage[tenant_id]['vms_protected']).intersection(
                        set(tenant_wise_servers[tenant_id])))
                    passively_protected = len(set(tenants_usage[tenant_id]['vms_protected']).difference(
                        set(tenant_wise_servers[tenant_id])))
                    tenants_usage[tenant_id]['vms_protected'] = protected
                    total_vms_protected += protected
                    tenants_usage[tenant_id]['total_vms'] = len(
                        tenant_wise_servers[tenant_id])
                    tenants_usage[tenant_id][
                        'passively_protected'] = passively_protected
                else:
                    tenants_usage[tenant_id] = {'vms_protected': 0, 'total_vms': len(
                        tenant_wise_servers[tenant_id]), 'used_capacity': 0, 'passively_protected': 0}

            # Update tenants_usage for tenants that do not have any
            # workloads
            clients.initialise()
            keystoneclient = clients.Clients(admin_cntx).client("keystone")
            tenants = keystoneclient.client.projects.list()

            for tenant in tenants:
                if tenant.id not in tenants_usage:
                    tenants_usage[tenant.id] = {
                        'vms_protected': 0, 'total_vms': 0, 'used_capacity': 0, 'passively_protected': 0, 'tenant_name': tenant.name}
                else:
                    tenants_usage[tenant.id]['tenant_name'] = tenant.name

            backends_storage_stats = self.get_storage_usage(context)

            for backend in backends_storage_stats['storage_usage']:
                total_capacity += int(round(float(backend['total_capacity'])))
                total_usage += int(round(float(backend['total_utilization'])))
            global_usage = {'total_capacity': total_capacity, 'total_usage': total_usage, 'total_vms': len(
                servers), 'vms_protected': total_vms_protected}
            return {'tenants_usage': tenants_usage,
                    'global_usage': global_usage}
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_tenants_chargeback(self, context, project_id=None, project_list=[]):
        """
        Get tenants chargeback.
        """
        try:
            snap_chargeback = {}
            kwargs = {"read_metadata": False, "project_id": project_id, "project_list": project_list}
            snapshots = self.db.snapshot_get_all(context, **kwargs)
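            # Aggregate snapshot sizes per workload for workload-level
            # chargeback reporting.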
            for snap in snapshots:
                if snap.workload_id in snap_chargeback:
                    snap_chargeback[snap.workload_id]["workload_size"] += int(
                        round(float(snap.size)))
                    snap_chargeback[snap.workload_id]["snapshots"][snap.id] = {"name": snap.display_name,
                                                                               "size": int(round(float(snap.size))),
                                                                               "created_at": snap.created_at}
                else:
                    snap_chargeback[snap.workload_id] = {"workload_size": snap.size, "snapshots": {
                        snap.id: {"name": snap.display_name, "size": int(round(float(snap.size))), "created_at": snap.created_at}}}

            workload_vm_chargeback = {}
            workload_vms = self.db.workload_vms_get(context, None)
            for w_vm in workload_vms:
                if w_vm.workload_id in workload_vm_chargeback:
                    workload_vm_chargeback[w_vm.workload_id][w_vm.vm_id] = {
                        "name": w_vm.vm_name}
                else:
                    workload_vm_chargeback[w_vm.workload_id] = {
                        w_vm.vm_id: {"name": w_vm.vm_name}}

            workloads = self.db.workload_get_all(context)
            workloads_chargeback = {}
            def get_value(obj, id, key):
                return obj[id][key] if id in obj else None

            for w in workloads:
                if w.project_id in workloads_chargeback:
                    workloads_chargeback[w.project_id]["workloads"][w.id] = {
                        "name": w.display_name,
                        "size": get_value(snap_chargeback, w.id, "workload_size") or 0,
                        "protected_vms": workload_vm_chargeback.get(w.id, {}),
                        "snapshots": get_value(snap_chargeback, w.id, "snapshots") or {},
                        "total_protected_vms": len(workload_vm_chargeback.get(w.id, []))
                    }
                elif (project_id and w.project_id == project_id) or (project_list and w.project_id in project_list):
                    workloads_chargeback[w.project_id] = {"workloads": {w.id: {
                        "name": w.display_name,
                        "size": get_value(snap_chargeback, w.id, "workload_size") or 0,
                        "protected_vms": workload_vm_chargeback.get(w.id, {}),
                        "snapshots": get_value(snap_chargeback, w.id, "snapshots") or {},
                        "total_protected_vms": len(workload_vm_chargeback.get(w.id, []))
                    }}}

            tenants_usage = self.get_tenants_usage(context)["tenants_usage"]
            tenant_chargeback = {}
            for tenant_id in tenants_usage.keys():
                if tenant_id in workloads_chargeback.keys():
                    tenant_chargeback[tenant_id] = tenants_usage[tenant_id]
                    tenant_chargeback[tenant_id]["no_of_workloads"] = len(
                        list(workloads_chargeback[tenant_id]["workloads"].keys()))
                    tenant_chargeback[tenant_id]["workloads"] = workloads_chargeback[tenant_id]["workloads"]
            return tenant_chargeback
        except Exception as ex:
            LOG.exception(ex)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_quota_data(self, context, project_id):
        """
        API collects the following information based on project_id (if provided):
        1. storage: total storage capacity and total amount of utilized storage
        2. Protected VMs: total number of protected VMs for the given project
        3. tvault-nodes: available nodes w.r.t. total nodes in the system
        4. contego-services: total contego services with their respective states (UP/DOWN)
        5. snapshot data: total snapshots available for each node
        """
        try:
            quota_data = {'total_nodes': 0, 'available_nodes': 0, 'error': list()}
            node_wise_snapshot_count = 0
            resp = {
                'nodes': dict(),
                'storage': dict(),
                'services': list(),
                'chargeback_data': dict()
            }

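            # Count non-VIP nodes, how many of them report 'up', and the
            # snapshots currently running on each node.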
            try:
                total_snapshots = 0
                resp['nodes'] = self.get_nodes(context)
                for node in resp['nodes'].get('nodes', []):
                    if not node['is_vip']:
                        quota_data['total_nodes'] += 1
                        if isinstance(node['status'], six.string_types) and node['status'].lower() == "up":
                            quota_data['available_nodes'] += 1
                        search_opts = {'host': node['node'], 'get_all': True, 'status': 'running'}

                        try:
                            snapshots = self.db.snapshot_get_all(context, **search_opts)
                        except Exception:
                            snapshots = []

                        node['snapshots'] = []
                        for snapshot in snapshots:
                            if project_id:
                                if snapshot.project_id == project_id:
                                    node['snapshots'].append(snapshot)
                            else:
                                node['snapshots'].append(snapshot)

                        total_snapshots += len(node['snapshots'])
                        node_wise_snapshot_count += len(node['snapshots'])
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch nodes due to %s' % str(ex))

            quota_data['balance_nodes'] = quota_data['total_nodes'] - quota_data['available_nodes']
            quota_data['total_snapshots'] = total_snapshots
            quota_data['node_wise_snapshot_count'] = node_wise_snapshot_count

            try:
                resp['storage'] = self.get_storage_usage(context)
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch storage usage due to %s' % str(ex))

            quota_data['total_utilization'] = 0
            quota_data['total_capacity'] = 0

            for nfsshare in resp['storage'].get('storage_usage', []):
                quota_data['storage_type'] = str(nfsshare['storage_type'])
                quota_data['total_utilization'] += nfsshare['total_utilization']
                quota_data['total_capacity'] += nfsshare['total_capacity']

            quota_data['available_capacity'] = \
                (float(quota_data['total_capacity']) -
                 float(quota_data['total_utilization']))
            quota_data['total_capacity_humanized'] = \
                utils.sizeof_fmt(quota_data['total_capacity'])
            quota_data['total_utilization_humanized'] = \
                utils.sizeof_fmt(quota_data['total_utilization'])
            quota_data['available_capacity_humanized'] = \
                utils.sizeof_fmt(quota_data['available_capacity'])

            quota_data['contego_services_up'] = 0
            quota_data['contego_services_down'] = 0
            quota_data['contego_services_others'] = 0
            quota_data['contego_services_total'] = 0

            try:
                admin_cntx = nova._get_tenant_context(context, cloud_admin=True)
                clients.initialise()
                contego_plugin = clients.Clients(admin_cntx)
                contego_client = contego_plugin.client("contego")
                service_info = contego_client.contego.get_service_list()
                if service_info and isinstance(service_info, tuple) and service_info[1].get('services'):
                    resp['services'] = service_info[1]['services']
                else:
                    resp['services'] = []
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch services due to %s' % str(ex))

            for service in resp['services']:
                quota_data['contego_services_total'] += 1
                if service['state'] == "up":
                    quota_data['contego_services_up'] += 1
                elif service.get('state') == "down":
                    quota_data['contego_services_down'] += 1
                else:
                    quota_data['contego_services_others'] += 1

            quota_data['vms_protected'] = 0
            quota_data['total_vms'] = 0

            try:
                resp['chargeback_data'] = self.get_tenants_usage(context)
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch tenant usage due to %s' % str(ex))

            if resp['chargeback_data'].get('global_usage') and resp['chargeback_data'].get('tenants_usage'):
                global_usage = resp['chargeback_data']['global_usage']
                tenant_usage = resp['chargeback_data']['tenants_usage']
                if project_id is not None:
                    for tenant in tenant_usage:
                        if tenant == project_id:
                            quota_data['vms_protected'] = tenant_usage[tenant]['vms_protected']
                            quota_data['total_vms'] = tenant_usage[tenant]['total_vms']
                            quota_data['vms_unprotected'] = tenant_usage[tenant]['total_vms'] - tenant_usage[tenant]['vms_protected']
                            quota_data['storage_used'] = tenant_usage[tenant]['used_capacity']
                            quota_data['total_utilization'] = tenant_usage[tenant]['used_capacity']
                            quota_data['total_capacity'] = global_usage['total_capacity']
                            quota_data['total_capacity_humanized'] = utils.sizeof_fmt(quota_data['total_capacity'])
                            quota_data['total_utilization_humanized'] = utils.sizeof_fmt(quota_data['total_utilization'])

                    try:
                        keystoneclient = clients.Clients(context).client("keystone")
                        project = keystoneclient.client.projects.get(project_id)
                        project_name = project.name
                    except Exception:
                        project_name = project_id

                    quota_data['tenant_name'] = 'by ' + project_name + ' Tenant' if project_name else None
                else:
                    quota_data['vms_protected'] = global_usage['vms_protected']
                    quota_data['total_vms'] = global_usage['total_vms']
                    quota_data['vms_unprotected'] = global_usage['total_vms'] - global_usage['vms_protected']
                    quota_data['tenant_name'] = ''
        except Exception as ex:
            LOG.exception(ex)
        return quota_data

    @autolog.log_method(logger=Logger)
    @create_trust()
    @check_license
    @wrap_check_policy
    def migration_plan_create(self, context, name, description,
                              vms, metadata, availability_zone=None,
                              source_platform="vmware"):
        try:
            AUDITLOG.log(context,
                         "migration_plan '%s(%s)' create requested" %
                         (name, description), None)

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform migration plan operations. '
                             'Please make sure user has the trustee role assigned'))

            if len(vms) == 0:
                raise wlm_exceptions.InvalidRequest(
                    reason="No VMs are chosen for migration plan")

            if source_platform != "vmware":
                raise wlm_exceptions.InvalidRequest(
                    reason="Unrecognized source platform %s. "
                           "We only support migrations from VMware"
                           % source_platform)

            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'name': name,
                       'display_name': name,
                       'display_description': description,
                       'status': 'creating',
                       'source_platform': source_platform,
                       'metadata': metadata,
                       'host': socket.gethostname(), }
            migration_plan = self.db.migration_plan_create(context, options)

            si = get_vcenter_service_instance()

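            # Validate each VM against vCenter (looked up by UUID) and make
            # sure it is not already part of another migration plan.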
            for vm in vms:
                # Check whether given instance id exist or not.
                search_index = si.content.searchIndex
                vcenter_vm = search_index.FindByUuid(None, vm['vm-id'], True, True)
                if not vcenter_vm:
                    raise wlm_exceptions.InstanceNotFound(
                        instance_id=vm['vm-id'])

                vm_found = self.db.migration_plan_vm_get_by_id(
                            context, vm['vm-id'])
                if isinstance(vm_found, list):
                    if len(vm_found) > 0:
                        msg = _('Invalid VM: VM (%s) is already part of '
                                'another migration plan' %
                                vcenter_vm.summary.config.name)
                        raise wlm_exceptions.Invalid(reason=msg)
                else:
                    msg = _('Error processing VM %s' %
                                vcenter_vm.summary.config.name)
                    raise wlm_exceptions.Invalid(reason=msg)

                vm['vm-name'] = vcenter_vm.summary.config.name
                vm['metadata'] = {'tools_running': vcenter_vm.summary.guest.toolsStatus,
                                  'esx_host': vcenter_vm.runtime.host.name,
                                  'power_state': vcenter_vm.runtime.powerState,
                                  'guest_family': vcenter_vm.guest.guestFamily,
                                  'guest_fullname': vcenter_vm.guest.guestFullName,
                                  'boot_options': "UEFI" if vcenter_vm.config.bootOptions.efiSecureBootEnabled else "BIOS",
                                 }

                values = {'migration_plan_id': migration_plan.id,
                          'vm_id': vm['vm-id'],
                          'vm_name': vm['vm-name'],
                          'status': 'available',
                          'metadata': vm.get('metadata', {})}
                vm = self.db.migration_plan_vms_create(context, values)

            self.scheduler_rpcapi.migration_plan_create(context,
                FLAGS.scheduler_topic,
                migration_plan['id'])

            AUDITLOG.log(context,
                         "Migration plan '%s' create submitted" % \
                         migration_plan['display_name'],
                         migration_plan)
            return migration_plan
        except Exception as ex:
            LOG.exception(ex)
            if 'migration_plan' in locals():
                self.db.migration_plan_update(
                    context, migration_plan['id'], {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else str(ex)})
            raise

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def migration_plan_modify(self, context, migration_plan_id, migration_plan):
        """
        Make the RPC call to modify a migration_plan.
        """
        migration_planobj = self.migration_plan_get(context, migration_plan_id)

        # check if BT is available
        vault.get_backup_target_by_backup_target_type(migration_planobj['metadata']['backup_target_type'])

        plan_existing_vms_ids = [vm['id'] for vm in migration_planobj['vms']]
        AUDITLOG.log(context, 'Migration Plan %s Modify Requested' %
                     migration_planobj['display_name'], None)

        purge_metadata = False
        options = {}

        if migration_plan.get('name', ""):
            options['display_name'] = migration_plan['name']

        if migration_plan.get('description', ""):
            options['display_description'] = migration_plan['description']

        if migration_plan.get('metadata', {}):
            purge_metadata = True
            options['metadata'] = migration_plan['metadata']

        si = get_vcenter_service_instance()
        if migration_plan.get('vms', []):
            plan_new_vms = migration_plan['vms']
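            # Re-validate the requested VM list against vCenter and other
            # migration plans before replacing this plan's VM entries.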
            for vm in plan_new_vms:
                if not isinstance(vm, dict) or 'vm-id' not in vm:
                    msg = _(
                        "Each entry in the migration plan 'vms' list must be "
                        "a dictionary with a 'vm-id' key")
                    raise wlm_exceptions.Invalid(reason=msg)

                # Check whether given instance id exist or not.
                search_index = si.content.searchIndex
                vcenter_vm = search_index.FindByUuid(None, vm['vm-id'], True, True)
                if not vcenter_vm:
                    raise wlm_exceptions.InstanceNotFound(
                        instance_id=vm['vm-id'])

                vm_found = self.db.migration_plan_vm_get_by_id(
                            context, vm['vm-id'])
                if isinstance(vm_found, list) and len(vm_found) > 0:
                    if uuid.UUID(vm_found[0].migration_plan_id) != \
                        uuid.UUID(migration_plan_id):
                        msg = _('Invalid VM: VM (%s) is already part of '
                                'another migration plan' % vcenter_vm.summary.config.name)
                        raise wlm_exceptions.Invalid(reason=msg)

                vm['vm-name'] = vcenter_vm.summary.config.name
                vm['metadata'] = {'tools_running': vcenter_vm.summary.guest.toolsStatus,
                                  'esx_host': vcenter_vm.runtime.host.name,
                                  'power_state': vcenter_vm.runtime.powerState,
                                  'guest_family': vcenter_vm.guest.guestFamily,
                                  'guest_fullname': vcenter_vm.guest.guestFullName,
                                  'boot_options': "UEFI" if vcenter_vm.config.bootOptions.efiSecureBootEnabled else "BIOS",
                                 }

            # Following database updates must be done as a transaction
            for vm in self.db.migration_plan_vms_get(context, migration_plan_id):
                self.db.migration_plan_vms_delete(context, vm.vm_id, migration_plan_id)

            for vm in plan_new_vms:
                values = {'migration_plan_id': migration_plan_id,
                          'vm_id': vm['vm-id'],
                          'metadata': vm['metadata'],
                          'status': 'available',
                          'vm_name': vm['vm-name']}
                self.db.migration_plan_vms_create(context, values)

        try:
            migration_plan_obj = self.db.migration_plan_update(
                context, migration_plan_id, options, purge_metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=_("Error modifying migration plan"))

        workload_utils.upload_migration_plan_db_entry(context, migration_plan_id)

        AUDITLOG.log(context,
                     "Migration plan '%s' modify Submitted" % migration_plan_obj['display_name'],
                     migration_plan_obj)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_plan_get_all(self, context, search_opts={}):
        migration_plans = self.db.migration_plan_get_all(context, **search_opts)
        return migration_plans

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_plan_get_by_vmid(self, context, search_opts={}):
        migration_plans = self.db.migration_plan_get_by_vmid(context, **search_opts)
        return migration_plans

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_plan_get(self, context, migration_plan_id):
        def _get_pit_resource_id(metadata, key):
            for metadata_item in metadata:
                if metadata_item['key'] == key:
                    pit_id = metadata_item['value']
                    return pit_id

        def _get_pit_resource(migration_plan_vm_common_resources, pit_id):
            for migration_plan_vm_resource in migration_plan_vm_common_resources:
                if migration_plan_vm_resource.resource_name == pit_id:
                    return migration_plan_vm_resource
        kwargs = {}
        migration_plan = self.db.migration_plan_get(context, migration_plan_id)
        migration_plan_dict = dict(migration_plan)

        migration_plan_vms = []
        for migration_plan_vm_obj in self.db.migration_plan_vms_get(context, migration_plan.id):
            migration_plan_vm = {
                'id': migration_plan_vm_obj.vm_id,
                'name': migration_plan_vm_obj.vm_name}
            metadata = {}
            for kvpair in migration_plan_vm_obj.metadata:
                metadata.setdefault(kvpair['key'], kvpair['value'])
            migration_plan_vm['metadata'] = metadata
            migration_plan_vm['nics'] = []
            migration_plan_vm['vdisks'] = []
            migration_plan_vm['security_group'] = []
            migration_plan_vm['flavor'] = {}

            # Get VM specific resources
            migration_plan_vm_resources = self.db.migration_plan_vm_resources_get(
                    context, migration_plan_vm_obj.vm_id, migration_plan_id)

            # Get workload specific resources shared by all VMs including
            # network and subnets
            migration_plan_vm_common_resources = self.db.migration_plan_vm_resources_get(
                    context, migration_plan_id, migration_plan_id)

            for migration_plan_vm_resource in migration_plan_vm_resources:
                """ flavor """
                if migration_plan_vm_resource.resource_type == 'flavor':
                    vm_flavor = migration_plan_vm_resource
                    migration_plan_vm['flavor'] = {
                        'vcpus': self.db.get_metadata_value(vm_flavor.metadata, 'vcpus'),
                        'ram': self.db.get_metadata_value(vm_flavor.metadata, 'ram'),
                        'disk': self.db.get_metadata_value(vm_flavor.metadata, 'disk'),
                        'ephemeral': self.db.get_metadata_value(vm_flavor.metadata, 'ephemeral')}

                """ security group """
                if migration_plan_vm_resource.resource_type == 'security_group':
                    if self.db.get_metadata_value(
                        migration_plan_vm_resource.metadata, 'vm_attached') in (
                            True, '1', None):
                        migration_plan_vm['security_group'].append(
                        {
                            'name': self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'name'),
                            'security_group_type': self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'security_group_type')
                        })

                """ nics """
                if migration_plan_vm_resource.resource_type == 'nic':
                    vm_nic = self.db.migration_plan_vm_network_resource_get(
                            context, migration_plan_vm_resource.id)
                    nic_data = pickle.loads(bytes(vm_nic.pickle, 'utf-8'))
                    nic = {'mac_address': nic_data['mac_address'],
                           'ip_address': nic_data['ip_address'], }
                    nic['network'] = {
                            'id': self.db.get_metadata_value(vm_nic.metadata, 'network_id'),
                            'name': nic_data.get('network_name') or self.db.get_metadata_value(vm_nic.metadata, 'network_name'),
                            'cidr': nic_data.get('cidr', None),
                            'network_type': nic_data['network_type']
                    }

                    pit_id = _get_pit_resource_id(vm_nic.metadata, 'subnet_name')
                    if pit_id:
                        try:
                            vm_nic_subnet = _get_pit_resource(
                                    migration_plan_vm_common_resources, pit_id)
                            vm_nic_subnet_data = self.db.migration_plan_vm_network_resource_get(
                                    context, vm_nic_subnet.id)
                            subnet = pickle.loads(bytes(vm_nic_subnet_data.pickle, 'utf-8'))
                            nic['network']['subnet'] = {
                                    'id': subnet.get('id', None),
                                    'name': subnet.get('name', None),
                                    'cidr': subnet.get('cidr', None),
                                    'ip_version': subnet.get('ip_version', None),
                                    'gateway_ip': subnet.get('gateway_ip', None),
                            }
                        except Exception as ex:
                            LOG.exception(ex)

                    migration_plan_vm['nics'].append(nic)

                """ vdisks """
                if migration_plan_vm_resource.resource_type == 'disk':
                    vdisk = {'label': self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'label'),
                             'resource_id': migration_plan_vm_resource.id,
                             'vm_id': migration_plan_vm_resource.vm_id}

                    vdisk['volume_id'] = self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'volume_id')
                    vdisk['volume_name'] = self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'volume_name')
                    vdisk['volume_size'] = self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'volume_size')
                    vdisk['volume_type'] = self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'volume_type')
                    vdisk['volume_mountpoint'] = self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'volume_mountpoint')

                    if self.db.get_metadata_value(migration_plan_vm_resource.metadata, 'availability_zone'):
                        vdisk['availability_zone'] = self.db.get_metadata_value(
                                    migration_plan_vm_resource.metadata, 'availability_zone')
                    vdisk['metadata'] = json.loads(self.db.get_metadata_value(
                                migration_plan_vm_resource.metadata, 'metadata', "{}"))

                    migration_plan_vm['vdisks'].append(vdisk)
            migration_plan_vms.append(migration_plan_vm)

        migration_plan_dict['vms'] = migration_plan_vms

        metadata = {}
        for kvpair in migration_plan.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        migration_plan_dict['metadata'] = metadata

        return migration_plan_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_plan_delete(self, context, migration_plan_id):
        try:
            migration_plan = self.migration_plan_get(context, migration_plan_id)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(migration_plan['metadata']['backup_target_type'])

            display_name = migration_plan['display_name']
            AUDITLOG.log(
                context, "Migration Plan '%s' delete requested" % display_name,
                migration_plan)

            if migration_plan['status'] not in ['available', 'error']:
                msg = _("Migration Plan status must be 'available' or 'error'")
                raise wlm_exceptions.InvalidState(reason=msg)

            migrations = self.db.migration_get_all_by_project_migration_plan(
                            context, context.project_id, migration_plan_id)
            if len(migrations) > 0:
                msg = _(
                    'This migration plan contains migrations. '
                    'Please delete all migrations and try again.')
                raise wlm_exceptions.InvalidState(reason=msg)

            self.db.migration_plan_update(
                    context, migration_plan_id, {
                        'status': 'deleting'})
            self.scheduler_rpcapi.migration_plan_delete(
                    context, FLAGS.scheduler_topic, migration_plan_id)

            AUDITLOG.log(
                context, "Migration Plan '%s' delete submitted" % display_name,
                migration_plan)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def migration_plan_discovervms(self, context, migration_plan_id):
        migration_plan = None
        try:
            migration_plan = self.migration_plan_get(context, migration_plan_id)
        except Exception as ex:
            options = {
                'warning_msg': "DiscoverVMs failed. %s" % str(ex),
            }
            self.db.migration_plan_update(
                context, migration_plan_id, options)
            raise

        try:
            # check if BT is available
            vault.get_backup_target_by_backup_target_type(migration_plan['metadata']['backup_target_type'])

            try:
                workload_lock.acquire()
                if migration_plan['status'].lower() not in ['available', 'error']:
                    msg = _(
                        "Migration Plan must be in the 'available' or 'error' state to discover vms")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.migration_plan_update(
                    context, migration_plan_id, {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            vms = [migration_plan_vm_obj.vm_id for migration_plan_vm_obj in
                         self.db.migration_plan_vms_get(context, migration_plan['id'])]
            if len(vms) != len(set(vms)):
                msg = _('Migration plan has some duplicate vm entries. '
                        'Remove the duplicate entries and retry.\n'
                        'Hint: open the edit migration plan dialog in the horizon '
                        'dashboard and update the migration plan to remove any duplications')
                raise wlm_exceptions.InvalidState(reason=msg)

            metadata = {}
            metadata.setdefault('cancel_requested', '0')

            ## TODO: Should we create a task to track the progress. Talk to Abhijeet.
            self.db.migration_plan_update(context,
                                    migration_plan_id,
                                    {'status': 'discovering'})
            self.scheduler_rpcapi.migration_plan_discovervms(
                context, FLAGS.scheduler_topic, migration_plan['id'])
            return
        except Exception as ex:
            self.db.migration_plan_update(
                context, migration_plan['id'],
                {'warning_msg': "DiscoverVMs failed with error %s" % str(ex),
                 'status': 'available'})
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_plan_vms_get_all(self, context, search_opts={}):
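        """Return the VM ids attached to migration plans.

        ``search_opts`` is passed straight through to the database layer;
        the result is a dict of the form ``{'vms': [{'id': <vm_id>}, ...]}``.
        """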
        db_vms = self.db.migration_plan_vms_get(context, None, **search_opts)
        vms = [{'id': vm.vm_id} for vm in db_vms]
        return {'vms': vms}

    @autolog.log_method(logger=Logger)
    @create_trust()
    @check_license
    @wrap_check_policy
    def migration_create(self, context, migration_plan_id,
                         migration_options):
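        """Create and schedule a migration for the given migration plan.

        The expected shape of ``migration_options`` (inferred from the checks
        below, so treat it as illustrative rather than exhaustive) is roughly::

            {
                'name': 'my migration',
                'description': 'warm migration of web tier',
                'migration_type': 'warm',          # or 'cold' (default)
                'power_off_vm_post_migrate': True,
                'target': 'openstack',
                'openstack': {
                    'vms': [{'id': '<vcenter instance uuid>', 'include': True}]
                }
            }

        For warm migrations every included VM must have Changed Block
        Tracking enabled in vCenter, and powered-on Windows guests must have
        VMware Tools installed and running.
        """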
        migration = None

        if hasattr(context, 'trust_failed'):
            raise wlm_exceptions.Invalid(
                reason=_('Trust broken: Cannot perform backup operations. '
                         'Please make sure the user has the trustee role assigned'))
        try:
            migration_plan = self.migration_plan_get(context, migration_plan_id)
        except Exception as ex:
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'migration_plan_id': migration_plan_id,
                'migration_type': "warm",
                'display_name': migration_options.get('name', 'Migration'),
                'display_description': migration_options.get('description', 'Migration'),
                'host': '',
                'status': 'error',
                'error_msg': str(ex),
                'metadata': {}, }
            migration = self.db.migration_create(context, options)
            return migration

        try:

            vms = [vm_obj.vm_id for vm_obj in
                         self.db.migration_plan_vms_get(context,
                             migration_plan_id)]

            # Check whether the VMs to be migrated are present in the migration plan.
            vms_to_be_migrated = []
            if migration_options['target'] == 'openstack':
                vms_to_be_migrated = [vm['id'] for vm in migration_options.
                                      get("openstack", {}).
                                      get("vms", []) if vm.get('include')]

            if not vms_to_be_migrated:
                raise wlm_exceptions.MigrationError(
                    "Include at least one virtual machine from the migration "
                    f"plan VMs {vms} for migration.")

            vms_not_in_migration_plan = [vm_id for vm_id in
                                         vms_to_be_migrated
                                         if vm_id not in vms]

            if vms_not_in_migration_plan:
                raise wlm_exceptions.MigrationError(
                    f"VMs with IDs {vms_not_in_migration_plan} are not "
                    f"present in migration plan ID {migration_plan_id}. "
                    "Please check the migration plan details.")

            if len(vms) != len(set(vms)):
                msg = _('Migration plan has duplicate VM entries. '
                        'Remove the duplicate entries and retry.\n'
                        'Hint: Open the edit migration plan dialog in the '
                        'Horizon dashboard and update the migration plan to '
                        'remove any duplications')
                raise wlm_exceptions.InvalidState(reason=msg)

            migration_type = migration_options.get('migration_type', 'cold')
            migration_options['name'] = \
                    migration_options['name'].encode(encoding='UTF-8')
            migration_options['description'] = \
                    migration_options['description'].encode(encoding='UTF-8')

            # Check if Changed Block Tracking is enabled
            # for running warm migration
            if migration_type.lower() == 'warm':

                si = get_vcenter_service_instance()
                cbt_disabled_vms = []
                for vm in vms_to_be_migrated:
                    # Check whether the given instance id exists.
                    search_index = si.content.searchIndex
                    vcenter_vm = search_index.FindByUuid(None, vm,
                                                         True, True)
                    if not vcenter_vm:
                        raise wlm_exceptions.InstanceNotFound(
                            instance_id=vm)
                    if not vcenter_vm.config.changeTrackingEnabled:
                        cbt_disabled_vms.append(vcenter_vm.name + '(%s)' % vm)

                if cbt_disabled_vms:
                    raise wlm_exceptions.MigrationError(
                        'Error creating migration, these VMs must have '
                        'Changed Block Tracking enabled to perform WARM '
                        'migration: %s' % (', '.join(cbt_disabled_vms)))

            # VMware Tools must be installed if the Guest OS is
            # Microsoft Windows. The tools should also be running, or the
            # VM should be powered off using the 'Shutdown Guest OS' option.

            si = get_vcenter_service_instance()
            win_tools_disabled_vms = []
            for vm in vms_to_be_migrated:
                # Check whether the given instance id exists.
                search_index = si.content.searchIndex
                vcenter_vm = search_index.FindByUuid(None, vm,
                                                     True, True)
                if not vcenter_vm:
                    raise wlm_exceptions.InstanceNotFound(
                        instance_id=vm)

                guest_os = vcenter_vm.summary.config.guestFullName
                vmware_tools_status = vcenter_vm.guest.toolsStatus
                power_state = vcenter_vm.runtime.powerState
                if power_state == vim.VirtualMachinePowerState.poweredOff:
                    continue

                if "microsoft" in guest_os.lower():
                    if vmware_tools_status.lower() in \
                            ("toolsnotinstalled", "toolsnotrunning"):
                        win_tools_disabled_vms.append(
                            vcenter_vm.name + '(%s)' % vm)
            if win_tools_disabled_vms:
                raise wlm_exceptions.MigrationError(
                    'Error creating migration, these Windows VMs must have '
                    'VMware Tools installed and running for successful '
                    'migration: %s' % (', '.join(win_tools_disabled_vms)))

            AUDITLOG.log(
                context,
                "Migration plan '%s' initiating migration "
                "'%s' of type '%s'" %
                    (migration_plan['display_name'],
                     migration_options['description'],
                     migration_type),
                 migration_plan)

            try:
                migration_lock.acquire()
                if migration_plan['status'].lower() != 'available':
                    msg = _(
                        "Migration plan must be in the 'available' "
                        "state to invoke migration")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.migration_plan_update(
                    context, migration_plan_id, {
                        'status': 'locked'})
            finally:
                migration_lock.release()

            metadata = {}
            metadata.setdefault('cancel_requested', '0')
            metadata['power_off_vm_post_migrate'] = \
                migration_options.get('power_off_vm_post_migrate', True)
            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'migration_plan_id': migration_plan_id,
                      'migration_type': migration_type,
                      'display_name': migration_options['name'],
                      'display_description': migration_options['description'],
                      'pickle': str(pickle.dumps(migration_options, 0), 'utf-8'),
                      'host': '',
                      'status': 'creating',
                      'metadata': metadata }
            migration = self.db.migration_create(context, values)

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(migration_plan['metadata']['backup_target_type'])

            migration_display_name = migration['display_name']
            local_time = self.get_local_time(context, migration['created_at'])
            migration_display_name = local_time + \
                ' (' + migration['display_name'].decode("utf-8") + ')'

            self.db.migration_update(context,
                                     migration.id,
                                     {'progress_percent': 0,
                                      'progress_msg': 'Migration operation is scheduled',
                                      'status': 'executing'})
            self.scheduler_rpcapi.migration_create(
                context, FLAGS.scheduler_topic, migration['id'])
            return migration
        except Exception as ex:
            self.db.migration_plan_update(
                context, migration_plan_id, {
                    'status': 'available'})
            if migration:
                error_msg = ex.kwargs['reason'] if hasattr(ex, 'kwargs') and 'reason' in ex.kwargs else str(ex)
                migration = self.migration_get(context, migration['id'])
                if migration['error_msg']:
                    error_msg = migration['error_msg'] + '\n' + error_msg
                self.db.migration_update(
                    context, migration['id'], {
                        'status': 'error', 'error_msg': error_msg})
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_get(self, context, migration_id):
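        """Return migration details including its per-VM records.

        Per-VM records and their metadata are collected best-effort; failures
        are logged at debug level and the 'instances' key may then be
        incomplete.
        """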
        rv = self.db.migration_get(
            context,
            migration_id,
            project_only=context.project_only)
        migration_details = dict(rv)
        migration_vms = []
        try:
            for vm_obj in self.db.migration_vms_get(
                    context, migration_id):
                migration_vm = {'id': vm_obj.vm_id,
                                'name': vm_obj.vm_name,
                                'status': vm_obj.status, }
                metadata = {}
                for kvpair in vm_obj.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                migration_vm['metadata'] = metadata
                migration_vms.append(migration_vm)
        except Exception as ex:
            LOG.debug("Failed to fetch migration: {}".format(migration_id))
            LOG.debug(ex)

        migration_details.setdefault('instances', migration_vms)
        return migration_details


    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_get_all(self, context, migration_plan_id=None):
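        """List migrations, optionally filtered by migration plan id."""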
        migrations = self.db.migration_get_all(context, migration_plan_id)
        return migrations


    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def migration_delete(self, context, migration_id):
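        """Request deletion of a migration.

        The migration must be in the 'available', 'error' or 'cancelled'
        state and its migration plan must not be busy; the plan is locked
        for delete, the migration is marked 'deleting' and the delete is
        submitted to the scheduler over RPC.
        """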
        try:
            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot delete migration. ' +
                             'Expected role "%s" is not found. ' % vault.CONF.trustee_role +
                             'Please make sure the user has the trustee role assigned'))
            migration = self.migration_get(context, migration_id)
            migration_plan = self.migration_plan_get(context, migration['migration_plan_id'])

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(migration_plan['metadata']['backup_target_type'])

            migration_display_name = migration['display_name']
            local_time = self.get_local_time( context, migration['created_at'])

            migration_display_name = local_time + \
                    ' (' + migration['display_name'] + ')'
            migration_type = migration['migration_type']
            migration_plan_display_name = migration_plan['display_name']
            AUDITLOG.log(
                context,
                "Migration plan '%s' migration '%s' of type %s delete requested" %
                (migration_plan_display_name, migration_display_name, migration_type),
                migration)

            if migration['status'] not in ['available', 'error', 'cancelled']:
                msg = _(
                    "Migration status must be 'available' or 'error' or 'cancelled'")
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                migration_lock.acquire()
                if migration_plan['status'].lower() != 'available' and \
                    migration_plan['status'].lower() != 'locked_for_delete':
                    msg = _(
                        "Migration plan must be in the 'available' or "
                        "'locked_for_delete' state to delete a migration")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.migration_plan_update(
                    context, migration['migration_plan_id'], {
                        'status': 'locked_for_delete'})
            finally:
                migration_lock.release()

            self.db.migration_update(
                context, migration_id, {
                    'status': 'deleting'})

            status_messages = {'message': 'Migration delete operation starting'}
            options = {
                'display_name': "Migration Delete",
                'display_description': "Migration delete for migration id %s" % migration_id,
                'status': "starting",
                'status_messages': status_messages,
            }

            self.scheduler_rpcapi.migration_delete(
                context, FLAGS.scheduler_topic, migration_id)
            AUDITLOG.log(
                context,
                "Migration plan '%s' migration type '%s' migration '%s' delete submitted" %
                (migration_plan_display_name, migration_type, migration_display_name),
                migration)

            return migration_id

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def migration_cancel(self, context, migration_id):
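        """Request cancellation of a running migration.

        A cancel marker file is written on the migration's backup target and
        the migration status is set to 'cancelling'; the running migration
        job is expected to notice the marker and stop.
        """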
        try:
            migration = self.migration_get(context, migration_id)
            migration_plan = self.db.migration_plan_get(context, migration['migration_plan_id'])

            # check if BT is available
            vault.get_backup_target_by_backup_target_type(migration_plan['metadata']['backup_target_type'])

            migration_display_name = migration['display_name']
            local_time = self.get_local_time( context, migration['created_at'])

            migration_display_name = local_time + \
                    ' (' + migration['display_name'] + ')'
            migration_type = migration['migration_type']
            migration_plan_display_name = migration_plan.display_name
            AUDITLOG.log(
                context,
                "Migration plan '%s' migration '%s' of type %s cancel requested" %
                (migration_plan_display_name, migration_display_name, migration_type),
                migration)

            if migration['status'] in ['available', 'error', 'cancelled']:
                msg = _(
                    "Migration status must not be 'available' or 'error' or 'cancelled'")
                raise wlm_exceptions.InvalidState(reason=msg)

            backup_endpoint = self.db.get_metadata_value(migration_plan.metadata, 'backup_media_target')
            backup_target_type = self.db.get_metadata_value(migration_plan.metadata, 'backup_target_type')
            backup_target = vault.get_backup_target_by_backup_target_type(backup_target_type)
            cancel_file = backup_target.get_cancelled_file_path(
                    {'migration_id': migration_id})
            fileutils.ensure_tree(os.path.dirname(cancel_file))
            with open(cancel_file, "w"):
                pass

            self.db.migration_update(
                context, migration_id, {
                    'status': 'cancelling'})

            AUDITLOG.log(
                context,
                "Migration plan '%s' migration type '%s' migration '%s' cancelled successfully" %
                (migration_plan_display_name, migration_type, migration_display_name),
                migration)

            return migration_id

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_import_migration_plans_list(self, context, project_id=None):
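        """List migration plans found on the configured backup targets.

        Admin only. Scans every enabled backup endpoint for stored
        'migration_plan_db' objects and returns their contents, optionally
        filtered by ``project_id``.
        """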
        AUDITLOG.log(context, 'Get Import Migration Plans List Requested', None)
        if not context.is_admin:
            raise wlm_exceptions.AdminRequired()

        migration_plans = []
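        # Touch every configured backup endpoint first so that all shares are
        # mounted before they are scanned for migration plans.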
        for backup_target_section in vault.CONF.enabled_backends:
            for backup_endpoint in vault.CONF[backup_target_section].vault_storage_filesystem_export.split(','):
                vault.get_backup_target(backup_endpoint)

        for backup_target_section in vault.CONF.enabled_backends:
            for backup_endpoint in vault.CONF[backup_target_section].vault_storage_filesystem_export.split(','):
                backup_target = None
                try:
                    backup_target = vault.get_backup_target(backup_endpoint)
                    for migration_plan_url in backup_target.get_migration_plans(context):
                        try:
                            migration_plan_values = json.loads(backup_target.get_object(
                                os.path.join(migration_plan_url, 'migration_plan_db')))
                            if (project_id is None or
                                    migration_plan_values['project_id'] == project_id):
                                migration_plans.append(migration_plan_values)
                        except Exception as ex:
                            LOG.exception(ex)
                            continue
                except Exception as ex:
                    LOG.exception(ex)

        AUDITLOG.log(context, 'Get Import Migration Plans List Completed', None)
        return migration_plans

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def import_migration_plans(self, context, migration_plan_ids, source_btt, source_btt_all, target_btt, upgrade):
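        """Import migration plans from backup media into the database.

        ``source_btt``/``source_btt_all`` select which backup target types to
        scan for plans, ``target_btt`` (if given) must refer to an existing
        backup target type, and ``upgrade`` is restricted to admin users.
        """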

        AUDITLOG.log(context, 'Import Migration Plans Requested', None)
        if context.is_admin is not True and upgrade is True:
            raise wlm_exceptions.AdminRequired()

        try:
            if target_btt:
                # check if provided target BTT exists
                self.db.backup_target_type_show(context, target_btt)

            migration_plans = []
            # call get_backup_target that makes sure all shares are mounted
            for backup_target_section in vault.CONF.enabled_backends:
                for backup_endpoint in vault.CONF[backup_target_section].vault_storage_filesystem_export.split(','):
                    vault.get_backup_target(backup_endpoint)

            module_name = 'workloadmgr.db.imports.import_workloads'
            import_migration_plan_module = importlib.import_module(module_name)
            import_migration_plan_module.import_settings(context, models.DB_VERSION)

            source_backup_endpoints = set()
            if source_btt or source_btt_all:
                bt_all = self.db.backup_target_get_all(context)
                btt_all = self.db.backup_target_type_get_all(context)
                for btt in btt_all:
                    if (source_btt and (btt.id in source_btt or btt.name in source_btt)) or source_btt_all:
                        for bt in bt_all:
                            if bt.id == btt.backup_targets_id:
                                source_backup_endpoints.add(bt.filesystem_export)
                                break

            migration_plans = import_migration_plan_module.import_migration_plan(
                context, migration_plan_ids, source_backup_endpoints,
                target_btt, models.DB_VERSION, upgrade)

        except wlm_exceptions.WorkloadsNotFound as ex:
            LOG.exception(ex)
            return {'migration_plans': {'imported_migration_plans': [],
                                  'failed_migration_plans': []}}
        except Exception as ex:
            LOG.exception(ex)
            raise ex

        LOG.info('Import Migration Plans Completed')
        return migration_plans

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_barbican_support(self, context):
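        """Report whether Barbican-backed encryption can be used.

        Returns ``{'result': <bool>, 'error_message': <str or None>}``;
        encryption is reported as available only when the distribution
        supports it and a Barbican endpoint is reachable for this context.
        """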
        try:
            error_message = None
            result = False
            barbican_service = barbican.API()
            if not vault.CONF.barbican.encryption_support:
                error_message = 'Trilio does not support encryption on this OpenStack distribution.'
            elif barbican_service.get_barbican_endpoint(context):
                result = True
            return {'result': result, 'error_message': error_message}
        except wlm_exceptions.MissingCredentialError as ex:
            keystone_client = KeystoneClient(context)
            roles_list = keystone_client.client.client.roles.list()
            error_message = 'Encryption is disabled. {}'.format(str(ex))
            missing_roles = []
            for role in roles_list:
                if role.id in str(ex) or role.name in str(ex):
                    missing_roles.append(role.name)
            if missing_roles:
                error_message = 'Encryption is disabled. User is missing the role {} on the project'.format(','.join(missing_roles))
        except Exception as ex:
            error_message = 'Encryption is disabled. Please check Trilio service logs for more details.'
        return {'result': result, 'error_message': error_message}

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def update_workloads_service(self, context, body):
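        """Enable or disable the workloads service on a node.

        Only a user with the admin role may call this. ``body`` carries the
        node name, the desired disabled status and an optional reason that
        is timestamped before being stored.
        """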
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role can enable/disable the workloads service.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            service_obj = self.db.service_get_by_host_and_topic(context, body['node_name'], topic=FLAGS.workloads_topic)
            if body['status']:    # the user wishes to disable the service
                if not body.get('reason'):
                    body['reason'] = 'admin user disabled'
                body['reason'] += ' at ' + str(datetime.utcnow().strftime("%m-%d-%Y %I:%M %p")) + ' UTC'
            self.db.service_update(context, service_obj.id, {'disabled': int(body['status']), 'disabled_reason': body['reason']})
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_vcenter_vms(self, context, search_opts={}):
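        """List virtual machines known to the configured vCenter.

        ``search_opts`` may contain a case-insensitive ``nameregex`` filter,
        e.g. ``{'nameregex': 'web.*'}``; that example pattern is purely
        illustrative.
        """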
        si = get_vcenter_service_instance()

        vms = []
        try:
            content = si.RetrieveContent()

            container = content.rootFolder  # starting point to look into
            view_type = [vim.VirtualMachine]  # object types to look for
            recursive = True  # whether we should look into it recursively
            container_view = content.viewManager.CreateContainerView(
                container, view_type, recursive)

            children = container_view.view
            pat = None
            if search_opts.get("nameregex", None):
                pat = re.compile(search_opts["nameregex"], re.IGNORECASE)
            for child in children:
                # Skip VMs whose name does not match the optional regex filter.
                if pat is not None and \
                        pat.search(child.summary.config.name) is None:
                    continue
                vms.append({'vm_id': child.summary.config.instanceUuid,
                            'vm_name': child.summary.config.name,
                            'power_state': child.runtime.powerState,
                            'tools_running': child.summary.guest.toolsStatus})

            return {'vcenter_vms': vms}
        except vmodl.MethodFault as error:
            LOG.exception(error)
            raise wlm_exceptions.ErrorOccurred(reason=str(error.msg))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def backup_target_get_all(self, context):
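        """Return all backup targets with refreshed status and usage.

        Each target's online/mounted state is probed and, for NFS targets,
        capacity and utilization figures are refreshed from the latest
        statistics before being persisted and returned.
        """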
        backup_targets = self.db.backup_target_get_all(context)
        nfsstats = vault.get_capacities_utilizations(context)
        for backup_target in backup_targets:
            backend = vault.get_backup_target(backup_target.filesystem_export)
            backend_status = 'available'
            if not backend.is_online():
                backend_status = 'offline'
            elif not backend.is_mounted():
                backend_status = 'unmounted'

            if backup_target.type == 'nfs' and backup_target.filesystem_export in nfsstats.keys():
                stat = nfsstats[backup_target.filesystem_export]
                if stat['nfsstatus']:
                    self.db.backup_target_update(context, backup_target.id, {'capacity': stat['total_capacity'], 'used': stat['total_utilization']})
                    backup_target.capacity = stat['total_capacity']
                    backup_target.used = stat['total_utilization']
            self.db.backup_target_update(context, backup_target.id, {'status': backend_status})
            backup_target.status = backend_status

        return backup_targets

    @wrap_check_policy
    def backup_target_show(self, context, backup_target_id):
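        """Return details of a backup target, including its backup target types."""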
        rv = self.db.backup_target_show(context, backup_target_id)
        bt_details = dict(rv)
        btt_list = []
        try:
            for btt in rv.backup_target_types:
                btt_list.append(dict(btt))
        except Exception as ex:
            LOG.exception(ex)
        bt_details.setdefault('backup_target_types', btt_list)
        return bt_details

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def backup_target_type_get_for_project(self, context, project_id):
        backup_target_types = self.db.backup_target_type_get_all_public_and_by_project_id(context, project_id=project_id)
        return backup_target_types

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def backup_target_type_get_all(self, context):
        backup_target_types = self.db.backup_target_type_get_all(context)
        return backup_target_types

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def backup_target_type_get_all_by_backup_target(self, context, backup_target_id):
        backup_target_types = self.db.backup_target_type_get_all_by_backup_target(context, backup_target_id)
        return backup_target_types

    @wrap_check_policy
    def backup_target_type_show(self, context, backup_target_type_id):
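        """Return details of a backup target type, including its projects and metadata."""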
        rv = self.db.backup_target_type_show(context, backup_target_type_id)
        btt_details = dict(rv)
        project_list = []
        try:
            for project in rv.backup_target_type_projects:
                project_list.append(dict(project))
        except Exception as ex:
            LOG.exception(ex)
        metadata_list = []
        try:
            for metadata in rv.backup_target_type_metadata:
                metadata_list.append(dict(metadata))
        except Exception as ex:
            LOG.exception(ex)
        btt_details.setdefault('backup_target_type_projects', project_list)
        btt_details.setdefault('backup_target_type_metadata', metadata_list)
        return btt_details

    @wrap_check_policy
    def backup_target_type_delete(self, context, backup_target_type_id):
        """
        Delete the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            self.db.backup_target_type_delete(context, backup_target_type_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def backup_target_type_create(self, context, backup_target_id, values, project_list=[]):
        """
        Creates the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            return self.db.backup_target_type_create(context, backup_target_id, values, project_list)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def backup_target_type_update(self, context, backup_target_type_id, values, project_list=[]):
        """
        Updates the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            values['user_id'] = context.user
            return self.db.backup_target_type_update(context, backup_target_type_id, values, project_list, purge_metadata=True, purge_projects=True)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def backup_target_type_add_projects(self, context, backup_target_type_id, project_list=[]):
        """
        Add projects to the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            return self.db.backup_target_type_add_projects(context, backup_target_type_id,  project_list)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def backup_target_type_remove_projects(self, context, backup_target_type_id, project_list=[]):
        """
        Remove projects of the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            return self.db.backup_target_type_remove_projects(context, backup_target_type_id, project_list)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def backup_target_type_add_metadata(self, context, backup_target_type_id, metadata={}):
        """
        Add metadata to the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            return self.db.backup_target_type_add_metadata(context, backup_target_type_id,  metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def backup_target_type_remove_metadata(self, context, backup_target_type_id, metadata=[]):
        """
        Remove metadata of the given BTT
        """
        try:
            keystone_client = KeystoneClient(context)
            # Only a user with the admin role is allowed.
            if keystone_client.client.check_user_role(
                    context.project_id, context.user_id, context=context) is False:
                raise wlm_exceptions.RoleNotFound(
                    user_id=context.user_id, role_name='admin',
                    project_id=context.project_id)

            return self.db.backup_target_type_remove_metadata(context, backup_target_type_id, metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(reason=str(ex))

    @wrap_check_policy
    def is_btt_immutable(self, context, backup_target_type_name):
        """ Return Immutability status for backup_target_type"""
        bt_id = self.db.backup_target_type_show(context, backup_target_type_name).backup_targets_id
        bt_obj = self.db.backup_target_show(context, bt_id)
        return bool(int(self.db.get_metadata_value(bt_obj.backup_targets_metadata, 'immutable')))