Why Gemfury? Push, build, and install  RubyGems npm packages Python packages Maven artifacts PHP packages Go Modules Debian packages RPM packages NuGet packages

Repository URL to install this package:

Details    
Size: Mime:
# vim: tabstop=4 shiftwidth=4 softtabstop=4


# Copyright (c) 2014 TrilioData, Inc.
# All Rights Reserved.

"""
Handles all requests relating to the  workloadmgr service.
"""
import base64
import pickle as pickle
import importlib
import json
import os
import ast
import socket
import threading
import time
import uuid
import zlib
import functools
import re
import copy
import six
import ipaddress

from cryptography.hazmat.primitives.asymmetric import utils as crypto_utils
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.primitives import hashes

from datetime import datetime
from datetime import timedelta
from munch import munchify
from oslo_messaging import exceptions as oslo_exceptions

from operator import itemgetter
from workloadmgr.apscheduler.triggers import WorkloadMgrTrigger
from workloadmgr.common import clients
from workloadmgr.common import context as wlm_context
from workloadmgr.openstack.common import log as logging
from workloadmgr.openstack.common import fileutils

from workloadmgr import utils
from workloadmgr.utils import encrypt_password, tvault_key_file_name
from workloadmgr.workloads import rpcapi as workloads_rpcapi
from workloadmgr.scheduler import rpcapi as scheduler_rpcapi
from workloadmgr.wlm_cron import rpcapi as cron_rpcapi
from workloadmgr.api.validation_models import (
    workload_quota_check, storage_quota_check,
    vms_quota_check, snapshot_quota_check
)
from workloadmgr.db import base
from workloadmgr import exception as wlm_exceptions
from workloadmgr import flags
from workloadmgr.compute import nova
from workloadmgr.keymanager import barbican
from workloadmgr.image import glance
from workloadmgr.vault import vault
from workloadmgr.openstack.common import timeutils
from workloadmgr.workloads import workload_utils
from workloadmgr import auditlog
from workloadmgr import autolog
from workloadmgr import policy
from workloadmgr.db.sqlalchemy import models
from workloadmgr.db.sqlalchemy.session import get_session
from workloadmgr.common.workloadmgr_keystoneclient import KeystoneClient
from tzlocal import get_localzone

from workloadmgr.api.validation_models import AllowedQuota
from workloadmgr.api.validation_models import workloads as workload_validator

workload_lock = threading.Lock()

FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)
Logger = autolog.Logger(LOG)
AUDITLOG = auditlog.getAuditLogger()

total_storage_usage = None
total_storage_usage_refresh = datetime.now()

_job_scheduler = None


# do not decorate this function with autolog

def create_trust(cloud_admin=False):
    def create_trust_dec(func):
        def trust_create_wrapper(*args, **kwargs):
            # Clean up trust if the role is changed
            try:
                context = args[1]
                trusts = args[0].trust_list(context)
                for t in trusts:
                    for meta in t.metadata:
                        if meta.key == "role_name":
                            if meta.value not in \
                                [vault.CONF.trustee_role, vault.CONF.cloud_admin_role]:
                                args[0].trust_delete(context, t.name)

                trusts = args[0].trust_list(context)
                if context.roles and \
                        len(set([vault.CONF.trustee_role, 'admin']) & set(context.roles)) < 1:
                    for t in trusts:
                        args[0].trust_delete(context, t.name)
                # create new trust if the trust is not created
                trusts = args[0].trust_list(context)
                if not trusts:
                    if cloud_admin is True:
                        args[0].trust_create(context, vault.CONF.cloud_admin_role, is_cloud_trust=True)
                    else:
                        args[0].trust_create(context, vault.CONF.trustee_role)
            except Exception as ex:
                LOG.exception(ex)
                LOG.error(_("trust is broken, assign valid trustee role"))
                args[1].trust_failed = 'yes'

            return func(*args, **kwargs)
        return trust_create_wrapper
    return create_trust_dec


def parse_license_text(licensetext,
                       public_key=vault.CONF.triliovault_public_key):

    with open(public_key, 'rb') as f:
        key = load_pem_public_key(f.read(), backend=default_backend())

    if not isinstance(key, dsa.DSAPublicKey):
        raise wlm_exceptions.InternalError(
            "Invalid TrilioVault public key ",
            "Cannot validate license")

    if "License Key" not in licensetext:
        raise wlm_exceptions.InvalidLicense(
            message="Cannot find License Key in license key")

    try:
        licensekey = licensetext[licensetext.find(
            "License Key") + len("License Key"):].lstrip().rstrip()
        license_pair_base64 = licensekey[0:licensekey.find('X02')]
        license_pair = base64.b64decode(license_pair_base64)
        ord_len = license_pair.find(b'\r')
        license_text_len = ord(license_pair[0:ord_len].decode('UTF-32BE'))
        license_text = license_pair[ord_len:license_text_len + ord_len]
        license_signature = license_pair[ord_len + license_text_len:]

        chosen_hash = hashes.SHA1()
        hasher = hashes.Hash(chosen_hash, default_backend())
        hasher.update(license_text)
        digest = hasher.finalize()
        try:
            key.verify(license_signature, digest, crypto_utils.Prehashed(chosen_hash))
            properties_text = zlib.decompress(license_text[5:]).decode('utf-8')
            license = {}
            for line in properties_text.split('\n'):
                if len(line.split("=")) != 2:
                    continue
                license[line.split("=")[0].strip()] = line.split("=")[
                    1].lstrip().rstrip()

            return license
        except Exception as ex:
            raise wlm_exceptions.InvalidLicense(
                message="Cannot verify the license signature: {}".format(ex))
    except BaseException:
        raise wlm_exceptions.InvalidLicense(
            message="Cannot verify the license signature")


def validate_license_key(licensekey, func_name, compute_nodes=-1,
                         capacity_utilized=-1, virtual_machines=-1):

    if datetime.now() > datetime.strptime(
            licensekey['LicenseExpiryDate'], "%Y-%m-%d"):
        raise wlm_exceptions.InvalidLicense(
            message="License expired. License expriration date '%s'. "
                    "Today is '%s'" % (licensekey['LicenseExpiryDate'],
                                       datetime.now().strftime("%Y-%m-%d")))

    if 'Compute Nodes' in licensekey['Licensed For']:
        message = "Number of compute nodes deployed '%d' " \
                  "against the licensed number of " \
                  "compute nodes '%d'" % (compute_nodes,
                                          int(licensekey['Licensed For'].split(' Compute Nodes')[0]))
        if compute_nodes > int(
                licensekey['Licensed For'].split(' Compute Nodes')[0]):
            if func_name in ('workload_create', None):
                raise wlm_exceptions.InvalidLicense(
                    message="Number of compute nodes '%d' exceeded the licensed number of "
                    "compute nodes '%d'" %
                    (compute_nodes, int(
                        licensekey['Licensed For'].split(' Compute Nodes')[0])))

    elif ' Backup Capacity' in licensekey['Licensed For']:
        capacity_licensed_str = licensekey['Licensed For'].split(' Backup Capacity')[
            0]

        if 'PB' in capacity_licensed_str:
            capacity_licensed = int(
                capacity_licensed_str.split(' PB')[0]) * 1024 ** 5
            capacity_utilized_str = str(capacity_utilized / 1024 ** 5) + " PB"
        elif 'TB' in capacity_licensed_str:
            capacity_licensed = int(
                capacity_licensed_str.split(' TB')[0]) * 1024 ** 4
            capacity_utilized_str = str(capacity_utilized / 1024 ** 4) + " TB"
        elif 'GB' in capacity_licensed_str:
            capacity_licensed = int(
                capacity_licensed_str.split(' GB')[0]) * 1024 ** 3
            capacity_utilized_str = str(capacity_utilized / 1024 ** 3) + " GB"
        else:
            capacity_licensed = capacity_utilized
            capacity_utilized_str = 'NA'

        message = "Used capacity %s. Licensed capacity '%s' " \
                  % (capacity_utilized_str, capacity_licensed_str)
        if capacity_utilized > capacity_licensed:
            if func_name in ('workload_snapshot', None):
                raise wlm_exceptions.InvalidLicense(
                    message="Backup capacity exceeded. Licensed capacity '%s' "
                            "Used capacity '%s'" % (capacity_licensed_str,
                                                    capacity_utilized_str))

    elif 'Virtual Machines' in licensekey['Licensed For']:
        message = "Number of virtual machines '%d' protected " \
                  "vs the licensed number of " \
                  "virtual machines '%d'" % (virtual_machines,
                                             int(licensekey['Licensed For'].split(' Virtual Machines')[0]))
        if func_name in ('workload_create', None):
            if virtual_machines > int(
                    licensekey['Licensed For'].split(' Virtual Machines')[0]):
                raise wlm_exceptions.InvalidLicense(
                    message="Number of virtual machines '%d' exceeded the licensed number of "
                    "virtual machines '%d'" %
                    (virtual_machines, int(
                        licensekey['Licensed For'].split(' Virtual Machines')[0])))

    return message


class check_license(object):

    def __init__(self, f):
        """
        If there are no decorator arguments, the function
        to be decorated is passed to the constructor.
        """
        self.f = f

    def __call__(self, *args):
        """
        The __call__ method is not called until the
        decorated function is called.
        """
        apiclass = args[0]
        context = args[1]
        try:
            apiclass.get_usage_and_validate_against_license(
                context, method=self.f.__name__)
            return self.f(*args)
        except Exception as ex:
            LOG.exception(ex)
            raise


def upload_settings(func):
    def upload_settings_wrapper(*args, **kwargs):
        # Clean up trust if the role is changed
        context = args[1]

        ret_val = func(*args, **kwargs)
        workload_utils.upload_settings_db_entry(context)
        return ret_val

    return upload_settings_wrapper


def wrap_check_policy(func):
    """
    Check policy corresponding to the wrapped methods prior to execution
    This decorator requires the first 2 args of the wrapped function
    to be (self, context)
    """

    @functools.wraps(func)
    def wrapped(self, context, *args, **kwargs):
        check_policy(context, func.__name__)
        context.project_only = 'yes'
        if 'snapshot' in func.__name__:
            context.project_only = 'no'
        return func(self, context, *args, **kwargs)

    return wrapped


def check_policy(context, action):
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    if 'workload' in action:
        _action = 'workload:%s' % action
    elif 'snapshot' in action:
        _action = 'snapshot:%s' % action
    elif 'restore' in action:
        _action = 'restore:%s' % action
    elif 'filesearch' in action:
        _action = 'filesearch:%s' % action
    else:
        _action = 'workload:%s' % action
    policy.enforce(context, _action, target)


class API(base.Base):

    """API for interacting with the Workload Manager."""

    # This singleton implementation is not thread safe
    # REVISIT: should the singleton be threadsafe

    __single = None  # the one, true Singleton

    @autolog.log_method(logger=Logger)
    def __new__(classtype, *args, **kwargs):
        # Check to see if a __single exists already for this class
        # Compare class types instead of just looking for None so
        # that subclasses will create their own __single objects
        if not isinstance(classtype.__single, classtype):
            classtype.__single = object.__new__(classtype, *args, **kwargs)
        return classtype.__single

    @autolog.log_method(logger=Logger)
    def __init__(self, db_driver=None):
        if not hasattr(self, "workloads_rpcapi"):
            self.workloads_rpcapi = workloads_rpcapi.WorkloadMgrAPI()

        if not hasattr(self, "scheduler_rpcapi"):
            self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()

        if not hasattr(self, "cron_rpcapi"):
            self.cron_rpcapi = cron_rpcapi.CronAPI()

        super(API, self).__init__(db_driver)


    @autolog.log_method(logger=Logger)
    def _apply_workload_policy(self, context, policy_id, jobschedule):
        try:
            policy = self.policy_get(context, policy_id)
            assignments = self.get_assigned_policies(
                context, context.project_id)

            available_policies = [
                assignment.policy_id for assignment in assignments]
            if len(available_policies) == 0:
                message = "No policy is assigned to project: %s" % (
                    context.project_id)
                raise wlm_exceptions.ErrorOccurred(message)
            if policy_id not in available_policies:
                message = "Given policy: %s not availabe for project: %s" % (
                    policy_id, context.project_id)
                raise wlm_exceptions.ErrorOccurred(message)

            for field_value in policy.field_values:
                jobschedule[field_value['policy_field_name']
                            ] = field_value['value']
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def search(self, context, data):
        if not re.match("^(/[^/ ]*)+/?$", data['filepath']):
            msg = _('Provide valid linux filepath to search')
            raise wlm_exceptions.Invalid(reason=msg)
        vm_found = self.db.workload_vm_get_by_id(
            context, data['vm_id'], read_deleted='yes', workloads_filter='deleted')
        if len(vm_found) == 0:
            # Check in snapshot vms
            vm_found = self.db.snapshot_vm_get(context, data['vm_id'], None)
            if vm_found is None:
                msg = _('vm_id not existing with this tenant')
                raise wlm_exceptions.Invalid(reason=msg)
            snapshot = self.db.snapshot_get(context, vm_found.snapshot_id)
            workload_id = snapshot.workload_id
        else:
            workload_id = vm_found[0].workload_id
        workload = self.db.workload_get(context, workload_id)
        if workload['status'] != 'available':
            msg = _('Vm workload is not in available state to perform search')
            raise wlm_exceptions.Invalid(reason=msg)
        kwargs = {'vm_id': data['vm_id'], 'status': 'completed'}
        search_list = self.db.file_search_get_all(context, **kwargs)
        if len(search_list) > 0:
            msg = _('Search with this vm_id already in exceution')
            raise wlm_exceptions.Invalid(reason=msg)
        if data['date_from'] != '':
            try:
                datetime.strptime(data['date_from'], '%Y-%m-%dT%H:%M:%S')
            except BaseException:
                msg = _("Please provide "
                        "valid date_from in Format YYYY-MM-DDTHH:MM:SS")
                raise wlm_exceptions.Invalid(reason=msg)

            if data['date_to'] != '':
                try:
                    datetime.strptime(data['date_to'], '%Y-%m-%dT%H:%M:%S')
                except BaseException:
                    msg = _("Please provide "
                            "valid date_to in Format YYYY-MM-DDTHH:MM:SS")
                    raise wlm_exceptions.Invalid(reason=msg)
        if isinstance(data['snapshot_ids'], list):
            data['snapshot_ids'] = ",".join(data['snapshot_ids'])
        options = {'vm_id': data['vm_id'],
                   'project_id': context.project_id,
                   'user_id': context.user_id,
                   'filepath': data['filepath'],
                   'snapshot_ids': data['snapshot_ids'],
                   'start': data['start'],
                   'end': data['end'],
                   'date_from': data['date_from'],
                   'date_to': data['date_to'],
                   'status': 'executing', }
        search = self.db.file_search_create(context, options)
        self.scheduler_rpcapi.file_search(
            context, FLAGS.scheduler_topic, search['id'])
        return search

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def search_show(self, context, search_id):
        search = self.db.file_search_get(context, search_id)
        return search

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_get(self, context, workload_type_id):
        workload_type = self.db.workload_type_get(context, workload_type_id)
        workload_type_dict = dict(workload_type)
        metadata = {}
        for kvpair in workload_type.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        workload_type_dict['metadata'] = metadata
        return workload_type_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_show(self, context, workload_type_id):
        workload_type = self.db.workload_type_get(context, workload_type_id)
        workload_type_dict = dict(workload_type)
        metadata = {}
        for kvpair in workload_type.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        workload_type_dict['metadata'] = metadata
        return workload_type_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_get_all(self, context, search_opts={}):
        workload_types = self.db.workload_types_get(context)
        return workload_types

    @autolog.log_method(logger=Logger)
    @create_trust(cloud_admin=True)
    @wrap_check_policy
    def workload_type_create(self, context, id, name,
                             description, is_public, metadata):
        """
        Create a workload_type. No RPC call is made
        """
        if hasattr(context, 'trust_failed'):
            raise wlm_exceptions.Invalid(
                 reason=_('Trust broken: Cannot create cloud admin trust.'
                          'Please make sure admin user has the %s role assigned' %
                          vault.CONF.cloud_admin_role))

        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            name +
            '\' Create Requested',
            None)
        options = {'user_id': context.user_id,
                   'project_id': context.project_id,
                   'id': id,
                   'display_name': name,
                   'display_description': description,
                   'is_public': is_public,
                   'status': 'available',
                   'metadata': metadata, }

        workload_type = self.db.workload_type_create(context, options)
        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            name +
            '\' Create Submitted',
            workload_type)
        return workload_type

    @autolog.log_method(logger=Logger)
    def workload_type_update_all(self, context, data):
        """
        update all workload_types. No RPC call is made
        """
        self.db.workload_type_update_all(context, data)
        workload_types = self.db.workload_types_get(context)
        return workload_types

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_delete(self, context, workload_type_id):
        """
        Delete a workload_type. No RPC call is made
        """
        workload_type = self.workload_type_get(context, workload_type_id)
        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            workload_type['display_name'] +
            '\' Delete Requested',
            workload_type)
        if workload_type['status'] not in ['available', 'error']:
            msg = _("WorkloadType status must be 'available' or 'error'")
            raise wlm_exceptions.InvalidState(reason=msg)

        # TODO(giri): check if this workload_type is referenced by other
        # workloads

        self.db.workload_type_delete(context, workload_type_id)
        AUDITLOG.log(
            context,
            'WorkloadType \'' +
            workload_type['display_name'] +
            '\' Delete Submitted',
            workload_type)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_discover_instances(
            self, context, workload_type_id, metadata):
        """
        Discover Instances of a workload_type. RPC call is made
        """
        if not metadata:
            msg = _(
                'metadata field is null. Pass valid metadata to discover the workload')
            raise wlm_exceptions.Invalid(reason=msg)
        try:
            return self.workloads_rpcapi.workload_type_discover_instances(
                context, socket.gethostname(), workload_type_id, metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_type_topology(self, context, workload_type_id, metadata):
        """
        Topology  of a workload_type. RPC call is made
        """
        if not metadata:
            msg = _(
                'metadata field is null. Pass valid metadata to discover the workload')
            raise wlm_exceptions.Invalid(reason=msg)

        try:
            return self.workloads_rpcapi.workload_type_topology(
                context, socket.gethostname(), workload_type_id, metadata)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get(self, context, workload_id):
        kwargs = {}
        workload = self.db.workload_get(context, workload_id)
        workload_dict = dict(workload)

        workload_dict['storage_usage'] = {
            'usage': 0, 'full': {
                'snap_count': 0, 'usage': 0}, 'incremental': {
                'snap_count': 0, 'usage': 0}}
        for workload_snapshot in self.db.snapshot_get_all_by_workload(
                context, workload_id, **kwargs):
            if not workload_snapshot.data_deleted:
                if workload_snapshot.snapshot_type == 'incremental':
                    workload_dict['storage_usage']['incremental']['snap_count'] = \
                        workload_dict['storage_usage'][
                            'incremental']['snap_count'] + 1
                    workload_dict['storage_usage']['incremental']['usage'] = \
                        workload_dict['storage_usage']['incremental']['usage'] + \
                        workload_snapshot.size
                else:
                    workload_dict['storage_usage']['full']['snap_count'] = workload_dict['storage_usage']['full'][
                        'snap_count'] + 1
                    workload_dict['storage_usage']['full']['usage'] = workload_dict[
                        'storage_usage']['full']['usage'] + workload_snapshot.size
        workload_dict['storage_usage']['usage'] = workload_dict['storage_usage']['full']['usage'] + \
            workload_dict['storage_usage']['incremental']['usage']

        workload_vms = []
        for workload_vm_obj in self.db.workload_vms_get(context, workload.id):
            workload_vm = {
                'id': workload_vm_obj.vm_id,
                'name': workload_vm_obj.vm_name}
            metadata = {}
            for kvpair in workload_vm_obj.metadata:
                metadata.setdefault(kvpair['key'], kvpair['value'])
            workload_vm['metadata'] = metadata
            workload_vms.append(workload_vm)
        workload_dict['instances'] = workload_vms

        metadata = {}
        for kvpair in workload.metadata:
            metadata.setdefault(kvpair['key'], kvpair['value'])
        metadata['backup_media_target'] = metadata.get(
            "backup_media_target", "NA")
        if context.is_admin is False:
            metadata.get("backup_media_target", None) and \
                metadata.pop("backup_media_target")

        workload_dict['metadata'] = metadata
        workload_dict['jobschedule'] = pickle.loads(bytes(workload.jobschedule, 'utf-8'))
        workload_dict['jobschedule']['enabled'] = False
        workload_dict['jobschedule'][
            'global_jobscheduler'] = self.workload_get_global_job_scheduler(context)
        # find the job object based on workload_id
        jobs = self.db.job_get_all(context)
        for job in jobs:
            if job.workload_id == workload_id:
                workload_dict['jobschedule']['enabled'] = True
                break

        return workload_dict

    def convert_date_time_zone(self, jobschedule):
        if 'timezone' in jobschedule:
            date_time = utils.get_local_time(
                jobschedule['start_date'] +
                " " +
                jobschedule['start_time'],
                "%m/%d/%Y %I:%M %p",
                "%m/%d/%Y %I:%M %p",
                jobschedule['timezone']).split(" ")
            jobschedule['start_date'] = date_time[0]
            jobschedule['start_time'] = date_time[1] + " " + date_time[2]
            jobschedule['appliance_timezone'] = str(get_localzone())
            return jobschedule
        return jobschedule

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_show(self, context, workload_id):
        kwargs = {}
        workload = self.db.workload_get(context, workload_id, **kwargs)
        workload_dict = dict(workload)
        workload_dict['storage_usage'] = {
            'usage': 0, 'full': {
                'snap_count': 0, 'usage': 0}, 'incremental': {
                'snap_count': 0, 'usage': 0}}

        for workload_snapshot in self.db.snapshot_get_all_by_workload(
                context, workload_id, **kwargs):
            if workload_snapshot is None:
                msg = _("Not found any snapshots or operation not allowed")
                wlm_exceptions.ErrorOccurred(reason=msg)

            if not workload_snapshot.data_deleted:
                if workload_snapshot.snapshot_type == 'incremental':
                    workload_dict['storage_usage']['incremental']['snap_count'] = \
                        workload_dict['storage_usage'][
                            'incremental']['snap_count'] + 1
                    workload_dict['storage_usage']['incremental']['usage'] = \
                        workload_dict['storage_usage']['incremental']['usage'] + \
                        workload_snapshot.size
                else:
                    workload_dict['storage_usage']['full']['snap_count'] = workload_dict['storage_usage']['full'][
                        'snap_count'] + 1
                    workload_dict['storage_usage']['full']['usage'] = workload_dict[
                        'storage_usage']['full']['usage'] + workload_snapshot.size
        workload_dict['storage_usage']['usage'] = workload_dict['storage_usage']['full']['usage'] + \
            workload_dict['storage_usage']['incremental']['usage']

        workload_vms = []
        for workload_vm_obj in self.db.workload_vms_get(context, workload.id):
            workload_vm = {
                'id': workload_vm_obj.vm_id,
                'name': workload_vm_obj.vm_name}
            metadata = {}
            for kvpair in workload_vm_obj.metadata:
                metadata.setdefault(kvpair['key'], kvpair['value'])
            workload_vm['metadata'] = metadata
            workload_vms.append(workload_vm)
        workload_dict['instances'] = workload_vms

        metadata = {}
        metadata_type = self.db.workload_type_get(
            context, workload.workload_type_id).metadata
        for kvpair in workload.metadata:
            mtype = None
            for mtype in metadata_type:
                try:
                    if mtype['key'] == kvpair['key'] and json.loads(
                            mtype.value)['type'] == 'password':
                        break
                except BaseException as bex:
                    LOG.exception(bex)

            try:
                if mtype['key'] == kvpair['key'] and json.loads(
                        mtype.value)['type'] == 'password':
                    metadata.setdefault(kvpair['key'], "**********")
                else:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
            except BaseException:
                metadata.setdefault(kvpair['key'], kvpair['value'])
                pass

        metadata['backup_media_target'] = metadata.get(
            "backup_media_target", "NA")
        if context.is_admin is False:
            metadata.get("backup_media_target", None) and \
                metadata.pop("backup_media_target")
        workload_dict['metadata'] = metadata
        workload_dict['jobschedule'] = pickle.loads(bytes(workload.jobschedule, 'utf-8'))
        workload_dict['jobschedule']['enabled'] = False
        workload_dict['jobschedule'][
           'global_jobscheduler'] = self.workload_get_global_job_scheduler(context)

        # find the job object based on workload_id
        jobs = self.db.job_get_all(context)
        for job in jobs:
            if job.workload_id == workload_id:
                current_time = datetime.now()
                workload_dict['jobschedule']['enabled'] = True
                next_run_time = WorkloadMgrTrigger(
                    workload_dict['jobschedule']
                ).get_next_fire_time(current_time)
                if isinstance(next_run_time, datetime):
                    time_diff = next_run_time - current_time
                    workload_dict['jobschedule'][
                        'nextrun'] = time_diff.total_seconds()
                else:
                    # if no "nextrun" then show that trigger is disabled
                    workload_dict['jobschedule']['enabled'] = False
                break
        return workload_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_all(self, context, search_opts={}):
        workloads = self.db.workload_get_all(context, **search_opts)
        return workloads

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_vms_get_all(self, context, search_opts={}):
        db_vms = self.db.workload_vms_get(context, None, **search_opts)
        vms = [{'id': vm.vm_id} for vm in db_vms]
        return {'protected_vms': vms}

    @autolog.log_method(logger=Logger)
    @create_trust()
    @check_license
    @wrap_check_policy
    def workload_create(self, context, name, description, workload_type_id,
                        source_platform, instances, jobschedule, metadata,
                        is_workload_encrypted=False, secret_uuid=None, availability_zone=None):
        """
        Make the RPC call to create a workload.
        """
        try:
            allowed_quota_obj = workload_quota_check(
                context.project_id, self.db
            )
            if allowed_quota_obj \
                    and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                # TODO: check how to handle this warning at UI side
                AUDITLOG.log(
                    context,
                    "Current project's workload allowed quota has reached to high watermark" +
                    "Contact to the admin",
                    None)
                LOG.warning("Current project's workload allowed quota has "
                            "reached to high watermark. Contact to the admin")
        except ValueError as e:
            raise wlm_exceptions.QuotaLimitExceededError()
        except Exception as ex:
            LOG.exception(ex)
            raise
        try:
            allowed_quota_obj = vms_quota_check(
                context.project_id, self.db, len(instances)
            )
            if allowed_quota_obj \
                    and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                # TODO: check how to handle this warning at UI side
                AUDITLOG.log(
                    context,
                    "Current project's VM allowed quota has reached to high watermark" +
                    "Contact to the admin",
                    None)
                LOG.warning("Current project's VM allowed quota has "
                            "reached to high watermark. Contact to the admin")
        except ValueError as e:
            raise wlm_exceptions.QuotaLimitExceededError()
        except Exception as ex:
            LOG.exception(ex)
            raise

        try:
            AUDITLOG.log(
                context,
                'Workload \'' +
                name +
                '\' Create Requested',
                None)

            policy_id = metadata.get('policy_id', None)
            if policy_id is not None:
                self._apply_workload_policy(context, policy_id, jobschedule)

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))

            compute_service = nova.API(production=True)
            instances_with_name = compute_service.get_servers(context)
            instance_ids = [x.id for x in instances_with_name]
            # TODO(giri): optimize this lookup

            if len(instances) == 0:
                raise wlm_exceptions.InvalidRequest(
                    reason="No instances found in the workload create request")

            for instance in instances:
                # Check whether given instance id exist or not.
                if not instance_ids or instance['instance-id'] not in instance_ids:
                    raise wlm_exceptions.InstanceNotFound(
                        instance_id=instance['instance-id'])
                for instance_with_name in instances_with_name:
                    if instance_with_name.tenant_id != context.project_id:
                        msg = _(
                            'Invalid instance as ' +
                            instance_with_name.name +
                            ' is not associated with your current tenant')
                        raise wlm_exceptions.Invalid(reason=msg)
                    if instance['instance-id'] == instance_with_name.id:
                        vm_found = self.db.workload_vm_get_by_id(
                            context, instance_with_name.id)
                        if isinstance(vm_found, list):
                            if len(vm_found) > 0:
                                msg = _(
                                    'Invalid instance as ' +
                                    instance_with_name.name +
                                    ' already attached with other workload')
                                raise wlm_exceptions.Invalid(reason=msg)
                        else:
                            msg = _(
                                'Error processing instance' +
                                instance_with_name.name)
                            raise wlm_exceptions.Invalid(reason=msg)
                        instance['instance-name'] = instance_with_name.name
                        if instance_with_name.metadata:
                            instance['metadata'] = instance_with_name.metadata
                            if 'imported_from_vcenter' in instance_with_name.metadata and \
                                    instances_with_name[0].metadata['imported_from_vcenter'] == 'True':
                                source_platform = "vmware"
                        break

            workload_type_id_valid = False
            workload_types = self.workload_type_get_all(context)
            for workload_type in workload_types:
                if workload_type_id is None and workload_type.display_name == 'Serial':
                    workload_type_id = workload_type.id
                    workload_type_id_valid = True
                    break
                if workload_type_id == workload_type.id:
                    workload_type_id_valid = True
                    break

            if not workload_type_id_valid:
                msg = _('Invalid workload type')
                raise wlm_exceptions.Invalid(reason=msg)

            if str(jobschedule['enabled']).lower() in ('true', '1'):
                jobschedule['enabled'] = True
            elif str(jobschedule['enabled']).lower() in ('false', '0'):
                jobschedule['enabled'] = False

            # updating timezone to user's local timezone
            if jobschedule.get('local_clock') and isinstance(jobschedule['local_clock'], str):
                jobschedule['timezone'] = jobschedule['local_clock'].split()[-1]

            if 'start_date' in jobschedule:
                try:
                    datetime.strptime(jobschedule['start_date'], '%m/%d/%Y')
                except ValueError as ex:
                     raise wlm_exceptions.Invalid(
                         reason=_("start_date: {} does not match format '%m/%d/%Y'. Examples: 06/05/2014, 07/15/2014".format(jobschedule['start_date'])))

            if 'end_date' in jobschedule and \
                jobschedule['end_date'].lower() != 'no end':
                try:
                    datetime.strptime(jobschedule['end_date'], '%m/%d/%Y')
                except ValueError as ex:
                     raise wlm_exceptions.Invalid(
                         reason=_("end_date: {} does not match format '%m/%d/%Y'. Examples: 06/05/2014, 07/15/2014".format(jobschedule['end_date'])))

            if 'start_time' in jobschedule:
                try:
                    datetime.strptime(jobschedule['start_time'], '%I:%M %p')
                except ValueError as ex:
                    raise wlm_exceptions.Invalid(
                        reason=_("start_time: {} does not match format '%I:%M %p'. Examples: 12:05 PM, 04:30 AM".format(jobschedule['start_time'])))

            jobschedule = self.convert_date_time_zone(jobschedule)

            if 'hostnames' not in metadata:
                metadata['hostnames'] = json.dumps([])

            if 'preferredgroup' not in metadata:
                metadata['preferredgroup'] = json.dumps([])

            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'display_name': name,
                       'display_description': description,
                       'status': 'creating',
                       'source_platform': source_platform,
                       'workload_type_id': workload_type_id,
                       'metadata': metadata,
                       'encryption': is_workload_encrypted,
                       'secret_uuid': secret_uuid,
                       'jobschedule': str(pickle.dumps(jobschedule, 0), 'utf-8'),
                       'host': socket.gethostname(), }
            workload = self.db.workload_create(context, options)

            for instance in instances:
                values = {'workload_id': workload.id,
                          'vm_id': instance['instance-id'],
                          'vm_name': instance['instance-name'],
                          'status': 'available',
                          'metadata': instance.get('metadata', {})}
                vm = self.db.workload_vms_create(context, values)

            if is_workload_encrypted:
                barbican_service = barbican.API()
                body = {
                        'metadata':{
                            'workload_id': workload.id,
                            'workload_name': workload.name
                            }
                    }
                res = barbican_service.update_secret_metadata(
                        context,
                        secret_uuid,
                        body)

            self.scheduler_rpcapi.workload_create(context,
                                                  FLAGS.scheduler_topic,
                                                  workload['id'])

            # Now register the job with job scheduler
            # HEre is the catch. The workload may not been fully created yet
            # so the job call back should only start creating snapshots when
            # the workload is successfully created.
            # the workload has errored during workload creation, then it should
            # remove itself from the job queue
            # if we fail to schedule the job, we should fail the
            # workload create request?
            # _snapshot_create_callback([], kwargs={  'workload_id':workload.id,
            #                                        'user_id': workload.user_id,
            #                                        'project_id':workload.project_id})
            #
            #               jobschedule = {'start_date': '06/05/2014',
            #                              'end_date': '07/05/2015',
            #                              'interval': '1 hr',
            #                              'start_time': '2:30 PM',
            #                              'retention_policy_type': 'Number of Snapshots to Keep',
            #                              'retention_policy_value': '30'}

            self.cron_rpcapi.workload_add_scheduler_job(context, jobschedule, workload)

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' Create Submitted',
                workload)
            return workload
        except Exception as ex:
            LOG.exception(ex)
            if 'workload' in locals():
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else {}})
            raise


    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def workload_modify(self, context, workload_id, workload, is_admin_dashboard=False):
        """
        Make the RPC call to modify a workload.
        """
        def _should_revisit_jobschedule(src_jobschedule, dest_jobschedule):
            if src_jobschedule['enabled'] == dest_jobschedule['enabled']:
                check_list = ['interval', 'end_date']
                return any(src_jobschedule.get(check, None) != dest_jobschedule.get(check, None)
                           for check in check_list if check in src_jobschedule)
            return False

        workloadobj = self.workload_get(context, workload_id)
        no_of_new_instances = len(workload.get('instances', [])) - len(workloadobj.get('instances', []))
        if no_of_new_instances and no_of_new_instances > 0:
            try:
                allowed_quota_obj = vms_quota_check(
                    context.project_id, self.db, no_of_new_instances
                )
                if allowed_quota_obj \
                        and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                    # TODO: check how to handle this warning at UI side
                    AUDITLOG.log(
                        context,
                        "Current project's VM allowed quota has reached to high watermark" +
                        "Contact to the admin",
                        None)
                    LOG.warning("Current project's VM allowed quota has "
                                "reached to high watermark. Contact to the admin")
            except ValueError as e:
                raise wlm_exceptions.QuotaLimitExceededError()
            except Exception as ex:
                LOG.exception(ex)
                raise

        AUDITLOG.log(
            context,
            'Workload \'' +
            workloadobj['display_name'] +
            '\' Modify Requested',
            None)

        purge_metadata = False
        pause_workload = False
        unpause_workload = False
        options = {}

        if hasattr(context, 'trust_failed'):
            raise wlm_exceptions.Invalid(
                reason=_('Trust broken: Cannot edit workload. ' +
                         'Expected role "%s" is not found. ' % vault.CONF.trustee_role +
                         'Please make sure user has the trustee role assigned'))
        # Always update the user_id from the context. This will handle the case where
        # a user creates the workload, user gets deleted and a new user assigned to
        # take up that role. old workload job schedulers won't run because they
        # are executed based on the user_id that is saved in workload record.
        # By forcing user_id update, the new user can switch the job scheduler
        # to generate token based on his trust.
        # If call is coming from Admin panel then don't update the user id as
        # it will result in failure of scheduler trust.
        if is_admin_dashboard is False:
            options['user_id'] = context.user_id
        if 'name' in workload and workload['name']:
            options['display_name'] = workload['name']

        if 'description' in workload and workload['description']:
            options['display_description'] = workload['description']

        assignments = self.get_assigned_policies(context, context.project_id)
        available_policies = [assignment.policy_id for assignment in assignments]

        if len(available_policies) > 0 and 'policy_id' not in workload.get('metadata', {})\
                                      and len(workload.get('jobschedule', {})) > 0:

            fields = self.db.policy_fields_get_all(context)
            policy_fields = [f.field_name for f in fields]

            if len(set(workload.get('jobschedule', {}).keys()).intersection(set(policy_fields))) > 0:
                msg = "Can not update policy fields settings when policies are "\
                    "applied on project, please use available policies: %s" %(available_policies)
                raise wlm_exceptions.ErrorOccurred(reason=msg)

        if 'metadata' in workload and workload['metadata']:
            purge_metadata = True
            options['metadata'] = workload['metadata']
            if 'policy_id' in workload['metadata'] and workload['metadata']['policy_id'] is not None:
                policy_id = workload['metadata']['policy_id']
                if policy_id not in available_policies:
                    message = "Policy %s is not assigned to project %s" % (
                        policy_id, context.project_id)
                    raise wlm_exceptions.ErrorOccurred(message)

                if 'jobschedule' not in workload:
                    workload['jobschedule'] = {}
                self._apply_workload_policy(
                    context, policy_id, workload['jobschedule'])

        if 'jobschedule' in workload and workload['jobschedule']:
            if 'start_date' in workload['jobschedule']:
                try:
                    datetime.strptime(workload['jobschedule']['start_date'], '%m/%d/%Y')
                except ValueError as ex:
                     raise wlm_exceptions.Invalid(
                         reason=_("start_date: {} does not match format '%m/%d/%Y'. Examples: 06/05/2014, 07/15/2014".format(workload['jobschedule']['start_date'])))

            if 'end_date' in workload['jobschedule'] and \
                workload['jobschedule']['end_date'].lower() != 'no end':
                try:
                    datetime.strptime(workload['jobschedule']['end_date'], '%m/%d/%Y')
                except ValueError as ex:
                     raise wlm_exceptions.Invalid(
                         reason=_("end_date: {} does not match format '%m/%d/%Y'. Examples: 06/05/2014, 07/15/2014".format(workload['jobschedule']['end_date'])))

            if 'start_time' in workload['jobschedule']:
                try:
                    datetime.strptime(workload['jobschedule']['start_time'], '%I:%M %p')
                except ValueError as ex:
                    raise wlm_exceptions.Invalid(
                        reason=_("start_time: {} does not match format '%I:%M %p'. Examples: 12:05 PM, 04:30 AM".format(workload['jobschedule']['start_time'])))

            if 'fullbackup_interval' not in workload['jobschedule']:
                workload['jobschedule']['fullbackup_interval'] = workloadobj[
                    'jobschedule']['fullbackup_interval']

            if 'start_time' not in workload['jobschedule']:
                workload['jobschedule']['start_time'] = workloadobj[
                    'jobschedule']['start_time']

            if 'interval' not in workload['jobschedule']:
                workload['jobschedule']['interval'] = workloadobj[
                    'jobschedule']['interval']

            if 'enabled' not in workload['jobschedule']:
                workload['jobschedule']['enabled'] = workloadobj[
                    'jobschedule']['enabled']

            if 'start_date' not in workload['jobschedule']:
                workload['jobschedule']['start_date'] = workloadobj[
                    'jobschedule']['start_date']

            if 'retention_policy_type' not in workload['jobschedule']:
                workload['jobschedule']['retention_policy_type'] = workloadobj[
                    'jobschedule']['retention_policy_type']

            if 'retention_policy_value' not in workload['jobschedule']:
                workload['jobschedule']['retention_policy_value'] = workloadobj[
                    'jobschedule']['retention_policy_value']

            if str(workload['jobschedule']['enabled']).lower() in ('true', '1'):
                workload['jobschedule']['enabled'] = True
            elif str(workload['jobschedule']['enabled']).lower() in ('false', '0'):
                workload['jobschedule']['enabled'] = False

            if workloadobj['jobschedule']['enabled'] != workload['jobschedule'][
                    'enabled'] and workloadobj['jobschedule']['enabled']:
                pause_workload = True

            if workloadobj['jobschedule']['enabled'] != workload['jobschedule']['enabled'] and workload['jobschedule']['enabled']:
                unpause_workload = True

            if workload['jobschedule']['enabled'] and \
               _should_revisit_jobschedule(workload['jobschedule'], workloadobj['jobschedule']):
                pause_workload = True
                unpause_workload = True

            # updating timezone to user's local timezone
            if workload['jobschedule'].get('local_clock') and isinstance(workload['jobschedule']['local_clock'], str):
                workload['jobschedule']['timezone'] = workload['jobschedule']['local_clock'].split()[-1]

            workload['jobschedule'] = self.convert_date_time_zone(
                workload['jobschedule'])
            options['jobschedule'] = str(pickle.dumps(workload['jobschedule'], 0), 'utf-8')


        if 'instances' in workload and workload['instances']:
            compute_service = nova.API(production=True)
            instances = workload['instances']
            instances_with_name = compute_service.get_servers(context)
            for instance in instances:
                if not isinstance(instance, dict) or \
                        'instance-id' not in instance:
                    msg = _(
                        "Workload definition key 'instances' must be a dictionary "
                        "with 'instance-id' key")
                    raise wlm_exceptions.Invalid(reason=msg)

                found = False
                for existing_instance in instances_with_name:
                    if existing_instance.tenant_id != context.project_id:
                        msg = _(
                            'Invalid instance as ' +
                            existing_instance.name +
                            ' is not associated with your current tenant')
                        raise wlm_exceptions.Invalid(reason=msg)
                    if instance['instance-id'] == existing_instance.id:
                        vm_found = self.db.workload_vm_get_by_id(
                            context, existing_instance.id)
                        if isinstance(vm_found, list):
                            if len(vm_found) > 0 and \
                                    vm_found[0].workload_id != workload_id:
                                msg = _("Invalid instance as " +
                                        existing_instance.name +
                                        " already part of workload '%s'" %
                                        (vm_found[0].workload_id))
                                raise wlm_exceptions.Invalid(reason=msg)
                        else:
                            msg = _(
                                'Error processing instance' +
                                existing_instance.name)
                            raise wlm_exceptions.Invalid(reason=msg)

                        instance['instance-name'] = existing_instance.name
                        instance['metadata'] = existing_instance.metadata
                        found = True
                        break

                if not found:
                    msg = _(
                        "Workload definition contains instance id that cannot be "
                        "found in the cloud")
                    raise wlm_exceptions.Invalid(reason=msg)

            for vm in self.db.workload_vms_get(context, workload_id):
                compute_service.delete_meta(context, vm.vm_id,
                                            ['workload_id', 'workload_name'])
                self.db.workload_vms_delete(context, vm.vm_id, workload_id)

            for instance in instances:
                values = {'workload_id': workload_id,
                          'vm_id': instance['instance-id'],
                          'metadata': instance['metadata'],
                          'status': 'available',
                          'vm_name': instance['instance-name']}
                vm = self.db.workload_vms_create(context, values)
                compute_service.set_meta_item(
                    context, vm.vm_id, 'workload_id', workload_id)
                compute_service.set_meta_item(
                    context, vm.vm_id, 'workload_name', workloadobj['display_name'])

        try:
            if pause_workload is True:
                self.workload_pause(context, workload_id)
            workload_obj = self.db.workload_update(
                context, workload_id, options, purge_metadata)
            if unpause_workload is True:
                self.workload_resume(context, workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=_("Error Modifying workload"))

        workload_utils.upload_workload_db_entry(context, workload_id)

        AUDITLOG.log(
            context,
            'Workload \'' +
            workload_obj['display_name'] +
            '\' Modify Submitted',
            workload_obj)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_delete(self, context, workload_id, database_only=False):
        """
        Delete a workload. No RPC call is made
        """
        try:
            if context.is_admin is False and database_only is True:
                raise wlm_exceptions.AdminRequired()

            workload = self.workload_get(context, workload_id)
            display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Delete Requested',
                workload)
            if workload['status'] not in ['available', 'error']:
                msg = _("Workload status must be 'available' or 'error'")
                raise wlm_exceptions.InvalidState(reason=msg)

            '''
            workloads = self.db.workload_get_all(context)
            for workload in workloads:
                if workload.deleted:
                    continue
                workload_type = self.db.workload_type_get(context, workload.workload_type_id)
                if (workload_type.display_name == 'Composite'):
                    for kvpair in workload.metadata:
                        if kvpair['key'] == 'workloadgraph':
                            graph = json.loads(kvpair['value'])
                            for flow in graph['children']:
                                for member in flow['children']:
                                    if 'type' in member:
                                        if member['data']['id'] == workload_id:
                                            msg = _(
                                                'Operation not allowed since this workload is a member of a composite workflow')
                                            raise wlm_exceptions.InvalidState(reason=msg)
            '''
            # First unschedule the job
            jobs = self.db.job_get_all(context)
            for job in jobs:
                if job.workload_id == workload_id:
                    self.cron_rpcapi.workload_pause(context, workload_id)
                    break

            if database_only is True:
                self.db.workload_update(
                    context, workload_id, {
                        'status': 'deleting'})

                # Remove workload entry from workload_vm's
                compute_service = nova.API(production=True)
                workload_vms = self.db.workload_vms_get(context, workload_id)
                for vm in workload_vms:
                    compute_service.delete_meta(
                        context, vm.vm_id, [
                            "workload_id", 'workload_name'])
                    self.db.workload_vms_delete(context, vm.vm_id, workload_id)

                # Remove all snapshots from workload
                snapshots = self.db.snapshot_get_all_by_workload(
                    context, workload_id)
                for snapshot in snapshots:
                    workload_utils.snapshot_delete(
                        context, snapshot.id, database_only)
                    self.db.snapshot_update(
                        context, snapshot.id, {
                            'status': 'deleted'})
                self.db.workload_delete(context, workload_id)

            else:
                snapshots = self.db.snapshot_get_all_by_project_workload(
                    context, context.project_id, workload_id)
                if len(snapshots) > 0:
                    msg = _(
                        'This workload contains snapshots. Please delete all snapshots and try again..')
                    raise wlm_exceptions.InvalidState(reason=msg)

                self.db.workload_update(
                    context, workload_id, {
                        'status': 'deleting'})
                self.scheduler_rpcapi.workload_delete(
                    context, FLAGS.scheduler_topic, workload_id)

            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Delete Submitted',
                workload)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_reset(self, context, workload_id):
        """
        Reset a workload. When a workload is reset, any overlay files that were
        created as part of the snapshot operation are commited back to original
        files
        """
        try:
            workload = self.workload_get(context, workload_id)
            display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Reset Requested',
                workload)
            if workload['status'] not in ['available', 'resetting']:
                msg = _("Workload status must be 'available'")
                raise wlm_exceptions.InvalidState(reason=msg)

            self.db.workload_update(
                context, workload_id, {
                    'status': 'resetting'})
            self.scheduler_rpcapi.workload_reset(
                context, FLAGS.scheduler_topic, workload_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                display_name +
                '\' Reset Submitted',
                workload)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_import_workloads_list(self, context, project_id=None):
        AUDITLOG.log(context, 'Get Import Workloads List Requested', None)
        if not context.is_admin:
            raise wlm_exceptions.AdminRequired()

        workloads = []
        for backup_endpoint in vault.CONF.vault_storage_nfs_export.split(','):
            vault.get_backup_target(backup_endpoint)
        for backup_endpoint in vault.CONF.vault_storage_nfs_export.split(','):
            backup_target = None
            try:
                backup_target = vault.get_backup_target(backup_endpoint)
                for workload_url in backup_target.get_workloads(context):
                    try:
                        workload_values = json.loads(backup_target.get_object(
                            os.path.join(workload_url, 'workload_db')))
                        if project_id is not None:
                            if workload_values['project_id'] == project_id:
                                workloads.append(workload_values)
                        else:
                            workloads.append(workload_values)
                    except Exception as ex:
                        LOG.exception(ex)
                        continue
            except Exception as ex:
                LOG.exception(ex)
            finally:
                pass

        AUDITLOG.log(context, 'Get Import Workloads List Completed', None)
        return workloads

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def import_workloads(self, context, workload_ids, upgrade):

        AUDITLOG.log(context, 'Import Workloads Requested', None)
        if context.is_admin is not True and upgrade is True:
            raise wlm_exceptions.AdminRequired()

        try:
            workloads = []
            # call get_backup_target that makes sure all shares are mounted
            for backup_endpoint in vault.CONF.vault_storage_nfs_export.split(
                    ','):
                vault.get_backup_target(backup_endpoint)

            module_name = 'workloadmgr.db.imports.import_workloads'
            import_workload_module = importlib.import_module(module_name)
            import_workload_module.import_settings(context, models.DB_VERSION)
            global_job_scheduler_status = self.workload_get_global_job_scheduler(context)
            import_workload_module.import_allowed_quotas(context)
            if not global_job_scheduler_status:
                # global job scheduler should be enable before
                # workload import to persist their job triggers
                self.workload_enable_global_job_scheduler(context)
            # Import Workload policies
            import_workload_module.import_policy(context, models.DB_VERSION)

            workloads = import_workload_module.import_workload(context, workload_ids, models.DB_VERSION, upgrade)
            if not global_job_scheduler_status:
                self.workload_disable_global_job_scheduler(context)

            self.cron_rpcapi.workload_ensure_global_job_scheduler(context)
        except wlm_exceptions.WorkloadsNotFound as ex:
            LOG.exception(ex)
            return {'workloads': {'imported_workloads': [],
                                  'failed_workloads': []}}
        except Exception as ex:
            LOG.exception(ex)
            raise ex

        AUDITLOG.log(context, 'Import Workloads Completed', None)
        return workloads

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_nodes(self, context):
        nodes = []
        try:
            cron_host = None
            for node_record in self.db.service_get_all_by_topic(
                    context, topic='workloadmgr-cron'):
                if not (node_record['disabled'] or node_record['deleted']):
                    cron_host = node_record.host
                    break

            for node_record in self.db.service_get_all_by_topic(
                    context, topic='workloadmgr-workloads'):
                try:
                    status = 'Up'
                    if not utils.service_is_up(
                            node_record) or node_record['disabled']:
                        status = 'Down'
                    ip_address_list = []
                    # showing ipv4 to user on dashboard
                    for ip_address in node_record.ip_addresses.split(';'):
                        try:
                            if ipaddress.IPv4Network(ip_address):
                                ip_address_list.append(ip_address)
                        except ipaddress.AddressValueError as ve:
                            LOG.debug("Invalid ipv4 address: {}".format(ip_address))
                            LOG.debug(ve)

                    # if ipv4 doesn't found then adding ipv6 address
                    if not ip_address_list:
                        for ip_address in node_record.ip_addresses.split(';'):
                            try:
                                if ipaddress.IPv6Network(ip_address):
                                    ip_address_list.append(ip_address)
                            except ipaddress.AddressValueError as ve:
                                LOG.debug("Invalid ipv6 address: {}".format(ip_address))
                                LOG.debug(ve)

                    is_controller = True if cron_host == node_record.host else False
                    node_data = {'node': node_record.host,
                                  'id': node_record.id,
                                  'version': node_record.version,
                                  'ipaddress': ';'.join(ip_address_list),
                                  'is_controller': is_controller,
                                  'is_vip': False,
                                  'status': status}
                    nodes.append(node_data)
                except Exception as ex:
                    LOG.exception(ex)
        except Exception as ex:
            LOG.exception(ex)
        return dict(nodes=nodes)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def remove_node(self, context, ip):
        try:
            for node_record in self.db.service_get_all_by_topic(
                    context, topic='workloadmgr-workloads'):
                try:
                    ipaddress = ''
                    ip_addresses = node_record.ip_addresses.split(';')
                    if len(node_record.ip_addresses) > 0 and len(
                            node_record.ip_addresses[0]) > 0:
                        ipaddress = ip_addresses[0]

                    if any([ipaddress == ip, node_record.host == ip]
                           ) and socket.gethostname() != node_record.host:
                        if utils.service_is_up(node_record):
                            msg = _(
                                "Node is up, Please shutdown/delete node then only this can be removed with this command")
                            raise wlm_exceptions.ErrorOccurred(reason=msg)
                        self.db.service_delete(context, int(node_record.id))

                except Exception as ex:
                    LOG.exception(ex)
                    raise ex
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_contego_status(self, context, host=None, ip=None):
        try:
            compute_service = nova.API(production=True)
            compute_contego_records = compute_service.contego_service_status(
                context, host, ip)
            return compute_contego_records
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def add_node(self, context, ip):
        try:
            for node_record in self.db.service_get_all_by_topic(
                    context, topic='workloadmgr-workloads'):
                try:
                    ipaddress = ''
                    ip_addresses = node_record.ip_addresses.split(';')
                    if len(node_record.ip_addresses) > 0 and len(
                            node_record.ip_addresses[0]) > 0:
                        ipaddress = ip_addresses[0]
                    if socket.gethostname() == node_record.host:
                        controller_ip = ipaddress
                    if any([ipaddress == ip, node_record.host == ip]):
                        msg = _("Other node with same ip addresss exists")
                        raise wlm_exceptions.ErrorOccurred(reason=msg)
                except Exception as ex:
                    LOG.exception(ex)
                    raise ex
            import subprocess
            file_name = context.user_id + '.txt'
            command = [
                'sudo',
                'curl',
                '-k',
                '--cookie-jar',
                file_name,
                '--data',
                "username=admin&password=password",
                "https://" +
                ip +
                "/login"]
            try:
                res = subprocess.check_output(command)
            except Exception as ex:
                msg = _("Error resolving " + ip)
                raise wlm_exceptions.ErrorOccurred(reason=msg)
            subprocess.call(command, shell=False)
            config_inputs = {}
            for setting in self.db.setting_get_all_by_project(
                    context, "Configurator"):
                config_inputs[setting.name] = setting.value
            if not config_inputs:
                msg = _("No configurations found")
                raise wlm_exceptions.ErrorOccurred(reason=msg)
            command = ['sudo', 'curl', '-k', '--cookie', file_name, '--data',
                       "refresh=1&from=api&tvault-primary-node=" +
                       controller_ip + "&nodetype=additional",
                       "https://" + ip + "/configure_vmware"]
            subprocess.call(command, shell=False)
            urls = [
                'configure_host',
                'authenticate_with_vcenter',
                'authenticate_with_swift',
                'register_service',
                'configure_api',
                'configure_scheduler',
                'configure_service',
                'start_api',
                'start_scheduler',
                'start_service',
                'register_workloadtypes',
                'workloads_import',
                'discover_vcenter',
                'ntp_setup']
            if len(config_inputs['swift_auth_url']) == 0:
                urls.remove('authenticate_with_swift')
            if config_inputs['import_workloads'] == 'off':
                urls.remove('workloads_import')
            if config_inputs['ntp_enabled'] == 'off':
                urls.remove('ntp_setup')
            for url in urls:
                command = [
                    'sudo',
                    'curl',
                    '-k',
                    '--cookie',
                    file_name,
                    "https://" +
                    ip +
                    "/" +
                    url]
                res = subprocess.check_output(command)
                if res != '{"status": "Success"}' and url != 'ntp_setup':
                    command = ['sudo', 'rm', '-rf', file_name]
                    subprocess.call(command, shell=False)
                    msg = _(res)
                    raise wlm_exceptions.ErrorOccurred(reason=msg)
            command = ['sudo', 'rm', '-rf', file_name]
            subprocess.call(command, shell=False)

        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_storage_usage(self, context):

        storages_usage = {}
        total_usage = 0
        nfsstats = vault.get_capacities_utilizations(context)
        for nfsshare in vault.CONF.vault_storage_nfs_export.split(','):
            nfsshare = nfsshare.strip()
            stat = nfsstats[nfsshare]

            total_capacity = stat['total_capacity']
            total_utilization = stat['total_utilization']
            nfsstatus = stat['nfsstatus']

            storages_usage[nfsshare] = {
                'storage_type': vault.CONF.vault_storage_type,
                'nfs_share(s)': [
                    {
                        "nfsshare": nfsshare,
                        "status": "Online" if nfsstatus else "Offline",
                        "capacity": utils.sizeof_fmt(total_capacity),
                        "utilization": utils.sizeof_fmt(total_utilization),
                    },
                ],
                'total_capacity': total_capacity,
                'total_utilization': total_utilization,
                'total_capacity_humanized': utils.sizeof_fmt(total_capacity),
                'total_utilization_humanized': utils.sizeof_fmt(total_utilization),
                'available_capacity_humanized': utils.sizeof_fmt(
                    float(total_capacity) - float(total_utilization)),
                'total_utilization_percent': round(
                    ((float(total_utilization) / float(total_capacity)) * 100),
                    2),
            }
        storage_usage = {
            'storage_usage': list(storages_usage.values()),
            'count_dict': {}}
        full = 0
        incr = 0
        total = 0
        full_size = 0
        incr_size = 0
        kwargs = {"get_all": True, "read_metadata": False}
        workload_kwargs = {'project_id': '', 'all_workloads': True, 'page_number': '', 'nfs_share': ''}
        all_workload_list = self.db.workload_get_all(context, **workload_kwargs)

        for workload in all_workload_list:
            kwargs['workload_id'] = workload.id
            for snapshot in self.db.snapshot_get_all(context, **kwargs):
                if snapshot.snapshot_type == 'full':
                    full = full + 1
                    full_size = full_size + float(snapshot.size)
                elif snapshot.snapshot_type == 'incremental':
                    incr = incr + 1
                    incr_size = incr_size + float(snapshot.size)
                total = total + 1

        if (full + incr) > 0:
            full_total_count_percent = \
                round(((float(full) / float((full + incr))) * 100), 2)
            storage_usage['count_dict']['full_total_count_percent'] = \
                str(full_total_count_percent)
            storage_usage['count_dict']['full_total_count'] = str(full)
            storage_usage['count_dict']['incr_total_count'] = str(incr)

        storage_usage['count_dict']['full'] = full_size
        storage_usage['count_dict']['incremental'] = incr_size
        storage_usage['count_dict']['total'] = full_size + incr_size
        total_usage = storage_usage['count_dict']['total']
        if float(total_usage) > 0:
            storage_usage['count_dict']['full_snaps_utilization'] = \
                round(((float(full_size) / float(total_usage)) * 100), 2)
            storage_usage['count_dict']['incremental_snaps_utilization'] = \
                round(((float(incr) / float(total_usage)) * 100), 2)
        else:
            storage_usage['count_dict']['full_snaps_utilization'] = '0'
            storage_usage['count_dict']['incremental_snaps_utilization'] = '0'
        return storage_usage

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_recentactivities(self, context, time_in_minutes):
        recentactivites = []
        now = timeutils.utcnow()
        time_offset = datetime.now() - datetime.utcnow()
        try:
            for workload in self.db.workload_get_all(
                    context,
                    read_deleted='yes',
                    dashboard_item='activities',
                    time_in_minutes=time_in_minutes
            ):
                recentactivity = {'activity_type': '',
                                  'activity_time': '',
                                  'activity_description': '',
                                  'activity_result': workload.status,
                                  'object_type': 'workload',
                                  'object_name': workload.display_name,
                                  'object_id': workload.id,
                                  'object_user_id': workload.user_id,
                                  'object_project_id': workload.project_id,
                                  }
                description_suffix = \
                    "(Workload: '%s' - '%s')" % \
                    (workload.display_name,
                     (workload.created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"))
                if workload.deleted:
                    recentactivity['activity_type'] = 'delete'
                    recentactivity['activity_time'] = workload.deleted_at
                    recentactivity['activity_description'] = \
                        "Workload deleted. " + description_suffix
                else:
                    recentactivity['activity_type'] = 'create'
                    recentactivity['activity_time'] = workload.created_at
                    if workload.status == 'error':
                        recentactivity['activity_description'] = \
                            "Workload failed. " + description_suffix
                    else:
                        recentactivity['activity_description'] = \
                            "Workload created. " + description_suffix
                recentactivites.append(recentactivity)

            for snapshot in self.db.snapshot_get_all(
                    context,
                    read_deleted='yes',
                    dashboard_item='activities',
                    time_in_minutes=time_in_minutes):
                recentactivity = {'activity_type': '',
                                  'activity_time': '',
                                  'activity_description': '',
                                  'activity_result': snapshot.status,
                                  'object_type': 'snapshot',
                                  'object_name': snapshot.display_name,
                                  'object_id': snapshot.id,
                                  'object_user_id': snapshot.user_id,
                                  'object_project_id': snapshot.project_id,
                                  }
                description_suffix = \
                    "(Snapshot: '%s' - '%s', Workload: '%s' - '%s')" % \
                    (snapshot.display_name,
                     (snapshot.created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"),
                     snapshot.workload_name,
                     (snapshot.workload_created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"))
                if snapshot.deleted:
                    recentactivity['activity_type'] = 'delete'
                    recentactivity['activity_time'] = snapshot.deleted_at
                    recentactivity['activity_description'] = \
                        "Snapshot deleted. " + description_suffix
                else:
                    recentactivity['activity_type'] = 'create'
                    recentactivity['activity_time'] = snapshot.created_at
                    if snapshot.status == 'error':
                        recentactivity['activity_description'] = \
                            "Snapshot failed. " + description_suffix
                    elif snapshot.status == 'available':
                        recentactivity['activity_description'] = \
                            "Snapshot created. " + description_suffix
                    elif snapshot.status == 'cancelled':
                        recentactivity['activity_type'] = 'cancel'
                        recentactivity['activity_description'] = \
                            "Snapshot cancelled. " + description_suffix
                    else:
                        recentactivity['activity_description'] = \
                            "Snapshot is in progress. " + description_suffix
                recentactivites.append(recentactivity)

            for restore in self.db.restore_get_all(
                    context,
                    read_deleted='yes',
                    dashboard_item='activities',
                    time_in_minutes=time_in_minutes):
                recentactivity = {'activity_type': '',
                                  'activity_time': '',
                                  'activity_description': '',
                                  'activity_result': restore.status,
                                  'object_type': 'restore',
                                  'object_name': restore.display_name,
                                  'object_id': restore.id,
                                  'object_user_id': restore.user_id,
                                  'object_project_id': restore.project_id,
                                  }
                description_suffix = \
                    "(Restore: '%s' - '%s', Snapshot: '%s' - '%s', Workload: '%s' - '%s')" % \
                    (restore.display_name,
                     (restore.created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"),
                     restore.snapshot_name,
                     (restore.snapshot_created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"),
                     restore.workload_name,
                     (restore.workload_created_at + time_offset).
                     strftime("%m/%d/%Y %I:%M %p"))
                if restore.deleted:
                    recentactivity['activity_type'] = 'delete'
                    recentactivity['activity_time'] = snapshot.deleted_at
                    recentactivity['activity_description'] = \
                        "Restore deleted. " + description_suffix
                else:
                    recentactivity['activity_type'] = 'create'
                    recentactivity['activity_time'] = restore.created_at
                    if restore.status == 'error':
                        recentactivity['activity_description'] = \
                            "Restore failed. " + description_suffix
                    elif restore.status == 'available':
                        recentactivity['activity_description'] = \
                            "Restore completed. " + description_suffix
                    elif restore.status == 'cancelled':
                        recentactivity['activity_type'] = 'cancel'
                        recentactivity['activity_description'] = \
                            "Restore Cancelled. " + description_suffix
                    else:
                        recentactivity['activity_description'] = \
                            "Restore is in progress. " + description_suffix
                recentactivites.append(recentactivity)

            recentactivites = sorted(recentactivites,
                                     key=itemgetter('activity_time'),
                                     reverse=True)
        except Exception as ex:
            LOG.exception(ex)
        return dict(recentactivites=recentactivites)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_auditlog(self, context, time_in_minutes, time_from, time_to):
        auditlog = []
        try:
            auditlog = AUDITLOG.get_records(
                time_in_minutes, time_from, time_to)
        except Exception as ex:
            LOG.exception(ex)
        return dict(auditlog=auditlog)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_workflow(self, context, workload_id):
        """
        Get the workflow of the workload. RPC call is made
        """
        try:
            return self.workloads_rpcapi.workload_get_workflow_details(
                context, socket.gethostname(), workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_topology(self, context, workload_id):
        """
        Get the topology of the workload. RPC call is made
        """
        try:
            return self.workloads_rpcapi.workload_get_topology(
                context, socket.gethostname(), workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_discover_instances(self, context, workload_id):
        """
        Discover Instances of a workload_type. RPC call is made
        """
        try:
            return self.workloads_rpcapi.workload_discover_instances(
                context, socket.gethostname(), workload_id)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    def _is_workload_paused(self, context, workload_id):
        workload = self.workload_get(context, workload_id)
        jobs = self.db.job_get_all(context)
        for job in jobs:
            if job.workload_id == workload_id:
                return False
        return True

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_pause(self, context, workload_id):
        """
        Pause workload job schedule. No RPC call is made
        """
        workload = self.workload_get(context, workload_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Pause Requested',
            workload)
        job = self.db.job_get(context, workload_id)
        if job:
            assert uuid.UUID(job.workload_id) == uuid.UUID(workload_id)
        self.cron_rpcapi.workload_pause(context, workload_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Pause Submitted',
            workload)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_resume(self, context, workload_id):
        workload = self.db.workload_get(context, workload_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Resume Requested',
            workload)
        job = self.db.job_get(context, workload_id)
        if job:
            msg = _('Workload job scheduler is not paused')
            raise wlm_exceptions.InvalidState(reason=msg)
        jobschedule = pickle.loads(bytes(workload['jobschedule'], 'utf-8'))
        if len(jobschedule) >= 1:
            self.cron_rpcapi.workload_add_scheduler_job(context, jobschedule, workload)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload['display_name'] +
            '\' Resume Submitted',
            workload)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_unlock(self, context, workload_id):
        workload = self.workload_get(context, workload_id)
        display_name = workload['display_name']
        AUDITLOG.log(
            context,
            'Workload \'' +
            display_name +
            '\' Unlock Requested',
            workload)
        if not workload['deleted']:
            self.db.workload_update(
                context, workload_id, {
                    'status': 'available'})
        AUDITLOG.log(
            context,
            'Workload \'' +
            display_name +
            '\' Unlock Submitted',
            workload)

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def workload_disable_global_job_scheduler(self, context):

        try:
            if vault.CONF.global_job_scheduler_override:
                raise Exception("global_job_scheduler_override is set to True"
                                " in workloadmgr.conf. Cannot disable global "
                                " job scheduler")

            if self.workload_get_global_job_scheduler(context) is False:
                # scheduler is already stopped. Nothing to do
                return

            self.cron_rpcapi.workload_disable_global_job_scheduler(context)

            setting = {'category': "job_scheduler",
                       'name': "global-job-scheduler",
                       'description': "Controls job scheduler status",
                       'value': False,
                       'user_id': context.user_id,
                       'is_public': False,
                       'is_hidden': True,
                       'metadata': {},
                       'type': "job-scheduler-setting", }

            try:
                self.db.setting_get(context, setting['name'], get_hidden=True, cloud_setting=True)
                self.db.setting_update(context, setting['name'], setting, cloud_setting=True)
            except wlm_exceptions.SettingNotFound:
                self.db.setting_create(context, setting)

        except Exception as ex:
            LOG.exception(ex)
            raise

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def workload_enable_global_job_scheduler(self, context):

        if self.workload_get_global_job_scheduler(context) is True:
            # scheduler is already running. Nothing to do
            return

        self.cron_rpcapi.workload_enable_global_job_scheduler(context)
        setting = {'category': "job_scheduler",
                   'name': "global-job-scheduler",
                   'description': "Controls job scheduler status",
                   'value': True,
                   'user_id': context.user_id,
                   'is_public': False,
                   'is_hidden': True,
                   'metadata': {},
                   'type': "job-scheduler-setting", }
        try:
            try:
                self.db.setting_get(context, setting['name'], get_hidden=True, cloud_setting=True)
                self.db.setting_update(context, setting['name'], setting, cloud_setting=True)
            except wlm_exceptions.SettingNotFound:
                self.db.setting_create(context, setting)

        except Exception as ex:
            LOG.exception(ex)
            raise Exception("Cannot enable job scheduler globally")

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workload_get_global_job_scheduler(self, context):

        try:
            cron_status = \
             self.cron_rpcapi.workload_get_global_job_scheduler(context)
            if cron_status is None:
                raise Exception("Error in retrieving cron status, "\
                        "check wlm-cron log")
            return cron_status
        except oslo_exceptions.MessagingTimeout as ex:
            LOG.exception(ex)
            raise Exception("Not able to connect with wlm-cron, "\
                    "Please check service status.")
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @create_trust()
    @check_license
    @wrap_check_policy
    def workload_snapshot(self, context, workload_id,
                          snapshot_type, name, description, is_scheduled=False):
        """
        Make the RPC call to snapshot a workload.
        """
        workload = None
        try:
            if is_scheduled:
                # In case of scheduled snapshots need to update the context
                job = self.db.job_get(context, workload_id)
                kwargs = pickle.loads(bytes(job.kwargs, 'utf-8'))
                kwargs['tenant_id'] = kwargs['project_id']
                context = nova._get_tenant_context(munchify(kwargs))
                admin_context = nova._get_tenant_context(munchify(kwargs), cloud_admin=True)
            else:
                admin_context = nova._get_tenant_context(context, cloud_admin=True)
        except Exception as ex:
            LOG.exception(ex)
            raise
        try:
            workload = self.workload_get(context, workload_id)
            if not workload.get("encryption"):
                wv_obj = workload_validator.WorkloadValidator(context=context, body=workload)
                wv_obj._validate_logical_path_for_workload_encryption()
        except Exception as ex:
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'workload_id': workload_id,
                'snapshot_type': "full",
                'display_name': name,
                'display_description': description,
                'host': '',
                'status': 'error',
                'error_msg': str(ex),
                'metadata': {}, }
            context = wlm_context.get_admin_context()
            snapshot = self.db.snapshot_create(context, options)
            return snapshot

        quota_exceeded = False
        error_msg = "Check allowed quotas"
        try:
            allowed_quota_obj = snapshot_quota_check(
                context.project_id, self.db
            )
            if allowed_quota_obj \
                    and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                # TODO: check how to handle this warning at UI side
                AUDITLOG.log(
                    context,
                    "Current project's snapshot allowed quota has reached to high watermark" +
                    "Contact to the admin",
                    None)
                LOG.warning("Current project's snapshot allowed quota has "
                            "reached to high watermark. Contact to the admin")
        except ValueError as e:
            LOG.exception(e)
            quota_exceeded = True
            error_msg = "Current project's snapshot allowed quota has exceeded. Contact to the admin"
        except Exception as ex:
            LOG.exception(ex)
            raise

        if not quota_exceeded:
            try:
                admin_context.is_admin = True
                total_storage = self.get_tenants_usage(admin_context)

                if total_storage and \
                    total_storage.get('tenants_usage', {}).get(context.project_id, {}):
                    allowed_quota_obj = storage_quota_check(
                        context.project_id,
                        self.db, total_storage['tenants_usage'][context.project_id].get('used_capacity',0))
                    if allowed_quota_obj \
                            and allowed_quota_obj.actual_value >= allowed_quota_obj.high_watermark:
                        # TODO: check how to handle this warning at UI side
                        AUDITLOG.log(
                            context,
                            "Current project's storage allowed quota has reached to high watermark" +
                            "Contact to the admin",
                            None)
                        LOG.warning("Current project's storage allowed quota has "
                                    "reached to high watermark. Contact to the admin")
            except ValueError as e:
                LOG.exception(e)
                quota_exceeded = True
                error_msg =  "Current project's storage allowed quota has exceeded. Contact to the admin"
            except Exception as ex:
                LOG.exception(ex)
                raise

        if quota_exceeded:
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'workload_id': workload_id,
                'snapshot_type': "full",
                'display_name': name,
                'display_description': description,
                'host': '',
                'status': 'error',
                'error_msg': error_msg,
                'metadata': {}, }

            context = wlm_context.get_admin_context()
            snapshot = self.db.snapshot_create(context, options)
            return snapshot

        try:
            if not workload:
                workload = self.workload_get(context, workload_id)
            snapshot_display_name = ''
            if name and len(name) > 0:
                snapshot_display_name = '\'' + name + '\''
            else:
                snapshot_display_name = '\'' + 'Undefined' + '\''

            AUDITLOG.log(context, 'Workload \'' + workload['display_name'] +
                         '\' ' + snapshot_type + ' Snapshot ' +
                         snapshot_display_name + ' Create Requested', workload)

            workloads = self.db.workload_get_all(context)
            for workload in workloads:
                if workload.deleted:
                    continue
                workload_type = self.db.workload_type_get(
                    context, workload.workload_type_id)
                if (workload_type.display_name == 'Composite'):
                    for kvpair in workload.metadata:
                        if kvpair['key'] == 'workloadgraph':
                            graph = json.loads(kvpair['value'])
                            for flow in graph['children']:
                                for member in flow['children']:
                                    if 'type' in member:
                                        if member['data']['id'] == workload_id:
                                            msg = _(
                                                'Operation not allowed since this workload is a member of a composite workflow')
                                            raise wlm_exceptions.InvalidState(
                                                reason=msg)

            try:
                workload_lock.acquire()
                workload = self.workload_get(context, workload_id)
                if workload['status'].lower() != 'available':
                    msg = _(
                        "Workload must be in the 'available' state to take a snapshot")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload_id, {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            instances = [workload_vm_obj.vm_id for workload_vm_obj in
                         self.db.workload_vms_get(context, workload['id'])]
            if len(instances) != len(set(instances)):
                msg = _('Workload has some duplicate vm entries. '
                        'Remove duplicate entries and retry again.\n'
                        'Hint: Open edit workload dialog in horizon dashboard '
                        'and update the workload to remove any deplications')
                raise wlm_exceptions.InvalidState(reason=msg)

            metadata = {}
            metadata.setdefault('cancel_requested', '0')
            instances = [workload_vm_obj.vm_id for workload_vm_obj in
                         self.db.workload_vms_get(context, workload['id'])]
            if len(instances) != len(set(instances)):
                msg = _('Workload has some duplicate vm entries. '
                        'Remove duplicate entries and retry again.\n'
                        'Hint: Open edit workload dialog in horizon dashboard '
                        'and update the workload to remove any deplications')
                raise wlm_exceptions.InvalidState(reason=msg)

            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'workload_id': workload_id,
                       'snapshot_type': snapshot_type,
                       'display_name': name,
                       'display_description': description,
                       'host': '',
                       'status': 'creating',
                       'metadata': metadata, }
            snapshot = self.db.snapshot_create(context, options)

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                snapshot['snapshot_type'] +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Create Submitted',
                workload)

            self.db.snapshot_update(context,
                                    snapshot.id,
                                    {'progress_percent': 0,
                                     'progress_msg': 'Snapshot operation is scheduled',
                                     'status': 'executing'})
            self.scheduler_rpcapi.workload_snapshot(
                context, FLAGS.scheduler_topic, snapshot['id'])
            return snapshot
        except Exception as ex:
            if 'snapshot' in locals():
                self.db.workload_update(
                    context, snapshot.workload_id, {
                        'status': 'available'})
                self.db.snapshot_update(
                    context, snapshot.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else "Unknown"})
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_get(self, context, snapshot_id):
        rv = self.db.snapshot_get(
            context,
            snapshot_id,
            project_only=context.project_only)
        snapshot_details = dict(rv)
        snapshot_vms = []
        try:
            for snapshot_vm_obj in self.db.snapshot_vms_get(
                    context, snapshot_id):
                snapshot_vm = {'id': snapshot_vm_obj.vm_id,
                               'name': snapshot_vm_obj.vm_name,
                               'status': snapshot_vm_obj.status, }
                metadata = {}
                for kvpair in snapshot_vm_obj.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                snapshot_vm['metadata'] = metadata
                snapshot_vms.append(snapshot_vm)
        except Exception as ex:
            LOG.debug("Failed to fetch snapshot: {}".format(snapshot_id))
            LOG.debug(ex)
        snapshot_details.setdefault('instances', snapshot_vms)
        return snapshot_details

    # @autolog.log_method(logger=Logger, log_args=False, log_retval=False)
    @wrap_check_policy
    def snapshot_show(self, context, snapshot_id):
        def _get_pit_resource_id(metadata, key):
            for metadata_item in metadata:
                if metadata_item['key'] == key:
                    pit_id = metadata_item['value']
                    return pit_id

        def _get_pit_resource(snapshot_vm_common_resources, pit_id):
            for snapshot_vm_resource in snapshot_vm_common_resources:
                if snapshot_vm_resource.resource_pit_id == pit_id:
                    return snapshot_vm_resource

        rv = self.db.snapshot_show(
            context, snapshot_id, project_only=context.project_only)
        if rv is None:
            msg = _("Not found snapshot or operation not allowed")
            wlm_exceptions.ErrorOccurred(reason=msg)

        snapshot_details = dict(rv.items())

        snapshot_vms = []
        try:
            for snapshot_vm_obj in self.db.snapshot_vms_get(
                    context, snapshot_id):
                snapshot_vm = {'id': snapshot_vm_obj.vm_id,
                               'name': snapshot_vm_obj.vm_name,
                               'status': snapshot_vm_obj.status,
                               }
                metadata = {}
                for kvpair in snapshot_vm_obj.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                snapshot_vm['metadata'] = metadata
                snapshot_vm['nics'] = []
                snapshot_vm['vdisks'] = []
                snapshot_vm['security_group'] = []
                snapshot_vm_resources = self.db.snapshot_vm_resources_get(
                    context, snapshot_vm_obj.vm_id, snapshot_id)
                snapshot_vm_common_resources = self.db.snapshot_vm_resources_get(
                    context, snapshot_id, snapshot_id)
                for snapshot_vm_resource in snapshot_vm_resources:
                    """ flavor """
                    if snapshot_vm_resource.resource_type == 'flavor':
                        vm_flavor = snapshot_vm_resource
                        snapshot_vm['flavor'] = {
                            'vcpus': self.db.get_metadata_value(
                                vm_flavor.metadata, 'vcpus'), 'ram': self.db.get_metadata_value(
                                vm_flavor.metadata, 'ram'), 'disk': self.db.get_metadata_value(
                                vm_flavor.metadata, 'disk'), 'ephemeral': self.db.get_metadata_value(
                                vm_flavor.metadata, 'ephemeral')}
                    """ security group """
                    if snapshot_vm_resource.resource_type == 'security_group':
                        if self.db.get_metadata_value(
                                snapshot_vm_resource.metadata,
                                'vm_attached') in (
                                True,
                                '1',
                                None):
                            snapshot_vm['security_group'].append(
                                {
                                    'name': self.db.get_metadata_value(
                                        snapshot_vm_resource.metadata,
                                        'name'),
                                    'security_group_type': self.db.get_metadata_value(
                                        snapshot_vm_resource.metadata,
                                        'security_group_type')})

                    """ nics """
                    if snapshot_vm_resource.resource_type == 'nic':
                        vm_nic_snapshot = self.db.vm_network_resource_snap_get(
                            context, snapshot_vm_resource.id)
                        nic_data = pickle.loads(bytes(vm_nic_snapshot.pickle, 'utf-8'))
                        nic = {'mac_address': nic_data['mac_address'],
                               'ip_address': nic_data['ip_address'], }
                        nic['network'] = {
                            'id': self.db.get_metadata_value(
                                vm_nic_snapshot.metadata, 'network_id'), 'name': self.db.get_metadata_value(
                                vm_nic_snapshot.metadata, 'network_name'), 'cidr': nic_data.get(
                                'cidr', None), 'network_type': nic_data['network_type']}

                        pit_id = _get_pit_resource_id(
                            vm_nic_snapshot.metadata, 'subnet_id')
                        if pit_id:
                            try:
                                vm_nic_subnet = _get_pit_resource(
                                    snapshot_vm_common_resources, pit_id)
                                vm_nic_subnet_snapshot = self.db.vm_network_resource_snap_get(
                                    context, vm_nic_subnet.id)
                                subnet = pickle.loads(
                                    bytes(vm_nic_subnet_snapshot.pickle, 'utf-8'))
                                nic['network']['subnet'] = {
                                    'id': subnet.get(
                                        'id', None), 'name': subnet.get(
                                        'name', None), 'cidr': subnet.get(
                                        'cidr', None), 'ip_version': subnet.get(
                                        'ip_version', None), 'gateway_ip': subnet.get(
                                        'gateway_ip', None), }
                            except Exception as ex:
                                LOG.exception(ex)
                        snapshot_vm['nics'].append(nic)
                    """ vdisks """
                    if snapshot_vm_resource.resource_type == 'disk':
                        vdisk = {
                            'label': self.db.get_metadata_value(
                                snapshot_vm_resource.metadata,
                                'label'),
                            'resource_id': snapshot_vm_resource.id,
                            'restore_size': snapshot_vm_resource.restore_size,
                            'vm_id': snapshot_vm_resource.vm_id}

                        if self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'image_id'):
                            vdisk['image_id'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'image_id')
                            vdisk['image_name'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'image_name')
                            vdisk['hw_qemu_guest_agent'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'hw_qemu_guest_agent')
                        else:
                            vdisk['volume_id'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_id')
                            vdisk['volume_name'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_name')
                            vdisk['volume_size'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_size')
                            vdisk['volume_type'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_type')
                            vdisk['volume_mountpoint'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_mountpoint')

                            vdisk['volume_id'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_id')
                            vdisk['volume_name'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_name')
                            vdisk['volume_size'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_size')
                            vdisk['volume_type'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_type')
                            vdisk['volume_mountpoint'] = self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'volume_mountpoint')
                            if self.db.get_metadata_value(
                                    snapshot_vm_resource.metadata, 'availability_zone'):
                                vdisk['availability_zone'] = self.db.get_metadata_value(
                                    snapshot_vm_resource.metadata, 'availability_zone')
                            vdisk['metadata'] = json.loads(self.db.get_metadata_value(
                                snapshot_vm_resource.metadata, 'metadata', "{}"))

                        snapshot_vm['vdisks'].append(vdisk)
                snapshot_vms.append(snapshot_vm)

        except Exception as ex:
            LOG.exception(ex)

        snapshot_details['instances'] = snapshot_vms
        return snapshot_details

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_get_all(self, context, search_opts={}):
        search_opts["read_metadata"] = False
        snapshots = self.db.snapshot_get_all(context, **search_opts)
        return snapshots

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_next(self, context, snapshot_id, workload_id):
        """

        Args:
            context: HttpRequest Object
            snapshot_id: snapshot_uuid
            workload_id: Workload_uuid

        Returns: list of snapshot ids.

        """
        t = []
        rv = self.db.snapshot_next(
            context, snapshot_id, workload_id, project_only=context.project_only)
        try:
            for each in rv:
                t.append((each.get('id')))
                return t
        except StopIteration as si:
            t.append(None)
        except Exception as e:
            LOG.exception(e)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_prev(self, context, snapshot_id, workload_id):
        """

        Args:
            context: HttpRequest Object
            snapshot_id: Snapshot_uuid
            workload_id: Workload_uuid

        Returns: list of previous snapshot_ids

        """
        t = []
        rv = self.db.snapshot_prev(
            context, snapshot_id, workload_id, project_only=context.project_only)
        try:
            for each in rv:
                t.append((each.get('id')))
                return t
        except StopIteration as si:
            t.append(None)

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def snapshot_delete(self, context, snapshot_id):
        """
        Delete a workload snapshot. No RPC call required
        """
        try:
            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot delete snapshot. ' +
                         'Expected role "%s" is not found. ' % vault.CONF.trustee_role +
                         'Please make sure user has the trustee role assigned'))
            snapshot = self.snapshot_get(context, snapshot_id)
            workload = self.workload_get(context, snapshot['workload_id'])
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            snapshot_snapshot_type = snapshot['snapshot_type']
            workload_display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Delete Requested',
                snapshot)

            if snapshot['status'] not in ['available', 'error', 'cancelled']:
                msg = _(
                    "Snapshot status must be 'available' or 'error' or 'cancelled'")
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available' and workload['status'].lower(
                ) != 'locked_for_delete':
                    msg = _(
                        "Workload must be in the 'available' state to delete a snapshot")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, snapshot['workload_id'], {
                        'status': 'locked_for_delete'})
            finally:
                workload_lock.release()

            restores = self.db.restore_get_all_by_project_snapshot(
                context, context.project_id, snapshot_id)
            for restore in restores:
                if restore.restore_type == 'test':
                    msg = _('This workload snapshot contains testbubbles')
                    raise wlm_exceptions.InvalidState(reason=msg)

            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'deleting'})

            status_messages = {'message': 'Snapshot delete operation starting'}
            options = {
                'display_name': "Snapshot Delete",
                'display_description': "Snapshot delete for snapshot id %s" % snapshot_id,
                'status': "starting",
                'status_messages': status_messages,
            }

            task = self.db.task_create(context, options)

            self.scheduler_rpcapi.snapshot_delete(
                context, FLAGS.scheduler_topic, snapshot_id, task.id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Delete Submitted',
                snapshot)

            return task.id

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    def _validate_restore_options(self, context, snapshot_id, options):
        if options.get('type', "") != "openstack":
            msg = _("'type' field in options is not set to 'openstack'")
            raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if 'openstack' not in options:
            msg = _("'openstack' field is not in options")
            raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options.get("restore_type", None) not in (
                'inplace', 'selective', 'oneclick'):
            msg = _(
                "'restore_type' field must be one of 'inplace', 'selective', 'oneclick'")
            raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options.get("restore_type", None) in ('inplace', 'selective'):
            # If instances is not available should we restore entire snapshot?
            if 'instances' not in options['openstack']:
                msg = _("'instances' field is not in found "
                        "in options['instances']")
                raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options['openstack'].get('restore_topology', False) is True:
           if len(options['openstack']['networks_mapping'].get('networks', [])) > 0:
              msg = _("While restoring network topology cannot pass network mapping.")
              raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

        if options.get("restore_type", None) in ('selective'):
            restore_instances = options['openstack']['instances']
            restore_ins_ids = set([ins['id'] for ins in restore_instances])

            snap_vms = self.db.snapshot_vms_get(context, snapshot_id)
            snap_vm_ids = set([vm.vm_id for vm in snap_vms])

            if len(restore_ins_ids - snap_vm_ids) > 0:
                msg = _("Please provide valid instance id in restore options.")
                raise wlm_exceptions.InvalidRestoreOptions(reason=msg)

            missing_vms = snap_vm_ids - restore_ins_ids
            if len(missing_vms) > 0:
                for missing_vm in missing_vms:
                    restore_instances.append({'include': False, 'id': missing_vm})

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def snapshot_restore(self, context, snapshot_id, test,
                         name, description, options):
        """
        Make the RPC call to restore a snapshot.
        """
        try:
            self._validate_restore_options(context, snapshot_id, options)

            snapshot = self.snapshot_get(context, snapshot_id)
            workload = self.workload_get(context, snapshot['workload_id'])
            workload_display_name = workload['display_name']
            snapshot_display_name = snapshot['display_name']
            snapshot_snapshot_type = snapshot['snapshot_type']

            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            restore_display_name = ''

            if not name or len(name) == 0:
                name = 'Undefined'
            restore_display_name = '\'' + name + '\''
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Create Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available':
                    msg = _(
                        "Workload must be in the 'available' state to restore")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'locked'})
            finally:
                workload_lock.release()
            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'restoring'})

            restore_type = "restore"
            if test:
                restore_type = "test"
            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'snapshot_id': snapshot_id,
                      'restore_type': restore_type,
                      'display_name': name,
                      'display_description': description,
                      'pickle': str(pickle.dumps(options, 0), 'utf-8'),
                      'host': '',
                      'status': 'restoring', }
            restore = self.db.restore_create(context, values)
            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))
            self.db.restore_update(context,
                                   restore.id,
                                   {'progress_percent': 0,
                                    'progress_msg': 'Restore operation is scheduled',
                                    'status': 'restoring'})
            self.scheduler_rpcapi.snapshot_restore(
                context, FLAGS.scheduler_topic, restore['id'])
            restore_display_name = restore['display_name']
            restore_restore_type = restore['restore_type']
            if restore_display_name == 'One Click Restore':
                local_time = self.get_local_time(
                    context, restore['created_at'])
                restore_display_name = local_time + \
                    ' (' + restore['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                snapshot_snapshot_type +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Create Submitted',
                restore)
            return restore
        except Exception as ex:
            if 'restore' in locals():
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'available'})
                self.db.snapshot_update(
                    context, snapshot_id, {
                        'status': 'available'})
                self.db.restore_update(
                    context, restore.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else {}})
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_cancel(self, context, snapshot_id):
        """
        Make the RPC call to cancel snapshot
        """
        try:
            snapshot = self.db.snapshot_get(context, snapshot_id)
            admin_context = wlm_context.get_admin_context()
            workload = self.workload_get(admin_context, snapshot['workload_id'])
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Cancel Requested',
                snapshot)
            if snapshot.status in ['available', 'cancelled', 'error']:
                return

            metadata = {}
            metadata.setdefault('cancel_requested', '1')
            self.db.snapshot_update(context,
                                    snapshot_id,
                                    {
                                        'metadata': metadata,
                                        'status': 'cancelling'
                                    })
            backup_target = vault.get_backup_target(
                workload['metadata']['backup_media_target'])
            tracking_dir = backup_target.get_progress_tracker_directory(
                {'snapshot_id': snapshot_id})
            fileutils.ensure_tree(tracking_dir)
            with open(os.path.join(tracking_dir, "cancelled"), "w") as f:
                 pass

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Cancel Submitted',
                snapshot)

            return True

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_restore_network_topology(
            self, context, snapshot_id, name, description
    ):
        """
        Make the RPC call to restore a snapshot's Network Topology.
        """
        snapshot = self.snapshot_show(context, snapshot_id)
        try:
            restore_options = workload_utils.get_restore_options(name, description, snapshot, restore_type='network_topology', restore_topology=True)
        except Exception as ex:
            LOG.exception('Failed to fetch network options, with error: %s', ex)
            raise ex

        admin_context = wlm_context.get_admin_context()
        workload = self.workload_get(admin_context, snapshot['workload_id'])
        workload_display_name = workload['display_name']
        snapshot_display_name = snapshot['display_name']
        try:
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(context, snapshot['created_at'])
                snapshot_display_name = local_time + ' (' + snapshot['display_name'] + ')'

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Network Topology Restore Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available':
                    msg = _("Workload must be in the 'available' state to restore")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'restoring'})

            restore_type = "network-topology"
            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'snapshot_id': snapshot_id,
                      'restore_type': restore_type,
                      'display_name': name,
                      'display_description': description,
                      'pickle': str(pickle.dumps(restore_options, 0), 'utf-8'),
                      'host': '',
                      'status': 'restoring', }
            restore = self.db.restore_create(context, values)

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))

            self.db.restore_update(context,
                                   restore.id,
                                   {'progress_percent': 0,
                                    'progress_msg': 'Network Topology Restore operation is scheduled',
                                    'status': 'restoring'})
            self.scheduler_rpcapi.network_topology_restore(
                context, FLAGS.scheduler_topic, restore['id'])

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Network Topology Restore Request Submitted',
                restore)
            return restore
        except Exception as ex:
            if 'restore' in locals():
                self.db.workload_update(context, workload['id'], {'status': 'available'})
                self.db.snapshot_update(context, snapshot_id, {'status': 'available'})
                self.db.restore_update(
                    context, restore.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else {}})

            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) % (ex.kwargs if hasattr(ex, 'kwargs') else {})
            )

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_mount(self, context, snapshot_id, mount_vm_id):
        """
        Make the RPC call to Mount the snapshot.
        """
        compute_service = nova.API(production=True)
        try:
            snapshot = self.snapshot_get(context, snapshot_id)
            server = compute_service.get_server_by_id(context, mount_vm_id)
            flavor_id = server.flavor['id']
            fl = compute_service.get_flavor_by_id(context, flavor_id)
            if fl.ephemeral:
                error_msg = "Recovery manager instance cannot have ephemeral disk"
                raise Exception(error_msg)

            if hasattr(server, 'image') and server.image:
                (image_service, image_id) = glance.get_remote_image_service(
                    context, server.image['id'])
                metadata = image_service.show(context, server.image['id'])
                error_msg = "Recovery manager instance needs to be created with glance image property 'hw_qemu_guest_agent=yes'"
                if 'hw_qemu_guest_agent' in metadata['properties'].keys():
                    if metadata['properties']['hw_qemu_guest_agent'] != 'yes':
                        raise Exception(error_msg)
                else:
                    raise Exception(error_msg)

            if not snapshot:
                msg = _('Invalid snapshot id')
                raise wlm_exceptions.Invalid(reason=msg)

            workload = self.workload_get(context, snapshot['workload_id'])
            snapshots_all = self.db.snapshot_get_all(context)
            for snapshot_one in snapshots_all:
                if snapshot_one.status == 'mounted':
                    if workload['source_platform'] == 'openstack':
                        mounted_vm_id = self.db.get_metadata_value(
                            snapshot_one.metadata, 'mount_vm_id')
                        if mounted_vm_id is not None:
                            if mount_vm_id == mounted_vm_id:
                                workload_one = self.db.workload_get(context, snapshot_one.workload_id)
                                msg = _(
                                    'snapshot %s (%s) from workload %s (%s) is already mounted on this VM' %
                                    (snapshot_one.display_name, snapshot_one.id, workload_one.display_name, workload_one.id))
                                raise wlm_exceptions.InvalidParameterValue(
                                    err=msg)

            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Mount Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            self.db.snapshot_update(context, snapshot_id,
                                    {'status': 'mounting'})
            self.scheduler_rpcapi.snapshot_mount(
                context, FLAGS.scheduler_topic, snapshot_id, mount_vm_id)

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Mount Submitted',
                snapshot)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_dismount(self, context, snapshot_id):
        """
        Make the RPC call to Dismount the snapshot.
        """
        try:
            snapshot = self.snapshot_get(context, snapshot_id)

            if not snapshot:
                msg = _('Invalid snapshot id')
                raise wlm_exceptions.Invalid(reason=msg)

            snapshot_db_obj = self.db.snapshot_get(context, snapshot_id)
            mount_vm_id = self.db.get_metadata_value(
                snapshot_db_obj.metadata, 'mount_vm_id')

            if not mount_vm_id:
                LOG.error(
                    _("Could not find recovery manager vm id in the snapshot metadata"))
                raise wlm_exceptions.Invalid(reason=msg)

            workload = self.workload_get(context, snapshot['workload_id'])
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Dismount Requested',
                snapshot)

            if snapshot['status'] != 'mounted':
                msg = _('Snapshot status must be mounted')
                raise wlm_exceptions.InvalidState(reason=msg)

            self.scheduler_rpcapi.snapshot_dismount(
                context, FLAGS.scheduler_topic, snapshot_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload['display_name'] +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Dismount Submitted',
                snapshot)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def snapshot_restore_security_groups(
            self, context, snapshot_id, name, description
    ):
        """
        Make the RPC call to restore a snapshot's Security Groups
        """
        snapshot = self.snapshot_show(context, snapshot_id)
        try:
            restore_options = workload_utils.get_restore_options(
                name, description, snapshot,
                restore_type='security_group', restore_topology=True
            )
        except Exception as ex:
            LOG.exception('Failed to fetch security group options, with error: %s', ex)
            raise ex

        admin_context = wlm_context.get_admin_context()
        workload = self.workload_get(admin_context, snapshot['workload_id'])
        workload_display_name = workload['display_name']
        snapshot_display_name = snapshot['display_name']
        try:
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(context, snapshot['created_at'])
                snapshot_display_name = local_time + ' (' + snapshot['display_name'] + ')'

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Security Groups Requested',
                snapshot)

            if snapshot['status'] != 'available':
                msg = _('Snapshot status must be available')
                raise wlm_exceptions.InvalidState(reason=msg)

            try:
                workload_lock.acquire()
                if workload['status'].lower() != 'available':
                    msg = _("Workload must be in the 'available' state to restore")
                    raise wlm_exceptions.InvalidState(reason=msg)
                self.db.workload_update(
                    context, workload['id'], {
                        'status': 'locked'})
            finally:
                workload_lock.release()

            self.db.snapshot_update(
                context, snapshot_id, {
                    'status': 'restoring'})

            restore_type = "security_group"
            values = {'user_id': context.user_id,
                      'project_id': context.project_id,
                      'snapshot_id': snapshot_id,
                      'restore_type': restore_type,
                      'display_name': name,
                      'display_description': description,
                      'pickle': str(pickle.dumps(restore_options, 0), 'utf-8'),
                      'host': '',
                      'status': 'restoring', }
            restore = self.db.restore_create(context, values)

            if hasattr(context, 'trust_failed'):
                raise wlm_exceptions.Invalid(
                    reason=_('Trust broken: Cannot perform backups operations. '
                             'Please make sure user has the trustee role assigned'))

            self.db.restore_update(context,
                                   restore.id,
                                   {'progress_percent': 0,
                                    'progress_msg': 'Security Groups operation is scheduled',
                                    'status': 'restoring'})
            self.scheduler_rpcapi.security_groups_restore(
                context, FLAGS.scheduler_topic, restore['id'])

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                ' Snapshot \'' +
                snapshot_display_name +
                '\' Security Groups Restore Request Submitted',
                restore)
            return restore
        except Exception as ex:
            if 'restore' in locals():
                self.db.workload_update(context, workload['id'], {'status': 'available'})
                self.db.snapshot_update(context, snapshot_id, {'status': 'available'})
                self.db.restore_update(
                    context, restore.id, {
                        'status': 'error', 'error_msg': ex.kwargs['reason'] if hasattr(
                            ex, 'kwargs') and 'reason' in ex.kwargs else {}})

            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) % (ex.kwargs if hasattr(ex, 'kwargs') else {})
            )

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def mounted_list(self, context, workload_id):
        """
        Gets list of mounted snapshots
        """
        try:
            mounted_snapshots = []
            kwargs = {"workload_id": workload_id}
            snapshots = self.db.snapshot_get_all(context, **kwargs)
            if len(snapshots) == 0:
                msg = _("Not found any snapshots")
                wlm_exceptions.ErrorOccurred(reason=msg)

            for snapshot in snapshots:
                if snapshot.status == 'mounted':
                    fspid = self.db.get_metadata_value(
                        snapshot.metadata, 'fsmanagerpid')
                    mounturl = fspid = self.db.get_metadata_value(
                        snapshot.metadata, 'mounturl')
                    # if (fspid and int(fspid) != -1) and (mounturl and
                    # len(mounturl) > 1):
                    mounted = {'snapshot_id': snapshot.id,
                               'snapshot_name': snapshot.display_name,
                               'workload_id': snapshot.workload_id,
                               'mounturl': mounturl,
                               'status': snapshot.status,
                               }
                    mounted_snapshots.append(mounted)
            return dict(mounted_snapshots=mounted_snapshots)
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_get(self, context, restore_id):
        rv = self.db.restore_get(context, restore_id)
        restore_details = dict(rv)

        snapshot = self.db.snapshot_get(
            context,
            rv.snapshot_id,
            read_deleted="yes",
            project_only=context.project_only)
        restore_details.setdefault('workload_id', snapshot.workload_id)

        restore_details['snapshot_details'] = dict(snapshot)

        instances = []
        try:
            vms = self.db.restored_vms_get(context, restore_id)
            for vm in vms:
                restored_vm = {'id': vm.vm_id,
                               'name': vm.vm_name,
                               'time_taken': vm.time_taken,
                               'status': vm.status, }
                metadata = {}
                for kvpair in vm.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                restored_vm['metadata'] = metadata
                instances.append(restored_vm)
        except Exception as ex:
            LOG.exception(ex)
        restore_details.setdefault('instances', instances)
        return restore_details

    # @autolog.log_method(logger=Logger, log_args=False, log_retval=False)
    @wrap_check_policy
    def restore_show(self, context, restore_id):
        rv = self.db.restore_show(context, restore_id)
        restore_details = dict(rv)

        snapshot = self.db.snapshot_get(
            context,
            rv.snapshot_id,
            read_deleted="yes",
            project_only=context.project_only)
        restore_details.setdefault('workload_id', snapshot.workload_id)

        restore_details['snapshot_details'] = dict(snapshot)
        instances = []
        try:
            vms = self.db.restored_vms_get(context, restore_id)
            for vm in vms:
                restored_vm = {'id': vm.vm_id,
                               'name': vm.vm_name,
                               'status': vm.status, }
                metadata = {}
                for kvpair in vm.metadata:
                    metadata.setdefault(kvpair['key'], kvpair['value'])
                restored_vm['metadata'] = metadata
                instances.append(restored_vm)
        except Exception as ex:
            LOG.exception(ex)
        restore_details.setdefault('instances', instances)

        ports_list = []
        networks_list = []
        subnets_list = []
        routers_list = []
        flavors_list = []
        security_groups_list = []
        try:
            resources = self.db.restored_vm_resources_get(
                context, restore_id, restore_id)
            for resource in resources:
                if resource.resource_type == 'port':
                    ports_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                if resource.resource_type == 'network':
                    networks_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'subnet':
                    subnets_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'router':
                    routers_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'flavor':
                    flavors_list.append(
                        {'id': resource.id, 'name': resource.resource_name})
                elif resource.resource_type == 'security_group':
                    security_groups_list.append(
                        {'id': resource.id, 'name': resource.resource_name})

        except Exception as ex:
            LOG.exception(ex)
        restore_details.setdefault('ports', ports_list)
        restore_details.setdefault('networks', networks_list)
        restore_details.setdefault('subnets', subnets_list)
        restore_details.setdefault('routers', routers_list)
        restore_details.setdefault('flavors', flavors_list)
        restore_details.setdefault('security_groups', security_groups_list)
        return restore_details

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_get_all(self, context, snapshot_id=None):
        restores = self.db.restore_get_all(context, snapshot_id)
        return restores

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_delete(self, context, restore_id):
        """
        Delete a workload restore.
        """
        restore_details = self.restore_show(context, restore_id)

        restore = self.db.restore_get(context, restore_id)
        snapshot = self.db.snapshot_get(context, restore['snapshot_id'])
        workload = self.workload_get(context, snapshot['workload_id'])
        restore_display_name = restore['display_name']
        if restore_display_name == 'One Click Restore':
            local_time = self.get_local_time(context, restore['created_at'])
            restore_display_name = local_time + \
                ' (' + restore['display_name'] + ')'
        snapshot_display_name = snapshot['display_name']
        if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
            local_time = self.get_local_time(context, snapshot['created_at'])
            snapshot_display_name = local_time + \
                ' (' + snapshot['display_name'] + ')'
        workload_display_name = workload['display_name']
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload_display_name +
            '\' ' +
            'Snapshot \'' +
            snapshot_display_name +
            '\' Restore \'' +
            restore_display_name +
            '\' Delete Requested',
            restore)

        if restore_details['target_platform'] == 'vmware':
            self.db.restore_delete(context, restore_id)
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Delete Submitted',
                restore)
            return

        if restore_details['status'] not in [
                'available', 'error', 'cancelled']:
            msg = _(
                "Status of the requested resource status must be 'available' or 'error' or 'cancelled'")
            raise wlm_exceptions.InvalidState(reason=msg)

        """
        if restore_details['restore_type'] == 'test':
            network_service =  neutron.API(production=False)
            compute_service = nova.API(production=False)
        else:
            network_service =  neutron.API(production=True)
            compute_service = nova.API(production=True)
        image_service = glance.get_default_image_service(production= (restore_details['restore_type'] != 'test'))
        for instance in restore_details['instances']:
            try:
                vm = compute_service.get_server_by_id(context, instance['id'])
                compute_service.delete(context, instance['id'])
                image_service.delete(context, vm.image['id'])
                #TODO(giri): delete the cinder volumes
            except Exception as exception:
                msg = _("Error deleting instance %(instance_id)s with failure: %(exception)s")
                LOG.debug(msg, {'instance_id': instance['id'], 'exception': exception})
                LOG.exception(exception)
        for port in restore_details['ports']:
            try:
                network_service.delete_port(context,port['id'])
            except Exception as exception:
                msg = _("Error deleting port %(port_id)s with failure: %(exception)s")
                LOG.debug(msg, {'port_id': port['id'], 'exception': exception})
                LOG.exception(exception)
        for router in restore_details['routers']:
            try:
                network_service.delete_router(context,router['id'])
            except Exception as exception:
                msg = _("Error deleting router %(router_id)s with failure: %(exception)s")
                LOG.debug(msg, {'router_id': router['id'], 'exception': exception})
                LOG.exception(exception)
        for subnet in restore_details['subnets']:
            try:
                network_service.delete_subnet(context,subnet['id'])
            except Exception as exception:
                msg = _("Error deleting subnet %(subnet_id)s with failure: %(exception)s")
                LOG.debug(msg, {'subnet_id': subnet['id'], 'exception': exception})
                LOG.exception(exception)
        for network in restore_details['networks']:
            try:
                network_service.delete_network(context,network['id'])
            except Exception as exception:
                msg = _("Error deleting network %(network_id)s with failure: %(exception)s")
                LOG.debug(msg, {'network_id': network['id'], 'exception': exception})
                LOG.exception(exception)
        for flavor in restore_details['flavors']:
            try:
                compute_service.delete_flavor(context,flavor['id'])
            except Exception as exception:
                msg = _("Error deleting flavor %(flavor_id)s with failure: %(exception)s")
                LOG.debug(msg, {'flavor_id': flavor['id'], 'exception': exception})
                LOG.exception(exception)
        for security_group in restore_details['security_groups']:
            try:
                network_service.security_group_delete(context,security_group['id'])
            except Exception as exception:
                msg = _("Error deleting security_group %(security_group_id)s with failure: %(exception)s")
                LOG.debug(msg, {'security_group_id': security_group['id'], 'exception': exception})
                LOG.exception(exception)
        """

        self.db.restore_delete(context, restore_id)
        AUDITLOG.log(
            context,
            'Workload \'' +
            workload_display_name +
            '\' ' +
            'Snapshot \'' +
            snapshot_display_name +
            '\' Restore \'' +
            restore_display_name +
            '\' Delete Submitted',
            restore)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def restore_cancel(self, context, restore_id):
        """
        Make the RPC call to cancel restore
        """
        try:
            restore = self.db.restore_get(context, restore_id)
            snapshot = self.db.snapshot_get(context, restore['snapshot_id'])
            admin_context = wlm_context.get_admin_context()
            workload = self.workload_get(admin_context, snapshot['workload_id'])
            restore_display_name = restore['display_name']
            if restore_display_name == 'One Click Restore':
                local_time = self.get_local_time(
                    context, restore['created_at'])
                restore_display_name = local_time + \
                    ' (' + restore['display_name'] + ')'
            snapshot_display_name = snapshot['display_name']
            if snapshot_display_name == 'User-Initiated' or snapshot_display_name == 'jobscheduler':
                local_time = self.get_local_time(
                    context, snapshot['created_at'])
                snapshot_display_name = local_time + \
                    ' (' + snapshot['display_name'] + ')'
            workload_display_name = workload['display_name']
            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Cancel Requested',
                restore)

            if restore.status in ['available', 'cancelled', 'error']:
                return

            metadata = {}
            metadata.setdefault('cancel_requested', '1')

            self.db.restore_update(context,
                                   restore_id,
                                   {
                                       'metadata': metadata,
                                       'status': 'cancelling'
                                   })
            backup_target = vault.get_backup_target(
                workload['metadata']['backup_media_target'])
            tracking_dir = backup_target.get_progress_tracker_directory(
                {'snapshot_id': restore.snapshot_id})
            fileutils.ensure_tree(tracking_dir)
            with open(os.path.join(tracking_dir, "cancelled"), "w") as f:
                 pass

            AUDITLOG.log(
                context,
                'Workload \'' +
                workload_display_name +
                '\' ' +
                'Snapshot \'' +
                snapshot_display_name +
                '\' Restore \'' +
                restore_display_name +
                '\' Cancel Submitted',
                restore)

            return True

        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_all_project_quota_types(self, context, quota_type_id=None):
        try:
            quota_types = self.db.get_all_project_quota_types(quota_type_id)
            return quota_types or []
        except Exception as ex:
            LOG.exception(ex)
        return []

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def create_project_quota_type(self, context, project_quota_type_obj):
        try:
            return self.db.create_project_quota_type(
                project_quota_type_obj.to_dict()
            ) or {}
        except Exception as ex:
            LOG.exception(ex)
        return {}

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def update_project_quota_types(self, context, update_dict):
        try:
            return self.db.update_project_quota_types(update_dict) or {}
        except Exception as ex:
            LOG.exception(ex)
        return {}

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_allowed_quotas(
            self, context, project_id=None,
            allowed_quota_id=None, quota_type_id=None
    ):
        try:
            return self.db.get_allowed_quotas(
                context, project_id, allowed_quota_id, quota_type_id
            ) or []
        except Exception as ex:
            LOG.exception(ex)
        return []

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def create_allowed_quotas(self, context, allowed_quota_objs):
        res = []
        try:
            data = [obj.to_dict() for obj in allowed_quota_objs]
            project_ids = [each_data['project_id'] for each_data in data]
            project_ids = list(set(project_ids))
            res = self.db.create_allowed_quotas(context, data) or []
            if res:
                # there is no possibility that allow quota creation
                # will happen for multiple projects at time, but still looping over
                for project_id in project_ids:
                    workload_utils.upload_allowed_quota_db_entry(
                        context, project_id
                    )
        except Exception as ex:
            LOG.exception(ex)
        return res

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def modify_allowed_quotas(self, context, allowed_quota_id, data):
        res = None
        try:
            project_id = data['project_id']
            is_modified = self.db.modify_allowed_quotas(context, allowed_quota_id, data) or []
            if is_modified:
                res = self.db.get_allowed_quotas(
                    context, project_id, allowed_quota_id
                )
                workload_utils.upload_allowed_quota_db_entry(
                    context, project_id
                )
                if res and isinstance(res, list):
                    return res[0]
        except Exception as ex:
            LOG.exception(ex)
        return res

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def delete_allowed_quota(self, context, allowed_quota_id):
        try:
            quota_obj = self.db.get_allowed_quotas(
                context, allowed_quota_id=allowed_quota_id
            )

            self.db.delete_allowed_quota(context, allowed_quota_id)
            if quota_obj and isinstance(quota_obj, list) and quota_obj[0].get('project_id', None):
                workload_utils.upload_allowed_quota_db_entry(
                    context, quota_obj[0].project_id
                )

        except Exception as ex:
            LOG.exception(ex)

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def settings_create(self, context, settings):
        created_settings = []
        try:
            for setting in settings:
                # if smtp_server_password exists in setting then encrypt it.
                if "smtp_server_password" in setting.get('name'):
                    setting['value'] = encrypt_password(setting.get('value'), tvault_key_file_name).decode('utf-8')

                created_settings.append(
                    self.db.setting_create(
                        context, setting))
        except Exception as ex:
            LOG.exception(ex)
        return created_settings

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def settings_update(self, context, settings):
        updated_settings = []
        try:
            for setting in settings:
                # if smtp_server_password exists in setting then encrypt it.
                if "smtp_server_password" in setting.get('name'):
                    setting['value'] = encrypt_password(setting.get('value'), tvault_key_file_name).decode('utf-8')

                updated_settings.append(
                    self.db.setting_update(
                        context, setting['name'], setting))
        except Exception as ex:
            LOG.exception(ex)
        return updated_settings

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def setting_delete(self, context, name):
        self.db.setting_delete(context, name)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def setting_get(self, context, name, get_hidden=False):
        try:
            return self.db.setting_get(context, name, get_hidden=get_hidden)
        except Exception as ex:
            LOG.exception(ex)
        return None

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def settings_get(self, context, get_hidden=False):
        settings = []
        try:
            return self.db.setting_get_all(context, get_hidden=get_hidden)
        except Exception as ex:
            LOG.exception(ex)
        return settings

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def task_show(self, context, task_id):

        task = self.db.task_get(context, task_id)
        task_dict = dict(task)
        return task_dict

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def tasks_get(self, context, status, page, size, time_in_minutes):
        tasks = self.db.task_get_all(
            context,
            status=status,
            page=page,
            size=size,
            time_in_minutes=time_in_minutes)
        return tasks

    @autolog.log_method(logger=Logger)
    def get_local_time(self, context, record_time):
        """
        Convert and return the date and time - from GMT to local time
        """
        try:
            epoch = time.mktime(record_time.timetuple())
            offset = datetime.fromtimestamp(
                epoch) - datetime.utcfromtimestamp(epoch)
            local_time = datetime.strftime(
                (record_time + offset), "%m/%d/%Y %I:%M %p")
            return local_time
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def trust_create(self, context, role_name, is_cloud_trust=False):

        try:
            # create trust
            cntx = wlm_context.RequestContext(
                trustor_user_id=context.user_id,
                auth_token=context.auth_token,
                tenant_id=context.project_id,
                roles=[role_name],
                is_admin=False)
            clients.initialise()
            keystoneclient = clients.Clients(cntx).client("keystone")
            trust_context = keystoneclient.create_trust_context()

            setting = {'category': "identity",
                       'name': "trust-%s" % str(uuid.uuid4()),
                       'description': 'token id for user %s project %s' %
                                       (context.user_id, context.project_id),
                       'value': trust_context.trust_id,
                       'user_id': 'cloud_admin' if is_cloud_trust else context.user_id,
                       'is_public': False,
                       'is_hidden': True,
                       'metadata': {'role_name': role_name},
                       'type': "trust_id", }
            created_settings = []
            try:
                created_settings.append(self.db.setting_create(context, setting))
            except Exception as ex:
                LOG.exception(ex)

            #Once new cloud trust is created remove old trusts.
            if is_cloud_trust is True:
                old_trusts = nova._get_trusts('cloud_admin', vault.CONF.cloud_admin_project_id)
                for t in old_trusts:
                    if created_settings[0].name != t.name:
                        self.trust_delete(context, t.name)

            return created_settings
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def trust_delete(self, context, name):

        trust = self.db.setting_get(context, name, get_hidden=True)
        if trust.type != "trust_id":
            msg = _("No trust record by name %s" % name)
            raise wlm_exceptions.Invalid(reason=msg)

        try:
            cntx = wlm_context.RequestContext(
                trustor_user_id=context.user_id,
                auth_token=context.auth_token,
                tenant_id=context.project_id,
                is_admin=False)

            clients.initialise()
            keystoneclient = clients.Clients(cntx).client("keystone")
            keystoneclient.delete_trust(trust.value)
        except Exception as ex:
            LOG.exception(ex)

        self.db.setting_delete(context, name)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def trust_list(self, context, get_hidden=False, is_cloud_admin=False):

        settings = self.db.setting_get_all_by_project(
            context, context.project_id,
            get_hidden=True, is_cloud_admin=is_cloud_admin)

        check_for_user_id = context.user_id
        if is_cloud_admin:
            check_for_user_id = 'cloud_admin'

        trust = [t for t in settings if t.type == "trust_id" and
                 t.user_id == check_for_user_id and
                 t.project_id == context.project_id]
        return trust

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def trust_get(self, context, name):
        try:
            return self.db.setting_get(context, name, get_hidden=True)
        except Exception as ex:
            LOG.exception(ex)
        return None

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def validate_scheduler_trust(self, context, workload_id):
        res = {'scheduler_enabled': False, 'trust': None, 'is_valid': False, 'scheduler_obj': None}
        try:
            jobs = self.db.job_get_all(context)
            scheduler_obj = None
            for job in jobs:
                if job.workload_id == workload_id:
                    scheduler_obj = job
                    res['scheduler_enabled'] = True
                    break
            if not scheduler_obj and not getattr(scheduler_obj, 'kwargs', None):
                return res

            scheduler_obj.kwargs = pickle.loads(bytes(scheduler_obj.kwargs, 'utf-8'))
            res['scheduler_obj'] = scheduler_obj.kwargs
            user_id = scheduler_obj.kwargs.get('user_id', None)
            project_id = scheduler_obj.kwargs.get('project_id', None)
            if user_id and project_id:
                trust_objs = self.db.setting_get_all_by_project(
                    context, project_id,
                    **{'user_id': user_id, 'type': 'trust_id'}
                )
                for obj in trust_objs:
                    cntx = wlm_context.RequestContext(
                        trustor_user_id=user_id,
                        project_id=project_id,
                        trust_id=obj.value)
                    clients.initialise()
                    keystone_client = clients.Clients(cntx).client("keystone")
                    trust_context = keystone_client.create_trust_context()
                    if trust_context and trust_context.trust_id:
                        res['trust'] = obj
                        res['is_valid'] = True
                        return res
        except Exception as ex:
            LOG.exception(ex)
        return res

    @autolog.log_method(logger=Logger)
    @upload_settings
    @wrap_check_policy
    def license_create(self, context, license_data):

        license_json = json.dumps(parse_license_text(license_data['lic_txt']))
        setting = {'category': "license",
                   'name': "license-%s" % str(uuid.uuid4()),
                   'description': 'TrilioVault License Key',
                   'value': license_json,
                   'user_id': context.user_id,
                   'is_public': False,
                   'is_hidden': True,
                   'metadata': {'filename': license_data['file_name']},
                   'type': "license_key", }
        created_license = []
        try:
            settings = self.db.setting_get_all(context, get_hidden=True)
            created_license.append(self.db.setting_create(context, setting))

            for setting in settings:
                if setting.type == "license_key":
                    try:
                        self.db.setting_delete(context, setting.name)
                    except BaseException as bex:
                        LOG.exception(bex)
        except Exception as ex:
            LOG.exception(ex)

        return json.loads(created_license[0].value)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def license_list(self, context):

        settings = self.db.setting_get_all(context, get_hidden=True)

        license = [t for t in settings if t.type == "license_key"]

        if len(license) == 0:
            raise Exception("No licenses added to TrilioVault")

        lic = json.loads(license[0].value)
        lic['metadata'] = license[0]['metadata']
        return lic

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def license_check(self, context, method=None):
        try:
            return self.get_usage_and_validate_against_license(context, method)
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    def get_usage_and_validate_against_license(self, context, method=None):
        global total_storage_usage
        global total_storage_usage_refresh

        admin_context = wlm_context.get_admin_context()
        license_key = self.license_list(admin_context)

        kwargs = {}
        if 'Compute Nodes' in license_key['Licensed For']:
            kwargs['compute_nodes'] = len(
                workload_utils.get_compute_nodes(context))
        elif 'Virtual Machines' in license_key['Licensed For']:
            kwargs['virtual_machines'] = len(
                self.db.workload_vms_get(
                    admin_context, None))
        elif ' Backup Capacity' in license_key['Licensed For']:
            if total_storage_usage is None:
                total_storage_usage = self.get_storage_usage(admin_context)
                total_storage_usage_refresh = datetime.now()
            elif datetime.now() > total_storage_usage_refresh + \
                                timedelta(0, 0, 0, 0, 0, 12, 0):
                total_storage_usage = self.get_storage_usage(admin_context)
                total_storage_usage_refresh = datetime.now()

            total_utilization = total_storage_usage[
                'storage_usage'][0]['total_utilization']
            kwargs['capacity_utilized'] = total_utilization

        try:
            return validate_license_key(license_key, method, **kwargs)
        except Exception as ex:
            LOG.exception(ex)
            raise

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_orphaned_workloads_list(self, context, migrate_cloud=None):
        AUDITLOG.log(context, 'Get Orphaned Workloads List Requested', None)
        if not context.is_admin:
            raise wlm_exceptions.AdminRequired()
        workloads = []
        keystone_client = KeystoneClient(context)
        projects = keystone_client.client.get_project_list_for_import(context)
        tenant_list = [project.id for project in projects]
        users = keystone_client.get_user_list()
        user_list = [user.id for user in users]

        def _check_workload(context, workload):
            project_id = workload.get('project_id')
            user_id = workload.get('user_id')
            # Check for valid tenant
            if project_id in tenant_list:
                # Check for valid workload user
                if keystone_client.client.user_exist_in_tenant(
                        project_id, user_id):
                    pass
                else:
                    workloads.append(workload)
            else:
                workloads.append(workload)

        if not migrate_cloud:
            kwargs = {
                'project_list': tenant_list,
                'exclude': True,
                'user_list': user_list}
            workloads_in_db = self.db.workload_get_all(context, **kwargs)
            for workload in workloads_in_db:
                workloads.append(workload)
        else:
            for backup_endpoint in vault.CONF.vault_storage_nfs_export.split(
                    ','):
                backup_target = None
                try:
                    backup_target = vault.get_backup_target(backup_endpoint)
                    for workload_url in backup_target.get_workloads(context):
                        try:
                            workload_values = json.loads(backup_target.get_object(
                                os.path.join(workload_url, 'workload_db')))
                            _check_workload(context, workload_values)
                        except Exception as ex:
                            LOG.exception(ex)
                except Exception as ex:
                    LOG.exception(ex)
        AUDITLOG.log(context, 'Get Orphaned Workloads List Completed', None)
        return workloads

    def _update_workloads(self, context, workloads_to_update,
                          jobscheduler_map, new_tenant_id, user_id):
        '''
        Update the values for tenant_id and user_id for given list of
        workloads in database.
        '''
        updated_workloads = []
        workload_update_map = []
        snapshot_update_map = []
        topology_update_map = []

        try:
            DBSession = get_session()
            kwargs = {'session': DBSession}
            # Create the list for workloads and snapshots to update in
            # database with new project_id and user_id
            for workload_id in workloads_to_update:
                workload_map = {
                    'id': workload_id,
                    'jobschedule': jobscheduler_map[workload_id],
                    'project_id': new_tenant_id,
                    'user_id': user_id}
                workload_update_map.append(workload_map)
                snapshots = self.db.snapshot_get_all_by_workload(
                    context, workload_id, **kwargs)
                for snapshot in snapshots:
                    snapshot_map = {
                        'id': snapshot.get('id'),
                        'project_id': new_tenant_id,
                        'user_id': user_id}
                    snapshot_update_map.append(snapshot_map)

                    topology_resources = self.db.snapshot_network_resources_get(
                                                 context, snapshot.get('id'), **kwargs)
                    for topology_resource in topology_resources:
                        for metadata in topology_resource.metadata:
                            if metadata['key'] == 'json_data':
                                metadata_json = json.loads(metadata['value'])
                                metadata_json['tenant_id'] = new_tenant_id
                                metadata_json['project_id'] = new_tenant_id
                                resource_map = {
                                    'id': metadata.get('id'),
                                    'value' : json.dumps(metadata_json)}
                                topology_update_map.append(resource_map)

                # Removing workloads from job scheduler
                jobs = self.db.job_get_all(context)
                for job in jobs:
                    if job.workload_id == workload_id:
                        self.cron_rpcapi.workload_pause(context, workload_id)
                        break

            # Update all the entries for workloads and snapshots in database
            DBSession.bulk_update_mappings(
                models.Workloads, workload_update_map)
            DBSession.bulk_update_mappings(
                models.Snapshots, snapshot_update_map)
            DBSession.bulk_update_mappings(
                models.SnapNetworkResourceMetadata, topology_update_map)
            for workload_id in workloads_to_update:
                workload = self.db.workload_get(context, workload_id)
                updated_workloads.append(workload)

            return updated_workloads
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def workloads_reassign(self, context, tenant_maps):
        '''
        Reassign given list of workloads to new tenant_id and user_id.
        '''
        try:
            AUDITLOG.log(context, 'Reassign workloads Requested', None)
            if not context.is_admin:
                raise wlm_exceptions.AdminRequired()

            keystone_client = KeystoneClient(context)

            reassigned_workloads = []
            failed_workloads = []
            projects = keystone_client.client.get_project_list_for_import(
                context)
            tenant_list = [project.id for project in projects]

            if len(tenant_list) == 0:
                raise Exception(
                    "User %s is not member of source tenants "
                    "and target tenant" % context.user_id)

            for tenant_map in tenant_maps:
                workload_to_update = []
                workload_to_import = []
                workload_ids = tenant_map['workload_ids']
                old_tenant_ids = tenant_map['old_tenant_ids']
                new_tenant_id = tenant_map['new_tenant_id']
                user_id = tenant_map['user_id']
                migrate_cloud = tenant_map['migrate_cloud']

                # If workload id's are provided then check whether they
                # exist in current cloud or not.
                workloads_old_tenant_ids = []
                if workload_ids:
                    kwargs = {'workload_list': workload_ids}
                    workloads = self.db.workload_get_all(context, **kwargs)

                    if len(workloads) == len(workload_ids):
                        workload_to_update.extend(workload_ids)

                    if len(workloads) != len(
                            workload_ids) and migrate_cloud is not True:
                        raise Exception(
                            "Workload ids do not belong to current cloud. "
                            "To reassign them from another cloud, set migrate_cloud option to True.")

                    for workload in workloads:
                        workloads_old_tenant_ids.extend([self.db.workload_get(context, workload.id).project_id])

                    old_tenant_ids = list(set(old_tenant_ids))
                    # In case workload id's are provided and some are in DB and
                    # some need to import then filter those workloads here.
                    if len(workloads) != len(
                            workload_ids) and migrate_cloud is True:

                        if len(workloads) == 0:
                            workload_to_import.extend(workload_ids)
                        else:
                            workloads = self.db.workload_get_all(context)
                            workload_ids_in_db = [
                                workload.id for workload in workloads]
                            for workload_id in workload_ids:
                                if workload_id in workload_ids_in_db:
                                    workload_to_update.append(workload_id)
                                else:
                                    workload_to_import.append(workload_id)

                if new_tenant_id in tenant_list and \
                    all(id in tenant_list for id in workloads_old_tenant_ids):
                    # Check for valid workload user
                    if keystone_client.client.user_exist_in_tenant(
                            new_tenant_id, user_id) is False:
                        raise wlm_exceptions.UserNotFound(user_id=user_id)
                    if keystone_client.client.check_user_role(
                            new_tenant_id, user_id) is False:
                        raise wlm_exceptions.RoleNotFound(
                            user_id=user_id, role_name=vault.CONF.trustee_role, project_id=new_tenant_id)

                    # If old_teanat_id is provided then look for all workloads
                    # under that tenant
                    if old_tenant_ids:
                        if not migrate_cloud:
                            for old_tenant_id in old_tenant_ids:
                                if old_tenant_id not in tenant_list:
                                    raise wlm_exceptions.ProjectNotFound(
                                        project_id=old_tenant_id)
                            kwargs = {'project_list': old_tenant_ids}
                            workloads_in_db = self.db.workload_get_all(
                                context, **kwargs)
                            for workload in workloads_in_db:
                                workload_to_update.append(workload.id)
                        else:
                            # When migrate is True then there could be the scenerio where some
                            # of the workloads exist in DB, in that case directly importing
                            # them will result in error, so filtering those
                            # workloads here
                            workload_ids = vault.get_workloads_for_tenant(
                                context, old_tenant_ids)
                            kwargs = {'workload_list': workload_ids}
                            workloads = self.db.workload_get_all(
                                context, **kwargs)
                            if len(workloads) == 0:
                                workload_to_import.extend(workload_ids)
                            else:
                                workloads_in_db = self.db.workload_get_all(
                                    context)
                                workload_ids_in_db = [
                                    workload.id for workload in workloads_in_db]
                                for workload_id in workload_ids:
                                    if workload_id in workload_ids_in_db:
                                        workload_to_update.append(workload_id)
                                    else:
                                        workload_to_import.append(workload_id)

                    total_workloads = len(workload_to_import) + len(workload_to_update)
                    if total_workloads:
                        try:
                            allowed_quota_obj = workload_quota_check(
                                new_tenant_id, self.db,
                                import_count=total_workloads
                            )
                            if allowed_quota_obj \
                                    and allowed_quota_obj.actual_value == allowed_quota_obj.high_watermark:
                                # TODO: check how to handle this warning at UI side
                                AUDITLOG.log(
                                    context,
                                    "Destination project's workload allowed quota has reached to high watermark" +
                                    "Contact to the admin",
                                    None)
                                LOG.warning("Destination project's workload allowed quota has "
                                            "reached to high watermark. Contact to the admin")
                        except ValueError as e:
                            raise wlm_exceptions.QuotaLimitExceededError()
                        except Exception as ex:
                            LOG.exception(ex)
                            raise
                    if workload_to_import:
                        vault.update_workload_db(
                            context, workload_to_import, new_tenant_id, user_id)
                        imported_workloads = self.import_workloads(
                            context, workload_to_import, True)
                        reassigned_workloads.extend(
                            imported_workloads['workloads']['imported_workloads'])
                        failed_workloads.extend(
                            imported_workloads['workloads']['failed_workloads'])

                    if workload_to_update:
                        jobscheduler_map = vault.update_workload_db(
                            context, workload_to_update, new_tenant_id, user_id)
                        for wid in workload_to_update:
                            if not wid in jobscheduler_map:
                                LOG.error("workload %s directory structure is corrupt or missing files" % wid)
                                raise Exception("workload %s directory structure is corrupt or missing files" % wid)
                        updated_workloads = self._update_workloads(
                            context, workload_to_update, jobscheduler_map, new_tenant_id, user_id)
                        reassigned_workloads.extend(updated_workloads)

                else:
                    LOG.error("project %s is not part of either some of source projects or "
                              "target project" % context.project_id)
                    raise wlm_exceptions.ProjectNotFound(
                            project_id=context.project_id)
            return {'workloads': {'reassigned_workloads': reassigned_workloads,
                                  'failed_workloads': failed_workloads}}

        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def config_workload(self, context, jobschedule, config_data):
        """
        Make the RPC call to create/update a config workload.
        """
        try:
            AUDITLOG.log(context, 'Config workload update Requested', None)

            def _get_matadata(metadata, key):
                for meta in metadata:
                    if meta.key == key:
                        if key == 'authorized_key':
                            return meta.value
                        else:
                            return pickle.loads(bytes(meta.value, 'utf-8'))

            try:
                existing_config_workload = self.db.config_workload_get(context)
                if existing_config_workload['status'].lower() == 'locked':
                    message = "Config workload is not available. " \
                        "Please wait for config backup to complete."
                    raise wlm_exceptions.InvalidState(reason=message)
            except wlm_exceptions.ConfigWorkloadNotFound:
                existing_config_workload = None
                # When user configuring for the first time
                if ('databases' in config_data) is False or len(
                        list(config_data['databases'].keys())) == 0:
                    message = "Database credentials are required to configure config backup."
                    raise wlm_exceptions.ErrorOccurred(reason=message)

                if ('trusted_user' not in config_data or 'authorized_key' not in config_data) or\
                        str(config_data['trusted_user'].get('username', None)).lower() == 'none':
                    message = "To backup controller nodes and database, please provide trusted user and " \
                              "authorized_key, which will be used to connect with controller nodes."
                    raise wlm_exceptions.ErrorOccurred(reason=message)

            metadata = {}
            # Validate trusted_user and authorized_key
            if ('trusted_user' in config_data or 'authorized_key' in config_data):
                trusted_user = _get_matadata(
                    existing_config_workload.metadata,
                    'trusted_user') if config_data.get(
                    'trusted_user',
                    None) is None else config_data.get('trusted_user')
                if 'authorized_key' in config_data:
                    authorized_key = vault.get_key_file(
                        config_data['authorized_key'], temp=True)
                else:
                    authorized_key = _get_matadata(
                        existing_config_workload.metadata, 'authorized_key')

                trust_creds = {
                    'trusted_user': trusted_user,
                    'authorized_key': authorized_key}
                workload_utils.validate_trusted_user_and_key(
                    context, trust_creds)
                if 'trusted_user' in config_data:
                    metadata['trusted_user'] = str(pickle.dumps(
                        config_data.pop('trusted_user'), 0), 'utf-8')
                if 'authorized_key' in config_data:
                    os.remove(authorized_key)
                    trust_creds['authorized_key'] = metadata['authorized_key'] = vault.get_key_file(
                        config_data['authorized_key'])

            if 'databases' in config_data:
                if 'trust_creds' not in locals():
                    trust_creds['trusted_user'] = _get_matadata(
                        existing_config_workload.metadata, 'trusted_user')
                    trust_creds['authorized_key'] = _get_matadata(
                        existing_config_workload.metadata, 'authorized_key')
                workload_utils.validate_database_creds(
                    context, config_data['databases'], trust_creds)
                metadata['databases'] = str(pickle.dumps(
                    config_data.pop('databases'), 0), 'utf-8')

            metadata['services_to_backup'] = str(pickle.dumps(
                config_data.pop('services_to_backup'), 0), 'utf-8')

            backup_target, path = vault.get_settings_backup_target()
            # Create new OpenStack workload
            if existing_config_workload is None:
                backup_target, path = vault.get_settings_backup_target()
                options = {
                    'id': vault.CONF.cloud_unique_id,
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'host': socket.gethostname(),
                    'status': 'creating',
                    'metadata': metadata,
                    'jobschedule': str(pickle.dumps(jobschedule, 0), 'utf-8'),
                    'backup_media_target': backup_target.backup_endpoint,
                }
                config_workload = self.db.config_workload_update(
                    context, options)
                # Add to scheduler jobs
                self.workload_add_scheduler_job(
                    context, jobschedule, config_workload, is_config_backup=True)
            else:
                # Update existing config workload
                options = {}
                options['metadata'] = metadata
                if len(jobschedule):
                    options['jobschedule'] = str(pickle.dumps(jobschedule, 0), 'utf-8')
                    config_workload = self.db.config_workload_update(
                        context, options)

                    existing_joschedule = pickle.loads(
                        bytes(existing_config_workload.get('jobschedule'), 'utf-8'))
                    existing_scheduler_status = False
                    if str(existing_joschedule.get(
                            'enabled')).lower() == 'true':
                        existing_scheduler_status = True

                    if str(jobschedule['enabled']).lower() == 'true':
                        if existing_scheduler_status is True:
                            # Case when scheduler is updated with some new values
                            # Need to update scheduler with new values so that it
                            # reflect changes instantly.
                            job = self._scheduler.get_config_backup_job()
                            if job is not None:
                                self._scheduler.unschedule_config_backup_job(
                                    job)

                        # Add to scheduler jobs
                        self.workload_add_scheduler_job(
                            context, jobschedule, config_workload, is_config_backup=True)

                    if str(jobschedule['enabled']).lower() == 'false':
                        if existing_scheduler_status is True:
                            # Remove from scheduler jobs
                            job = self._scheduler.get_config_backup_job()
                            if job is not None:
                                self._scheduler.unschedule_config_backup_job(
                                    job)
                        else:
                            LOG.warning("Config workload is already disabled.")
                else:
                    config_workload = self.db.config_workload_update(
                        context, options)

            self.db.config_workload_update(context,
                                           {
                                               'status': 'available',
                                               'error_msg': None,
                                           })
            workload_utils.upload_config_workload_db_entry(context)
            AUDITLOG.log(
                context,
                'Config workload update submitted.',
                config_workload)
            return config_workload
        except Exception as ex:
            LOG.exception(ex)
            if 'config_workload' in locals():
                self.db.config_workload_update(
                    context, {
                        'status': 'error', 'error_msg': str(
                            str(ex))})
                workload_utils.upload_config_workload_db_entry(context)
            raise ex

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_config_workload(self, context):
        try:
            try:
                config_workload = self.db.config_workload_get(context)
            except wlm_exceptions.ConfigWorkloadNotFound as ex:
                raise ex

            config_workload_dict = dict(config_workload)

            config_workload_dict['jobschedule'] = pickle.loads(
                bytes(config_workload.jobschedule, 'utf-8'))
            config_workload_dict['jobschedule']['enabled'] = False
            config_workload_dict['jobschedule'][
                'global_jobscheduler'] = self._scheduler.running
            # find the job object based on config_workload_id
            job = self._scheduler.get_config_backup_job()
            if job is not None:
                config_workload_dict['jobschedule']['enabled'] = True
                timedelta = job.compute_next_run_time(
                    datetime.now()) - datetime.now()
                config_workload_dict['jobschedule']['nextrun'] = timedelta.total_seconds(
                )
            return config_workload_dict
        except Exception as ex:
            LOG.exception(ex)
            raise ex

    @autolog.log_method(logger=Logger)
    @create_trust()
    @wrap_check_policy
    def config_backup(self, context, name, description):
        """
        Make the RPC call to backup OpenStack configuration.
        """
        try:
            try:
                config_workload = self.db.config_workload_get(context)
            except wlm_exceptions.ConfigWorkloadNotFound as ex:
                message = 'OpenStack config backup is not configured. First configure it.'
                raise wlm_exceptions.ErrorOccurred(reason=message)

            if config_workload['status'].lower() != 'available':
                message = "Config workload is not available. " \
                          "Please wait for other backup to complete."
                raise wlm_exceptions.InvalidState(reason=message)
            self.db.config_workload_update(context, {'status': 'locked'})

            AUDITLOG.log(
                context,
                'OpenStack configuration backup ' +
                name +
                ' Create Requested',
                None)

            options = {'config_workload_id': vault.CONF.cloud_unique_id,
                       'user_id': context.user_id,
                       'project_id': context.project_id,
                       'display_name': name,
                       'display_description': description,
                       'start_date': time.strftime("%x"),
                       'status': 'creating', }

            backup = self.db.config_backup_create(context, options)
            self.db.config_backup_update(
                context, backup.id, {
                    'progress_msg': 'Config backup operation is scheduled', 'status': 'starting'})
            self.scheduler_rpcapi.config_backup(
                context, FLAGS.scheduler_topic, backup['id'], request_spec={})
            AUDITLOG.log(
                context,
                'OpenStack configuration backup ' +
                name +
                ' Create Submitted.',
                backup)
            return backup
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_config_backups(self, context, backup_id=None):
        """
        Return list/single of backups.
        """
        try:
            if backup_id:
                backups = self.db.config_backup_get(context, backup_id)
            else:
                backups = self.db.config_backup_get_all(context)
            return backups
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def config_backup_delete(self, context, backup_id):
        """
        Delete a config backup.
        """
        try:
            config_backup = self.get_config_backups(context, backup_id)
            config_workload = self.get_config_workload(context)
            backup_display_name = config_backup['display_name']

            AUDITLOG.log(context, 'Config backup ' + backup_display_name +
                         ' Delete Requested', config_backup)
            if config_backup['status'] not in ['available', 'error']:
                msg = _("Config backup status must be 'available' or 'error'.")
                raise wlm_exceptions.InvalidState(reason=msg)

            backup = self.db.config_backup_update(
                context, backup_id, {'status': 'deleting'})
            workload_utils.config_backup_delete(context, backup_id)

            AUDITLOG.log(
                context,
                'OpenStack cofig backup  ' +
                backup_display_name +
                ' Delete Submited',
                config_backup)
            return backup
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    # Workload Policy API's
    @wrap_check_policy
    def policy_create(self, context, name, description, metadata, field_values):
        """
        Create a policy.
        """
        try:
            AUDITLOG.log(context, 'Policy \'' + name +
                         '\' Create Requested', None)

            # verify field names provided with available policy fields
            fields = self.db.policy_fields_get_all(context)
            policy_fields = [f.field_name for f in fields]

            if (len(list(field_values.keys())) != len(policy_fields)) or len(set(field_values.keys()).difference(set(policy_fields))) > 0:
                raise wlm_exceptions.InvalidRequest(
                    reason="Please provide only %s as part of policy fields" % str(policy_fields))

            options = {'user_id': context.user_id,
                       'project_id': context.project_id,
                       'display_name': name,
                       'display_description': description,
                       'status': 'available',
                       'metadata': metadata,
                       'field_values': field_values}
            policy = self.db.policy_create(context, options)

            workload_utils.upload_policy_db_entry(context, policy.id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Create Submitted', policy)
            return self.db.policy_get(context, policy['id'])
        except Exception as ex:
            LOG.exception(ex)
            if 'policy' in locals():
                self.db.policy_update(context, policy['id'],
                                      {'status': 'error',
                                       'error_msg': ex.kwargs['reason'] if hasattr(ex,
                                                                                   'kwargs') and 'reason' in ex.kwargs else {}})
            raise

    @wrap_check_policy
    def policy_update(self, context, policy_id, values):
        """
        Update given policy.
        """
        try:
            policy = self.db.policy_get(context, policy_id)

            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Update Requested', None)

            policy_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id, workloads=True)
            if len(policy_assignments) > 0:
                raise wlm_exceptions.ErrorOccurred(
                    reason="Can not update policy: %s. It's assigned to workloads." % (policy_id))

            # verify field names provided with available policy fields
            field_values = values.get('field_values', {})
            if len(field_values) > 0:
                fields = self.db.policy_fields_get_all(context)
                policy_fields = [f.field_name for f in fields]

                if len(set(policy_fields).union(set(field_values.keys()))) > len(policy_fields):
                    raise wlm_exceptions.InvalidRequest(
                        reason="Please provide only %s as part of policy fields" % str(policy_fields))

            policy = self.db.policy_update(context, policy_id, values)

            workload_utils.upload_policy_db_entry(context, policy_id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Update Submitted', policy)
            return policy
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_get(self, context, policy_id):
        """
        Get policy object.
        """
        try:
            policy = self.db.policy_get(context, policy_id)
            return policy
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_list(self, context, search_opts={}):
        """
        List all available policies.
        """
        try:
            policies = self.db.policy_get_all(context, **search_opts)
            return policies
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_delete(self, context, policy_id):
        """
        Delete the given policy.
        """
        try:
            policy = self.db.policy_get(context, policy_id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Delete Requested', None)

            # Check policy is not assigned to any workload
            workload_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id, workloads=True)
            if len(workload_assignments) > 0:
                raise wlm_exceptions.ErrorOccurred(
                    reason="Can not delete policy: %s. It's assigned to workloads." % (policy_id))

            # Remove policy assignments from projects.
            policy_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id)

            for pa in policy_assignments:
                self.db.policy_assignment_delete(context, pa.id)

            self.db.policy_delete(context, policy_id)
            workload_utils.policy_delete(context, policy_id)
            AUDITLOG.log(context, 'Policy \'' +
                         policy['display_name'] + '\' Delete Submitted', None)
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def policy_assign(self, context, policy_id, add_projects, remove_projects):
        """
        Assign/remove the policy to a tenant.
        """
        try:
            failed_project_ids = []
            project_to_add = {}
            # Remove those project id's which are wrong.
            try:
                for project_id in add_projects[:]:
                    # Validate tenant_id existance
                    clients.initialise()
                    keystoneclient = clients.Clients(
                        context).client("keystone")
                    project = keystoneclient.client.projects.get(project_id)
                    project_to_add[project_id] = project.name
            except Exception as ex:
                failed_project_ids.append(project_id)

            # Validate policy_id
            policy = self.db.policy_get(context, policy_id)

            policy_assignments = self.db.policy_assignments_get_all(
                context, policy_id=policy_id)

            for pa in policy_assignments:
                if pa.project_id in remove_projects:
                    # Check policy is not assigned to any workload
                    workload_assignments = self.db.policy_assignments_get_all(
                        context, policy_id=policy_id, project_id=pa.project_id, workloads=True)
                    if len(workload_assignments) > 0:
                        raise wlm_exceptions.ErrorOccurred(
                            reason="Can not remove policy: %s. It's assigned to workloads." % (policy_id))
                    self.db.policy_assignment_delete(context, pa.id)
                    remove_projects.remove(pa.project_id)
                elif pa.project_id in list(project_to_add.keys()):
                    project_to_add.pop(pa.project_id)

            if len(remove_projects) > 0:
                failed_project_ids.extend(remove_projects)

            for proj_id, proj_name in project_to_add.items():
                values = {'policy_id': policy_id, 'project_id': proj_id,
                          'policy_name': policy.display_name, 'project_name': proj_name}
                self.db.policy_assignment_create(context, values)

            workload_utils.upload_policy_db_entry(context, policy_id)
            policy = self.db.policy_get(context, policy_id)
            return (policy, failed_project_ids)
        except Exception as ex:
            LOG.exception(ex)
            raise

    @wrap_check_policy
    def get_assigned_policies(self, context, project_id):
        """
        list the policies which are assigned to given project
        """
        try:
            assigned_policies = self.db.policy_assignments_get_all(
                context, project_id=project_id)
            return assigned_policies
        except Exception as ex:
            LOG.exception(ex)
            raise

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def policy_field_create(self, context, name, type):
        """
        Create a policy_field. No RPC call is made
        """
        AUDITLOG.log(context, 'Policy Field \'' +
                     name + '\' Create Requested', None)
        options = {'field_name': name,
                   'type': type,
                   }
        policy_field = self.db.policy_field_create(context, options)
        AUDITLOG.log(context, 'Policy Field \'' + name +
                     '\' Create Submitted', policy_field)
        return policy_field

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def policy_field_list(self, context):
        """
        List all policy fields.
        """
        policy_fields = self.db.policy_fields_get_all(context)
        return policy_fields

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_tenants_usage(self, context):
        """
        Get storage used and vm's protected by different tenants.
        """
        try:
            total_usage = 0
            total_capacity = 0
            total_vms_protected = 0
            tenants_usage = self.db.get_tenants_usage(
                context)

            admin_cntx = nova._get_tenant_context(context, cloud_admin=True)
            clients.initialise()
            nova_plugin = clients.Clients(admin_cntx)
            nova_client = nova_plugin.client("nova")

            search_opts = {'all_tenants': 1}
            servers = nova_client.servers.list(search_opts=search_opts)
            tenant_wise_servers = {}
            for server in servers:
                if server.tenant_id not in tenant_wise_servers:
                    tenant_wise_servers[server.tenant_id] = [server.id]
                else:
                    tenant_wise_servers[server.tenant_id].append(server.id)
            for tenant_id in tenant_wise_servers:
                if tenant_id in tenants_usage:
                    protected = len(set(tenants_usage[tenant_id]['vms_protected']).intersection(
                        set(tenant_wise_servers[tenant_id])))
                    passively_protected = len(set(tenants_usage[tenant_id]['vms_protected']).difference(
                        set(tenant_wise_servers[tenant_id])))
                    tenants_usage[tenant_id]['vms_protected'] = protected
                    total_vms_protected += protected
                    tenants_usage[tenant_id]['total_vms'] = len(
                        tenant_wise_servers[tenant_id])
                    tenants_usage[tenant_id][
                        'passively_protected'] = passively_protected
                else:
                    tenants_usage[tenant_id] = {'vms_protected': 0, 'total_vms': len(
                        tenant_wise_servers[tenant_id]), 'used_capacity': 0, 'passively_protected': 0}

            # Update tenants_usage for those tenants which doesn't have any
            # workloads
            clients.initialise()
            keystoneclient = clients.Clients(admin_cntx).client("keystone")
            tenants = keystoneclient.client.projects.list()

            for tenant in tenants:
                if tenant.id not in tenants_usage:
                    tenants_usage[tenant.id] = {
                        'vms_protected': 0, 'total_vms': 0, 'used_capacity': 0, 'passively_protected': 0, 'tenant_name': tenant.name}
                else:
                    tenants_usage[tenant.id]['tenant_name'] = tenant.name

            backends_storage_stats = self.get_storage_usage(context)

            for backend in backends_storage_stats['storage_usage']:
                total_capacity += int(round(float(backend['total_capacity'])))
                total_usage += int(round(float(backend['total_utilization'])))
            global_usage = {'total_capacity': total_capacity, 'total_usage': total_usage, 'total_vms': len(
                servers), 'vms_protected': total_vms_protected}
            return {'tenants_usage': tenants_usage,
                    'global_usage': global_usage}
        except Exception as ex:
            LOG.exception(ex)
            raise wlm_exceptions.ErrorOccurred(
                reason=str(ex) %
                (ex.kwargs if hasattr(
                    ex, 'kwargs') else {}))

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_tenants_chargeback(self, context):
        """
        Get tenants chargeback.
        """
        try:
            snap_chargeback = {}
            kwargs = {"read_metadata": False}
            snapshots = self.db.snapshot_get_all(context, **kwargs)
            for snap in snapshots:
                if snap.workload_id in snap_chargeback:
                    snap_chargeback[snap.workload_id]["workload_size"] += int(
                        round(float(snap.size)))
                    snap_chargeback[snap.workload_id]["snapshots"][snap.id] = {"name": snap.display_name,
                                                                               "size": int(round(float(snap.size))),
                                                                               "created_at": snap.created_at}
                else:
                    snap_chargeback[snap.workload_id] = {"workload_size": snap.size, "snapshots": {
                        snap.id: {"name": snap.display_name, "size": int(round(float(snap.size))), "created_at": snap.created_at}}}
            workload_vm_chargeback = {}
            workload_vms = self.db.workload_vms_get(context, None)
            for w_vm in workload_vms:
                if w_vm.workload_id in workload_vm_chargeback:
                    workload_vm_chargeback[w_vm.workload_id][w_vm.vm_id] = {
                        "name": w_vm.vm_name}
                else:
                    workload_vm_chargeback[w_vm.workload_id] = {
                        w_vm.vm_id: {"name": w_vm.vm_name}}

            workloads = self.db.workload_get_all(context)
            workloads_chargeback = {}
            for w in workloads:
                def get_value(
                    obj, id, key): return obj[id][key] if id in obj else None
                if w.project_id in workloads_chargeback:
                    workloads_chargeback[w.project_id]["workloads"][w.id] = {"name": w.display_name,
                                                                          "size": get_value(snap_chargeback, w.id, "workload_size") or 0,
                                                                          "protected_vms": workload_vm_chargeback.get(w.id, 0),
                                                                          "snapshots": get_value(snap_chargeback, w.id, "snapshots") or {},
                                                                          "total_protected_vms": len(workload_vm_chargeback.get(w.id, []))
                                                                             }
                else:
                    workloads_chargeback[w.project_id] = {"workloads": {w.id: {"name": w.display_name,
                                                                          "size": get_value(snap_chargeback, w.id, "workload_size") or 0,
                                                                          "protected_vms": workload_vm_chargeback[w.id],
                                                                          "snapshots": get_value(snap_chargeback, w.id, "snapshots")or {},
                                                                          "total_protected_vms": len(workload_vm_chargeback.get(w.id, []))
                                                                               }}}

            tenants_usage = self.get_tenants_usage(context)["tenants_usage"]
            tenant_chargeback = tenants_usage.copy()
            for tenant_id in list(tenants_usage.keys()):
                if tenant_id in workloads_chargeback:
                    tenant_chargeback[tenant_id]["no_of_workloads"] = len(
                        list(workloads_chargeback[tenant_id]["workloads"].keys()))
                    tenant_chargeback[tenant_id]["workloads"] = workloads_chargeback[tenant_id]["workloads"]
                else:
                    tenant_chargeback[tenant_id]["no_of_workloads"] = 0
                    tenant_chargeback[tenant_id]["workloads"] = {}
            return tenant_chargeback
        except Exception as ex:
            LOG.exception(ex)

    @autolog.log_method(logger=Logger)
    @wrap_check_policy
    def get_quota_data(self, context, project_id):
        """
        API collects following information based on project_id (of provided):
        1. storage: total storage capacity and total amount of utilized storage
        2. Protected VM: total number of protected VM's for given project
        3. tvalt-nodes: available nodes w.r.t. total nodes in system
        4. contego-services: total contego services with it's respctive states (UP/DOWN)
        5. snapshot data: total snapshots available for each node
        """
        try:
            quota_data = {'total_nodes': 0, 'available_nodes': 0, 'error': list()}
            node_wise_snapshot_count = 0
            resp = {
                'nodes': dict(),
                'storage': list(),
                'chrageback_data': dict()
            }

            tz = 'UTC'
            try:
                tz = self._get_cookies()['django_timezone']
            except:
                try:
                    tz = self.COOKIES['django_timezone']
                except Exception  as ex:
                    LOG.debug("Failed to fetch time zone, using default: {}".format(tz))
                    LOG.debug(ex)

            try:
                total_snapshots = 0
                resp['nodes'] = self.get_nodes(context)
                for node in resp['nodes'].get('nodes', []):
                    if not node['is_vip']:
                        quota_data['total_nodes'] += 1
                        if isinstance(node['status'], six.string_types) and node['status'].lower() == "up":
                            quota_data['available_nodes'] += 1
                        search_opts = {'host': node['node'], 'get_all': True, 'status': 'running'}

                        try:
                            snapshots = self.db.snapshot_get_all(context, **search_opts)
                        except Exception:
                            snapshots = []

                        node['snapshots'] = []
                        for snapshot in snapshots:
                            if project_id and snapshot.project_id == project_id:
                                node['snapshots'].append(snapshot)
                            else:
                                node['snapshots'].append(snapshot)

                        total_snapshots += len(node['snapshots'])
                        node_wise_snapshot_count += len(node['snapshots'])
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch nodes due to %s' % str(ex))

            quota_data['balance_nodes'] = quota_data['total_nodes'] - quota_data['available_nodes']
            quota_data['total_snapshots'] = total_snapshots
            quota_data['node_wise_snapshot_count'] = node_wise_snapshot_count

            try:
                resp['storage'] = self.get_storage_usage(context)
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch storage usage due to %s' % str(ex))

            quota_data['total_utilization'] = 0
            quota_data['total_capacity'] = 0

            for nfsshare in resp['storage'].get('storage_usage', []):
                quota_data['storage_type'] = str(nfsshare['storage_type'])
                quota_data['total_utilization'] += nfsshare['total_utilization']
                quota_data['total_capacity'] += nfsshare['total_capacity']

            quota_data['available_capacity'] = \
                (float(quota_data['total_capacity']) -
                 float(quota_data['total_utilization']))
            quota_data['total_capacity_humanized'] = \
                utils.sizeof_fmt(quota_data['total_capacity'])
            quota_data['total_utilization_humanized'] = \
                utils.sizeof_fmt(quota_data['total_utilization'])
            quota_data['available_capacity_humanized'] = \
                utils.sizeof_fmt(quota_data['available_capacity'])

            quota_data['contego_services_up'] = 0
            quota_data['contego_services_down'] = 0
            quota_data['contego_services_others'] = 0
            quota_data['contego_services_total'] = 0

            try:
                admin_cntx = nova._get_tenant_context(context, cloud_admin=True)
                clients.initialise()
                contego_plugin = clients.Clients(admin_cntx)
                contego_client = contego_plugin.client("contego")
                service_info = contego_client.contego.get_service_list()
                if service_info and isinstance(service_info, tuple) and service_info[1].get('services'):
                    resp['services'] = service_info[1]['services']
                else:
                    resp['services'] = []
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch services due to %s' % str(ex))

            for service in resp['services']:
                quota_data['contego_services_total'] += 1
                service['updated_at'] = utils.get_local_time(service['updated_at'], '', '', tz)
                if service['state'] == "up":
                    quota_data['contego_services_up'] += 1
                elif service.get('state') == "down":
                    quota_data['contego_services_down'] += 1
                else:
                    quota_data['contego_services_others'] += 1

            quota_data['vms_protected'] = 0
            quota_data['total_vms'] = 0

            try:
                resp['chrageback_data'] = self.get_tenants_usage(context)
            except Exception as ex:
                LOG.exception(ex)
                quota_data['error'].append('Unable to fetch tenant usage due to %s' % str(ex))

            if resp['chrageback_data'].get('global_usage') and resp['chrageback_data'].get('tenants_usage'):
                global_usage = resp['chrageback_data']['global_usage']
                tenant_usage = resp['chrageback_data']['tenants_usage']
                if project_id is not None:
                    for tenant in tenant_usage:
                        if tenant == project_id:
                            quota_data['vms_protected'] = tenant_usage[tenant]['vms_protected']
                            quota_data['total_vms'] = tenant_usage[tenant]['total_vms']
                            quota_data['storage_used'] = tenant_usage[tenant]['used_capacity']
                            quota_data['total_utilization'] = tenant_usage[tenant]['used_capacity']
                            quota_data['total_capacity'] = global_usage['total_capacity']
                            quota_data['total_capacity_humanized'] = utils.sizeof_fmt(quota_data['total_capacity'])
                            quota_data['total_utilization_humanized'] = utils.sizeof_fmt(quota_data['total_utilization'])

                    try:
                        keystoneclient = clients.Clients(context).client("keystone")
                        project = keystoneclient.client.projects.get(project_id)
                        project_name = project.name
                    except Exception:
                        project_name = project_id

                    quota_data['tenant_name'] = 'by ' + project_name + ' Tenant' if project_name else None
                else:
                    quota_data['vms_protected'] = global_usage['vms_protected']
                    quota_data['total_vms'] = global_usage['total_vms']
                    quota_data['tenant_name'] = ''
        except Exception as ex:
            LOG.exception(ex)
        return quota_data