# NOTE: The lines below are residue from the Gemfury package-hosting web page
# that leaked into this file during extraction; they are not part of the
# module.  Preserved as comments so the file remains valid Python.
#   Why Gemfury? Push, build, and install  RubyGems npm packages Python packages
#   Maven artifacts PHP packages Go Modules Debian packages RPM packages NuGet packages
#   Repository URL to install this package:
#   Details / Size: Mime:
import os
import yaml
import argparse
import uuid
from cliff import show, lister
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from prettytable import PrettyTable

from workloadmgrclient import timezone
from workloadmgrclient import utils
from workloadmgrclient.openstack.common import strutils
from workloadmgrclient.v1 import WorkloadmgrCommand
from workloadmgrclient.v1.backup_target_type import ShowBackupTargetType
from workloadmgrclient.v1 import validators

# Absolute path of the YAML map file consumed by workload reassign commands.
reassign_map_path = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        "../input-files/workload_reassign_map_file.yaml",
    )
)


class WorkloadCommand(WorkloadmgrCommand):
    # Resource name shared by every workload sub-command.
    resource = "workloads"

    def _produce_verbose_output(self, metadata, jobschedule):
        """Print the metadata and jobschedule tables for verbose mode.

        The ``topology`` and ``workloadgraph`` entries are large internal
        structures, so they are dropped from the metadata before printing.
        """
        for bulky_key in ("topology", "workloadgraph"):
            metadata.pop(bulky_key, None)

        utils.print_dict(metadata, "Metadata")
        utils.print_dict(jobschedule, "Jobschedule")


class ListWorkload(WorkloadCommand, lister.Lister):
    """List all the workloads of current project."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--all",
            type=strutils.bool_from_string,
            metavar="{True,False}",
            default=False,
            help="List all workloads of all the projects (valid for admin user only)",
        )
        parser.add_argument(
            "--nfsshare",
            metavar="<nfsshare>",
            default=None,
            help="List all workloads of nfsshare (valid for admin user only)",
        )

    def take_action(self, parsed_args):
        """Return (headers, rows) for the workload listing."""
        wlm_client = self.get_client()
        search_opts = {
            "all_workloads": parsed_args.all,
            "nfs_share": parsed_args.nfsshare,
        }
        workloads = wlm_client.list(search_opts=search_opts) or []

        headers = ["ID", "Name", "ProjectID", "Status", "CreatedAt"]
        fields = ["id", "name", "project_id", "status", "created_at"]
        rows = (osc_utils.get_item_properties(wl, fields) for wl in workloads)
        return headers, rows


class ShowWorkload(WorkloadCommand, show.ShowOne):
    """Show details about a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "workload_id",
            metavar="<workload_id>",
            help="ID of the workload."
        )

        parser.add_argument(
            "--scheduler_trust",
            # NOTE: type=bool is an argparse trap -- bool("False") is True, so
            # any non-empty value used to count as truthy.  Parse the string
            # the same way as the module's other boolean flags.
            type=strutils.bool_from_string,
            metavar="{True,False}",
            help="scheduler_trust."
        )

    def take_action(self, parsed_args):
        """Fetch one workload and flatten its details for display.

        Internal keys (links, workload_type_id, per-instance metadata) are
        removed, and the jobschedule is reduced to its 'enabled' flag; the
        full metadata/jobschedule dicts are printed only in verbose mode.
        """
        client = self.get_client()
        # Forward scheduler_trust to find_resource only when it was requested.
        if parsed_args.scheduler_trust:
            scheduler_trust = {'scheduler_trust': parsed_args.scheduler_trust}
            workload = utils.find_resource(client, parsed_args.workload_id, scheduler_trust)
        else:
            workload = utils.find_resource(client, parsed_args.workload_id)
        info = {}
        info.update(workload._info)
        info.pop("links", None)
        info.pop("workload_type_id", None)
        metadata = info.pop("metadata", {})
        job_schedule = info.pop("jobschedule", {})
        job_schedule.pop("local_clock", None)
        job_schedule.pop("appliance_timezone", None)
        job_schedule.pop("retentionmanual", None)
        # .get() instead of [] -- a workload whose jobschedule lacks the
        # 'enabled' key would otherwise raise KeyError here.
        info["jobschedule"] = job_schedule.get("enabled")

        # Strip per-instance metadata; tolerate a response without 'instances'.
        for instance in info.get("instances", []):
            instance.pop("metadata", None)

        # TODO: check for better approach here
        if getattr(
            self.app.options,
            "verbose",
            getattr(self.app.options, "verbose_level", None),
        ):
            self._produce_verbose_output(metadata, job_schedule)

        columns = list(info.keys())
        data = osc_utils.get_dict_properties(info, columns)
        return columns, data


class CreateWorkload(WorkloadCommand, lister.Lister):
    """Creates a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--instance",
            metavar="<instance>",
            nargs="+",
            default=[],
            required=True,
            help="Required to set atleast one instance,"
            " Specify an instances to include in the workload.\n"
            "Eg. --instance <instance-uuid_1> <instance-uuid_2> ... <instance-uuid_N>",
        )
        parser.add_argument(
            "--display-name",
            metavar="<display-name>",
            help="Optional workload name. (Default=None)",
            default=None,
        )
        parser.add_argument(
            "--display-description",
            metavar="<display-description>",
            help="Optional workload description. (Default=None)",
            default=None,
        )
        parser.add_argument(
            "--source-platform",
            metavar="<source-platform>",
            help="Optional workload source platform (Default=None)",
            default=None,
        )
        parser.add_argument(
            "--jobschedule",
            metavar="<key=key-name>",
            action="append",
            dest="jobschedule",
            default=[],
            help="Specify following key value pairs for jobschedule \n"
            "Specify option multiple times to include multiple keys. \n"
            "If don't specify timezone, then by default it takes your local machine timezone\n"
            " 'start_date' : '06/05/2014' \n"
            " 'end_date' : '07/15/2014' \n"
            " 'start_time' : '2:30 PM' \n"
            " 'timezone: '' \n,                              "
            "For example --jobschedule start_date='mm/dd/yy' --jobschedule enabled=True\n"
            "In order to enable/disable scheduler pass enabled True / enabled False\n "
        )

        parser.add_argument(
            "--hourly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for hourly jobschedule "
            "interval=<n> where n is no of hours within list (1,2,3,4,6,8,12,24) "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --hourly interval='4' retention='1' snapshot_type='incremental' "
            "If you don't specify this option, following default value "
            " 'interval' : '1' "
            " 'retention' : '30' "
            " 'snapshot_type' : 'incremental' ",
        )

        parser.add_argument(
            "--daily",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for daily jobschedule "
            "backup_time='1:30 22:30 00:30' "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --daily backup_time='01:00 14:00 23:00' retention='1' snapshot_type='incremental' "
            " meaning of above input : everyday 1 AM, 2PM,11PM incremental snapshot get trigger, only 1 snapshot remain per day ",
        )

        parser.add_argument(
            "--weekly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for weekly jobschedule "
            "backup_day=[mon,tue,wed,thu,fri,sat,sun] "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --weekly backup_day='sun mon' retention='1' snapshot_type=incremental "
            " meaning of above input : incremental snapshot get trigger on every sun monday day on backup_time specified on daily option",
        )

        parser.add_argument(
            "--monthly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for monthly jobschedule "
            "month_backup_day=<1-31|last>, 'last': last day of the month "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --monthly month_backup_day='1 last' retention=1 snapshot_type=incremental "
            " meaning of above input : every month 1st and last day of the month, 1 snapshot remain on every end of month, snapshot type incremental ",
        )

        parser.add_argument(
            "--yearly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for yearly jobschedule "
            "backup_month=[jan,feb,mar,apr,may,jun,jul,aug,sep,oct,nov,dec] "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --yearly month_of_the_year='jan feb' retention='1' snapshot_type=full "
            " meaning of above input : every year Jan Feb 1 full snapshot remain for retention",
        )

        parser.add_argument(
            "--manual",
            metavar="<key=key-name>",
            nargs="+",
            default=[],
            help="Specify following key value pairs for manual jobschedule "
            "retention=<snapshots count> "
            "retention_days_to_keep=<num of days>"
            "retention_days_to_keep only available for immutable Backup Targets"
            "For example --manual retention=30 retention_days_to_keep=5"
            " meaning of above input : manually trigger snapshot get deleted after retention value hit",
        )

        parser.add_argument(
            "--metadata",
            metavar="<key=key-name>",
            action="append",
            dest="metadata",
            default=[],
            help="Specify a key value pairs to include in the workload metadata "
            "Specify option multiple times to include multiple keys. "
            "key=value",
        )
        parser.add_argument(
            "--policy-id", metavar="<policy_id>", help="Policy ID", default=None
        )

        parser.add_argument(
            "--encryption",
            metavar="<True/False>",
            help="--encryption True/False (Default=False)",
            default=False,
            action=CreateWorkload.EncryptionAction,
        )

        parser.add_argument(
            "--secret-uuid",
            metavar="<secret_uuid>",
            help="Secret UUID",
            default=None,
        )

        parser.add_argument(
            "--backup-target-type",
            metavar="<backup_target_type>",
            help="Backup Target Type for this workload.",
            default=None,
        )

    class EncryptionAction(argparse.Action):
        """Map affirmative strings ('true'/'yes', any case) to boolean True.

        Any other value leaves the argparse default (False) untouched, so
        ``--encryption False`` still results in False.
        """

        def __call__(self, parser, namespace, values, option_string=None):
            if values.lower() in ['true', 'yes']:
                setattr(namespace, self.dest, True)

    @staticmethod
    def validate_encryption_on_workload(parsed_args):
        """Return True when --encryption and --secret-uuid are supplied as a
        pair (both present or both absent); the options are only valid together."""
        return bool(parsed_args.encryption) == bool(parsed_args.secret_uuid)

    def take_action(self, parsed_args):
        """Validate the inputs and create a workload.

        Raises:
            exceptions.CommandError: on a malformed instance uuid, bad
                metadata pair, mismatched encryption options, or a schedule
                that an immutable backup target does not support.
        """
        client = self.get_client()
        # argparse already enforces required=True; keep a defensive check
        # for programmatic callers that bypass the parser.
        if not parsed_args.instance:
            raise exceptions.CommandError("Please provide required --instance argument")

        instances = []
        for instance_uuid in parsed_args.instance:
            try:
                validators.validate_uuid(instance_uuid)
            except ValueError:
                raise exceptions.CommandError("Invalid --instance: {} Please provide correct instance uuid".format(instance_uuid))
            instances.append({"instance-id": instance_uuid})

        job_schedule = utils.validate_job_scheduler_param(parsed_args)

        metadata = {}
        for metadata_str in parsed_args.metadata:
            err_msg = (
                "Invalid metadata argument '%s'. metadata arguments must be of the "
                "form --metadata <key=value>" % metadata_str
            )

            for kv_str in metadata_str.split(","):
                try:
                    k, v = kv_str.split("=", 1)
                except ValueError:
                    raise exceptions.CommandError(err_msg)
                # Last occurrence of a repeated key wins (the previous
                # `if k in metadata ... else setdefault` did exactly this).
                metadata[k] = v

        if parsed_args.policy_id is not None:
            metadata["policy_id"] = parsed_args.policy_id

        if not CreateWorkload.validate_encryption_on_workload(parsed_args):
            # CommandError (not a bare Exception) so cliff reports this as a
            # normal CLI usage error, consistent with every other failure here.
            raise exceptions.CommandError('--encryption and --secret-uuid are '
                                          'mutually inclusive in nature.')

        btt_name = parsed_args.backup_target_type
        # NOTE(review): this server round-trip runs even when btt_name is None;
        # presumably is_immutable tolerates that -- confirm before guarding.
        res = ShowBackupTargetType(self.app, self.app_args).get_client().is_immutable(btt_name)
        if res.get("status") and job_schedule.get("enabled"):
            # Immutable targets only support a 24-hour cycle.
            # NOTE(review): the daily/weekly/monthly/yearly rejection only runs
            # when an hourly schedule is present -- looks intentional per the
            # original indentation, but verify.
            if job_schedule.get("hourly"):
                if job_schedule["hourly"].get("interval") and int(job_schedule["hourly"].get("interval")) != 24:
                    raise exceptions.CommandError(f"Hourly interval must be 24 for immutable backup target type:{btt_name}.")
                for time_line in ["daily", "weekly", "monthly", "yearly"]:
                    if time_line in job_schedule:
                        raise exceptions.CommandError(f"Field:{time_line} does not support by immutable backup type")

        workload_obj = client.create(
            parsed_args.display_name,
            parsed_args.display_description,
            parsed_args.source_platform,
            instances,
            job_schedule,
            metadata,
            encryption=parsed_args.encryption,
            secret_uuid=parsed_args.secret_uuid,
            backup_target_type=parsed_args.backup_target_type
        )

        if workload_obj:
            return (
                ["ID", "Name", "Status"],
                (
                    osc_utils.get_item_properties(obj, ["id", "name", "status"])
                    for obj in [workload_obj]
                ),
            )
        return


class ModifyWorkload(WorkloadCommand):
    """Modify a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "workload_id", metavar="<workload_id>", help="ID of the workload."
        )
        parser.add_argument(
            "--display-name",
            metavar="<display-name>",
            help="Optional workload name. (Default=None)",
            default=None,
        )
        parser.add_argument(
            "--display-description",
            metavar="<display-description>",
            help="Optional workload description. (Default=None)",
            default=None,
        )
        parser.add_argument(
            "--instance",
            metavar="<instance>",
            nargs="*",
            default=[],
            help="Specify an instance(s) to include in the workload.\n"
            "Eg. --instance <instance-uuid_1> <instance-uuid_2> ... <instance-uuid_N>",
        )
        parser.add_argument(
            "--jobschedule",
            metavar="<key=key-name>",
            action="append",
            dest="jobschedule",
            default=[],
            help="Specify following key value pairs for jobschedule "
            "Specify option multiple times to include multiple keys. "
            "If don't specify timezone, then by default it takes your local machine timezone"
            " 'start_date' : '06/05/2014' "
            " 'end_date' : '07/15/2014' "
            " 'start_time' : '2:30 PM' "
            " 'timezone': <timezone> (Default=current system timezone) eg. 'Asia/Kolkata', 'America/New_York' ",
        )
        parser.add_argument(
            "--hourly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for hourly jobschedule "
            "interval=<n> where n is no of hours within list (1,2,3,4,6,8,12,24) "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --hourly interval='4' retention='1' snapshot_type='incremental' "
            "If you don't specify this option, following default value "
            " 'interval' : '1' "
            " 'retention' : '30' "
            " 'snapshot_type' : 'incremental' ",
        )

        parser.add_argument(
            "--daily",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for daily jobschedule "
            "backup_time='1:30 22:30 00:30' "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --daily backup_time='01:00 14:00 23:00' retention='1' snapshot_type='incremental' "
            " meaning of above input : everyday 1 AM, 2PM,11PM incremental snapshot get trigger, only 1 snapshot remain per day ",
        )

        parser.add_argument(
            "--weekly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for weekly jobschedule "
            "backup_day=[mon,tue,wed,thu,fri,sat,sun] "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --weekly backup_day='sun mon' retention='1' snapshot_type=incremental "
            " meaning of above input : incremental snapshot get trigger on every sun monday day on backup_time specified on daily option",
        )

        parser.add_argument(
            "--monthly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for monthly jobschedule "
            "month_backup_day=<1-31|last>, 'last': last day of the month "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --monthly month_backup_day='1 last' retention=1 snapshot_type=incremental "
            " meaning of above input : every month 1st and last day of the month, 1 snapshot remain on every end of month, snapshot type incremental ",
        )

        parser.add_argument(
            "--yearly",
            metavar="<key=key-name>",
            action="append",
            nargs="*",
            default=[],
            help="Specify following key value pairs for yearly jobschedule "
            "backup_month=[jan,feb,mar,apr,may,jun,jul,aug,sep,oct,nov,dec] "
            "retention=<snapshots count> "
            "snapshot_type=<full|incremental> "
            "For example --yearly month_of_the_year='jan feb' retention='1' snapshot_type=full "
            " meaning of above input : every year Jan 1 full snapshot remain for retention",
        )

        parser.add_argument(
            "--manual",
            metavar="<key=key-name>",
            nargs="+",
            default=[],
            help="Specify following key value pairs for manual jobschedule "
            "retention=<snapshots count> "
            "retention_days_to_keep=<num of days>"
            "retention_days_to_keep only available for immutable Backup Targets"
            "For example --manual retention=30 retention_days_to_keep=5"
            " meaning of above input : manually trigger snapshot get deleted after retention value hit",
        )

        parser.add_argument(
            "--metadata",
            metavar="<key=key-name>",
            action="append",
            dest="metadata",
            default=[],
            help="Specify a key value pairs to include in the workload metadata "
            "Specify option multiple times to include multiple keys. "
            "key=value",
        )
        parser.add_argument(
            "--policy-id", metavar="<policy_id>", help="Policy ID", default=None
        )

    def take_action(self, parsed_args):
        """Validate inputs and update an existing workload in place.

        Raises:
            exceptions.CommandError: on a malformed instance uuid or a bad
                --metadata pair.
        """
        client = self.get_client()
        workload_obj = utils.find_resource(client, parsed_args.workload_id)
        instances = []
        for instance_uuid in parsed_args.instance:
            try:
                validators.validate_uuid(instance_uuid)
            except ValueError:
                raise exceptions.CommandError("Invalid --instance: {} Please provide correct instance uuid".format(instance_uuid))
            instances.append({"instance-id": instance_uuid})

        job_schedule = utils.validate_job_scheduler_param(parsed_args)
        metadata = {}
        for metadata_str in parsed_args.metadata:
            err_msg = (
                "Invalid metadata argument '%s'. metadata arguments must be of the "
                "form --metadata <key=value>" % metadata_str
            )

            for kv_str in metadata_str.split(","):
                try:
                    k, v = kv_str.split("=", 1)
                except ValueError:
                    raise exceptions.CommandError(err_msg)
                # Last occurrence of a repeated key wins (the previous
                # `if k in metadata ... else setdefault` did exactly this).
                metadata[k] = v

        if parsed_args.policy_id is not None:
            metadata["policy_id"] = parsed_args.policy_id

        workload_obj.update(
            workload_obj.id,
            parsed_args.display_name,
            parsed_args.display_description,
            instances,
            job_schedule,
            metadata,
        )
        return


class DeleteWorkload(WorkloadCommand):
    """Remove a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--database_only",
            # Without a converter, `--database_only False` passed the truthy
            # string "False"; parse it like the module's other boolean flags.
            type=strutils.bool_from_string,
            metavar="{True,False}",
            help="Keep True if want to delete from database only.(Default=False)",
            default=False,
        )
        parser.add_argument(
            "workload_id", metavar="<workload_id>", help="ID of the workload to delete."
        )

    def take_action(self, parsed_args):
        """Look up the workload by id and delete it (optionally DB-record only)."""
        client = self.get_client()
        workload = utils.find_resource(client, parsed_args.workload_id)
        workload.delete(parsed_args.database_only)
        return


class ResetWorkload(WorkloadCommand):
    """reset a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "workload_id",
            metavar="<workload_id>",
            help="ID of the workload to reset.",
        )

    def take_action(self, parsed_args):
        """Look up the workload by id and reset it."""
        wlm_client = self.get_client()
        utils.find_resource(wlm_client, parsed_args.workload_id).reset()


class UnlockWorkload(WorkloadCommand):
    """unlock a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "workload_id",
            metavar="<workload_id>",
            # Fixed copy/paste: this command unlocks (help previously said "reset").
            help="ID of the workload to unlock."
        )

    def take_action(self, parsed_args):
        """Look up the workload by id and release its lock."""
        client = self.get_client()
        workload_obj = utils.find_resource(client, parsed_args.workload_id)
        workload_obj.unlock()


class WorkloadAuditlog(WorkloadCommand, lister.Lister):
    """Get auditlog of workload manager"""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--time_in_minutes",
            metavar="<time_in_minutes>",
            default=1440,
            help="time in minutes(default is 24 hrs.)",
        )
        parser.add_argument(
            "--time_from",
            metavar="<time_from>",
            default=None,
            help="From date time in format 'MM-DD-YYYY'",
        )
        parser.add_argument(
            "--time_to",
            metavar="<time_to>",
            default=None,
            help="To date time in format 'MM-DD-YYYY'(defult is current day)",
        )

    def take_action(self, parsed_args):
        """Fetch the audit log for the requested window and print it."""
        wlm_client = self.get_client()
        audit_logs = wlm_client.get_auditlog(
            parsed_args.time_in_minutes,
            parsed_args.time_from,
            parsed_args.time_to,
        )
        columns = ["UserName", "ObjectName", "Timestamp", "UserId", "Details"]
        utils.print_list(audit_logs["auditlog"], columns)
        # TODO: check why following block is not printing expected output
        """return (['UserName', 'ObjectName', 'Timestamp', 'UserId', 'Details'],
                (osc_utils.get_dict_properties(
                     obj,
                     ['UserName', 'ObjectName', 'Timestamp', 'UserId', 'Details']
                ) for obj in audit_logs['auditlog']),)
        """


class ImportWorkloadList(WorkloadCommand, lister.Lister):
    """Get list of workloads to be imported."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--project-id",
            metavar="<project-id>",
            default=None,
            help="List workloads belongs to given project only.\n"
                 "If not provided then considers all importable workloads.\n"
                 "--project-id <project-id>",
        )

        parser.add_argument(
            "--source-bt",
            metavar="<source-bt>",
            nargs="+",
            default=[],
            help="List workloads belongs to given backup target(s) only.\n"
                 "If not provided then considers all available Backup Targets.\n"
                 "--source-bt <source-bt-1> <source-bt-2> ... <source-bt-N>",
        )

    def take_action(self, parsed_args):
        """Kick off an importable-workload listing job and return its id."""
        wlm_client = self.get_client()
        # Validate every backup-target id up front (no-op for an empty list).
        for bt_id in parsed_args.source_bt:
            try:
                uuid.UUID(bt_id)
            except ValueError:
                raise exceptions.CommandError("Invalid --source-bt: {}".format(bt_id))
        if parsed_args.project_id:
            try:
                uuid.UUID(parsed_args.project_id)
            except ValueError:
                raise exceptions.CommandError("Invalid --project-id: {}".format(parsed_args.project_id))

        result = wlm_client.get_importworkloads_list(
            project_id=parsed_args.project_id,
            source_bt=parsed_args.source_bt,
        )
        print("Fetch the importable workload list using cmd: workloadmgr job-detail-show <job-id>")
        rows = (osc_utils.get_dict_properties(job, ["jobid"]) for job in result['jobs'])
        return ["JOB-ID"], rows


class JobDetailShow(WorkloadCommand, lister.Lister):
    """Provides job details with job-id."""

    @staticmethod
    def _add_arguments(parser):
        # Positional job id as returned by the import-workload-list command.
        parser.add_argument(
            "jobid",
            metavar="<jobid>",
            help="Provides importable workload list of Jobid.",
        )

    def take_action(self, parsed_args):
        """Fetch job details and render them according to the job's action.

        'list_importable_workloads' jobs are rendered through produce_output;
        'import_workloads' jobs are rendered manually with PrettyTable,
        nesting a per-workload table inside the job row.
        """
        client = self.get_client()
        result = client.get_job_details(parsed_args.jobid)
        job_action = result.pop('action')

        if job_action == 'list_importable_workloads':
            # prints job details
            self.produce_output(parsed_args, ['ID', 'CREATED_AT', 'UPDATED_AT', 'STATUS'], [(result['id'], result['created_at'], result['updated_at'], result['status'])])

            # prints importable workload list for the job
            self.produce_output(parsed_args, ['workload_id', 'name', 'project_id', 'backup_target', 'backup_target_types_name', 'backup_target_types_id'],[(wl['workload_id'], wl['name'], wl['project_id'], wl['backup_target'], wl['backup_target_types'], wl.get('backup_target_types_id')) for wl in result['workload_list']])
        elif job_action == 'import_workloads':
            # Outer table: one row per import job; inner table: its workloads.
            job_list = PrettyTable(["JOB-WORKLOADS", "JOB-ID", "CREATED-AT", "COMPLETED-AT", "STATUS"])
            wl_list = PrettyTable(["WORKLOAD-ID", "STATUS", "MESSAGE"])
            job_list.align['JOB-WORKLOADS'] = 'l'
            job_list.max_width=95
            job_list._max_width={'JOB-ID':8, 'CREATED-AT':10, 'COMPLETED-AT':10}
            wl, job = [], []
            # NOTE(review): `job` is never reset inside this loop and
            # add_row(job) runs only once after it, so more than one entry in
            # result['jobid'] would produce a row/column-count mismatch --
            # presumably the API returns a single job here; confirm.
            for each_job in result['jobid']:
                for k, v in each_job.items():
                    if k == 'wllist':
                        # Build the nested per-workload table, skipping the
                        # 'progress' field, then embed it as the row value.
                        val = []
                        for each_wl in v:
                            for wk, wv in each_wl.items():
                                if wk == 'progress':
                                    #val.append(str(wv)+'%')
                                    pass
                                else:
                                    val.append(wv)
                            wl_list.add_row(val)
                            val = []
                        v = wl_list
                    elif k == 'completedat':
                        # An in-progress job has no completion time yet.
                        if each_job['status'].lower() == 'in-progress':
                            v = '-'

                    job.append(v)
            job_list.add_row(job)
            try:
                print(job_list)
            except Exception as ex:
                # NOTE(review): swallows rendering errors silently (e.g. a
                # row/column mismatch from the loop above) -- verify intent.
                pass

        # this returning value needed to avoid error at the end
        return [(), ()]


class AbandonResource(WorkloadCommand, lister.Lister):
    """Abandon Resoures."""

    @staticmethod
    def _add_arguments(parser):
        workload_group = parser.add_mutually_exclusive_group()
        policy_group = parser.add_mutually_exclusive_group()
        workload_group.add_argument(
            "--workload-ids",
            metavar="<workload_ids>",
            nargs="+",
            default=[],
            help="Specify workload ids to abandon only specified workloads.\nNon-admin user can delete it's own workloads and admin user can delete workloads within all tenants where it has accessibility\n"
            "--workload-ids <workload-id_1> <workload-id_2> ... <workload-id_N>",
        )
        policy_group.add_argument(
            "--policy-ids",
            metavar="<policy_ids>",
            nargs="+",
            default=[],
            help="Specify policy ids to abandon only specified policies.\nOnly admin user is allowed to perform policy deletion.\nadmin user can delete policies within all tenants where it has accessibility\n"
            "--policy-ids <policy-id_1> <policy-id_2> ... <policy-id_N>",
        )
        workload_group.add_argument(
            "--all-workloads",
            help="It abandon all the workloads. Non-admin user can delete it's own all workloads and admin user can delete all workloads within all tenants where it has accessibility.\n--all-workloads",
            action="store_true"
        )
        policy_group.add_argument(
            "--all-policies",
            help="It abandon all the policies. Only admin user is allowed to perform all policy deletion action.\n--all-policies",
            action="store_true"
        )
        parser.add_argument(
            "--cloud-wide",
            help="Looks out for given resources in the admin's capabilities and will abandon those\n--cloud-wide",
            action="store_true"
        )

    def take_action(self, parsed_args):
        """Validate the requested ids and abandon the matching resources."""
        wlm_client = self.get_client()

        def _check_uuids(ids, option_name):
            # Reject any id that is not a well-formed UUID before calling out.
            for resource_id in ids:
                try:
                    uuid.UUID(resource_id)
                except ValueError:
                    raise exceptions.CommandError(
                        "Invalid {}: {}".format(option_name, resource_id)
                    )

        _check_uuids(parsed_args.workload_ids, "--workload-ids")
        _check_uuids(parsed_args.policy_ids, "--policy-ids")

        result = wlm_client.abandon_resource(
            parsed_args.workload_ids,
            parsed_args.policy_ids,
            parsed_args.all_workloads,
            parsed_args.all_policies,
            parsed_args.cloud_wide,
        )

        abandoned_workloads = result['abandon_workload_list']
        if abandoned_workloads:
            self.produce_output(
                parsed_args,
                ['Name', 'WorkloadID', 'ProjectId', 'UserId'],
                [(wl['display_name'], wl['id'], wl['project_id'], wl['user_id'])
                 for wl in abandoned_workloads],
            )

        abandoned_policies = result['abandon_policy_list']
        if abandoned_policies:
            self.produce_output(
                parsed_args,
                ['Name', 'Policy_ID', 'Project_ID', 'User_ID'],
                [(policy['display_name'], policy['id'], policy['project_id'], policy['user_id'])
                 for policy in abandoned_policies],
            )

        return [(), ()]


class WorkloadNodes(WorkloadCommand, lister.Lister):
    """Get all the nodes of a workload manager"""

    # Table headers and the matching keys in each node dict returned by the API.
    _COLUMNS = ["Node", "ID", "Version", "IPAddress", "IsController", "Status", "IsVIP"]
    _FIELDS = ["node", "id", "version", "ipaddress", "is_controller", "status", "is_vip"]

    def take_action(self, parsed_args):
        # Fetch every node known to the workload manager and tabulate it.
        nodes = self.get_client().get_nodes()["nodes"]
        rows = (osc_utils.get_dict_properties(node, self._FIELDS) for node in nodes)
        return (self._COLUMNS, rows)


class ListOrphanedWorkload(WorkloadCommand, lister.Lister):
    """List all the orphaned workloads having tenant_id or user_id which doesn't belong to current cloud."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--migrate_cloud",
            metavar="{True,False}",
            type=strutils.bool_from_string,
            default=False,
            # Typo fix: "Default if False" -> "Default is False".
            help="Set to True if want to list workloads from other clouds as well."
            " Default is False",
        )
        parser.add_argument(
            "--generate_yaml",
            metavar="{True,False}",
            type=strutils.bool_from_string,
            default=False,
            help="Set to True if want to generate output file in yaml format,"
            " which would be further used as input for workload reassign API.",
        )

    def take_action(self, parsed_args):
        """Fetch orphaned workloads and tabulate name/id/project/user.

        When --generate_yaml is set, the client writes a reassign map file
        into the current working directory; this command only announces
        its expected location.
        """
        client = self.get_client()
        orphaned_workloads = client.get_orphaned_workloads_list(parsed_args)
        if parsed_args.generate_yaml:
            print(
                "\nPlease find map file at " + str(os.getcwd()) + "/reassign_map.yaml\n"
            )
        return (
            ["Name", "ID", "Project ID", "User ID"],
            (
                osc_utils.get_dict_properties(
                    obj, ["name", "id", "project_id", "user_id"]
                )
                for obj in orphaned_workloads
            ),
        )


class StorageUsageWorkload(WorkloadCommand, show.ShowOne):
    """Get total storage used by workload manager"""

    @staticmethod
    def _humanize(val):
        """Render a numeric byte count as '<n> Bytes or Approx ( <human> )'.

        Non-numeric values pass through as plain str(). The exact-type check
        (rather than isinstance) deliberately excludes bool, preserving the
        behaviour of the previous type(val).__name__ string comparison.
        """
        if type(val) in (int, float):
            return str(val) + " Bytes or Approx ( " + utils.bytes_fmt(val) + " )"
        return str(val)

    def take_action(self, parsed_args):
        client = self.get_client()
        storage_usage_info = client.get_storage_usage()
        # Per-backup-target breakdown is printed directly, one dict per target.
        for storage in storage_usage_info["storage_usage"]:
            for key, val in storage.items():
                storage[key] = self._humanize(val)
            utils.print_dict(storage, wrap=100)
        # Aggregate counters become the ShowOne (columns, data) result.
        count_dict = storage_usage_info["count_dict"]
        for key, val in count_dict.items():
            count_dict[key] = self._humanize(val)
        columns = list(count_dict.keys())
        data = osc_utils.get_dict_properties(count_dict, columns)
        return columns, data


class SnapshotWorkload(WorkloadCommand):
    """Snapshot a workload."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "workload_id",
            metavar="<workload_id>",
            help="ID of the workload to snapshot.",
        )
        parser.add_argument(
            "--full",
            dest="full",
            action="store_true",
            default=False,
            help="Specify if a full snapshot is required.",
        )
        parser.add_argument(
            "--display-name",
            metavar="<display-name>",
            default=None,
            help="Optional snapshot name. (Default=None)",
        )
        parser.add_argument(
            "--display-description",
            metavar="<display-description>",
            default=None,
            help="Optional snapshot description. (Default=None)",
        )

    def take_action(self, parsed_args):
        # Resolve the workload (by id) and trigger the snapshot on the
        # resource object itself; the command produces no output.
        workload = utils.find_resource(self.get_client(), parsed_args.workload_id)
        workload.snapshot(
            parsed_args.full,
            parsed_args.display_name,
            parsed_args.display_description,
        )


class RemoveNode(WorkloadCommand):
    """Remove workload node by ip-address / hostname"""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "ip", metavar="<ip>", help="IP or hostname of node to remove"
        )

    def take_action(self, parsed_args):
        # Delegate removal to the service; nothing is printed on success.
        self.get_client().remove_node(parsed_args.ip)


class ImportWorkloads(WorkloadCommand, lister.Lister):
    """Import all workload records from backup store."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--workload-ids",
            metavar="<workload_ids>",
            nargs="+",
            default=[],
            help="Specify workload ids to import only specified workloads. If not provided then all available workloads from source BTT will be imported.\n"
            "--workload-ids <workload-id_1> <workload-id_2> ... <workload-id_N>",
        )
        parser.add_argument(
            "--source-btt",
            metavar="<source-btt>",
            nargs="+",
            default=[],
            help="It searches workloads in the given Backup Target Types Id. If not provided then considers default BTT. Only single --source-btt is allowed if --workload-ids are provided.\n"
                 "--source-btt <source-btt-1> <source-btt-2> ... <source-btt-N>",
        )
        parser.add_argument(
            "--target-btt",
            metavar="<target-btt>",
            default=None,
            help="Specify backup target Type id to which you need to assign workloads.\nIf not provided then assigns BTT of workload's original BT.\nWarning: If user changes BTT of workload then snapshot will not be imported and next snapshot will be Full in newly assigned BTT.\n"
            "--target-btt <target-btt>",
        )
        parser.add_argument(
            "--source-btt-all",
            help="This will search in all Backup Target Types and import the workloads.\n Only allowed when --workload-ids is NOT provided.\n--source-btt-all",
            action="store_true"
        )

    def take_action(self, parsed_args):
        """Validate flag combinations, then kick off an asynchronous import job."""
        client = self.get_client()
        workload_ids = parsed_args.workload_ids
        source_btts = parsed_args.source_btt

        if workload_ids:
            # With explicit workload ids, only one source BTT makes sense and
            # a wildcard BTT search is contradictory.
            if len(source_btts) > 1:
                raise exceptions.CommandError("Only single --source-btt is allowed when --workload-ids are provided")
            if parsed_args.source_btt_all:
                raise exceptions.CommandError("--source-btt-all is not allowed when --workload-ids are provided")
            for wid in workload_ids:
                try:
                    uuid.UUID(wid)
                except ValueError:
                    raise exceptions.CommandError("Invalid --workload-ids: {}".format(wid))

        for btt in source_btts:
            try:
                uuid.UUID(btt)
            except ValueError:
                raise exceptions.CommandError("Invalid --source-btt: {}".format(btt))

        result = client.importworkloads(
            workload_ids,
            source_btts,
            parsed_args.source_btt_all,
            parsed_args.target_btt,
        )
        print("Track progress using cmd: workloadmgr job-detail-show <job-id>")
        rows = (osc_utils.get_dict_properties(job, ["jobid"]) for job in result['jobs'])
        return (["JOB-ID"], rows)


class ImportWorkloadsProgress(WorkloadCommand, lister.Lister):
    """Track Workloads Progress with job-id."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "jobid",
            metavar="<jobid>",
            help="Track workload import progress with Jobid.",
        )

    def take_action(self, parsed_args):
        """Render import progress for the given job as nested pretty-tables.

        The service responds in this shape; a job may carry several workloads,
        each with its own progress entry:
        {'jobid':[{
                    "id": "1234567890",
                    "created_at": "22nd Aug 2023",
                    "wllist": [{
                        'id':'123',
                        'importprogress':'20%',
                        'time_taken':'30 min'}],
                    "completedat": "22nd Aug 2023",
                    "status": "in-progress"
                }]}
        """
        client = self.get_client()
        result = client.importworkloadsprogress(parsed_args.jobid)
        job_list = PrettyTable(["JOB-WORKLOADS", "JOB-ID", "CREATED-AT", "COMPLETED-AT", "STATUS"])
        wl_list = PrettyTable(["WORKLOAD-ID", "STATUS", "MESSAGE"])
        job_list.align['JOB-WORKLOADS'] = 'l'
        job_list.max_width = 95
        # NOTE(review): _max_width is a private PrettyTable attribute; kept
        # because the public API of older PrettyTable releases has no
        # per-column width setter — confirm against the pinned version.
        job_list._max_width = {'JOB-ID': 8, 'CREATED-AT': 10, 'COMPLETED-AT': 10}
        job = []
        for each_job in result['jobid']:
            for key, value in each_job.items():
                if key == 'wllist':
                    # Fold each workload's fields (except its 'progress'
                    # percentage, which is intentionally not displayed) into
                    # the inner table, then embed that table as the job row's
                    # JOB-WORKLOADS cell.
                    for each_wl in value:
                        row = [wv for wk, wv in each_wl.items() if wk != 'progress']
                        wl_list.add_row(row)
                    value = wl_list
                job.append(value)
        job_list.add_row(job)
        try:
            print(job_list)
        except Exception:
            # Best effort: swallow rendering/encoding failures rather than
            # fail the command after the data was fetched successfully.
            pass
        # this returning value needed to avoid error at the end
        return [(), ()]


class ReassignWorkloads(WorkloadCommand, lister.Lister):
    """Assign workload to a new tenant/user."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--old_tenant_ids",
            metavar="<old_tenant_id>",
            action="append",
            dest="old_tenant_ids",
            default=[],
            help="Specify old tenant ids from which workloads need to reassign to new tenant."
            " --old_tenant_ids <old_tenant_id> --old_tenant_ids <old_tenant_id>",
        )

        parser.add_argument(
            "--new_tenant_id",
            metavar="<new_tenant_id>",
            default=None,
            help="Specify new tenant id to which workloads need to reassign from old tenant."
            " --new_tenant_id <new_tenant_id>",
        )

        parser.add_argument(
            "--workload-ids",
            metavar="<workload_ids>",
            nargs="+",
            default=[],
            help="Specify workload_ids which need to reassign to new tenant. "
            "If not provided then all the workloads from old tenant "
            "will get reassigned to new tenant. "
            "--workload-ids <workload-id_1> <workload-id_2> ... <workload-id_N>",
        )

        parser.add_argument(
            "--user_id",
            metavar="<user_id>",
            default=None,
            help="Specify user id to which"
            " workloads need to reassign from old tenant. "
            "--user_id <user_id>",
        )

        parser.add_argument(
            "--migrate_cloud",
            metavar="{True,False}",
            type=strutils.bool_from_string,
            default=False,
            # Typo fix: "Default if False" -> "Default is False".
            help="Set to True if want to reassign workloads from other clouds as well. Default is False",
        )

        parser.add_argument(
            "--source-btt",
            metavar="<source-btt>",
            nargs="+",
            default=[],
            help="It searches workloads in the given Backup Target Types Id. If not provided then considers default BTT. Only single --source-btt is allowed if --workload-ids are provided.\n"
                 "--source-btt <source-btt-1> <source-btt-2> ... <source-btt-N>",
        )

        parser.add_argument(
            "--source-btt-all",
            # Help fix: the parser defines --new_tenant_id (singular), so the
            # text must not advertise a nonexistent --new_tenant_ids flag.
            help="This will search in all Backup Target Types and reassign the workloads.\n Only allowed when --workload-ids is NOT provided.\nUser must provide --old_tenant_ids and --new_tenant_id to use it.\n--source-btt-all",
            action="store_true"
        )

        parser.add_argument(
            "--map_file",
            metavar="<map_file>",
            default=None,
            # argparse opens the file itself; take_action reads from the handle.
            type=open,
            help="Provide file path(relative or absolute) including file name of reassign map file."
            " Provide list of old workloads mapped to new tenants. "
            "Format for this file is YAML. For sample, please refer "
            "to this file: %s ." % reassign_map_path,
        )

    def take_action(self, parsed_args):
        """Validate the mutually exclusive parameter combinations, then reassign.

        Raises CommandError on any invalid combination; prints the failed,
        reassigned and asynchronously-processing workload lists on success.
        """
        client = self.get_client()

        if not (parsed_args.workload_ids or parsed_args.old_tenant_ids or parsed_args.new_tenant_id or parsed_args.user_id or parsed_args.map_file):
            raise exceptions.CommandError("Please provide required parameters")

        # A map file is an alternative to all other selection parameters.
        if parsed_args.map_file and (
            parsed_args.workload_ids or parsed_args.old_tenant_ids or parsed_args.new_tenant_id or parsed_args.user_id
        ):
            raise exceptions.CommandError(
                "Please provide only file or other required parameters."
            )

        if parsed_args.workload_ids and parsed_args.old_tenant_ids:
            # Typo fix in message: "onle" -> "only".
            raise exceptions.CommandError(
                "Please provide only one parameter"
                " either --workload-ids or --old_tenant_ids"
            )

        if (parsed_args.workload_ids or parsed_args.old_tenant_ids) and (parsed_args.new_tenant_id is None):
            raise exceptions.CommandError("Please provide new_tenant_id.")

        if (parsed_args.workload_ids or parsed_args.old_tenant_ids) and (parsed_args.user_id is None):
            raise exceptions.CommandError("Please provide user_id.")

        if parsed_args.map_file is not None:
            # Bug fix: argparse's type=open already returned an open file
            # handle, so read from it directly — the previous code passed the
            # handle to open() again, which raises TypeError.
            with parsed_args.map_file as m_file:
                data = yaml.load(m_file, Loader=yaml.SafeLoader)
            # Bug fix: the file is valid only when it is a mapping that
            # carries "reassign_mappings"; the previous condition was
            # inverted and rejected exactly the valid files.
            if not isinstance(data, dict) or not data.get("reassign_mappings"):
                message = (
                    "File content is not in required yaml format, "
                    + "Please provide require data in appropriate format."
                )
                raise exceptions.CommandError(message)
            parsed_args.map_file = data

        if parsed_args.workload_ids:
            if parsed_args.source_btt and len(parsed_args.source_btt) > 1:
                raise exceptions.CommandError(" Only single --source-btt is allowed when --workload-ids are provided")

            if parsed_args.source_btt_all:
                raise exceptions.CommandError(" --source-btt-all is not allowed when --workload-ids are provided")

            for workload_id in parsed_args.workload_ids:
                try:
                    uuid.UUID(workload_id)
                except ValueError:
                    raise exceptions.CommandError(" Invalid --workload-ids: {}".format(workload_id))

        # Bug fix: the parser defines new_tenant_id (singular); the previous
        # code read the nonexistent attribute new_tenant_ids and raised
        # AttributeError whenever --source-btt-all was supplied.
        if parsed_args.source_btt_all and not (parsed_args.old_tenant_ids and parsed_args.new_tenant_id):
            raise exceptions.CommandError(" --old_tenant_ids and --new_tenant_id are required when --source-btt-all is provided")

        if parsed_args.source_btt:
            for source_btt in parsed_args.source_btt:
                try:
                    uuid.UUID(source_btt)
                except ValueError:
                    raise exceptions.CommandError(" Invalid --source-btt: {}".format(source_btt))

        result = client.reassign_workloads(parsed_args)

        # prints failed to reassign workloads
        if result["failed_workloads"]:
            print('Failed to reassign following workloads.')
            utils.print_data_vertically([result["failed_workloads"]], ["Failed_Workloads"])

        # prints successful reassigned workload list
        if result['reassigned_workloads']:
            print('Following workloads are successfully reassigned')
            self.produce_output(parsed_args, ['Name', 'ID', 'ProjectId', 'UserId'], [(wl['name'], wl['id'], wl['project_id'], wl['user_id']) for wl in result['reassigned_workloads']])

        # prints reassigning workload list for the job
        if result['jobid_list']:
            print("Track progress of reassigning workload which were not part of the current cloud using cmd: workloadmgr job-detail-show <job-id>")
            self.produce_output(parsed_args, ['JOB-ID'], [(jobid,) for jobid in result['jobid_list']])

        # this returning value needed to avoid error at the end
        return [(), ()]


class AddSettings(WorkloadCommand, show.ShowOne):
    """Add workload settings"""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--setting",
            metavar="<key=key-name>",
            action="append",
            dest="settings",
            required=True,
            help="Required, Specify a key value pairs to include in the settings "
            "Specify option multiple times to include multiple settings. "
            "key=value",
        )

    def take_action(self, parsed_args):
        """Parse `key=value[,key=value...]` pairs and push them to the service.

        Later occurrences of the same key overwrite earlier ones. Raises
        CommandError when a pair has no '=' separator.
        """
        client = self.get_client()
        settings = {}
        for settings_str in parsed_args.settings:
            err_msg = (
                "Invalid settings argument '%s'. settings arguments must be of the "
                "form --setting <key=value>" % settings_str
            )

            for kv_str in settings_str.split(","):
                try:
                    k, v = kv_str.split("=", 1)
                except ValueError:
                    raise exceptions.CommandError(err_msg)

                # The previous if/else (assignment vs. setdefault) performed
                # the same store in both branches — a plain assignment is
                # exactly equivalent.
                settings[k] = v

        settings = client.settings(settings)
        columns = list(settings.keys())
        data = osc_utils.get_dict_properties(settings, columns)
        return columns, data


class WorkloadEnableScheduler(WorkloadCommand):
    """enables workloads' scheduler"""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--workloadids",
            metavar="<workloadid>",
            action="append",
            dest="workloadids",
            required=True,
            help="Required atleast one workloadid ,"
            "Specify an ID of the workload whose scheduler enables. "
            "Specify option multiple times to include multiple workloads."
            " --workloadids <workloadid> --workloadids <workloadid>",
        )

    def take_action(self, parsed_args):
        """Resume the job scheduler on each listed workload.

        Raises CommandError when none (or only some) of the given ids could
        be resumed, listing which ids were invalid or absent.
        """
        client = self.get_client()
        search_opts = {"workload_list": parsed_args.workloadids}
        workload_objs = client.list(search_opts=search_opts) or []
        resumed_list = []
        for workload_obj in workload_objs:
            # The previous try/except here only re-raised the exception
            # unchanged, so it was removed.
            workload_obj.update(
                workload_id=workload_obj.id,
                name=None,
                description=None,
                instances=None,
                jobschedule={'enabled': '1'},  # '1' == scheduler enabled
                metadata=None,
                is_admin_dashboard=True,
            )
            resumed_list.append(workload_obj.id)
        if not resumed_list:
            raise exceptions.CommandError(
                "\nNo workloads' schedulers are resumed \n"
                "Provided workloadids are either absent or invalid"
            )
        elif len(resumed_list) < len(parsed_args.workloadids):
            # Partial success: report which ids the service did not return.
            invalid_ids = set(parsed_args.workloadids) - set(resumed_list)
            msg = (
                "\nOut of provided workloadids: \n"
                " Resumed ids: {} \n"
                " Invalid or absent ids: {}".format(
                    resumed_list, list(invalid_ids)
                )
            )
            raise exceptions.CommandError(msg)


class WorkloadDisableScheduler(WorkloadCommand):
    """disables workloads' scheduler"""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument(
            "--workloadids",
            metavar="<workloadid>",
            action="append",
            dest="workloadids",
            required=True,
            help="Required atleast one workloadid ,"
            "Specify an ID of the workload whose scheduler disables. "
            "Specify option multiple times to include multiple workloads."
            " --workloadids <workloadid> --workloadids <workloadid>",
        )

    def take_action(self, parsed_args):
        """Pause the job scheduler on each listed workload.

        Raises CommandError when none (or only some) of the given ids could
        be paused, listing which ids were invalid or absent.
        """
        client = self.get_client()
        search_opts = {"workload_list": parsed_args.workloadids}
        workload_objs = client.list(search_opts=search_opts) or []
        paused_list = []
        for workload_obj in workload_objs:
            # The previous try/except here only re-raised the exception
            # unchanged, so it was removed.
            workload_obj.pause()
            paused_list.append(workload_obj.id)
        if not paused_list:
            raise exceptions.CommandError(
                "\nNo workloads' schedulers are paused \n"
                "Provided workloadids are either absent or invalid"
            )
        elif len(paused_list) < len(parsed_args.workloadids):
            # Partial success: report which ids the service did not return.
            invalid_ids = set(parsed_args.workloadids) - set(paused_list)
            msg = (
                "\nOut of provided workloadids: \n"
                " Paused ids: {} \n"
                " Invalid or absent ids: {}".format(
                    paused_list, list(invalid_ids)
                )
            )
            raise exceptions.CommandError(msg)


class ProtectedVMs(WorkloadCommand):
    """Lists vms protected by tenant."""

    def take_action(self, parsed_args):
        # Print the id of every VM the current tenant protects, or a
        # friendly message when there are none.
        response = self.get_client().get_protected_vms()
        protected = response.get('protected_vms')
        if protected:
            utils.print_list(protected, ["ID"])
        else:
            print("No protected VM's found.")


class TenantUsage(WorkloadCommand):
    """Returns storage used and vms protected by tenants."""

    def take_action(self, parsed_args):
        client = self.get_client()
        usage = client.get_tenants_usage()
        utils.print_dict(
            usage["global_usage"], dict_property="Global Usage", dict_value="Values"
        )
        # Key each tenant's usage dict by its name, falling back to the
        # project id when "tenant_name" is absent; pop() also removes the
        # name from the values that get printed.
        per_tenant = {}
        for project_id, tenant_usage in usage["tenants_usage"].items():
            label = tenant_usage.pop("tenant_name", project_id)
            per_tenant[label] = tenant_usage
        utils.print_dict(per_tenant, dict_property="Project Name", dict_value="Usage")


class DisableWorkloadService(WorkloadCommand):
    """disable a workload service."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument("node_name", metavar="<node_name>", help="node name.")
        parser.add_argument(
            "--reason",
            metavar="<reason>",
            default=None,
            help="Optional reason for disabling workload service. (Default='admin user disabled at <current_datetime>')",
        )

    def take_action(self, parsed_args):
        # status=1 marks the workloads service disabled on the given node.
        node = parsed_args.node_name
        self.get_client().update_workloads_service(
            node, status=1, reason=parsed_args.reason
        )
        print('workloads service successfully disabled on node: {}'.format(node))


class EnableWorkloadService(WorkloadCommand):
    """enable a workload service."""

    @staticmethod
    def _add_arguments(parser):
        parser.add_argument("node_name", metavar="<node_name>", help="node name.")

    def take_action(self, parsed_args):
        # status=0 re-enables the workloads service on the given node.
        node = parsed_args.node_name
        self.get_client().update_workloads_service(node, status=0, reason=None)
        print('workloads service successfully enabled on node: {}'.format(node))