# Packaging-page artifact (package version 4.0.105) retained as a comment
# so this module parses cleanly.
# Copyright (c) 2014 TrilioData, Inc.
# All Rights Reserved.
from __future__ import print_function
import argparse
import os
import sys
import time
import ast
import json
import pickle
import yaml
from workloadmgrclient import exceptions
from workloadmgrclient import utils
from workloadmgrclient.openstack.common import strutils
from workloadmgrclient import timezone
# Absolute path to the default YAML template listing the OpenStack services
# to back up, resolved relative to this module's directory.
backup_template = os.path.abspath(
    (os.path.join(os.path.dirname(__file__), "../input-files/services_to_backup.yaml"))
)
def read_file(file_obj, eval=False):
    """Read the full contents of *file_obj*, always closing it afterwards.

    With ``eval=True`` the text is additionally parsed as a Python literal
    via ``ast.literal_eval``; a parse failure prints an error and exits the
    process.  An empty file raises ``exceptions.CommandError``.
    """
    try:
        contents = file_obj.read()
        if not len(contents):
            raise exceptions.CommandError("File %s is empty." % file_obj.name)
        if eval is True:
            try:
                contents = ast.literal_eval(contents)
            except Exception as ex:
                print("Error: " + ex.msg + "\nIn parsing " + file_obj.name)
                exit()
        return contents
    except Exception as ex:
        raise ex
    finally:
        file_obj.close()
# Default locations of the JSON/YAML input files shipped next to the client,
# all resolved relative to this module's directory.
_INPUT_FILES_DIR = os.path.join(os.path.dirname(__file__), "../input-files")

restore_json_path = os.path.abspath(
    os.path.join(_INPUT_FILES_DIR, "restore.json")
)
network_topology_restore_json_path = os.path.abspath(
    os.path.join(_INPUT_FILES_DIR, "network_topology_restore.json")
)
reassign_map_path = os.path.abspath(
    os.path.join(_INPUT_FILES_DIR, "workload_reassign_map_file.yaml")
)
def read_file(file_obj, eval=False):
    """Return the contents of *file_obj*, always closing it.

    :param file_obj: an open file-like object exposing ``read()`` and ``name``.
    :param eval: when True, parse the text as a Python literal with
        ``ast.literal_eval`` and return the resulting object.
    :raises exceptions.CommandError: if the file is empty.

    NOTE(review): this definition duplicates an identical ``read_file``
    earlier in this module; the duplicate should eventually be removed.
    """
    try:
        data = file_obj.read()
        if len(data) == 0:
            message = "File %s is empty." % file_obj.name
            raise exceptions.CommandError(message)
        if eval is True:
            try:
                data = ast.literal_eval(data)
            except Exception as ex:
                # str(ex) is safe for every exception type; the previous
                # ``ex.msg`` attribute only exists on SyntaxError and made
                # this handler itself crash for plain ValueError.
                print("Error: " + str(ex) + "\nIn parsing " + file_obj.name)
                # Exit non-zero on a parse failure (exit() returned status 0).
                sys.exit(1)
        return data
    finally:
        # The old ``except Exception as ex: raise ex`` wrapper reset the
        # traceback; exceptions now propagate untouched.
        file_obj.close()
def _poll_for_status(
    poll_fn, obj_id, action, final_ok_states, poll_period=5, show_progress=True
):
    """Block while an action is being performed, periodically printing
    progress.

    :param poll_fn: callable taking ``obj_id`` and returning an object with
        a ``status`` attribute (and optionally ``progress``).
    :param obj_id: identifier handed to ``poll_fn`` on every poll.
    :param action: human-readable verb used in the progress messages.
    :param final_ok_states: lowercase statuses that end polling successfully.
    :param poll_period: seconds to sleep between polls.
    :param show_progress: include the percent-complete figure in the output.
    """

    def print_progress(progress):
        if show_progress:
            msg = "\rInstance %(action)s... %(progress)s%% complete" % dict(
                action=action, progress=progress
            )
        else:
            msg = "\rInstance %(action)s..." % dict(action=action)
        sys.stdout.write(msg)
        sys.stdout.flush()

    print()
    while True:
        obj = poll_fn(obj_id)
        status = obj.status.lower()
        progress = getattr(obj, "progress", None) or 0
        if status in final_ok_states:
            print_progress(100)
            print("\nFinished")
            break
        elif status == "error":
            # Explicit mapping instead of the old ``% locals()``, which
            # silently depended on local variable names staying unchanged.
            print("\nError %(action)s instance" % {"action": action})
            break
        else:
            print_progress(progress)
            time.sleep(poll_period)
def _find_setting(cs, setting_name):
    """Look up a setting resource by name (or ID)."""
    manager = cs.settings
    return utils.find_resource(manager, setting_name)
def _find_workload_type(cs, workload_type_id):
    """Look up a workload_type resource by its ID."""
    manager = cs.workload_types
    return utils.find_resource(manager, workload_type_id)
def _find_workload(cs, workload_id):
    """Look up a workload resource by its ID."""
    manager = cs.workloads
    return utils.find_resource(manager, workload_id)
def _find_snapshot(cs, snapshot_id):
    """Look up a workload snapshot resource by its ID."""
    manager = cs.snapshots
    return utils.find_resource(manager, snapshot_id)
def _find_restore(cs, restore_id):
    """Look up a snapshot restore resource by its ID."""
    manager = cs.restores
    return utils.find_resource(manager, restore_id)
def _find_testbubble(cs, testbubble_id):
    """Look up a snapshot testbubble resource by its ID."""
    manager = cs.testbubbles
    return utils.find_resource(manager, testbubble_id)
def _find_transfer(cs, transfer):
    """Look up a transfer resource by name or ID."""
    manager = cs.transfers
    return utils.find_resource(manager, transfer)
def _translate_keys(collection, convert):
    """Mirror ``item._info[src]`` onto attribute ``dst`` for each item.

    For every ``(src, dst)`` pair in *convert*, the destination attribute is
    only added when the source attribute already exists and the destination
    does not.  Membership is checked against a snapshot of the item's
    attributes taken before any assignment.
    """
    for item in collection:
        present = set(item.__dict__)
        for src, dst in convert:
            if src in present and dst not in present:
                setattr(item, dst, item._info[src])
def _translate_workload_snapshot_keys(collection):
    """Expose the server-side ``workloadId`` field as ``workload_Id``."""
    _translate_keys(collection, [("workloadId", "workload_Id")])
def _extract_metadata(args):
    """Build a metadata dict from the repeated ``--metadata`` CLI options.

    Each entry is either ``key=value`` or a bare ``key``; a bare key maps to
    ``None`` (used for unsetting a key on the server).
    """
    metadata = {}
    for entry in args.metadata:
        key, sep, value = entry.partition("=")
        metadata[key] = value if sep else None
    return metadata
def _print_type_and_extra_specs_list(vtypes):
    """Print volume types together with their extra-spec key/value pairs."""
    utils.print_list(
        vtypes,
        ["ID", "Name", "extra_specs"],
        {"extra_specs": _print_type_extra_specs},
    )
def do_endpoints(cs, args):
    """Discover endpoints that get returned from the authenticate services."""
    catalog = cs.client.service_catalog.catalog
    if cs.client.auth_version == 3:
        # Keystone v3: one catalog entry per service, each carrying a list
        # of interface-typed endpoints (public / internal / admin).
        for e in catalog["token"]["catalog"]:
            e_dict = {}
            for end in e["endpoints"]:
                if end["interface"] == "public":
                    e_dict["publicUrl"] = end["url"]
                elif end["interface"] == "internal":
                    e_dict["internalUrl"] = end["url"]
                elif end["interface"] == "admin":
                    e_dict["adminUrl"] = end["url"]
                e_dict["region"] = end["region"]
                e_dict["id"] = end["id"]
            # NOTE(review): this overwrites the endpoint id stored just above
            # with the service id — confirm which of the two is intended.
            e_dict["id"] = e["id"]
            utils.print_dict(e_dict, e["name"])
    else:
        # Keystone v2: print the first endpoint of every catalog entry.
        for e in catalog["access"]["serviceCatalog"]:
            utils.print_dict(e["endpoints"][0], e["name"])
def do_credentials(cs, args):
    """Show user credentials returned from auth."""
    catalog = cs.client.service_catalog.catalog
    if cs.client.auth_version == 3:
        token = catalog["token"]
        utils.print_dict(token["user"], "User Credentials")
        # Drop the bulky sub-sections before printing the token itself.
        del token["user"]
        del token["catalog"]
        utils.print_dict(token, "Token")
    else:
        access = catalog["access"]
        utils.print_dict(access["user"], "User Credentials")
        utils.print_dict(access["token"], "Token")
# Resource names recognised by the quota-related commands.
_quota_resources = ["volumes", "snapshots", "gigabytes"]
def _print_type_extra_specs(vol_type):
    """Return the extra specs of *vol_type*, or "N/A" if it no longer exists."""
    specs = "N/A"
    try:
        specs = vol_type.get_keys()
    except exceptions.NotFound:
        pass
    return specs
def _print_workload_policy(policy):
    """Print a workload policy, its field values and project assignments."""
    field_values = {
        fv["policy_field_name"]: fv["value"] for fv in policy.field_values
    }
    assigned_projects = policy.policy_assignments
    utils.print_object(policy, ["ID", "Name", "Description", "Status"])
    if field_values:
        utils.print_dict(
            field_values, dict_property="Policy Fields", dict_value="Values"
        )
    if len(assigned_projects):
        utils.print_list(assigned_projects, ["project_id", "project_name"])
'''@utils.arg('--metadata',
metavar="<key=value>",
action='append',
dest='metadata',
default=[],
help= "Specify a key value pairs to include in the workload_type metadata "
"Specify option multiple times to include multiple keys. "
"key=value")
@utils.arg('--id', metavar='<id>',
help='Optional workload_type uuid. (Default=None)',
default=None)
@utils.arg('--display-name', metavar='<display-name>',
help='Optional workload_type name. (Default=None)',
default=None)
@utils.arg('--display-description', metavar='<display-description>',
help='Optional workload_type description. (Default=None)',
default=None)
@utils.arg('--is-public',
type=strutils.bool_from_string, metavar='{True,False}',
help='Make workload_type accessible to the public.',
default=False)
@utils.service_type('workloads')
def do_workload_type_create(cs, args):
"""Creates a workload_type."""
metadata = {}
for metadata_str in args.metadata:
err_msg = ("Invalid metadata argument '%s'. metadata arguments must be of the "
"form --metadata <key=value>" % metadata_str)
try:
k, v = metadata_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in metadata:
metadata[k] = v
else:
metadata.setdefault(k, v)
cs.workload_types.create(metadata,
args.id,
args.display_name,
args.display_description,
args.is_public)'''
'''@utils.arg('workload_type_id', metavar='<workload_type_id>', help='ID of the workload_type.')
@utils.arg('--metadata',
metavar="<key=value>",
action='append',
dest='metadata',
default=[],
help= "Specify a key value pairs to include in the workload_type metadata "
"Specify option multiple times to include multiple keys. "
"key=value")
@utils.service_type('workloads')
def do_workload_type_discover_instances(cs, args):
"""Discover instances of a workload_type."""
metadata = {}
for metadata_str in args.metadata:
err_msg = ("Invalid metadata argument '%s'. metadata arguments must be of the "
"form --metadata <key=value>" % metadata_str)
for kv_str in metadata_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in metadata:
metadata[k] = v
else:
metadata.setdefault(k, v)
instances = cs.workload_types.discover_instances(args.workload_type_id, metadata)
utils.print_dict(instances,wrap=100)'''
@utils.arg(
    "workload_type_id", metavar="<workload_type_id>", help="ID of the workload_type."
)
@utils.service_type("workloads")
def do_workload_type_show(cs, args):
    """Show details about a workload_type."""
    workload_type = _find_workload_type(cs, args.workload_type_id)
    details = dict(workload_type._info)
    # The HATEOAS links are noise on the CLI output.
    details.pop("links", None)
    utils.print_dict(details, wrap=100)
@utils.service_type("workloads")
def do_workload_type_list(cs, args):
    """List all the workload_type."""
    utils.print_list(cs.workload_types.list(), ["ID", "Name"])
'''@utils.arg('workload_type_id', metavar='<workload_type_id>',
help='ID of the workload_type to delete.')
@utils.service_type('workloads')
def do_workload_type_delete(cs, args):
"""Remove a workload."""
workload_type = _find_workload_type(cs, args.workload_type_id)
workload_type.delete()'''
@utils.arg("vm_id", metavar="<vm_id>", help="ID of the VM")
@utils.arg("file_path", metavar="<file_path>", help="File path")
@utils.arg(
    "--snapshotids",
    metavar="<snapshotid>",
    action="append",
    dest="snapshotids",
    default=[],
    help="Search only in specified snapshot ids "
    "snapshot-id: include the instance with this UUID ",
)
@utils.arg(
    "--end_filter",
    metavar="end_filter",
    help="Displays last snapshots, example , last 10 snapshots, "
    "default 0 means displays all snapshots",
    default=0,
)
@utils.arg(
    "--start_filter",
    metavar="start_filter",
    help="Displays snapshots starting from , example , snapshot starting from 5, "
    "default 0 means starts from last snapshot",
    default=0,
)
@utils.arg(
    "--date_from",
    metavar="<date_from>",
    help="From date in format 'YYYY-MM-DDTHH:MM:SS' eg 2016-10-10T00:00:00,"
    "If don't specify time then it takes 00:00 by default",
    default=None,
)
@utils.arg(
    "--date_to",
    metavar="<date_to>",
    help="To date in format 'YYYY-MM-DDTHH:MM:SS'(defult is current day),"
    "Specify HH:MM:SS to get snapshots within same day inclusive/exclusive results for date_from and date_to",
    default=None,
)
@utils.service_type("workloads")
def do_filepath_search(cs, args):
    """File path search"""
    # Kick off the server-side (asynchronous) search.
    search = cs.file_search.search(
        args.vm_id,
        args.file_path,
        args.snapshotids,
        args.start_filter,
        args.end_filter,
        args.date_from,
        args.date_to,
    )
    print("Please wait for few seconds to get results")
    print("Your search ID: %s " % search.id)
    # Poll until the search completes or errors out.
    # NOTE(review): there is no timeout — a search stuck in any state other
    # than "error"/"completed" keeps this loop running forever.
    while 1:
        search = cs.file_search.get(search.id)
        if search.status == "error":
            err_msg = "Error:" + search.error_msg
            raise exceptions.CommandError(err_msg)
        if search.status == "completed":
            columns = ["ID", "Status", "Filepath", "Vm_id"]
            utils.print_object(search, columns)
            # json_resp carries a Python-literal-encoded list of snapshots.
            snapshots = ast.literal_eval(search.json_resp)
            for snapshot in snapshots:
                utils.print_file_search_dict(snapshot)
            break
        time.sleep(10)
@utils.arg(
    "--instance",
    metavar="<instance-id=instance-uuid>",
    action="append",
    dest="instances",
    required=True,
    help="Required to set atleast one instance, Specify an instance to include in the workload. "
    "Specify option multiple times to include multiple instances. "
    "instance-id: include the instance with this UUID ",
)
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional workload name. (Default=None)",
    default=None,
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional workload description. (Default=None)",
    default=None,
)
@utils.arg(
    "--workload-type-id",
    metavar="<workload-type-id>",
    help="Optional Workload Type ID, (Default=Serial)",
    default=None,
)
@utils.arg(
    "--source-platform",
    metavar="<source-platform>",
    help="Optional workload source platform (Default=None)",
    default=None,
)
@utils.arg(
    "--jobschedule",
    metavar="<key=key-name>",
    action="append",
    dest="jobschedule",
    default=[],
    help="Specify following key value pairs for jobschedule "
    "Specify option multiple times to include multiple keys. "
    "If don't specify timezone, then by default it takes your local machine timezone"
    " 'start_date' : '06/05/2014' "
    " 'end_date' : '07/15/2014' "
    " 'start_time' : '2:30 PM' "
    " 'interval' : '1 hr' "
    " 'retention_policy_type' : 'Number of Snapshots to Keep' or 'Number of days to retain Snapshots' "
    " 'retention_policy_value' : '30' "
    " 'timezone: '' "
    " , "
    "For example --jobschedule start_date='mm/dd/yy' --jobschedule enabled=True"
    "In order to enable/disable scheduler pass enabled True / enabled False",
)
@utils.arg(
    "--metadata",
    metavar="<key=key-name>",
    action="append",
    dest="metadata",
    default=[],
    help="Specify a key value pairs to include in the workload_type metadata "
    "Specify option multiple times to include multiple keys. "
    "key=value",
)
# @utils.arg("--workloadids",
#            metavar="<workloadid>",
#            action="append",
#            dest="workloadids",
#            default=[],
#            help= "Specify workload ids "
#            " for creating composite workload "
#            "--workloadids <workloadid> --workloadids <workloadid>")
@utils.arg("--policy-id", metavar="<policy_id>", help="Policy ID", default=None)
@utils.service_type("workloads")
def do_workload_create(cs, args):
    """Creates a workload."""
    # Parse repeated --instance options into [{"instance-id": uuid}, ...];
    # only the "instance-id" key is accepted.
    instances = []
    for instance_str in args.instances:
        err_msg = (
            "Invalid instance argument '%s'. Instance arguments must be of the "
            "form --instance <instance-id=instance-uuid>" % instance_str
        )
        instance_info = {"instance-id": ""}
        for kv_str in instance_str.split(","):
            try:
                k, v = kv_str.split("=", 1)
            except ValueError as e:
                raise exceptions.CommandError(err_msg)
            if k in instance_info:
                instance_info[k] = v
            else:
                raise exceptions.CommandError(err_msg)
        if not instance_info["instance-id"]:
            raise exceptions.CommandError(err_msg)
        instances.append(instance_info)
    # Parse repeated --jobschedule options into a flat dict.
    jobschedule = {}
    for jobschedule_str in args.jobschedule:
        err_msg = (
            "Invalid jobschedule argument '%s'. jobschedule arguments must be of the "
            "form --jobschedule <key=value>" % jobschedule_str
        )
        for kv_str in jobschedule_str.split(","):
            try:
                k, v = kv_str.split("=", 1)
            except ValueError as e:
                raise exceptions.CommandError(err_msg)
            if k in jobschedule:
                # NOTE(review): this pytz validation only runs when a key is
                # supplied twice — it looks like it was meant to fire for
                # k == "timezone" instead; confirm intended behaviour.
                try:
                    import pytz

                    pytz.timezone(v)
                except:
                    raise exceptions.CommandError("Specify valid timeone " + v)
                jobschedule[k] = v
            else:
                jobschedule.setdefault(k, v)
    if len(jobschedule) >= 1 and "enabled" not in jobschedule:
        raise exceptions.CommandError(
            "Please specify --jobschedule enabled option in order to set scheduler for this workload"
        )
    else:
        if "timezone" in jobschedule:
            # NOTE(review): re-assigns the last parsed key/value pair (k, v)
            # — effectively a no-op; the user-supplied timezone is kept.
            jobschedule[k] = v
        else:
            # Default to the local machine's timezone.
            jobschedule.setdefault("timezone", timezone.get_localzone().zone)
    # Parse repeated --metadata options into a flat dict.
    metadata = {}
    for metadata_str in args.metadata:
        err_msg = (
            "Invalid metadata argument '%s'. metadata arguments must be of the "
            "form --metadata <key=value>" % metadata_str
        )
        for kv_str in metadata_str.split(","):
            try:
                k, v = kv_str.split("=", 1)
            except ValueError as e:
                raise exceptions.CommandError(err_msg)
            if k in metadata:
                metadata[k] = v
            else:
                metadata.setdefault(k, v)
    if args.policy_id is not None:
        metadata["policy_id"] = args.policy_id
    # Disabled composite-workload handling, parked in a string literal.
    """
    workloadids = []
    err_msg = ("Composite workload type must specify workload-ids ")
    workload_type = _find_workload_type(cs, args.workload_type_id)
    if workload_type.name == 'Composite':
        if len(args.workloadids) == 0:
            raise exceptions.CommandError(err_msg)
    workload_info = []
    composite = {}
    workload_data = []
    for ids in args.workloadids:
        workload = _find_workload(cs, ids)
        d = {}
        d["text"] = str(workload.name)
        d["value"] = str(workload.id)
        workload_info.append(d)
        inner_list = []
        d = {}
        d["type"] = "workload"
        info = dict()
        info.update(workload._info)
        info['jobschedule']['enabled'] = str(info['jobschedule']['enabled'])
        info['interval'] = str(info['interval'])
        d['data'] = ast.literal_eval(json.dumps(info))
        inner_list.append(d)
        dt = {}
        dt["flow"] = "serial"
        dt['children'] = inner_list
        workload_data.append(dt)
    d = {}
    d["flow"] = "serial"
    d["children"] = workload_data
    composite['compworkloads'] = json.dumps(workload_info)
    composite['workloadgraph'] = json.dumps(d)
    if workload_type.name == 'Composite':
        metadata = composite
    """
    workload = cs.workloads.create(
        args.display_name,
        args.display_description,
        args.workload_type_id,
        args.source_platform,
        instances,
        jobschedule,
        metadata,
    )
    columns = ["ID", "Name", "Status"]
    utils.print_object(workload, columns)
@utils.arg("workload_id", metavar="<workload_id>", help="ID of the workload.")
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional workload name. (Default=None)",
    default=None,
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional workload description. (Default=None)",
    default=None,
)
@utils.arg(
    "--instance",
    metavar="<instance-id=instance-uuid>",
    action="append",
    dest="instances",
    default=[],
    help="Specify an instance to include in the workload. "
    "Specify option multiple times to include multiple instances. "
    "instance-id: include the instance with this UUID ",
)
@utils.arg(
    "--jobschedule",
    metavar="<key=key-name>",
    action="append",
    dest="jobschedule",
    default=[],
    help="Specify following key value pairs for jobschedule "
    "Specify option multiple times to include multiple keys. "
    "If don't specify timezone, then by default it takes your local machine timezone"
    " 'start_date' : '06/05/2014' "
    " 'end_date' : '07/15/2014' "
    " 'start_time' : '2:30 PM' "
    " 'interval' : '1 hr' "
    " 'retention_policy_type' : 'Number of Snapshots to Keep' or 'Number of days to retain Snapshots' "
    " 'retention_policy_value' : '30' ",
)
@utils.arg(
    "--metadata",
    metavar="<key=key-name>",
    action="append",
    dest="metadata",
    default=[],
    help="Specify a key value pairs to include in the workload_type metadata "
    "Specify option multiple times to include multiple keys. "
    "key=value",
)
@utils.arg("--policy-id", metavar="<policy_id>", help="Policy ID", default=None)
@utils.service_type("workloads")
def do_workload_modify(cs, args):
    """Modify a workload."""
    workload = _find_workload(cs, args.workload_id)
    # Parse repeated --instance options; only "instance-id" is accepted.
    instances = []
    for instance_str in args.instances:
        err_msg = (
            "Invalid instance argument '%s'. Instance arguments must be of the "
            "form --instance <instance-id=instance-uuid>" % instance_str
        )
        instance_info = {"instance-id": ""}
        for kv_str in instance_str.split(","):
            try:
                k, v = kv_str.split("=", 1)
            except ValueError as e:
                raise exceptions.CommandError(err_msg)
            if k in instance_info:
                instance_info[k] = v
            else:
                raise exceptions.CommandError(err_msg)
        if not instance_info["instance-id"]:
            raise exceptions.CommandError(err_msg)
        instances.append(instance_info)
    # Parse repeated --jobschedule options into a flat dict.
    jobschedule = {}
    for jobschedule_str in args.jobschedule:
        err_msg = (
            "Invalid jobschedule argument '%s'. jobschedule arguments must be of the "
            "form --jobschedule <key=value>" % jobschedule_str
        )
        for kv_str in jobschedule_str.split(","):
            try:
                k, v = kv_str.split("=", 1)
            except ValueError as e:
                raise exceptions.CommandError(err_msg)
            if k in jobschedule:
                # NOTE(review): same oddity as do_workload_create — the pytz
                # validation only runs for duplicated keys; confirm intent.
                try:
                    import pytz

                    pytz.timezone(v)
                except:
                    raise exceptions.CommandError("Specify valid timeone " + v)
                jobschedule[k] = v
            else:
                jobschedule.setdefault(k, v)
    if len(jobschedule) >= 1 and "enabled" not in jobschedule:
        raise exceptions.CommandError(
            "Please specify --jobschedule enabled option in order to set scheduler for this workload"
        )
    else:
        if "timezone" in jobschedule:
            # NOTE(review): re-assigns the last parsed (k, v) pair — a no-op
            # in practice; the user-supplied timezone is kept as-is.
            jobschedule[k] = v
        else:
            # Default to the local machine's timezone.
            jobschedule.setdefault("timezone", timezone.get_localzone().zone)
    # Parse repeated --metadata options into a flat dict.
    metadata = {}
    for metadata_str in args.metadata:
        err_msg = (
            "Invalid metadata argument '%s'. metadata arguments must be of the "
            "form --metadata <key=value>" % metadata_str
        )
        for kv_str in metadata_str.split(","):
            try:
                k, v = kv_str.split("=", 1)
            except ValueError as e:
                raise exceptions.CommandError(err_msg)
            if k in metadata:
                metadata[k] = v
            else:
                metadata.setdefault(k, v)
    if args.policy_id is not None:
        metadata["policy_id"] = args.policy_id
    workload.update(
        workload.id,
        args.display_name,
        args.display_description,
        instances,
        jobschedule,
        metadata,
    )
@utils.arg("workload_id", metavar="<workload_id>", help="ID of the workload.")
@utils.service_type("workloads")
def do_workload_show(cs, args):
    """
    Show details about a workload.
    To get detailed information use --verbose\n
    Example command: workloadmgr --verbose workload-show <workload-id>\n
    """
    workload = _find_workload(cs, args.workload_id)
    info = dict()
    info.update(workload._info)
    if "links" in info:
        info.pop("links")
    # NOTE(review): if the response lacks "metadata"/"jobschedule" keys, the
    # two variables below stay unbound and the --verbose branch raises
    # NameError — confirm the server always returns them.
    if "metadata" in info:
        metadata = info.pop("metadata")
    if "jobschedule" in info:
        jobschedule = info.pop("jobschedule")
        info["jobschedule"] = jobschedule["enabled"]
        if "timezone" in jobschedule:
            # Convert the appliance-local schedule time into the client's
            # local timezone for display.
            date_time = utils.get_local_time(
                jobschedule["start_date"] + " " + jobschedule["start_time"],
                "%m/%d/%Y %I:%M %p",
                "%m/%d/%Y %I:%M %p",
                timezone.get_localzone().zone,
                jobschedule["appliance_timezone"],
            ).split(" ")
            jobschedule["start_date"] = date_time[0]
            jobschedule["start_time"] = date_time[1] + " " + date_time[2]
    # Per-instance metadata is dropped from the summary view.
    for i, val in enumerate(info["instances"]):
        removed_metadata = info["instances"][i].pop("metadata")
    utils.print_dict(info, wrap=100)
    if args.verbose == True:
        # Topology/graph blobs are too large to print sensibly.
        if "topology" in metadata:
            metadata.pop("topology")
        if "workloadgraph" in metadata:
            metadata.pop("workloadgraph")
        utils.print_dict(metadata, "Metadata")
        utils.print_dict(jobschedule, "Jobschedule")
"""@utils.arg('workload_id', metavar='<workload_id>', help='ID of the workload.')
@utils.service_type('workloads')
def do_workload_get_workflow(cs, args):
workflow = cs.workloads.get_workflow(args.workload_id)
utils.print_dict(workflow, wrap=100) """
'''@utils.arg('workload_id', metavar='<workload_id>', help='ID of the workload.')
@utils.service_type('workloads')
def do_workload_get_topology(cs, args):
"""Show topology of a workload."""
topology = cs.workloads.get_topology(args.workload_id)
utils.print_dict(topology, wrap=100)
#Disabling discover_instance from CLI
#@utils.arg('workload_id', metavar='<workload_id>', help='ID of the workload.')
#@utils.service_type('workloads')
#def do_workload_discover_instances(cs, args):
# """Show instances of a workload."""
# instances = cs.workloads.discover_instances(args.workload_id)
# if 'instances' in instances:
# instances = instances.pop('instances')
#
# inst = []
# for item in instances:
# d = dict()
# d['Name'] = str(item['vm_name'])
# datastores = ast.literal_eval(item['vm_metadata']['datastores'])
# for ds in datastores:
# if ds['_type'] == 'Datastore':
# d['Datastore'] = ds['name']
# d['Resource Pool'] = ast.literal_eval(item['vm_metadata']['resourcepool'])['name']
# d['VM Folder'] = ast.literal_eval(item['vm_metadata']['parent'])['name']
# clusters = ast.literal_eval(item['vm_metadata']['cluster'])
# for cl in clusters:
# if cl['_type'] == 'Cluster':
# d['Cluster'] = cl['name']
# hosts = ast.literal_eval(item['vm_metadata']['host'])
# for hs in hosts:
# if hs['_type'] == 'Host':
# d['Host'] = hs['name']
# networks = ast.literal_eval(item['vm_metadata']['networks'])
# for nt in networks:
# if nt['_type'] == 'Network':
# d['Network'] = nt['name']
# d['VM Power State'] = str(item['vm_power_state'])
# inst.append(d)
#
# utils.print_data_vertically(inst,['Instances','Value']) '''
@utils.arg(
    "--all",
    type=strutils.bool_from_string,
    metavar="{True,False}",
    help="List all workloads of all the projects(valid for admin user only)",
    default=False,
)
@utils.arg(
    "--nfsshare",
    metavar="<nfsshare>",
    help="List all workloads of nfsshare (valid for admin user only)",
    default=None,
)
@utils.service_type("workloads")
def do_workload_list(cs, args):
    """List all the workloads of current project."""
    opts = {"all_workloads": args.all, "nfs_share": args.nfsshare}
    utils.print_list(
        cs.workloads.list(search_opts=opts),
        ["ID", "Name", "Project_ID", "Workload_Type_ID", "Status", "Created_at"],
    )
@utils.arg(
    "--project_id",
    metavar="<project_id>",
    default=None,
    help="List workloads belongs to given project only.",
)
@utils.service_type("workloads")
def do_workload_get_importworkloads_list(cs, args):
    """Get list of workloads to be imported."""
    importable = cs.workloads.get_importworkloads_list(project_id=args.project_id)
    utils.print_list(importable, ["ID", "Name", "Workload_Type_ID", "Project_ID"])
@utils.arg(
    "--workloadids",
    metavar="<workloadid>",
    action="append",
    dest="workloadids",
    default=[],
    help="Specify workload ids "
    " to import only specified workloads "
    "--workloadids <workloadid> --workloadids <workloadid>",
)
# @utils.arg('--upgrade', metavar='<upgrade>', type=strutils.bool_from_string, default=True, help="Specify True or False. Default is True")
@utils.service_type("workloads")
def do_workload_importworkloads(cs, args):
    """Import all workload records from backup store."""
    result = cs.workloads.importworkloads(args.workloadids)
    utils.print_list(result["imported_workloads"], ["ID", "Name", "Workload_Type_ID"])
    failed = result["failed_workloads"]
    if len(failed) > 0:
        # Show the failures, then a hint about how to recover from them.
        utils.print_data_vertically([failed], ["Failed_Workloads"])
        print(
            "\nPlease verify failed workload id's are valid.\n"
            + "If workload project not exist in current cloud then use workload_reassign.\n"
        )
@utils.service_type("workloads")
def do_workload_get_nodes(cs, args):
    """Get all the nodes of a workload manager"""
    columns = ["node", "id", "version", "ipaddress", "is_controller", "status"]
    utils.print_list(cs.workloads.get_nodes()["nodes"], columns)
'''@utils.arg('--hostname', metavar='<host>',
help='Name of the compute node',
default='all')
@utils.arg('--ip', metavar='<ip>', help='IP address of compute node', default='all')
def do_status_contego(cs, args):
"""Get contego service status running on a compute node"""
services = cs.workloads.get_contego_status(args.hostname, args.ip)
contego_status_all = []
for key,val in services.items():
val['host'] = key
contego_status_all.append(val)
utils.print_list(contego_status_all, ['host', 'name', 'status','id', 'running_state'])'''
@utils.arg("ip", metavar="<ip>", help="IP or hostname of node to remove")
@utils.service_type("workloads")
def do_workload_remove_node(cs, args):
    """Remove workload node by ipaddress / hostname"""
    target = args.ip
    cs.workloads.remove_node(target)
'''
@utils.arg('ip', metavar='<ip>', help='IP of node to add')
@utils.service_type('workloads')
def do_workload_add_node(cs, args):
"""Add workload node by ipaddress"""
cs.workloads.add_node(args.ip)'''
@utils.service_type("workloads")
def do_workload_get_storage_usage(cs, args):
    """Get total storage used by workload manager"""

    def _fmt(val):
        # Render numeric byte counts with a human-readable approximation;
        # everything else is just stringified.  Exact-type check (not
        # isinstance) preserves the original behaviour for bool values.
        if type(val).__name__ in ("int", "float"):
            val = str(val) + " Bytes or Approx ( " + utils.bytes_fmt(val) + " )"
        return str(val)

    storage_usage = cs.workloads.get_storage_usage()
    # One dict per storage backend.
    for storage in storage_usage["storage_usage"]:
        for key, val in storage.items():
            storage[key] = _fmt(val)
        utils.print_dict(storage, wrap=100)
    # Aggregate counters share the same formatting helper (previously the
    # formatting logic was duplicated inline for both loops).
    counts = storage_usage["count_dict"]
    for key, val in counts.items():
        counts[key] = _fmt(val)
    utils.print_dict(counts)
'''
@utils.arg('--time_in_minutes', metavar='<time_in_minutes>', default=600, help='time in minutes')
@utils.service_type('workloads')
def do_workload_get_recentactivities(cs, args):
"""Get recentactivities of workload manager"""
recentactivities = cs.workloads.get_recentactivities(args.time_in_minutes)
utils.print_list(recentactivities['recentactivites'],['activity_result','activity_time','activity_description','activity_type'])
'''
@utils.arg(
    "--time_in_minutes",
    metavar="<time_in_minutes>",
    default=1440,
    help="time in minutes(default is 24 hrs.)",
)
@utils.arg(
    "--time_from",
    metavar="<time_from>",
    help="From date time in format 'MM-DD-YYYY'",
    default=None,
)
@utils.arg(
    "--time_to",
    metavar="<time_to>",
    help="To date time in format 'MM-DD-YYYY'(defult is current day)",
    default=None,
)
@utils.service_type("workloads")
def do_workload_get_auditlog(cs, args):
    """Get auditlog of workload manager"""
    records = cs.workloads.get_auditlog(
        args.time_in_minutes, args.time_from, args.time_to
    )["auditlog"]
    utils.print_list(
        records,
        ["UserName", "ObjectName", "Timestamp", "UserId", "Details"],
    )
'''
@utils.arg('--time_in_minutes', metavar='<time_in_minutes>', default=None, help='time in minutes')
@utils.arg('--status', metavar='<status>', default=None, help='status of tasks')
@utils.arg('--page', metavar='<page>', default=None, help='offset to start out of total records')
@utils.arg('--size', metavar='<size>', default=None, help='total records')
@utils.service_type('workloads')
def do_tasks_list(cs, args):
"""Get all tasks with or without filters"""
tasks = cs.workloads.get_tasks(args.time_in_minutes, args.status, args.page, args.size)
utils.print_list(tasks['tasks'],['display_name','display_description','status','created_at'])
'''
@utils.arg(
    "--setting",
    metavar="<key=key-name>",
    action="append",
    dest="settings",
    required=True,
    help="Required, Specify a key value pairs to include in the settings "
    "Specify option multiple times to include multiple settings. "
    "key=value",
)
@utils.service_type("workloads")
def do_add_new_settings(cs, args):
    """Settings."""
    settings = {}
    for settings_str in args.settings:
        err_msg = (
            "Invalid settings argument '%s'. settings arguments must be of the "
            "form --setting <key=value>" % settings_str
        )
        # Each option may itself hold several comma-separated key=value pairs.
        for pair in settings_str.split(","):
            try:
                key, value = pair.split("=", 1)
            except ValueError:
                raise exceptions.CommandError(err_msg)
            settings[key] = value
    settings = cs.workloads.settings(settings)
    utils.print_dict(settings, wrap=100)
@utils.arg(
    "--database_only",
    metavar="<True/False>",
    help="Keep True if want to delete from database only.(Default=False)",
    default=False,
)
@utils.arg("workload_id", metavar="<workload_id>", help="ID of the workload to delete.")
@utils.service_type("workloads")
def do_workload_delete(cs, args):
    """Remove a workload."""
    target = _find_workload(cs, args.workload_id)
    target.delete(args.database_only)
@utils.arg(
    "workload_id", metavar="<workload_id>", help="ID of the workload to snapshot."
)
@utils.arg(
    "--full",
    dest="full",
    action="store_true",
    default=False,
    help="Specify if a full snapshot is required.",
)
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional snapshot name. (Default=None)",
    default=None,
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional snapshot description. (Default=None)",
    default=None,
)
@utils.service_type("workloads")
def do_workload_snapshot(cs, args):
    """Snapshots a workload."""
    target = _find_workload(cs, args.workload_id)
    target.snapshot(args.full, args.display_name, args.display_description)
@utils.arg(
    "--workloadids",
    metavar="<workloadid>",
    action="append",
    dest="workloadids",
    required=True,
    help="Required atleast one workloadid , Specify an ID of the workload whose scheduler disables. "
    "Specify option multiple times to include multiple workloads. "
    "--workloadids <workloadid> --workloadids <workloadid>",
)
@utils.service_type("workloads")
def do_disable_scheduler(cs, args):
    """disables scheduler"""
    # Pause each workload's scheduler; unknown ids are collected and
    # reported together at the end instead of aborting the whole batch.
    invalid_ids = []
    for workload_id in args.workloadids:
        try:
            workload = _find_workload(cs, workload_id)
            workload.pause()
        except Exception as ex:
            if "No workload with a name or ID" in str(ex):
                invalid_ids.append(workload_id)
            else:
                # Bare re-raise keeps the original traceback intact
                # (``raise ex`` reset it).
                raise
    if len(invalid_ids):
        msg = (
            "Found invalid workload id's in the input. \n"
            "List of invalid id's: %s" % (str(invalid_ids))
        )
        raise exceptions.CommandError(msg)
@utils.arg(
    "--workloadids",
    metavar="<workloadid>",
    action="append",
    dest="workloadids",
    required=True,
    help="Required atleast one workloadid , Specify an ID of the workload whose scheduler enables. "
    "Specify option multiple times to include multiple workloads. "
    "--workloadids <workloadid> --workloadids <workloadid>",
)
@utils.service_type("workloads")
def do_enable_scheduler(cs, args):
    """enables scheduler"""
    # Resume each workload's scheduler; unknown ids are collected and
    # reported together at the end instead of aborting the whole batch.
    invalid_ids = []
    for workload_id in args.workloadids:
        try:
            workload = _find_workload(cs, workload_id)
            workload.resume()
        except Exception as ex:
            if "No workload with a name or ID" in str(ex):
                invalid_ids.append(workload_id)
            else:
                # Bare re-raise keeps the original traceback intact
                # (``raise ex`` reset it).
                raise
    if len(invalid_ids):
        msg = (
            "Found invalid workload id's in the input. \n"
            "List of invalid id's: %s" % (str(invalid_ids))
        )
        raise exceptions.CommandError(msg)
@utils.arg("workload_id", metavar="<workload_id>", help="ID of the workload to unlock.")
@utils.service_type("workloads")
def do_workload_unlock(cs, args):
"""unlock a workload."""
workload = _find_workload(cs, args.workload_id)
workload.unlock()
@utils.arg("workload_id", metavar="<workload_id>", help="ID of the workload to reset.")
@utils.service_type("workloads")
def do_workload_reset(cs, args):
"""reset a workload."""
workload = _find_workload(cs, args.workload_id)
workload.reset()
@utils.arg("snapshot_id", metavar="<snapshot_id>", help="ID of the workload snapshot.")
@utils.arg(
"--output",
metavar="<output>",
default=None,
help="Option to get additional snapshot details, "
"Specify --output metadata for snapshot metadata, "
"Specify --output networks for snapshot vms networks, "
"Specify --output disks for snapshot vms disks",
)
@utils.service_type("workloads")
def do_snapshot_show(cs, args):
"""Show details about a workload snapshot"""
snapshot = _find_snapshot(cs, args.snapshot_id)
info = dict()
instances = dict()
metadata = dict()
info.update(snapshot._info)
if "links" in info:
info.pop("links")
if "instances" in info:
instances = info.pop("instances")
if "metadata" in info:
metadata = info.pop("metadata")
inst = []
networks = []
vdisks = []
for item in instances:
d = dict()
d["Name"] = str(item["name"])
if "imported_from_vcenter" in item["metadata"]:
datastores = ast.literal_eval(item["metadata"]["datastores"])
for ds in datastores:
if ds["_type"] == "Datastore":
d["Datastore"] = ds["name"]
d["Resource Pool"] = ast.literal_eval(item["metadata"]["resourcepool"])[
"name"
]
d["VM Folder"] = ast.literal_eval(item["metadata"]["parent"])["name"]
clusters = ast.literal_eval(item["metadata"]["cluster"])
for cl in clusters:
if cl["_type"] == "Cluster":
d["Cluster"] = cl["name"]
hosts = ast.literal_eval(item["metadata"]["host"])
for hs in hosts:
if hs["_type"] == "Host":
d["Host"] = hs["name"]
networks += ast.literal_eval(item["metadata"]["networks"])
vdisks += ast.literal_eval(item["metadata"]["vdisks"])
for nt in networks:
if nt["_type"] == "Network":
d["Network"] = nt["name"]
d["VM Power State"] = str(item["status"])
else:
d["Status"] = str(item["status"])
# TODO: nics is a long string... need to handle correctly
# d['NICs'] = str(item['nics'])
if "flavor" in item:
d["Flavor"] = str(item["flavor"])
if "security_group" in item:
d["Security Group"] = str(item["security_group"])
d["ID"] = str(item["id"])
for index, nic in enumerate(item["nics"]):
item["nics"][index]["vm_id"] = item["id"]
for index, disk in enumerate(item["vdisks"]):
item["vdisks"][index]["vm_id"] = item["id"]
networks += ast.literal_eval(str(item["nics"]))
vdisks += ast.literal_eval(str(item["vdisks"]))
inst.append(d)
meta = []
for item in metadata:
m = dict()
m[str(item["key"])] = str(item["value"])
meta.append(m)
info["size"] = (
str(info["size"]) + " Bytes or Approx (" + utils.bytes_fmt(info["size"]) + ")"
)
info["restore_size"] = (
str(info["restore_size"])
+ " Bytes or Approx ("
+ utils.bytes_fmt(info["restore_size"])
+ ")"
)
info["time_taken"] = str(info["time_taken"]) + " Seconds"
[
info.pop(k)
for k in [
"pinned",
"created_at",
"finished_at",
"updated_at",
"user_id",
"project_id",
]
]
if not info["warning_msg"]:
info.pop("warning_msg")
if info["status"] != "error":
info.pop("error_msg")
if info["status"] in set(["available", "error", "mounted"]):
info.pop("progress_msg")
if not info["status"] in set(["available"]):
info.pop("restore_size")
if info["status"] in set(["mounted"]):
for m in meta:
if "mounturl" in m:
info["mounturl"] = m["mounturl"]
break
utils.print_dict(info, "Snapshot property")
utils.print_data_vertically(inst, ["Instances", "Value"])
if args.output == "networks":
utils.print_data_vertically(networks, ["Networks", "Value"])
elif args.output == "disks":
utils.print_data_vertically(vdisks, ["Vdisks", "Value"])
elif args.output == "metadata":
utils.print_data_vertically(meta, ["Metadata", "Value"])
@utils.arg(
    "--workload_id",
    metavar="<workload_id>",
    default=None,
    help="Filter results by workload_id",
)
@utils.arg(
    "--tvault_node",
    metavar="<host>",
    help="List all the snapshot operations scheduled on a tvault node(Default=None)",
    default=None,
)
@utils.arg(
    "--date_from",
    metavar="<date_from>",
    help="From date in format 'YYYY-MM-DDTHH:MM:SS' eg 2016-10-10T00:00:00, If don't specify time then it takes 00:00 by default",
    default=None,
)
@utils.arg(
    "--date_to",
    metavar="<date_to>",
    help="To date in format 'YYYY-MM-DDTHH:MM:SS'(defult is current day), Specify HH:MM:SS to get snapshots within same day inclusive/exclusive results for date_from and date_to",
    default=None,
)
@utils.arg(
    "--all",
    type=strutils.bool_from_string,
    metavar="{True,False}",
    help="List all snapshots of all the projects(valid for admin user only)",
    default=False,
)
@utils.service_type("workloads")
def do_snapshot_list(cs, args):
    """List all the snapshots."""
    # FIX: docstring previously said "workloads"; this command lists snapshots.
    # Collect all server-side filters in one place.
    search_opts = {
        "host": args.tvault_node,
        "workload_id": args.workload_id,
        "all": args.all,
        "date_from": args.date_from,
        "date_to": args.date_to,
    }
    snapshots = cs.snapshots.list(search_opts=search_opts)
    columns = [
        "Created At",
        "Name",
        "ID",
        "Workload ID",
        "Snapshot Type",
        "Status",
        "Host",
    ]
    utils.print_list(snapshots, columns)
@utils.arg(
    "snapshot_id",
    metavar="<snapshot_id>",
    help="ID of the workload snapshot to delete.",
)
@utils.service_type("workloads")
def do_snapshot_delete(cs, args):
    """Remove a workload snapshot."""
    # Resolve the snapshot, then request its deletion.
    target = _find_snapshot(cs, args.snapshot_id)
    target.delete()
@utils.arg("snapshot_id", metavar="<snapshot_id>", help="ID of snapshot to cancel.")
@utils.service_type("workloads")
def do_snapshot_cancel(cs, args):
"""Cancel a snapshot."""
snapshot = _find_snapshot(cs, args.snapshot_id)
snapshot.cancel()
@utils.arg(
    "snapshot_id",
    metavar="<snapshot_id>",
    help="ID of a snapshot, which network topology needs to be restored.",
)
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional name for the restore.",
    default="Network Topology Restore",
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional description for restore.",
    default="Network Topology Restore",
)
@utils.service_type("workloads")
def do_restore_network_topology(cs, args):
    """Restores only network topology from of a snapshot"""
    # Only the captured network layout is restored; no VMs are created.
    target = _find_snapshot(cs, args.snapshot_id)
    target.restore_network_topology(args.display_name, args.display_description)
@utils.arg(
    "snapshot_id",
    metavar="<snapshot_id>",
    help="ID of the workload snapshot to restore.",
)
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional name for the restore. (Default=None)",
    default="One Click Restore",
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional description for restore. (Default=None)",
    default="One Click Restore",
)
@utils.service_type("workloads")
def do_snapshot_oneclick_restore(cs, args):
    """Restore a workload snapshot."""
    snapshot = _find_snapshot(cs, args.snapshot_id)
    # FIX: the restore options are a compile-time constant; write them as a
    # plain dict literal instead of ast.literal_eval on a string.
    options = {
        "openstack": {},
        "type": "openstack",
        "oneclickrestore": True,
        "restore_type": "oneclick",
        "vmware": {},
    }
    snapshot.restore(False, args.display_name, args.display_description, options)
@utils.arg(
    "snapshot_id",
    metavar="<snapshot_id>",
    help="ID of the workload snapshot to restore.",
)
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional name for the restore. (Default=None)",
    default="Selective Restore",
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional description for restore. (Default=None)",
    default="Selective Restore",
)
@utils.arg(
    "--filename",
    metavar="<filename>",
    type=open,
    default=restore_json_path,
    help="Provide file path(relative or absolute) including file name , by default it will read "
    "file: %s .You can use this for reference or replace values into"
    " this file." % restore_json_path,
)
@utils.service_type("workloads")
def do_snapshot_selective_restore(cs, args):
    """Selective restore workload snapshot."""
    # Read the user-supplied restore description and parse it safely.
    raw = read_file(args.filename)
    try:
        restore_options = yaml.load(raw, Loader=yaml.SafeLoader)
    except Exception as ex:
        detail = getattr(ex, "context", ex)
        raise exceptions.CommandError(
            "JSON conversion failed with error: {}".format(detail)
        )
    target = _find_snapshot(cs, args.snapshot_id)
    target.restore(False, args.display_name, args.display_description, restore_options)
@utils.arg(
    "snapshot_id",
    metavar="<snapshot_id>",
    help="ID of the workload snapshot to restore.",
)
@utils.arg(
    "--display-name",
    metavar="<display-name>",
    help="Optional name for the restore. (Default=None)",
    default="Inplace Restore",
)
@utils.arg(
    "--display-description",
    metavar="<display-description>",
    help="Optional description for restore. (Default=None)",
    default="Inplace Restore",
)
@utils.arg(
    "--filename",
    metavar="<filename>",
    type=open,
    default=restore_json_path,
    help="Provide file path(relative or absolute) including file name , by default it will read "
    "file: %s .You can use this for reference or replace values into"
    " this file." % restore_json_path,
)
@utils.service_type("workloads")
def do_snapshot_inplace_restore(cs, args):
    """Inplace restore workload snapshot."""
    # Read the user-supplied restore description and parse it safely.
    raw = read_file(args.filename)
    try:
        restore_options = yaml.load(raw, Loader=yaml.SafeLoader)
    except Exception as ex:
        detail = getattr(ex, "context", ex)
        raise exceptions.CommandError(
            "JSON conversion failed with error: {}".format(detail)
        )
    target = _find_snapshot(cs, args.snapshot_id)
    target.restore(False, args.display_name, args.display_description, restore_options)
@utils.arg(
    "snapshot_id", metavar="<snapshot_id>", help="ID of the workload snapshot to mount."
)
@utils.arg(
    "mount_vm_id", metavar="<mount_vm_id>", help="VM ID that snapshot volumes mount to."
)
@utils.arg(
    "--options", metavar="<options>", help="Mount options. (Default={})", default="{}"
)
@utils.service_type("workloads")
def do_snapshot_mount(cs, args):
    """Mount a workload snapshot."""
    snapshot = _find_snapshot(cs, args.snapshot_id)
    # --options arrives as a Python-literal string, e.g. "{'key': 'value'}".
    options = ast.literal_eval(args.options)
    # FIX: the return value was bound to an unused local (`mounturl`); the
    # mount is asynchronous and the URL appears later in snapshot metadata,
    # so the user is directed to snapshot-show instead.
    snapshot.mount(args.mount_vm_id, options)
    print(
        'Please run "workloadmgr snapshot-show --output metadata %s" to '
        "get the snapshot status" % args.snapshot_id
    )
@utils.arg(
    "snapshot_id",
    metavar="<snapshot_id>",
    help="ID of the workload snapshot to dismount.",
)
@utils.service_type("workloads")
def do_snapshot_dismount(cs, args):
    """Dismount a workload snapshot."""
    # Detach the previously mounted snapshot volumes.
    target = _find_snapshot(cs, args.snapshot_id)
    target.dismount()
@utils.arg(
    "--workloadid",
    metavar="<workloadid>",
    help="Workload id (Default=None)",
    default=None,
)
@utils.service_type("workloads")
def do_snapshot_mounted_list(cs, args):
    """List of all mounted snapshots"""
    # Optionally scoped to one workload via --workloadid.
    mounted = cs.snapshots.snapshot_mounted_list(args.workloadid)
    columns = ["snapshot_id", "snapshot_name", "workload_id", "mounturl"]
    utils.print_list(mounted["mounted_snapshots"], columns)
@utils.arg("restore_id", metavar="<restore_id>", help="ID of the restore.")
@utils.arg(
"--output",
metavar="<output>",
help="Option to get additional restore details, "
"Specify --output metadata for restore metadata,"
"--output networks "
"--output subnets "
"--output routers "
"--output flavors ",
)
@utils.service_type("workloads")
def do_restore_show(cs, args):
"""Show details about a workload snapshot restore"""
restore = _find_restore(cs, args.restore_id)
info = dict()
info.update(restore._info)
networks = []
subnets = []
routers = []
flavors = []
if "links" in info:
info.pop("links")
if "snapshot_details" in info:
info.pop("snapshot_details")
if "metadata" in info:
metadata = info.pop("metadata")
if "networks" in info:
networks = info.pop("networks")
if "subnets" in info:
subnets = info.pop("subnets")
if "routers" in info:
routers = info.pop("routers")
if "flavors" in info:
flavors = info.pop("flavors")
meta = []
for item in metadata:
m = dict()
m[str(item["key"])] = str(item["value"])
meta.append(m)
utils.print_dict(info, wrap=100)
if args.output == "metadata":
utils.print_data_vertically(meta, ["Metadata", "Value"])
if args.output == "networks":
utils.print_data_vertically(networks, ["Networks", "Value"])
if args.output == "subnets":
utils.print_data_vertically(subnets, ["Subnets", "Value"])
if args.output == "routers":
utils.print_data_vertically(routers, ["Routers", "Value"])
if args.output == "flavors":
utils.print_data_vertically(flavors, ["Flavors", "Value"])
@utils.arg(
    "--snapshot_id",
    metavar="<snapshot_id>",
    default=None,
    help="Filter results by snapshot_id",
)
@utils.service_type("workloads")
def do_restore_list(cs, args):
    """List all the restores."""
    # Only the snapshot-id filter is supported by this command.
    filters = {"snapshot_id": args.snapshot_id}
    restores = cs.restores.list(search_opts=filters)
    utils.print_list(
        restores, ["Created At", "Name", "ID", "Snapshot ID", "Size", "Status"]
    )
@utils.arg("restore_id", metavar="<restores_id>", help="ID of the restore to delete.")
@utils.service_type("workloads")
def do_restore_delete(cs, args):
"""Delete a restore."""
restore = _find_restore(cs, args.restore_id)
restore.delete()
@utils.arg("restore_id", metavar="<restore_id>", help="ID of restore to cancel.")
@utils.service_type("workloads")
def do_restore_cancel(cs, args):
"""Cancel a restore."""
restore = _find_restore(cs, args.restore_id)
restore.cancel()
"""@utils.arg('testbubble_id', metavar='<testbubble_id>', help='ID of the testbubble.')
@utils.service_type('workloads')
def do_testbubble_show(cs, args):
testbubble = _find_testbubble(cs, args.testbubble_id)
info = dict()
info.update(testbubble._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info, wrap=100)
@utils.arg('--snapshot_id',
metavar='<snapshot_id>',
default=None,
help='Filter results by snapshot_id')
@utils.service_type('workloads')
def do_testbubble_list(cs, args):
search_opts = {
'snapshot_id': args.snapshot_id,
}
testbubbles = cs.testbubbles.list(search_opts=search_opts)
columns = ['ID', 'Snapshot ID', 'Status']
utils.print_list(testbubbles, columns)
@utils.arg('testbubble_id', metavar='<testbubbles_id>',
help='ID of the testbubble to delete.')
@utils.service_type('workloads')
def do_testbubble_delete(cs, args):
testbubble = _find_testbubble(cs, args.testbubble_id)
testbubble.delete()"""
@utils.arg("name", metavar="<name>", help="name of the setting")
@utils.arg("value", metavar="<vaule>", help="Value of the setting")
@utils.arg(
"--description",
metavar="<description>",
help="Optional description. (Default=None)",
default=None,
)
@utils.arg(
"--category",
metavar="<category>",
help="Optional category. (Default=None)",
default=None,
)
@utils.arg(
"--type",
metavar="<type>",
help="Optional type of setting. (Default=None)",
default=None,
)
@utils.arg(
"--is-public",
type=strutils.bool_from_string,
metavar="{True,False}",
help="Make setting accessible to the public.",
default=False,
)
@utils.arg(
"--is-hidden",
type=strutils.bool_from_string,
metavar="{True,False}",
help="Make setting hidden.",
default=False,
)
@utils.arg(
"--metadata",
metavar="<key=value>",
action="append",
dest="metadata",
default=[],
help="Specify a key value pairs to include in the settings metadata "
"Specify option multiple times to include multiple keys. "
"key=value",
)
@utils.service_type("workloads")
def do_setting_create(cs, args):
"""Creates a setting."""
metadata = {}
for metadata_str in args.metadata:
err_msg = (
"Invalid metadata argument '%s'. metadata arguments must be of the "
"form --metadata <key=value>" % metadata_str
)
try:
k, v = metadata_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in metadata:
metadata[k] = v
else:
metadata.setdefault(k, v)
cs.settings.create(
args.name,
args.value,
args.description,
args.category,
args.type,
args.is_public,
args.is_hidden,
metadata,
)
@utils.arg("name", metavar="<name>", help="name of the setting")
@utils.arg("value", metavar="<vaule>", help="Value of the setting")
@utils.arg(
"--description",
metavar="<description>",
help="Optional description. (Default=None)",
default=None,
)
@utils.arg(
"--category",
metavar="<category>",
help="Optional category. (Default=None)",
default=None,
)
@utils.arg(
"--type",
metavar="<type>",
help="Optional type of setting. (Default=None)",
default=None,
)
@utils.arg(
"--is-public",
type=strutils.bool_from_string,
metavar="{True,False}",
help="Make setting accessible to the public.",
default=False,
)
@utils.arg(
"--is-hidden",
type=strutils.bool_from_string,
metavar="{True,False}",
help="Make setting hidden.",
default=False,
)
@utils.arg(
"--metadata",
metavar="<key=value>",
action="append",
dest="metadata",
default=[],
help="Specify a key value pairs to include in the settings metadata "
"Specify option multiple times to include multiple keys. "
"key=value",
)
@utils.service_type("workloads")
def do_setting_update(cs, args):
"""Creates a setting."""
metadata = {}
for metadata_str in args.metadata:
err_msg = (
"Invalid metadata argument '%s'. metadata arguments must be of the "
"form --metadata <key=value>" % metadata_str
)
try:
k, v = metadata_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in metadata:
metadata[k] = v
else:
metadata.setdefault(k, v)
cs.settings.update(
args.name,
args.value,
args.description,
args.category,
args.type,
args.is_public,
args.is_hidden,
metadata,
)
@utils.arg("setting_name", metavar="<setting_name>", help="name of the setting.")
@utils.arg(
"--get_hidden",
type=strutils.bool_from_string,
metavar="{True,False}",
help="show hidden settings",
default=False,
)
@utils.service_type("workloads")
def do_setting_show(cs, args):
"""Show details of a setting."""
setting = cs.settings.get(args.setting_name, args.get_hidden)
if setting == None:
print("Setting not found")
else:
utils.print_dict(setting, wrap=100)
@utils.arg(
    "--get_hidden",
    type=strutils.bool_from_string,
    metavar="{True,False}",
    help="show hidden settings",
    default=False,
)
@utils.service_type("workloads")
def do_setting_list(cs, args):
    """List all the settings."""
    search_opts = {"get_hidden": args.get_hidden}
    settings = cs.settings.list(search_opts=search_opts)
    # The response body is the second element of the returned tuple.
    settings = settings[1]["settings"]
    d = {}
    columns = "Name"
    if settings is None:
        utils.print_dict(d, columns)
        return
    if isinstance(settings, dict):
        utils.print_dict(settings, columns)
        return
    # Merge every per-item settings dict into one table; first value wins.
    for v in settings:
        # FIX: use isinstance for consistency with the dict check above
        # (type(v) is dict would reject dict subclasses).
        if isinstance(v, dict):
            for k, v1 in list(v["settings"].items()):
                d.setdefault(k, str(v1))
    utils.print_dict(d, columns)
@utils.arg("setting_name", metavar="<setting_name>", help="name of setting to delete.")
@utils.service_type("workloads")
def do_setting_delete(cs, args):
"""Remove a setting."""
cs.settings.delete(args.setting_name)
@utils.arg(
    "role_name",
    metavar="<role_name>",
    help="name of the role that user what to delete to triliovault service.",
)
@utils.arg(
    "--is_cloud_trust",
    metavar="{True,False}",
    type=strutils.bool_from_string,
    help="Set to true if creating cloud\
            admin trust. While creating cloud trust use same user and and tenant\
            which used to configure TVault and keep the role admin.",
    default=False,
)
@utils.service_type("workloads")
def do_trust_create(cs, args):
    """Creates a trust."""
    trusts = cs.trusts.create(args.role_name, args.is_cloud_trust)
    # Flatten the dict entries of the response into one table; the first
    # value seen for a key wins.
    merged = {}
    for entry in trusts:
        if type(entry) is dict:
            for key, value in list(entry.items()):
                merged.setdefault(key, str(value))
    utils.print_dict(merged, "Name")
@utils.arg("trust_id", metavar="<trust_id>", help="trust ID.")
@utils.service_type("workloads")
def do_trust_show(cs, args):
"""Show details of a trust."""
trust = cs.trusts.get(args.trust_id)
if trust == None:
print("Trust not found")
else:
utils.print_dict(trust, wrap=100)
@utils.arg("workload_id", metavar="<workload_id>", help="workload ID.")
@utils.service_type("workloads")
def do_scheduler_trust_validate(cs, args):
"""Validate scheduler trust for a given workload."""
result = cs.trusts.validate_scheduler_trust(args.workload_id)
if not result.get("scheduler_enabled", None):
print("Scheduler is disabled")
elif not result.get("trust", None):
print("Trust not found")
elif not result.get("is_valid", None):
print("Trust is broken")
else:
utils.print_dict(result.get("trust", {}), wrap=100)
@utils.service_type("workloads")
def do_trust_list(cs, args):
"""List all the trusts."""
trusts = cs.trusts.list()
columns = "Name"
for v in trusts:
if type(v) is dict:
d = {}
for k, v1 in list(v.items()):
if k == "name":
k = "trust_id"
d.setdefault(k, str(v1))
utils.print_dict(d, columns)
@utils.arg("trust_id", metavar="<trust_id>", help="ID of trust to delete.")
@utils.service_type("workloads")
def do_trust_delete(cs, args):
"""Remove a trust."""
cs.trusts.delete(args.trust_id)
@utils.service_type("workloads")
def do_enable_global_job_scheduler(cs, args):
"""Enable global job scheduler."""
job_scheduler = cs.global_job_scheduler.enable()
if job_scheduler:
print("Global job scheduler is successfully enabled")
else:
print("Global job scheduler is not enabled")
@utils.service_type("workloads")
def do_disable_global_job_scheduler(cs, args):
"""Disable global job scheduler."""
job_scheduler = cs.global_job_scheduler.disable()
if job_scheduler:
print("Global job scheduler is still running")
else:
print("Global job scheduler is successfully disabled")
@utils.service_type("workloads")
def do_get_global_job_scheduler(cs, args):
"""Show status of global job scheduler."""
enabled = cs.global_job_scheduler.get()
if enabled:
print("Global job scheduler enabled")
else:
print("Global job scheduler disabled")
@utils.arg(
    "license_file",
    metavar="<license_file>",
    type=open,
    help="Provide file path(relative or absolute) including file name of license file.",
)
@utils.service_type("workloads")
def do_license_create(cs, args):
    """Creates a license. (Admin only)"""
    # Upload the raw license text together with its base file name.
    lic_text = read_file(args.license_file)
    _, file_name = os.path.split(args.license_file.name)
    payload = {"lic_txt": lic_text, "file_name": file_name}
    created = cs.workloads.license_create(payload)
    utils.print_dict(created, wrap=100)
@utils.service_type("workloads")
def do_license_list(cs, args):
"""List the license. (Admin only)"""
license = cs.workloads.license_list()
metadata = {}
for meta in license["metadata"]:
if meta["key"] == "filename":
metadata["filename"] = meta["value"]
license["metadata"] = metadata
utils.print_dict(license, wrap=100)
@utils.service_type("workloads")
def do_license_check(cs, args):
"""Check the license. (Admin only)"""
message = cs.workloads.license_check()
print(message)
@utils.arg(
    "--migrate_cloud",
    metavar="{True,False}",
    type=strutils.bool_from_string,
    default=False,
    help="Set to True if want to list workloads from other clouds as well. Default if False",
)
@utils.arg(
    "--generate_yaml",
    metavar="{True,False}",
    type=strutils.bool_from_string,
    default=False,
    help="Set to True if want to generate output file in yaml format,\
            which would be further used as input for workload reassign API.",
)
@utils.service_type("workloads")
def do_workload_get_orphaned_workloads_list(cs, args):
    """
    List all the orphaned workloads having tenant_id or user_id which doesn't belong to current cloud.
    """
    orphaned = cs.workloads.get_orphaned_workloads_list(args)
    utils.print_list(orphaned, ["Name", "ID", "Project ID", "User ID"])
    # With --generate_yaml the server side also writes a reassign map file
    # into the current working directory.
    if args.generate_yaml:
        print("\nPlease find map file at " + str(os.getcwd()) + "/reassign_map.yaml\n")
@utils.arg(
    "--old_tenant_ids",
    metavar="<old_tenant_id>",
    action="append",
    dest="old_tenant_ids",
    default=[],
    help="Specify old tenant ids"
    " from which workloads need to reassign to new tenant. "
    "--old_tenant_ids <old_tenant_id> --old_tenant_ids <old_tenant_id>",
)
@utils.arg(
    "--new_tenant_id",
    metavar="<new_tenant_id>",
    default=None,
    help="Specify new tenant id "
    "to which workloads need to reassign from old tenant. "
    "--new_tenant_id <new_tenant_id>",
)
@utils.arg(
    "--workload_ids",
    metavar="<workload_id>",
    action="append",
    dest="workload_ids",
    default=[],
    help="Specify workload_ids which need to reassign to new tenant. "
    "If not provided then all the workloads from old tenant "
    "will get reassigned to new tenant. "
    "--workload_ids <workload_id> --workload_ids <workload_id>",
)
@utils.arg(
    "--user_id",
    metavar="<user_id>",
    default=None,
    help="Specify user id "
    "to which workloads need to reassign from old tenant. "
    "--user_id <user_id>",
)
@utils.arg(
    "--migrate_cloud",
    metavar="{True,False}",
    type=strutils.bool_from_string,
    default=False,
    help="Set to True if want to reassign workloads from other clouds as well. Default if False",
)
@utils.arg(
    "--map_file",
    metavar="<map_file>",
    default=None,
    type=open,
    help="Provide file path(relative or absolute) including file name of reassign map file. "
    "Provide list of old workloads mapped to new tenants. "
    "Format for this file is YAML. For sample, please refer "
    "to this file: %s ." % reassign_map_path,
)
@utils.service_type("workloads")
def do_workload_reassign_workloads(cs, args):
    """
    Assign workload to a new tenant/user.
    """
    if args.map_file is not None:
        # SECURITY FIX: yaml.load without a Loader is deprecated and can
        # construct arbitrary objects from the user-supplied map file; use
        # safe_load, matching the SafeLoader usage elsewhere in this module.
        data = yaml.safe_load(read_file(args.map_file))
        if not isinstance(data, dict) or data.get("reassign_mappings") is None:
            message = (
                "File content is not in required yaml format, "
                + "Please provide require data in appropriate format."
            )
            raise exceptions.CommandError(message)
        args.map_file = data
    workloads = cs.workloads.reassign_workloads(args)
    columns = ["Name", "ID", "Project ID", "User ID"]
    utils.print_list(workloads["reassigned_workloads"], columns)
    # Report any ids the server could not reassign.
    if len(workloads["failed_workloads"]) > 0:
        message = "\nPlease verify failed workload id's are valid.\n"
        utils.print_data_vertically(
            [workloads["failed_workloads"]], ["Failed_Workloads"]
        )
        print(message)
'''
@utils.arg('--jobschedule',
metavar="<key=key-name>",
action='append',
dest='jobschedule',
default=None,
help="Specify following key value pairs for jobschedule "
"Specify option multiple times to include multiple keys. "
" 'start_time' : '2:30 PM' "
" 'interval' : '1 hr' "
" 'retention_policy_value' : '30' "
" , "
"For example --jobschedule enabled=True"
"In order to enable/disable scheduler pass enabled True / enabled False")
@utils.arg('--config-file', metavar='<config_file_path>',
dest='configfile', type=open, default=None,
help="Provide file path(relative or absolute) including file name , "
"This file should contain list of services to backup, their "
"respective config, log folders and root credentials for database. "
"This should be in YAML format. For this you can refer to this template"
" file %s:" % backup_template,
)
@utils.arg('--authorized-key', metavar='<authorized_key>',
dest='authorized_key', type=open, default=None,
help="Provide file path(relative or absolute) including file name , "
"This private key would be used to connect with controller nodes"
"over the SSH.")
@utils.service_type('workloads')
def do_config_workload_configure(cs, args):
"""Configure services to backup and scheduler for OpenStack config workload."""
jobschedule = {}
services_to_backup = {}
config_data = {}
if args.jobschedule is None and args.configfile is None:
message = "Please provide one parameter among configfile" \
" and jobschedule to configure config workload."
raise exceptions.CommandError(message)
try:
config_workload = cs.config_backup.get_config_workload()
except Exception as ex:
config_workload = None
if args.authorized_key is not None:
authorized_key = read_file(args.authorized_key)
if args.configfile is not None:
data = read_file(args.configfile)
config_data = yaml.load(data)
# It should return a dict and there should be minimum one service/creds
if type(config_data) is not dict or len(config_data.keys()) < 1:
message = "Please check with input file. File content should be in given " \
"template format: %s and there should be minimum one database" \
"connection and trusted user." % backup_template
raise exceptions.CommandError(message)
# If user configuring for the first time then user must provide
# database credentials and trusted user
if any(key not in config_data for key in ['databases', 'trusted_user'])\
and config_workload is None:
message = "Please provide database credentials and trusted user,"\
"It's require for configuring config workload."
raise exceptions.CommandError(message)
if args.jobschedule is not None:
for jobschedule_str in args.jobschedule:
err_msg = ("Invalid jobschedule argument '%s'. jobschedule arguments must be of the "
"form --jobschedule <key=value>" % jobschedule_str)
for kv_str in jobschedule_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in jobschedule:
jobschedule[k] = v
else:
jobschedule.setdefault(k, v)
if (args.configfile is None or args.authorized_key is None) and config_workload is None:
message = "Database credentials, trusted user and authorized_key is required to configure config workload." \
" Please see the help to give required parameters."
raise exceptions.CommandError(message)
if 'authorized_key' in locals():
config_data['authorized_key'] = authorized_key
config_workload = cs.config_backup.config_workload(
jobschedule, config_data)
utils.print_dict(config_workload['config_workload'], 'Property')
@utils.arg('--jobschedule',
metavar="<key=key-name>",
action='append',
dest='jobschedule',
default=None,
help="Specify following key value pairs for jobschedule "
"Specify option multiple times to include multiple keys. "
" 'start_time' : '2:30 PM' "
" 'interval' : '1 hr' "
" 'retention_policy_value' : '30' "
" , "
"For example --jobschedule start_time='2:30 PM' --jobschedule retention_policy_value='30'")
@utils.service_type('workloads')
def do_config_workload_scheduler_enable(cs, args):
"""Enable/update scheduler for config workload."""
services = {}
jobschedule = {}
if args.jobschedule is not None:
for jobschedule_str in args.jobschedule:
err_msg = ("Invalid jobschedule argument '%s'. jobschedule arguments must be of the "
"form --jobschedule <key=value>" % jobschedule_str)
for kv_str in jobschedule_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in jobschedule:
jobschedule[k] = v
else:
jobschedule.setdefault(k, v)
if 'enabled' not in jobschedule:
jobschedule['enabled'] = True
else:
jobschedule['enabled'] = True
config_workload = cs.config_backup.config_workload(jobschedule, services)
utils.print_dict(config_workload['config_workload'], 'Property')
@utils.service_type('workloads')
def do_config_workload_scheduler_disable(cs, args):
"""Disable scheduler for config workload."""
services = {}
jobschedule = {'enabled': False}
config_workload = cs.config_backup.config_workload(jobschedule, services)
utils.print_dict(config_workload['config_workload'], 'Property')
@utils.service_type('workloads')
def do_config_workload_show(cs, args):
    """Show config backup workload object.

    Fetches the config workload from the API and prints its properties,
    job schedule, and any pickled metadata sections (services to back up,
    database connection parameters, trusted user).
    """
    config_workload = cs.config_backup.get_config_workload()
    info = dict()
    config_metadata = None
    info.update(config_workload._info)
    if 'metadata' in info:
        config_metadata = info.pop('metadata')
    if 'jobschedule' in info:
        jobschedule = info.pop('jobschedule')
        # Collapse the schedule to its enabled flag in the main table;
        # the full schedule is printed separately below.
        info['jobschedule'] = jobschedule['enabled']
        utils.print_dict(info, wrap=100)
        utils.print_dict(jobschedule, 'JobSchedule')
    # Guard against a workload with no metadata: the original iterated
    # unconditionally and raised TypeError on None.
    for metadata in (config_metadata or []):
        # NOTE(review): pickle.loads on server-supplied values — trusted
        # here because the data originates from the workloadmgr service,
        # but unsafe if the endpoint is ever untrusted.
        if metadata['key'] == 'services_to_backup':
            services_to_backup = pickle.loads(metadata['value'])
            utils.print_dict(
                services_to_backup, dict_property="Services", dict_value="Config directories")
        if metadata['key'] == 'databases':
            databases = pickle.loads(metadata['value'])
            utils.print_dict(databases, dict_property="Database",
                             dict_value="Connection parameters")
        if metadata['key'] == 'trusted_user':
            trusted_user = pickle.loads(metadata['value'])
            utils.print_data_vertically(
                [[trusted_user['username']]], ['Trusted user'])
@utils.arg('--display-name', metavar='<display-name>',
           help='Optional backup name. (Default=Config backup)',
           default='Config backup')
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional backup description. (Default=None)',
           default='No description')
@utils.service_type('workloads')
def do_config_backup(cs, args):
    """Take backup of OpenStack config."""
    name, description = args.display_name, args.display_description
    response = cs.config_backup.config_backup(name, description)
    utils.print_dict(response['config_backup'])
@utils.service_type('workloads')
def do_config_backup_list(cs, args):
    """list all config backups."""
    backups = cs.config_backup.config_backup_list()
    # Annotate each row's size with a human-readable form before printing.
    for backup in backups:
        readable = utils.bytes_fmt(backup.size)
        backup.size = str(backup.size) + ' Bytes or Approx (' + readable + ')'
    headers = ['id', 'name', 'created_at', 'status',
               'size', 'description', 'config_workload_id']
    utils.print_list(backups, headers)
@utils.arg('backup_id', metavar='<backup_id>',
           help='ID of the backup to show.')
@utils.service_type('workloads')
def do_config_backup_show(cs, args):
    """Show config backup.

    Prints the backup's properties (size humanized, time taken
    humanized) and the per-host upload summary from its metadata.
    """
    config_backup = cs.config_backup.get_config_backup(args.backup_id)
    info = dict()
    info.update(config_backup._info)
    info['size'] = str(info['size']) + ' Bytes or Approx (' + \
        utils.bytes_fmt(info['size']) + ')'
    info['time_taken'] = utils.humanize_time(info['time_taken'])
    config_metadata = info.pop('metadata')
    utils.print_dict(info, wrap=100)
    for metadata in config_metadata:
        if metadata['key'] == 'backup_summary':
            # Pass the raw value straight to pickle.loads, matching
            # do_config_workload_show: wrapping it in str() broke
            # unpickling on Python 3 (str of bytes is not valid pickle).
            # NOTE(review): pickle.loads on server-supplied data —
            # trusted because it comes from the workloadmgr service.
            upload_summary = pickle.loads(metadata['value'])
            utils.print_dict(
                upload_summary, dict_property='Host', dict_value='Status')
@utils.arg('backup_id', metavar='<backup_id>',
           help='ID of the backup to delete.')
@utils.service_type('workloads')
def do_config_backup_delete(cs, args):
    """Delete a config backup."""
    backup_id = args.backup_id
    cs.config_backup.config_backup_delete(backup_id)
'''
@utils.service_type("workloads")
def do_get_tenants_usage(cs, args):
"""Gives storage used and vms protected by tenants."""
usage = cs.workloads.get_tenants_usage()
utils.print_dict(
usage["global_usage"], dict_property="Global Usage", dict_value="Values"
)
res = {}
for tenat_id, usage in usage["tenants_usage"].items():
res[usage.pop("tenant_name", tenat_id)] = usage
utils.print_dict(res, dict_property="Tenant Name", dict_value="Usage")
@utils.service_type("workloads")
def do_get_protected_vms(cs, args):
"""Gives list of vms protected by tenant."""
vms = cs.workloads.get_protected_vms()
utils.print_list(vms["protected_vms"], ["ID"])
@utils.arg(
"--policy-fields",
metavar="<key=key-name>",
action="append",
dest="policy_fields",
required=True,
default=[],
help="Specify following key value pairs for policy fields "
"Specify option multiple times to include multiple keys. "
" 'interval' : '1 hr' "
" 'retention_policy_type' : 'Number of Snapshots to Keep' or 'Number of days to retain Snapshots' "
" 'retention_policy_value' : '30' "
" 'fullbackup_interval' : '-1' (Enter Number of incremental snapshots to take Full Backup between 1 to 999, '-1' for 'NEVER' and '0' for 'ALWAYS')"
"For example --policy-fields interval='1 hr' --policy-fields retention_policy_type='Number of Snapshots to Keep'"
"--policy-fields retention_policy_value='30' --policy-fields fullbackup_interval='2'",
)
@utils.arg("display_name", metavar="<display_name>", help="policy name.")
@utils.arg(
"--display-description",
metavar="<display_description>",
help="Optional policy description. (Default=No description)",
default="No description",
)
@utils.arg(
"--metadata",
metavar="<key=key-name>",
action="append",
dest="metadata",
default=[],
help="Specify a key value pairs to include in the workload_type metadata "
"Specify option multiple times to include multiple keys. "
"key=value",
)
@utils.service_type("workloads")
def do_policy_create(cs, args):
"""Creates a policy."""
try:
policy_fields = {}
for policy_field_str in args.policy_fields:
err_msg = (
"Invalid policy_field argument '%s'. policy_field arguments must be of the "
"form --policy_field <key=value>" % policy_field_str
)
for kv_str in policy_field_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in policy_fields:
policy_fields[k] = v
else:
policy_fields.setdefault(k, v)
metadata = {}
for metadata_str in args.metadata:
err_msg = (
"Invalid metadata argument '%s'. metadata arguments must be of the "
"form --metadata <key=value>" % metadata_str
)
for kv_str in metadata_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in metadata:
metadata[k] = v
else:
metadata.setdefault(k, v)
policy = cs.workload_policy.create(
args.display_name, args.display_description, policy_fields, metadata
)
_print_workload_policy(policy)
except Exception as ex:
raise exceptions.CommandError(str(ex))
@utils.arg("policy_id", metavar="<policy_id>", help="ID of the policy.")
@utils.arg("--display-name", metavar="<display-name>", help="policy name.")
@utils.arg(
"--display-description",
metavar="<display-description>",
help="Optional policy description.",
)
@utils.arg(
"--policy-fields",
metavar="<key=key-name>",
action="append",
dest="policy_fields",
default=[],
help="Specify following key value pairs for policy fields "
"Specify option multiple times to include multiple keys. "
" 'interval' : '1 hr' "
" 'retention_policy_type' : 'Number of Snapshots to Keep' or 'Number of days to retain Snapshots' "
" 'retention_policy_value' : '30' "
" 'fullbackup_interval' : '-1' (Enter Number of incremental snapshots to take Full Backup between 1 to 999, '-1' for 'NEVER' and '0' for 'ALWAYS') "
"For example --policy-fields interval='1 hr' --policy-fields retention_policy_type='Number of Snapshots to Keep'"
"--policy-fields retention_policy_value='30' --policy-fields fullbackup_interval='2'",
)
@utils.arg(
"--metadata",
metavar="<key=key-name>",
action="append",
dest="metadata",
default=[],
help="Specify a key value pairs to include in the workload_type metadata "
"Specify option multiple times to include multiple keys. "
"key=value",
)
@utils.service_type("workloads")
def do_policy_update(cs, args):
"""Update a policy."""
try:
policy_fields = {}
for policy_field_str in args.policy_fields:
err_msg = (
"Invalid policy_field argument '%s'. policy_field arguments must be of the "
"form --policy_field <key=value>" % policy_field_str
)
for kv_str in policy_field_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in policy_fields:
policy_fields[k] = v
else:
policy_fields.setdefault(k, v)
metadata = {}
for metadata_str in args.metadata:
err_msg = (
"Invalid metadata argument '%s'. metadata arguments must be of the "
"form --metadata <key=value>" % metadata_str
)
for kv_str in metadata_str.split(","):
try:
k, v = kv_str.split("=", 1)
except ValueError as e:
raise exceptions.CommandError(err_msg)
if k in metadata:
metadata[k] = v
else:
metadata.setdefault(k, v)
policy = cs.workload_policy.update(
args.policy_id,
args.display_name,
args.display_description,
policy_fields,
metadata,
)
_print_workload_policy(policy)
except Exception as ex:
raise exceptions.CommandError(str(ex))
@utils.arg("policy_id", metavar="<policy_id>", help="ID of the policy.")
@utils.service_type("workloads")
def do_policy_delete(cs, args):
"""Remove a policy."""
cs.workload_policy.delete(args.policy_id)
@utils.arg("policy_id", metavar="<policy_id>", help="ID of the policy.")
@utils.service_type("workloads")
def do_policy_show(cs, args):
"""Show a policy."""
policy = cs.workload_policy.get(args.policy_id)
_print_workload_policy(policy)
@utils.service_type("workloads")
def do_policy_list(cs, args):
"""List all available policies."""
policies = cs.workload_policy.list()
columns = ["ID", "Name", "Status", "Description"]
utils.print_list(policies, columns)
@utils.arg("policy_id", metavar="<policy_id>", help="ID of the policy.")
@utils.arg(
"--add_project",
metavar="<project_id>",
action="append",
dest="add_project",
default=[],
help="ID of the projects to assign policy. "
"--add_project <project_id> --add_project <project_id>",
)
@utils.arg(
"--remove_project",
metavar="<project_id>",
action="append",
dest="remove_project",
default=[],
help="ID of the projects to remove policy. "
"--remove_project <project_id> --remove_project <project_id>",
)
@utils.service_type("workloads")
def do_policy_assign(cs, args):
"""Assign/Remove policy to given projects."""
result = cs.workload_policy.assign(
args.policy_id, args.add_project, args.remove_project
)
_print_workload_policy(result["policy"])
if len(result["failed_ids"]) > 0:
msg = "Please verify failed project id's are valid"
utils.print_data_vertically([result["failed_ids"]], ["Failed_projects"])
print(msg)
@utils.arg(
"project_id",
metavar="<project_id>",
help="ID of the project to list assigned policies.",
)
@utils.service_type("workloads")
def do_list_assigned_policies(cs, args):
"""List assigned policies on given project."""
policies = cs.workload_policy.get_assigned_policies(args.project_id)
utils.print_list(policies, ["policy_id", "policy_name"])
'''
@utils.arg('workload', metavar='<workload>',
           help='Name or ID of workload to transfer.')
@utils.arg('--name',
           metavar='<name>',
           default=None,
           help='Transfer name. Default=None.')
@utils.arg('--display-name',
           help=argparse.SUPPRESS)
@utils.service_type('workloads')
def do_transfer_create(cs, args):
    """Creates a workload transfer."""
    # Hidden legacy alias: --display-name overrides --name when given.
    if args.display_name is not None:
        args.name = args.display_name
    workload = _find_workload(cs, args.workload)
    transfer = cs.transfers.create(workload.id, args.name)
    details = dict(transfer._info)
    details.pop('links', None)
    utils.print_dict(details)
@utils.arg('transfer', metavar='<transfer>',
           help='Name or ID of transfer to delete.')
@utils.service_type('workloads')
def do_transfer_delete(cs, args):
    """aborts a transfer."""
    target = _find_transfer(cs, args.transfer)
    cs.transfers.delete(target.id)
@utils.arg('transfer', metavar='<transfer>',
           help='Name or ID of transfer to complete.')
@utils.service_type('workloads')
def do_transfer_complete(cs, args):
    """completes a transfer."""
    # Help text previously said "to delete" — copy/paste from
    # do_transfer_delete; this command completes the transfer.
    transfer = _find_transfer(cs, args.transfer)
    cs.transfers.complete(transfer.id)
@utils.arg('transfer', metavar='<transfer>',
           help='ID of transfer to accept.')
@utils.arg('auth_key', metavar='<auth_key>',
           help='Authentication key of transfer to accept.')
@utils.service_type('workloads')
def do_transfer_accept(cs, args):
    """Accepts a workload transfer."""
    transfer = cs.transfers.accept(args.transfer, args.auth_key)
    details = dict(transfer._info)
    details.pop('links', None)
    utils.print_dict(details)
@utils.service_type('workloads')
def do_transfer_list(cs, args):
    """Lists all transfers."""
    headers = ['ID', 'Workload ID', 'Name']
    utils.print_list(cs.transfers.list(), headers)
@utils.arg('transfer', metavar='<transfer>',
           help='Name or ID of transfer to show.')
@utils.service_type('workloads')
def do_transfer_show(cs, args):
    """Shows transfer details."""
    # Help text previously said "to accept" — copy/paste from
    # do_transfer_accept; this command only displays the transfer.
    transfer = _find_transfer(cs, args.transfer)
    info = dict()
    info.update(transfer._info)
    info.pop('links', None)
    utils.print_dict(info)
'''