# Packaging-page residue (not Python source) retained as a comment so the
# module parses:
#   Repository URL to install this package:
#   Version: 3.4.34
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 TrilioData, Inc.
import logging
import json
import six
try:
import __builtin__ as builtins
except ImportError:
import builtins
from workloadmgrclient.v1 import client as workloadmgrclient
from django.conf import settings
from horizon import exceptions
from openstack_dashboard.api.base import url_for, APIDictWrapper
from datetime import datetime
from datetime import timedelta
from operator import itemgetter, attrgetter
from openstack_dashboard import api
from openstack_dashboard import policy
import time
import pytz
import threading
from cgi import parse_qs, escape
from stat import *
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
def roundTime(time, modulo):
    # Round the minutes of an "HH:MM" string toward a multiple of *modulo*.
    # NOTE(review): only the minutes part is inspected, the computed minutes
    # are not zero-padded, and the hour can roll to 24 — confirm callers only
    # feed schedule times where that is acceptable.
    if ":" in time:
        time = time.split(":")
    time_new = int(time[1])
    off = divmod(time_new, modulo)
    if off[1] >= 10:
        if off[0] >= 3:
            # Minutes fall in the last block of the hour: jump to HH+1:15.
            return str(int(time[0]) + 1).zfill(2) + ":" + "15"
        # NOTE(review): adds *modulo* twice here (rounds up past the next
        # boundary) — looks suspicious; confirm the intended rounding rule.
        return time[0] + ":" + str((modulo - off[1] + modulo) + time_new)
    else:
        if off[0] >= 3:
            return str(int(time[0]) + 1).zfill(2) + ":" + "00"
        return time[0] + ":" + str((modulo - off[1]) + time_new)
def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string, e.g. "1.5 KB"."""
    try:
        value = num
        for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
            if abs(value) < 1024.0:
                return "%3.1f %s%s" % (value, prefix, suffix)
            value /= 1024.0
        # Anything still >= 1024 after zetta gets the yotta-ish label.
        return "%.1f %s%s" % (value, 'Yi', suffix)
    except Exception as ex:
        LOG.exception(ex)
        return num
def humanize_time(seconds, granularity=5):
    """Convert a duration in seconds into text like "1 hour 2 minutes".

    At most *granularity* units (weeks down to seconds) are emitted.
    """
    try:
        units = (('weeks', 604800),   # 60 * 60 * 24 * 7
                 ('days', 86400),     # 60 * 60 * 24
                 ('hours', 3600),     # 60 * 60
                 ('minutes', 60),
                 ('seconds', 1))
        parts = []
        remaining = seconds
        for label, span in units:
            amount = remaining // span
            if amount:
                remaining -= amount * span
                # Singular form drops the trailing 's'.
                unit = label.rstrip('s') if amount == 1 else label
                parts.append("{} {}".format(int(amount), unit))
        return ' '.join(parts[:granularity])
    except Exception as ex:
        LOG.exception(ex)
        return seconds
def get_time_zone(request):
    """Return the user's timezone name from the ``django_timezone`` cookie.

    Tries the private ``request._get_cookies()`` first, then the public
    ``request.COOKIES`` mapping, and finally falls back to ``'UTC'``.
    """
    tz = 'UTC'
    try:
        tz = request._get_cookies()['django_timezone']
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; cookie lookup failures still fall through.
        try:
            tz = request.COOKIES['django_timezone']
        except Exception:
            pass
    return tz
def get_local_time(record_time, input_format, output_format, tz, tz1=None):
    """
    Convert and return the date and time - from GMT to local time.

    *record_time* is parsed with *input_format* (default
    ``'%Y-%m-%dT%H:%M:%S.%f'``), interpreted as *tz1* (default UTC), and
    rendered in *tz* using *output_format* (default
    ``"%m/%d/%Y %I:%M:%S %p"``). Empty/zero inputs yield ``''``; on any
    error the original value is returned unchanged.
    """
    try:
        if record_time in (0, None, ''):
            return ''
        if not input_format:
            input_format = '%Y-%m-%dT%H:%M:%S.%f'
        if not output_format:
            output_format = "%m/%d/%Y %I:%M:%S %p"
        parsed = datetime.strptime(record_time, input_format)
        local_tz = pytz.timezone(tz)
        from_zone = pytz.timezone('UTC')
        if tz1 is not None:
            from_zone = pytz.timezone(tz1)
        # Bug fix: pytz timezones must be attached with ``localize()``;
        # ``datetime.replace(tzinfo=...)`` picks the zone's raw LMT offset
        # and produces wrong conversions for non-UTC source zones.
        aware = from_zone.localize(parsed)
        return datetime.strftime(aware.astimezone(local_tz), output_format)
    except Exception as ex:
        LOG.exception(ex)
        return record_time
def permissions_to_unix_name(mode):
    """Format a raw st_mode value as an ls-style string, e.g. "-rw-r--r--."."""
    # File-type letter: first predicate that matches wins; unknown types
    # contribute nothing (string starts empty), matching historic behavior.
    type_checks = ((S_ISDIR, "d"), (S_ISCHR, "c"), (S_ISBLK, "b"),
                   (S_ISREG, "-"), (S_ISFIFO, "f"), (S_ISLNK, "l"),
                   (S_ISSOCK, "s"))
    out = ""
    for check, letter in type_checks:
        if check(mode):
            out = letter
            break
    # Nine rwx permission bits, owner/group/other order.
    perm_bits = (S_IRUSR, S_IWUSR, S_IXUSR,
                 S_IRGRP, S_IWGRP, S_IXGRP,
                 S_IROTH, S_IWOTH, S_IXOTH)
    for bit, symbol in zip(perm_bits, "rwxrwxrwx"):
        out += symbol if mode & bit else "-"
    return out + "."
def get_time_with_time_zone_from_timestamp(timestamp):
    """Format an epoch timestamp as e.g. "Jan 01 1970 12:00 UTC ".

    Bug fix: the old implementation used ``datetime.fromtimestamp`` (local
    time) while unconditionally appending " UTC ", so the rendered time was
    wrong on any server not running in UTC; ``utcfromtimestamp`` makes the
    value actually match the label.
    """
    time_stamp_time = datetime.utcfromtimestamp(timestamp).strftime('%b %d %Y %I:%M')
    return time_stamp_time + " UTC "
def convert_to_unix_format(stats):
    """Humanize an inode-stat dict in place and return its ls-ordered fields.

    Timestamps, mode and size present in *stats* are converted to display
    strings; the return value lists the fields in ``ls -l``-like order, with
    ``None`` for anything missing.
    """
    for ts_key in ('mtime', 'atime', 'ctime'):
        if stats.get(ts_key) is not None:
            stats[ts_key] = get_time_with_time_zone_from_timestamp(stats[ts_key])
    if stats.get('mode') is not None:
        stats['mode'] = permissions_to_unix_name(int(stats.get('mode')))
    if stats.get('size') is not None:
        stats['size'] = sizeof_fmt(stats.get('size'))
    columns = ("mode", "nlink", "uid", "gid", "size", "ctime", "mtime", "atime")
    return [stats.get(col) for col in columns]
def workload_api(request):
    """Build a workloadmgr client from the request's token and service catalog.

    Returns ``None`` when no 'workloads' service is registered in the
    catalog; SSL behavior follows the OPENSTACK_SSL_* Django settings.
    """
    verify_disabled = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    ca_bundle = getattr(settings, 'OPENSTACK_SSL_CACERT', "")
    ep_type = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL')
    try:
        wlm_url = url_for(request, 'workloads', ep_type)
    except exceptions.ServiceCatalogException:
        LOG.debug('no workloads service configured.')
        return None
    LOG.debug('workloadmgr connection created using token "%s" and url "%s"' %
              (request.user.token.id, wlm_url))
    client = workloadmgrclient.Client(request.user.username,
                                      request.user.token.id,
                                      project_id=request.user.tenant_id,
                                      auth_url=wlm_url,
                                      insecure=verify_disabled,
                                      cacert=ca_bundle,
                                      http_log_debug=settings.DEBUG)
    client.client.auth_token = request.user.token.id
    client.client.management_url = wlm_url
    return client
def settings_create(request, context):
    """Create workloadmgr settings in bulk from *context*."""
    try:
        client = workload_api(request)
        client.settings.create_bulk(context)
    except Exception as ex:
        raise Exception(str(ex))
def setting_get(request, name):
    """Fetch a single workloadmgr setting by *name*."""
    try:
        client = workload_api(request)
        return client.settings.get(name)
    except Exception as ex:
        raise Exception(str(ex))
def settings_list(request, get_hidden=False, get_smtp_settings=False):
    """List workloadmgr settings, optionally including hidden/SMTP entries."""
    try:
        opts = {'get_hidden': get_hidden,
                'get_smtp_settings': get_smtp_settings}
        return workload_api(request).settings.list(search_opts=opts)
    except Exception as ex:
        raise Exception(str(ex))
def test_email(request):
    """Trigger the server-side settings.test_email call; return its message."""
    try:
        client = workload_api(request)
        return client.settings.test_email()
    except Exception as ex:
        raise Exception(str(ex))
def workload_type_get(request, workload_type_id):
    """Fetch one workload type by id."""
    try:
        client = workload_api(request)
        return client.workload_types.get(workload_type_id)
    except Exception as ex:
        raise Exception(str(ex))
def workload_type_list(request):
    """List all workload types known to the server."""
    try:
        client = workload_api(request)
        return client.workload_types.list()
    except Exception as ex:
        raise Exception(str(ex))
def workload_get_nodes(request):
    """Return the workloadmgr node inventory."""
    try:
        client = workload_api(request)
        return client.workloads.get_nodes()
    except Exception as ex:
        raise Exception(str(ex))
def workload_type_create(request, context):
    """Create a workload type from *context* (metadata, name, description)."""
    try:
        client = workload_api(request)
        return client.workload_types.create(context['metadata'],
                                            context['name'],
                                            context['description'])
    except Exception as ex:
        raise Exception(str(ex))
def workload_type_delete(request, workload_type_id):
    """Delete a workload type; returns True on success."""
    try:
        workload_api(request).workload_types.delete(workload_type_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def workload_get(request, workload_id):
    """Fetch one workload and decorate it for the detail view.

    Replaces ``workload_type_id`` with the type's display name, localizes
    the created/updated timestamps, humanizes the scheduler's next-run time
    and annotates each instance with a ``hw_qemu_guest_agent`` flag taken
    from nova server metadata (falling back to the glance image property).
    """
    try:
        tz = get_time_zone(request)
        workload = workload_api(request).workloads.get(workload_id)
        # NOTE: the id attribute is overwritten with the human-readable name.
        workload.workload_type_id = (workload_type_get(request, workload.workload_type_id)).name
        workload.created_at = get_local_time(workload.created_at, '', '', tz)
        workload.updated_at = get_local_time(workload.updated_at, '', '', tz)
        nextrun = "NA"
        if 'enabled' in workload.jobschedule and workload.jobschedule['enabled']:
            # Pre-ternary ``and``/``or`` idiom: -1 when 'nextrun' is missing —
            # and also when nextrun is 0, since int 0 is falsy.
            nextrun = 'nextrun' in workload.jobschedule and \
                int(workload.jobschedule['nextrun']) or -1
            workload.jobschedule['nextrun'] = humanize_time(nextrun, 5)
        for instance in workload.instances:
            hw_qemu_guest_agent = False
            try:
                server = api.nova.server_get(request, instance['id'])
                hw_qemu_guest_agent = server.metadata.get('hw_qemu_guest_agent', False)
                if hw_qemu_guest_agent is False:
                    # Fall back to the image property when the server carries
                    # no explicit metadata entry.
                    if server.image:
                        image = api.glance.image_get(request, server.image['id'])
                        hw_qemu_guest_agent = image.properties.get('hw_qemu_guest_agent', 'no') == 'yes'
                instance['hw_qemu_guest_agent'] = hw_qemu_guest_agent
            except:
                # Best effort: nova/glance lookup failures are non-fatal here.
                pass
        return workload
    except Exception as ex:
        raise Exception(str(ex))
def workload_list(request):
    """List the tenant's workloads, annotated for the table view.

    Adds a per-workload snapshot success/error summary string, localizes
    ``created_at``, swaps ``workload_type_id`` for the type name and
    surfaces scheduler-trust state as ``workload.trust``.
    """
    try:
        """
        id = request.GET.get('page',None)
        if id is None:
            workloads = workload_api(request).workloads.list_by_page('1')
        else:
            workloads = workload_api(request).workloads.list_by_page(id)
        """
        workloads = workload_api(request).workloads.list(search_opts={'scheduler_trust':True})
        search_opts = {}
        snapshot_list = workload_api(request).snapshots. \
            list(detailed=False, search_opts=search_opts)
        tz = get_time_zone(request)
        workload_snapshot_info = {}
        for workload in workloads:
            workload_snapshot_info[workload.id] = {
                'failed_snapshots_count': 0,
                'success_snapshots_count': 0}
        for snapshot in snapshot_list:
            # NOTE(review): assumes every snapshot's workload_id appears in the
            # workload list above; a stray snapshot would raise KeyError here
            # (re-raised as a generic Exception below) — confirm.
            info = workload_snapshot_info[snapshot.workload_id]
            if snapshot.status in ("error", "cancelled"):
                info['failed_snapshots_count'] += 1
            elif snapshot.status in ("available", "mounted"):
                info['success_snapshots_count'] += 1
        for workload in workloads:
            info = workload_snapshot_info[workload.id]
            failed_snapshots_count = info['failed_snapshots_count']
            success_snapshots_count = info['success_snapshots_count']
            workload.snapshots_info = "Total:" \
                + str(failed_snapshots_count + success_snapshots_count) \
                + ", Success:" + str(success_snapshots_count) \
                + ", Error:" + str(failed_snapshots_count)
            workload.created_at = get_local_time(
                workload.created_at, '', "%I:%M:%S %p - %m/%d/%Y", tz)
            workload.workload_type_id = \
                (workload_type_get(request, workload.workload_type_id)).name
            workload.trust = 'NA'
            if hasattr(workload, 'scheduler_trust') and workload.scheduler_trust['scheduler_enabled']:
                workload.trust = workload.scheduler_trust['is_valid']
        return workloads
    except Exception as ex:
        raise Exception(str(ex))
def file_search(request, vm_id, filepath, snapshotids, start, end):
    """Start a file search for *filepath* across the VM's snapshots."""
    try:
        client = workload_api(request)
        return client.file_search.search(
            vm_id, filepath, snapshotids=snapshotids, start=start, end=end)
    except Exception as ex:
        raise Exception(str(ex))
def file_search_show(request, search_id):
    """Fetch the state/results of a previously started file search."""
    try:
        client = workload_api(request)
        return client.file_search.get(search_id)
    except Exception as ex:
        raise Exception(str(ex))
def page_size_get(request):
    """Fetch the 'page_size' workloads setting.

    Bug fix: the original ``return Exception(str(ex))`` handed callers an
    exception *object* instead of raising it; errors now propagate the same
    way as every other wrapper in this module.
    """
    try:
        return workload_api(request).workloads.settings('page_size')
    except Exception as ex:
        raise Exception(str(ex))
def workload_vms_list(request):
    """Return the list of VMs already protected by some workload."""
    try:
        result = workload_api(request).workloads.get_protected_vms()
        return result['protected_vms']
    except Exception as ex:
        raise Exception(str(ex))
def workload_create(request, context):
    """Create a workload from the wizard *context* dict."""
    metadata = context.get('metadata', {})
    # Guarantee a jobschedule key so the API call below never KeyErrors.
    context.setdefault('jobschedule', {})
    try:
        created = workload_api(request).workloads.create(
            context['name'], context['description'],
            context['workload_type'], 'openstack',
            context['instances'], context['jobschedule'], metadata)
        return created
    except Exception as ex:
        raise Exception(str(ex))
def workload_update(request, workload_id, context):
    """Update an existing workload from the wizard *context* dict."""
    metadata = context.get('metadata', {})
    # Guarantee a jobschedule key so the API call below never KeyErrors.
    context.setdefault('jobschedule', {})
    try:
        updated = workload_api(request).workloads.update(
            workload_id, context['name'],
            context['description'], context['instances'],
            context['jobschedule'], metadata)
        return updated
    except Exception as ex:
        raise Exception(str(ex))
def workload_pause(request, workload_id):
    """Invoke the workloadmgr pause operation for *workload_id*."""
    try:
        client = workload_api(request)
        return client.workloads.pause(workload_id)
    except Exception as ex:
        raise Exception(str(ex))
def workload_resume(request, workload_id):
    """Invoke the workloadmgr resume operation for *workload_id*."""
    try:
        client = workload_api(request)
        return client.workloads.resume(workload_id)
    except Exception as ex:
        raise Exception(str(ex))
def workload_snapshot(request, workload_id, full):
    # NOTE(review): dead code — immediately shadowed by the 5-argument
    # ``workload_snapshot`` defined right below (Python keeps only the last
    # binding of a module-level name). Confirm nothing relies on this
    # 3-argument form before removing.
    try:
        return workload_api(request).workloads.snapshot(workload_id, full)
    except Exception as ex:
        raise Exception(str(ex))
def workload_snapshot(request, workload_id, full, name, description):
    """Trigger a snapshot of *workload_id* with the given name/description."""
    try:
        client = workload_api(request)
        return client.workloads.snapshot(workload_id, full, name, description)
    except Exception as ex:
        raise Exception(str(ex))
def workload_unlock(request, workload_id):
    """Invoke the workloadmgr unlock operation; returns True on success."""
    try:
        workload_api(request).workloads.unlock(workload_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def workload_reset(request, workload_id):
    """Invoke the workloadmgr reset operation; returns True on success."""
    try:
        workload_api(request).workloads.reset(workload_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def workload_delete(request, workload_id):
    """Delete the given workload; returns True on success."""
    try:
        workload_api(request).workloads.delete(workload_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def snapshot_get(request, snapshot_id):
    """Fetch one snapshot and prepare it for the detail page.

    Humanizes snapshot/restore sizes (including per-vdisk restore sizes),
    localizes timestamps, decodes each instance's JSON ``vm_metadata``
    string in place, and promotes any per-vdisk ``hw_qemu_guest_agent``
    flag into that metadata.
    """
    try:
        tz = get_time_zone(request)
        snapshot = workload_api(request).snapshots.get(snapshot_id)
        snapshot.size = sizeof_fmt(snapshot.size)
        snapshot.restore_size = sizeof_fmt(snapshot.restore_size)
        snapshot.created_at = get_local_time(snapshot.created_at, '', '', tz)
        snapshot.updated_at = get_local_time(snapshot.updated_at, '', '', tz)
        for instance in snapshot.instances:
            # vm_metadata is stored as a JSON string; decode it in place.
            instance['metadata']['vm_metadata'] = json.loads(instance['metadata'].get('vm_metadata', "{}"))
            for vdisk in instance['vdisks']:
                vdisk['restore_size'] = sizeof_fmt(vdisk['restore_size'])
                # instance hw_qemu_guest_agent flag takes precedence
                if 'hw_qemu_guest_agent' in vdisk:
                    if 'hw_qemu_guest_agent' not in instance['metadata']['vm_metadata']:
                        instance['metadata']['vm_metadata']['hw_qemu_guest_agent'] = vdisk['hw_qemu_guest_agent']
        return snapshot
    except Exception as ex:
        raise Exception(str(ex))
def snapshot_list(request, workload_id, status=None, filter_it=True, get_instances=False):
    """List snapshots of *workload_id*, newest first.

    With *filter_it* True (the default) each snapshot is also annotated
    with a restore success/error summary, a humanized size and a localized
    ``created_at``; pass ``filter_it=False`` for the raw sorted listing.
    """
    try:
        tz = get_time_zone(request)
        search_opts = {'workload_id': workload_id}
        if status is not None:
            search_opts['status'] = status
        if get_instances is True:
            search_opts['get_instances'] = get_instances
        snapshots = workload_api(request).snapshots \
            .list(detailed=True, search_opts=search_opts)
        snapshots = builtins.sorted(snapshots,
                                    key=attrgetter('created_at'), reverse=True)
        if filter_it is False:
            return snapshots
        if len(snapshots) > 0:
            for snapshot in snapshots:
                restores_list = []
                success_restores_count = 0
                failed_restores_count = 0
                try:
                    search_opts = {'snapshot_id': snapshot.id}
                    restores_list = workload_api(request).restores.list(
                        detailed=True, search_opts=search_opts)
                    if len(restores_list) > 0:
                        for restore in restores_list:
                            if restore.status == "error" or restore.status == "cancelled":
                                failed_restores_count += 1
                            elif restore.status == "available":
                                success_restores_count += 1
                except:
                    # Best effort: a failed restores lookup leaves zero counts.
                    pass
                snapshot.restores_info = "Total:" + str(len(restores_list)) \
                    + ", Success:" + str(success_restores_count) \
                    + ", Error:" + str(failed_restores_count)
                snapshot.size = sizeof_fmt(snapshot.size)
                snapshot.created_at = get_local_time(
                    snapshot.created_at, '', "%I:%M:%S %p - %m/%d/%Y", tz)
        return snapshots
    except Exception as ex:
        raise Exception(str(ex))
def snapshot_restore(request, snapshot_id, test=False, options=None):
    """Kick off a restore of *snapshot_id*; returns True on success.

    When *options* is missing (or falsy) a one-click openstack restore is
    requested with default name/description.
    """
    try:
        # ``not options`` (not just ``is None``) preserves the historic
        # ``options or {...}`` semantics for empty dicts.
        if not options:
            options = {'name': 'one click restore',
                       'description': '-',
                       "oneclickrestore": True,
                       "openstack": {},
                       "type": "openstack"}
        workload_api(request).snapshots.restore(snapshot_id, test,
                                                name=options['name'],
                                                description=options['description'],
                                                options=options)
        return True
    except Exception as ex:
        raise Exception(str(ex))
def snapshot_mount(request, snapshot_id, vm_id):
    """Invoke snapshots.mount for (*snapshot_id*, *vm_id*)."""
    try:
        client = workload_api(request)
        return client.snapshots.mount(snapshot_id, vm_id)
    except Exception as ex:
        raise Exception(str(ex))
def snapshot_dismount(request, snapshot_id):
    """Invoke snapshots.dismount; returns True on success."""
    try:
        workload_api(request).snapshots.dismount(snapshot_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def snapshot_delete(request, snapshot_id):
    """Delete the given snapshot; returns True on success."""
    try:
        workload_api(request).snapshots.delete(snapshot_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def snapshot_cancel(request, snapshot_id):
    """Cancel an in-progress snapshot; returns True on success."""
    try:
        workload_api(request).snapshots.cancel(snapshot_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def restore_get(request, restore_id):
    """Fetch one restore, humanizing its size, timestamps and duration."""
    try:
        user_tz = get_time_zone(request)
        rec = workload_api(request).restores.get(restore_id)
        rec.size = sizeof_fmt(rec.size)
        rec.created_at = get_local_time(rec.created_at, '', '', user_tz)
        rec.updated_at = get_local_time(rec.updated_at, '', '', user_tz)
        rec.time_taken = humanize_time(rec.time_taken)
        return rec
    except Exception as ex:
        raise Exception(str(ex))
def restore_list(request, snapshot_id):
    """List restores of *snapshot_id*, newest first, humanized for display."""
    try:
        user_tz = get_time_zone(request)
        restores = workload_api(request).restores.list(
            detailed=True, search_opts={'snapshot_id': snapshot_id})
        restores = sorted(restores, key=attrgetter('created_at'), reverse=True)
        for rec in restores:
            rec.size = sizeof_fmt(rec.size)
            rec.created_at = get_local_time(
                rec.created_at, '', "%I:%M:%S %p - %m/%d/%Y", user_tz)
        return restores
    except Exception as ex:
        raise Exception(str(ex))
def restore_delete(request, restore_id):
    """Delete the given restore record; returns True on success."""
    try:
        workload_api(request).restores.delete(restore_id)
    except Exception as ex:
        raise Exception(str(ex))
    return True
def restored_instances_list(request, restore_id):
    """Stub: restored-instance listing is not implemented; always []."""
    return []
def restored_instance_get(request, restored_instance_id):
    """Stub: returns a fixed placeholder record for any restored instance."""
    placeholder = {'id': 'id',
                   'vm_id': 'vm_id',
                   'name': 'name',
                   'status': 'available'}
    return placeholder
def get_user_name(request, user_id):
    """Resolve a keystone user id to its username; falls back to the id."""
    user_name = user_id
    # Guard clause: without the identity:get_user privilege, skip keystone.
    if not policy.check((("identity", "identity:get_user"),), request):
        LOG.debug("Insufficient privilege level to view user information.")
        return user_name
    try:
        user = api.keystone.user_get(request, user_id)
        if user:
            user_name = user.username
    except Exception:
        pass
    return user_name
def get_project_name(request, project_id):
    """Resolve a project id to its name; falls back to the id on failure."""
    try:
        project_info = api.keystone.tenant_get(request, project_id, admin=True)
        if project_info:
            return project_info.name
    except Exception:
        pass
    return project_id
def get_project_list(request):
    """Return all keystone tenants, or [] when the listing fails."""
    try:
        return api.keystone.tenant_list(request)
    except Exception:
        return []
def dashboard_nfs_workloads_data(request):
    """List all workloads stored on the NFS share named in the query string.

    Reads the ``nfs`` query parameter; a failed API call degrades to an
    empty list. (Removed the unused ``workloads_list`` local.)
    """
    try:
        query = parse_qs(request.environ['QUERY_STRING'])
        nfs = escape(query.get('nfs', [''])[0])
        try:
            search_opts = {'all_workloads': True, 'nfs_share': nfs}
            workloads = workload_api(request).workloads.list(search_opts=search_opts)
        except Exception:
            workloads = []
        return workloads
    except Exception as ex:
        raise Exception(str(ex))
def dashboard_workloads_data_per_tenant(request, project_id=None):
    """Collect one summary dict per workload for the CSV export.

    Any API failure degrades to an empty list so the export link still
    renders. User/project/type names are memoized in local dicts to avoid
    repeated keystone/workloadmgr calls.
    """
    try:
        # Get list of all workloads data for csv export
        workloads_list = []
        try:
            search_opts = {'all_workloads': True, 'project_id': project_id}
            workloads = workload_api(request).workloads.list(detailed=True, search_opts=search_opts)
            known_project_names = {}
            known_user_names = {}
            known_types = {}
            for wld in workloads:
                workload_details = {}
                # Fetching the names from dict instead of calling api every time.
                # Bug fix: the user-name cache used to be probed with ``wld.id``
                # while being populated under ``wld.user_id``, so it never hit.
                if wld.user_id not in known_user_names:
                    user_name = get_user_name(request, wld.user_id)
                    known_user_names[wld.user_id] = user_name
                else:
                    user_name = known_user_names[wld.user_id]
                if wld.project_id not in known_project_names:
                    project_name = get_project_name(request, wld.project_id)
                    known_project_names[wld.project_id] = project_name
                else:
                    project_name = known_project_names[wld.project_id]
                if wld.workload_type_id not in known_types:
                    wlm_type = (workload_type_get(request, wld.workload_type_id)).name
                    known_types[wld.workload_type_id] = wlm_type
                else:
                    wlm_type = known_types[wld.workload_type_id]
                if wld.jobschedule['enabled']:
                    scheduler_status = 'Enabled'
                else:
                    scheduler_status = 'Disabled'
                # NOTE(review): 'User Id' is populated with the workload id
                # (wld.id), not wld.user_id — preserved as-is; confirm what the
                # CSV consumers expect before changing.
                workload_details = {'User Id': wld.id, 'User Name': user_name,
                                    'Project': project_name,
                                    'Workload Name': wld.name,
                                    'Workload Type': wlm_type,
                                    'Availability Zone': wld.availability_zone,
                                    'VMs': len(wld.instances),
                                    'Storage Used': sizeof_fmt(float(wld.storage_usage['full']['usage'])
                                                               + float(wld.storage_usage['incremental']['usage'])),
                                    'Backup Target': wld.metadata.get('backup_media_target', "NA"),
                                    'Scheduler Status': scheduler_status,
                                    'Interval': wld.jobschedule['interval']}
                workloads_list.append(workload_details)
        except Exception:
            workloads_list = []
        return workloads_list
    except Exception as ex:
        raise Exception(str(ex))
def dashboard_workloads_data(request, project_id=None):
    """Build the DataTables JSON payload for the admin workloads dashboard.

    Reads paging parameters (draw/length/start/search) from the raw query
    string, then assembles one row per workload in the requested page: ids
    and names, VM count, an inline HTML sparkline summarizing snapshot
    history, storage usage, backup target and a scheduler toggle control.
    """
    try:
        # Get list of all workloads
        workload_list = []
        try:
            search_opts = {'all_workloads': True, 'project_id': project_id}
            workloads = workload_api(request).workloads.list(detailed=True, search_opts=search_opts)
        except:
            workloads = []
        # DataTables paging parameters from the raw query string.
        var = parse_qs(request.environ['QUERY_STRING'])
        draw = escape(var.get('draw', [''])[0])
        length = escape(var.get('length', [''])[0])
        start = escape(var.get('start', [''])[0])
        search = escape(var.get('search', [''])[0])
        total = len(workloads)
        draw = int(draw)
        length = int(length)
        start = int(start)
        # start = ((draw - 1) * length)
        end = start + length
        # in case of All workloads(represents -1 value) or paging exceeds the total workloads available
        if length == -1 or end > total:
            end = start + (total - start)
        if len(workloads) > 0:
            for index in range(start, end):
                workload_details = []
                # workload_details.append(index+1)
                workload_details.append(workloads[index].id)
                workload_details.append(get_user_name(request, workloads[index].user_id))
                workload_details.append(get_project_name(request, workloads[index].project_id))
                workload_details.append(workloads[index].name)
                workload_details.append((workload_type_get(request, workloads[index].workload_type_id)).name)
                workload_details.append(workloads[index].availability_zone)
                workload_details.append(len(workloads[index].instances))
                # Sparkline series: snapshot sizes (line) and durations (bar).
                performance_values_size_line = []
                performance_values_time_bar = []
                performance_colors = []
                performance_tooltips_size_line = []
                performance_tooltips_time_bar = []
                success_snapshots_count = 0
                failed_snapshots_count = 0
                full_snapshot = workloads[index].storage_usage['full']['snap_count']
                incr_snapshot = workloads[index].storage_usage['incremental']['snap_count']
                if full_snapshot + incr_snapshot > 0:
                    search_opts = {'workload_id': workloads[index].id, 'all': True}
                    try:
                        snapshot_list = workload_api(request).snapshots. \
                            list(detailed=True, search_opts=search_opts)
                    except:
                        snapshot_list = []
                    e1 = len(snapshot_list)
                    s1 = 0
                    # Sparkline graph shows at most the 25 most recent points.
                    graph_start = s1
                    if e1 > 25:
                        graph_start = e1 - 25
                    for index1 in range(s1, e1):
                        try:
                            snapshot = snapshot_list[index1]
                        except IndexError:
                            continue
                        if snapshot.status == "error":
                            failed_snapshots_count += 1
                        elif snapshot.status == "available" or snapshot.status == "mounted":
                            success_snapshots_count += 1
                        time_taken = snapshot.time_taken
                        size = snapshot.size
                        size_humanized = sizeof_fmt(size)
                        timetaken_humanized = humanize_time(time_taken)
                        if (snapshot.status == "available" or snapshot.status == "mounted") and index1 >= graph_start:
                            performance_tooltips_time_bar.append(timetaken_humanized)
                            performance_tooltips_size_line.append(size_humanized)
                            performance_values_time_bar.append(time_taken)
                            performance_values_size_line.append(size)
                    table_data = ""
                    try:
                        table_data = '<table cellpadding="0" cellspacing="0" width="100%"> \
                        <tr><td class="t_inner_cell t_first_column"><div class="sparkline_performance" \
                        data-values-bar="' + str(performance_values_time_bar) + '" \
                        data-values-line="' + str(performance_values_size_line) + '" \
                        data-tooltips-line="' + str(performance_tooltips_size_line) + '" \
                        data-tooltips-bar="' + str(performance_tooltips_time_bar) + '"></div></td><td class="t_inner_cell t_column_50"> \
                        <div class="sparkline_bar">' + str(full_snapshot) + ',\
                        ' + str(incr_snapshot) + '</div></td> \
                        <td class="t_inner_cell t_column_50"><div class="t_badge alert-success" title="Successful snapshots"> \
                        ' + str(success_snapshots_count) + '</div></td><td class="t_inner_cell t_column_50"><div class="t_badge alert-danger" \
                        title="Failed snapshots">' + str(
                            failed_snapshots_count) + '</div></td></tr></table>'
                    except Exception as ex:
                        pass
                    workload_details.append([table_data])
                else:
                    # No snapshots yet: render the badges without a sparkline.
                    table_data = '<table cellpadding="0" cellspacing="0" width="100%"> \
                        <tr><td style="border:0"> </td><td style="border:0"> </td> \
                        <td class="t_inner_cell t_column_50"><div class="t_badge alert-success" title="Successful snapshots"> \
                        ' + str(success_snapshots_count) + '</div></td><td class="t_inner_cell t_column_50"><div class="t_badge alert-danger" \
                        title="Failed snapshots">' + str(
                            failed_snapshots_count) + '</div></td></tr></table>'
                    workload_details.append([table_data])
                workload_details.append(sizeof_fmt(float(workloads[index].storage_usage['full']['usage'])
                                                   + float(workloads[index].storage_usage['incremental']['usage'])))
                workload_details.append(workloads[index].metadata.get('backup_media_target', "NA"))
                if workloads[index].jobschedule['enabled']:
                    workload_details.append(humanize_time(workloads[index].jobschedule['nextrun'], 5))
                    workload_details.append(workloads[index].jobschedule['interval'])
                    action_data = '<label class="switch"><input type="checkbox" id="check_' + workloads[
                        index].id + '" checked data-record-id="' + workloads[
                        index].id + '#single_row#0" data-toggle="modal" data-target="#confirm-scheduler-update" style="display:none;"><span class="slider round"></span></label>'
                    workload_details.append(action_data)
                else:
                    workload_details.append('-')
                    workload_details.append('disabled')
                    action_data = '<label class="switch"><input type="checkbox" id="check_' + workloads[
                        index].id + '" data-record-id="' + workloads[
                        index].id + '#single_row#1" data-toggle="modal" data-target="#confirm-scheduler-update" style="display:none;"><span class="slider round"></span></label>'
                    workload_details.append(action_data)
                workload_list.append(workload_details)
        output = {}
        output['draw'] = draw
        # NOTE(review): 'rocordsTotal' looks like a typo for DataTables'
        # expected 'recordsTotal' — but it is emitted as-is today; confirm
        # what the consuming JS reads before renaming.
        output['rocordsTotal'] = len(workloads)
        output['recordsFiltered'] = len(workloads)
        output['data'] = workload_list
        return output
    except Exception as ex:
        raise Exception(str(ex))
"""
def dashboard_quota_data(request, tenant_id=None):
tz = get_time_zone(request)
node_wise_snapshot_count = ''
nodes = {}
quota_data = {'total_nodes': 0, 'available_nodes': 0}
try:
nodes = workload_api(request).workloads.get_nodes()
except Exception:
nodes['nodes'] = {}
total_snapshots = 0
for node in nodes['nodes']:
if not node['is_vip']:
quota_data['total_nodes'] += 1
if isinstance(node['status'], six.string_types) and node['status'].lower() == "up":
quota_data['available_nodes'] += 1
search_opts = {'host': node['node'], 'all': True, 'status': 'running'}
try:
snapshots = workload_api(request).snapshots \
.list(detailed=True, search_opts=search_opts)
except Exception:
snapshots = []
node['snapshots'] = []
for snapshot in snapshots:
if tenant_id and snapshot.project_id == tenant_id:
node['snapshots'].append(snapshot)
else:
node['snapshots'].append(snapshot)
total_snapshots += len(node['snapshots'])
node_wise_snapshot_count = \
node_wise_snapshot_count + str(len(node['snapshots'])) + ', '
quota_data['balance_nodes'] = quota_data['total_nodes'] - quota_data['available_nodes']
quota_data['total_snapshots'] = total_snapshots
quota_data['node_wise_snapshot_count'] = node_wise_snapshot_count[:2]
storage = []
try:
storage_usage = workload_api(request).workloads.get_storage_usage()
storage = storage_usage['storage_usage']
except Exception:
pass
quota_data['total_utilization'] = 0
quota_data['total_capacity'] = 0
for nfsshare in storage:
quota_data['storage_type'] = str(nfsshare['storage_type'])
quota_data['total_utilization'] += nfsshare['total_utilization']
quota_data['total_capacity'] += nfsshare['total_capacity']
quota_data['available_capacity'] = \
(float(quota_data['total_capacity']) -
float(quota_data['total_utilization']))
quota_data['total_capacity_humanized'] = \
sizeof_fmt(quota_data['total_capacity'])
quota_data['total_utilization_humanized'] = \
sizeof_fmt(quota_data['total_utilization'])
quota_data['available_capacity_humanized'] = \
sizeof_fmt(quota_data['available_capacity'])
quota_data['contego_services_up'] = 0
quota_data['contego_services_down'] = 0
quota_data['contego_services_others'] = 0
quota_data['contego_services_total'] = 0
try:
services = nova.service_list(request)
except Exception:
services = []
for service in services:
if 'contego' in service.binary:
quota_data['contego_services_total'] += 1
service.updated_at = get_local_time(service.updated_at, '', '', tz)
if service.state == "up":
quota_data['contego_services_up'] += 1
elif service.state == "down":
quota_data['contego_services_down'] += 1
else:
quota_data['contego_services_others'] += 1
quota_data['vms_protected'] = 0
quota_data['total_vms'] = 0
try:
chrageback_data = workload_api(request).workloads.get_tenants_usage()
global_usage = chrageback_data['global_usage']
tenant_usage = chrageback_data['tenants_usage']
if tenant_id is not None:
for tenant in tenant_usage:
if tenant == tenant_id:
quota_data['vms_protected'] = tenant_usage[tenant]['vms_protected']
quota_data['total_vms'] = tenant_usage[tenant]['total_vms']
quota_data['storage_used'] = tenant_usage[tenant]['used_capacity']
quota_data['total_utilization'] = tenant_usage[tenant]['used_capacity']
quota_data['total_capacity'] = global_usage['total_capacity']
quota_data['total_capacity_humanized'] = sizeof_fmt(quota_data['total_capacity'])
quota_data['total_utilization_humanized'] = sizeof_fmt(quota_data['total_utilization'])
quota_data['tenant_name'] = 'by ' + get_project_name(request, tenant_id) + ' Tenant'
else:
quota_data['vms_protected'] = global_usage['vms_protected']
quota_data['total_vms'] = global_usage['total_vms']
quota_data['tenant_name'] = ''
except Exception:
pass
return [quota_data]
"""
def dashboard_quota_data(request, project_id=None):
    """Fetch quota data, optionally scoped to a single project."""
    client = workload_api(request)
    return client.workloads.get_quota_data(project_id)
def dashboard_nodes_data(request):
    """Return the node list annotated with each node's in-flight snapshots.

    A snapshot counts as in-flight when its status is none of the terminal
    states (available/error/mounted/cancelled).
    """
    try:
        nodes = {}
        try:
            nodes = workload_api(request).workloads.get_nodes()
        except:
            nodes['nodes'] = {}
        terminal_states = ("available", "error", "mounted", "cancelled")
        for node in nodes['nodes']:
            search_opts = {'host': node['node']}
            try:
                snapshots = workload_api(request).snapshots \
                    .list(detailed=True, search_opts=search_opts)
            except:
                snapshots = []
            node['snapshots'] = [snap for snap in snapshots
                                 if snap.status not in terminal_states]
        nodes_data = {'nodeslist': nodes['nodes'],
                      'total_nodes': len(nodes['nodes'])}
        return nodes_data
    except Exception as ex:
        raise Exception(str(ex))
# NOTE(review): module-level dict that no function in this file reads or
# writes — appears to be a leftover; confirm before removing.
storage_data = {}
def dashboard_storage_usage(request):
    """Proxy straight through to the workloadmgr storage-usage endpoint."""
    client = workload_api(request)
    return client.workloads.get_storage_usage()
def dashboard_recent_tasks(
        request, timeInMinutes, tasksStatus, tasksPage, tasksSize):
    """Return recent tasks with timestamps localized to the user's timezone.

    A failed API call yields an empty task list instead of raising.
    """
    tz = get_time_zone(request)
    try:
        try:
            tasks = workload_api(request).workloads.get_tasks(
                timeInMinutes, tasksStatus, tasksPage, tasksSize)
        except Exception:
            # Bug fix: the fallback used to be a bare list, which made the
            # ``tasks['tasks']`` lookups below raise TypeError.
            tasks = {'tasks': []}
        for task in tasks['tasks']:
            task['created_at'] = get_local_time(task['created_at'], '', '', tz)
            task['finished_at'] = get_local_time(task['finished_at'], '', '', tz)
        return tasks['tasks']
    except Exception as ex:
        raise Exception(str(ex))
def dashboard_recent_activities(request, timeinminutes):
    """Return recent activities with times and names resolved for display.

    A failed API call yields an empty activity list instead of raising.
    """
    try:
        try:
            activities = workload_api(request).workloads. \
                get_recentactivities(timeinminutes)
        except Exception:
            # Bug fix: the fallback used to be a bare list, which made the
            # ``activities['recentactivites']`` lookups below raise TypeError.
            # (The misspelled key is the server's actual payload key.)
            activities = {'recentactivites': []}
        tz = get_time_zone(request)
        for activity in activities['recentactivites']:
            activity['activity_time'] = \
                get_local_time(activity['activity_time'], '', '%I:%M:%S %p - %m/%d/%Y', tz)
            activity['user_name'] = \
                get_user_name(request, activity['object_user_id'])
            activity['project_name'] = \
                get_project_name(request, activity['object_project_id'])
        return activities['recentactivites']
    except Exception as ex:
        raise Exception(str(ex))
def dashboard_audit_log(request, time_in_minutes, start_date, end_date):
    """Fetch audit-log entries for the given time window."""
    try:
        client = workload_api(request)
        audit = client.workloads.get_auditlog(time_in_minutes, start_date, end_date)
        return audit['auditlog']
    except Exception as ex:
        raise Exception(str(ex))
def dashboard_contego_services(request):
    """List nova services and single out the contego ones.

    Contego services get their ``updated_at`` converted to the
    requester's time zone. Returns a dict with keys
    ``contego_services`` and ``nova_services``.
    """
    try:
        result = {}
        try:
            all_services = nova.service_list(request)
        except Exception:
            all_services = []
        tz = get_time_zone(request)
        contego = [svc for svc in all_services if 'contego' in svc.binary]
        for svc in contego:
            svc.updated_at = get_local_time(svc.updated_at, '', '', tz)
        result['contego_services'] = contego
        result['nova_services'] = all_services
    except Exception as ex:
        raise Exception(str(ex))
    return result
def dashboard_license_data(request):
    """Return installed license info; empty dict when the call fails."""
    try:
        return workload_api(request).workloads.license_list()
    except Exception:
        # Best-effort: license data is optional on the dashboard.
        return {}
def dashboard_license_validity_check(request):
    """Validate the installed license; API errors propagate to the caller."""
    client = workload_api(request)
    return client.workloads.license_check()
def dashboard_license_create(request, license_data):
    """Upload a new license; API errors propagate to the caller."""
    client = workload_api(request)
    return client.workloads.license_create(license_data)
def dashboard_usage_data(request):
    """Return per-tenant chargeback data, humanizing sizes and localizing
    snapshot creation times to the requester's time zone."""
    try:
        usage_data = workload_api(request).workloads.get_tenants_chargeback()
        tz = get_time_zone(request)
        for tenant in usage_data:
            tenant_info = usage_data[tenant]
            tenant_info['used_capacity'] = sizeof_fmt(
                tenant_info['used_capacity'])
            for wlm in tenant_info['workloads']:
                wlm_info = tenant_info['workloads'][wlm]
                wlm_info['size'] = sizeof_fmt(wlm_info['size'])
                for snap in wlm_info['snapshots']:
                    snap_info = wlm_info['snapshots'][snap]
                    snap_info['size'] = sizeof_fmt(snap_info['size'])
                    snap_info['created_at'] = get_local_time(
                        snap_info['created_at'], '', '', tz)
    except Exception as ex:
        raise ex
    return usage_data
def transfer_create(request, workload_id, transfer_name):
    """Start a workload ownership transfer; empty dict on failure."""
    try:
        return workload_api(request).transfers.create(workload_id, transfer_name)
    except Exception:
        return {}
def transfer_get(request, transfer_id):
    """Look up a single workload transfer; empty dict on failure."""
    try:
        return workload_api(request).transfers.get(transfer_id)
    except Exception:
        return {}
def transfer_list(request):
    """List workload transfers; empty dict on failure."""
    try:
        return workload_api(request).transfers.list()
    except Exception:
        return {}
def transfer_accept(request, transfer_id, auth):
    """Accept a workload transfer with the given auth key; empty dict on failure."""
    try:
        return workload_api(request).transfers.accept(transfer_id, auth)
    except Exception:
        return {}
def transfer_delete(request, transfer_id):
    """Delete a workload transfer; returns True on success."""
    client = workload_api(request)
    client.transfers.delete(transfer_id)
    return True
def transfer_complete(request, transfer_id):
    """Mark a workload transfer as complete; returns True on success."""
    client = workload_api(request)
    client.transfers.complete(transfer_id)
    return True
def get_global_job_scheduler(request):
    """Report the current state of the global job scheduler."""
    scheduler = workload_api(request).global_job_scheduler
    return scheduler.get()
def enable_global_job_scheduler(request):
    """Turn the global job scheduler on."""
    scheduler = workload_api(request).global_job_scheduler
    return scheduler.enable()
def disable_global_job_scheduler(request):
    """Turn the global job scheduler off."""
    scheduler = workload_api(request).global_job_scheduler
    return scheduler.disable()
def testbubble_get(request, testbubble_id):
    """Fetch one test bubble; None when the lookup fails."""
    try:
        return workload_api(request).testbubbles.get(testbubble_id)
    except Exception:
        return None
def testbubble_list(request, snapshot_id):
    """List test bubbles belonging to the given snapshot."""
    opts = {'snapshot_id': snapshot_id}
    return workload_api(request).testbubbles.list(
        detailed=True, search_opts=opts)
def testbubble_delete(request, testbubble_id):
    """Delete a test bubble; returns True on success."""
    client = workload_api(request)
    client.testbubbles.delete(testbubble_id)
    return True
def get_config_backup_scheduler_settings(request):
    """Fetch the OpenStack config-backup scheduler settings."""
    backup_api = workload_api(request).config_backup
    return backup_api.get_config_workload()
def set_config_backup_scheduler_settings(request, jobschedule, services_to_backup):
    """Save the config-backup schedule and the services to include."""
    backup_api = workload_api(request).config_backup
    return backup_api.config_workload(jobschedule, services_to_backup)
def config_backup_list(request):
    """List all OpenStack configuration backups."""
    try:
        return workload_api(request).config_backup.config_backup_list()
    except Exception as ex:
        raise Exception(str(ex))
def config_backup_create(request, name, description):
    """Create a new configuration backup with the given name/description."""
    try:
        return workload_api(request).config_backup.config_backup(
            name, description)
    except Exception as ex:
        raise Exception(str(ex))
def config_backup_delete(request, backup_id):
    """Delete a configuration backup and return the API message."""
    try:
        return workload_api(request).config_backup.config_backup_delete(
            backup_id)
    except Exception as ex:
        raise Exception(str(ex))
def config_backup_get(request, backup_id):
    """Fetch details for a single configuration backup."""
    try:
        return workload_api(request).config_backup.get_config_backup(backup_id)
    except Exception as ex:
        raise Exception(str(ex))
def get_default_services_to_backup(request):
    """Return the default set of services eligible for config backup."""
    backup_api = workload_api(request).config_backup
    return backup_api.default_services
def create_policy(request, name, description, field_values):
    """Create a new workload policy."""
    policy_api = workload_api(request).workload_policy
    return policy_api.create(name, description, field_values)
def get_policy_list(request):
    """List all workload policies."""
    try:
        return workload_api(request).workload_policy.list()
    except Exception as ex:
        raise Exception(str(ex))
def get_policy(request, policy_id):
    """Fetch a single workload policy by id."""
    try:
        return workload_api(request).workload_policy.get(policy_id)
    except Exception as ex:
        raise Exception(str(ex))
def update_policy(request, policy_id, name, description, field_values):
    """Update an existing workload policy."""
    policy_api = workload_api(request).workload_policy
    return policy_api.update(policy_id, name, description, field_values)
def assign_policy(request, policy_id, add_projects, remove_projects):
    """Assign a policy to (and/or remove it from) the given projects."""
    try:
        return workload_api(request).workload_policy.assign(
            policy_id, add_projects, remove_projects)
    except Exception as ex:
        raise ex
def delete_policy(request, policy_id):
    """Delete a workload policy and return the API message."""
    try:
        return workload_api(request).workload_policy.delete(policy_id)
    except Exception as ex:
        raise Exception(str(ex))
def workload_policy_list(request, project_id):
    """List the policies assigned to the given project."""
    try:
        return workload_api(request).workload_policy.get_assigned_policies(
            project_id)
    except Exception as ex:
        raise Exception(str(ex))