# NOTE: the following banner text was a package-index page artifact
# (repository URL / version "5.2.1.1.dev1") accidentally pasted above the
# header; kept here as a comment so the module remains importable.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Trilio Data, Inc. All Rights Reserved.
#
import contextlib
import datetime
import ipaddress
import json
import os
import pickle
import random
import sys
import time
import uuid

from neutronclient.common import exceptions as neutron_exceptions
from oslo_messaging._drivers import amqp
from taskflow import engines
from taskflow import flow
from taskflow import task
from taskflow.listeners import printing
from taskflow.patterns import graph_flow as gf
from taskflow.patterns import linear_flow as lf
from taskflow.patterns import unordered_flow as uf
from taskflow.utils import misc

import workloadmgr.context as context
from workloadmgr import autolog
from workloadmgr import utils
from workloadmgr.common.workloadmgr_keystoneclient import KeystoneClient
from workloadmgr.compute import nova
from workloadmgr.image import glance
from workloadmgr.network import neutron
from workloadmgr.openstack.common import fileutils
from workloadmgr.openstack.common import log as logging
from workloadmgr.openstack.common.gettextutils import _
from workloadmgr.virt import driver

from . import vmtasks
from . import workflow
# Module-level loggers: LOG is the standard logger; Logger wraps it for the
# autolog.log_method entry/exit tracing decorators used throughout this file.
LOG = logging.getLogger(__name__)
Logger = autolog.Logger(LOG)
def get_vms(cntx, migration_id):
    """Return the ordered list of VM descriptors to migrate.

    Loads the migration record and its plan, keeps only VMs whose per-VM
    'include' option is set, validates UEFI boot constraints against the
    configured glance image source, and finally orders the result: VMs with
    an explicit power 'sequence' option first (ascending sequence), then all
    VMs without a sequence.

    :param cntx: request context used for DB and glance calls
    :param migration_id: id of the migration record
    :returns: list of dicts with vm_id/vm_name/key info and hypervisor fields
    :raises Exception: when a UEFI VM has no image source, or its image is
        not flagged with hw_firmware_type=UEFI
    """
    db = vmtasks.WorkloadMgrDB().db
    migration = db.migration_get(cntx, migration_id)
    migration_plan = db.migration_plan_get(cntx, migration.migration_plan_id)
    # Migration options are persisted as a pickled blob on the record.
    migration_options = pickle.loads(bytes(migration.pickle, 'utf-8'))
    migration_plan_vms = db.migration_plan_vms_get(cntx, migration_plan.id)
    image_service = glance.get_default_image_service(production=True)

    vms_to_be_migrated = []
    for vm in migration_plan_vms:
        vm_options = utils.get_vm_migration_options(
            migration_options, vm.vm_id, migration_options['target'])
        if not vm_options.get('include', False):
            continue
        vms_to_be_migrated.append(vm)

        # Validate UEFI constraints for included VMs only.
        image_id = vm_options.get('image_source', "")
        boot_option = 'bios'
        for m in vm.metadata:
            if m.key.lower() == "boot_options":
                boot_option = m.value.lower()
                break
        if boot_option == 'uefi' and image_id == "":
            raise Exception("A UEFI VM requires an glance image source "
                            "with 'hw_firmware_type' is set to 'UEFI'")
        if image_id:
            image = image_service.show(cntx, image_id)
            if image.get('properties', {}).get('hw_firmware_type', 'bios').lower() != 'uefi':
                raise Exception("Image %s must have 'hw_firmware_type' "
                                "property set to 'UEFI'" % image_id)

    def _vm_record(migration_vm):
        # Build the descriptor dict returned for each VM; hypervisor fields
        # are fixed placeholders for the libvirt target.
        return {'vm_id': migration_vm.vm_id,
                'vm_name': migration_vm.vm_name,
                'keyname': db.get_metadata_value(migration_vm.metadata,
                                                 'key_name'),
                'keydata': db.get_metadata_value(migration_vm.metadata,
                                                 'key_data'),
                'hypervisor_hostname': 'None',
                'hypervisor_type': 'QEMU'}

    def _power_sequence(migration_vm):
        # Return the VM's power-on sequence option, or None when unset or
        # falsy (matching the original truthiness-based check).
        vm_options = utils.get_vm_migration_options(
            migration_options, migration_vm.vm_id, migration_options['target'])
        # BUG FIX: the original condition referenced the undefined name
        # 'vme_options', raising NameError whenever a sequence was set.
        if vm_options and \
                'power' in vm_options and \
                vm_options['power'] and \
                'sequence' in vm_options['power'] and \
                vm_options['power']['sequence']:
            return vm_options['power']['sequence']
        return None

    vms_without_power_sequence = []
    for migration_vm in vms_to_be_migrated:
        if _power_sequence(migration_vm) is None:
            vms_without_power_sequence.append(_vm_record(migration_vm))

    vms_with_power_sequence = []
    sequence = 0
    # NOTE(review): if a declared sequence is negative or never matched this
    # loop would not terminate — presumably upstream validation prevents
    # that; confirm.
    while (len(vms_with_power_sequence) +
           len(vms_without_power_sequence)) < len(vms_to_be_migrated):
        for migration_vm in vms_to_be_migrated:
            seq = _power_sequence(migration_vm)
            if seq is not None and sequence == int(seq):
                vms_with_power_sequence.append(_vm_record(migration_vm))
        sequence = sequence + 1

    # Sequenced VMs first (in order), then the rest.
    return vms_with_power_sequence + vms_without_power_sequence
@autolog.log_method(Logger, 'migrationworkflow.get_vm_nics')
def get_vm_nics(cntx, db, instance, migration, migrated_net_resources):
    """Build the nova NIC spec list for one migrated instance.

    For each 'nic' resource in the migration plan, resolves the target
    network (remapped via *migrated_net_resources* keyed by source MAC) and
    returns a list of dicts with 'net-id'/'port-id'/'network-id' entries
    suitable for boot-time NIC attachment.

    :param instance: dict with at least 'vm_id'
    :param migrated_net_resources: mapping of source MAC -> migrated
        port/network info produced by migration_vm_networks()
    :raises Exception: on duplicate network mapping or missing network
    """
    db.migration_update(
        cntx, migration['id'], {
            'progress_msg': \
            'Migrating network interfaces for Instance %s' % \
            instance['vm_id']})
    migration_obj = db.migration_get(cntx, migration['id'])
    migration_options = pickle.loads(bytes(migration_obj.pickle, 'utf-8'))
    instance_options = utils.get_vm_migration_options(
        migration_options, instance['vm_id'], 'openstack')
    migrated_nics = []
    migration_plan_vm_resources = db.migration_plan_vm_resources_get(
        cntx, instance['vm_id'], migration['migration_plan_id'])
    # Guards against two NICs being mapped onto the same target network.
    unique_nic_check = []
    for migration_plan_vm_resource in migration_plan_vm_resources:
        if migration_plan_vm_resource.resource_type == 'nic':
            vm_nic_migration_plan = db.migration_plan_vm_network_resource_get(
                cntx, migration_plan_vm_resource.id)
            network_type = db.get_metadata_value(vm_nic_migration_plan.metadata,
                                                 'network_type')
            nic_data = pickle.loads(bytes(vm_nic_migration_plan.pickle, 'utf-8'))
            nic_info = {}
            if 'ip_address' in nic_data:
                nic_info['migration_plan_ip_address'] = nic_data['ip_address']
            # adjust IP address here
            compute_service = nova.API(production=True)
            network_service = neutron.API(production=True)
            networks = network_service.get_networks(cntx)
            network_id = db.get_metadata_value(vm_nic_migration_plan.metadata,
                                               'network_id')
            # Adjust network id to new network id
            if nic_data['mac_address'] in migrated_net_resources:
                mac_addr = nic_data['mac_address']
                network_id = migrated_net_resources[mac_addr]['network_id']
            if network_id in unique_nic_check:
                raise Exception('Network mapping is not done correctly, \
                        duplicate network interface found for network: %s'%(
                    network_id))
            unique_nic_check.append(network_id)
            nic_info.setdefault('net-id', network_id)
            ipinfo = None
            # NOTE(review): nic_info['v4-fixed-ip'] is never assigned before
            # this lookup, so the KeyError is swallowed below and ipinfo stays
            # None — the "IP in use" branch looks unreachable; confirm whether
            # this should read nic_data['ip_address'] instead.
            try:
                ipinfo = compute_service.get_fixed_ip(cntx,
                                                      nic_info['v4-fixed-ip'])
            except BaseException:
                # the old IP address may not belong to any of the subnets
                pass
            if ipinfo:
                if ipinfo.hostname:
                    # IP in use. Raise an exception
                    raise Exception("IP address %s is in use. Cannot migrate \
                            VM" % nic_info['v4-fixed-ip'])
                # else reuse existing ip address
            else:
                # find a free fixed ip on the subnet that we can use
                for net in networks:
                    if net['id'] == nic_info['net-id']:
                        if net.get('cidr', None) is None:
                            network_type = 'neutron'
                        else:
                            network_type = 'nova'
                        break
                # NOTE(review): 'net' here is the loop variable after the
                # loop ended; if 'networks' is empty this raises NameError,
                # and the message reports the *last* network's id — verify.
                if net['id'] != nic_info['net-id']:
                    raise Exception("Network by netid %s not found" % net.get('id'))
            if nic_data['mac_address'] in migrated_net_resources and \
                    'id' in migrated_net_resources[nic_data['mac_address']]:
                # A port was pre-created for this MAC: attach by port-id and
                # drop any conflicting network/IP hints.
                nic_info.setdefault(
                    'port-id',
                    migrated_net_resources[nic_data['mac_address']]['id'])
                mac_addr = nic_data['mac_address']
                network_id = migrated_net_resources[mac_addr]['network_id']
                # Conditional pops written as short-circuit expressions.
                'network-id' in nic_info and nic_info.pop('network-id')
                'v4-fixed-ip' in nic_info and nic_info.pop('v4-fixed-ip')
            else:
                m_addr = db.get_metadata_value(vm_nic_migration_plan.metadata,
                                               'macaddress')
                # Best effort: fall back to the remapped network id, if any.
                try:
                    new_network = migrated_net_resources[m_addr]
                    nic_info.setdefault('network-id', new_network['id'])
                except BaseException:
                    pass
            migrated_nics.append(nic_info)
    return migrated_nics
@autolog.log_method(Logger, 'migrationworkflow.migrate_vm_flavor')
def migrate_vm_flavor(cntx, db, vm, migration):
    """Resolve (or create) the target-cloud flavor for the migrated *vm*.

    Resolution order:
      1. flavor metadata recorded in the migration plan,
      2. per-VM overrides from the migration options,
      3. an existing flavor matched by id/name, then by exact specs,
      4. a newly created flavor (random UUID name) when nothing matches.

    :returns: a flavor object for the migrated VM
    """
    cntx = nova._get_tenant_context(cntx)
    migration_obj = db.migration_update(
        cntx, migration['id'],
        {'progress_msg': 'Migrating VM Flavor for VM ' +
         vm['vm_id']})
    compute_service = nova.API(production=True)

    # default values (kept as strings; compared via str() below)
    vcpus = '1'
    ram = '512'
    disk = '1'
    ephemeral = '0'
    swap = '0'
    flavor_id = None

    migration_plan_vm_resources = db.migration_plan_vm_resources_get(
        cntx, vm['vm_id'], migration['migration_plan_id'])
    for migration_plan_vm_resource in migration_plan_vm_resources:
        if migration_plan_vm_resource.resource_type == 'flavor':
            migration_plan_vm_flavor = db.migration_plan_vm_resource_get(
                cntx, migration_plan_vm_resource.id)
            vcpus = db.get_metadata_value(
                migration_plan_vm_flavor.metadata, 'vcpus', vcpus)
            ram = db.get_metadata_value(
                migration_plan_vm_flavor.metadata, 'ram', ram)
            # BUG FIX: 'disk' and 'ephemeral' previously fell back to *ram*
            # when the metadata key was missing, silently inflating disk
            # sizes; they now fall back to their own defaults.
            disk = db.get_metadata_value(
                migration_plan_vm_flavor.metadata, 'disk', disk)
            ephemeral = db.get_metadata_value(
                migration_plan_vm_flavor.metadata, 'ephemeral', ephemeral)
            swap = db.get_metadata_value(
                migration_plan_vm_flavor.metadata, 'swap', swap)
            flavor_id = db.get_metadata_value(
                migration_plan_vm_flavor.metadata, 'id', flavor_id)
            break

    # Per-VM option overrides; empty string means "not specified".
    migration_options = pickle.loads(bytes(migration_obj.pickle, 'utf-8'))
    instance_options = utils.get_vm_migration_options(
        migration_options, vm['vm_id'], 'openstack')
    if instance_options and 'flavor' in instance_options:
        overrides = instance_options['flavor']
        if overrides.get('id', "") != "":
            flavor_id = overrides.get('id', flavor_id)
        if overrides.get('vcpus', "") != "":
            vcpus = overrides.get('vcpus', vcpus)
        if overrides.get('ram', "") != "":
            ram = overrides.get('ram', ram)
        if overrides.get('disk', "") != "":
            disk = overrides.get('disk', disk)
        if overrides.get('ephemeral', "") != "":
            ephemeral = overrides.get('ephemeral', ephemeral)
        if overrides.get('swap', "") != "":
            swap = overrides.get('swap', swap)

    # 1) exact match on flavor id or name
    migrated_compute_flavor = None
    for flavor in compute_service.get_flavors(cntx):
        if flavor_id in (flavor.id, flavor.name):
            migrated_compute_flavor = flavor
            break
    # 2) match on the full spec tuple
    if not migrated_compute_flavor:
        for flavor in compute_service.get_flavors(cntx):
            if ((str(flavor.vcpus) == str(vcpus)) and
                    (str(flavor.ram) == str(ram)) and
                    (str(flavor.disk) == str(disk)) and
                    (str(flavor.ephemeral) == str(ephemeral)) and
                    (str(flavor.swap) == str(swap))):
                migrated_compute_flavor = flavor
                break
    # 3) no match: create a new flavor and record it for later cleanup
    if not migrated_compute_flavor:
        name = str(uuid.uuid4())
        # NOTE(review): flavor creation goes through the keystone client
        # wrapper here rather than the compute service — confirm intentional.
        keystone_client = KeystoneClient(cntx)
        migrated_compute_flavor = keystone_client.create_flavor(
            name, ram, vcpus, disk, ephemeral)
        # NOTE(review): 'vm_id' is recorded as the migration id, not the VM
        # id — looks like a copy-paste; verify against DB schema usage.
        migrated_vm_resource_values = {'id': migrated_compute_flavor.id,
                                       'vm_id': migration['id'],
                                       'migration_id': migration['id'],
                                       'resource_type': 'flavor',
                                       'resource_name': name,
                                       'metadata': {},
                                       'status': 'available'}
        db.migration_vm_resource_create(
            cntx, migrated_vm_resource_values)
    return migrated_compute_flavor
@autolog.log_method(Logger, 'migrationworkflow.migration_vm_networks')
def migration_vm_networks(cntx, db, migration):
    """Create or look up target-cloud network resources for every NIC in
    the migration plan.

    :returns: dict keyed by source-NIC MAC address; each value is either a
        neutron port dict (augmented with 'production' and optionally
        'floating_ip') or a {'network_id': ...} hint telling nova to pick a
        free IP itself.
    :raises Exception: when a mapped network/subnet cannot be found
    """

    def _get_nic_migration_options(migration_options, instance_id, mac_address):
        # Find the per-NIC option block for this MAC; tolerates the legacy
        # misspelled 'mac_adress' key as well as the correct spelling.
        instance_options = utils.get_vm_migration_options(
            migration_options, instance_id, 'openstack')
        if instance_options and 'nics' in instance_options:
            for nic_options in instance_options['nics']:
                if 'mac_adress' in nic_options:
                    if nic_options['mac_adress'] == mac_address:
                        return nic_options
                if 'mac_address' in nic_options:
                    if nic_options['mac_address'] == mac_address:
                        return nic_options
        return None

    def _get_nic_port_from_migration_options(migration_options,
                                             migration_plan_vm_nic_options,
                                             instance_id, mac_address):
        # Resolve (or create) the neutron port a migrated NIC should use.

        def _get_port_for_ip(ports, ip_address):
            # Return the existing port that holds *ip_address*, if any.
            if ports and ip_address:
                for port in ports:
                    if 'fixed_ips' in port:
                        for fixed_ip in port['fixed_ips']:
                            if fixed_ip['ip_address'] == ip_address:
                                return port
            return None

        def _create_port(name, network_id, vnic_type, fixed_ips):
            # Create the port, retrying once with explicit IPv6 addresses
            # dropped (neutron picks them) if the first attempt fails, and
            # record the new port in the DB for later cleanup.
            params = {'name': name,
                      'network_id': network_id,
                      'tenant_id': cntx.tenant_id,
                      'binding:vnic_type': vnic_type}
            params['fixed_ips'] = fixed_ips
            try:
                new_port = network_service.create_port(cntx, **params)
            except Exception as ex:
                LOG.exception(ex)
                # BUG FIX (file level): 'ipaddress' was used here but never
                # imported; the module-level import has been added.
                ips = []
                for i, x in enumerate(fixed_ips):
                    ipaddr = ipaddress.ip_address(x['ip_address'])
                    if isinstance(ipaddr, ipaddress.IPv6Address):
                        ips.append({'subnet_id': x['subnet_id']})
                    else:
                        ips.append(x)
                params['fixed_ips'] = ips
                new_port = network_service.create_port(cntx, **params)
            migration_vm_resource_values = {'id': new_port['id'],
                                            'vm_id': migration['id'],
                                            'migration_id': migration['id'],
                                            'resource_type': 'port',
                                            'resource_name': new_port['name'],
                                            'metadata': {},
                                            'status': 'available'}
            db.migration_vm_resource_create(cntx, migration_vm_resource_values)
            return new_port

        networks_mapping = []
        fixed_ips = []
        pdata = migration_plan_vm_nic_options.get('port_data', None)
        # NOTE(review): pdata is None when 'port_data' is absent, which would
        # raise TypeError here — confirm callers always supply port_data.
        vnic_type = pdata['port'].get('binding:vnic_type', 'normal')
        if 'networks_mapping' in migration_options['openstack'] and \
                'networks' in migration_options['openstack']['networks_mapping']:
            networks_mapping = \
                migration_options['openstack']['networks_mapping']['networks']
        # default to original VM network id, subnet id and ip address
        network_id = migration_plan_vm_nic_options['network_id']
        if not network_id or network_id.lower() == 'none':
            network_id = migration_plan_vm_nic_options['network_name']
        subnet_id = None
        # NOTE(review): ip_address is only bound when the key exists; later
        # reads can raise NameError for NICs without an ip_address — verify.
        if 'subnet_id' in migration_plan_vm_nic_options:
            subnet_id = migration_plan_vm_nic_options['subnet_id']
        if 'ip_address' in migration_plan_vm_nic_options:
            ip_address = migration_plan_vm_nic_options['ip_address']
        port_name = ""
        instance_options = utils.get_vm_migration_options(
            migration_options, instance_id, 'openstack')
        port_name = instance_options.get('name', '')
        nic_options = _get_nic_migration_options(
            migration_options, instance_id, mac_address)
        if nic_options:
            # Explicit per-NIC mapping wins over the generic network mapping.
            network_id = nic_options['network']['id']
            if 'subnet' in nic_options['network']:
                subnet_id = nic_options['network']['subnet']['id']
            else:
                subnet_id = None
            if 'ip_address' in nic_options:
                ip_address = nic_options['ip_address']
            if ip_address and subnet_id:
                fixed_ips = [{'ip_address': ip_address,
                              'subnet_id': subnet_id}]
            elif subnet_id:
                fixed_ips = [{'subnet_id': subnet_id}]
        else:
            # Fall back to the migration-wide vmware->target network mapping.
            for net in networks_mapping:
                if network_id in [net['vmware_network']['id'], net['vmware_network']['id'].replace('-', '_')]:
                    if subnet_id:
                        snet = net['vmware_network']['subnet']
                        if snet['id'] == subnet_id:
                            subnet_id = \
                                net['target_network']['subnet']['id']
                            network_id = net['target_network']['id']
                            if migration_options.get('openstack', {}).get('restore_topology', False) is True:
                                fixed_ips = [{'subnet_id': subnet_id,
                                              'ip_address': ip_address}]
                            else:
                                ip_address = None
                                fixed_ips = [{'subnet_id': subnet_id}]
                            break
                    else:
                        network_id = net['target_network']['id']
                        subnet_id = net['target_network']['subnet']['id']
                        if migration_options.get('openstack', {}).get('restore_topology', False) is True:
                            fixed_ips = [{'subnet_id': subnet_id,
                                          'ip_address': ip_address}]
                        else:
                            ip_address = None
                            fixed_ips = [{'subnet_id': subnet_id}]
                        break
        # Make sure networks and subnets exists
        try:
            network_service.get_network(cntx, network_id)
        except Exception as ex:
            raise Exception("Could not find the network that matches the "
                            "migration options")
        try:
            network_service.get_subnet(cntx, subnet_id)
        except Exception as ex:
            raise Exception("Could not find the subnet that matches the "
                            "migration options")
        ports = network_service.get_ports(cntx, **{'tenant_id': cntx.tenant_id,
                                                   'network_id': network_id})
        if migration_options.get('openstack', {}).get('restore_topology', False) is True:
            if 'ip_address' in migration_plan_vm_nic_options:
                ip_address = migration_plan_vm_nic_options['ip_address']
            # If IP address is set, then choose the port with that ip address
            if ports and ip_address:
                port = _get_port_for_ip(ports, ip_address)
                if port:
                    # An unattached port ('device_id' empty) can be reused.
                    if 'device_id' in port and \
                            port['device_id'] in ('', None):
                        return port
                    else:
                        raise Exception(_("Given IP address %s is in use" %
                                          ip_address))
                else:
                    try:
                        return _create_port(port_name, network_id,
                                            vnic_type, fixed_ips)
                    except Exception as ex:
                        LOG.exception(ex)
        else:
            # Let nova choose free ip address
            return {'network_id': network_id}
        raise Exception("Could not find the network that matches the migration "
                        "options")

    cntx = nova._get_tenant_context(cntx)
    migration_obj = db.migration_update(
        cntx, migration['id'],
        {'progress_msg': 'Restoring network resources'})
    migration_options = pickle.loads(bytes(migration_obj.pickle, 'utf-8'))
    migration_net_resources = {}
    network_service = neutron.API()
    for migration_plan_vm in db.migration_plan_vms_get(cntx, migration['migration_plan_id']):
        migration_plan_vm_resources = db.migration_plan_vm_resources_get(
            cntx, migration_plan_vm.vm_id, migration['migration_plan_id'])
        for migration_plan_vm_resource in migration_plan_vm_resources:
            if migration_plan_vm_resource.resource_type == 'nic':
                instance_options = utils.get_vm_migration_options(migration_options,
                                                                  migration_plan_vm.vm_id, 'openstack')
                if instance_options.get('include', True) == False:
                    continue
                src_network_type = db.get_metadata_value(
                    migration_plan_vm_resource.metadata,
                    'network_type')  # nopep8
                vm_nic_migration_plan = db.migration_plan_vm_network_resource_get(
                    cntx, migration_plan_vm_resource.id)
                nic_data = pickle.loads(bytes(vm_nic_migration_plan.pickle, 'utf-8'))
                if 'port_data' in nic_data:
                    nic_data['port_data'] = json.loads(nic_data['port_data'])
                new_port = _get_nic_port_from_migration_options(
                    migration_options, nic_data,
                    migration_plan_vm.vm_id,
                    nic_data['mac_address'])
                if new_port:
                    migration_net_resources.setdefault(
                        nic_data['mac_address'], new_port)
                    migration_net_resources[nic_data['mac_address']
                                            ]['production'] = True
                    if nic_data.get('floating_ip', None) is not None:
                        migration_net_resources[nic_data['mac_address']] \
                            ['floating_ip'] = nic_data['floating_ip']
                    continue
                # NOTE(review): _get_pit_resource_id is not defined in this
                # module, so the legacy pit-id path below would raise
                # NameError if ever reached — confirm it is dead code
                # inherited from the restore workflow.
                # private network
                pit_id = _get_pit_resource_id(vm_nic_migration_plan.metadata,
                                              'network_id')
                if pit_id:
                    if pit_id in migration_net_resources:
                        new_network = migration_net_resources[pit_id]
                    else:
                        raise Exception("Could not find the network that "
                                        "matches the migration options")
                # private subnet
                pit_id = _get_pit_resource_id(vm_nic_migration_plan.metadata,
                                              'subnet_id')
                if pit_id:
                    # BUG FIX: this branch referenced the undefined name
                    # 'migration_plan_net_resources'; it now uses
                    # migration_net_resources like every sibling branch.
                    if pit_id in migration_net_resources:
                        new_subnet = migration_net_resources[pit_id]
                    else:
                        raise Exception("Could not find the network that "
                                        "matches the migration options")
                # external network
                pit_id = _get_pit_resource_id(vm_nic_migration_plan.metadata,
                                              'ext_network_id')
                if pit_id:
                    if pit_id in migration_net_resources:
                        new_ext_network = migration_net_resources[pit_id]
                    else:
                        raise Exception("Could not find the network that "
                                        "matches the migration options")
                # external subnet
                pit_id = _get_pit_resource_id(vm_nic_migration_plan.metadata,
                                              'ext_subnet_id')
                if pit_id:
                    if pit_id in migration_net_resources:
                        new_ext_subnet = migration_net_resources[pit_id]
                    else:
                        raise Exception("Could not find the network "
                                        "that matches the migration options")
                # router
                pit_id = _get_pit_resource_id(vm_nic_migration_plan.metadata,
                                              'router_id')
                if pit_id:
                    if pit_id in migration_net_resources:
                        new_router = migration_net_resources[pit_id]
                    else:
                        raise Exception("Could not find the network that "
                                        "matches the migration options")
    return migration_net_resources
@autolog.log_method(Logger, 'migration_vm_security_groups')
def migration_vm_security_groups(cntx, db, migration):
    """Match security groups recorded in the migration plan against groups
    that already exist on the target cloud.

    :returns: tuple of ({vm_id: {group_name: existing_group_id}}, {}) —
        the second element (skipped rules) is currently always empty.
    """
    cntx = nova._get_tenant_context(cntx)
    network_service = neutron.API(production=True)
    neutron_client = neutron.get_client(cntx)
    existing_groups = neutron_client.list_security_groups()

    # Collect the neutron security groups recorded in the plan, per VM.
    groups_by_vm = {}
    plan_resources = db.migration_plan_resources_get(
        cntx, migration['migration_plan_id'])
    for plan_resource in plan_resources:
        if plan_resource.resource_type != 'security_group':
            continue
        group_type = db.get_metadata_value(plan_resource.metadata,
                                           'security_group_type')
        if group_type != 'neutron':
            continue
        vm_id = db.get_metadata_value(plan_resource.metadata, 'vm_id')
        rules = [pickle.loads(bytes(rule.pickle, 'utf-8'))
                 for rule in db.migration_plan_vm_security_group_rules_get(
                     cntx, plan_resource.id)]
        groups_by_vm.setdefault(vm_id, []).append(
            {'id': plan_resource.resource_name,
             'name': db.get_metadata_value(plan_resource.metadata, 'name'),
             'description': db.get_metadata_value(plan_resource.metadata,
                                                  'description'),
             'security_group_rules': rules})

    # Resolve each wanted group name to an existing group id on the target.
    resolved = {}
    for vm_id, wanted_groups in groups_by_vm.items():
        resolved[vm_id] = {}
        for wanted in wanted_groups:
            for existing in existing_groups['security_groups']:
                if existing['name'] == wanted['name']:
                    resolved[vm_id][wanted['name']] = existing['id']
    return resolved, {}
class MigrationVMNetworks(task.Task):
    """Taskflow task that provisions target-cloud network resources for
    every NIC in the migration plan."""

    def execute(self, context, target_platform, migration):
        return self.execute_with_log(context, target_platform, migration)

    def revert(self, context, target_platform, migration, result, flow_failures):
        return self.revert_with_log(context)

    @autolog.log_method(Logger, 'MigrationVMNetworks.execute')
    def execute_with_log(self, context, target_platform, migration):
        # Restore the networking configuration of VMs
        db = vmtasks.WorkloadMgrDB().db
        cntx = amqp.RpcContext.from_dict(context)
        self.db = db
        self.cntx = cntx
        self.target_platform = target_platform
        # Raises if the user has cancelled the migration.
        db.migration_get_metadata_cancel_flag(cntx, migration['id'])
        self.migrated_net_resources = migration_vm_networks(cntx, db, migration)
        return self.migrated_net_resources

    @autolog.log_method(Logger, 'MigrationVMNetworks.revert')
    def revert_with_log(self, *args, **kwargs):
        # Nothing to undo here; created ports are tracked in the DB.
        pass
class MigrationSecurityGroups(task.Task):
    """Taskflow task that maps plan security groups to existing groups on
    the target cloud, warning about any rules that could not be migrated."""

    def execute(self, context, target_platform, migration):
        return self.execute_with_log(context, target_platform, migration)

    def revert(self, context, target_platform, migration, result, flow_failures):
        return self.revert_with_log(context)

    @autolog.log_method(Logger, 'MigrationSecurityGroups.execute')
    def execute_with_log(self, context, target_platform, migration):
        # Restore the security groups
        db = vmtasks.WorkloadMgrDB().db
        cntx = amqp.RpcContext.from_dict(context)
        self.db = db
        self.cntx = cntx
        self.target_platform = target_platform
        self.security_groups = None
        # Raises if the user has cancelled the migration.
        db.migration_get_metadata_cancel_flag(cntx, migration['id'])
        self.security_groups, skipped_security_group_rule_migration = \
            migration_vm_security_groups(cntx, db, migration)
        if skipped_security_group_rule_migration:
            # Surface skipped rules on the migration record as a warning.
            db.migration_update(
                cntx, migration['id'],
                {'warning_msg': 'Unable to find security group rules: {}. Please refer workloadmgr logs for more details.'.format(skipped_security_group_rule_migration)})
        return self.security_groups

    @autolog.log_method(Logger, 'MigrationSecurityGroups.revert')
    def revert_with_log(self, *args, **kwargs):
        # No cleanup: security groups are only matched, never created here.
        pass
@autolog.log_method(Logger, 'migrationworkflow.migrate_vm')
def migrate_vm(cntx, db, instance, migration, migrated_net_resources,
               migrated_security_groups):
    """Migrate a single VM: resolve its flavor and NIC specs, then delegate
    the actual instance build to the libvirt compute driver."""
    flavor = migrate_vm_flavor(cntx, db, instance, migration)
    nics = get_vm_nics(cntx, db, instance, migration, migrated_net_resources)

    migration_obj = db.migration_get(cntx, migration['id'])
    options = pickle.loads(bytes(migration_obj.pickle, 'utf-8'))
    instance_options = utils.get_vm_migration_options(options,
                                                      instance['vm_id'],
                                                      'openstack')
    # Fall back to the migration-wide zone when no per-VM AZ is set.
    if instance_options.get('availability_zone', None) is None:
        instance_options['availability_zone'] = options.get('zone', None)

    compute_driver = driver.load_compute_driver(None, 'libvirt.LibvirtDriver')
    # call with new context
    cntx = nova._get_tenant_context(cntx)
    return compute_driver.migrate_vm(cntx, db, instance, migration,
                                     migrated_net_resources,
                                     migrated_security_groups,
                                     flavor,
                                     nics,
                                     instance_options)
@autolog.log_method(Logger, 'migrationworkflow.delete_migrated_vm')
def delete_migrated_vm(cntx, db, instance, migration):
    # NOTE(review): this early return makes the cleanup below unreachable —
    # migrated VMs are currently never deleted on revert. Presumably this is
    # intentional (avoid destroying a partially migrated VM); confirm before
    # removing the return.
    return
    virtdriver = driver.load_compute_driver(None, 'libvirt.LibvirtDriver')
    virtdriver.delete_migrated_vm(cntx, db, instance, migration)
class MigrateVM(task.Task):
    """Taskflow task that migrates one VM to the target platform and, on
    revert, best-effort deletes whatever was created."""

    def execute(self, context, target_platform, instance, migration,
                migrated_net_resources, migrated_security_groups):
        return self.execute_with_log(context, target_platform, instance,
                                     migration, migrated_net_resources,
                                     migrated_security_groups)

    def revert(self, context, target_platform, instance, migration,
               migrated_net_resources, migrated_security_groups, result, flow_failures):
        return self.revert_with_log(context, instance, migration)

    @autolog.log_method(Logger, 'MigrateVM.execute')
    def execute_with_log(self, context, target_platform, instance, migration,
                         migrated_net_resources, migrated_security_groups):
        cntx = amqp.RpcContext.from_dict(context)
        db = vmtasks.WorkloadMgrDB().db
        # Raises if the user has cancelled the migration.
        db.migration_get_metadata_cancel_flag(cntx, migration['id'])
        assert target_platform == 'openstack'
        migrated = migrate_vm(cntx, db, instance, migration,
                              migrated_net_resources,
                              migrated_security_groups)
        return {'vm_name': migrated.vm_name,
                'vm_id': migrated.vm_id,
                'uuid': migrated.vm_id}

    @autolog.log_method(Logger, 'MigrateVM.revert')
    def revert_with_log(self, context, instance, migration):
        # Best-effort cleanup; failures are logged, never re-raised.
        try:
            cntx = amqp.RpcContext.from_dict(context)
            db = vmtasks.WorkloadMgrDB().db
            delete_migrated_vm(cntx, db, instance, migration)
        except Exception as ex:
            LOG.exception(ex)
def UnorderedMigrateVMs(instances):
    """Build an unordered (parallel) flow containing one MigrateVM task per
    instance; each task is rebound to its indexed 'instance_<i>' store key
    and provides 'migrated_instance_<i>'."""
    migrate_flow = uf.Flow("migratevmuf")
    for idx, inst in enumerate(instances):
        migrate_flow.add(
            MigrateVM("MigrateVM_" + inst['vm_id'],
                      rebind=dict(instance="instance_" + str(idx)),
                      provides='migrated_instance_' + str(idx)))
    return migrate_flow
class MigrationWorkflow:
    """Top-level migration workflow: builds a linear taskflow (networks ->
    security groups -> parallel VM migration) and runs it with the parallel
    engine, persisting state under the store's 'path'."""
    def __init__(self, name, store):
        # *store* is the taskflow store dict; VMs are expanded into indexed
        # 'instance_<i>' keys so UnorderedMigrateVMs can rebind them.
        self._name = name
        self._store = store
        cntx = amqp.RpcContext.from_dict(self._store['context'])
        self._store['instances'] = get_vms(cntx, self._store['migration']['id'])
        for index, item in enumerate(self._store['instances']):
            self._store['instance_' + str(index)] = item
    def initflow(self):
        # Build the linear flow; network and security-group results feed the
        # per-VM migrate tasks via their 'provides' names.
        self._flow = lf.Flow('MigrationFlow')
        # Check if any pre migration conditions
        #self._flow.add(vmtasks.UnorderedPreRestore(self._store['instances']))
        self._flow.add(
            MigrationVMNetworks(
                "MigrationVMNetworks",
                provides='migrated_net_resources'))
        self._flow.add(
            MigrationSecurityGroups(
                "MigrationSecurityGroups",
                provides='migrated_security_groups'))
        self._flow.add(UnorderedMigrateVMs(self._store['instances']))
        # linear poweron VMs
        #self._flow.add(vmtasks.LinearSetVMsMetadata(self._store['instances']))
        # linear poweron VMs
        #self._flow.add(vmtasks.LinearPowerOnVMs(self._store['instances']))
    def execute(self):
        # Run the flow with an on-disk backend; the backend directory is
        # always removed afterwards, even on failure.
        try:
            self._store["path"] = os.path.join(self._store["path"], 'migration_' + str(self._store['migration_id']))
            fileutils.ensure_tree(self._store["path"])
            result = engines.run(
                self._flow,
                engine='parallel',
                engine_conf='parallel',
                backend={
                    'connection': self._store['connection'],
                    'path': self._store['path'],  # save data to this directory
                    'max_cache_size': self._store['max_cache_size'],  # keep up-to this much entries in memory
                },
                store=self._store)
        finally:
            fileutils.remove_tree(self._store["path"])
        # NOTE(review): unlike the rest of the file (bytes(..., 'utf-8')),
        # this decodes the pickle via encode('ascii', 'ignore'), silently
        # dropping non-ASCII bytes — confirm this round-trips correctly.
        migration = pickle.loads(
            self._store['migration']['pickle'].encode(
                'ascii', 'ignore'))
        if 'target' in migration and migration['target'] == "vmware":
            # Kick off a deep rediscovery of servers after a vmware-target run.
            compute_service = nova.API(production=True)
            search_opts = {}
            search_opts['deep_discover'] = '1'
            cntx = amqp.RpcContext.from_dict(self._store['context'])
            compute_service.get_servers(cntx, search_opts=search_opts)