python3-dmapi: /usr/lib/python3/dist-packages/dmapi/objects/instance.py
# Copyright 2018 TrilioData Inc.
# All Rights Reserved.

import contextlib

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import versionutils
from sqlalchemy import or_
from sqlalchemy.sql import func
from sqlalchemy.sql import null

from dmapi import db
from dmapi.db.sqlalchemy import api as db_api
from dmapi.db.sqlalchemy import models
from dmapi import exception
from dmapi.i18n import _
from dmapi import objects
from dmapi.objects import base
from dmapi.objects import fields
from dmapi import utils


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
                                    'security_groups',
                                    'pci_devices', 'tags', 'services',
                                    'fault']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['flavor', 'old_flavor',
                                        'new_flavor', 'ec2_ids']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests',
                          'flavor', 'vcpu_model', 'migration_context',
                          'keypairs', 'device_metadata']
# These are fields that are applied/dropped by migration_context
_MIGRATION_CONTEXT_ATTRS = ['numa_topology', 'pci_requests',
                            'pci_devices']

# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
                           _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
                           _INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
                           'security_groups']

# Maximum number of tags allowed per instance
MAX_TAG_COUNT = 50


def _expected_cols(expected_attrs):
    """Return expected_attrs that are columns needing joining.

    NB: This function may modify expected_attrs if one
    requested attribute requires another.
    """
    if not expected_attrs:
        return expected_attrs

    simple_cols = [attr for attr in expected_attrs
                   if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]

    complex_cols = ['extra.%s' % field
                    for field in _INSTANCE_EXTRA_FIELDS
                    if field in expected_attrs]
    if complex_cols:
        simple_cols.append('extra')
    simple_cols = [x for x in simple_cols if x not in _INSTANCE_EXTRA_FIELDS]
    expected_cols = simple_cols + complex_cols
    # NOTE(pumaranikar): expected_cols list can contain duplicates since
    # caller appends column attributes to expected_attr without checking if
    # it is already present in the list or not. Hence, we remove duplicates
    # here, if any. The resultant list is sorted based on list index to
    # maintain the insertion order.
    return sorted(list(set(expected_cols)), key=expected_cols.index)
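
# Illustrative example (not part of the original module): joined fields pass
# through unchanged, while instance_extra fields are rewritten as
# 'extra.<field>' and additionally pull in the 'extra' column itself, e.g.:
#
#   _expected_cols(['metadata', 'flavor', 'pci_requests'])
#   # -> ['metadata', 'extra', 'extra.pci_requests', 'extra.flavor']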


_NO_DATA_SENTINEL = object()


# TODO(berrange): Remove ContegoObjectDictCompat
@base.ContegoObjectRegistry.register
class Instance(base.ContegoPersistentObject, base.ContegoObject,
               base.ContegoObjectDictCompat):
    # Version 2.0: Initial version
    VERSION = '2.0'

    fields = {
        'id': fields.IntegerField(),

        'hostname': fields.StringField(nullable=True),

        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),

        'instance_type_id': fields.IntegerField(nullable=True),

        'user_data': fields.StringField(nullable=True),

        'reservation_id': fields.StringField(nullable=True),

        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),

        'availability_zone': fields.StringField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'launched_on': fields.StringField(nullable=True),

        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),

        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'task_state': fields.StringField(nullable=True),
        }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    @property
    def image_meta(self):
        return objects.ImageMeta.from_instance(self)

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata) if
                                          'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata) if
                                   'metadata' in self else {})

    def obj_clone(self):
        """Create a copy of this instance object."""
        nobj = super(Instance, self).obj_clone()
        # Since the base object only does a deep copy of the defined fields,
        # we also need to copy the additional metadata-tracking attributes
        # so the clone does not report its metadata as changed and push an
        # update even when the information is stale.
        if hasattr(self, '_orig_metadata'):
            nobj._orig_metadata = dict(self._orig_metadata)
        if hasattr(self, '_orig_system_metadata'):
            nobj._orig_system_metadata = dict(self._orig_system_metadata)
        return nobj

    def obj_reset_changes(self, fields=None, recursive=False):
        super(Instance, self).obj_reset_changes(fields,
                                                recursive=recursive)
        self._reset_metadata_tracking(fields=fields)

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes
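
    # Note added for clarity (not in the original source): metadata and
    # system_metadata are plain dicts that callers mutate in place, so the
    # base object's field-level change tracking alone would miss edits such
    # as inst.metadata['foo'] = 'bar'.  Comparing against the _orig_*
    # snapshots taken in _reset_metadata_tracking() is what lets
    # obj_what_changed() report them as changed (assuming the concrete
    # object defines those fields, as Nova's Instance does).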

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Instance, cls)._obj_from_primitive(context, objver,
                                                        primitive)
        self._reset_metadata_tracking()
        return self

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        except exception.ObjectActionError:
            # This indicates self.id was not set and could not be lazy loaded.
            # What this means is the instance has not been persisted to a db
            # yet, which should indicate it has not been scheduled yet. In this
            # situation it will have a blank name.
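            # NOTE (assumption, not in the original source): vm_states and
            # task_states are the compute state-constant modules as imported
            # in Nova; this module does not import them above, so this branch
            # relies on them being made available.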
            if (self.vm_state == vm_states.BUILDING and
                    self.task_state == task_states.SCHEDULING):
                base_name = ''
            else:
                # If the vm/task states don't indicate that it's being booted
                # then we have a bug here. Log an error and attempt to return
                # the uuid which is what an error above would return.
                LOG.error('Could not lazy-load instance.id while '
                          'attempting to generate the instance name.')
                base_name = self.uuid
        return base_name
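
    # Illustrative example (not in the original source): with a Nova-style
    # template CONF.instance_name_template = 'instance-%08x' and id = 5, the
    # property yields 'instance-00000005'; a mapping template such as
    # 'uuid-%(uuid)s' raises TypeError on the integer id and is expanded from
    # the dict of set fields in the fallback above.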

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
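            # Note (Nova convention, assumed here): a soft-deleted row stores
            # its own id in the 'deleted' column (0 otherwise), so equality
            # with 'id' marks deletion; 'cleaned' is stored as 0/1 and
            # coerced to a bool below.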
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                objects.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))

        instance.obj_reset_changes()
        return instance

    @staticmethod
    def _db_instance_get_by_uuid(context, uuid, columns_to_join,
                                 use_slave=False):
        return db.instance_get_by_uuid(context, uuid,
                                       columns_to_join=columns_to_join)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join,
                                               use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)
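
    # A minimal usage sketch (illustrative only; the context and attribute
    # names below are assumptions, not part of this module):
    #
    #   inst = Instance.get_by_uuid(ctxt, instance_uuid,
    #                               expected_attrs=['metadata', 'flavor'])
    #   print(inst.display_name)
    #
    # expected_attrs selects which optional attributes _expected_cols()
    # translates into DB joins (or instance_extra columns) before
    # _from_db_object() builds the object.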