# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 TrilioData, Inc.
# All Rights Reserved.

import os
import re
import urllib.parse

import webob.exc

from workloadmgr.api import wsgi
from workloadmgr.api import xmlutil
from workloadmgr import flags
from workloadmgr.openstack.common import log as logging
from workloadmgr import utils


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS


XML_NS_V1 = "http://docs.openstack.org/workloadmgr/api/v1"


def get_pagination_params(request):
    """Return marker, limit tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either marker or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.
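
    Example (illustrative):
    Given: a request carrying '?limit=10&marker=abc-123'
    Returns: {'limit': 10, 'marker': 'abc-123'}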

    """
    params = {}
    if "limit" in request.GET:
        params["limit"] = _get_limit_param(request)
    if "marker" in request.GET:
        params["marker"] = _get_marker_param(request)
    return params


def _get_limit_param(request):
    """Extract integer limit from request or fail"""
    try:
        limit = int(request.GET["limit"])
    except ValueError:
        msg = _("limit param must be an integer")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    if limit < 0:
        msg = _("limit param must be positive")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    return limit


def _get_marker_param(request):
    """Extract marker id from request or fail"""
    return request.GET["marker"]


def limited(items, request, max_limit=FLAGS.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
                    GET variables. 'offset' is where to start in the list,
                    and 'limit' is the maximum number of items to return. If
                    'limit' is not specified, 0, or > max_limit, we default
                    to max_limit. Negative values for either offset or limit
                    will cause exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
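
    Example (illustrative, assuming max_limit >= 10):
    Given: items=list(range(100)) and a request carrying '?offset=20&limit=10'
    Returns: items[20:30], i.e. [20, 21, ..., 29]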
    """
    try:
        offset = int(request.GET.get("offset", 0))
    except ValueError:
        msg = _("offset param must be an integer")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        limit = int(request.GET.get("limit", max_limit))
    except ValueError:
        msg = _("limit param must be an integer")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if limit < 0:
        msg = _("limit param must be positive")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if offset < 0:
        msg = _("offset param must be positive")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    limit = min(max_limit, limit or max_limit)
    range_end = offset + limit
    return items[offset:range_end]


def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
    """Return a slice of items according to the requested marker and limit."""
    params = get_pagination_params(request)

    limit = params.get("limit", max_limit)
    marker = params.get("marker")

    limit = min(max_limit, limit)
    start_index = 0
    if marker:
        start_index = -1
        for i, item in enumerate(items):
            if "flavorid" in item:
                if item["flavorid"] == marker:
                    start_index = i + 1
                    break
            elif item["id"] == marker or item.get("uuid") == marker:
                start_index = i + 1
                break
        if start_index < 0:
            msg = _("marker [%s] not found") % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)
    range_end = start_index + limit
    return items[start_index:range_end]


def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.workloadmgr.com/v1/123'
    Returns: 'http://www.workloadmgr.com/123'

    Given: 'http://www.workloadmgr.com/v1'
    Returns: 'http://www.workloadmgr.com'

    """
    parsed_url = urllib.parse.urlsplit(href)
    url_parts = parsed_url.path.split("/", 2)

    # NOTE: this should match vX.X or vX
    expression = re.compile(r"^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)")
    if expression.match(url_parts[1]):
        del url_parts[1]

    new_path = "/".join(url_parts)

    if new_path == parsed_url.path:
        msg = _("href %s does not contain version") % href
        LOG.debug(msg)
        raise ValueError(msg)

    parsed_url = list(parsed_url)
    parsed_url[2] = new_path
    return urllib.parse.urlunsplit(parsed_url)


def dict_to_query_str(params):
    # TODO(throughnothing): we should just use urllib.parse.urlencode instead
    # of this, but currently we don't work with urlencoded URLs
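    # Example (illustrative): {'limit': 10, 'marker': 'abc'} becomes
    # 'limit=10&marker=abc'; keys follow dict iteration order and values are
    # not URL-encoded.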
    param_str = ""
    for key, val in params.items():
        param_str = param_str + "=".join([str(key), str(val)]) + "&"

    return param_str.rstrip("&")


def parse_list_from_query_string(names):
    """
    accepts strings formatted as lists with square brackets
    names can be in the format
    "[bob,jeff,greg]" or '["bob","jeff","greg"]' or "['bob','jeff','greg']"
    """
    def remove_prefix(text: str, prefix: str):
        if text.startswith(prefix):
            return text[len(prefix):]
        return text

    def remove_postfix(text: str, postfix: str):
        if text.endswith(postfix):
            text = text[:-len(postfix)]
        return text

    if names is None:
        return

    if len(names) != 1:
        return names

    # if we don't start with a "[" and end with "]" it's just a normal entry
    flat_names = names[0]
    if not flat_names.startswith("[") and not flat_names.endswith("]"):
        return names

    flat_names = remove_prefix(flat_names, "[")
    flat_names = remove_postfix(flat_names, "]")

    names_list = flat_names.split(",")
    names_list = [remove_prefix(n.strip(), '"') if '"' in n
                  else remove_prefix(n.strip(), "'") for n in names_list]
    names_list = [remove_postfix(n.strip(), '"') if '"' in n
                  else remove_postfix(n.strip(), "'") for n in names_list]

    return names_list


class ViewBuilder(object):
    """Model API responses as dictionaries."""

    _collection_name = None

    def _get_links(self, request, identifier):
        return [
            {"rel": "self",
             "href": self._get_href_link(request, identifier)},
            {"rel": "bookmark",
             "href": self._get_bookmark_link(request, identifier)},
        ]

    def _get_next_link(self, request, identifier):
        """Return href string with proper limit and marker params."""
        params = request.params.copy()
        params["marker"] = identifier
        prefix = self._update_link_prefix(
            request.application_url, FLAGS.osapi_workloadmgr_base_URL
        )
        url = os.path.join(
            prefix,
            request.environ["workloadmgr.context"].project_id,
            self._collection_name,
        )
        return "%s?%s" % (url, dict_to_query_str(params))

    def _get_href_link(self, request, identifier):
        """Return an href string pointing to this object."""
        prefix = self._update_link_prefix(
            request.application_url, FLAGS.osapi_workloadmgr_base_URL
        )
        return os.path.join(
            prefix,
            request.environ["workloadmgr.context"].project_id,
            self._collection_name,
            str(identifier),
        )

    def _get_bookmark_link(self, request, identifier):
        """Create a URL that refers to a specific resource."""
        base_url = remove_version_from_href(request.application_url)
        base_url = self._update_link_prefix(base_url, FLAGS.osapi_workloadmgr_base_URL)
        return os.path.join(
            base_url,
            request.environ["workloadmgr.context"].project_id,
            self._collection_name,
            str(identifier),
        )

    def _get_collection_links(self, request, items, id_key="uuid"):
        """Retrieve 'next' link, if applicable."""
        links = []
        limit = int(request.params.get("limit", 0))
        if limit and limit == len(items):
            last_item = items[-1]
            if id_key in last_item:
                last_item_id = last_item[id_key]
            else:
                last_item_id = last_item["id"]
            links.append(
                {"rel": "next",
                 "href": self._get_next_link(request, last_item_id)}
            )
        return links

    def _update_link_prefix(self, orig_url, prefix):
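        # Example (illustrative): given orig_url 'http://10.0.0.1:8780/v1/p1'
        # and prefix 'https://api.example.com', this returns
        # 'https://api.example.com/v1/p1' by swapping only the scheme and
        # netloc of the original URL.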
        if not prefix:
            return orig_url
        url_parts = list(urllib.parse.urlsplit(orig_url))
        prefix_parts = list(urllib.parse.urlsplit(prefix))
        url_parts[0:2] = prefix_parts[0:2]
        return urllib.parse.urlunsplit(url_parts)


class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
    def deserialize(self, text):
        dom = utils.safe_minidom_parse_string(text)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {"body": {"metadata": metadata}}


class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
    def deserialize(self, text):
        dom = utils.safe_minidom_parse_string(text)
        metadata_item = self.extract_metadata(dom)
        return {"body": {"meta": metadata_item}}


class MetadataXMLDeserializer(wsgi.XMLDeserializer):
    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request"""
        if metadata_node is None:
            return {}
        metadata = {}
        for meta_node in self.find_children_named(metadata_node, "meta"):
            key = meta_node.getAttribute("key")
            metadata[key] = self.extract_text(meta_node)
        return metadata

    def _extract_metadata_container(self, datastring):
        dom = utils.safe_minidom_parse_string(datastring)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {"body": {"metadata": metadata}}

    def create(self, datastring):
        return self._extract_metadata_container(datastring)

    def update_all(self, datastring):
        return self._extract_metadata_container(datastring)

    def update(self, datastring):
        dom = utils.safe_minidom_parse_string(datastring)
        metadata_item = self.extract_metadata(dom)
        return {"body": {"meta": metadata_item}}


metadata_nsmap = {None: xmlutil.XMLNS_V11}


class MetaItemTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        sel = xmlutil.Selector("meta", xmlutil.get_items, 0)
        root = xmlutil.TemplateElement("meta", selector=sel)
        root.set("key", 0)
        root.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)


class MetadataTemplateElement(xmlutil.TemplateElement):
    def will_render(self, datum):
        return True


class MetadataTemplate(xmlutil.TemplateBuilder):
    def construct(self):
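        # Illustratively, this template renders {'metadata': {'name': 'daily'}}
        # as <metadata xmlns="..."><meta key="name">daily</meta></metadata>,
        # one <meta> element per key/value pair.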
        root = MetadataTemplateElement("metadata", selector="metadata")
        elem = xmlutil.SubTemplateElement(root, "meta", selector=xmlutil.get_items)
        elem.set("key", 0)
        elem.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)