aaronreidsmith / numpy · python

Version: 1.17.4

/ core / setup.py

from __future__ import division, print_function

import os
import sys
import pickle
import copy
import warnings
import platform
import textwrap
from os.path import join

from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from numpy._build_utils.apple_accelerate import (
    uses_accelerate_framework, get_sgemv_fix
    )
from numpy.compat import npy_load_module
from setup_common import *

# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")

# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
# bogus value for affected strides in order to help smoke out bad stride usage
# when relaxed stride checking is enabled.
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
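
# Illustrative effect (assuming the default build, where relaxed strides
# checking is on): a shape (1, 3) array is flagged both C- and F-contiguous
# regardless of what strides[0] holds:
#
#     >>> import numpy as np
#     >>> a = np.ones((1, 3))
#     >>> a.flags['C_CONTIGUOUS'] and a.flags['F_CONTIGUOUS']
#     True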

# XXX: ugly, we use a class to avoid calling some expensive functions twice
# when generating config.h/numpyconfig.h. I don't see a better way because
# distutils forces config.h generation inside an Extension class, and as such
# sharing configuration information between extensions is not easy.
# A pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30

class CallOnceOnly(object):
    def __init__(self):
        self._check_types = None
        self._check_ieee_macros = None
        self._check_complex = None

    def check_types(self, *a, **kw):
        if self._check_types is None:
            out = check_types(*a, **kw)
            self._check_types = pickle.dumps(out)
        else:
            out = copy.deepcopy(pickle.loads(self._check_types))
        return out

    def check_ieee_macros(self, *a, **kw):
        if self._check_ieee_macros is None:
            out = check_ieee_macros(*a, **kw)
            self._check_ieee_macros = pickle.dumps(out)
        else:
            out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
        return out

    def check_complex(self, *a, **kw):
        if self._check_complex is None:
            out = check_complex(*a, **kw)
            self._check_complex = pickle.dumps(out)
        else:
            out = copy.deepcopy(pickle.loads(self._check_complex))
        return out
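
# Hypothetical usage sketch (the real wiring lives further down this file, in
# the config-header generators): sharing a single instance means each
# expensive probe runs at most once per build.
#
#     cocache = CallOnceOnly()
#     moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
#     moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])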

def pythonlib_dir():
    """return path where libpython* is."""
    if sys.platform == 'win32':
        return os.path.join(sys.prefix, "libs")
    else:
        return get_config_var('LIBDIR')

def is_npy_no_signal():
    """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
    header."""
    return sys.platform == 'win32'

def is_npy_no_smp():
    """Return True if the NPY_NO_SMP symbol must be defined in public
    header (when SMP support cannot be reliably enabled)."""
    # Perhaps a fancier check is in order here: enable threads only if
    # there are actually multiple CPUs? But threaded code can be nice
    # even on a single CPU, so that long-running calculations do not
    # block.
    return 'NPY_NOSMP' in os.environ
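
# For example, a user could opt out of SMP support at build time with
# something like (illustrative):
#
#     NPY_NOSMP=1 python setup.py build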

def win32_checks(deflist):
    from numpy.distutils.misc_util import get_build_architecture
    a = get_build_architecture()

    # Distutils hack on AMD64 on windows
    print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
          (a, os.name, sys.platform))
    if a == 'AMD64':
        deflist.append('DISTUTILS_USE_SDK')

    # On win32, force the long double format string to be 'g', not
    # 'Lg', since the MS runtime does not support a long double whose
    # size is greater than sizeof(double)
    if a == "Intel" or a == "AMD64":
        deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')

def check_math_capabilities(config, moredefs, mathlibs):
    def check_func(func_name):
        return config.check_func(func_name, libraries=mathlibs,
                                 decl=True, call=True)

    def check_funcs_once(funcs_name):
        decl = dict([(f, True) for f in funcs_name])
        st = config.check_funcs_once(funcs_name, libraries=mathlibs,
                                     decl=decl, call=decl)
        if st:
            moredefs.extend([(fname2def(f), 1) for f in funcs_name])
        return st

    def check_funcs(funcs_name):
        # Use check_funcs_once first; if that fails, fall back to testing
        # each function individually. Return success only if all of the
        # functions are available
        if not check_funcs_once(funcs_name):
            # Global check failed, check func per func
            for f in funcs_name:
                if check_func(f):
                    moredefs.append((fname2def(f), 1))
            return 0
        else:
            return 1
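
    # fname2def (from setup_common.py) just upper-cases the name into a
    # HAVE_* token, e.g. fname2def('expm1') == 'HAVE_EXPM1'; a successful
    # probe therefore lands as '#define HAVE_EXPM1 1' in the private
    # config header.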

    #use_msvc = config.check_decl("_MSC_VER")

    if not check_funcs_once(MANDATORY_FUNCS):
        raise SystemError("One of the required functions to build numpy is"
                " not available (the list is %s)." % str(MANDATORY_FUNCS))

    # Standard functions which may not be available and for which we have a
    # replacement implementation. Note that some of these are C99 functions.

    # XXX: hack to circumvent cpp pollution from python: python puts its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
    for f in OPTIONAL_STDFUNCS_MAYBE:
        if config.check_decl(fname2def(f),
                    headers=["Python.h", "math.h"]):
            OPTIONAL_STDFUNCS.remove(f)

    check_funcs(OPTIONAL_STDFUNCS)

    for h in OPTIONAL_HEADERS:
        if config.check_func("", decl=False, call=False, headers=[h]):
            h = h.replace(".", "_").replace(os.path.sep, "_")
            moredefs.append((fname2def(h), 1))
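
    # e.g. "xmmintrin.h" becomes HAVE_XMMINTRIN_H and "sys/mman.h" becomes
    # HAVE_SYS_MMAN_H (assuming those headers appear in OPTIONAL_HEADERS).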

    for tup in OPTIONAL_INTRINSICS:
        headers = None
        if len(tup) == 2:
            f, args, m = tup[0], tup[1], fname2def(tup[0])
        elif len(tup) == 3:
            f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
        else:
            f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
        if config.check_func(f, decl=False, call=True, call_args=args,
                             headers=headers):
            moredefs.append((m, 1))

    for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
        if config.check_gcc_function_attribute(dec, fn):
            moredefs.append((fname2def(fn), 1))

    for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS:
        if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code,
                                                               header):
            moredefs.append((fname2def(fn), 1))

    for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
        if config.check_gcc_variable_attribute(fn):
            m = fn.replace("(", "_").replace(")", "_")
            moredefs.append((fname2def(m), 1))
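
    # The replace() calls sanitize attribute spellings containing
    # parentheses, e.g. a "__declspec(thread)" entry would be recorded as
    # HAVE___DECLSPEC_THREAD_.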

    # C99 functions: float and long double versions
    check_funcs(C99_FUNCS_SINGLE)
    check_funcs(C99_FUNCS_EXTENDED)

def check_complex(config, mathlibs):
    priv = []
    pub = []

    try:
        if os.uname()[0] == "Interix":
            warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
            return priv, pub
    except Exception:
        # os.uname is not available on all platforms; a blanket except is
        # ugly but safe
        pass

    # Check for complex support
    st = config.check_header('complex.h')
    if st:
        priv.append(('HAVE_COMPLEX_H', 1))
        pub.append(('NPY_USE_C99_COMPLEX', 1))

        for t in C99_COMPLEX_TYPES:
            st = config.check_type(t, headers=["complex.h"])
            if st:
                pub.append(('NPY_HAVE_%s' % type2def(t), 1))

        def check_prec(prec):
            flist = [f + prec for f in C99_COMPLEX_FUNCS]
            decl = dict([(f, True) for f in flist])
            if not config.check_funcs_once(flist, call=decl, decl=decl,
                                           libraries=mathlibs):
                for f in flist:
                    if config.check_func(f, call=True, decl=True,
                                         libraries=mathlibs):
                        priv.append((fname2def(f), 1))
            else:
                priv.extend([(fname2def(f), 1) for f in flist])

        check_prec('')
        check_prec('f')
        check_prec('l')
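
        # e.g. check_prec('f') probes the float variants (cabsf, cexpf, ...)
        # and, where present, records HAVE_CABSF etc. in the private header.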

    return priv, pub

def check_ieee_macros(config):
    priv = []
    pub = []

    macros = []

    def _add_decl(f):
        priv.append(fname2def("decl_%s" % f))
        pub.append('NPY_%s' % fname2def("decl_%s" % f))

    # XXX: hack to circumvent cpp pollution from python: python puts its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
    _macros = ["isnan", "isinf", "signbit", "isfinite"]
    for f in _macros:
        py_symbol = fname2def("decl_%s" % f)
        already_declared = config.check_decl(py_symbol,
                headers=["Python.h", "math.h"])
        if already_declared:
            if config.check_macro_true(py_symbol,
                    headers=["Python.h", "math.h"]):
                pub.append('NPY_%s' % fname2def("decl_%s" % f))
        else:
            macros.append(f)
    # Normally, isnan and isinf are macros (C99), but some platforms have only
    # the function, or both the function and the macro versions. Check for the
    # macro only, and define replacement ones if not found.
    # Note: including Python.h is necessary because it modifies some math.h
    # definitions
    for f in macros:
        st = config.check_decl(f, headers=["Python.h", "math.h"])
        if st:
            _add_decl(f)
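
    # e.g. if "isnan" survives into `macros` and math.h declares it, _add_decl
    # records HAVE_DECL_ISNAN privately and NPY_HAVE_DECL_ISNAN publicly.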

    return priv, pub

def check_types(config_cmd, ext, build_dir):
    private_defines = []
    public_defines = []

    # Expected size (in number of bytes) for each type. This is an
    # optimization: those are only hints, and an exhaustive search for the size
    # is done if the hints are wrong.
    expected = {'short': [2], 'int': [4], 'long': [8, 4],
                'float': [4], 'double': [8], 'long double': [16, 12, 8],
                'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
                'off_t': [8, 4]}
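
    # The multiple hints reflect real platform variation: e.g. 'long' is
    # 8 bytes on LP64 systems (64-bit Linux/macOS) but 4 bytes on LLP64
    # (64-bit Windows), and 'long double' is commonly 16, 12, or 8 bytes
    # depending on ABI and compiler.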

    # Check we have the python header (-dev* packages on Linux)
    result = config_cmd.check_header('Python.h')
    if not result:
        python = 'python'
        if '__pypy__' in sys.builtin_module_names:
            python = 'pypy'
        raise SystemError(
                "Cannot compile 'Python.h'. Perhaps you need to "
                "install {0}-dev|{0}-devel.".format(python))
    res = config_cmd.check_header("endian.h")
    if res:
        private_defines.append(('HAVE_ENDIAN_H', 1))
        public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
    res = config_cmd.check_header("sys/endian.h")
    if res:
        private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
        public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))

    # Check basic types sizes
    for type in ('short', 'int', 'long'):
        res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
        if res:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
        else:
            res = config_cmd.check_type_size(type, expected=expected[type])
            if res >= 0:
                public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            else:
                raise SystemError("Checking sizeof (%s) failed !" % type)

    for type in ('float', 'double', 'long double'):
        already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
                                                 headers=["Python.h"])
        res = config_cmd.check_type_size(type, expected=expected[type])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            if not already_declared and type != 'long double':
                private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

        # Compute size of corresponding complex type: used to check that our
        # definition is binary compatible with C99 complex type (check done at
        # build time in npy_common.h)
        complex_def = "struct {%s __x; %s __y;}" % (type, type)
        res = config_cmd.check_type_size(complex_def,
                                         expected=[2 * x for x in expected[type]])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % complex_def)

    for type in ('Py_intptr_t', 'off_t'):
        res = config_cmd.check_type_size(type, headers=["Python.h"],
                library_dirs=[pythonlib_dir()],
                expected=expected[type])

        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

    # We check declaration AND type because that's how distutils does it.
    if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
        res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
                library_dirs=[pythonlib_dir()],
                expected=expected['PY_LONG_LONG'])