# aaronreidsmith/scipy 1.3.3 -- optimize/tests/test_least_squares.py

from __future__ import division

from itertools import product

import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
                           assert_equal)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings

from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator

from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible


def fun_trivial(x, a=0):
    return (x - a)**2 + 5.0


def jac_trivial(x, a=0.0):
    return 2 * (x - a)


def fun_2d_trivial(x):
    return np.array([x[0], x[1]])


def jac_2d_trivial(x):
    return np.identity(2)


def fun_rosenbrock(x):
    return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])


def jac_rosenbrock(x):
    return np.array([
        [-20 * x[0], 10],
        [-1, 0]
    ])


def jac_rosenbrock_bad_dim(x):
    return np.array([
        [-20 * x[0], 10],
        [-1, 0],
        [0.0, 0.0]
    ])


def fun_rosenbrock_cropped(x):
    return fun_rosenbrock(x)[0]


def jac_rosenbrock_cropped(x):
    return jac_rosenbrock(x)[0]


# When x is a 1-D array, the return value is a 2-D array.
def fun_wrong_dimensions(x):
    return np.array([x, x**2, x**3])


def jac_wrong_dimensions(x, a=0.0):
    return np.atleast_3d(jac_trivial(x, a=a))


def fun_bvp(x):
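    # Five-point finite-difference stencil with a cubic nonlinearity: the
    # residual of a discretized 2-D boundary-value problem, where x is the
    # flattened n-by-n interior grid and the boundary values are zero.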
    n = int(np.sqrt(x.shape[0]))
    u = np.zeros((n + 2, n + 2))
    x = x.reshape((n, n))
    u[1:-1, 1:-1] = x
    y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
    return y.ravel()
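

# A minimal sketch (not one of the tests below) of how fun_bvp is typically
# driven: least_squares estimates the Jacobian by finite differences since
# none is supplied. The helper name is illustrative, not part of scipy.
def _example_bvp_solve(n=10):
    x0 = -np.ones(n**2)
    # 'trf' handles the n**2-sized residual vector; a 2-point difference
    # scheme is used for the Jacobian by default.
    return least_squares(fun_bvp, x0, method='trf')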


class BroydenTridiagonal(object):
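    # Broyden's tridiagonal problem: n nonlinear equations whose Jacobian is
    # tridiagonal, plus randomized bounds with x0 kept strictly feasible.
    # 'mode' controls how the Jacobian is exposed: a sparse lil_matrix,
    # a LinearOperator, or a dense array.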
    def __init__(self, n=100, mode='sparse'):
        np.random.seed(0)

        self.n = n

        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)

        self.lb += 0.1 * np.random.randn(n)
        self.ub += 0.1 * np.random.randn(n)

        self.x0 += 0.1 * np.random.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)

        if mode == 'sparse':
            self.sparsity = lil_matrix((n, n), dtype=int)
            i = np.arange(n)
            self.sparsity[i, i] = 1
            i = np.arange(1, n)
            self.sparsity[i, i - 1] = 1
            i = np.arange(n - 1)
            self.sparsity[i, i + 1] = 1

            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False)

    def fun(self, x):
        f = (3 - x) * x + 1
        f[1:] -= x[:-1]
        f[:-1] -= 2 * x[1:]
        return f

    def _jac(self, x):
        J = lil_matrix((self.n, self.n))
        i = np.arange(self.n)
        J[i, i] = 3 - 2 * x
        i = np.arange(1, self.n)
        J[i, i - 1] = -1
        i = np.arange(self.n - 1)
        J[i, i + 1] = -2
        return J
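

# Illustrative sketch (not one of the tests below) of how BroydenTridiagonal
# is meant to be consumed: a bounded problem with a sparse Jacobian solved by
# the 'trf' method. The helper name is ours, not part of scipy.
def _example_broyden_sparse():
    p = BroydenTridiagonal(mode='sparse')
    # The lil_matrix returned by p.jac keeps the trust-region subproblems
    # sparse; 'lsmr' is the matching trust-region solver.
    return least_squares(p.fun, p.x0, jac=p.jac, bounds=(p.lb, p.ub),
                         method='trf', tr_solver='lsmr')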


class ExponentialFittingProblem(object):
    """Provide data and function for exponential fitting in the form
    y = a + exp(b * x) + noise."""

    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
                 n_points=11, random_seed=None):
        np.random.seed(random_seed)
        self.m = n_points
        self.n = 2

        self.p0 = np.zeros(2)
        self.x = np.linspace(x_range[0], x_range[1], n_points)

        self.y = a + np.exp(b * self.x)
        self.y += noise * np.random.randn(self.m)

        outliers = np.random.randint(0, self.m, n_outliers)
        self.y[outliers] += 50 * noise * np.random.rand(n_outliers)

        self.p_opt = np.array([a, b])

    def fun(self, p):
        return p[0] + np.exp(p[1] * self.x) - self.y

    def jac(self, p):
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = self.x * np.exp(p[1] * self.x)
        return J
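

# Hedged usage sketch (not one of the tests below): robust fitting of the
# exponential model above with an outlier-resistant loss. The helper name is
# illustrative only.
def _example_robust_exp_fit():
    p = ExponentialFittingProblem(a=0.5, b=2.0, noise=0.1, random_seed=0)
    # 'soft_l1' down-weights the injected outliers, so the recovered
    # parameters should land closer to p.p_opt than with loss='linear'.
    return least_squares(p.fun, p.p0, jac=p.jac, loss='soft_l1',
                         method='trf')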


def cubic_soft_l1(z):
    rho = np.empty((3, z.size))

    t = 1 + z
    rho[0] = 3 * (t**(1/3) - 1)
    rho[1] = t ** (-2/3)
    rho[2] = -2/3 * t**(-5/3)

    return rho


LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
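
# cubic_soft_l1 follows the contract for a callable 'loss': given z = f**2 it
# returns an array of shape (3, m) holding rho(z), rho'(z) and rho''(z), so it
# can be passed wherever a built-in loss name is accepted, e.g. (illustrative):
#
#     least_squares(fun_trivial, 2.0, loss=cubic_soft_l1, f_scale=1.0)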


class BaseMixin(object):
    def test_basic(self):
        # Test that the basic calling sequence works.
        res = least_squares(fun_trivial, 2., method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.fun, fun_trivial(res.x))

    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                    method=self.method)

            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)

            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          args=(3, 4,), method=self.method)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          kwargs={'kaboom': 3}, method=self.method)

    def test_jac_options(self):
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)

    def test_nfev_options(self):
        for max_nfev in [None, 20]:
            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

    def test_x_scale_options(self):
        for x_scale in [1.0, np.array([0.5]), 'jac']:
            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
            assert_allclose(res.x, 0)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale='auto', method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=-1.0, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=None, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=1.0+2.0j, method=self.method)

    def test_diff_step(self):
        # res1 and res2 should be equivalent.
        # res2 and res3 should be different.
        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                             method=self.method)
        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                             method=self.method)
        res3 = least_squares(fun_trivial, 2.0,
                             diff_step=None, method=self.method)
        assert_allclose(res1.x, 0, atol=1e-4)
        assert_allclose(res2.x, 0, atol=1e-4)
        assert_allclose(res3.x, 0, atol=1e-4)
        assert_equal(res1.x, res2.x)
        assert_equal(res1.nfev, res2.nfev)
        assert_(res2.nfev != res3.nfev)

    def test_incorrect_options_usage(self):
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'no_such_option': 100})
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'max_nfev': 100})

    def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # hence the loose 'atol' values used below.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success)

    def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what it is going to compute.
        if self.method == 'lm':
            return

        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0)

    def test_rosenbrock(self):
        x0 = [-2, 1]
        x_opt = [1, 1]
        for jac, x_scale, tr_solver in product(
                ['2-point', '3-point', 'cs', jac_rosenbrock],
                [1.0, np.array([1.0, 0.2]), 'jac'],
                ['exact', 'lsmr']):
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
                                    tr_solver=tr_solver, method=self.method)
            assert_allclose(res.x, x_opt)

    def test_rosenbrock_cropped(self):
        x0 = [-2, 1]
        if self.method == 'lm':
            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                          x0, method='lm')
        else:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
                    [1.0, np.array([1.0, 0.2]), 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(
                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
                    tr_solver=tr_solver, method=self.method)
                assert_allclose(res.cost, 0, atol=1e-14)

    def test_fun_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
                      2.0, method=self.method)

    def test_jac_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_wrong_dimensions, method=self.method)

    def test_fun_and_jac_inconsistent_dimensions(self):
        x0 = [1, 2]
        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
                      jac_rosenbrock_bad_dim, method=self.method)

    def test_x0_multidimensional(self):
        x0 = np.ones(4).reshape(2, 2)
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_scalar(self):
        x0 = 2.0 + 0.0*1j
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)