"""Functions used by least-squares algorithms."""
from __future__ import division, print_function, absolute_import
from math import copysign
import numpy as np
from numpy.linalg import norm
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, aslinearoperator
EPS = np.finfo(float).eps
# Functions related to a trust-region problem.
def intersect_trust_region(x, s, Delta):
"""Find the intersection of a line with the boundary of a trust region.
This function solves the quadratic equation with respect to t
||(x + s*t)||**2 = Delta**2.
Returns
-------
t_neg, t_pos : tuple of float
Negative and positive roots.
Raises
------
ValueError
If `s` is zero or `x` is not within the trust region.
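
    Examples
    --------
    A quick sketch of the geometry: with a zero center and a unit direction,
    the roots are symmetric at ``-Delta`` and ``Delta``.

    >>> import numpy as np
    >>> t_neg, t_pos = intersect_trust_region(np.zeros(2),
    ...                                       np.array([1.0, 0.0]), 2.0)
    >>> float(t_neg), float(t_pos)
    (-2.0, 2.0)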
"""
a = np.dot(s, s)
if a == 0:
raise ValueError("`s` is zero.")
b = np.dot(x, s)
c = np.dot(x, x) - Delta**2
if c > 0:
raise ValueError("`x` is not within the trust region.")
d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
# Computations below avoid loss of significance, see "Numerical Recipes".
q = -(b + copysign(d, b))
t1 = q / a
t2 = c / q
if t1 < t2:
return t1, t2
else:
return t2, t1
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
rtol=0.01, max_iter=10):
"""Solve a trust-region problem arising in least-squares minimization.
This function implements a method described by J. J. More [1]_ and used
    in MINPACK, but it relies on a single SVD of the Jacobian instead of a
    series of Cholesky decompositions. Before running this function, compute:
``U, s, VT = svd(J, full_matrices=False)``.
Parameters
----------
n : int
Number of variables.
m : int
Number of residuals.
uf : ndarray
Computed as U.T.dot(f).
s : ndarray
Singular values of J.
V : ndarray
Transpose of VT.
Delta : float
Radius of a trust region.
initial_alpha : float, optional
Initial guess for alpha, which might be available from a previous
iteration. If None, determined automatically.
rtol : float, optional
Stopping tolerance for the root-finding procedure. Namely, the
solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
max_iter : int, optional
Maximum allowed number of iterations for the root-finding procedure.
Returns
-------
p : ndarray, shape (n,)
Found solution of a trust-region problem.
alpha : float
Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
        Sometimes called the Levenberg-Marquardt parameter.
n_iter : int
        Number of iterations made by the root-finding procedure. Zero means
        that the Gauss-Newton step was selected as the solution.
References
----------
.. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
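
    Examples
    --------
    A minimal, illustrative call: with an identity Jacobian the Gauss-Newton
    step equals ``-f``, so when it fits inside the trust region it is
    returned with ``alpha = 0`` and zero iterations.

    >>> import numpy as np
    >>> from numpy.linalg import svd
    >>> J = np.eye(2)
    >>> f = np.array([3.0, 4.0])
    >>> U, s, VT = svd(J, full_matrices=False)
    >>> p, alpha, n_iter = solve_lsq_trust_region(
    ...     n=2, m=2, uf=U.T.dot(f), s=s, V=VT.T, Delta=10.0)
    >>> bool(np.allclose(p, [-3.0, -4.0])), alpha, n_iter
    (True, 0.0, 0)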
"""
def phi_and_derivative(alpha, suf, s, Delta):
"""Function of which to find zero.
        It is defined as the norm of the regularized (by alpha) least-squares
        solution minus `Delta`. Refer to [1]_.
"""
denom = s**2 + alpha
p_norm = norm(suf / denom)
phi = p_norm - Delta
phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
return phi, phi_prime
suf = s * uf
# Check if J has full rank and try Gauss-Newton step.
if m >= n:
threshold = EPS * m * s[0]
full_rank = s[-1] > threshold
else:
full_rank = False
if full_rank:
p = -V.dot(uf / s)
if norm(p) <= Delta:
return p, 0.0, 0
alpha_upper = norm(suf) / Delta
if full_rank:
phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
alpha_lower = -phi / phi_prime
else:
alpha_lower = 0.0
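    # A previous alpha of exactly 0 corresponds to a Gauss-Newton step; once
    # that step no longer fits in the trust region it is not a useful warm
    # start, so in that case alpha is re-estimated from scratch.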
    if initial_alpha is None or (full_rank and initial_alpha == 0):
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
else:
alpha = initial_alpha
for it in range(max_iter):
if alpha < alpha_lower or alpha > alpha_upper:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
if phi < 0:
alpha_upper = alpha
ratio = phi / phi_prime
alpha_lower = max(alpha_lower, alpha - ratio)
alpha -= (phi + Delta) * ratio / Delta
if np.abs(phi) < rtol * Delta:
break
p = -V.dot(suf / (s**2 + alpha))
    # Make the norm of p equal to Delta; p is changed only slightly during
    # this. It is done to prevent p from lying outside the trust region
    # (which can cause problems later).
p *= Delta / norm(p)
return p, alpha, it + 1
def solve_trust_region_2d(B, g, Delta):
"""Solve a general trust-region problem in 2 dimensions.
    The problem is reformulated as a 4th-order algebraic equation,
the solution of which is found by numpy.roots.
Parameters
----------
B : ndarray, shape (2, 2)
Symmetric matrix, defines a quadratic term of the function.
g : ndarray, shape (2,)
Defines a linear term of the function.
Delta : float
Radius of a trust region.
Returns
-------
p : ndarray, shape (2,)
Found solution.
newton_step : bool
Whether the returned solution is the Newton step which lies within
the trust region.
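
    Examples
    --------
    A rough illustration: for a well-conditioned `B` whose Newton step lies
    inside the trust region, that step is returned directly.

    >>> import numpy as np
    >>> B = np.eye(2)
    >>> g = np.array([-1.0, -2.0])
    >>> p, newton_step = solve_trust_region_2d(B, g, Delta=3.0)
    >>> bool(np.allclose(p, [1.0, 2.0])), newton_step
    (True, True)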
"""
try:
R, lower = cho_factor(B)
p = -cho_solve((R, lower), g)
if np.dot(p, p) <= Delta**2:
return p, True
except LinAlgError:
pass
a = B[0, 0] * Delta**2
b = B[0, 1] * Delta**2
c = B[1, 1] * Delta**2
d = g[0] * Delta
f = g[1] * Delta
coeffs = np.array(
[-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
t = np.roots(coeffs) # Can handle leading zeros.
t = np.real(t[np.isreal(t)])
p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
i = np.argmin(value)
p = p[:, i]
return p, False
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
step_norm, bound_hit):
"""Update the radius of a trust region based on the cost reduction.
Returns
-------
Delta : float
New radius.
ratio : float
Ratio between actual and predicted reductions.
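
    Examples
    --------
    A small illustration of the shrinking branch (values chosen by hand):
    a poor agreement ratio of 0.1 contracts the radius to a quarter of the
    step norm.

    >>> Delta, ratio = update_tr_radius(1.0, actual_reduction=0.1,
    ...                                 predicted_reduction=1.0,
    ...                                 step_norm=1.0, bound_hit=False)
    >>> Delta, ratio
    (0.25, 0.1)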
"""
if predicted_reduction > 0:
ratio = actual_reduction / predicted_reduction
elif predicted_reduction == actual_reduction == 0:
ratio = 1
else:
ratio = 0
if ratio < 0.25:
Delta = 0.25 * step_norm
elif ratio > 0.75 and bound_hit:
Delta *= 2.0
return Delta, ratio
# Construction and minimization of quadratic functions.
def build_quadratic_1d(J, g, s, diag=None, s0=None):
"""Parameterize a multivariate quadratic function along a line.
The resulting univariate quadratic function is given as follows:
::
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
g.T * (s0 + s*t)
Parameters
----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (n,)
Direction vector of a line.
diag : None or ndarray with shape (n,), optional
        Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
s0 : None or ndarray with shape (n,), optional
Initial point. If None, assumed to be 0.
Returns
-------
a : float
Coefficient for t**2.
b : float
Coefficient for t.
c : float
Free term. Returned only if `s0` is provided.
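
    Examples
    --------
    A hand-checkable sketch: with `J` the identity and ``g = s = e_1``, the
    restriction is ``f(t) = 0.5 * t**2 + t``, i.e. ``a = 0.5`` and ``b = 1``.

    >>> import numpy as np
    >>> a, b = build_quadratic_1d(np.eye(2), np.array([1.0, 0.0]),
    ...                           np.array([1.0, 0.0]))
    >>> float(a), float(b)
    (0.5, 1.0)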
"""
v = J.dot(s)
a = np.dot(v, v)
if diag is not None:
a += np.dot(s * diag, s)
a *= 0.5
b = np.dot(g, s)
if s0 is not None:
u = J.dot(s0)
b += np.dot(u, v)
c = 0.5 * np.dot(u, u) + np.dot(g, s0)
if diag is not None:
b += np.dot(s0 * diag, s)
c += 0.5 * np.dot(s0 * diag, s0)
return a, b, c
else:
return a, b
def minimize_quadratic_1d(a, b, lb, ub, c=0):
"""Minimize a 1-d quadratic function subject to bounds.
The free term `c` is 0 by default. Bounds must be finite.
Returns
-------
t : float
Minimum point.
y : float
Minimum value.
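
    Examples
    --------
    For instance, ``t**2 - 4*t`` attains its unconstrained minimum at
    ``t = 2`` with value -4, which lies inside the bounds used here:

    >>> t, y = minimize_quadratic_1d(1.0, -4.0, lb=0.0, ub=10.0)
    >>> float(t), float(y)
    (2.0, -4.0)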
"""
t = [lb, ub]
if a != 0:
extremum = -0.5 * b / a
if lb < extremum < ub:
t.append(extremum)
t = np.asarray(t)
y = t * (a * t + b) + c
min_index = np.argmin(y)
return t[min_index], y[min_index]
def evaluate_quadratic(J, g, s, diag=None):
"""Compute values of a quadratic function arising in least squares.
The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
Parameters
----------
J : ndarray, sparse matrix or LinearOperator, shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (k, n) or (n,)
Array containing steps as rows.
diag : ndarray, shape (n,), optional
        Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
Returns
-------
    values : ndarray with shape (k,) or float
        Values of a quadratic function.
    """
    if s.ndim == 1:
        Js = J.dot(s)
        q = np.dot(Js, Js)
        if diag is not None:
            q += np.dot(s * diag, s)
    else:
        # Batch of steps: J.dot(s.T) evaluates J s for every row of s at once.
        Js = J.dot(s.T)
        q = np.sum(Js**2, axis=0)
        if diag is not None:
            q += np.sum(diag * s**2, axis=1)

    l = np.dot(s, g)

    return 0.5 * q + l