U
    Gvfg                  	   @   s   d Z ddlmZ ddlmZ ddlZddddd	d
gZG dd de	Z
d*ddZd+ddZeZd,ddZd-dd	Zdd Zdd Zdd Zd.dd
Zd/dd Zd0d!d"Zd1d%d&Zd2d(d)ZdS )3z
Functions
---------
.. autosummary::
   :toctree: generated/

    line_search_armijo
    line_search_wolfe1
    line_search_wolfe2
    scalar_search_wolfe1
    scalar_search_wolfe2

    )warn)	_minpack2NLineSearchWarningline_search_wolfe1line_search_wolfe2scalar_search_wolfe1scalar_search_wolfe2line_search_armijoc                   @   s   e Zd ZdS )r   N)__name__
__module____qualname__ r   r   >/tmp/pip-unpacked-wheel-96ln3f52/scipy/optimize/_linesearch.pyr      s   r   -C6??2   :0yE>+=c                    s   |dkrf  }|gdgdg fdd} fdd}t |}t|||||||	|
||d
\}}}|d d ||d fS )a  
    As `scalar_search_wolfe1` but do a line search to direction `pk`

    Parameters
    ----------
    f : callable
        Function `f(x)`
    fprime : callable
        Gradient of `f`
    xk : array_like
        Current point
    pk : array_like
        Search direction

    gfk : array_like, optional
        Gradient of `f` at point `xk`
    old_fval : float, optional
        Value of `f` at point `xk`
    old_old_fval : float, optional
        Value of `f` at point preceding `xk`

    The rest of the parameters are the same as for `scalar_search_wolfe1`.

    Returns
    -------
    stp, f_count, g_count, fval, old_fval
        As in `line_search_wolfe1`
    gval : array
        Gradient of `f` at the final point

    Nr   c                    s&   d  d7  < |   f  S Nr      r   sargsffcpkxkr   r   phiI   s    zline_search_wolfe1.<locals>.phic                    s:   |   f  d< d  d7  < t d S r   npdotr   )r   fprimegcgvalr   r   r   r   derphiM   s    z"line_search_wolfe1.<locals>.derphi)c1c2amaxaminxtol)r    r!   r   )r   r"   r   r   gfkold_fvalold_old_fvalr   r&   r'   r(   r)   r*   r   r%   derphi0stpZfvalr   )r   r   r   r"   r#   r$   r   r   r   r      s*    #        c
                 C   s  |dkr| d}|dkr |d}|dk	rT|dkrTt dd||  | }
|
dk rXd}
nd}
|}|}tdtj}tdt}d}d	}t|D ]T}t|
|||||	|||||\}}}}|dd
 dkr|}
| |}||}q qqd}|dd dks|dd dkrd}|||fS )a,  
    Scalar function search for alpha that satisfies strong Wolfe conditions

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable phi(alpha)
        Function at point `alpha`
    derphi : callable phi'(alpha)
        Objective function derivative. Returns a scalar.
    phi0 : float, optional
        Value of phi at 0
    old_phi0 : float, optional
        Value of phi at previous point
    derphi0 : float, optional
        Value derphi at 0
    c1 : float, optional
        Parameter for Armijo condition rule.
    c2 : float, optional
        Parameter for curvature condition rule.
    amax, amin : float, optional
        Maximum and minimum step size
    xtol : float, optional
        Relative tolerance for an acceptable step.

    Returns
    -------
    alpha : float
        Step size, or None if no suitable step was found
    phi : float
        Value of `phi` at the new point `alpha`
    phi0 : float
        Value of `phi` at `alpha=0`

    Notes
    -----
    Uses routine DCSRCH from MINPACK.

    N        r         ?)\( @)   )   s   STARTd   r3   s   FG   s   ERROR   s   WARN)minr    zerosZintcfloatrangeminpack2Zdcsrch)r   r%   phi0old_phi0r.   r&   r'   r(   r)   r*   alpha1phi1Zderphi1ZisaveZdsaveZtaskmaxiterir/   r   r   r   r   [   sF    ,
      
$
   c                    s   dgdgdgdg 	
fdd}| 	
fdd|dkr^
f  }t |	}dk	r	
fdd}nd}t||||||	|
||d	
\}}}}|dkrtd
t nd }|d d |||fS )a  Find alpha that satisfies strong Wolfe conditions.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function.
    myfprime : callable f'(x,*args)
        Objective function gradient.
    xk : ndarray
        Starting point.
    pk : ndarray
        Search direction.
    gfk : ndarray, optional
        Gradient value for x=xk (xk being the current parameter
        estimate). Will be recomputed if omitted.
    old_fval : float, optional
        Function value for x=xk. Will be recomputed if omitted.
    old_old_fval : float, optional
        Function value for the point preceding x=xk.
    args : tuple, optional
        Additional arguments passed to objective function.
    c1 : float, optional
        Parameter for Armijo condition rule.
    c2 : float, optional
        Parameter for curvature condition rule.
    amax : float, optional
        Maximum step size
    extra_condition : callable, optional
        A callable of the form ``extra_condition(alpha, x, f, g)``
        returning a boolean. Arguments are the proposed step ``alpha``
        and the corresponding ``x``, ``f`` and ``g`` values. The line search
        accepts the value of ``alpha`` only if this
        callable returns ``True``. If the callable returns ``False``
        for the step length, the algorithm will continue with
        new iterates. The callable is only called for iterates
        satisfying the strong Wolfe conditions.
    maxiter : int, optional
        Maximum number of iterations to perform.

    Returns
    -------
    alpha : float or None
        Alpha for which ``x_new = x0 + alpha * pk``,
        or None if the line search algorithm did not converge.
    fc : int
        Number of function evaluations made.
    gc : int
        Number of gradient evaluations made.
    new_fval : float or None
        New function value ``f(x_new)=f(x0+alpha*pk)``,
        or None if the line search algorithm did not converge.
    old_fval : float
        Old function value ``f(x0)``.
    new_slope : float or None
        The local slope along the search direction at the
        new value ``<myfprime(x_new), pk>``,
        or None if the line search algorithm did not converge.


    Notes
    -----
    Uses the line search algorithm to enforce strong Wolfe
    conditions. See Wright and Nocedal, 'Numerical Optimization',
    1999, pp. 59-61.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import line_search

    A objective function and its gradient are defined.

    >>> def obj_func(x):
    ...     return (x[0])**2+(x[1])**2
    >>> def obj_grad(x):
    ...     return [2*x[0], 2*x[1]]

    We can find alpha that satisfies strong Wolfe conditions.

    >>> start_point = np.array([1.8, 1.7])
    >>> search_gradient = np.array([-1.0, -1.0])
    >>> line_search(obj_func, obj_grad, start_point, search_gradient)
    (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])

    r   Nc                    s&   d  d7  < |   f  S r   r   alphar   r   r   r     s    zline_search_wolfe2.<locals>.phic                    sB   d  d7  < |   f  d< | d< t d S r   r   rD   )r   r"   r#   r$   
gval_alphar   r   r   r   r%     s    z"line_search_wolfe2.<locals>.derphic                    s2   d | kr |  |   }| ||d S )Nr   r   )rE   r   x)r%   extra_conditionr$   rF   r   r   r   r   extra_condition2%  s    z,line_search_wolfe2.<locals>.extra_condition2)rA   *The line search algorithm did not converge)r    r!   r   r   r   )r   Zmyfprimer   r   r+   r,   r-   r   r&   r'   r(   rH   rA   r   r.   rI   
alpha_starphi_starderphi_starr   )r   r%   rH   r   r   r"   r#   r$   rF   r   r   r   r      s:    X        c
                 C   s  |dkr| d}|dkr |d}d}
|dk	rL|dkrLt dd||  | }nd}|dk r\d}|dk	rnt ||}| |}|}|}|dkrdd }t|	D ]P}|dks|dk	r|
|krd}|}|}d}|dkrd}nd	d
|  }t|t  q |dk}|||| |  ks||krF|rFt|
||||| ||||||\}}} q ||}t|| | kr|||r|}|}|} q |dkrt||
|||| ||||||\}}} q d| }|dk	rt ||}|}
|}|}| |}|}q|}|}d}tdt ||||fS )a  Find alpha that satisfies strong Wolfe conditions.

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable phi(alpha)
        Objective scalar function.
    derphi : callable phi'(alpha)
        Objective function derivative. Returns a scalar.
    phi0 : float, optional
        Value of phi at 0.
    old_phi0 : float, optional
        Value of phi at previous point.
    derphi0 : float, optional
        Value of derphi at 0
    c1 : float, optional
        Parameter for Armijo condition rule.
    c2 : float, optional
        Parameter for curvature condition rule.
    amax : float, optional
        Maximum step size.
    extra_condition : callable, optional
        A callable of the form ``extra_condition(alpha, phi_value)``
        returning a boolean. The line search accepts the value
        of ``alpha`` only if this callable returns ``True``.
        If the callable returns ``False`` for the step length,
        the algorithm will continue with new iterates.
        The callable is only called for iterates satisfying
        the strong Wolfe conditions.
    maxiter : int, optional
        Maximum number of iterations to perform.

    Returns
    -------
    alpha_star : float or None
        Best alpha, or None if the line search algorithm did not converge.
    phi_star : float
        phi at alpha_star.
    phi0 : float
        phi at 0.
    derphi_star : float or None
        derphi at alpha_star, or None if the line search algorithm
        did not converge.

    Notes
    -----
    Uses the line search algorithm to enforce strong Wolfe
    conditions. See Wright and Nocedal, 'Numerical Optimization',
    1999, pp. 59-61.

    Nr0   r   r1   r2   c                 S   s   dS )NTr   )rE   r   r   r   r   <lambda>      z&scalar_search_wolfe2.<locals>.<lambda>z7Rounding errors prevent the line search from convergingz4The line search algorithm could not find a solution zless than or equal to amax: %sr3   rJ   )r8   r;   r   r   _zoomabs)r   r%   r=   r>   r.   r&   r'   r(   rH   rA   alpha0r?   phi_a1phi_a0Z	derphi_a0rB   rK   rL   rM   msgZnot_first_iterationZ	derphi_a1alpha2r   r   r   r   =  s    9

       
       


c              
   C   s6  t jdddd
 z|}||  }||  }	||	 d ||	  }
t d}|	d |d< |d  |d< |	d  |d< |d |d	< t |t || ||  || ||	  g \}}||
 }||
 }|| d| |  }| | t | d|   }W n" tk
r   Y W 5 Q R  d
S X W 5 Q R X t |s2d
S |S )z
    Finds the minimizer for a cubic polynomial that goes through the
    points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.

    If no minimizer can be found, return None.

    raisedivideZoverinvalidr3   )r3   r3   )r   r   )r   r      )r   r   )r   r   N)	r    errstateemptyr!   ZasarrayflattensqrtArithmeticErrorisfinite)afafpabfbcr   CdbdcZdenomd1ABradicalxminr   r   r   	_cubicmin  s.    

 rp   c           
   	   C   s   t jddddh z@|}|}|| d  }|| ||  ||  }| |d|   }	W n  tk
rr   Y W 5 Q R  dS X W 5 Q R X t |	sdS |	S )z
    Finds the minimizer for a quadratic polynomial that goes through
    the points (a,fa), (b,fb) with derivative at a of fpa.

    rW   rX   r1          @N)r    r\   r`   ra   )
rb   rc   rd   re   rf   Drh   ri   rm   ro   r   r   r   _quadmin  s    
rs   c                 C   s  d}d}d}d}|}d}||  }|dk r4||  }}n
| | }}|dkrb|| }t | ||||||}|dks|dks||| ks||| k r|| }t| ||||}|dks||| ks||| k r| d|  }||}|||	| |  ks||kr|}|}|}|}np||}t||
 | kr>|||r>|}|}|}q|||   dkrb|}|}| }|}n|}| }|} |}|}|d7 }||krd}d}d}qq|||fS )a  Zoom stage of approximate linesearch satisfying strong Wolfe conditions.
    
    Part of the optimization algorithm in `scalar_search_wolfe2`.
    
    Notes
    -----
    Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
    'Numerical Optimization', 1999, pp. 61.

    rC   r   g?皙?N      ?r   )rp   rs   rQ   )Za_loZa_hiZphi_loZphi_hiZ	derphi_lor   r%   r=   r.   r&   r'   rH   rA   rB   Zdelta1Zdelta2Zphi_recZa_recZdalpharb   re   ZcchkZa_jZqchkZphi_ajZ	derphi_ajZa_starZval_starZvalprime_starr   r   r   rP     sd    	

 (  rP   r   c                    sj   t dg fdd}|dkr6|d}	n|}	t |}
t||	|
||d\}}|d |fS )a  Minimize over alpha, the function ``f(xk+alpha pk)``.

    Parameters
    ----------
    f : callable
        Function to be minimized.
    xk : array_like
        Current point.
    pk : array_like
        Search direction.
    gfk : array_like
        Gradient of `f` at point `xk`.
    old_fval : float
        Value of `f` at point `xk`.
    args : tuple, optional
        Optional arguments.
    c1 : float, optional
        Value to control stopping criterion.
    alpha0 : scalar, optional
        Value of `alpha` at start of the optimization.

    Returns
    -------
    alpha
    f_count
    f_val_at_alpha

    Notes
    -----
    Uses the interpolation algorithm (Armijo backtracking) as suggested by
    Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57

    r   c                    s&   d  d7  < |   f  S r   r   )r?   r   r   r   r     s    zline_search_armijo.<locals>.phiNr0   )r&   rR   )r    Z
atleast_1dr!   scalar_search_armijo)r   r   r   r+   r,   r   r&   rR   r   r=   r.   rE   r@   r   r   r   r	   `  s    "



c           	   
   C   s0   t | |||||||d}|d |d d|d fS )z8
    Compatibility wrapper for `line_search_armijo`
    )r   r&   rR   r   r   r3   )r	   )	r   r   r   r+   r,   r   r&   rR   rr   r   r   line_search_BFGS  s    rx   c                 C   s  | |}|||| |  kr$||fS | |d  d || ||   }| |}|||| |  krj||fS ||kr|d |d  ||  }	|d || ||   |d || ||    }
|
|	 }
|d  || ||   |d || ||    }||	 }| t t|d d|
 |   d|
  }| |}|||| |  krP||fS || |d kstd||  dk r||d }|}|}|}|}qjd|fS )a(  Minimize over alpha, the function ``phi(alpha)``.

    Uses the interpolation algorithm (Armijo backtracking) as suggested by
    Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57

    alpha > 0 is assumed to be a descent direction.

    Returns
    -------
    alpha
    phi1

    r3   rq   r[   g      @r   gQ?N)r    r_   rQ   )r   r=   r.   r&   rR   r)   rT   r?   rS   Zfactorrb   re   rV   Zphi_a2r   r   r   rv     s8    "
,$rv   rt   ru   c                 C   s  |d }t |}	d}
d}d}||
|  }| |\}}||	| ||
d  |  krX|
}q|
d | |d|
 d |   }|||  }| |\}}||	| ||d  |  kr| }q|d | |d| d |   }t|||
 ||
 }
t||| || }q||||fS )a@  
    Nonmonotone backtracking line search as described in [1]_

    Parameters
    ----------
    f : callable
        Function returning a tuple ``(f, F)`` where ``f`` is the value
        of a merit function and ``F`` the residual.
    x_k : ndarray
        Initial position.
    d : ndarray
        Search direction.
    prev_fs : float
        List of previous merit function values. Should have ``len(prev_fs) <= M``
        where ``M`` is the nonmonotonicity window parameter.
    eta : float
        Allowed merit function increase, see [1]_
    gamma, tau_min, tau_max : float, optional
        Search parameters, see [1]_

    Returns
    -------
    alpha : float
        Step length
    xp : ndarray
        Next position
    fp : float
        Merit function value at next position
    Fp : ndarray
        Residual at next position

    References
    ----------
    [1] "Spectral residual method without gradient information for solving
        large-scale nonlinear systems of equations." W. La Cruz,
        J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).

    r   r3   )maxr    clip)r   x_kdZprev_fsetagammatau_mintau_maxf_kZf_baralpha_palpha_mrE   xpfpFpalpha_tpalpha_tmr   r   r   _nonmonotone_line_search_cruz  s(    (  r   333333?c                 C   s(  d}d}d}|||  }| |\}}||| ||d  |  krF|}q|d | |d| d |   }|||  }| |\}}||| ||d  |  kr| }q|d | |d| d |   }t ||| |	| }t ||| |	| }q|
| d }|
| ||  | | }|}||||||fS )a  
    Nonmonotone line search from [1]

    Parameters
    ----------
    f : callable
        Function returning a tuple ``(f, F)`` where ``f`` is the value
        of a merit function and ``F`` the residual.
    x_k : ndarray
        Initial position.
    d : ndarray
        Search direction.
    f_k : float
        Initial merit function value.
    C, Q : float
        Control parameters. On the first iteration, give values
        Q=1.0, C=f_k
    eta : float
        Allowed merit function increase, see [1]_
    nu, gamma, tau_min, tau_max : float, optional
        Search parameters, see [1]_

    Returns
    -------
    alpha : float
        Step length
    xp : ndarray
        Next position
    fp : float
        Merit function value at next position
    Fp : ndarray
        Residual at next position
    C : float
        New value for the control parameter C
    Q : float
        New value for the control parameter Q

    References
    ----------
    .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
           search and its application to the spectral residual
           method'', IMA J. Numer. Anal. 29, 814 (2009).

    r   r3   )r    r{   )r   r|   r}   r   rh   Qr~   r   r   r   nur   r   rE   r   r   r   r   r   ZQ_nextr   r   r   _nonmonotone_line_search_cheng#  s*    /  r   )	NNNr   r   r   r   r   r   )NNNr   r   r   r   r   )	NNNr   r   r   NNrC   )NNNr   r   NNrC   )r   r   r   )r   r   r   )r   r   r   )r   rt   ru   )r   rt   ru   r   )__doc__warningsr   Zscipy.optimizer   r<   Znumpyr    __all__RuntimeWarningr   r   r   Zline_searchr   r   rp   rs   rP   r	   rx   rv   r   r   r   r   r   r   <module>   s|                  
<         
S             
 	            
 "[
4
	
?     
I      