#!/usr/bin/env python
# coding: utf-8

# In[2]:


#golden section search to find where a function reaches a minimum
def gs(f, a, b, n):
    from numpy import sqrt
    phi = (sqrt(5) + 1) / 2    #golden ratio, about 1.618
    c = a + (phi-1)*(b-a)      #interior point at the 0.618 position of [a, b]
    fc = f(c)
    for i in range(n):
        d = a + (2-phi)*(b-a)  #second interior point, at the 0.382 position
        fd = f(d)
        if fc < fd:
            #minimum lies in [d, b]; swapping a and b reverses the interval's
            #orientation so that c stays at the 0.618 position of the new bracket
            a = b
            b = d
        else:
            #minimum lies in [a, c]; d becomes the new 0.618-position point
            b = c
            c = d
            fc = fd
    #a and b may have swapped order during the search, so sort the bracket
    return [min(a, b), max(a, b)]
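

# In[ ]:


#quick sanity check: the minimum of (x - 0.5)**2 on [0, 1] is at x = 0.5,
#so the returned bracket should tighten around it
gs(lambda x: (x - 0.5)**2, 0, 1, 20)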


# In[3]:


from numpy import sin
f = lambda x: x - 5*sin(x) #a function with several local minima

gs(f, 1, 2, 20)
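

# In[ ]:


#cross-check: setting the derivative 1 - 5*cos(x) to zero gives x = arccos(1/5),
#which should fall inside the bracket returned above
from numpy import arccos
arccos(1/5)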


# In[4]:


L = 1
W = 1.5
g = lambda x: 4 * x**3 - 2 * x**2 * (L+W) + L*W*x 
h = lambda x: -g(x)
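

# In[ ]:


#g matches the expanded form of x*(L - 2*x)*(W - 2*x), the volume of an open-top
#box folded from an L-by-W sheet after cutting squares of side x from the corners,
#which appears to be the problem here; minimizing h = -g then maximizes the volume
from numpy import allclose, linspace
xs_check = linspace(0, L/2, 11)
allclose(g(xs_check), xs_check*(L - 2*xs_check)*(W - 2*xs_check))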


# In[5]:


gs(h, 0, L/2, 10)


# In[7]:


from matplotlib.pyplot import plot
from numpy import linspace
xs = linspace(0, L/2)
ys = g(xs)
plot(xs, ys)


# In[8]:


#Newton's method for finding where a function equals zero
def newton(f, df, x, n):
    for i in range(n):
        x = x - f(x)/df(x)
        print(x)
    return x
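

# In[ ]:


#quick sanity check: applying newton to x**2 - 2 should converge to sqrt(2) = 1.414...
newton(lambda x: x**2 - 2, lambda x: 2*x, 1, 5)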


# In[9]:


g = lambda x: 4 * x**3 - 2 * x**2 * (L+W) + L*W*x 
gp = lambda x: 12 * x**2 - 4 * x * (L+W) + L*W #first derivative
gpp = lambda x: 24*x - 4*(L+W) #second derivative
newton(gp, gpp, 0.1, 5)
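

# In[ ]:


#cross-check: gp is quadratic, so its roots are available in closed form;
#the smaller root is the one inside [0, L/2] that Newton converged to above
from numpy import sqrt
((L+W) - sqrt((L+W)**2 - 3*L*W)) / 6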


# In[10]:


f = lambda x: x - 5*sin(x)
xs = linspace(-10, 10)
ys = f(xs)
plot(xs, ys)


# In[11]:


from scipy.optimize import minimize_scalar
minimize_scalar(f, bounds=[1, 2], method='bounded') #the 'bounded' method is the one that honors the bracket


# In[16]:


minimize_scalar(f, bounds=[7, 10], method='bounded') #another local minimum
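

# In[ ]:


#cross-check: the minima of f repeat with period 2*pi, so the next one after
#arccos(1/5) is at arccos(1/5) + 2*pi = 7.65..., inside the bracket [7, 10]
from numpy import arccos, pi
arccos(1/5) + 2*pi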


# In[13]:


#a function of two variables, to be shown in a contour plot below
from numpy import exp, cos
f = lambda x: x[0]**2 + x[1]**2 - x[0]*x[1] - exp(-(x[0]**2)) + 10*cos(x[0] - x[1])
f([1, -1])
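

# In[ ]:


#hand check: f([1, -1]) = 1 + 1 + 1 - exp(-1) + 10*cos(2), about -1.53
3 - exp(-1) + 10*cos(2)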


# In[15]:


from numpy import linspace, meshgrid
x1 = linspace(-4, 4, 40)
x2 = linspace(-4, 4, 40)
X1, X2 = meshgrid(x1, x2)


# In[16]:


Y = f([X1, X2]) #numpy operations broadcast elementwise, so this evaluates f over the whole grid


# In[17]:


Y.shape


# In[40]:


from matplotlib.pyplot import subplots

fig, ax = subplots()
CS = ax.contour(X1, X2, Y)
ax.clabel(CS, inline=True, fontsize=10)
ax.set_title('Example function of 2 variables')
ax.grid()
ax.set_xlabel('x1')
ax.set_ylabel('x2')


# In[22]:


#expression for gradient of f
from numpy import sin
g = lambda x: [2*x[0] - x[1] + 2 * x[0] * exp(-(x[0]**2)) - 10 * sin(x[0] - x[1]), 
              2*x[1] - x[0] + 10 * sin(x[0] - x[1])]


# In[27]:


#expression for the Hessian of f
H = lambda x: [[2 + (2 - 4*x[0]**2) * exp(-(x[0]**2)) - 10 * cos(x[0] - x[1]),
                -1 + 10 * cos(x[0] - x[1])],
               [-1 + 10 * cos(x[0] - x[1]),
                2 - 10 * cos(x[0] - x[1])]]
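

# In[ ]:


#numerical cross-check: compare the hand-derived gradient against scipy's
#finite-difference approximation; the two should agree to several digits
from numpy import array
from scipy.optimize import approx_fprime
approx_fprime(array([1.0, -1.0]), f, 1e-7), g([1, -1])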


# In[23]:


x = [1, -1]
g(x)


# In[28]:


H(x)


# In[33]:


#multidimensional Newton's method for finding where a vector-valued function
#f equals zero; solves df(x) @ step = f(x) rather than forming an explicit inverse
def newton_md(f, df, x, n):
    from numpy.linalg import solve
    for i in range(n):
        x = x - solve(df(x), f(x))  #Newton step: x - df(x)^(-1) f(x)
        print(x, f(x))
    return x


# In[35]:


x_min = newton_md(g, H, x, 10)


# In[37]:


f(x_min)


# In[42]:


from scipy.optimize import minimize
minimize(f, x)


# In[43]:


minimize(f, x, jac=g, hess=H, method='Newton-CG') #pick a method that uses the Hessian; the default ignores hess


# In[44]:


g(x_min) #at a local minimum, the gradient should be all zeros


# In[47]:


#at a local minimum, the Hessian should be positive definite, meaning that
#its eigenvalues are all positive
from numpy.linalg import eigvals
eigvals(H(x_min))
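

# In[ ]:


#alternative check: Cholesky factorization succeeds only for positive-definite
#matrices, so it doubles as a direct yes/no test
from numpy import array
from numpy.linalg import cholesky, LinAlgError
try:
    cholesky(array(H(x_min)))
    print('positive definite')
except LinAlgError:
    print('not positive definite')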

