Seminário Agenda 2030: Karina Michelin fala sobre GLOBALISMO E CONTROLE NO CONTEXTO MUNDIAL.

https://void.cat/d/9aCaeLbJcgPxe9JCkTW8Sa.webp

Source: twitter.com/karinamichelin/status/1780182746494599663

Reply to this note

Please Login to reply.

Discussion

-1732.947060918

-0.5

--

```python

#

# Author: Michael D. Johnson

# Date: 13 March 2015

# Email: michael.d.johnson@ucsf.edu

#

import numpy as np

# define the value of the target function

def f(x):
    """Evaluate the default objective f(x) = x**2 + x - 2.

    Accepts a scalar or a numpy array (applied element-wise).
    """
    quadratic = x**2
    return quadratic + x - 2

# set the initial guess for the variables

def x0():
    """Return the default starting guess for the optimizer: [1, 2, 3, 4]."""
    return list(range(1, 5))

# apply a method of steepest descent to find the minimum value of the function

# returns an array with the best values found and also returns the number of iterations

# required to find the minima.

def steepest_descent(f=None, x0=None, epsilon=1e-24, maxiters=100):
    """Minimize a scalar objective by gradient descent with backtracking.

    Fixes over the previous version:
      * the default ``x0=x0()`` was evaluated at definition time and the
        body then *called* the resulting list (``x = x0()`` -> TypeError);
      * ``maxiters`` was accepted but never used, so ``while True`` could
        spin forever;
      * the finite-difference "gradient" used ``np.append``/``np.delete``,
        which changes the dimensionality of the point being evaluated;
      * ``x -= d`` fails on a plain Python list.

    f: callable, optional. Objective; may return a scalar or an array
       (array results are summed into a scalar objective). Defaults to
       f(x) = x**2 + x - 2 applied element-wise.
    x0: sequence of float, optional. Starting point (default [1, 2, 3, 4]).
    epsilon: float. Stop once the gradient norm drops to epsilon or below.
    maxiters: int. Hard cap on the number of iterations.

    Returns: (x, i) — the best point found (ndarray) and the number of
    iterations performed.
    """
    obj = f if f is not None else (lambda t: t ** 2 + t - 2)
    x = np.array([1.0, 2.0, 3.0, 4.0] if x0 is None else x0, dtype=float)

    def phi(p):
        # Collapse element-wise objectives (the default f on an array)
        # into a single scalar value to minimize.
        return float(np.sum(obj(p)))

    h = 1e-6  # central-difference step size
    i = 0
    while i < maxiters:
        i += 1
        # Central-difference estimate of the gradient of phi at x.
        g = np.zeros_like(x)
        for j in range(x.size):
            e = np.zeros_like(x)
            e[j] = h
            g[j] = (phi(x + e) - phi(x - e)) / (2.0 * h)
        gnorm = np.linalg.norm(g)
        if gnorm <= epsilon:
            break
        # Backtracking (Armijo) line search: halve the step until a
        # sufficient decrease is achieved (or the step is negligible).
        base = phi(x)
        alpha = 1.0
        while alpha > 1e-12 and phi(x - alpha * g) > base - 1e-4 * alpha * gnorm ** 2:
            alpha *= 0.5
        x = x - alpha * g
    return x, i

# calculate the inverse Hessian matrix using a limited memory algorithm

def invHessian(H):
    """Return the (pseudo-)inverse of the Hessian matrix H.

    The previous "limited memory" iteration multiplied matrices of
    incompatible shapes — ``np.outer(H, r)`` flattens H into an
    (n*n, n) matrix, so ``np.dot(L_inv, ...)`` raised ValueError for
    any n > 1. Replaced with ``np.linalg.pinv``, which also copes with
    singular or ill-conditioned H.

    H: square (n, n) array-like Hessian matrix.
    Returns: the (n, n) Moore-Penrose pseudo-inverse of H.
    """
    H = np.asarray(H, dtype=float)
    return np.linalg.pinv(H)

# calculate the inverse Hessian matrix and then use it to find a minimum value of the function

def min_func_with_Hessian(f=None, x0=None, epsilon=1e-24, maxiters=100):
    """Find a local minimum of the function via steepest descent.

    Fixes over the previous version:
      * the defaults ``f=f`` / ``x0=x0()`` were evaluated at definition
        time, and the body then *called* the list ``x0()`` (TypeError);
      * the Hessian built here used dimension-changing
        ``np.append``/``np.delete`` differences, and its inverse ``L``
        was computed but never used — dead code, now removed.

    f: callable, optional. Objective to minimize (default f(x) = x**2 + x - 2).
    x0: sequence of float, optional. Initial guess (default [1, 2, 3, 4]).
    epsilon: float. Tolerance for the gradient-norm stopping test.
    maxiters: int. Maximum number of iterations allowed.

    Returns: (x, i) — best point found and the number of iterations used.
    """
    # Resolve defaults here so the delegate receives concrete values.
    obj = f if f is not None else (lambda t: t ** 2 + t - 2)
    start = [1.0, 2.0, 3.0, 4.0] if x0 is None else x0
    return steepest_descent(f=obj, x0=start, epsilon=epsilon, maxiters=maxiters)

# calculate the inverse Hessian matrix and then use it to find a global minimum value of the function

def min_func_with_Hessian_global(f=None, x0=None, epsilon=1e-24, maxiters=100):
    """Find a minimum of the function, polishing until no further improvement.

    Fixes over the previous version:
      * the ``(x, i)`` return of ``min_func_with_Hessian`` was unpacked as
        ``(x, fval)`` — an *iteration count* was compared to a tolerance;
      * the test ``fval > i*epsilon/i`` is just ``fval > epsilon``;
      * the ``while True`` polish loop had no iteration cap.
    Now the solver is re-run from the current best point while the objective
    keeps improving by more than ``epsilon``, capped at ``maxiters`` restarts.

    f: callable, optional. Objective to minimize (default f(x) = x**2 + x - 2).
    x0: sequence of float, optional. Initial guess (default [1, 2, 3, 4]).
    epsilon: float. Minimum objective improvement worth another restart.
    maxiters: int. Per-run iteration cap and restart cap.

    Returns: (x, i) — best point found and the total iterations used.
    """
    obj = f if f is not None else (lambda t: t ** 2 + t - 2)
    start = [1.0, 2.0, 3.0, 4.0] if x0 is None else x0

    x, total = min_func_with_Hessian(f=obj, x0=start,
                                     epsilon=epsilon, maxiters=maxiters)
    best = float(np.sum(obj(x)))
    # Restart from the current best point until the objective stalls.
    for _ in range(maxiters):
        x_new, iters = min_func_with_Hessian(f=obj, x0=x,
                                             epsilon=epsilon, maxiters=maxiters)
        total += iters
        val = float(np.sum(obj(x_new)))
        if val >= best - epsilon:
            break  # no meaningful improvement — accept the current point
        x, best = x_new, val
    return x, total

```