import numpy as np

def f(x):
    """Objective to maximize: a concave paraboloid peaking at 4 at the origin."""
    return 4 - (x[0] ** 2 + x[1] ** 2)

def grad_f(x):
    """Gradient of ``f``: the partial derivatives (-2*x0, -2*x1) as an array."""
    return np.array([x[0] * -2, x[1] * -2])
def bfgs_maximize(f, grad_f, x0, max_iter=100, tol=1e-6):
    """Find a local maximum of ``f`` starting from ``x0`` using BFGS.

    ``H`` approximates the inverse Hessian of ``-f`` (positive definite for
    a concave objective), so ``p = H @ grad`` is an ascent direction. A
    backtracking (Armijo) line search keeps each step from overshooting;
    without it, a full step with the initial ``H = I`` diverges on even
    simple quadratics (e.g. the module's own example).

    Parameters
    ----------
    f : callable
        Objective to maximize, ``f(x) -> float``.
    grad_f : callable
        Gradient of ``f``, ``grad_f(x) -> ndarray``.
    x0 : array_like
        Starting point (copied; never mutated).
    max_iter : int
        Maximum number of iterations.
    tol : float
        Convergence tolerance on the gradient norm.

    Returns
    -------
    ndarray
        The best point found.
    """
    x = np.asarray(x0, dtype=float)  # copy so the caller's x0 is untouched
    n = x.size
    H = np.eye(n)
    I = np.eye(n)

    for i in range(max_iter):
        grad = grad_f(x)

        if np.linalg.norm(grad) < tol:
            print(f"Converged in  {i} iterations.")
            break

        p = H @ grad  # ascent direction (H is PD)

        # Backtracking line search: halve the step until the Armijo
        # sufficient-increase condition (maximization form) holds.
        fx = f(x)
        slope = np.dot(grad, p)  # directional derivative, > 0 for ascent
        alpha, c = 1.0, 1e-4
        while alpha > 1e-10 and f(x + alpha * p) < fx + c * alpha * slope:
            alpha *= 0.5
        x_new = x + alpha * p

        s = x_new - x
        # BFGS is derived for minimization; maximizing f means minimizing
        # -f, so the update must use the gradient change of -f, i.e. -y.
        # Using +y here flips the sign of the rho*s*s^T term and destroys
        # the positive definiteness of H.
        y = -(grad_f(x_new) - grad)

        ys = np.dot(y, s)
        if ys < 1e-12:
            # Curvature condition violated (or step collapsed): the
            # rank-two update would divide by ~0 / break definiteness.
            print("Curvature condition failed in BFGS update. Stopping.")
            break

        rho = 1.0 / ys
        H = (I - rho * np.outer(s, y)) @ H @ (I - rho * np.outer(y, s)) \
            + rho * np.outer(s, s)

        x = x_new

    return x

# Demo: run the ascent from the point (2, 1) and report what was found.
x0 = np.array([2.0, 1.0])

solution = bfgs_maximize(f, grad_f, x0)

print("Local maximum found at :", solution)
print("Function value at maximum  : ", f(solution))