import numpy as np

alpha = 0.1
h = 0.000001
ftol = 1e-12

def f(p):
    _sum = 0                       # simple sum of x and y squared
    for i in range(len(p)):
        _sum += p[i]**2
    return _sum

def del_f(p):
    dp = np.zeros_like(p)
    print(f'p={p}')
    for i in range(len(p)):
        _save = p[i]               # save p[i] so it can be restored later
        p[i] += h                  # take a step in the positive direction
        print(f'p={p}')
        fhi = f(p)                 # evaluate after taking a positive step
        print(f'fhi={fhi}')
        p[i] = _save               # restore p[i]
        print(f'p={p}')            # p[0] =
        p[i] -= h                  # take a step in the negative direction
        print(f'p={p}')            # p[0] shouldn't be zero
        flo = f(p)                 # evaluate after taking a negative step
        p[i] = _save               # restore p[i]
        print(f'flo={flo}')
        p[i] = _save               # restore cell to its original value
        dp[i] = (fhi-flo)/(2*h)    # calculate the gradient with p as the center value
        print(f'dp={dp}')
    return dp

p = np.array([1,1])
while f(p) > ftol:
    dp = del_f(p)                  # the gradient
    p = p - alpha*dp               # now update the parameter values
    print(f'p[0]={p[0]:11.9f} p[1]={p[1]:11.9f} dp[0]={dp[0]:8.6f} dp[1]={dp[1]:8.6f}')

This is the start of a gradient descent program.
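The function del_f is meant to compute the central-difference gradient, dp[i] ≈ (f(p + h·e_i) - f(p - h·e_i)) / (2*h). For f(p) = p[0]**2 + p[1]**2 the partial derivatives are 2*p[i], so on the first call at p = [1, 1] I would expect dp ≈ [2, 2] rather than the values shown below.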
The line p[i] -= h sets p[0] to 0, even though p[0] = 1 on the line printed just before it.
After subtracting h, p[0] should be equal to 0.999999, but instead it is 0.
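My current suspicion, which I have not confirmed, is that p is an integer array (np.array([1,1]) is built from ints), and that storing a float back into it truncates toward zero. A minimal check of that idea, separate from the program above:

import numpy as np

p = np.array([1, 1])   # both entries are ints, so the array dtype is integer
p[0] += 0.000001       # 1.000001 is cast back to int when stored, leaving 1
print(p)               # [1 1]
p[0] -= 0.000001       # 0.999999 is cast back to int when stored, leaving 0
print(p)               # [0 1]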
This is the printout I get from the full program:
p=[1 1]
p=[1 1]
fhi=2
p=[1 1]
p=[0 1]
flo=1
dp=[500000 0]
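The last number is consistent with the values above: dp[0] = (fhi - flo) / (2*h) = (2 - 1) / (2*0.000001) = 500000, and dp prints as whole numbers because np.zeros_like(p) gives dp the same dtype as p. So the huge gradient comes directly from flo being evaluated at p = [0 1] instead of [0.999999 1].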
Since the initial p = [1, 1] and I am only adding or subtracting h, which is tiny, p[0] should not reach zero until after many iterations.
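If the integer dtype really is the problem, then I think starting from a float array should fix it. A minimal sketch of the same descent loop with a float starting point and without the debug prints (an untested guess, using the same alpha, h, and ftol as above):

import numpy as np

alpha = 0.1
h = 0.000001
ftol = 1e-12

def f(p):
    return np.sum(p**2)               # same sum-of-squares objective

def del_f(p):
    dp = np.zeros_like(p)             # dp is float because p is float
    for i in range(len(p)):
        _save = p[i]                  # save p[i] so it can be restored later
        p[i] = _save + h              # positive step
        fhi = f(p)
        p[i] = _save - h              # negative step
        flo = f(p)
        p[i] = _save                  # restore p[i]
        dp[i] = (fhi - flo) / (2*h)   # central difference
    return dp

p = np.array([1.0, 1.0])              # floats, so p[i] +/- h is no longer truncated
while f(p) > ftol:
    dp = del_f(p)
    p = p - alpha*dp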