Sor #105 (Open)

wants to merge 6 commits into base: master

51 changes: 51 additions & 0 deletions python/GaussSeidel.py
@@ -0,0 +1,51 @@
import numpy as np

A = np.array([[2.0, 2.0],
              [1.0, 3.0]])
b = np.array([[2.0], [1.0]])
x0 = np.array([[0.0], [0.0]])
tol = 1e-10
max_iter = 100

def GaussSeidel(A, b, x0, tol, max_iter):
    # use this algorithm only if it converges:
    # split A = N - P, where N is the lower triangular part of A
    N = np.tril(A)
    P = N - A
    Ggs = np.dot(np.linalg.inv(N), P)
    # calculate the spectral radius of the iteration matrix Ggs
    eigenvalues = np.linalg.eigvals(Ggs)
    sr = np.max(np.abs(eigenvalues))

    # the iteration converges only if the spectral radius is below 1
    if sr >= 1:
        print("Matrix does not converge")
        return None

    n = b.shape[0]
    x = x0.astype(float).copy()
    x0 = x0.astype(float).copy()
    # iterate up to the maximum number of iterations
    for step in range(1, max_iter + 1):
        # update every x[i] in turn
        for i in range(n):
            # sum using the values already updated in the current sweep
            new_values_sum = np.dot(A[i, :i], x[:i, 0])
            # sum using the values from the previous sweep
            old_values_sum = np.dot(A[i, i + 1:], x0[i + 1:, 0])
            x[i] = (b[i] - new_values_sum - old_values_sum) / A[i, i]

        # when the new values get close enough to the previous values
        # with respect to the imposed tolerance "tol", we reached the solution
        if np.linalg.norm(x - x0) < tol:
            break
        # store the newly computed values for the next sweep
        x0 = x.copy()
    print("X = {}".format(x))
    print("Number of steps: {}".format(step))
    return x

GaussSeidel(A, b, x0, tol, max_iter)
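
As a quick sanity check (not part of the patch itself), the iterative result can be compared against NumPy's direct solver on the same 2x2 system. A minimal sketch, assuming the GaussSeidel function above is in scope and returns its final iterate:

import numpy as np

A = np.array([[2.0, 2.0], [1.0, 3.0]])
b = np.array([[2.0], [1.0]])

x_direct = np.linalg.solve(A, b)                          # reference solution: [1, 0]
x_iter = GaussSeidel(A, b, np.zeros((2, 1)), 1e-10, 100)  # iterative solution
print(np.allclose(x_direct, x_iter))                      # expected: True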
51 changes: 51 additions & 0 deletions python/Jacobi.py
@@ -0,0 +1,51 @@
import numpy as np

A = np.array([[2.0, 2.0],
              [1.0, 3.0]])
b = np.array([[2.0], [1.0]])
x0 = np.array([[0.0], [0.0]])
tol = 1e-10
max_iter = 1000


def Jacobi(A, b, x0, tol, max_iter):
    # use this algorithm only if it converges:
    # split A = N - P, where N is the diagonal part of A
    N = np.diag(np.diag(A))
    P = N - A
    Gj = np.dot(np.linalg.inv(N), P)

    # calculate the spectral radius of the iteration matrix Gj
    eigenvalues = np.linalg.eigvals(Gj)
    sr = np.max(np.abs(eigenvalues))

    # the iteration converges only if the spectral radius is below 1
    if sr >= 1:
        print("Matrix does not converge")
        return None

    n = b.shape[0]
    x = x0.astype(float).copy()
    x0 = x0.astype(float).copy()
    # iterate up to the maximum number of iterations
    for step in range(1, max_iter + 1):
        # update every x[i] using only the previous iterate x0
        for i in range(n):
            # val_sum is the full row product A[i, :] * x0; subtracting
            # val_x = A[i, i] * x0[i] leaves only the off-diagonal terms
            val_sum = np.dot(A[i, :], x0[:, 0])
            val_x = A[i, i] * x0[i]
            x[i] = (b[i] - (val_sum - val_x)) / A[i, i]

        # when the new values get close enough to the previous values
        # with respect to the imposed tolerance "tol", we reached the solution
        if np.linalg.norm(x - x0) < tol:
            break
        # store the newly computed values for the next sweep
        x0 = x.copy()
    print("X = {}".format(x))
    print("Number of steps: {}".format(step))
    return x

Jacobi(A, b, x0, tol, max_iter)
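
The component-wise loop above is the same as one matrix-form Jacobi sweep, x_new = N^{-1}(P x_old + b) with N = diag(A) and P = N - A. A minimal sketch of that equivalence on the same 2x2 system (variable names here are illustrative):

import numpy as np

A = np.array([[2.0, 2.0], [1.0, 3.0]])
b = np.array([[2.0], [1.0]])
x_old = np.array([[0.0], [0.0]])

N = np.diag(np.diag(A))                           # diagonal part of A
P = N - A                                         # negated off-diagonal part
x_new = np.linalg.solve(N, np.dot(P, x_old) + b)  # one vectorized Jacobi sweep
print(x_new)                                      # matches the loop after one sweep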
45 changes: 45 additions & 0 deletions python/SOR.py
@@ -0,0 +1,45 @@
import numpy as np

A = np.array([[2.0, 2.0],
              [1.0, 3.0]])
b = np.array([[2.0], [1.0]])
x0 = np.array([[0.0], [0.0]])
tol = 1e-10
max_iter = 5
w = 1.1

def SOR(A, b, x0, tol, max_iter, w):
    # sanity check: the relaxation factor must lie in (0, 2)
    if w >= 2 or w <= 0:
        print('w should be inside (0, 2)')
        return None

    n = b.shape[0]
    x = x0.astype(float).copy()
    x0 = x0.astype(float).copy()

    # iterate up to the maximum number of iterations
    for step in range(1, max_iter + 1):
        # update every x[i] in turn
        for i in range(n):
            # sum using the values already updated in the current sweep
            new_values_sum = np.dot(A[i, :i], x[:i, 0])
            # sum using the values from the previous sweep
            old_values_sum = np.dot(A[i, i + 1:], x0[i + 1:, 0])
            # Gauss-Seidel value, relaxed with the factor w
            gs_value = (b[i] - new_values_sum - old_values_sum) / A[i, i]
            x[i] = w * gs_value + (1 - w) * x0[i]

        # when the new values get close enough to the previous values
        # with respect to the imposed tolerance "tol", we reached the solution
        if np.linalg.norm(x - x0) < tol:
            break
        # store the newly computed values for the next sweep
        x0 = x.copy()
    print("X = {}".format(x))
    print("Number of steps: {}".format(step))
    return x

SOR(A, b, x0, tol, max_iter, w)
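
Whether SOR converges for a given relaxation factor can be checked from the spectral radius of its iteration matrix (D + wL)^{-1}((1 - w)D - wU), where D, L, U are the diagonal, strictly lower, and strictly upper parts of A. A minimal sketch for the system above (names are illustrative):

import numpy as np

A = np.array([[2.0, 2.0], [1.0, 3.0]])
w = 1.1

D = np.diag(np.diag(A))        # diagonal part
L = np.tril(A, -1)             # strictly lower triangular part
U = np.triu(A, 1)              # strictly upper triangular part
G_w = np.linalg.solve(D + w * L, (1 - w) * D - w * U)
sr = np.max(np.abs(np.linalg.eigvals(G_w)))
print(sr)                      # the iteration converges only if sr < 1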
41 changes: 41 additions & 0 deletions python/SecantMethod.py
@@ -0,0 +1,41 @@
import numpy as np

# solves f(x) = 0 by doing at most max_iter steps of the secant method;
# the secant method requires two initial values, x0 and x1,
# which should ideally be chosen close to the root;
# f is passed in as a regular Python callable
def f(x):
    return 2 * x**5 + 1

def SecantMethod(f, x0, x1, tol, max_iter):
    x = x1
    for i in range(1, max_iter + 1):
        print("At step: {}".format(i))
        # function values: f(x0) and f(x1)
        f0 = f(x0)
        f1 = f(x1)
        print("f0 = {}; f1 = {}".format(f0, f1))
        # a horizontal secant would mean a division by zero below
        if f1 == f0:
            print("f(x0) == f(x1); cannot continue")
            return x
        # the root of the function f is approximated using the secant formula
        xi = x1 - f1 * (x1 - x0) / (f1 - f0)
        # calculate f(xi)
        fxi = f(xi)

        # xi is the solution
        if abs(fxi) < np.finfo(float).eps:
            return xi
        # relative change between the last two approximations
        epsilon = abs((xi - x1) / xi)

        # stop if the secant method reached its convergence limit
        if epsilon < tol:
            return xi
        # update the last two computed values
        x0 = x1
        x1 = xi
        x = xi

    print("Maximum number of iterations reached: {}".format(i))
    return x

print(SecantMethod(f, 17.0, 6.0, 10 ** (-10), 100))
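
Since 2x^5 + 1 = 0 has the closed-form root -(1/2)^(1/5) ≈ -0.8706, the secant result can be checked directly. A minimal sketch, assuming SecantMethod and f from the file above are in scope:

exact_root = -(0.5) ** (1.0 / 5.0)                          # about -0.8706
approx_root = SecantMethod(f, 17.0, 6.0, 10 ** (-10), 100)
print(abs(approx_root - exact_root))                        # should be close to zero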