path: root/Wrappers
author      Edoardo Pasca <edo.paskino@gmail.com>   2019-06-13 15:16:21 +0100
committer   Edoardo Pasca <edo.paskino@gmail.com>   2019-06-13 15:16:21 +0100
commit      3869559b14500fa4d730f084c4645b6c485c647f (patch)
tree        a2f625c7bdec01faa37fce23d7f9d95006a68ded /Wrappers
parent      516ac57569a76e4d41a2abdd3cd786641f1aea7f (diff)
play around with test
Diffstat (limited to 'Wrappers')
-rwxr-xr-x   Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py   7
-rwxr-xr-x   Wrappers/Python/wip/fix_test.py                        45
2 files changed, 35 insertions, 17 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
index 8474d89..4faffad 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
@@ -50,7 +50,7 @@ class CGLS(Algorithm):
def set_up(self, x_init, operator , data ):
self.r = data.copy()
- self.x = x_init.copy()
+ self.x = x_init * 0
self.operator = operator
self.d = operator.adjoint(self.r)
@@ -96,11 +96,12 @@ class CGLS(Algorithm):
Ad = self.operator.direct(self.d)
norm = Ad.squared_norm()
if norm == 0.:
- print ('cannot update solution')
+ print ('norm = 0, cannot update solution')
+ print ("self.d norm", self.d.squared_norm(), self.d.as_array())
raise StopIteration()
alpha = self.normr2/norm
if alpha == 0.:
- print ('cannot update solution')
+ print ('alpha = 0, cannot update solution')
raise StopIteration()
self.d *= alpha
Ad *= alpha
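
The CGLS change above starts the iterate from a zero vector (x_init * 0) and aborts when either the step denominator ||A d||^2 or the step length alpha vanishes. As a reference for what those guards protect, a minimal NumPy sketch of the same CGLS update (illustration only, not the ccpi implementation):

    import numpy as np

    def cgls_sketch(A, b, n_iter=20):
        # Start from zero, as self.x = x_init * 0 does above.
        x = np.zeros(A.shape[1], dtype=A.dtype)
        r = b.copy()                 # residual b - A x, with x = 0
        d = A.T @ r                  # initial search direction A^T r
        normr2 = d @ d
        for _ in range(n_iter):
            Ad = A @ d
            norm = Ad @ Ad
            if norm == 0.0:          # denominator vanished: cannot update solution
                break
            alpha = normr2 / norm
            if alpha == 0.0:         # step length vanished: cannot update solution
                break
            x += alpha * d
            r -= alpha * Ad
            s = A.T @ r
            normr2_new = s @ s
            d = s + (normr2_new / normr2) * d
            normr2 = normr2_new
        return x

The in-place scaling in the patch (self.d *= alpha, Ad *= alpha) is the memory-saving form of the same x += alpha * d and r -= alpha * Ad updates.
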
diff --git a/Wrappers/Python/wip/fix_test.py b/Wrappers/Python/wip/fix_test.py
index 316606e..9eb0a4e 100755
--- a/Wrappers/Python/wip/fix_test.py
+++ b/Wrappers/Python/wip/fix_test.py
@@ -61,15 +61,20 @@ class Norm1(Function):
opt = {'memopt': True}
# Problem data.
-m = 4
-n = 10
-np.random.seed(1)
+m = 500
+n = 200
+
+# if m < n then the problem is under-determined and algorithms will struggle to find a solution.
+# One approach is to add regularisation
+
+#np.random.seed(1)
Amat = np.asarray( np.random.randn(m, n), dtype=numpy.float32)
+Amat = np.asarray( np.random.random_integers(1,10, (m, n)), dtype=numpy.float32)
#Amat = np.asarray(np.eye(m), dtype=np.float32) * 2
A = LinearOperatorMatrix(Amat)
bmat = np.asarray( np.random.randn(m), dtype=numpy.float32)
-bmat *= 0
-bmat += 2
+#bmat *= 0
+#bmat += 2
print ("bmat", bmat.shape)
print ("A", A.A)
#bmat.shape = (bmat.shape[0], 1)
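
The problem size grows from a 4x10 under-determined system to a 500x200 over-determined one, and the comment above notes that with m < n some regularisation is needed. A minimal sketch of one such approach, Tikhonov regularisation by augmenting the system so an ordinary least-squares solver still applies (the sizes and lam here are illustrative only):

    import numpy as np

    # min ||Ax - b||^2 + lam * ||x||^2  is the ordinary least-squares problem
    # on the augmented system [A; sqrt(lam) * I] x = [b; 0].
    m, n, lam = 100, 200, 1e-2          # under-determined: m < n
    A = np.random.randn(m, n).astype(np.float32)
    b = np.random.randn(m).astype(np.float32)

    A_aug = np.vstack([A, np.sqrt(lam) * np.eye(n, dtype=np.float32)])
    b_aug = np.concatenate([b, np.zeros(n, dtype=np.float32)])

    x_reg, *_ = np.linalg.lstsq(A_aug, b_aug, rcond=None)
    print("regularised solution norm:", np.linalg.norm(x_reg))

As a side note, np.random.random_integers used above is the older NumPy API; np.random.randint(1, 11, (m, n)) produces the same 1..10 integer entries in current NumPy.
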
@@ -78,7 +83,7 @@ print ("A", A.A)
# Change n to equal to m.
vgb = VectorGeometry(m)
vgx = VectorGeometry(n)
-b = vgb.allocate(2, dtype=numpy.float32)
+b = vgb.allocate(VectorGeometry.RANDOM_INT, dtype=numpy.float32)
# b.fill(bmat)
#b = DataContainer(bmat)
@@ -99,11 +104,12 @@ a = VectorData(x_init.as_array(), deep_copy=True)
assert id(x_init.as_array()) != id(a.as_array())
#%%
-f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
-print ('f.L', f.L)
+# f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
+# print ('f.L', f.L)
rate = (1 / f.L) / 6
f.L *= 12
-
+print (f.L)
+# rate = f.L / 1000
# Initial guess
#x_init = DataContainer(np.zeros((n, 1)))
print ('x_init', x_init.as_array())
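
The PowerMethod call that estimated f.L, the Lipschitz constant of the gradient of the least-squares term (the largest eigenvalue of A^T A), is commented out here. A minimal power-iteration sketch of what that estimate computes (illustration only, not the LinearOperator.PowerMethod implementation):

    import numpy as np

    def lipschitz_estimate(A, n_iter=25):
        # Power iteration on A^T A: converges to its largest eigenvalue,
        # which is the Lipschitz constant of the gradient of 0.5 * ||Ax - b||^2.
        x = np.random.randn(A.shape[1]).astype(A.dtype)
        x /= np.linalg.norm(x)
        s = 0.0
        for _ in range(n_iter):
            y = A.T @ (A @ x)
            s = np.linalg.norm(y)    # eigenvalue estimate for the current unit vector x
            x = y / s
        return s

FISTA and plain gradient descent on such a term are typically run with a step size of at most 1/L, which is where rate = (1 / f.L) / 6 comes from.
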
@@ -133,28 +139,35 @@ print ("pippo", pippo.as_array())
print ("x_init", x_init.as_array())
print ("x1", x1.as_array())
+y = A.direct(x_init)
+y *= 0
+A.direct(x_init, out=y)
# Combine with least squares and solve using generic FISTA implementation
#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)
def callback(it, objective, solution):
- print (objective, f(solution))
+ print ("callback " , it , objective, f(solution))
fa = FISTA(x_init=x_init, f=f, g=g1)
-fa.max_iteration = 100
+fa.max_iteration = 1000
fa.update_objective_interval = int( fa.max_iteration / 10 )
fa.run(fa.max_iteration, callback = None, verbose=True)
gd = GradientDescent(x_init=x_init, objective_function=f, rate = rate )
-gd.max_iteration = 100
+gd.max_iteration = 10000
gd.update_objective_interval = int( gd.max_iteration / 10 )
gd.run(gd.max_iteration, callback = None, verbose=True)
+
+
cgls = CGLS(x_init= x_initcgls, operator=A, data=b)
cgls.max_iteration = 1000
-cgls.update_objective_interval = 2
+cgls.update_objective_interval = int( cgls.max_iteration / 10 )
#cgls.should_stop = stop_criterion(cgls)
-cgls.run(10, callback = callback, verbose=True)
+cgls.run(cgls.max_iteration, callback = callback, verbose=True)
+
+
# Print for comparison
print("FISTA least squares plus 1-norm solution and objective value:")
@@ -165,3 +178,7 @@ print ("data ", b.as_array())
print ('FISTA ', A.direct(fa.get_output()).as_array())
print ('GradientDescent', A.direct(gd.get_output()).as_array())
print ('CGLS ', A.direct(cgls.get_output()).as_array())
+
+cond = numpy.linalg.cond(A.A)
+
+print ("cond" , cond)
\ No newline at end of file
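
The new lines compute the condition number of the system matrix. cond(A), the ratio of the largest to the smallest singular value, bounds how strongly perturbations in b are amplified in the least-squares solution, and a large value generally means slow convergence for CGLS and gradient descent. A short self-contained check along the same lines (the matrix here is only illustrative, built like Amat above but with NumPy's current randint API):

    import numpy as np

    # Random integer entries in 1..10, matching the 500 x 200 Amat built above.
    A = np.random.randint(1, 11, size=(500, 200)).astype(np.float32)
    print("cond(A) =", np.linalg.cond(A))
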