author    epapoutsellis <epapoutsellis@gmail.com>  2019-04-07 08:38:39 +0100
committer epapoutsellis <epapoutsellis@gmail.com>  2019-04-07 08:38:39 +0100
commit    c5673f753915b88308a32ec8734380c7a5468bb6
tree      9b357660dee40d328ec0cbfbc2e334df0aceb324 /Wrappers/Python
parent    ec6dabadd80b712beba834732042ba589038314d
changes for Symmetrized Gradient
Diffstat (limited to 'Wrappers/Python')
-rwxr-xr-x  Wrappers/Python/ccpi/framework/BlockGeometry.py                              |   7
-rw-r--r--  Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py                         |  24
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py      |   4
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py              |   3
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py              |   2
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py   | 159
-rw-r--r--  Wrappers/Python/wip/pdhg_TV_denoising_precond.py                             |  25
-rw-r--r--  Wrappers/Python/wip/pdhg_TV_tomography2D.py                                  |  13
8 files changed, 155 insertions, 82 deletions
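
For context: the symmetrized gradient named in the commit message is, for a 2D vector field v = (v1, v2), the symmetric part of its Jacobian (standard definition, stated here for reference, not taken from this commit):

    \mathcal{E}(v) = \tfrac{1}{2}\left(\nabla v + \nabla v^{T}\right)
                   = \begin{pmatrix}
                         \partial_x v_1 & \tfrac{1}{2}\,(\partial_y v_1 + \partial_x v_2) \\
                         \tfrac{1}{2}\,(\partial_y v_1 + \partial_x v_2) & \partial_y v_2
                     \end{pmatrix}

Since the result is symmetric, implementations typically store only three components (the two diagonal entries plus the mixed term once); this is why the adjoint test in SymmetrizedGradientOperator.py below weights its third component by a factor of 2.
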
diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py
index 0f43155..5dd6750 100755
--- a/Wrappers/Python/ccpi/framework/BlockGeometry.py
+++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py
@@ -16,17 +16,16 @@ class BlockGeometry(object):
''''''
self.geometries = args
self.index = 0
- #shape = kwargs.get('shape', None)
- #if shape is None:
- # shape = (len(args),1)
+
shape = (len(args),1)
self.shape = shape
- #print (self.shape)
+
n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
if len(args) != n_elements:
raise ValueError(
'Dimension and size do not match: expected {} got {}'
.format(n_elements, len(args)))
+
def get_item(self, index):
'''returns the Geometry in the BlockGeometry located at position index'''
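
For reference, the retained size check simply multiplies out the shape tuple and compares it with the number of geometries passed in; a minimal standalone sketch (values illustrative):

    import functools

    shape = (3, 1)   # e.g. a BlockGeometry holding three geometries
    n_elements = functools.reduce(lambda x, y: x * y, shape, 1)   # 3 * 1 = 3
    # __init__ raises ValueError unless n_elements == len(args)
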
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index d0e27ae..7c6bc8a 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -104,8 +104,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
x_old = operator.domain_geometry().allocate()
y_old = operator.range_geometry().allocate()
-
-
+
xbar = x_old
x_tmp = x_old
x = x_old
@@ -118,7 +117,9 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
t = time.time()
- objective = []
+ primal = []
+ dual = []
+ pdgap = []
for i in range(niter):
@@ -137,11 +138,18 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
x_old = x
y_old = y
- if i%100==0:
+# if i%100==0:
+
+ p1 = f(operator.direct(x)) + g(x)
+ d1 = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y)))
+ pd1 = p1 - d1
+
+ primal.append(p1)
+ dual.append(d1)
+ pdgap.append(pd1)
+
- primal = f(operator.direct(x)) + g(x)
- dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y)))
- print( i, primal, dual, primal-dual)
+# print( i, primal, dual, primal-dual)
# plt.imshow(x.as_array())
# plt.show()
@@ -149,7 +157,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
t_end = time.time()
- return x, t_end - t, objective
+ return x, t_end - t, primal, dual, pdgap
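
For context, the three histories now returned are the standard PDHG diagnostics for min_x f(Kx) + g(x) (textbook definitions; note the textbook dual applies the convex conjugate g*, whereas the d1 line above calls g itself):

    p(x) = f(Kx) + g(x)                                 \text{(primal, p1)}
    d(y) = -\bigl( f^{*}(y) + g^{*}(-K^{*}y) \bigr)     \text{(dual, d1)}
    p(x) - d(y) \ge 0                                   \text{(gap, pd1; tends to 0 at a saddle point)}

Recording these every iteration instead of every 100 is more informative, but it costs two extra operator applications (K and K*) per iteration.
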
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 24c4e4b..0faba22 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -6,12 +6,12 @@ Created on Fri Mar 1 22:51:17 2019
@author: evangelos
"""
-from ccpi.optimisation.operators import Operator
+from ccpi.optimisation.operators import LinearOperator
from ccpi.optimisation.ops import PowerMethodNonsquare
from ccpi.framework import ImageData, BlockDataContainer
import numpy as np
-class FiniteDiff(Operator):
+class FiniteDiff(LinearOperator):
# Works for Neum/Symmetric & periodic boundary conditions
# TODO add central differences???
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index e00de0c..0c267fc 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -43,8 +43,7 @@ class Gradient(LinearOperator):
def direct(self, x, out=None):
- tmp = self.gm_range.allocate()
-
+ tmp = self.gm_range.allocate()
for i in range(tmp.shape[0]):
tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x))
return tmp
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
index 1b88cba..d54db9b 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py
@@ -70,7 +70,7 @@ class SparseFiniteDiff():
def sum_abs_col(self):
- res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C'))
+ res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))
res[res==0]=1
return ImageData(res)
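
The one-character change above switches numpy's reshape from row-major ('C') to column-major ('F') ordering, presumably to match the vectorisation order assumed when the sparse matrix is assembled. A quick illustration:

    import numpy as np

    a = np.arange(6)                    # [0 1 2 3 4 5]
    print(np.reshape(a, (2, 3), 'C'))   # fills rows first:    [[0 1 2], [3 4 5]]
    print(np.reshape(a, (2, 3), 'F'))   # fills columns first: [[0 2 4], [1 3 5]]
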
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index d908e49..ea3ba8f 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -6,59 +6,93 @@ Created on Fri Mar 1 22:53:55 2019
@author: evangelos
"""
-from ccpi.optimisation.operators import Operator
-from ccpi.optimisation.operators import FiniteDiff
+from ccpi.optimisation.operators import Gradient, Operator, LinearOperator, ScaledOperator
from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.framework import ImageData, DataContainer
-import numpy as np
+from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer
+import numpy
+from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff
-class SymmetrizedGradient(Operator):
+class SymmetrizedGradient(Gradient):
- def __init__(self, gm_domain, gm_range, bnd_cond = 'Neumann', **kwargs):
+ def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs):
- super(SymmetrizedGradient, self).__init__()
+ super(SymmetrizedGradient, self).__init__(gm_domain, bnd_cond, **kwargs)
- self.gm_domain = gm_domain # Domain of Grad Operator
- self.gm_range = gm_range # Range of Grad Operator
- self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences
-
- # Kwargs Default options
- self.memopt = kwargs.get('memopt',False)
- self.correlation = kwargs.get('correlation','Space')
+ '''
+ Domain of SymGrad is the Range of Gradient
+ '''
+ self.gm_domain = self.gm_range
+ self.bnd_cond = bnd_cond
+
+ self.channels = self.gm_range.get_item(0).channels
- #TODO not tested yet, operator norm???
- self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain))
-
+ if self.correlation=='Space':
+ if self.channels>1:
+ pass
+ else:
+#            # 2D image ---> (Dx v1, Dy v2, 0.5 * (Dy v1 + Dx v2))
+ tmp = self.gm_domain.geometries + (self.gm_domain.get_item(0),)
+ self.gm_range = BlockGeometry(*tmp )
+ self.ind1 = range(self.gm_domain.get_item(0).length)
+ self.ind2 = range(self.gm_domain.get_item(0).length-1, -1, -1)
+#            self.order = myorder = [0,1,2,3]
+
+ elif self.correlation=='SpaceChannels':
+ if self.channels>1:
+ pass
+ else:
+ raise ValueError('No channels to correlate')
+
def direct(self, x, out=None):
- tmp = np.zeros(self.gm_range)
- tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0])
- tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1])
- tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) +
- FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) )
+# tmp = numpy.zeros(self.gm_range)
+# tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0])
+# tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1])
+# tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) +
+# FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) )
+#
+# return type(x)(tmp)
+
+        tmp = [[None]*2 for _ in range(2)]  # independent rows: [[None]*2]*2 would alias them
+ for i in range(2):
+ for j in range(2):
+ tmp[i][j]=FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).adjoint(x.get_item(j))
+ tmp = numpy.array(tmp)
+ z = 0.5 * (tmp.T + tmp)
- return type(x)(tmp)
-
+ return BlockDataContainer(z.tolist())
+
def adjoint(self, x, out=None):
+ pass
- tmp = np.zeros(self.gm_domain)
+ res = []
+ for i in range(2):
+ for j in range(2):
+
+                res.append(FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).direct(x.get_item(j)))
+
+
- tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \
- FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2])
-
- tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \
- FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1])
-
- return type(x)(tmp)
+# for
+
+# tmp = numpy.zeros(self.gm_domain)
+#
+# tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \
+# FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2])
+#
+# tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \
+# FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1])
+#
+# return type(x)(tmp)
def alloc_domain_dim(self):
- return ImageData(np.zeros(self.gm_domain))
+ return ImageData(numpy.zeros(self.gm_domain))
def alloc_range_dim(self):
- return ImageData(np.zeros(self.range_dim))
+ return ImageData(numpy.zeros(self.range_dim))
def domain_dim(self):
return self.gm_domain
@@ -80,29 +114,58 @@ if __name__ == '__main__':
###########################################################################
## Symmetrized Gradient
+ from ccpi.framework import DataContainer
+ from ccpi.optimisation.operators import Gradient, BlockOperator, FiniteDiff
+ import numpy as np
N, M = 2, 3
- ig = (N,M)
- ig2 = (2,) + ig
- ig3 = (3,) + ig
- u1 = DataContainer(np.random.randint(10, size=ig2))
- w1 = DataContainer(np.random.randint(10, size=ig3))
+ K = 2
+
+ ig1 = ImageGeometry(N, M)
+ ig2 = ImageGeometry(N, M, channels=K)
+
+ E1 = SymmetrizedGradient(ig1, correlation = 'Space', bnd_cond='Neumann')
+ E2 = SymmetrizedGradient(ig2, correlation = 'SpaceChannels', bnd_cond='Periodic')
- E = SymmetrizedGradient(ig2,ig3)
+ print(E1.domain_geometry().shape)
+ print(E2.domain_geometry().shape)
- d1 = E.direct(u1)
- d2 = E.adjoint(w1)
+ u1 = E1.gm_domain.allocate('random_int')
+ u2 = E2.gm_domain.allocate('random_int')
+
+
+ res = E1.direct(u1)
+
+ Dx = FiniteDiff(ig1, direction = 1, bnd_cond = 'Neumann')
+ Dy = FiniteDiff(ig1, direction = 0, bnd_cond = 'Neumann')
+
+ B = BlockOperator(Dy, Dx)
+ V = BlockDataContainer(u1,u2)
+
+ res = B.adjoint(V)
- LHS = (d1.as_array()[0]*w1.as_array()[0] + \
- d1.as_array()[1]*w1.as_array()[1] + \
- 2*d1.as_array()[2]*w1.as_array()[2]).sum()
+# ig = (N,M)
+# ig2 = (2,) + ig
+# ig3 = (3,) + ig
+# u1 = ig.allocate('random_int')
+# w1 = E.gm_range.allocate('random_int')
+# DataContainer(np.random.randint(10, size=ig3))
- RHS = (u1.as_array()[0]*d2.as_array()[0] + \
- u1.as_array()[1]*d2.as_array()[1]).sum()
- print(LHS, RHS, E.norm())
+# d1 = E.direct(u1)
+# d2 = E.adjoint(w1)
+# LHS = (d1.as_array()[0]*w1.as_array()[0] + \
+# d1.as_array()[1]*w1.as_array()[1] + \
+# 2*d1.as_array()[2]*w1.as_array()[2]).sum()
+#
+# RHS = (u1.as_array()[0]*d2.as_array()[0] + \
+# u1.as_array()[1]*d2.as_array()[1]).sum()
+#
+#
+# print(LHS, RHS, E.norm())
+#
#
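
The commented-out LHS/RHS block above is a dot-product (adjoint) test, <E u, w> = <u, E* w>, where the factor 2 accounts for the mixed component being stored once. Below is a self-contained sketch of the same test with explicit matrices and the plain Euclidean inner product (so no factor 2 appears); this is plain numpy, not the ccpi API, and fd is a hypothetical helper:

    import numpy as np

    def fd(n):
        # forward difference with Neumann boundary: last row stays zero
        D = np.zeros((n, n))
        for i in range(n - 1):
            D[i, i], D[i, i + 1] = -1.0, 1.0
        return D

    N, M = 4, 5                              # grid (rows, cols)
    Dy = np.kron(fd(N), np.eye(M))           # difference along rows
    Dx = np.kron(np.eye(N), fd(M))           # difference along columns
    Z  = np.zeros((N * M, N * M))

    # E maps (v1, v2) to (Dx v1, Dy v2, 0.5*(Dy v1 + Dx v2))
    E = np.block([[Dx,       Z       ],
                  [Z,        Dy      ],
                  [0.5 * Dy, 0.5 * Dx]])

    rng = np.random.default_rng(0)
    u = rng.standard_normal(2 * N * M)       # domain element
    w = rng.standard_normal(3 * N * M)       # range element

    lhs = (E @ u) @ w
    rhs = u @ (E.T @ w)
    print(np.isclose(lhs, rhs))              # True

With explicit matrices the transpose is the adjoint by construction; the value of running the same identity against hand-coded direct/adjoint methods is that it only holds when their boundary handling is mutually consistent.
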
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py
index d5c021d..426ce8b 100644
--- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py
@@ -74,7 +74,7 @@ else:
###########################################################################
#%%
-diag_precon = True
+diag_precon = False
if diag_precon:
@@ -82,7 +82,7 @@ if diag_precon:
tau = 1/operator.sum_abs_row()
sigma = 1/ operator.sum_abs_col()
-
+
return tau, sigma
tau, sigma = tau_sigma_precond(operator)
@@ -99,14 +99,19 @@ else:
#%%
opt = {'niter':2000}
-#
-res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
+
+res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-aaa = res[0].as_array()
-#
-plt.imshow(aaa)
+plt.figure(figsize=(5,5))
+plt.imshow(res.as_array())
plt.colorbar()
plt.show()
+
+#aaa = res[0].as_array()
+#
+#plt.imshow(aaa)
+#plt.colorbar()
+#plt.show()
#c2 = aaa
#del aaa
#%%
@@ -114,9 +119,9 @@ plt.show()
#c2 = aaa
##%%
#%%
-z = c1 - c2
-plt.imshow(np.abs(z[0:95,0:95]))
-plt.colorbar()
+#z = c1 - c2
+#plt.imshow(np.abs(z[0:95,0:95]))
+#plt.colorbar()
#%%
#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
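
For context, sum_abs_row and sum_abs_col appear to implement the diagonal preconditioner of Pock and Chambolle (2011) in its simplest (alpha = 1) form: for the operator matrix K,

    \tau_j = \frac{1}{\sum_i |K_{i,j}|}, \qquad \sigma_i = \frac{1}{\sum_j |K_{i,j}|}

so tau carries one step size per domain element and sigma one per range element, and the res[res==0]=1 guard in SparseFiniteDiff above prevents division by zero for all-zero rows or columns.
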
diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
index 9feb05b..f06f166 100644
--- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py
+++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
@@ -13,7 +13,7 @@ from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, Acquisi
import numpy as np
import matplotlib.pyplot as plt
-from ccpi.optimisation.algorithms import PDHG
+from ccpi.optimisation.algorithms import PDHG, PDHG_old
from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \
@@ -109,7 +109,7 @@ if diag_precon:
tau, sigma = tau_sigma_precond(operator)
else:
- sigma = 10
+ sigma = 1
tau = 1/(sigma*normK**2)
#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
@@ -120,13 +120,12 @@ else:
opt = {'niter':2000}
#
-res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
+res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-aaa = res[0].as_array()
-#
-plt.imshow(aaa)
+plt.figure(figsize=(5,5))
+plt.imshow(res.as_array())
plt.colorbar()
-plt.show()
+plt.show()
#%%
#sol = pdhg.get_output().as_array()
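
A closing note on the step sizes in the unpreconditioned branch: tau = 1/(sigma*normK**2) keeps the standard PDHG convergence condition

    \sigma \, \tau \, \|K\|^{2} \le 1

tight for any choice of sigma, so changing sigma from 10 to 1 only rebalances the primal and dual step lengths; convergence is preserved either way, though the balance can affect practical speed.
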