author     epapoutsellis <epapoutsellis@gmail.com>   2019-06-10 13:43:11 +0100
committer  epapoutsellis <epapoutsellis@gmail.com>   2019-06-10 13:43:11 +0100
commit     d5cb6fd1f42d98b24a0bd2ae2646d6f8914ba863 (patch)
tree       e352127d39eb1ad1d99843b1fa06353e9a986d21 /Wrappers/Python
parent     5835b1468ef0d509bb67d7eef590eb172769a2e9 (diff)
parent     bb7657fbf8fe00de7f674e31fa6d1dbd130e79fc (diff)
merge demos
Diffstat (limited to 'Wrappers/Python')
-rwxr-xr-x  Wrappers/Python/ccpi/framework/TestData.py                                        |  27
-rwxr-xr-x  Wrappers/Python/ccpi/framework/framework.py                                       |   2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py                             | 118
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py                    |   9
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L1Norm.py                             |  88
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py                      |   6
-rw-r--r--  Wrappers/Python/data/shapes.png                                                   | bin 0 -> 19339 bytes
-rw-r--r--  Wrappers/Python/demos/CompareAlgorithms/CGLS_FISTA_PDHG_LeastSquares.py           |  67
-rw-r--r--  Wrappers/Python/demos/FISTA_examples/FISTA_CGLS.py                                |  50
-rw-r--r--  Wrappers/Python/demos/FISTA_examples/FISTA_Tikhonov_Poisson_Denoising.py          |  13
-rwxr-xr-x  Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TGV_Denoising.py               | 169
-rwxr-xr-x  Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising.py                |  86
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_2D_time.py        | 192
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_Gaussian_3D.py    | 152
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_Tikhonov_Denoising.py          | 112
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_2D_time.py     | 192
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_Gaussian_3D.py | 181
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/PDHG_TV_Color_Denoising.py                    | 115
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TGV_Tomo2D.py                       |   7
-rw-r--r--  Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TV_Tomo2D_poisson.py                | 142
20 files changed, 1264 insertions, 464 deletions
diff --git a/Wrappers/Python/ccpi/framework/TestData.py b/Wrappers/Python/ccpi/framework/TestData.py
index 752bc13..cfad7a3 100755
--- a/Wrappers/Python/ccpi/framework/TestData.py
+++ b/Wrappers/Python/ccpi/framework/TestData.py
@@ -16,6 +16,7 @@ class TestData(object):
PEPPERS = 'peppers.tiff'
RESOLUTION_CHART = 'resolution_chart.tiff'
SIMPLE_PHANTOM_2D = 'simple_jakobs_phantom'
+ SHAPES = 'shapes.png'
def __init__(self, **kwargs):
self.data_dir = kwargs.get('data_dir', data_dir)
@@ -23,7 +24,7 @@ class TestData(object):
def load(self, which, size=(512,512), scale=(0,1), **kwargs):
if which not in [TestData.BOAT, TestData.CAMERA,
TestData.PEPPERS, TestData.RESOLUTION_CHART,
- TestData.SIMPLE_PHANTOM_2D]:
+ TestData.SIMPLE_PHANTOM_2D, TestData.SHAPES]:
raise ValueError('Unknown TestData {}.'.format(which))
if which == TestData.SIMPLE_PHANTOM_2D:
N = size[0]
@@ -34,6 +35,16 @@ class TestData(object):
ig = ImageGeometry(voxel_num_x = N, voxel_num_y = M, dimension_labels=[ImageGeometry.HORIZONTAL_X, ImageGeometry.HORIZONTAL_Y])
data = ig.allocate()
data.fill(sdata)
+
+ elif which == TestData.SHAPES:
+
+ tmp = numpy.array(Image.open(os.path.join(self.data_dir, which)).convert('L'))
+ N = 200
+ M = 300
+ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = M, dimension_labels=[ImageGeometry.HORIZONTAL_X, ImageGeometry.HORIZONTAL_Y])
+ data = ig.allocate()
+ data.fill(tmp/numpy.max(tmp))
+
else:
tmp = Image.open(os.path.join(self.data_dir, which))
print (tmp)
@@ -94,5 +105,17 @@ class TestData(object):
data = data/data.max()
- return ImageData(data)
+ return ImageData(data)
+
+ def shapes(**kwargs):
+
+ tmp = Image.open(os.path.join(data_dir, 'shapes.png')).convert('LA')
+
+ size = kwargs.get('size',(300, 200))
+
+ data = numpy.array(tmp.resize(size))
+
+ data = data/data.max()
+
+ return ImageData(data)
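[Editor's note: the new SHAPES entry is used by several of the demos below. A minimal usage sketch, assuming the package data is installed under <sys.prefix>/share/ccpi as those demos do:]

import os, sys
from ccpi.framework import TestData

# Load the new shapes.png test image; load() returns an ImageData scaled to [0, 1]
loader = TestData(data_dir=os.path.join(sys.prefix, 'share', 'ccpi'))
data = loader.load(TestData.SHAPES)
print(data.shape, data.as_array().min(), data.as_array().max())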
diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py
index 3840f2c..e906ca6 100755
--- a/Wrappers/Python/ccpi/framework/framework.py
+++ b/Wrappers/Python/ccpi/framework/framework.py
@@ -821,7 +821,7 @@ class DataContainer(object):
if self.shape == other.shape:
# return (self*other).sum()
if method == 'numpy':
- return numpy.dot(self.as_array().ravel(), other.as_array())
+ return numpy.dot(self.as_array().ravel(), other.as_array().ravel())
elif method == 'reduce':
# see https://github.com/vais-ral/CCPi-Framework/pull/273
# notice that Python seems to be smart enough to use
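[Editor's note: the dot() fix ravels both operands. A plain NumPy sketch, independent of the ccpi API, of why that matters:]

import numpy

a = numpy.arange(6.0).reshape(2, 3)
b = numpy.ones((2, 3))

# With only one operand ravelled, numpy.dot attempts a matrix product and
# fails (or returns a vector); ravelling both gives the intended scalar.
inner = numpy.dot(a.ravel(), b.ravel())
assert inner == (a * b).sum()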
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index ee51049..419958a 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -20,102 +20,62 @@ class FISTA(Algorithm):
x_init: initial guess
f: data fidelity
g: regularizer
- h:
- opt: additional algorithm
+ opt: additional options
'''
-
+
+
def __init__(self, **kwargs):
'''initialisation can be done at creation time if all
proper variables are passed or later with set_up'''
super(FISTA, self).__init__()
- self.f = None
- self.g = None
+ self.f = kwargs.get('f', None)
+ self.g = kwargs.get('g', None)
+ self.x_init = kwargs.get('x_init',None)
self.invL = None
self.t_old = 1
- args = ['x_init', 'f', 'g', 'opt']
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(kwargs['x_init'],
- f=kwargs['f'],
- g=kwargs['g'],
- opt=kwargs['opt'])
+ if self.f is not None and self.g is not None:
+ print ("Calling from creator")
+ self.set_up(self.x_init, self.f, self.g)
+
- def set_up(self, x_init, f=None, g=None, opt=None):
+ def set_up(self, x_init, f, g, opt=None, **kwargs):
- # default inputs
- if f is None:
- self.f = ZeroFunction()
- else:
- self.f = f
- if g is None:
- g = ZeroFunction()
- self.g = g
- else:
- self.g = g
+ self.f = f
+ self.g = g
# algorithmic parameters
if opt is None:
- opt = {'tol': 1e-4, 'memopt':False}
-
- self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
- memopt = opt['memopt'] if 'memopt' in opt.keys() else False
- self.memopt = memopt
-
- # initialization
- if memopt:
- self.y = x_init.copy()
- self.x_old = x_init.copy()
- self.x = x_init.copy()
- self.u = x_init.copy()
- else:
- self.x_old = x_init.copy()
- self.y = x_init.copy()
-
- #timing = numpy.zeros(max_iter)
- #criter = numpy.zeros(max_iter)
+ opt = {'tol': 1e-4}
-
+ self.y = x_init.copy()
+ self.x_old = x_init.copy()
+ self.x = x_init.copy()
+ self.u = x_init.copy()
+
+
self.invL = 1/f.L
self.t_old = 1
def update(self):
- # algorithm loop
- #for it in range(0, max_iter):
-
- if self.memopt:
- # u = y - invL*f.grad(y)
- # store the result in x_old
- self.f.gradient(self.y, out=self.u)
- self.u.__imul__( -self.invL )
- self.u.__iadd__( self.y )
- # x = g.prox(u,invL)
- self.g.proximal(self.u, self.invL, out=self.x)
-
- self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
-
- # y = x + (t_old-1)/t*(x-x_old)
- self.x.subtract(self.x_old, out=self.y)
- self.y.__imul__ ((self.t_old-1)/self.t)
- self.y.__iadd__( self.x )
-
- self.x_old.fill(self.x)
- self.t_old = self.t
-
-
- else:
- u = self.y - self.invL*self.f.gradient(self.y)
-
- self.x = self.g.proximal(u,self.invL)
-
- self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
-
- self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old)
-
- self.x_old = self.x.copy()
- self.t_old = self.t
+
+ self.f.gradient(self.y, out=self.u)
+ self.u.__imul__( -self.invL )
+ self.u.__iadd__( self.y )
+ # x = g.prox(u,invL)
+ self.g.proximal(self.u, self.invL, out=self.x)
+
+ self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
+
+# self.x.subtract(self.x_old, out=self.y)
+ self.y = self.x - self.x_old
+ self.y.__imul__ ((self.t_old-1)/self.t)
+ self.y.__iadd__( self.x )
+
+ self.x_old.fill(self.x)
+ self.t_old = self.t
def update_objective(self):
- self.loss.append( self.f(self.x) + self.g(self.x) )
\ No newline at end of file
+ self.loss.append( self.f(self.x) + self.g(self.x) )
+
+
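[Editor's note: the rewritten update() above is the standard FISTA step. A self-contained NumPy sketch of the same iteration on a toy least-squares problem with g = 0, independent of the ccpi classes:]

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 10))
b = rng.standard_normal(20)

L = np.linalg.norm(A, 2) ** 2            # Lipschitz constant of the gradient
invL = 1.0 / L
x_old = np.zeros(10)
y = x_old.copy()
t_old = 1.0

for _ in range(200):
    grad = A.T @ (A @ y - b)             # gradient of 0.5*||A x - b||^2 at y
    x = y - invL * grad                  # proximal step of g = 0 is the identity
    t = 0.5 * (1 + np.sqrt(1 + 4 * t_old ** 2))
    y = x + ((t_old - 1) / t) * (x - x_old)
    x_old, t_old = x, t

print(np.linalg.norm(A @ x - b))         # residual after 200 FISTA iterations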
diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
index 0d3c8f5..6920829 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
@@ -52,8 +52,8 @@ class KullbackLeibler(Function):
'''
- # TODO avoid scipy import ????
- tmp = scipy.special.kl_div(self.b.as_array(), x.as_array())
+ ind = x.as_array()>0
+ tmp = scipy.special.kl_div(self.b.as_array()[ind], x.as_array()[ind])
return numpy.sum(tmp)
@@ -78,9 +78,8 @@ class KullbackLeibler(Function):
def convex_conjugate(self, x):
- # TODO avoid scipy import ????
- xlogy = scipy.special.xlogy(self.b.as_array(), 1 - x.as_array())
- return numpy.sum(-xlogy)
+ xlogy = - scipy.special.xlogy(self.b.as_array(), 1 - x.as_array())
+ return numpy.sum(xlogy)
def proximal(self, x, tau, out=None):
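[Editor's note: a small SciPy sketch of the two patched expressions, restricted to positive entries as in __call__; the arrays here are illustrative only:]

import numpy
import scipy.special

b = numpy.array([1.0, 2.0, 0.5])
x = numpy.array([0.5, 2.0, 0.0])          # contains a non-positive entry

ind = x > 0                                # evaluate the KL fidelity only where x > 0
value = numpy.sum(scipy.special.kl_div(b[ind], x[ind]))

x_dual = numpy.array([0.1, -0.3, 0.2])     # dual variable with 1 - x_dual > 0
conjugate = numpy.sum(-scipy.special.xlogy(b, 1 - x_dual))
print(value, conjugate)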
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
index 79040a0..37c2016 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
@@ -60,7 +60,7 @@ class L1Norm(Function):
y = 0
if self.b is not None:
- y = 0 + (self.b * x).sum()
+ y = 0 + self.b.dot(x)
return y
def proximal(self, x, tau, out=None):
@@ -95,98 +95,24 @@ class L1Norm(Function):
return ScaledFunction(self, scalar)
-#import numpy as np
-##from ccpi.optimisation.funcs import Function
-#from ccpi.optimisation.functions import Function
-#from ccpi.framework import DataContainer, ImageData
-#
-#
-############################# L1NORM FUNCTIONS #############################
-#class SimpleL1Norm(Function):
-#
-# def __init__(self, alpha=1):
-#
-# super(SimpleL1Norm, self).__init__()
-# self.alpha = alpha
-#
-# def __call__(self, x):
-# return self.alpha * x.abs().sum()
-#
-# def gradient(self,x):
-# return ValueError('Not Differentiable')
-#
-# def convex_conjugate(self,x):
-# return 0
-#
-# def proximal(self, x, tau):
-# ''' Soft Threshold'''
-# return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
-#
-# def proximal_conjugate(self, x, tau):
-# return x.divide((x.abs()/self.alpha).maximum(1.0))
-
-#class L1Norm(SimpleL1Norm):
-#
-# def __init__(self, alpha=1, **kwargs):
-#
-# super(L1Norm, self).__init__()
-# self.alpha = alpha
-# self.b = kwargs.get('b',None)
-#
-# def __call__(self, x):
-#
-# if self.b is None:
-# return SimpleL1Norm.__call__(self, x)
-# else:
-# return SimpleL1Norm.__call__(self, x - self.b)
-#
-# def gradient(self, x):
-# return ValueError('Not Differentiable')
-#
-# def convex_conjugate(self,x):
-# if self.b is None:
-# return SimpleL1Norm.convex_conjugate(self, x)
-# else:
-# return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
-#
-# def proximal(self, x, tau):
-#
-# if self.b is None:
-# return SimpleL1Norm.proximal(self, x, tau)
-# else:
-# return self.b + SimpleL1Norm.proximal(self, x - self.b , tau)
-#
-# def proximal_conjugate(self, x, tau):
-#
-# if self.b is None:
-# return SimpleL1Norm.proximal_conjugate(self, x, tau)
-# else:
-# return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
-#
-
-###############################################################################
-
-
-
-
if __name__ == '__main__':
from ccpi.framework import ImageGeometry
import numpy
- N, M = 40,40
+ N, M = 400,400
ig = ImageGeometry(N, M)
scalar = 10
- b = ig.allocate('random_int')
- u = ig.allocate('random_int')
+ b = ig.allocate('random')
+ u = ig.allocate('random')
f = L1Norm()
f_scaled = scalar * L1Norm()
-
+
f_b = L1Norm(b=b)
f_scaled_b = scalar * L1Norm(b=b)
- # call
-
+ # call
+
a1 = f(u)
a2 = f_scaled(u)
numpy.testing.assert_equal(scalar * a1, a2)
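[Editor's note: for reference, the proximal map used by L1Norm is plain soft thresholding; a minimal NumPy sketch of that operation, not the ccpi implementation itself:]

import numpy as np

def soft_threshold(x, tau):
    # prox of tau*||.||_1: shrink every entry towards zero by tau
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

x = np.array([-2.0, -0.3, 0.1, 1.5])
print(soft_threshold(x, 0.5))              # [-1.5  0.   0.   1. ]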
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index b77d472..2f05119 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -76,7 +76,7 @@ class L2NormSquared(Function):
tmp = 0
if self.b is not None:
- tmp = (x * self.b).sum()
+ tmp = x.dot(self.b) #(x * self.b).sum()
return (1./4.) * x.squared_norm() + tmp
@@ -151,7 +151,7 @@ if __name__ == '__main__':
import numpy
# TESTS for L2 and scalar * L2
- M, N, K = 2,3,5
+ M, N, K = 20,30,50
ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
u = ig.allocate('random_int')
b = ig.allocate('random_int')
@@ -178,7 +178,7 @@ if __name__ == '__main__':
#check convex conjuagate with data
d1 = f1.convex_conjugate(u)
- d2 = (1/4) * u.squared_norm() + (u*b).sum()
+ d2 = (1/4) * u.squared_norm() + u.dot(b)
numpy.testing.assert_equal(d1, d2)
# check proximal no data
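[Editor's note: the test change above checks the identity used for the convex conjugate of ||x - b||^2. A quick standalone verification of that identity from the definition of the conjugate:]

import numpy as np

rng = np.random.default_rng(1)
u = rng.standard_normal(100)
b = rng.standard_normal(100)

# f(x) = ||x - b||^2 has f*(u) = (1/4)*||u||^2 + <u, b>; the supremum in the
# definition of f* is attained at x = b + u/2.
x_star = b + u / 2
lhs = u @ x_star - np.sum((x_star - b) ** 2)
rhs = 0.25 * np.sum(u ** 2) + u @ b
assert np.allclose(lhs, rhs)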
diff --git a/Wrappers/Python/data/shapes.png b/Wrappers/Python/data/shapes.png
new file mode 100644
index 0000000..dd4f680
--- /dev/null
+++ b/Wrappers/Python/data/shapes.png
Binary files differ
diff --git a/Wrappers/Python/demos/CompareAlgorithms/CGLS_FISTA_PDHG_LeastSquares.py b/Wrappers/Python/demos/CompareAlgorithms/CGLS_FISTA_PDHG_LeastSquares.py
index 0875c20..672d4bc 100644
--- a/Wrappers/Python/demos/CompareAlgorithms/CGLS_FISTA_PDHG_LeastSquares.py
+++ b/Wrappers/Python/demos/CompareAlgorithms/CGLS_FISTA_PDHG_LeastSquares.py
@@ -32,7 +32,7 @@ Problem: min_x || A x - g ||_{2}^{2}
"""
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry
+from ccpi.framework import ImageData, TestData, AcquisitionGeometry
import numpy as np
import numpy
@@ -42,17 +42,17 @@ from ccpi.optimisation.algorithms import PDHG, CGLS, FISTA
from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, FunctionOperatorComposition
from ccpi.astra.ops import AstraProjectorSimple
-import astra
+import astra
+import os, sys
-# Create Ground truth phantom and Sinogram
-N = 128
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+# Load Data
+loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+
+N = 50
+M = 50
+data = loader.load(TestData.SIMPLE_PHANTOM_2D, size=(N,M), scale=(0,1))
+ig = data.geometry
detectors = N
angles = np.linspace(0, np.pi, N, dtype=np.float32)
@@ -69,13 +69,14 @@ sin = Aop.direct(data)
noisy_data = sin
+###############################################################################
# Setup and run Astra CGLS algorithm
vol_geom = astra.create_vol_geom(N, N)
proj_geom = astra.create_proj_geom('parallel', 1.0, detectors, angles)
-proj_id = astra.create_projector('strip', proj_geom, vol_geom)
+proj_id = astra.create_projector('linear', proj_geom, vol_geom)
# Create a sinogram from a phantom
-sinogram_id, sinogram = astra.create_sino(x, proj_id)
+sinogram_id, sinogram = astra.create_sino(data.as_array(), proj_id)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
@@ -92,6 +93,7 @@ astra.algorithm.run(alg_id, 1000)
recon_cgls_astra = astra.data2d.get(rec_id)
+###############################################################################
# Setup and run the CGLS algorithm
x_init = ig.allocate()
cgls = CGLS(x_init=x_init, operator=Aop, data=noisy_data)
@@ -99,12 +101,15 @@ cgls.max_iteration = 1000
cgls.update_objective_interval = 200
cgls.run(1000, verbose=False)
+###############################################################################
# Setup and run the PDHG algorithm
operator = Aop
f = L2NormSquared(b = noisy_data)
g = ZeroFunction()
+
## Compute operator Norm
normK = operator.norm()
+
## Primal & dual stepsizes
sigma = 1
tau = 1/(sigma*normK**2)
@@ -112,17 +117,17 @@ tau = 1/(sigma*normK**2)
pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
-pdhg.run(1000, verbose=False)
+pdhg.run(1000, verbose=True)
+###############################################################################
# Setup and run the FISTA algorithm
fidelity = FunctionOperatorComposition(L2NormSquared(b=noisy_data), Aop)
regularizer = ZeroFunction()
-opt = {'memopt':True}
-fista = FISTA(x_init=x_init , f=fidelity, g=regularizer, opt=opt)
+fista = FISTA(x_init=x_init , f=fidelity, g=regularizer)
fista.max_iteration = 1000
fista.update_objective_interval = 200
-fista.run(1000, verbose=False)
+fista.run(1000, verbose=True)
#%% Show results
@@ -131,18 +136,22 @@ plt.suptitle('Reconstructions ', fontsize=16)
plt.subplot(2,2,1)
plt.imshow(cgls.get_output().as_array())
+plt.colorbar()
plt.title('CGLS reconstruction')
plt.subplot(2,2,2)
plt.imshow(fista.get_output().as_array())
+plt.colorbar()
plt.title('FISTA reconstruction')
plt.subplot(2,2,3)
plt.imshow(pdhg.get_output().as_array())
+plt.colorbar()
plt.title('PDHG reconstruction')
plt.subplot(2,2,4)
plt.imshow(recon_cgls_astra)
+plt.colorbar()
plt.title('CGLS astra')
diff1 = pdhg.get_output() - cgls.get_output()
@@ -167,28 +176,14 @@ plt.title('Diff CLGS astra vs CGLS')
plt.colorbar()
+#%%
+print('Primal Objective (FISTA) {} '.format(fista.objective[-1]))
+print('Primal Objective (CGLS) {} '.format(cgls.objective[-1]))
+print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0]))
+true_obj = (Aop.direct(cglsd.get_output())-noisy_data).squared_norm()
+print('True objective {}'.format(true_obj))
-
-
-
-
-
-
-
-
-
-
-
-
-#
-#
-#
-#
-#
-#
-#
-#
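[Editor's note: the PDHG step sizes in this demo satisfy sigma*tau*||K||^2 <= 1. A standalone power-iteration sketch for a plain matrix K, illustrating the quantity the demo obtains from operator.norm():]

import numpy as np

rng = np.random.default_rng(2)
K = rng.standard_normal((30, 20))

# power iteration on K^T K to estimate the largest singular value of K
x = rng.standard_normal(20)
for _ in range(100):
    x = K.T @ (K @ x)
    x /= np.linalg.norm(x)
normK = np.sqrt(np.linalg.norm(K.T @ (K @ x)))

sigma = 1.0
tau = 1.0 / (sigma * normK ** 2)           # gives sigma*tau*||K||^2 = 1
print(normK, np.linalg.norm(K, 2))         # compare against the exact 2-norm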
diff --git a/Wrappers/Python/demos/FISTA_examples/FISTA_CGLS.py b/Wrappers/Python/demos/FISTA_examples/FISTA_CGLS.py
index 68fb649..1a96a2d 100644
--- a/Wrappers/Python/demos/FISTA_examples/FISTA_CGLS.py
+++ b/Wrappers/Python/demos/FISTA_examples/FISTA_CGLS.py
@@ -21,14 +21,15 @@
from ccpi.framework import AcquisitionGeometry
from ccpi.optimisation.algorithms import FISTA
-from ccpi.optimisation.functions import IndicatorBox, ZeroFunction, L2NormSquared, FunctionOperatorComposition
-from ccpi.astra.ops import AstraProjectorSimple
+from ccpi.optimisation.functions import IndicatorBox, ZeroFunction, \
+ L2NormSquared, FunctionOperatorComposition
+from ccpi.astra.operators import AstraProjectorSimple
import numpy as np
import matplotlib.pyplot as plt
from ccpi.framework import TestData
import os, sys
-
+from ccpi.optimisation.funcs import Norm2sq
# Load Data
loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
@@ -117,16 +118,15 @@ plt.show()
# demonstrated in the rest of this file. In general all methods need an initial
# guess and some algorithm options to be set:
-f = FunctionOperatorComposition(0.5 * L2NormSquared(b=sin), Aop)
+f = FunctionOperatorComposition(L2NormSquared(b=sin), Aop)
+#f = Norm2sq(Aop, sin, c=0.5, memopt=True)
g = ZeroFunction()
x_init = ig.allocate()
-opt = {'memopt':True}
-
-fista = FISTA(x_init=x_init, f=f, g=g, opt=opt)
-fista.max_iteration = 100
-fista.update_objective_interval = 1
-fista.run(100, verbose=True)
+fista = FISTA(x_init=x_init, f=f, g=g)
+fista.max_iteration = 1000
+fista.update_objective_interval = 100
+fista.run(1000, verbose=True)
plt.figure()
plt.imshow(fista.get_output().as_array())
@@ -134,18 +134,12 @@ plt.title('FISTA unconstrained')
plt.colorbar()
plt.show()
-plt.figure()
-plt.semilogy(fista.objective)
-plt.title('FISTA objective')
-plt.show()
-
-#%%
# Run FISTA for least squares with lower/upper bound
-fista0 = FISTA(x_init=x_init, f=f, g=IndicatorBox(lower=0,upper=1), opt=opt)
-fista0.max_iteration = 100
-fista0.update_objective_interval = 1
-fista0.run(100, verbose=True)
+fista0 = FISTA(x_init=x_init, f=f, g=IndicatorBox(lower=0,upper=1))
+fista0.max_iteration = 1000
+fista0.update_objective_interval = 100
+fista0.run(1000, verbose=True)
plt.figure()
plt.imshow(fista0.get_output().as_array())
@@ -153,10 +147,10 @@ plt.title('FISTA constrained in [0,1]')
plt.colorbar()
plt.show()
-plt.figure()
-plt.semilogy(fista0.objective)
-plt.title('FISTA constrained in [0,1]')
-plt.show()
+#plt.figure()
+#plt.semilogy(fista0.objective)
+#plt.title('FISTA constrained in [0,1]')
+#plt.show()
#%% Check with CVX solution
@@ -178,10 +172,10 @@ if cvx_not_installable:
vol_geom = astra.create_vol_geom(N, N)
proj_geom = astra.create_proj_geom('parallel', 1.0, detectors, angles)
- proj_id = astra.create_projector('strip', proj_geom, vol_geom)
+ proj_id = astra.create_projector('linear', proj_geom, vol_geom)
matrix_id = astra.projector.matrix(proj_id)
-
+
ProjMat = astra.matrix.get(matrix_id)
tmp = sin.as_array().ravel()
@@ -216,4 +210,6 @@ if cvx_not_installable:
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
-
\ No newline at end of file
+
+ print('Primal Objective (CVX) {} '.format(obj.value))
+ print('Primal Objective (FISTA) {} '.format(fista0.loss[1]))
\ No newline at end of file
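[Editor's note: the constrained run uses IndicatorBox(lower=0, upper=1) as g; its proximal map is simply the projection onto the box, i.e. entrywise clipping. A one-line sketch of that projection:]

import numpy as np

u = np.array([-0.4, 0.2, 0.7, 1.3])
print(np.clip(u, 0.0, 1.0))                # [0.   0.2  0.7  1. ]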
diff --git a/Wrappers/Python/demos/FISTA_examples/FISTA_Tikhonov_Poisson_Denoising.py b/Wrappers/Python/demos/FISTA_examples/FISTA_Tikhonov_Poisson_Denoising.py
index 41219f4..6007990 100644
--- a/Wrappers/Python/demos/FISTA_examples/FISTA_Tikhonov_Poisson_Denoising.py
+++ b/Wrappers/Python/demos/FISTA_examples/FISTA_Tikhonov_Poisson_Denoising.py
@@ -21,7 +21,7 @@
"""
-Tikhonov for Poisson denoising using FISTA algorithm:
+"Tikhonov regularization" for Poisson denoising using FISTA algorithm:
Problem: min_x, x>0 \alpha * ||\nabla x||_{2}^{2} + \int x - g * log(x)
@@ -52,14 +52,14 @@ from skimage.util import random_noise
loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
# Load Data
-N = 150
-M = 150
+N = 100
+M = 100
data = loader.load(TestData.SIMPLE_PHANTOM_2D, size=(N,M), scale=(0,1))
ig = data.geometry
ag = ig
-# Create Noisy data. Add Gaussian noise
+# Create Noisy data with Poisson noise
n1 = random_noise(data.as_array(), mode = 'poisson', seed = 10)
noisy_data = ImageData(n1)
@@ -75,7 +75,6 @@ plt.title('Noisy Data')
plt.colorbar()
plt.show()
-#%%
# Regularisation Parameter
alpha = 10
@@ -114,8 +113,7 @@ fid.proximal = KL_Prox_PosCone
reg = FunctionOperatorComposition(alpha * L2NormSquared(), operator)
x_init = ig.allocate()
-opt = {'memopt':True}
-fista = FISTA(x_init=x_init , f=reg, g=fid, opt=opt)
+fista = FISTA(x_init=x_init , f=reg, g=fid)
fista.max_iteration = 2000
fista.update_objective_interval = 500
fista.run(2000, verbose=True)
@@ -196,7 +194,6 @@ if cvx_not_installable:
plt.title('Middle Line Profiles')
plt.show()
- #TODO what is the output of fista.objective, fista.loss
print('Primal Objective (CVX) {} '.format(obj.value))
print('Primal Objective (FISTA) {} '.format(fista.loss[1]))
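[Editor's note: the Poisson fidelity in this demo is F(u) = sum(u - g*log(u)) for u > 0, with gradient 1 - g/u. A small numerical gradient check of that expression, using illustrative arrays:]

import numpy as np

g = np.array([2.0, 1.0, 3.0])
u = np.array([1.5, 0.8, 2.5])
eps = 1e-6

F = lambda v: np.sum(v - g * np.log(v))
grad = 1.0 - g / u
# central difference along the all-ones direction equals the sum of partials
num = (F(u + eps) - F(u - eps)) / (2 * eps)
assert np.isclose(num, grad.sum(), atol=1e-4)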
diff --git a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TGV_Denoising.py b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TGV_Denoising.py
index 4d6da00..9dbcf3e 100755
--- a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TGV_Denoising.py
+++ b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TGV_Denoising.py
@@ -23,23 +23,21 @@
Total Generalised Variation (TGV) Denoising using PDHG algorithm:
-Problem: min_{x} \alpha * ||\nabla x - w||_{2,1} +
- \beta * || E w ||_{2,1} +
- fidelity
-
- where fidelity can be as follows depending on the noise characteristics
- of the data:
- * Norm2Squared \frac{1}{2} * || x - g ||_{2}^{2}
- * KullbackLeibler \int u - g * log(u) + Id_{u>0}
- * L1Norm ||u - g||_{1}
-
- \alpha: Regularization parameter
- \beta: Regularization parameter
-
+Problem: min_{u} \alpha * ||\nabla u - w||_{2,1} +
+ \beta * || E u ||_{2,1} +
+ Fidelity(u, g)
+
\nabla: Gradient operator
- E: Symmetrized Gradient operator
+ E: Symmetrized Gradient operator
+ \alpha: Regularization parameter
+ \beta: Regularization parameter
- g: Noisy Data with Salt & Pepper Noise
+ g: Noisy Data
+
+ Fidelity = 1) L2NormSquarred ( \frac{1}{2} * || u - g ||_{2}^{2} ) if Noise is Gaussian
+ 2) L1Norm ( ||u - g||_{1} )if Noise is Salt & Pepper
+ 3) Kullback Leibler (\int u - g * log(u) + Id_{u>0}) if Noise is Poisson
+
Method = 0 ( PDHG - split ) : K = [ \nabla, - Identity
ZeroOperator, E
@@ -48,10 +46,15 @@ Problem: min_{x} \alpha * ||\nabla x - w||_{2,1} +
Method = 1 (PDHG - explicit ): K = [ \nabla, - Identity
ZeroOperator, E ]
+
+ Default: TGV denoising
+ noise = Gaussian
+ Fidelity = L2NormSquarred
+ method = 0
"""
-from ccpi.framework import ImageData, ImageGeometry
+from ccpi.framework import ImageData
import numpy as np
import numpy
@@ -63,14 +66,14 @@ from ccpi.optimisation.operators import BlockOperator, Identity, \
Gradient, SymmetrizedGradient, ZeroOperator
from ccpi.optimisation.functions import ZeroFunction, L1Norm, \
MixedL21Norm, BlockFunction, KullbackLeibler, L2NormSquared
-import sys
+
+from ccpi.framework import TestData
+import os, sys
if int(numpy.version.version.split('.')[1]) > 12:
from skimage.util import random_noise
else:
from demoutil import random_noise
-
-
# user supplied input
if len(sys.argv) > 1:
which_noise = int(sys.argv[1])
@@ -81,35 +84,23 @@ print ("Applying {} noise")
if len(sys.argv) > 2:
method = sys.argv[2]
else:
- method = '1'
+ method = '0'
print ("method ", method)
-# Create phantom for TGV SaltPepper denoising
-
-N = 100
-
-x1 = np.linspace(0, int(N/2), N)
-x2 = np.linspace(int(N/2), 0., N)
-xv, yv = np.meshgrid(x1, x2)
-
-xv[int(N/4):int(3*N/4)-1, int(N/4):int(3*N/4)-1] = yv[int(N/4):int(3*N/4)-1, int(N/4):int(3*N/4)-1].T
-#data = xv
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-data = ig.allocate()
-data.fill(xv/xv.max())
+loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+data = loader.load(TestData.SHAPES)
+ig = data.geometry
ag = ig
# Create noisy data.
-# Apply Salt & Pepper noise
-# gaussian
-# poisson
noises = ['gaussian', 'poisson', 's&p']
noise = noises[which_noise]
if noise == 's&p':
n1 = random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2, seed=10)
elif noise == 'poisson':
- n1 = random_noise(data.as_array(), mode = noise, seed = 10)
+ scale = 5
+ n1 = random_noise( data.as_array()/scale, mode = noise, seed = 10)*scale
elif noise == 'gaussian':
n1 = random_noise(data.as_array(), mode = noise, seed = 10)
else:
@@ -128,23 +119,24 @@ plt.title('Noisy Data')
plt.colorbar()
plt.show()
-# Regularisation Parameters
+# Regularisation Parameter depending on the noise distribution
if noise == 's&p':
alpha = 0.8
elif noise == 'poisson':
- alpha = .1
-elif noise == 'gaussian':
alpha = .3
+elif noise == 'gaussian':
+ alpha = .2
-beta = numpy.sqrt(2)* alpha
+# TODO add ref why this choice
+beta = 2 * alpha
-# fidelity
+# Fidelity
if noise == 's&p':
f3 = L1Norm(b=noisy_data)
elif noise == 'poisson':
f3 = KullbackLeibler(noisy_data)
elif noise == 'gaussian':
- f3 = L2NormSquared(b=noisy_data)
+ f3 = 0.5 * L2NormSquared(b=noisy_data)
if method == '0':
@@ -162,7 +154,6 @@ if method == '0':
f1 = alpha * MixedL21Norm()
f2 = beta * MixedL21Norm()
- # f3 depends on the noise characteristics
f = BlockFunction(f1, f2, f3)
g = ZeroFunction()
@@ -183,28 +174,27 @@ else:
f = BlockFunction(f1, f2)
g = BlockFunction(f3, ZeroFunction())
-## Compute operator Norm
+# Compute operator Norm
normK = operator.norm()
-#
+
# Primal & dual stepsizes
sigma = 1
tau = 1/(sigma*normK**2)
-
# Setup and run the PDHG algorithm
pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 200
-pdhg.run(2000, verbose = True)
+pdhg.update_objective_interval = 100
+pdhg.run(2000)
-#%%
+# Show results
plt.figure(figsize=(20,5))
plt.subplot(1,4,1)
-plt.imshow(data.as_array())
+plt.imshow(data.subset(channel=0).as_array())
plt.title('Ground Truth')
plt.colorbar()
plt.subplot(1,4,2)
-plt.imshow(noisy_data.as_array())
+plt.imshow(noisy_data.subset(channel=0).as_array())
plt.title('Noisy Data')
plt.colorbar()
plt.subplot(1,4,3)
@@ -212,10 +202,81 @@ plt.imshow(pdhg.get_output()[0].as_array())
plt.title('TGV Reconstruction')
plt.colorbar()
plt.subplot(1,4,4)
-plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-plt.plot(np.linspace(0,N,N), pdhg.get_output()[0].as_array()[int(N/2),:], label = 'TGV reconstruction')
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), data.as_array()[int(ig.shape[0]/2),:], label = 'GTruth')
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output()[0].as_array()[int(ig.shape[0]/2),:], label = 'TGV reconstruction')
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
+#%% Check with CVX solution
+from ccpi.optimisation.operators import SparseFiniteDiff
+
+try:
+ from cvxpy import *
+ cvx_not_installable = True
+except ImportError:
+ cvx_not_installable = False
+
+if cvx_not_installable:
+
+ u = Variable(ig.shape)
+ w1 = Variable(ig.shape)
+ w2 = Variable(ig.shape)
+
+ # create TGV regulariser
+ DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
+ DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
+
+ regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u) - vec(w1), \
+ DY.matrix() * vec(u) - vec(w2)]), 2, axis = 0)) + \
+ beta * sum(norm(vstack([ DX.matrix().transpose() * vec(w1), DY.matrix().transpose() * vec(w2), \
+ 0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ), \
+ 0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ) ]), 2, axis = 0 ) )
+
+ constraints = []
+
+ # choose solver
+ if 'MOSEK' in installed_solvers():
+ solver = MOSEK
+ else:
+ solver = SCS
+
+ # fidelity
+ if noise == 's&p':
+ fidelity = pnorm( u - noisy_data.as_array(),1)
+ elif noise == 'poisson':
+ fidelity = sum(kl_div(noisy_data.as_array(), u))
+ solver = SCS
+ elif noise == 'gaussian':
+ fidelity = 0.5 * sum_squares(noisy_data.as_array() - u)
+
+ obj = Minimize( regulariser + fidelity)
+ prob = Problem(obj)
+ result = prob.solve(verbose = True, solver = solver)
+
+ diff_cvx = numpy.abs( pdhg.get_output()[0].as_array() - u.value )
+
+ plt.figure(figsize=(15,15))
+ plt.subplot(3,1,1)
+ plt.imshow(pdhg.get_output()[0].as_array())
+ plt.title('PDHG solution')
+ plt.colorbar()
+ plt.subplot(3,1,2)
+ plt.imshow(u.value)
+ plt.title('CVX solution')
+ plt.colorbar()
+ plt.subplot(3,1,3)
+ plt.imshow(diff_cvx)
+ plt.title('Difference')
+ plt.colorbar()
+ plt.show()
+
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output()[0].as_array()[int(ig.shape[0]/2),:], label = 'PDHG')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), u.value[int(ig.shape[0]/2),:], label = 'CVX')
+ plt.legend()
+ plt.title('Middle Line Profiles')
+ plt.show()
+
+ print('Primal Objective (CVX) {} '.format(obj.value))
+ print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0]))
\ No newline at end of file
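[Editor's note: both TGV terms above use MixedL21Norm, i.e. the sum over pixels of the Euclidean norm of the stacked components. A tiny NumPy sketch of that norm on a two-component field:]

import numpy as np

gx = np.array([[1.0, 0.0], [0.0, 2.0]])
gy = np.array([[0.0, 1.0], [2.0, 0.0]])

# ||(gx, gy)||_{2,1}: per-pixel 2-norm, summed over the image
tv = np.sum(np.sqrt(gx ** 2 + gy ** 2))
print(tv)                                   # 1 + 1 + 2 + 2 = 6.0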
diff --git a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising.py b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising.py
index 0f1effa..d848b9f 100755
--- a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising.py
+++ b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising.py
@@ -24,15 +24,18 @@
Total Variation Denoising using PDHG algorithm:
-Problem: min_x, x>0 \alpha * ||\nabla x||_{2,1} + ||x-g||_{1}
+Problem: min_{u}, \alpha * ||\nabla u||_{2,1} + Fidelity(u, g)
\alpha: Regularization parameter
\nabla: Gradient operator
- g: Noisy Data with Salt & Pepper Noise
-
-
+ g: Noisy Data
+
+ Fidelity = 1) L2NormSquarred ( \frac{1}{2} * || u - g ||_{2}^{2} ) if Noise is Gaussian
+ 2) L1Norm ( ||u - g||_{1} )if Noise is Salt & Pepper
+ 3) Kullback Leibler (\int u - g * log(u) + Id_{u>0}) if Noise is Poisson
+
Method = 0 ( PDHG - split ) : K = [ \nabla,
Identity]
@@ -40,10 +43,14 @@ Problem: min_x, x>0 \alpha * ||\nabla x||_{2,1} + ||x-g||_{1}
Method = 1 (PDHG - explicit ): K = \nabla
+ Default: ROF denoising
+ noise = Gaussian
+ Fidelity = L2NormSquarred
+ method = 0
+
+
"""
-from ccpi.framework import ImageData, ImageGeometry
-
import numpy as np
import numpy
import matplotlib.pyplot as plt
@@ -73,32 +80,27 @@ if len(sys.argv) > 2:
else:
method = '0'
print ("method ", method)
-# Create phantom for TV Salt & Pepper denoising
-N = 100
+
loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
-data = loader.load(TestData.SIMPLE_PHANTOM_2D, size=(N,N))
-data = loader.load(TestData.PEPPERS, size=(N,N))
+data = loader.load(TestData.SHAPES, size=(50,50))
ig = data.geometry
ag = ig
# Create noisy data.
-# Apply Salt & Pepper noise
-# gaussian
-# poisson
noises = ['gaussian', 'poisson', 's&p']
noise = noises[which_noise]
if noise == 's&p':
n1 = random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2)
elif noise == 'poisson':
- n1 = random_noise(data.as_array(), mode = noise, seed = 10)
+ scale = 5
+ n1 = random_noise( data.as_array()/scale, mode = noise, seed = 10)*scale
elif noise == 'gaussian':
n1 = random_noise(data.as_array(), mode = noise, seed = 10)
else:
raise ValueError('Unsupported Noise ', noise)
noisy_data = ig.allocate()
noisy_data.fill(n1)
-#noisy_data = ImageData(n1)
# Show Ground Truth and Noisy Data
plt.figure(figsize=(10,5))
@@ -112,8 +114,14 @@ plt.title('Noisy Data')
plt.colorbar()
plt.show()
-# Regularisation Parameter
-alpha = .2
+
+# Regularisation Parameter depending on the noise distribution
+if noise == 's&p':
+ alpha = 0.8
+elif noise == 'poisson':
+ alpha = 1
+elif noise == 'gaussian':
+ alpha = .3
# fidelity
if noise == 's&p':
@@ -121,30 +129,25 @@ if noise == 's&p':
elif noise == 'poisson':
f2 = KullbackLeibler(noisy_data)
elif noise == 'gaussian':
- f2 = L2NormSquared(b=noisy_data)
+ f2 = 0.5 * L2NormSquared(b=noisy_data)
if method == '0':
# Create operators
- op1 = Gradient(ig, correlation=Gradient.CORRELATION_SPACECHANNEL)
+ op1 = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
op2 = Identity(ig, ag)
# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2,1) )
# Create functions
- f1 = alpha * MixedL21Norm()
- #f2 = L1Norm(b = noisy_data)
- f = BlockFunction(f1, f2)
-
+ f = BlockFunction(alpha * MixedL21Norm(), f2)
g = ZeroFunction()
else:
- # Without the "Block Framework"
operator = Gradient(ig)
f = alpha * MixedL21Norm()
- #g = L1Norm(b = noisy_data)
g = f2
@@ -154,14 +157,15 @@ normK = operator.norm()
# Primal & dual stepsizes
sigma = 1
tau = 1/(sigma*normK**2)
-opt = {'niter':2000, 'memopt': True}
+
# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
+pdhg.update_objective_interval = 100
pdhg.run(2000)
+
if data.geometry.channels > 1:
plt.figure(figsize=(20,15))
for row in range(data.geometry.channels):
@@ -179,11 +183,12 @@ if data.geometry.channels > 1:
plt.title('TV Reconstruction')
plt.colorbar()
plt.subplot(3,4,4+row*4)
- plt.plot(np.linspace(0,N,N), data.subset(channel=row).as_array()[int(N/2),:], label = 'GTruth')
- plt.plot(np.linspace(0,N,N), pdhg.get_output().subset(channel=row).as_array()[int(N/2),:], label = 'TV reconstruction')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), data.subset(channel=row).as_array()[int(N/2),:], label = 'GTruth')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output().subset(channel=row).as_array()[int(N/2),:], label = 'TV reconstruction')
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
+
else:
plt.figure(figsize=(20,5))
plt.subplot(1,4,1)
@@ -199,8 +204,8 @@ else:
plt.title('TV Reconstruction')
plt.colorbar()
plt.subplot(1,4,4)
- plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
- plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'TV reconstruction')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), data.as_array()[int(ig.shape[0]/2),:], label = 'GTruth')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output().as_array()[int(ig.shape[0]/2),:], label = 'TV reconstruction')
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
@@ -233,8 +238,17 @@ if cvx_not_installable:
if 'MOSEK' in installed_solvers():
solver = MOSEK
else:
- solver = SCS
-
+ solver = SCS
+
+ # fidelity
+ if noise == 's&p':
+ fidelity = pnorm( u - noisy_data.as_array(),1)
+ elif noise == 'poisson':
+ fidelity = sum(kl_div(noisy_data.as_array(), u))
+ solver = SCS
+ elif noise == 'gaussian':
+ fidelity = 0.5 * sum_squares(noisy_data.as_array() - u)
+
obj = Minimize( regulariser + fidelity)
prob = Problem(obj)
result = prob.solve(verbose = True, solver = solver)
@@ -256,8 +270,8 @@ if cvx_not_installable:
plt.colorbar()
plt.show()
- plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
- plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output().as_array()[int(ig.shape[0]/2),:], label = 'PDHG')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), u.value[int(ig.shape[0]/2),:], label = 'CVX')
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
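[Editor's note: the demo and its CVX check pair each noise model with a fidelity term. A hypothetical helper, not part of the ccpi API, summarising that mapping on plain arrays:]

import numpy as np
from scipy.special import kl_div

def fidelity_value(u, g, noise):
    # illustrative only: mirrors the noise -> fidelity selection in the demo
    if noise == 'gaussian':
        return 0.5 * np.sum((u - g) ** 2)
    elif noise == 's&p':
        return np.sum(np.abs(u - g))
    elif noise == 'poisson':
        return np.sum(kl_div(g, u))
    raise ValueError('Unsupported Noise ' + noise)

u = np.array([0.5, 1.0])
g = np.array([0.4, 1.2])
print([fidelity_value(u, g, n) for n in ('gaussian', 's&p', 'poisson')])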
diff --git a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_2D_time.py b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_2D_time.py
new file mode 100644
index 0000000..14608db
--- /dev/null
+++ b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_2D_time.py
@@ -0,0 +1,192 @@
+#========================================================================
+# Copyright 2019 Science Technology Facilities Council
+# Copyright 2019 University of Manchester
+#
+# This work is part of the Core Imaging Library developed by Science Technology
+# Facilities Council and University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#=========================================================================
+
+from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
+
+import numpy as np
+import numpy
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import BlockOperator, Gradient, Identity
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+ MixedL21Norm, BlockFunction
+
+from ccpi.astra.ops import AstraProjectorMC
+
+import os
+import tomophantom
+from tomophantom import TomoP2D
+
+# Create phantom for TV 2D dynamic tomography
+
+model = 102 # note that the selected model is temporal (2D + time)
+N = 128 # set dimension of the phantom
+# one can specify an exact path to the parameters file
+# path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat'
+path = os.path.dirname(tomophantom.__file__)
+path_library2D = os.path.join(path, "Phantom2DLibrary.dat")
+#This will generate a N_size x N_size x Time frames phantom (2D + time)
+phantom_2Dt = TomoP2D.ModelTemporal(model, N, path_library2D)
+
+plt.close('all')
+plt.figure(1)
+plt.rcParams.update({'font.size': 21})
+plt.title('{}''{}'.format('2D+t phantom using model no.',model))
+for sl in range(0,np.shape(phantom_2Dt)[0]):
+ im = phantom_2Dt[sl,:,:]
+ plt.imshow(im, vmin=0, vmax=1)
+# plt.pause(.1)
+# plt.draw
+
+
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N, channels = np.shape(phantom_2Dt)[0])
+data = ImageData(phantom_2Dt, geometry=ig)
+ag = ig
+
+# Create Noisy data. Add Gaussian noise
+np.random.seed(10)
+noisy_data = ImageData( data.as_array() + np.random.normal(0, 0.25, size=ig.shape) )
+
+tindex = [3, 6, 10]
+
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(10, 10))
+plt.subplot(1,3,1)
+plt.imshow(noisy_data.as_array()[tindex[0],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[0]))
+plt.subplot(1,3,2)
+plt.imshow(noisy_data.as_array()[tindex[1],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[1]))
+plt.subplot(1,3,3)
+plt.imshow(noisy_data.as_array()[tindex[2],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[2]))
+
+fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
+ wspace=0.02, hspace=0.02)
+
+plt.show()
+
+#%%
+# Regularisation Parameter
+alpha = 0.3
+
+# Create operators
+#op1 = Gradient(ig)
+op1 = Gradient(ig, correlation='Space')
+op2 = Gradient(ig, correlation='SpaceChannels')
+
+op3 = Identity(ig, ag)
+
+# Create BlockOperator
+operator1 = BlockOperator(op1, op3, shape=(2,1) )
+operator2 = BlockOperator(op2, op3, shape=(2,1) )
+
+# Create functions
+
+f1 = alpha * MixedL21Norm()
+f2 = 0.5 * L2NormSquared(b = noisy_data)
+f = BlockFunction(f1, f2)
+
+g = ZeroFunction()
+
+# Compute operator Norm
+normK1 = operator1.norm()
+normK2 = operator2.norm()
+
+#%%
+# Primal & dual stepsizes
+sigma1 = 1
+tau1 = 1/(sigma1*normK1**2)
+
+sigma2 = 1
+tau2 = 1/(sigma2*normK2**2)
+
+# Setup and run the PDHG algorithm
+pdhg1 = PDHG(f=f,g=g,operator=operator1, tau=tau1, sigma=sigma1)
+pdhg1.max_iteration = 2000
+pdhg1.update_objective_interval = 200
+pdhg1.run(2000)
+
+# Setup and run the PDHG algorithm
+pdhg2 = PDHG(f=f,g=g,operator=operator2, tau=tau2, sigma=sigma2)
+pdhg2.max_iteration = 2000
+pdhg2.update_objective_interval = 200
+pdhg2.run(2000)
+
+
+#%%
+
+tindex = [3, 6, 10]
+fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 8))
+
+plt.subplot(3,3,1)
+plt.imshow(phantom_2Dt[tindex[0],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[0]))
+
+plt.subplot(3,3,2)
+plt.imshow(phantom_2Dt[tindex[1],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[1]))
+
+plt.subplot(3,3,3)
+plt.imshow(phantom_2Dt[tindex[2],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[2]))
+
+plt.subplot(3,3,4)
+plt.imshow(pdhg1.get_output().as_array()[tindex[0],:,:])
+plt.axis('off')
+plt.subplot(3,3,5)
+plt.imshow(pdhg1.get_output().as_array()[tindex[1],:,:])
+plt.axis('off')
+plt.subplot(3,3,6)
+plt.imshow(pdhg1.get_output().as_array()[tindex[2],:,:])
+plt.axis('off')
+
+
+plt.subplot(3,3,7)
+plt.imshow(pdhg2.get_output().as_array()[tindex[0],:,:])
+plt.axis('off')
+plt.subplot(3,3,8)
+plt.imshow(pdhg2.get_output().as_array()[tindex[1],:,:])
+plt.axis('off')
+plt.subplot(3,3,9)
+plt.imshow(pdhg2.get_output().as_array()[tindex[2],:,:])
+plt.axis('off')
+
+#%%
+im = plt.imshow(pdhg1.get_output().as_array()[tindex[0],:,:])
+
+
+fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
+ wspace=0.02, hspace=0.02)
+
+cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
+cbar = fig.colorbar(im, cax=cb_ax)
+
+
+plt.show()
+
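[Editor's note: the two operators above differ only in whether the finite differences include the time (channel) axis. A NumPy sketch of that distinction for a (time, y, x) array; the ccpi Gradient may use a different discretisation, so this is illustrative only:]

import numpy as np

v = np.random.default_rng(3).random((4, 8, 8))   # (time, y, x)

# forward differences with a Neumann-style boundary (last difference is zero)
dy = np.diff(v, axis=1, append=v[:, -1:, :])
dx = np.diff(v, axis=2, append=v[:, :, -1:])
dt = np.diff(v, axis=0, append=v[-1:, :, :])

tv_space = np.sum(np.sqrt(dx ** 2 + dy ** 2))                  # 'Space' correlation
tv_space_time = np.sum(np.sqrt(dx ** 2 + dy ** 2 + dt ** 2))   # 'SpaceChannels'
print(tv_space, tv_space_time)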
diff --git a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_Gaussian_3D.py b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_Gaussian_3D.py
new file mode 100644
index 0000000..3d91bf9
--- /dev/null
+++ b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_TV_Denoising_Gaussian_3D.py
@@ -0,0 +1,152 @@
+#========================================================================
+# Copyright 2019 Science Technology Facilities Council
+# Copyright 2019 University of Manchester
+#
+# This work is part of the Core Imaging Library developed by Science Technology
+# Facilities Council and University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#=========================================================================
+"""
+
+Total Variation (3D) Denoising using PDHG algorithm:
+
+
+Problem: min_{x} \alpha * ||\nabla x||_{2,1} + \frac{1}{2} * || x - g ||_{2}^{2}
+
+ \alpha: Regularization parameter
+
+ \nabla: Gradient operator
+
+ g: 3D Noisy Data with Gaussian Noise
+
+ Method = 0 ( PDHG - split ) : K = [ \nabla,
+ Identity]
+
+
+ Method = 1 (PDHG - explicit ): K = \nabla
+
+"""
+
+from ccpi.framework import ImageData, ImageGeometry
+
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+ MixedL21Norm, BlockFunction
+
+from skimage.util import random_noise
+
+# Create phantom for TV Gaussian denoising
+import timeit
+import os
+from tomophantom import TomoP3D
+import tomophantom
+
+# Create a phantom from Tomophantom
+print ("Building 3D phantom using TomoPhantom software")
+tic=timeit.default_timer()
+model = 13 # select a model number from the library
+N = 64 # Define phantom dimensions using a scalar value (cubic phantom)
+path = os.path.dirname(tomophantom.__file__)
+path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
+
+#This will generate a N x N x N phantom (3D)
+phantom_tm = TomoP3D.Model(model, N, path_library3D)
+
+# Create noisy data. Apply Gaussian noise
+ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N, voxel_num_z=N)
+ag = ig
+n1 = random_noise(phantom_tm, mode = 'gaussian', mean=0, var = 0.001, seed=10)
+noisy_data = ImageData(n1)
+
+# Show results
+sliceSel = int(0.5*N)
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
+plt.imshow(noisy_data.as_array()[sliceSel,:,:],vmin=0, vmax=1)
+plt.title('Axial View')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(noisy_data.as_array()[:,sliceSel,:],vmin=0, vmax=1)
+plt.title('Coronal View')
+plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow(noisy_data.as_array()[:,:,sliceSel],vmin=0, vmax=1)
+plt.title('Sagittal View')
+plt.colorbar()
+plt.show()
+
+# Regularisation Parameter
+alpha = 0.05
+
+# Setup and run the PDHG algorithm
+operator = Gradient(ig)
+f = alpha * MixedL21Norm()
+g = 0.5 * L2NormSquared(b = noisy_data)
+
+normK = operator.norm()
+
+sigma = 1
+tau = 1/(sigma*normK**2)
+
+pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+pdhg.max_iteration = 2000
+pdhg.update_objective_interval = 200
+pdhg.run(2000, verbose = True)
+
+# Show results
+fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 8))
+fig.suptitle('TV Reconstruction',fontsize=20)
+
+plt.subplot(2,3,1)
+plt.imshow(noisy_data.as_array()[sliceSel,:,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.title('Axial View')
+
+plt.subplot(2,3,2)
+plt.imshow(noisy_data.as_array()[:,sliceSel,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.title('Coronal View')
+
+plt.subplot(2,3,3)
+plt.imshow(noisy_data.as_array()[:,:,sliceSel],vmin=0, vmax=1)
+plt.axis('off')
+plt.title('Sagittal View')
+
+
+plt.subplot(2,3,4)
+plt.imshow(pdhg.get_output().as_array()[sliceSel,:,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.subplot(2,3,5)
+plt.imshow(pdhg.get_output().as_array()[:,sliceSel,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.subplot(2,3,6)
+plt.imshow(pdhg.get_output().as_array()[:,:,sliceSel],vmin=0, vmax=1)
+plt.axis('off')
+im = plt.imshow(pdhg.get_output().as_array()[:,:,sliceSel],vmin=0, vmax=1)
+
+
+fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
+ wspace=0.02, hspace=0.02)
+
+cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
+cbar = fig.colorbar(im, cax=cb_ax)
+
+
+plt.show()
+
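[Editor's note: here g = 0.5*L2NormSquared(b=noisy_data) enters PDHG through its proximal map, which has the closed form prox_{tau g}(x) = (x + tau*b)/(1 + tau). A quick check of that formula against the definition:]

import numpy as np

b = np.array([1.0, 2.0])
x = np.array([0.0, 4.0])
tau = 0.5

prox = (x + tau * b) / (1 + tau)
# stationarity of u -> 0.5*||u - b||^2 + ||u - x||^2/(2*tau) at the prox
grad_at_prox = (prox - b) + (prox - x) / tau
print(prox, np.allclose(grad_at_prox, 0.0))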
diff --git a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_Tikhonov_Denoising.py b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_Tikhonov_Denoising.py
index 5aa334d..78d4980 100644
--- a/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_Tikhonov_Denoising.py
+++ b/Wrappers/Python/demos/PDHG_examples/GatherAll/PDHG_Tikhonov_Denoising.py
@@ -18,26 +18,37 @@
# limitations under the License.
#
#=========================================================================
+
"""
-Tikhonov Denoising using PDHG algorithm:
+``Tikhonov`` Regularization Denoising using PDHG algorithm:
-Problem: min_{x} \alpha * ||\nabla x||_{2}^{2} + \frac{1}{2} * || x - g ||_{2}^{2}
+Problem: min_{u}, \alpha * ||\nabla u||_{2,2} + Fidelity(u, g)
\alpha: Regularization parameter
\nabla: Gradient operator
- g: Noisy Data with Gaussian Noise
+ g: Noisy Data
+ Fidelity = 1) L2NormSquarred ( \frac{1}{2} * || u - g ||_{2}^{2} ) if Noise is Gaussian
+ 2) L1Norm ( ||u - g||_{1} )if Noise is Salt & Pepper
+ 3) Kullback Leibler (\int u - g * log(u) + Id_{u>0}) if Noise is Poisson
+
Method = 0 ( PDHG - split ) : K = [ \nabla,
Identity]
- Method = 1 (PDHG - explicit ): K = \nabla
-
-"""
+ Method = 1 (PDHG - explicit ): K = \nabla
+
+
+ Default: Tikhonov denoising
+ noise = Gaussian
+ Fidelity = L2NormSquarred
+ method = 0
+
+"""
from ccpi.framework import ImageData, TestData
@@ -62,7 +73,7 @@ else:
if len(sys.argv) > 1:
which_noise = int(sys.argv[1])
else:
- which_noise = 0
+ which_noise = 2
print ("Applying {} noise")
if len(sys.argv) > 2:
@@ -71,39 +82,25 @@ else:
method = '0'
print ("method ", method)
-# Create phantom for TV Salt & Pepper denoising
-N = 100
-
loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
-data = loader.load(TestData.SIMPLE_PHANTOM_2D, size=(N,N))
+data = loader.load(TestData.SHAPES)
ig = data.geometry
ag = ig
-# Create noisy data. Apply Salt & Pepper noise
# Create noisy data.
-# Apply Salt & Pepper noise
-# gaussian
-# poisson
noises = ['gaussian', 'poisson', 's&p']
noise = noises[which_noise]
if noise == 's&p':
n1 = random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2)
elif noise == 'poisson':
- n1 = random_noise(data.as_array(), mode = noise, seed = 10)
+ scale = 5
+ n1 = random_noise( data.as_array()/scale, mode = noise, seed = 10)*scale
elif noise == 'gaussian':
n1 = random_noise(data.as_array(), mode = noise, seed = 10)
else:
raise ValueError('Unsupported Noise ', noise)
noisy_data = ImageData(n1)
-# fidelity
-if noise == 's&p':
- f2 = L1Norm(b=noisy_data)
-elif noise == 'poisson':
- f2 = KullbackLeibler(noisy_data)
-elif noise == 'gaussian':
- f2 = 0.5 * L2NormSquared(b=noisy_data)
-
# Show Ground Truth and Noisy Data
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
@@ -116,15 +113,21 @@ plt.title('Noisy Data')
plt.colorbar()
plt.show()
-
-# Regularisation Parameter
-# no edge preservation alpha is big
+# Regularisation Parameter depending on the noise distribution
if noise == 's&p':
- alpha = 8.
+ alpha = 20
elif noise == 'poisson':
- alpha = 8.
+ alpha = 10
elif noise == 'gaussian':
- alpha = 8.
+ alpha = 5
+
+# fidelity
+if noise == 's&p':
+ f2 = L1Norm(b=noisy_data)
+elif noise == 'poisson':
+ f2 = KullbackLeibler(noisy_data)
+elif noise == 'gaussian':
+ f2 = 0.5 * L2NormSquared(b=noisy_data)
if method == '0':
@@ -135,21 +138,14 @@ if method == '0':
# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2,1) )
- # Create functions
-
+ # Create functions
f1 = alpha * L2NormSquared()
- # f2 must change according to noise
- #f2 = 0.5 * L2NormSquared(b = noisy_data)
f = BlockFunction(f1, f2)
g = ZeroFunction()
else:
- # Without the "Block Framework"
operator = Gradient(ig)
- f = alpha * L2NormSquared()
- # g must change according to noise
- #g = 0.5 * L2NormSquared(b = noisy_data)
g = f2
@@ -159,13 +155,12 @@ normK = operator.norm()
# Primal & dual stepsizes
sigma = 1
tau = 1/(sigma*normK**2)
-opt = {'niter':2000, 'memopt': True}
# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
-pdhg.run(1500)
+pdhg.update_objective_interval = 100
+pdhg.run(2000)
plt.figure(figsize=(20,5))
@@ -179,11 +174,11 @@ plt.title('Noisy Data')
plt.colorbar()
plt.subplot(1,4,3)
plt.imshow(pdhg.get_output().as_array())
-plt.title('Tikhonov Reconstruction')
+plt.title('TV Reconstruction')
plt.colorbar()
plt.subplot(1,4,4)
-plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'TV reconstruction')
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), data.as_array()[int(ig.shape[0]/2),:], label = 'GTruth')
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output().as_array()[int(ig.shape[0]/2),:], label = 'Tikhonov reconstruction')
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
@@ -200,7 +195,6 @@ try:
except ImportError:
cvx_not_installable = False
-
if cvx_not_installable:
##Construct problem
@@ -212,13 +206,21 @@ if cvx_not_installable:
# Define Total Variation as a regulariser
regulariser = alpha * sum_squares(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
- fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
# choose solver
if 'MOSEK' in installed_solvers():
solver = MOSEK
else:
- solver = SCS
+ solver = SCS
+
+ # fidelity
+ if noise == 's&p':
+ fidelity = pnorm( u - noisy_data.as_array(),1)
+ elif noise == 'poisson':
+ fidelity = sum(kl_div(noisy_data.as_array(), u))
+ solver = SCS
+ elif noise == 'gaussian':
+ fidelity = 0.5 * sum_squares(noisy_data.as_array() - u)
obj = Minimize( regulariser + fidelity)
prob = Problem(obj)
@@ -241,16 +243,16 @@ if cvx_not_installable:
plt.colorbar()
plt.show()
- plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
- plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output().as_array()[int(ig.shape[0]/2),:], label = 'PDHG')
+ plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), u.value[int(ig.shape[0]/2),:], label = 'CVX')
plt.legend()
plt.title('Middle Line Profiles')
plt.show()
print('Primal Objective (CVX) {} '.format(obj.value))
print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0]))
-
-
-
-
-
+#
+#
+#
+#
+#
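[Editor's note: for the Gaussian case, alpha*||grad u||_2^2 + 0.5*||u - g||_2^2 has a closed-form minimiser, which is a useful sanity check next to the PDHG and CVX numbers. A small dense 1D sketch with an illustrative forward-difference matrix and Neumann boundary:]

import numpy as np

n, alpha = 50, 5.0
g = np.sign(np.linspace(-1, 1, n)) + 0.1 * np.random.default_rng(4).standard_normal(n)

D = np.eye(n, k=1) - np.eye(n)             # forward differences
D[-1, :] = 0.0                             # Neumann boundary: last row is zero

# normal equations of min_u alpha*||D u||^2 + 0.5*||u - g||^2
u = np.linalg.solve(np.eye(n) + 2 * alpha * D.T @ D, g)
print(np.round(u[:5], 3))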
diff --git a/Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_2D_time.py b/Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_2D_time.py
new file mode 100644
index 0000000..14608db
--- /dev/null
+++ b/Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_2D_time.py
@@ -0,0 +1,192 @@
+#========================================================================
+# Copyright 2019 Science Technology Facilities Council
+# Copyright 2019 University of Manchester
+#
+# This work is part of the Core Imaging Library developed by Science Technology
+# Facilities Council and University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#=========================================================================
+
+from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
+
+import numpy as np
+import numpy
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import BlockOperator, Gradient, Identity
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+ MixedL21Norm, BlockFunction
+
+from ccpi.astra.ops import AstraProjectorMC
+
+import os
+import tomophantom
+from tomophantom import TomoP2D
+
+# Create phantom for TV 2D dynamic tomography
+
+model = 102 # note that the selected model is temporal (2D + time)
+N = 128 # set dimension of the phantom
+# one can specify an exact path to the parameters file
+# path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat'
+path = os.path.dirname(tomophantom.__file__)
+path_library2D = os.path.join(path, "Phantom2DLibrary.dat")
+#This will generate a N_size x N_size x Time frames phantom (2D + time)
+phantom_2Dt = TomoP2D.ModelTemporal(model, N, path_library2D)
+
+plt.close('all')
+plt.figure(1)
+plt.rcParams.update({'font.size': 21})
+plt.title('{}''{}'.format('2D+t phantom using model no.',model))
+for sl in range(0,np.shape(phantom_2Dt)[0]):
+ im = phantom_2Dt[sl,:,:]
+ plt.imshow(im, vmin=0, vmax=1)
+# plt.pause(.1)
+# plt.draw
+
+
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N, channels = np.shape(phantom_2Dt)[0])
+data = ImageData(phantom_2Dt, geometry=ig)
+ag = ig
+
+# Create Noisy data. Add Gaussian noise
+np.random.seed(10)
+noisy_data = ImageData( data.as_array() + np.random.normal(0, 0.25, size=ig.shape) )
+
+tindex = [3, 6, 10]
+
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(10, 10))
+plt.subplot(1,3,1)
+plt.imshow(noisy_data.as_array()[tindex[0],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[0]))
+plt.subplot(1,3,2)
+plt.imshow(noisy_data.as_array()[tindex[1],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[1]))
+plt.subplot(1,3,3)
+plt.imshow(noisy_data.as_array()[tindex[2],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[2]))
+
+fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
+ wspace=0.02, hspace=0.02)
+
+plt.show()
+
+#%%
+# Regularisation Parameter
+alpha = 0.3
+
+# Create operators
+#op1 = Gradient(ig)
+op1 = Gradient(ig, correlation='Space')
+op2 = Gradient(ig, correlation='SpaceChannels')
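+# op1 regularises each time frame independently in space, while op2 also couples
+# neighbouring frames along the channel (time) direction.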
+
+op3 = Identity(ig, ag)
+
+# Create BlockOperator
+operator1 = BlockOperator(op1, op3, shape=(2,1) )
+operator2 = BlockOperator(op2, op3, shape=(2,1) )
+
+# Create functions
+
+f1 = alpha * MixedL21Norm()
+f2 = 0.5 * L2NormSquared(b = noisy_data)
+f = BlockFunction(f1, f2)
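+# The ordering of (f1, f2) matches the (Gradient, Identity) rows of the BlockOperators above.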
+
+g = ZeroFunction()
+
+# Compute operator Norm
+normK1 = operator1.norm()
+normK2 = operator2.norm()
+
+#%%
+# Primal & dual stepsizes
+sigma1 = 1
+tau1 = 1/(sigma1*normK1**2)
+
+sigma2 = 1
+tau2 = 1/(sigma2*normK2**2)
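+# Both choices satisfy the usual PDHG step-size condition sigma*tau*||K||^2 <= 1.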
+
+# Setup and run the PDHG algorithm
+pdhg1 = PDHG(f=f,g=g,operator=operator1, tau=tau1, sigma=sigma1)
+pdhg1.max_iteration = 2000
+pdhg1.update_objective_interval = 200
+pdhg1.run(2000)
+
+# Setup and run the PDHG algorithm
+pdhg2 = PDHG(f=f,g=g,operator=operator2, tau=tau2, sigma=sigma2)
+pdhg2.max_iteration = 2000
+pdhg2.update_objective_interval = 200
+pdhg2.run(2000)
+
+
+#%%
+
+tindex = [3, 6, 10]
+fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(10, 8))
+
+plt.subplot(3,3,1)
+plt.imshow(phantom_2Dt[tindex[0],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[0]))
+
+plt.subplot(3,3,2)
+plt.imshow(phantom_2Dt[tindex[1],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[1]))
+
+plt.subplot(3,3,3)
+plt.imshow(phantom_2Dt[tindex[2],:,:])
+plt.axis('off')
+plt.title('Time {}'.format(tindex[2]))
+
+plt.subplot(3,3,4)
+plt.imshow(pdhg1.get_output().as_array()[tindex[0],:,:])
+plt.axis('off')
+plt.subplot(3,3,5)
+plt.imshow(pdhg1.get_output().as_array()[tindex[1],:,:])
+plt.axis('off')
+plt.subplot(3,3,6)
+plt.imshow(pdhg1.get_output().as_array()[tindex[2],:,:])
+plt.axis('off')
+
+
+plt.subplot(3,3,7)
+plt.imshow(pdhg2.get_output().as_array()[tindex[0],:,:])
+plt.axis('off')
+plt.subplot(3,3,8)
+plt.imshow(pdhg2.get_output().as_array()[tindex[1],:,:])
+plt.axis('off')
+plt.subplot(3,3,9)
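+# keep the handle of this final image so a single colorbar can be attached to the figure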
+im = plt.imshow(pdhg2.get_output().as_array()[tindex[2],:,:])
+plt.axis('off')
+
+#%%
+
+
+fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
+ wspace=0.02, hspace=0.02)
+
+cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
+cbar = fig.colorbar(im, cax=cb_ax)
+
+
+plt.show()
+
diff --git a/Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_Gaussian_3D.py b/Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_Gaussian_3D.py
new file mode 100644
index 0000000..03dc2ef
--- /dev/null
+++ b/Wrappers/Python/demos/PDHG_examples/MultiChannel/PDHG_TV_Denoising_Gaussian_3D.py
@@ -0,0 +1,181 @@
+#========================================================================
+# Copyright 2019 Science Technology Facilities Council
+# Copyright 2019 University of Manchester
+#
+# This work is part of the Core Imaging Library developed by Science Technology
+# Facilities Council and University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#=========================================================================
+"""
+
+Total Variation (3D) Denoising using PDHG algorithm:
+
+
+Problem: min_{x} \alpha * ||\nabla x||_{2,1} + \frac{1}{2} * || x - g ||_{2}^{2}
+
+ \alpha: Regularization parameter
+
+ \nabla: Gradient operator
+
+ g: Noisy Data with Gaussian Noise
+
+ Method = 0 ( PDHG - split ) : K = [ \nabla,
+ Identity]
+
+
+ Method = 1 (PDHG - explicit ): K = \nabla
+
+"""
+
+from ccpi.framework import ImageData, ImageGeometry
+
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+ MixedL21Norm, BlockFunction
+
+from skimage.util import random_noise
+
+# Create phantom for TV Gaussian denoising
+import timeit
+import os
+from tomophantom import TomoP3D
+import tomophantom
+
+print ("Building 3D phantom using TomoPhantom software")
+tic=timeit.default_timer()
+model = 13 # select a model number from the library
+N = 64 # Define phantom dimensions using a scalar value (cubic phantom)
+path = os.path.dirname(tomophantom.__file__)
+path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
+
+#This will generate a N x N x N phantom (3D)
+phantom_tm = TomoP3D.Model(model, N, path_library3D)
+
+#%%
+
+# Create noisy data. Add Gaussian noise
+ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N, voxel_num_z=N)
+ag = ig
+n1 = random_noise(phantom_tm, mode = 'gaussian', mean=0, var = 0.001, seed=10)
+noisy_data = ImageData(n1)
+
+sliceSel = int(0.5*N)
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
+plt.imshow(noisy_data.as_array()[sliceSel,:,:],vmin=0, vmax=1)
+plt.title('Axial View')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(noisy_data.as_array()[:,sliceSel,:],vmin=0, vmax=1)
+plt.title('Coronal View')
+plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow(noisy_data.as_array()[:,:,sliceSel],vmin=0, vmax=1)
+plt.title('Sagittal View')
+plt.colorbar()
+plt.show()
+
+#%%
+
+# Regularisation Parameter
+alpha = 0.05
+
+method = '0'
+
+if method == '0':
+
+ # Create operators
+ op1 = Gradient(ig)
+ op2 = Identity(ig, ag)
+
+ # Create BlockOperator
+ operator = BlockOperator(op1, op2, shape=(2,1) )
+
+ # Create functions
+
+ f1 = alpha * MixedL21Norm()
+ f2 = 0.5 * L2NormSquared(b = noisy_data)
+ f = BlockFunction(f1, f2)
+
+ g = ZeroFunction()
+
+else:
+
+ # Without the "Block Framework"
+ operator = Gradient(ig)
+ f = alpha * MixedL21Norm()
+ g = 0.5 * L2NormSquared(b = noisy_data)
+
+
+# Compute operator Norm
+normK = operator.norm()
+
+# Primal & dual stepsizes
+sigma = 1
+tau = 1/(sigma*normK**2)
+
+# Setup and run the PDHG algorithm
+pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+pdhg.max_iteration = 2000
+pdhg.update_objective_interval = 200
+pdhg.run(2000, verbose = True)
+
+
+#%%
+fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 8))
+fig.suptitle('TV Reconstruction',fontsize=20)
+
+
+plt.subplot(2,3,1)
+plt.imshow(noisy_data.as_array()[sliceSel,:,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.title('Axial View')
+
+plt.subplot(2,3,2)
+plt.imshow(noisy_data.as_array()[:,sliceSel,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.title('Coronal View')
+
+plt.subplot(2,3,3)
+plt.imshow(noisy_data.as_array()[:,:,sliceSel],vmin=0, vmax=1)
+plt.axis('off')
+plt.title('Sagittal View')
+
+
+plt.subplot(2,3,4)
+plt.imshow(pdhg.get_output().as_array()[sliceSel,:,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.subplot(2,3,5)
+plt.imshow(pdhg.get_output().as_array()[:,sliceSel,:],vmin=0, vmax=1)
+plt.axis('off')
+plt.subplot(2,3,6)
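+# keep a handle to this image for the shared colorbar added below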
+im = plt.imshow(pdhg.get_output().as_array()[:,:,sliceSel],vmin=0, vmax=1)
+plt.axis('off')
+
+
+fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
+ wspace=0.02, hspace=0.02)
+
+cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
+cbar = fig.colorbar(im, cax=cb_ax)
+
+
+plt.show()
+
diff --git a/Wrappers/Python/demos/PDHG_examples/PDHG_TV_Color_Denoising.py b/Wrappers/Python/demos/PDHG_examples/PDHG_TV_Color_Denoising.py
new file mode 100644
index 0000000..ddf5ace
--- /dev/null
+++ b/Wrappers/Python/demos/PDHG_examples/PDHG_TV_Color_Denoising.py
@@ -0,0 +1,115 @@
+#========================================================================
+# Copyright 2019 Science Technology Facilities Council
+# Copyright 2019 University of Manchester
+#
+# This work is part of the Core Imaging Library developed by Science Technology
+# Facilities Council and University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#=========================================================================
+
+"""
+Total Variation Denoising using PDHG algorithm:
+Problem: min_x, x>0 \alpha * ||\nabla x||_{2,1} + ||x-g||_{1}
+ \alpha: Regularization parameter
+
+ \nabla: Gradient operator
+
+ g: Noisy Data with Salt & Pepper Noise
+
+
+ Method = 0 ( PDHG - split ) : K = [ \nabla,
+ Identity]
+
+
+ Method = 1 (PDHG - explicit ): K = \nabla
+
+
+"""
+
+import numpy
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import Gradient, BlockOperator, FiniteDiff
+from ccpi.optimisation.functions import MixedL21Norm, MixedL11Norm, L2NormSquared, BlockFunction, L1Norm
+from ccpi.framework import TestData, ImageGeometry
+import os, sys
+if int(numpy.version.version.split('.')[1]) > 12:
+ from skimage.util import random_noise
+else:
+ from demoutil import random_noise
+
+loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+data = loader.load(TestData.PEPPERS, size=(256,256))
+ig = data.geometry
+ag = ig
+
+# Create noisy data.
+n1 = random_noise(data.as_array(), mode = 'gaussian', var = 0.15, seed = 50)
+noisy_data = ig.allocate()
+noisy_data.fill(n1)
+
+# Show Ground Truth and Noisy Data
+plt.figure(figsize=(10,5))
+plt.subplot(1,2,1)
+plt.imshow(data.as_array())
+plt.title('Ground Truth')
+plt.colorbar()
+plt.subplot(1,2,2)
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy Data')
+plt.colorbar()
+plt.show()
+
+# Regularisation parameter
+alpha = 5
+
+# Setup the spatial gradient operator, TV regulariser and L2 data fidelity
+operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
+f1 = alpha * MixedL21Norm()
+g = 0.5 * L2NormSquared(b=noisy_data)
+
+# Compute operator Norm
+normK = operator.norm()
+
+# Primal & dual stepsizes
+sigma = 1
+tau = 1/(sigma*normK**2)
+
+# Setup and run the PDHG algorithm
+pdhg1 = PDHG(f=f1,g=g,operator=operator, tau=tau, sigma=sigma)
+pdhg1.max_iteration = 2000
+pdhg1.update_objective_interval = 200
+pdhg1.run(1000)
+
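+# A second reconstruction using the channel-wise MixedL11Norm imported above could be
+# compared in the same figure. A commented sketch is left here, assuming MixedL11Norm
+# supports the same scaling and proximal interface as MixedL21Norm:
+# f2 = alpha * MixedL11Norm()
+# pdhg2 = PDHG(f=f2, g=g, operator=operator, tau=tau, sigma=sigma)
+# pdhg2.max_iteration = 2000
+# pdhg2.update_objective_interval = 200
+# pdhg2.run(1000)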
+
+# Show results
+plt.figure(figsize=(10,10))
+plt.subplot(2,2,1)
+plt.imshow(data.as_array())
+plt.title('Ground Truth')
+plt.colorbar()
+plt.subplot(2,2,2)
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy Data')
+plt.colorbar()
+plt.subplot(2,2,3)
+plt.imshow(pdhg1.get_output().as_array())
+plt.title('TV Reconstruction')
+plt.colorbar()
+# The fourth panel is left commented out until a second reconstruction (pdhg2,
+# e.g. the MixedL11Norm sketch above) has actually been computed:
+#plt.subplot(2,2,4)
+#plt.imshow(pdhg2.get_output().as_array())
+#plt.title('TV Reconstruction')
+#plt.colorbar()
+plt.show()
+
diff --git a/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TGV_Tomo2D.py b/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TGV_Tomo2D.py
index bc0081f..422deb9 100644
--- a/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TGV_Tomo2D.py
+++ b/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TGV_Tomo2D.py
@@ -85,15 +85,8 @@ data = ImageData(data/data.max())
ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
ag = ig
-# Create noisy data. Add Gaussian noise
-scale = 0.5
-eta = 0
-n1 = scale * np.random.poisson(eta + sin.as_array()/scale)
-
-noisy_data = AcquisitionData(n1, ag)
#Create Acquisition Data and apply poisson noise
-
detectors = N
angles = np.linspace(0, np.pi, N)
diff --git a/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TV_Tomo2D_poisson.py b/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TV_Tomo2D_poisson.py
index 95fe2c1..e7e33cb 100644
--- a/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TV_Tomo2D_poisson.py
+++ b/Wrappers/Python/demos/PDHG_examples/Tomo/PDHG_TV_Tomo2D_poisson.py
@@ -84,7 +84,7 @@ sin = Aop.direct(data)
# Create noisy data. Apply Poisson noise
scale = 0.5
eta = 0
-n1 = scale * np.random.poisson(eta + sin.as_array()/scale)
+n1 = np.random.poisson(eta + sin.as_array())
noisy_data = AcquisitionData(n1, ag)
@@ -100,6 +100,8 @@ plt.title('Noisy Data')
plt.colorbar()
plt.show()
+
+#%%
# Regularisation Parameter
alpha = 2
@@ -157,72 +159,72 @@ plt.show()
#%% Check with CVX solution
#
-from ccpi.optimisation.operators import SparseFiniteDiff
-import astra
-import numpy
-
-try:
- from cvxpy import *
- cvx_not_installable = True
-except ImportError:
- cvx_not_installable = False
-
-
-if cvx_not_installable:
-
-
- ##Construct problem
- u = Variable(N*N)
- q = Variable()
-
- DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
- DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
-
- # create matrix representation for Astra operator
-
- vol_geom = astra.create_vol_geom(N, N)
- proj_geom = astra.create_proj_geom('parallel', 1.0, detectors, angles)
-
- proj_id = astra.create_projector('strip', proj_geom, vol_geom)
-
- matrix_id = astra.projector.matrix(proj_id)
-
- ProjMat = astra.matrix.get(matrix_id)
-
- tmp = noisy_data.as_array().ravel()
-
- fidelity = sum(kl_div(tmp, ProjMat * u))
-
- constraints = [q>=fidelity, u>=0]
- solver = SCS
- obj = Minimize( regulariser + q)
- prob = Problem(obj, constraints)
- result = prob.solve(verbose = True, solver = solver)
-
- diff_cvx = np.abs(pdhg.get_output().as_array() - np.reshape(u.value, (N, N)))
-
- plt.figure(figsize=(15,15))
- plt.subplot(3,1,1)
- plt.imshow(pdhg.get_output().as_array())
- plt.title('PDHG solution')
- plt.colorbar()
- plt.subplot(3,1,2)
- plt.imshow(np.reshape(u.value, (N, N)))
- plt.title('CVX solution')
- plt.colorbar()
- plt.subplot(3,1,3)
- plt.imshow(diff_cvx)
- plt.title('Difference')
- plt.colorbar()
- plt.show()
-
- plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
- plt.plot(np.linspace(0,N,N), np.reshape(u.value, (N, N))[int(N/2),:], label = 'CVX')
- plt.legend()
- plt.title('Middle Line Profiles')
- plt.show()
-
- print('Primal Objective (CVX) {} '.format(obj.value))
- print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0])) \ No newline at end of file
+#from ccpi.optimisation.operators import SparseFiniteDiff
+#import astra
+#import numpy
+#
+#try:
+# from cvxpy import *
+# cvx_not_installable = True
+#except ImportError:
+# cvx_not_installable = False
+#
+#
+#if cvx_not_installable:
+#
+#
+# ##Construct problem
+# u = Variable(N*N)
+# q = Variable()
+#
+# DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
+# DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
+#
+# regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
+#
+# # create matrix representation for Astra operator
+#
+# vol_geom = astra.create_vol_geom(N, N)
+# proj_geom = astra.create_proj_geom('parallel', 1.0, detectors, angles)
+#
+# proj_id = astra.create_projector('strip', proj_geom, vol_geom)
+#
+# matrix_id = astra.projector.matrix(proj_id)
+#
+# ProjMat = astra.matrix.get(matrix_id)
+#
+# tmp = noisy_data.as_array().ravel()
+#
+# fidelity = sum(kl_div(tmp, ProjMat * u))
+#
+# constraints = [q>=fidelity, u>=0]
+# solver = SCS
+# obj = Minimize( regulariser + q)
+# prob = Problem(obj, constraints)
+# result = prob.solve(verbose = True, solver = solver)
+#
+# diff_cvx = np.abs(pdhg.get_output().as_array() - np.reshape(u.value, (N, N)))
+#
+# plt.figure(figsize=(15,15))
+# plt.subplot(3,1,1)
+# plt.imshow(pdhg.get_output().as_array())
+# plt.title('PDHG solution')
+# plt.colorbar()
+# plt.subplot(3,1,2)
+# plt.imshow(np.reshape(u.value, (N, N)))
+# plt.title('CVX solution')
+# plt.colorbar()
+# plt.subplot(3,1,3)
+# plt.imshow(diff_cvx)
+# plt.title('Difference')
+# plt.colorbar()
+# plt.show()
+#
+# plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
+# plt.plot(np.linspace(0,N,N), np.reshape(u.value, (N, N))[int(N/2),:], label = 'CVX')
+# plt.legend()
+# plt.title('Middle Line Profiles')
+# plt.show()
+#
+# print('Primal Objective (CVX) {} '.format(obj.value))
+# print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0])) \ No newline at end of file