Diffstat (limited to 'Wrappers')
 Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py           |  19
 Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py | 104
 Wrappers/Python/wip/pdhg_tv_denoising_poisson.py               |  98
 3 files changed, 62 insertions(+), 159 deletions(-)
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index a41bd48..c0c2c10 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -169,12 +169,10 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
         if i%10==0:
             #
             p1 = f(operator.direct(x)) + g(x)
-            print(p1)
-#            d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
-#            primal.append(p1)
-#            dual.append(d1)
-#            pdgap.append(p1-d1)
-#            print(p1, d1, p1-d1)
+            d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
+            primal.append(p1)
+            dual.append(d1)
+            pdgap.append(p1-d1)
     else:
@@ -199,11 +197,10 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
         if i%10==0:
             #
             p1 = f(operator.direct(x)) + g(x)
-            print(p1)
-#            d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
-#            primal.append(p1)
-#            dual.append(d1)
-#            pdgap.append(p1-d1)
+            d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
+            primal.append(p1)
+            dual.append(d1)
+            pdgap.append(p1-d1)
 #            print(p1, d1, p1-d1)
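
Note: the restored lines record the primal objective p1 = f(Kx) + g(x), the dual
objective d1 = -( f.convex_conjugate(y) + g(-K^T y) ), and the primal-dual gap
p1 - d1, which should shrink towards zero as PDHG converges. A minimal sketch
(toy problem of my own, not from this commit) of the three quantities, choosing
K = I, f(z) = 0.5*||z - b||^2 and g(x) = 0.5*||x||^2 so that g is self-conjugate
and g(-K^T y) equals g.convex_conjugate(-K^T y):

    import numpy

    b = numpy.array([1.0, 2.0])
    x = b / 2                          # primal optimum of f(x) + g(x)
    y = x - b                          # matching dual point (y = grad f at Kx)

    p1 = 0.5*numpy.sum((x - b)**2) + 0.5*numpy.sum(x**2)            # f(Kx) + g(x)
    d1 = -((0.5*numpy.sum(y**2) + y.dot(b)) + 0.5*numpy.sum(y**2))  # -(f*(y) + g(-K^T y))
    print(p1, d1, p1 - d1)             # 1.25 1.25 0.0 at the saddle point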
diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
index 09e39bf..1a64b13 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
@@ -38,32 +38,15 @@ class KullbackLeibler(Function):
def __call__(self, x):
-
-        # TODO check
-<<<<<<< HEAD
-        tmp = x + self.bnoise
-        ind = tmp.as_array()>0
-
-        res = x.as_array()[ind] - self.b.as_array()[ind] * numpy.log(tmp.as_array()[ind])
-
-        return sum(res)
-
-=======
-        self.sum_value = x + self.bnoise
-        if (self.sum_value.as_array()<0).any():
-            self.sum_value = numpy.inf
-        if self.sum_value==numpy.inf:
-            return numpy.inf
-        else:
-            tmp = self.sum_value.copy()
-            #tmp.fill( numpy.log(tmp.as_array()) )
-            self.log(tmp)
-            return (x - self.b * tmp ).sum()
-
-#            return numpy.sum( x.as_array() - self.b.as_array() * numpy.log(self.sum_value.as_array()))
+        res_tmp = numpy.zeros(x.shape)
+        tmp = x + self.bnoise
+        ind = x.as_array()>0
+        res_tmp[ind] = x.as_array()[ind] - self.b.as_array()[ind] * numpy.log(tmp.as_array()[ind])
+
+        return res_tmp.sum()
def log(self, datacontainer):
'''calculates the in-place log of the datacontainer'''
@@ -71,7 +54,7 @@ class KullbackLeibler(Function):
datacontainer.as_array().ravel(), True):
raise ValueError('KullbackLeibler. Cannot calculate log of negative number')
datacontainer.fill( numpy.log(datacontainer.as_array()) )
->>>>>>> origin/composite_operator_datacontainer
+
def gradient(self, x, out=None):
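
Note: the rewritten __call__ evaluates the Kullback-Leibler data term
sum_i x_i - b_i*log(x_i + bnoise_i) only on the entries with x > 0 and writes
zeros elsewhere, so non-positive entries no longer turn the whole sum into
NaN/inf. A plain-numpy sketch (hypothetical arrays) of the same guard:

    import numpy

    x      = numpy.array([0.5, 0.0, 2.0])     # one non-positive entry
    b      = numpy.ones(3)
    bnoise = numpy.zeros(3)

    res = numpy.zeros(x.shape)
    ind = x > 0
    res[ind] = x[ind] - b[ind] * numpy.log(x[ind] + bnoise[ind])
    print(res.sum())                           # finite; the x == 0 entry contributes 0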
@@ -79,19 +62,19 @@ class KullbackLeibler(Function):
         if out is None:
             return 1 - self.b/(x + self.bnoise)
         else:
-<<<<<<< HEAD
-            self.b.divide(x+self.bnoise, out=out)
-            out.subtract(1, out=out)
-
-    def convex_conjugate(self, x):
-
-        tmp = self.b/( 1 - x )
-        ind = tmp.as_array()>0
-
-        sel
-
-        return (self.b * ( ImageData( numpy.log(tmp) ) - 1 ) - self.bnoise * (x - 1)).sum()
-=======
+#<<<<<<< HEAD
+#            self.b.divide(x+self.bnoise, out=out)
+#            out.subtract(1, out=out)
+#
+#    def convex_conjugate(self, x):
+#
+#        tmp = self.b/( 1 - x )
+#        ind = tmp.as_array()>0
+#
+#        sel
+#
+#        return (self.b * ( ImageData( numpy.log(tmp) ) - 1 ) - self.bnoise * (x - 1)).sum()
+#=======
             x.add(self.bnoise, out=out)
             self.b.divide(out, out=out)
             out.subtract(1, out=out)
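
Note: the gradient of x -> sum( x - b*log(x + bnoise) ) is 1 - b/(x + bnoise),
which is what the `out is None` branch returns; the in-place branch kept above
ends with out.subtract(1, out=out) and therefore produces b/(x + bnoise) - 1,
the negative of that. A numpy sketch (hypothetical arrays) of an in-place
sequence that matches the `out is None` branch:

    import numpy

    x, b   = numpy.array([0.5, 2.0]), numpy.ones(2)
    bnoise = numpy.zeros(2)
    out    = numpy.empty_like(x)

    numpy.add(x, bnoise, out=out)        # out = x + bnoise
    numpy.divide(b, out, out=out)        # out = b / (x + bnoise)
    numpy.subtract(1.0, out, out=out)    # out = 1 - b/(x + bnoise)
    print(out)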
@@ -99,21 +82,17 @@ class KullbackLeibler(Function):
     def convex_conjugate(self, x):
-        tmp = self.b/( 1 - x )
-        self.log(tmp)
-        return (self.b * ( tmp - 1 ) - self.bnoise * (x - 1)).sum()
-#        return self.b * ( ImageData(numpy.log(self.b/(1-x)) - 1 )) - self.bnoise * (x - 1)
->>>>>>> origin/composite_operator_datacontainer
+        tmp = self.b/(1-x)
+        ind = tmp.as_array()>0
+
+        return (self.b.as_array()[ind] * (numpy.log(tmp.as_array()[ind])-1)).sum()
+
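
Note: for f(u) = sum( u - b*log(u) ), maximising y*u - f(u) over u > 0 gives
u = b/(1 - y) for y < 1, so the conjugate is f*(y) = sum( b*(log(b/(1 - y)) - 1) ),
which is what the new convex_conjugate computes on the entries where
b/(1 - x) > 0 (note the bnoise term of the old branch is dropped). A quick grid
check (hypothetical scalars):

    import numpy

    b, y = 2.0, 0.5
    u = numpy.linspace(1e-6, 50, 200000)
    lhs = numpy.max(y*u - (u - b*numpy.log(u)))   # sup_u { y*u - f(u) }; maximiser u = b/(1-y) = 4
    rhs = b*(numpy.log(b/(1 - y)) - 1)            # closed form used above
    print(lhs, rhs)                                # both ~ 0.7726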
     def proximal(self, x, tau, out=None):
         if out is None:
             return 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() )
         else:
-<<<<<<< HEAD
-            tmp = 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() )
-            out.fill(tmp)
-=======
             tmp = 0.5 *( (x - self.bnoise - tau) +
                          ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt()
                          )
@@ -130,36 +109,29 @@ class KullbackLeibler(Function):
             out += tmp
             out *= 0.5
-
->>>>>>> origin/composite_operator_datacontainer
-
-
+
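
Note: the closed form kept in proximal solves the optimality condition of
argmin_u  u - b*log(u + bnoise) + (u - x)^2/(2*tau), a quadratic in
v = u + bnoise. A numeric sanity check (hypothetical scalars, eta = bnoise):

    import numpy

    x, b, eta, tau = 1.5, 2.0, 0.1, 0.3
    u = numpy.linspace(-eta + 1e-6, 20, 400000)
    obj = u - b*numpy.log(u + eta) + (u - x)**2/(2*tau)
    u_grid   = u[numpy.argmin(obj)]
    u_closed = 0.5*((x - eta - tau) + numpy.sqrt((x + eta - tau)**2 + 4*tau*b))
    print(u_grid, u_closed)                        # agree to grid accuracy (~1.5612)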
     def proximal_conjugate(self, x, tau, out=None):
         if out is None:
             z = x + tau * self.bnoise
-<<<<<<< HEAD
+
             return 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
         else:
             z = x + tau * self.bnoise
             res = 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
             out.fill(res)
-
-=======
-            return (z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()
-        else:
-            z_m = x + tau * self.bnoise - 1
-            self.b.multiply(4*tau, out=out)
-            z_m.multiply(z_m, out=z_m)
-            out += z_m
-            out.sqrt(out=out)
-            # z = z_m + 2
-            z_m.sqrt(out=z_m)
-            z_m += 2
-            out *= -1
-            out += z_m
->>>>>>> origin/composite_operator_datacontainer
+#        else:
+#            z_m = x + tau * self.bnoise -1
+#            self.b.multiply(4*tau, out=out)
+#            z_m.multiply(z_m, out=z_m)
+#            out += z_m
+#            out.sqrt(out=out)
+#            z = z_m + 2
+#            z_m.sqrt(out=z_m)
+#            z_m += 2
+#            out *= -1
+#            out += z_m
def __rmul__(self, scalar):
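
Note: the retained closed form 0.5*((z + 1) - sqrt((z - 1)^2 + 4*tau*b)) with
z = x + tau*bnoise is consistent with the Moreau identity
prox_{tau*f*}(x) = x - tau*prox_{f/tau}(x/tau) applied to the proximal formula
above. A scalar check (hypothetical values, bnoise = 0):

    import numpy

    x, b, tau = 0.7, 2.0, 0.4
    sigma  = 1.0/tau                      # prox_{f/tau} = prox_{sigma*f}
    prox_f = 0.5*((x/tau - sigma) + numpy.sqrt((x/tau - sigma)**2 + 4*sigma*b))
    moreau = x - tau*prox_f
    closed = 0.5*((x + 1) - numpy.sqrt((x - 1)**2 + 4*tau*b))
    print(moreau, closed)                 # both ~ -0.0569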
diff --git a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py
index 16e6b43..ec745a2 100644
--- a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py
+++ b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py
@@ -27,7 +27,7 @@ from timeit import default_timer as timer
# ############################################################################
# Create phantom for TV Poisson denoising
-N = 100
+N = 200
data = np.zeros((N,N))
data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
@@ -36,7 +36,7 @@ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
ag = ig
# Create noisy data. Add Gaussian noise
-n1 = random_noise(data, mode = 'poisson')
+n1 = random_noise(data, mode = 'poisson', seed = 10)
noisy_data = ImageData(n1)
plt.imshow(noisy_data.as_array())
@@ -59,7 +59,7 @@ if method == '0':
operator = BlockOperator(op1, op2, shape=(2,1) )
f1 = alpha * MixedL21Norm()
- f2 = 0.5 * KullbackLeibler(noisy_data)
+ f2 = KullbackLeibler(noisy_data)
f = BlockFunction(f1, f2 )
g = ZeroFunction()
@@ -71,7 +71,7 @@ else:
###########################################################################
operator = Gradient(ig)
f = alpha * MixedL21Norm()
- g = 0.5 * KullbackLeibler(noisy_data)
+ g = KullbackLeibler(noisy_data)
###########################################################################
# Compute operator Norm
@@ -133,22 +133,25 @@ except ImportError:
if cvx_not_installable:
##Construct problem
- u = Variable(ig.shape)
+ u1 = Variable(ig.shape)
+ q = Variable()
DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
# Define Total Variation as a regulariser
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
- fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
+ regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u1), DY.matrix() * vec(u1)]), 2, axis = 0))
-
- solver = SCS
- obj = Minimize( regulariser + fidelity)
- prob = Problem(obj)
+ fidelity = sum( u1 - multiply(noisy_data.as_array(), log(u1)) )
+ constraints = [q>= fidelity, u1>=0]
+
+ solver = ECOS
+ obj = Minimize( regulariser + q)
+ prob = Problem(obj, constraints)
result = prob.solve(verbose = True, solver = solver)
+
- diff_cvx = numpy.abs( res.as_array() - u.value )
+ diff_cvx = numpy.abs( res.as_array() - u1.value )
# Show result
plt.figure(figsize=(15,15))
@@ -158,7 +161,7 @@ if cvx_not_installable:
plt.colorbar()
plt.subplot(3,1,2)
- plt.imshow(u.value)
+ plt.imshow(u1.value)
plt.title('CVX solution')
plt.colorbar()
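
Note: the comparison now matches the Poisson model: the squared-error fidelity
is replaced by the Poisson log-likelihood sum( u1 - noisy*log(u1) ), moved into
an epigraph variable q so that ECOS (which supports the exponential cone) can
handle it; SCS is dropped. A tiny standalone sketch (hypothetical 2x2 data, no
regulariser) of the same epigraph trick:

    import numpy
    from cvxpy import Variable, Minimize, Problem, multiply, log, sum, ECOS

    noisy = numpy.array([[1.0, 2.0], [2.0, 1.0]])
    u = Variable(noisy.shape)
    q = Variable()
    fidelity = sum(u - multiply(noisy, log(u)))
    prob = Problem(Minimize(q), [q >= fidelity, u >= 0])
    prob.solve(solver=ECOS)
    print(u.value)        # approaches noisy, the unregularised ML estimate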
@@ -172,72 +175,3 @@ if cvx_not_installable:
-#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
-#pdhg.max_iteration = 2000
-#pdhg.update_objective_interval = 10
-#
-#pdhg.run(2000)
-
-
-
-#sol = pdhg.get_output().as_array()
-##sol = result.as_array()
-##
-#fig = plt.figure()
-#plt.subplot(1,2,1)
-#plt.imshow(noisy_data.as_array())
-##plt.colorbar()
-#plt.subplot(1,2,2)
-#plt.imshow(sol)
-##plt.colorbar()
-#plt.show()
-##
-
-##
-#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth')
-#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
-#plt.legend()
-#plt.show()
-
-
-#%% Compare with cvx
-
-#try_cvx = input("Do you want CVX comparison (0/1)")
-#
-#if try_cvx=='0':
-#
-# from cvxpy import *
-# import sys
-# sys.path.insert(0,'/Users/evangelos/Desktop/Projects/CCPi/CCPi-Framework/Wrappers/Python/ccpi/optimisation/cvx_scripts')
-# from cvx_functions import TV_cvx
-#
-# u = Variable((N, N))
-# fidelity = pnorm( u - noisy_data.as_array(),1)
-# regulariser = alpha * TV_cvx(u)
-# solver = MOSEK
-# obj = Minimize( regulariser + fidelity)
-# constraints = []
-# prob = Problem(obj, constraints)
-#
-# # Choose solver (SCS is fast but less accurate than MOSEK)
-# result = prob.solve(verbose = True, solver = solver)
-#
-# print('Objective value is {} '.format(obj.value))
-#
-# diff_pdhg_cvx = np.abs(u.value - res.as_array())
-# plt.imshow(diff_pdhg_cvx)
-# plt.colorbar()
-# plt.title('|CVX-PDHG|')
-# plt.show()
-#
-# plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
-# plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'PDHG')
-# plt.legend()
-# plt.show()
-#
-#else:
-# print('No CVX solution available')
-
-
-
-