@article {628, title = {Efficient and Robust Image Restoration Using Multiple-Feature L2-Relaxed Sparse Analysis Priors}, journal = {IEEE Transactions on Image Processing}, volume = {24}, year = {2015}, month = {Dec}, pages = {5046-5059}, abstract = {

We propose a novel formulation for relaxed analysis-based sparsity in multiple dictionaries as a general type of prior for images, and apply it to Bayesian estimation in image restoration problems. Our formulation of an l2-relaxed l0 pseudo-norm prior allows for an especially simple maximum a posteriori estimation iterative marginal optimization algorithm, whose convergence we prove. We achieve a significant speedup over the direct (static) solution by using dynamically evolving parameters through the estimation loop. As an added heuristic twist, we fix the number of iterations in advance, and then empirically optimize the involved parameters according to two performance benchmarks. The resulting constrained dynamic method is not just fast and effective; it is also highly robust and flexible. First, it provides an outstanding tradeoff between computational load and performance, both visually and in objective (mean square error and structural similarity) terms, for a large variety of degradation tests, using the same set of parameter values for all tests. Second, the performance benchmark can be easily adapted to specific types of degradation, image classes, and even performance criteria. Third, it allows for using several dictionaries with complementary features simultaneously. This unique combination makes ours a highly practical deconvolution method.

}, keywords = {Bayes methods, Bayesian estimation, Convergence, Dictionaries, Estimation, Kernel, L2-relaxed L0 pseudo norm, L2-relaxed L0 pseudo-norm prior, L2-relaxed sparse analysis priors, Maximum likelihood estimation, Optimization, Redundancy, computational load, constrained dynamic method, deconvolution, deconvolution method, dynamically evolving parameters, estimation loop, fast constrained dynamic algorithm, image restoration, iterative marginal optimization, iterative methods, maximum a posteriori estimation, mean square error, mean square error methods, multiple representations, multiple-feature L2-relaxed sparse analysis priors, optimisation, robust tunable parameters, structural similarity terms}, issn = {1057-7149}, doi = {10.1109/TIP.2015.2478405}, author = {Javier Portilla and Antonio Trist{\'a}n-Vega and Ivan W. Selesnick} }
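The abstract above describes an alternating ("iterative marginal") MAP scheme: thresholding of multi-dictionary analysis coefficients alternated with a quadratic image update, driven by parameters that evolve over a fixed number of iterations. The Python sketch below is a minimal illustration of that structure only, not the authors' published algorithm; the function name restore, the Parseval-frame assumption on the dictionaries, and the geometric beta schedule (standing in for the paper's dynamically evolving parameters) are assumptions for exposition.

import numpy as np

def restore(y, kernel_fft, analysis_ops, lam=0.02, betas=None, n_iters=10):
    """Hypothetical sketch of iterative-marginal MAP deconvolution with an
    l2-relaxed l0 analysis prior over multiple dictionaries.

    y            : observed blurred, noisy image (2-D array)
    kernel_fft   : FFT of the blur kernel, same shape as y
    analysis_ops : list of (A, At) callables -- forward/adjoint analysis
                   transforms, assumed here to form Parseval frames
    betas        : coupling weight per iteration; a geometric schedule is
                   assumed in place of the paper's optimized parameters
    """
    if betas is None:
        betas = [2.0 ** k for k in range(n_iters)]   # assumed schedule
    x = y.copy()
    Hc = np.conj(kernel_fft)
    for beta in betas:
        # Marginal step in the coefficients: with x fixed, the l2-relaxed
        # l0 term is minimized by hard-thresholding each analysis channel.
        thr = np.sqrt(2.0 * lam / beta)
        zs = []
        for A, _ in analysis_ops:
            c = A(x)
            zs.append(np.where(np.abs(c) > thr, c, 0.0))
        # Marginal step in the image: with coefficients fixed, x solves a
        # quadratic problem whose normal equations diagonalize in the
        # Fourier domain for convolutional blur and Parseval frames.
        back = sum(At(z) for (_, At), z in zip(analysis_ops, zs))
        rhs = Hc * np.fft.fft2(y) + beta * np.fft.fft2(back)
        denom = np.abs(kernel_fft) ** 2 + beta * len(analysis_ops)
        x = np.real(np.fft.ifft2(rhs / denom))
    return x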

@conference {411, title = {Neural architectures for parametric estimation of a posteriori probabilities by constrained conditional density functions}, booktitle = {Neural Networks for Signal Processing - Proceedings of the IEEE Workshop}, year = {1999}, publisher = {IEEE, Piscataway, NJ, United States}, organization = {IEEE, Piscataway, NJ, United States}, address = {Madison, WI, USA}, abstract = {

A new approach to the estimation of {\textquoteleft}a posteriori{\textquoteright} class probabilities using neural networks, the Joint Network and Data Density Estimation (JNDDE), is presented in this paper. It is based on the estimation of the conditional data density functions, with some restrictions imposed by the classifier structure; Bayes{\textquoteright} rule is then used to obtain the {\textquoteleft}a posteriori{\textquoteright} probabilities from these densities. The proposed method is applied to three different network structures: the logistic perceptron (for the binary case), the softmax perceptron (for multi-class problems), and a generalized softmax perceptron (which can map arbitrarily complex probability functions). Gaussian mixture models are used for the conditional densities. The method has the advantage of establishing a distinction between the network parameters and the model parameters; the complexity of either can be fixed as desired. Maximum-likelihood gradient-based rules for the estimation of the parameters can be obtained. It is shown that JNDDE exhibits more robust convergence characteristics than other methods of a posteriori probability estimation, such as those based on the minimization of a Strict Sense Bayesian (SSB) cost function.

}, keywords = {Asymptotic stability, Constraint theory, Data structures, Gaussian mixture models, Joint network and data density estimation, Mathematical models, Maximum likelihood estimation, Neural networks, Probability}, doi = {10.1109/NNSP.1999.788145}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033321049\&partnerID=40\&md5=7967fa377810cc0c3e6a4d9020024b80}, author = {J I Arribas and Jes{\'u}s Cid-Sueiro and T Adali and A R Figueiras-Vidal} }
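As a concrete reading of the abstract above: class-conditional densities are modeled with Gaussian mixtures, and Bayes{\textquoteright} rule converts them into a posteriori class probabilities. The snippet below is a minimal sketch of that density-to-posterior mapping only; the diagonal-covariance parameterization and the names gmm_loglik and posteriors are illustrative assumptions, and the structural constraints JNDDE imposes through the classifier (logistic, softmax, or GSP) are not modeled here.

import numpy as np
from scipy.special import logsumexp

def gmm_loglik(x, weights, means, variances):
    # Log-density log p(x|c) of a diagonal-covariance Gaussian mixture.
    # x: (N, d); weights: (K,); means, variances: (K, d)
    diff = x[:, None, :] - means[None, :, :]                 # (N, K, d)
    log_comp = -0.5 * (np.sum(diff ** 2 / variances, axis=2)
                       + np.sum(np.log(2 * np.pi * variances), axis=1))
    return logsumexp(np.log(weights) + log_comp, axis=1)     # (N,)

def posteriors(x, class_gmms, class_priors):
    # Bayes' rule: P(c|x) is proportional to p(x|c) P(c).
    log_post = np.stack([gmm_loglik(x, *g) for g in class_gmms], axis=1)
    log_post += np.log(class_priors)[None, :]
    log_post -= logsumexp(log_post, axis=1, keepdims=True)   # normalize
    return np.exp(log_post)                                  # (N, C)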

@conference {410, title = {Neural networks to estimate ML multi-class constrained conditional probability density functions}, booktitle = {Proceedings of the International Joint Conference on Neural Networks}, year = {1999}, publisher = {IEEE, United States}, organization = {IEEE, United States}, address = {Washington, DC, USA}, abstract = {

In this paper, a new algorithm, the Joint Network and Data Density Estimation (JNDDE), is proposed to estimate the {\textquoteleft}a posteriori{\textquoteright} probabilities of the targets with neural networks in multi-class problems. It is based on the estimation of conditional density functions for each class, with some restrictions or constraints imposed by the classifier structure, and on the use of Bayes{\textquoteright} rule to force the a posteriori probabilities at the output of the network, known here as an implicit set. The method is applied to train perceptrons by means of Gaussian mixture inputs, as a particular example for the Generalized Softmax Perceptron (GSP) network. The method has the advantage of providing a clear distinction between the network architecture and the model of the data constraints, giving network parameters or weights on one side and data model parameters on the other. Maximum-likelihood stochastic gradient-based rules are obtained for JNDDE. This algorithm can be applied to hybrid labeled and unlabeled learning in a natural fashion.

}, keywords = {Generalized softmax perceptron (GSP) network, Joint network and data density estimation (JNDDE), Mathematical models, Maximum likelihood estimation, Neural networks, Probability density function, Random processes}, doi = {10.1109/IJCNN.1999.831174}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033326060\&partnerID=40\&md5=bb38c144dac0872f3a467dc12170e6b6}, author = {J I Arribas and Jes{\'u}s Cid-Sueiro and T Adali and A R Figueiras-Vidal} }
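For the Generalized Softmax Perceptron (GSP) named in the abstract above, a common construction is a softmax over several subclass units per class, with each class posterior obtained by summing its subclass outputs. The sketch below illustrates that construction and one maximum-likelihood stochastic gradient step; the names gsp_posteriors and mle_sgd_step and the exact update rule are assumptions for illustration, not the rules derived in the paper.

import numpy as np

def gsp_posteriors(x, W, groups):
    # GSP sketch: softmax over M subclass units, summed within each class
    # group to produce the class posterior.
    # x: (d,); W: (M, d); groups: list of index lists, one per class.
    a = W @ x
    s = np.exp(a - a.max())
    s /= s.sum()                                   # subclass probabilities
    return np.array([s[g].sum() for g in groups])  # class posteriors

def mle_sgd_step(x, label, W, groups, lr=0.01):
    # One hypothetical stochastic gradient ascent step on log P(label|x).
    a = W @ x
    s = np.exp(a - a.max())
    s /= s.sum()
    p = s[groups[label]].sum()
    ind = np.zeros_like(s)
    ind[groups[label]] = 1.0
    grad_a = s * (ind / p - 1.0)   # d log p / d a, from softmax calculus
    return W + lr * np.outer(grad_a, x)

In the hybrid labeled/unlabeled setting the abstract mentions, the same density machinery could in principle supply gradients for unlabeled samples as well, but that extension is beyond this sketch.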