@article {420, title = {A model selection algorithm for a posteriori probability estimation with neural networks}, journal = {IEEE Transactions on Neural Networks}, volume = {16}, year = {2005}, pages = {799-809}, abstract = {

This paper proposes a novel algorithm to jointly determine the structure and the parameters of an a posteriori probability model based on neural networks (NNs). It makes use of well-known ideas of pruning, splitting, and merging neural components, and takes advantage of the probabilistic interpretation of these components. The algorithm, called a posteriori probability model selection (PPMS), is applied to an NN architecture called the generalized softmax perceptron (GSP), whose outputs can be understood as probabilities, although the results shown can be extended to more general network architectures. Learning rules are derived from the application of the expectation-maximization algorithm to the GSP-PPMS structure. Simulation results show the advantages of the proposed algorithm with respect to other schemes. {\textcopyright} 2005 IEEE.

}, keywords = {Algorithms, Automated, Biological, Breast Neoplasms, Computer simulation, Computer-Assisted, Computing Methodologies, Decision Support Techniques, Diagnosis, Estimation, Expectation-maximization, Generalized Softmax Perceptron (GSP), Humans, Mathematical models, Model selection, Models, Neural Networks (Computer), Neural networks, Numerical Analysis, Objective function, Pattern recognition, Posterior probability, Probability, Statistical, Stochastic Processes, algorithm, article, artificial neural network, automated pattern recognition, biological model, breast tumor, classification, cluster analysis, computer analysis, computer assisted diagnosis, decision support system, evaluation, human, mathematical computing, methodology, statistical model, statistics}, issn = {10459227}, doi = {10.1109/TNN.2005.849826}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-23044459586\&partnerID=40\&md5=f00e7d86a625cfc466373a2a938276d0}, author = {J I Arribas and Jes{\'u}s Cid-Sueiro} }

@conference {415, title = {Neural network fusion strategies for identifying breast masses}, booktitle = {IEEE International Conference on Neural Networks - Conference Proceedings}, year = {2004}, address = {Budapest}, abstract = {

In this work, we introduce the Perceptron Average neural network fusion strategy and implement a number of other fusion strategies to identify breast masses in mammograms as malignant or benign, with both balanced and imbalanced input features. We numerically compare various fixed and trained fusion rules, i.e., the Majority Vote, Simple Average, Weighted Average, and Perceptron Average, when applying them to a binary statistical pattern recognition problem. Judging from the experimental results, the Weighted Average approach outperforms the other fusion strategies with balanced input features, while the Perceptron Average is superior with imbalanced ensembles and achieves the goals with the lowest standard deviation. We analyze the results of the above fusion strategies in detail, state the advantages of fusing the component networks, and offer our broad perspective on information fusion in neural networks.

}, keywords = {Biological organs, Breast cancers, Component neural networks (CNN), Image segmentation, Information fusions, Learning algorithms, Linear systems, Mammography, Mathematical models, Multilayer neural networks, Pattern recognition, Posterior probabilities, Tumors}, isbn = {0780383591}, doi = {10.1109/IJCNN.2004.1381010}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-10844231826\&partnerID=40\&md5=2be794a5832413fed34152d61dd49388}, author = {Y Wu and J He and Y Man and J I Arribas} }

@conference {413, title = {Fusing Output Information in Neural Networks: Ensemble Performs Better}, booktitle = {Annual International Conference of the IEEE Engineering in Medicine and Biology - Proceedings}, year = {2003}, address = {Cancun}, abstract = {

A neural network ensemble is a learning paradigm in which a finite number of component neural networks are trained for the same task. Previous research suggests that an ensemble as a whole is often more accurate than any single component network. This paper focuses on the advantages of fusing network architectures of different natures, and on determining the appropriate information fusion algorithm for the component neural networks by several hard-decision classifier approaches, when solving a binary pattern recognition problem. We numerically simulate and compare the different fusion approaches in terms of the mean-square error rate on the test data set, over synthetically generated noisy binary Gaussian data, and state the advantages of fusing the hard outputs of different component networks to make a final hard-decision classification. The results of the experiments indicate that neural network ensembles can indeed improve the overall accuracy for classification problems: in all fusion architectures tested, the ensemble correct classification rates are better than those achieved by the individual component networks. Finally, we are currently comparing the above hard-decision classifiers with new soft-decision classifier architectures that make use of the additional continuous-valued intermediate soft network outputs, which satisfy the fundamental laws of probability (nonnegative and summing to unity) and can be understood as the a posteriori probabilities that a given pattern belongs to a certain class.

}, keywords = {Algorithms, Backpropagation, Classification (of information), Computer simulation, Decision making, Estimation, Gaussian noise (electronic), Information fusions, Mathematical models, Medical imaging, Model selection, Multilayer neural networks, Neural network ensembles, Pattern recognition, Probability, Probability estimation, Problem solving, Regularization, Statistical methods, Statistical pattern recognition, Vectors}, doi = {10.1109/IEMBS.2003.1280254}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-1542301061\&partnerID=40\&md5=32dbadb3b6ac3c6ae1ea33d89b52c75f}, author = {Y Wu and J I Arribas} }

@conference {412, title = {Estimates of constrained multi-class a posteriori probabilities in time series problems with neural networks}, booktitle = {Proceedings of the International Joint Conference on Neural Networks}, year = {1999}, publisher = {IEEE, United States}, organization = {IEEE, United States}, address = {Washington, DC, USA}, abstract = {

In time series problems, where time ordering is a crucial issue, Partial Likelihood Estimation (PLE) is a particularly suitable method for estimating the parameters of the model. We propose a new general supervised neural network algorithm, Joint Network and Data Density Estimation (JNDDE), that employs PLE to approximate conditional probability density functions for multi-class classification problems. Logistic regression analysis is generalized to multi-class problems, with a softmax regression neural network used to model the a posteriori probabilities, which are approximated by the network outputs. Constraints are imposed on both the network architecture and the data model, resulting in a flexible network architecture and flexible distribution modeling. We consider the application of JNDDE to channel equalization and present simulation results.

}, keywords = {Approximation theory, Computer simulation, Constraint theory, Data structures, Joint network-data density estimation (JNDDE), Mathematical models, Multi-class a posteriori probabilities, Neural networks, Partial likelihood estimation (PLE), Probability density function, Regression analysis}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033325263\&partnerID=40\&md5=8c6134020b0b2a9c5ab05b131c070b88}, author = {J I Arribas and Jes{\'u}s Cid-Sueiro and T Adali and H Ni and B Wang and A R Figueiras-Vidal} }

@conference {411, title = {Neural architectures for parametric estimation of a posteriori probabilities by constrained conditional density functions}, booktitle = {Neural Networks for Signal Processing - Proceedings of the IEEE Workshop}, year = {1999}, publisher = {IEEE, Piscataway, NJ, United States}, organization = {IEEE, Piscataway, NJ, United States}, address = {Madison, WI, USA}, abstract = {

A new approach to the estimation of {\textquoteleft}a posteriori{\textquoteright} class probabilities using neural networks, Joint Network and Data Density Estimation (JNDDE), is presented in this paper. It is based on the estimation of the conditional data density functions, with some restrictions imposed by the classifier structure; Bayes{\textquoteright} rule is used to obtain the {\textquoteleft}a posteriori{\textquoteright} probabilities from these densities. The proposed method is applied to three different network structures: the logistic perceptron (for the binary case), the softmax perceptron (for multi-class problems), and a generalized softmax perceptron (which can be used to map arbitrarily complex probability functions). Gaussian mixture models are used for the conditional densities. The method has the advantage of establishing a distinction between the network parameters and the model parameters; the complexity of either can be fixed as desired. Maximum likelihood gradient-based rules for the estimation of the parameters can be obtained. It is shown that JNDDE exhibits more robust convergence characteristics than other methods of a posteriori probability estimation, such as those based on the minimization of a Strict Sense Bayesian (SSB) cost function.

}, keywords = {Asymptotic stability, Constraint theory, Data structures, Gaussian mixture models, Joint network and data density estimation, Mathematical models, Maximum likelihood estimation, Neural networks, Probability}, doi = {10.1109/NNSP.1999.788145}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033321049\&partnerID=40\&md5=7967fa377810cc0c3e6a4d9020024b80}, author = {J I Arribas and Jes{\'u}s Cid-Sueiro and T Adali and A R Figueiras-Vidal} }

@conference {410, title = {Neural networks to estimate ML multi-class constrained conditional probability density functions}, booktitle = {Proceedings of the International Joint Conference on Neural Networks}, year = {1999}, publisher = {IEEE, United States}, organization = {IEEE, United States}, address = {Washington, DC, USA}, abstract = {

In this paper, a new algorithm, Joint Network and Data Density Estimation (JNDDE), is proposed to estimate the {\textquoteleft}a posteriori{\textquoteright} probabilities of the targets with neural networks in multi-class problems. It is based on the estimation of conditional density functions for each class, with some restrictions or constraints imposed by the classifier structure, and on the use of Bayes{\textquoteright} rule to force the a posteriori probabilities at the output of the network, known here as an implicit set. The method is applied to train perceptrons with Gaussian mixture inputs, taking the Generalized Softmax Perceptron (GSP) network as a particular example. The method has the advantage of providing a clear distinction between the network architecture and the model of the data constraints, giving network parameters or weights on one side and data model parameters on the other. Maximum likelihood stochastic gradient-based rules are obtained for JNDDE. This algorithm can be applied in a natural fashion to hybrid learning with labeled and unlabeled data.

}, keywords = {Generalized softmax perceptron (GSP) network, Joint network and data density estimation (JNDDE), Mathematical models, Maximum likelihood estimation, Neural networks, Probability density function, Random processes}, doi = {10.1109/IJCNN.1999.831174}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033326060\&partnerID=40\&md5=bb38c144dac0872f3a467dc12170e6b6}, author = {J I Arribas and Jes{\'u}s Cid-Sueiro and T Adali and A R Figueiras-Vidal} }