Seizure-driven brain damage in epilepsy accumulates over time, especially in the hippocampus, which can lead to sclerosis, cognitive decline, and death. Excitotoxicity is the prevalent model to explain ictal neurodegeneration. Current labeling technologies cannot distinguish between excitotoxicity and hypoxia, however, because they share common molecular mechanisms. This leaves open the possibility that undetected ischemic hypoxia, due to ictal blood flow restriction, could contribute to neurodegeneration previously ascribed to excitotoxicity. We tested this possibility with Confocal Laser Endomicroscopy (CLE) and novel stereological analyses in several models of epileptic mice. We found a higher number and magnitude of NG2+ mural-cell mediated capillary constrictions in the hippocampus of epileptic mice than in that of normal mice, in addition to spatial coupling between capillary constrictions and oxidative stressed neurons and neurodegeneration. These results reveal a role for hypoxia driven by capillary blood flow restriction in ictal neurodegeneration. {\textcopyright} 2017 The Author(s).

}, doi = {10.1038/srep43276}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85014072909\&doi=10.1038\%2fsrep43276\&partnerID=40\&md5=e9d3567266bdc360a7addc92be350c8d}, author = {Leal-Campanario, R. and Alarcon-Martinez, L. and Rieiro, H. and Martinez-Conde, S. and Alarcon-Martinez, T. and Zhao, X. and LaMee, J. and Popp, P.J. and Calhoun, M.E. and Juan I. Arribas and Schlegel, A.A. and Di Stasi, L.L. and Rho, J.M. and Inge, L. and Otero-Millan, J. and Treiman, D.M. and Macknik, S.L.} } @article {7460246, title = {A computer-aided diagnosis system with EEG based on the P3b wave during an auditory odd-ball task in schizophrenia}, journal = {IEEE Transactions on Biomedical Engineering}, volume = {PP}, number = {99}, year = {2016}, pages = {1-1}, keywords = {Computer aided diagnosis, Design automation, Electrodes, Electroencephalography, Feature extraction, Indexes, Sensitivity}, issn = {0018-9294}, doi = {10.1109/TBME.2016.2558824}, author = {L. Santos-Mayo and Luis Miguel San-Jose-Revuelta and Juan I. Arribas} } @article {SanJos{\'e}Revuelta2016561, title = {Three Natural Computation methods for joint channel estimation and symbol detection in multiuser communications}, journal = {Applied Soft Computing}, volume = {49}, year = {2016}, pages = {561 - 569}, abstract = {Abstract This paper studies three of the most important optimization algorithms belonging to Natural Computation (NC): genetic algorithm (GA), tabu search (TS) and simulated quenching (SQ). A concise overview of these methods, including their fundamentals, drawbacks and comparison, is described in the first half of the paper. Our work is particularized and focused on a specific application: joint channel estimation and symbol detection in a Direct-Sequence/Code-Division Multiple-Access (DS/CDMA) multiuser communications scenario; therefore, its channel model is described and the three methods are explained and particularized for solving this. 
Important issues such as suboptimal convergence, cycling search or control of the population diversity have deserved special attention. Several numerical simulations analyze the performance of these three methods, showing, as well, comparative results with well-known classical algorithms such as the Minimum Mean Square Error estimator (MMSE), the Matched Filter (MF) or Radial Basis Function (RBF)-based detection schemes. As a consequence, the three proposed methods would allow transmission at higher data rates over channels under more severe fading and interference conditions. Simulations show that our proposals require less computational load in most cases. For instance, the proposed \{GA\} saves about 73\% of time with respect to the standard GA. Besides, when the number of active users doubles from 10 to 20, the complexity of the proposed \{GA\} increases by a factor of 8.33, in contrast to 32 for the optimum maximum likelihood detector. The load of \{TS\} and \{SQ\} is around 15{\textendash}25\% higher than that of the proposed GA.}, keywords = {Population diversity}, issn = {1568-4946}, doi = {10.1016/j.asoc.2016.08.034}, url = {http://www.sciencedirect.com/science/article/pii/S1568494616304288}, author = {Luis Miguel San-Jose-Revuelta and Juan I. Arribas} } @book {676, title = {Sunflowers: growth and development, environmental influences and pests/diseases.}, year = {2014}, pages = {323}, publisher = {Nova Science Publishers}, organization = {Nova Science Publishers}, address = {New York}, abstract = {We are all well aware that the importance of the sunflower (\emph{Helianthus annuus}) as a crop has increased significantly in recent years, not only in the food industry but also as a natural energy resource in oil production. 
I am, thus, very pleased to be able to present this comprehensive monograph on a wide range of important issues regarding sunflowers, with an emphasis on environmental influences, pests and diseases in order to maximise production whilst minimising costs.

Contributors were selected based on their proven experience in the field of sunflowers. Contributors submitted an extended abstract that was assessed for relevance. They were then invited to contribute draft chapters. Each chapter underwent a stringent and thorough peer review process by other experts in the field, with final approval by the editor who, thus, was able to balance the topics from all contributors.

The book contains important original results. Each chapter deals with a different topic, and draws, where appropriate, from studies and results previously published by the authors. Authors were encouraged to complement their writing with original and high quality graphs, charts, tables, figures, pictures and photographs.

It{\textquoteright}s my honour and pleasure to acknowledge the rigorous work carried out by all authors in this book, and at the same time I am very grateful to them for trusting me in leading this project in the role of the editor of their work. My thanks also go to the anonymous reviewers who contributed their time so generously to this book, and without whom it would not exist.

I am also very grateful to Nova Science Pubs. for inviting me to lead this book, and thank them for the help and coverage provided during the whole time that this project lasted.

I really do hope that you find this book of interest and wish you enjoy its reading as much as I have done through the whole editing process and as much I am sure all authors have done while writing it.

}, keywords = {disease, environmental, Leaf classification, pest, Sunflower}, isbn = {978-1-63117-348-6}, scopusurl = {https://www.scopus.com/record/display.uri?eid=2-s2.0-84948981604\&origin=resultslist\&sort=plf-f\&src=s\&sid=6fdffa7042d279955cdde5960c4dc452\&sot=autdocs\&sdt=autdocs\&sl=17\&s=AU-ID\%287103041133\%29\&relpos=4\&citeCnt=0\&searchTerm=}, url = {https://www.novapublishers.com/catalog/product_info.php?products_id=48247\&osCsid=b}, author = {Juan I. Arribas} } @article {408, title = {Evaluation of the use of low-cost GPS receivers in the autonomous guidance of agricultural tractors}, journal = {Spanish Journal of Agricultural Research}, volume = {9}, year = {2011}, pages = {377--388}, abstract = {This paper evaluates the use of low-cost global positioning system (GPS) receivers in the autonomous guidance of agricultural tractors. An autonomous guidance system was installed in a 6400 John Deere agricultural tractor. A lowcost GPS receiver was used as positioning sensor. Three different control laws were implemented in order to evaluate the autonomous guidance of the tractor with the low-cost receiver. The guidance was experimentally tested with the tracking of straight trajectories and with the step response. The total guidance error was obtained from the receiver accuracy and from the guidance error. For the evaluation of the receiver{\textquoteright}s accuracy, positioning data from several lowcost receivers were recorded and analyzed. For the evaluation of the guidance error, tests were performed with each control law at three different speeds. 
The conclusions obtained were that relative accuracy of low-cost receivers decreases with the time; that for an interval lower than 15 min, the error usually remains below 1 m; that all the control laws have a similar behavior and it is conditioned by the control law adjustment; that automatic guidance with lowcost receivers is possible with speeds that went up to 9 km h -1; and finally, that the total error in the guidance is mainly determined by the receiver{\textquoteright}s accuracy.

}, issn = {1695-971X}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79959669468\&partnerID=40\&md5=774d42717ec127c9a6c5e25864da9722}, author = {Sergio Alonso-Garcia and Jaime Gomez-Gil and Juan I. Arribas} } @article {424, title = {Leaf classification in sunflower crops by computer vision and neural networks}, journal = {Computers and Electronics in Agriculture}, volume = {78}, year = {2011}, pages = {9--18}, abstract = {In this article, we present an automatic leaves image classification system for sunflower crops using neural networks, which could be used in selective herbicide applications. The system is comprised of four main stages. First, a segmentation based on rgb color space is performed. Second, many different features are detected and then extracted from the segmented image. Third, the most discriminable set of features are selected. Finally, the Generalized Softmax Perceptron (GSP) neural network architecture is used in conjunction with the recently proposed Posterior Probability Model Selection (PPMS) algorithm for complexity selection in order to select the leaves in an image and then classify them either as sunflower or non-sunflower. The experimental results show that the proposed system achieves a high level of accuracy with only five selected discriminative features obtaining an average Correct Classification Rate of 85\% and an area under the receiver operation curve over 90\%, for the test set. {\textcopyright} 2011 Elsevier B.V.

}, keywords = {accuracy assessment, agricultural technology, algorithm, artificial neural network, automation, Classification rates, Computer vision, Crops, dicotyledon, Discriminative features, experimental study, Generalized softmax perceptron, Helianthus, herbicide, Herbicide application, Herbicides, Image classification, Image classification systems, Leaf classification, Learning machines, Model selection, Network architecture, Neural networks, Posterior probability, RGB color space, segmentation, Segmented images, Sunflower, Test sets}, issn = {01681699}, doi = {10.1016/j.compag.2011.05.007}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960909222\&partnerID=40\&md5=21ed82f5d64eed47a0ed6821b492675d}, author = {Juan I. Arribas and G V Sanchez-Ferrero and G Ruiz-Ruiz and Jaime Gomez-Gil} } @article {423, title = {Automatic bayesian classification of healthy controls, bipolar disorder, and schizophrenia using intrinsic connectivity maps from fMRI data}, journal = {IEEE Transactions on Biomedical Engineering}, volume = {57}, year = {2010}, pages = {2850-2860}, abstract = {We present a method for supervised, automatic, and reliable classification of healthy controls, patients with bipolar disorder, and patients with schizophrenia using brain imaging data. The method uses four supervised classification learning machines trained with a stochastic gradient learning rule based on the minimization of KullbackLeibler divergence and an optimal model complexity search through posterior probability estimation. 
Prior to classification, given the high dimensionality of functional MRI (fMRI) data, a dimension reduction stage comprising two steps is performed: first, a one-sample univariate t-test mean-difference Tscore approach is used to reduce the number of significant discriminative functional activated voxels, and then singular value decomposition is performed to further reduce the dimension of the input patterns to a number comparable to the limited number of subjects available for each of the three classes. Experimental results using functional brain imaging (fMRI) data include receiver operation characteristic curves for the three-way classifier with area under curve values around 0.82, 0.89, and 0.90 for healthy control versus nonhealthy, bipolar disorder versus nonbipolar, and schizophrenia patients versus nonschizophrenia binary problems, respectively. The average three-way correct classification rate (CCR) is in the range of 70\%-72\%, for the test set, remaining close to the estimated Bayesian optimal CCR theoretical upper bound of about 80\%, estimated from the one nearest-neighbor classifier over the same data. {\textcopyright} 2010 IEEE.

}, keywords = {Algorithms, area under the curve, article, Artificial Intelligence, Bayesian learning, Bayesian networks, Bayes Theorem, Biological, bipolar disorder, Brain, Case-Control Studies, classification, Classifiers, Computer-Assisted, controlled study, Diseases, functional magnetic resonance imaging, Functional MRI (fMRI), human, Humans, Learning machines, Learning systems, machine learning, Magnetic Resonance Imaging, major clinical study, Models, neuroimaging, Operation characteristic, Optimization, patient coding, receiver operating characteristic, reliability, Reproducibility of Results, ROC Curve, schizophrenia, Signal Processing, Singular value decomposition, Statistical tests, Stochastic models, Student t test}, issn = {00189294}, doi = {10.1109/TBME.2010.2080679}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78649311169\&partnerID=40\&md5=d3b90f1a3ee4ef209d131ef986e142db}, author = {Juan I. Arribas and V D Calhoun and T Adali} } @article {422, title = {A radius and ulna TW3 bone age assessment system}, journal = {IEEE Transactions on Biomedical Engineering}, volume = {55}, year = {2008}, pages = {1463-1476}, abstract = {An end-to-end system to automate the well-known Tanner - Whitehouse (TW3) clinical procedure to estimate the skeletal age in childhood is proposed. The system comprises the detailed analysis of the two most important bones in TW3: the radius and ulna wrist bones. First, a modified version of an adaptive clustering segmentation algorithm is presented to properly semi-automatically segment the contour of the bones. Second, up to 89 features are defined and extracted from bone contours and gray scale information inside the contour, followed by some well-founded feature selection mathematical criteria, based on the ideas of maximizing the classes{\textquoteright} separability. 
Third, bone age is estimated with the help of a Generalized Softmax Perceptron (GSP) neural network (NN) that, after supervised learning and optimal complexity estimation via the application of the recently developed Posterior Probability Model Selection (PPMS) algorithm, is able to accurately predict the different development stages in both radius and ulna from which and with the help of the TW3 methodology, we are able to conveniently score and estimate the bone age of a patient in years, in what can be understood as a multiple-class (multiple stages) pattern recognition approach with posterior probability estimation. Finally, numerical results are presented to evaluate the system performance in predicting the bone stages and the final patient bone age over a private hand image database, with the help of the pediatricians and the radiologists expert diagnoses. {\textcopyright} 2006 IEEE.

}, keywords = {Age Determination by Skeleton, Aging, algorithm, Algorithms, article, Artificial Intelligence, artificial neural network, Automated, automation, Bone, bone age, Bone age assessment, bone maturation, childhood, Clustering algorithms, Computer-Assisted, Humans, instrumentation, Model selection, Neural networks, Pattern recognition, Radiographic Image Interpretation, radius, Reproducibility of Results, Sensitivity and Specificity, Skeletal maturity, ulna}, issn = {0018-9294}, doi = {10.1109/TBME.2008.918554}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-42249094547\&partnerID=40\&md5=2cecfea5f75a61b048611f2391b00aed}, author = {Antonio Trist{\'a}n-Vega and Juan I. Arribas} } @article {419, title = {A fast B-spline pseudo-inversion algorithm for consistent image registration}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {4673 LNCS}, year = {2007}, pages = {768--775}, abstract = {Recently, the concept of consistent image registration has been introduced to refer to a set of algorithms that estimate both the direct and inverse deformation together, that is, they exchange the roles of the target and the scene images alternatively; it has been demonstrated that this technique improves the registration accuracy, and that the biological significance of the obtained deformations is also improved. When dealing with free form deformations, the inversion of the transformations obtained becomes computationally intensive. In this paper, we suggest the parametrization of such deformations by means of a cubic B-spline, and its approximated inversion using a highly efficient algorithm. The results show that the consistency constraint notably improves the registration accuracy, especially in cases of a heavy initial misregistration, with very little computational overload. {\textcopyright} Springer-Verlag Berlin Heidelberg 2007.

}, keywords = {Approximation algorithms, Computational overload, Consistent registration, Constraint theory, Image registration, Inverse problems, Inverse transformation, Parameterization}, isbn = {9783540742715}, issn = {03029743}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-38149022572\&partnerID=40\&md5=627751cd7654872cbd9ee74a249752eb}, author = {Antonio Trist{\'a}n-Vega and Juan I. Arribas} } @inbook {418, title = {A statistical-genetic algorithm to select the most significant features in mammograms}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {4673 LNCS}, year = {2007}, pages = {189-196}, abstract = {An automatic classification system into either malignant or benign microcalcification from mammograms is a helpful tool in breast cancer diagnosis. From a set of extracted features, a classifying method using neural networks can provide a probability estimation that can help the radiologist in his diagnosis. With this objective in mind, this paper proposes a feature selection algorithm from a massive number of features based on a statistical distance method in conjunction with a genetic algorithm (GA). The use of a statistical distance as optimality criterion was improved with genetic algorithms for selecting an appropriate subset of features, thus making this algorithm capable of performing feature selection from a massive set of initial features. Additionally, it provides a criterion to select an appropriate number of features to be employed. Experimental work was performed using Generalized Softmax Perceptrons (GSP), trained with a Strict Sense Bayesian cost function for direct probability estimation, as microcalcification classifiers. A Posterior Probability Model Selection (PPMS) algorithm was employed to determine the network complexity. 
Results showed that this algorithm converges into a subset of features which has a good classification rate and Area Under Curve (AUC) of the Receiver Operating Curve (ROC). {\textcopyright} Springer-Verlag Berlin Heidelberg 2007.

}, keywords = {Breast cancer, Diagnosis, Feature extraction, Genetic algorithms, Mammography, Microcalcification classification, Network complexity, Neural network classifiers, Neural networks, Tumors}, isbn = {9783540742715}, issn = {03029743}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-38149142403\&partnerID=40\&md5=ef139db3a0e5d603c4f721316abdcf2c}, author = {G V Sanchez-Ferrero and Juan I. Arribas} } @inbook {421, title = {Estimation of Posterior Probabilities with Neural Networks: Application to Microcalcification Detection in Breast Cancer Diagnosis}, booktitle = {Handbook of Neural Engineering}, year = {2006}, pages = {41-58}, publisher = {John Wiley \& Sons, Inc.}, organization = {John Wiley \& Sons, Inc.}, isbn = {9780470056691}, doi = {10.1002/9780470068298.ch3}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-42249107409\&partnerID=40\&md5=aac6237961cec1a48c0e843a9a1912a4}, author = {Juan I. Arribas and Jes{\'u}s Cid-Sueiro and Carlos Alberola L{\'o}pez} } @proceedings {casaseca2006comparative, title = {A comparative study on microcalcification detection methods with posterior probability estimation based on Gaussian mixture models}, journal = {Engineering in Medicine and Biology Society, 2005. IEEE-EMBS 2005. 27th Annual International Conference of the}, year = {2005}, pages = {49{\textendash}54}, publisher = {IEEE}, author = {P. Casaseca-de-la-Higuera and Juan I. Arribas and Emma Mu{\~n}oz-Moreno and Carlos Alberola L{\'o}pez} } @inbook {arribas2005estimation, title = {Estimation of Posterior Probabilities with Neural Networks: Application to Microcalcification Detection in Breast Cancer Diagnosis}, booktitle = {Handbook of Neural Engineering}, year = {2005}, pages = {41{\textendash}58}, publisher = {Wiley Online Library}, organization = {Wiley Online Library}, author = {Juan I. 
Arribas and Jes{\'u}s Cid-Sueiro and Carlos Alberola L{\'o}pez} } @article {420, title = {A model selection algorithm for a posteriori probability estimation with neural networks}, journal = {IEEE Transactions on Neural Networks}, volume = {16}, year = {2005}, pages = {799--809}, abstract = {This paper proposes a novel algorithm to jointly determine the structure and the parameters of a posteriori probability model based on neural networks (NNs). It makes use of well-known ideas of pruning, splitting, and merging neural components and takes advantage of the probabilistic interpretation of these components. The algorithm, so called a posteriori probability model selection (PPMS), is applied to an NN architecture called the generalized softmax perceptron (GSP) whose outputs can be understood as probabilities although results shown can be extended to more general network architectures. Learning rules are derived from the application of the expectation-maximization algorithm to the GSP-PPMS structure. Simulation results show the advantages of the proposed algorithm with respect to other schemes. {\textcopyright} 2005 IEEE.

}, keywords = {algorithm, Algorithms, article, artificial neural network, Automated, automated pattern recognition, Biological, biological model, Breast Neoplasms, breast tumor, classification, cluster analysis, computer analysis, Computer-Assisted, computer assisted diagnosis, Computer simulation, Computing Methodologies, decision support system, Decision Support Techniques, Diagnosis, Estimation, evaluation, Expectation-maximization, Generalized Softmax Perceptron (GSP), human, Humans, mathematical computing, Mathematical models, methodology, Models, Model selection, Neural networks, Neural Networks (Computer), Numerical Analysis, Objective function, Pattern recognition, Posterior probability, Probability, Statistical, statistical model, statistics, Stochastic Processes}, issn = {10459227}, doi = {10.1109/TNN.2005.849826}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-23044459586\&partnerID=40\&md5=f00e7d86a625cfc466373a2a938276d0}, author = {Juan I. Arribas and Jes{\'u}s Cid-Sueiro} } @conference {417, title = {A radius and ulna skeletal age assessment system}, booktitle = {2005 IEEE Workshop on Machine Learning for Signal Processing}, year = {2005}, address = {Mystic, CT}, abstract = {An end to end system to partially automate the TW3 bone age assessment procedure is proposed. The system comprises the detailed analysis of the two more important bones in TW3: the radius and ulna wrist bones. First, a generalization of K-means algorithm is presented to semi-automatically segment the contour of the bones and thus extract up to 89 features describing shapes and textures from bones. Second, a well-founded feature selection criterion based on the statistical properties of data is used in order to properly choose the most relevant features. Third, bone age is estimated with the help of a Generalized Softmax Perceptron (GSP) Neural Network (NN) whose optimal complexity is estimated via the Posterior Probability Model Selection (PPMS) algorithm. 
We can then predict the different development stages in both radius and ulna, from which we are able to score and estimate the bone age of a patient in years and finally we compare the NN results with those from the pediatrician expert discrepancies. {\textcopyright} 2005 IEEE.

}, keywords = {Algorithms, Bone, Feature extraction, Generalized Softmax Perceptron (GSP), Living systems studies, Neural networks, Probability Model Selection (PPMS), Skeletal age assessment system}, isbn = {0780395174; 9780780395176}, doi = {10.1109/MLSP.2005.1532903}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-33749052083\&partnerID=40\&md5=eefa29ac09f4efa304b613cf07ab8d10}, author = {Antonio Trist{\'a}n-Vega and Juan I. Arribas} } @conference {415, title = {Neural network fusion strategies for identifying breast masses}, booktitle = {IEEE International Conference on Neural Networks - Conference Proceedings}, year = {2004}, address = {Budapest}, abstract = {In this work, we introduce the Perceptron Average neural network fusion strategy and implemented a number of other fusion strategies to identify breast masses in mammograms as malignant or benign with both balanced and imbalanced input features. We numerically compare various fixed and trained fusion rules, i.e., the Majority Vote, Simple Average, Weighted Average, and Perceptron Average, when applying them to a binary statistical pattern recognition problem. To judge from the experimental results, the Weighted Average approach outperforms the other fusion strategies with balanced input features, while the Perceptron Average is superior and achieves the goals with lowest standard deviation with imbalanced ensembles. We concretely analyze the results of above fusion strategies, state the advantages of fusing the component networks, and provide our particular broad sense perspective about information fusion in neural networks.

}, keywords = {Biological organs, Breast cancers, Component neural networks (CNN), Image segmentation, Information fusions, Learning algorithms, Linear systems, Mammography, Mathematical models, Multilayer neural networks, Pattern recognition, Posterior probabilities, Tumors}, isbn = {0780383591}, doi = {10.1109/IJCNN.2004.1381010}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-10844231826\&partnerID=40\&md5=2be794a5832413fed34152d61dd49388}, author = {Y Wu and J He and Y Man and Juan I. Arribas} } @conference {414, title = {A fully automatic algorithm for contour detection of bones in hand radiographs using active contours}, booktitle = {IEEE International Conference on Image Processing}, year = {2003}, address = {Barcelona}, abstract = {This paper presents an algorithm for automatically detecting bone contours from hand radiographs using active contours. Prior knowledge is first used to locate initial contours for the snakes inside each bone of interest. Next, an adaptive snake algorithm is applied so that parameters are properly adjusted for each bone specifically. We introduce a novel truncation technique to prevent the external forces of the snake from pulling the contour outside the bones' boundaries, yielding excellent results.

}, keywords = {Active contours, Algorithms, Bone, Concentric circumferences, Contour measurement, Medical imaging, Object recognition, Radiography}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0344271749\&partnerID=40\&md5=5fcf06edb482cc1527b2e8d3a940065b}, author = {Rodrigo de Luis-Garc{\'\i}a and Marcos Martin-Fernandez and Juan I. Arribas and Carlos Alberola L{\'o}pez} } @conference {luis2003fully, title = {A fully automatic algorithm for contour detection of bones in hand radiographs using active contours}, booktitle = {Image Processing, 2003. ICIP 2003. Proceedings. 2003 International Conference on}, volume = {3}, year = {2003}, pages = {III{\textendash}421}, publisher = {IEEE}, organization = {IEEE}, author = {Rodrigo de Luis-Garc{\'\i}a and Marcos Martin-Fernandez and Juan I. Arribas and Carlos Alberola L{\'o}pez} } @conference {413, title = {Fusing Output Information in Neural Networks: Ensemble Performs Better}, booktitle = {Annual International Conference of the IEEE Engineering in Medicine and Biology - Proceedings}, year = {2003}, address = {Cancun}, abstract = {A neural network ensemble is a learning paradigm where a finite number of component neural networks are trained for the same task. Previous research suggests that an ensemble as a whole is often more accurate than any of the single component networks. 
This paper focuses on the advantages of fusing different nature network architectures, and to determine the appropriate information fusion algorithm in component neural networks by several approaches within hard decision classifiers, when solving a binary pattern recognition problem. We numerically simulated and compared the different fusion approaches in terms of the mean-square error rate in testing data set, over synthetically generated binary Gaussian noisy data, and stated the advantages of fusing the hard outputs of different component networks to make a final hard decision classification. The results of the experiments indicate that neural network ensembles can indeed improve the overall accuracy for classification problems; in all fusion architectures tested, the ensemble correct classification rates are better than those achieved by the individual component networks. Finally we are nowadays comparing the above mentioned hard decision classifiers with new soft decision classifier architectures that make use of the additional continuous type intermediate network soft outputs, fulfilling probability fundamental laws (positive, and add to unity), which can be understood as the a posteriori probabilities of a given pattern to belong to a certain class.

}, keywords = {Algorithms, Backpropagation, Classification (of information), Computer simulation, Decision making, Estimation, Gaussian noise (electronic), Information fusions, Mathematical models, Medical imaging, Model selection, Multilayer neural networks, Neural network ensembles, Pattern recognition, Probability, Probability estimation, Problem solving, Regularization, Statistical methods, Statistical pattern recognition, Vectors}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-1542301061\&partnerID=40\&md5=32dbadb3b6ac3c6ae1ea33d89b52c75f}, author = {Y Wu and Juan I. Arribas} } @conference {arribas2003neural, title = {Neural posterior probabilities for microcalcification detection in breast cancer diagnoses}, booktitle = {Neural Engineering, 2003. Conference Proceedings. First International IEEE EMBS Conference on}, year = {2003}, pages = {660{\textendash}663}, publisher = {IEEE}, organization = {IEEE}, author = {Juan I. Arribas and Carlos Alberola L{\'o}pez and Mateos-Marcos, A and Jes{\'u}s Cid-Sueiro} } @proceedings {de2002neural, title = {A neural architecture for bone age assessment}, journal = {Proc. of the IASTED International Conference on Signal Processing, Pattern Recognition \& Applications}, year = {2002}, pages = {161{\textendash}166}, author = {Rodrigo de Luis-Garc{\'\i}a and Juan I. Arribas and Santiago Aja-Fern{\'a}ndez and Carlos Alberola L{\'o}pez} } @article {409, title = {Cost functions to estimate a posteriori probabilities in multiclass problems}, journal = {IEEE Transactions on Neural Networks}, volume = {10}, year = {1999}, pages = {645--656}, abstract = {The problem of designing cost functions to estimate a posteriori probabilities in multiclass problems is addressed in this paper. We establish necessary and sufficient conditions that these costs must satisfy in one-class one-output networks whose outputs are consistent with probability laws. 
We focus our attention on a particular subset of the corresponding cost functions; those which verify two usually interesting properties: symmetry and separability (well-known cost functions, such as the quadratic cost or the cross entropy are particular cases in this subset). Finally, we present a universal stochastic gradient learning rule for single-layer networks, in the sense of minimizing a general version of these cost functions for a wide family of nonlinear activation functions.

}, keywords = {Cost functions, Estimation, Functions, Learning algorithms, Multiclass problems, Neural networks, Pattern recognition, Probability, Problem solving, Random processes, Stochastic gradient learning rule}, issn = {10459227}, doi = {10.1109/72.761724}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032643080\&partnerID=40\&md5=d528195bd6ec84531e59ddd2ececcd46}, author = {Jes{\'u}s Cid-Sueiro and Juan I. Arribas and S Urban-Munoz and A R Figueiras-Vidal} } @conference {412, title = {Estimates of constrained multi-class a posteriori probabilities in time series problems with neural networks}, booktitle = {Proceedings of the International Joint Conference on Neural Networks}, year = {1999}, publisher = {IEEE, United States}, organization = {IEEE, United States}, address = {Washington, DC, USA}, abstract = {In time series problems, where time ordering is a crucial issue, the use of Partial Likelihood Estimation (PLE) represents a specially suitable method for the estimation of parameters in the model. We propose a new general supervised neural network algorithm, Joint Network and Data Density Estimation (JNDDE), that employs PLE to approximate conditional probability density functions for multi-class classification problems. The logistic regression analysis is generalized to multiple class problems with softmax regression neural network used to model the a-posteriori probabilities such that they are approximated by the network outputs. Constraints to the network architecture, as well as to the model of data, are imposed, resulting in both a flexible network architecture and distribution modeling. We consider application of JNDDE to channel equalization and present simulation results.

}, keywords = {Approximation theory, Computer simulation, Constraint theory, Data structures, Joint network-data density estimation (JNDDE), Mathematical models, Multi-class a posteriori probabilities, Neural networks, Partial likelihood estimation (PLE), Probability density function, Regression analysis}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033325263\&partnerID=40\&md5=8c6134020b0b2a9c5ab05b131c070b88}, author = {Juan I. Arribas and Jes{\'u}s Cid-Sueiro and T Adali and H Ni and B Wang and A R Figueiras-Vidal} } @conference {411, title = {Neural architectures for parametric estimation of a posteriori probabilities by constrained conditional density functions}, booktitle = {Neural Networks for Signal Processing - Proceedings of the IEEE Workshop}, year = {1999}, publisher = {IEEE, Piscataway, NJ, United States}, organization = {IEEE, Piscataway, NJ, United States}, address = {Madison, WI, USA}, abstract = {A new approach to the estimation of {\textquoteright}a posteriori{\textquoteright} class probabilities using neural networks, the Joint Network and Data Density Estimation (JNDDE), is presented in this paper. It is based on the estimation of the conditional data density functions, with some restrictions imposed by the classifier structure; the Bayes{\textquoteright} rule is used to obtain the {\textquoteright}a posteriori{\textquoteright} probabilities from these densities. The proposed method is applied to three different network structures: the logistic perceptron (for the binary case), the softmax perceptron (for multi-class problems) and a generalized softmax perceptron (that can be used to map arbitrarily complex probability functions). Gaussian mixture models are used for the conditional densities. The method has the advantage of establishing a distinction between the network parameters and the model parameters. Complexity on any of them can be fixed as desired. 
Maximum Likelihood gradient-based rules for the estimation of the parameters can be obtained. It is shown that JNDDE exhibits more robust convergence characteristics than other methods of a posteriori probability estimation, such as those based on the minimization of a Strict Sense Bayesian (SSB) cost function.

}, keywords = {Asymptotic stability, Constraint theory, Data structures, Gaussian mixture models, Joint network and data density estimation, Mathematical models, Maximum likelihood estimation, Neural networks, Probability}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033321049\&partnerID=40\&md5=7967fa377810cc0c3e6a4d9020024b80}, author = {Juan I. Arribas and Jes{\'u}s Cid-Sueiro and T Adali and A R Figueiras-Vidal} } @conference {410, title = {Neural networks to estimate ML multi-class constrained conditional probability density functions}, booktitle = {Proceedings of the International Joint Conference on Neural Networks}, year = {1999}, publisher = {IEEE, United States}, organization = {IEEE, United States}, address = {Washington, DC, USA}, abstract = {In this paper, a new algorithm, the Joint Network and Data Density Estimation (JNDDE), is proposed to estimate the {\textquoteleft}a posteriori{\textquoteright} probabilities of the targets with neural networks in multiple-class problems. It is based on the estimation of conditional density functions for each class with some restrictions or constraints imposed by the classifier structure, and the use of Bayes{\textquoteright} rule to force the a posteriori probabilities at the output of the network, known here as an implicit set. The method is applied to train perceptrons by means of Gaussian mixture inputs, as a particular example for the Generalized Softmax Perceptron (GSP) network. The method has the advantage of providing a clear distinction between the network architecture and the model of the data constraints, giving network parameters or weights on one side and data over parameters on the other. MLE stochastic gradient based rules are obtained for JNDDE. This algorithm can be applied to hybrid labeled and unlabeled learning in a natural fashion.

}, keywords = {Generalized softmax perceptron (GSP) network, Joint network and data density estimation (JNDDE), Mathematical models, Maximum likelihood estimation, Neural networks, Probability density function, Random processes}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0033326060\&partnerID=40\&md5=bb38c144dac0872f3a467dc12170e6b6}, author = {Juan I. Arribas and Jes{\'u}s Cid-Sueiro and T Adali and A R Figueiras-Vidal} }