@article{868,
  title    = {Automatic non-destructive video estimation of maturation levels in {Fuji} apple ({Malus Malus pumila}) fruit in orchard based on colour ({Vis}) and spectral ({NIR}) data},
  journal  = {Biosystems Engineering},
  volume   = {195},
  year     = {2020},
  pages    = {136--151},
  abstract = {Non-destructive estimates information on the desired properties of fruit without damaging them. The objective of this work is to present an algorithm for the automatic and non-destructive estimation of four maturity stages (unripe, half-ripe, ripe, or overripe) of Fuji apples (Malus Malus pumila) using both colour and spectral data from fruit. In order to extract spectral and colour data to train a proposed system, 170 samples of Fuji apples were collected. Colour and spectral features were extracted using a CR-400 Chroma Meter colorimeter and a custom set up. The second component of colour space and near infrared (NIR) spectrum data in wavelength ranges of 535{\textendash}560 nm, 835{\textendash}855 nm, and 950{\textendash}975 nm, were used to train the proposed algorithm. A hybrid artificial neural network-simulated annealing algorithm (ANN-SA) was used for classification purposes. A total of 1000 iterations were conducted to evaluate the reliability of the classification process. Results demonstrated that after training the correction classification rate (CCR, accuracy) was, at the best state, 100\% (test set) using both colour and spectral data. The CCR of the four different classifiers were 93.27\%, 99.62\%, 98.55\%, and 99.59\%, for colour features, spectral data wavelength ranges of 535{\textendash}560 nm, 835{\textendash}855 nm, and 950{\textendash}975 nm, respectively, over the test set. These results suggest that the proposed method is capable of the non-destructive estimation of different maturity stages of Fuji apple with a remarkable accuracy, in particular within the 535{\textendash}560 nm wavelength range.},
  doi      = {10.1016/j.biosystemseng.2020.04.015},
  url      = {https://www.sciencedirect.com/science/article/pii/S1537511020301148},
  author   = {Pourdarbani, Razieh and Sabzi, Sajad and Kalantari, Davood and Karimzadeh, Rouhollah and Ilbeygi, Elham and Arribas, J. I.}
}

@article{867,
  title    = {An automatic visible-range video weed detection, segmentation and classification prototype in potato field},
  journal  = {Heliyon},
  volume   = {6},
  year     = {2020},
  pages    = {e03685},
  abstract = {Weeds might be defined as destructive plants that grow and compete with agricultural crops in order to achieve water and nutrients. Uniform spray of herbicides is nowadays a common cause in crops poisoning, environment pollution and high cost of herbicide consumption. Site-specific spraying is a possible solution for the problems that occur with uniform spray in fields. For this reason, a machine vision prototype is proposed in this study based on video processing and meta-heuristic classifiers for online identification and classification of Marfona potato plant (Solanum tuberosum) and 4299 samples from five weed plant varieties: Malva neglecta (mallow), Portulaca oleracea (purslane), Chenopodium album L (lamb{\textquoteright}s quarters), Secale cereale L (rye) and Xanthium strumarium (coklebur). In order to properly train the machine vision system, various videos taken from two Marfona potato fields within a surface of six hectares are used. After extraction of texture features based on the gray level co-occurrence matrix (GLCM), color features, spectral descriptors of texture, moment invariants and shape features, six effective discriminant features were selected: the standard deviation of saturation (S) component in HSV color space, difference of first and seventh moment invariants, mean value of hue component (H) in HSI color space, area to length ratio, average blue-difference chrominance (Cb) component in YCbCr color space and standard deviation of in-phase (I) component in YIQ color space. Classification results show a high accuracy of 98\% correct classification rate (CCR) over the test set, being able to properly identify potato plant from previously mentioned five different weed varieties. Finally, the machine vision prototype was tested in field under real conditions and was able to properly detect, segment and classify weed from potato plant at a speed of up to 0.15 m/s.},
  doi      = {10.1016/j.heliyon.2020.e03685},
  url      = {https://www.sciencedirect.com/science/article/pii/S2405844020305302},
  author   = {Sabzi, Sajad and Abbaspour-Gilandeh, Yousef and Arribas, J. I.}
}

@article{862,
  title    = {An Automatic Non-Destructive Method for the Classification of the Ripeness Stage of {Red Delicious} Apples in Orchards Using Aerial Video},
  journal  = {Agronomy},
  volume   = {9},
  number   = {2},
  year     = {2019},
  pages    = {84},
  abstract = {The estimation of the ripening state in orchards helps improve post-harvest processes. Picking fruits based on their stage of maturity can reduce the cost of storage and increase market outcomes. Moreover, aerial images and the estimated ripeness can be used as indicators for detecting water stress and determining the water applied during irrigation. Additionally, they can also be related to the crop coefficient (Kc) of seasonal water needs. The purpose of this research is to develop a new computer vision algorithm to detect the existing fruits in aerial images of an apple cultivar (of Red Delicious variety) and estimate their ripeness stage among four possible classes: unripe, half-ripe, ripe, and overripe. The proposed method is based on a combination of the most effective color features and a classifier based on artificial neural networks optimized with genetic algorithms. The obtained results indicate an average classification accuracy of 97.88\%, over a dataset of 8390 images and 27,687 apples, and values of the area under the ROC (receiver operating characteristic) curve near or above 0.99 for all classes. We believe this is a remarkable performance that allows a proper non-intrusive estimation of ripening that will help to improve harvesting strategies.},
  doi      = {10.3390/agronomy9020084},
  url      = {https://www.mdpi.com/2073-4395/9/2/84},
  author   = {Sabzi, S. and Abbaspour-Gilandeh, Yousef and Garcia-Mateos, G. and Ruiz-Canales, A. and Molina-Martinez, J. M. and Arribas, J. I.}
}

@article{858,
  title    = {An automatic and non-intrusive hybrid computer vision system for the estimation of peel thickness in {Thomson} orange},
  journal  = {Spanish Journal of Agricultural Research},
  volume   = {16},
  year     = {2018},
  pages    = {e0204},
  abstract = {Orange peel has important flavor and nutrition properties and is often used for making jam and oil in the food industry. For previous reasons, oranges with high peel thickness are valuable. In order to properly estimate peel thickness in Thomson orange fruit, based on a number of relevant image features (area, eccentricity, perimeter, length/area, blue component, green component, red component, width, contrast, texture, width/area, width/length, roughness, and length) a novel automatic and non-intrusive approach based on computer vision with a hybrid particle swarm optimization (PSO), genetic algorithm (GA) and artificial neural network (ANN) system is proposed. Three features (width/area, width/length and length/area ratios) were selected as inputs to the system. A total of 100 oranges were used, performing cross validation with 100 repeated experiments with uniform random samples test sets. Taguchi{\textquoteright}s robust optimization technique was applied to determine the optimal set of parameters. Prediction results for orange peel thickness (mm) based on the levels that were achieved by Taguchi{\textquoteright}s method were evaluated in several ways, including orange peel thickness true-estimated boxplots for the 100 orange database and various error parameters: the sum square error (SSE), the mean absolute error (MAE), the coefficient of determination (R2), the root mean square error (RMSE), and the mean square error (MSE), resulting in mean error parameter values of R2=0.854{\textpm}0.052, MSE=0.038{\textpm}0.010, and MAE=0.159{\textpm}0.023, over the test set, which to our best knowledge are remarkable numbers for an automatic and non-intrusive approach with potential application to real-time orange peel thickness estimation in the food industry.},
  doi      = {10.5424/sjar/2018164-11185},
  url      = {https://revistas.inia.es/index.php/sjar/article/view/11185},
  author   = {Javadikia, H. and Sabzi, S. and Arribas, J. I.}
}

@article{698,
  title    = {Abnormal Capillary Vasodynamics Contribute to Ictal Neurodegeneration in Epilepsy},
  journal  = {Scientific Reports},
  volume   = {7},
  year     = {2017},
  abstract = {Seizure-driven brain damage in epilepsy accumulates over time, especially in the hippocampus, which can lead to sclerosis, cognitive decline, and death. Excitotoxicity is the prevalent model to explain ictal neurodegeneration. Current labeling technologies cannot distinguish between excitotoxicity and hypoxia, however, because they share common molecular mechanisms. This leaves open the possibility that undetected ischemic hypoxia, due to ictal blood flow restriction, could contribute to neurodegeneration previously ascribed to excitotoxicity. We tested this possibility with Confocal Laser Endomicroscopy (CLE) and novel stereological analyses in several models of epileptic mice. We found a higher number and magnitude of NG2+ mural-cell mediated capillary constrictions in the hippocampus of epileptic mice than in that of normal mice, in addition to spatial coupling between capillary constrictions and oxidative stressed neurons and neurodegeneration. These results reveal a role for hypoxia driven by capillary blood flow restriction in ictal neurodegeneration. {\textcopyright} 2017 The Author(s).},
  doi      = {10.1038/srep43276},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85014072909\&doi=10.1038\%2fsrep43276\&partnerID=40\&md5=e9d3567266bdc360a7addc92be350c8d},
  author   = {Leal-Campanario, R. and Alarcon-Martinez, L. and Rieiro, H. and Martinez-Conde, S. and Alarcon-Martinez, T. and Zhao, X. and LaMee, J. and Popp, P.J. and Calhoun, M.E. and Arribas, J. I. and Schlegel, A.A. and Di Stasi, L.L. and Rho, J.M. and Inge, L. and Otero-Millan, J. and Treiman, D.M. and Macknik, S.L.}
}

@article{423,
  title    = {Automatic {Bayesian} classification of healthy controls, bipolar disorder, and schizophrenia using intrinsic connectivity maps from {fMRI} data},
  journal  = {IEEE Transactions on Biomedical Engineering},
  volume   = {57},
  year     = {2010},
  pages    = {2850--2860},
  abstract = {We present a method for supervised, automatic, and reliable classification of healthy controls, patients with bipolar disorder, and patients with schizophrenia using brain imaging data. The method uses four supervised classification learning machines trained with a stochastic gradient learning rule based on the minimization of Kullback{\textendash}Leibler divergence and an optimal model complexity search through posterior probability estimation. Prior to classification, given the high dimensionality of functional MRI (fMRI) data, a dimension reduction stage comprising two steps is performed: first, a one-sample univariate t-test mean-difference Tscore approach is used to reduce the number of significant discriminative functional activated voxels, and then singular value decomposition is performed to further reduce the dimension of the input patterns to a number comparable to the limited number of subjects available for each of the three classes. Experimental results using functional brain imaging (fMRI) data include receiver operation characteristic curves for the three-way classifier with area under curve values around 0.82, 0.89, and 0.90 for healthy control versus nonhealthy, bipolar disorder versus nonbipolar, and schizophrenia patients versus nonschizophrenia binary problems, respectively. The average three-way correct classification rate (CCR) is in the range of 70\%-72\%, for the test set, remaining close to the estimated Bayesian optimal CCR theoretical upper bound of about 80\%, estimated from the one nearest-neighbor classifier over the same data. {\textcopyright} 2010 IEEE.},
  keywords = {Algorithms, Artificial Intelligence, Bayes Theorem, Bayesian learning, Bayesian networks, Biological, Brain, Case-Control Studies, Classifiers, Computer-Assisted, Diseases, Functional MRI (fMRI), Humans, Learning machines, Learning systems, Magnetic Resonance Imaging, Models, Operation characteristic, Optimization, ROC Curve, Reproducibility of Results, Signal Processing, Singular value decomposition, Statistical tests, Stochastic models, Student t test, area under the curve, article, bipolar disorder, classification, controlled study, functional magnetic resonance imaging, human, machine learning, major clinical study, neuroimaging, patient coding, receiver operating characteristic, reliability, schizophrenia},
  issn     = {0018-9294},
  doi      = {10.1109/TBME.2010.2080679},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78649311169\&partnerID=40\&md5=d3b90f1a3ee4ef209d131ef986e142db},
  author   = {Arribas, J. I. and Calhoun, V. D. and Adali, T.}
}