2023
Saeed, Farah; Sun, Shangpeng; Rodriguez-Sanchez, Javier; Snider, John; Liu, Tianming; Li, Changying
Cotton plant part 3D segmentation and architectural trait extraction using point voxel convolutional neural networks Journal Article
In: Plant Methods, vol. 19, no. 1, pp. 33, 2023, ISSN: 1746-4811.
@article{Saeed2023,
title = {Cotton plant part 3D segmentation and architectural trait extraction using point voxel convolutional neural networks},
author = {Farah Saeed and Shangpeng Sun and Javier Rodriguez-Sanchez and John Snider and Tianming Liu and Changying Li},
url = {https://doi.org/10.1186/s13007-023-00996-1},
doi = {10.1186/s13007-023-00996-1},
issn = {1746-4811},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Plant Methods},
volume = {19},
number = {1},
pages = {33},
abstract = {Plant architecture can influence crop yield and quality. Manual extraction of architectural traits is, however, time-consuming, tedious, and error-prone. Trait estimation from 3D data addresses occlusion issues through the availability of depth information, while deep learning approaches enable feature learning without manual design. The goal of this study was to develop a data processing workflow by leveraging 3D deep learning models and a novel 3D data annotation tool to segment cotton plant parts and derive important architectural traits.},
keywords = {deep learning, High-throughput phenotyping, LiDAR, machine learning},
pubstate = {published},
tppubtype = {article}
}
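The study above segments plant parts with point-voxel convolutional networks, which pair per-point features with features computed on a coarse voxel grid. Below is a minimal Python/NumPy sketch of the voxelization step only, assuming an (N, 3) point cloud; the grid resolution and the occupancy-only encoding are illustrative choices, not the authors' implementation.

import numpy as np

def voxelize(points, resolution=32):
    """Map an (N, 3) point cloud onto a dense occupancy grid.

    A point-voxel network would typically average learned point
    features per voxel; plain occupancy keeps the sketch minimal.
    """
    # Normalize coordinates into the unit cube [0, 1)
    mins = points.min(axis=0)
    span = points.max(axis=0) - mins + 1e-9
    normed = (points - mins) / span
    # Convert to integer voxel indices, clipped to the grid
    idx = np.clip((normed * resolution).astype(int), 0, resolution - 1)
    grid = np.zeros((resolution,) * 3, dtype=np.float32)
    grid[idx[:, 0], idx[:, 1], idx[:, 2]] = 1.0
    return grid

# Usage with a random stand-in for a plant scan
occupancy = voxelize(np.random.rand(10000, 3))
print(int(occupancy.sum()), "occupied voxels")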
2022
Rodriguez-Sanchez, Javier; Li, Changying; Paterson, Andrew
Cotton yield estimation from aerial imagery using machine learning approaches Journal Article
In: Frontiers in Plant Science, vol. 13, 2022.
@article{RodriguezSanchez2022,
title = {Cotton yield estimation from aerial imagery using machine learning approaches},
author = {Javier Rodriguez-Sanchez and Changying Li and Andrew Paterson},
url = {https://www.frontiersin.org/articles/10.3389/fpls.2022.870181/full},
year = {2022},
date = {2022-04-01},
urldate = {2022-04-01},
journal = {Frontiers in Plant Science},
volume = {13},
keywords = {High-throughput phenotyping, machine learning},
pubstate = {published},
tppubtype = {article}
}
Adke, Shrinidhi; Li, Changying; Rasheed, Khaled M.; Maier, Frederick W.
Supervised and Weakly Supervised Deep Learning for Segmentation and Counting of Cotton Bolls Using Proximal Imagery Journal Article
In: Sensors, vol. 22, no. 10, 2022, ISSN: 1424-8220.
@article{Adke2022,
title = {Supervised and Weakly Supervised Deep Learning for Segmentation and Counting of Cotton Bolls Using Proximal Imagery},
author = {Shrinidhi Adke and Changying Li and Khaled M. Rasheed and Frederick W. Maier},
url = {https://www.mdpi.com/1424-8220/22/10/3688},
doi = {10.3390/s22103688},
issn = {1424-8220},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Sensors},
volume = {22},
number = {10},
abstract = {The total boll count from a plant is one of the most important phenotypic traits for cotton breeding and is also an important factor for growers to estimate the final yield. With the recent advances in deep learning, many supervised learning approaches have been implemented to perform phenotypic trait measurement from images for various crops, but few studies have been conducted to count cotton bolls from field images. Supervised learning models require a vast number of annotated images for training, which has become a bottleneck for machine learning model development. The goal of this study is to develop both fully supervised and weakly supervised deep learning models to segment and count cotton bolls from proximal imagery. A total of 290 RGB images of cotton plants from both potted (indoor and outdoor) and in-field settings were taken by consumer-grade cameras and the raw images were divided into 4350 image tiles for further model training and testing. Two supervised models (Mask R-CNN and S-Count) and two weakly supervised approaches (WS-Count and CountSeg) were compared in terms of boll count accuracy and annotation costs. The results revealed that the weakly supervised counting approaches performed well with RMSE values of 1.826 and 1.284 for WS-Count and CountSeg, respectively, whereas the fully supervised models achieved RMSE values of 1.181 and 1.175 for S-Count and Mask R-CNN, respectively, when the number of bolls in an image patch was less than 10. In terms of data annotation costs, the weakly supervised approaches were at least 10 times more cost efficient than the supervised approach for boll counting. In the future, the deep learning models developed in this study can be extended to other plant organs, such as main stalks, nodes, and primary and secondary branches. Both the supervised and weakly supervised deep learning models for boll counting with low-cost RGB images can be used by cotton breeders, physiologists, and growers alike to improve crop breeding and yield estimation.},
keywords = {deep learning, machine learning},
pubstate = {published},
tppubtype = {article}
}
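One supervised baseline above is Mask R-CNN, where the boll count is simply the number of confident instance detections in a tile. A hedged sketch with torchvision follows; the COCO-pretrained weights and the 0.5 score threshold are stand-ins, and a real boll counter would be fine-tuned on annotated cotton image tiles as described in the paper.

import torch
from torchvision.models.detection import maskrcnn_resnet50_fpn

# COCO-pretrained weights are a placeholder; they do not know cotton bolls.
model = maskrcnn_resnet50_fpn(weights="DEFAULT")
model.eval()

def count_instances(image_tensor, score_threshold=0.5):
    """Count detected instances in a (3, H, W) float tensor scaled to [0, 1]."""
    with torch.no_grad():
        output = model([image_tensor])[0]
    return int((output["scores"] > score_threshold).sum())

# Usage with a random stand-in tile
print(count_instances(torch.rand(3, 512, 512)))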
Tan, Chenjiao; Li, Changying; He, Dongjian; Song, Huaibo
Towards real-time tracking and counting of seedlings with a one-stage detector and optical flow Journal Article
In: Computers and Electronics in Agriculture, vol. 193, pp. 106683, 2022, ISSN: 0168-1699.
@article{TAN2022106683,
title = {Towards real-time tracking and counting of seedlings with a one-stage detector and optical flow},
author = {Chenjiao Tan and Changying Li and Dongjian He and Huaibo Song},
url = {https://www.sciencedirect.com/science/article/pii/S0168169921007006},
doi = {10.1016/j.compag.2021.106683},
issn = {0168-1699},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Computers and Electronics in Agriculture},
volume = {193},
pages = {106683},
abstract = {The population of crop seedlings is important for breeders and growers to evaluate the emergence rate of different cultivars and the necessity of replanting, but manual counting of plant seedlings is time-consuming and tedious. Building upon our prior work, we advanced the cotton seedling tracking method by incorporating a one-stage object detection deep neural network and optical flow to improve tracking speed and counting accuracy. Videos of cotton seedlings were captured using consumer-grade video cameras from the top view. You Only Look Once Version 4 (YOLOv4), a one-stage object detection network, was trained to detect cotton seedlings in each frame and to generate bounding boxes. To associate the same seedlings between adjacent frames, an optical flow-based tracking method was adopted to estimate camera motions. By comparing the positions of bounding boxes predicted by optical flow and detected by the YOLOv4 network in the same frame, the number of cotton seedlings was updated. The trained YOLOv4 model achieved high accuracy under conditions of occlusions, blurry images, complex backgrounds, and extreme illuminations. The F1 score of the final detection model was 0.98 and the average precision was 99.12%. Important tracking metrics were compared to evaluate the tracking performance. The Multiple-Object Tracking Accuracy (MOTA) and ID switch of the proposed tracking method were 72.8% and 0.1%, respectively. Counting results showed that the relative error of all testing videos was 3.13%. Compared with the Kalman filter and particle filter-based methods, our optical flow-based method generated fewer errors on testing videos because of higher accuracy of motion estimation. Compared with our previous work, the RMSE of the optical flow-based method decreased by 0.54 and the counting speed increased from 2.5 to 10.8 frames per second. The counting speed could reach 16.6 frames per second when the input resolution was reduced to 1280 × 720 pixels with only a 0.45% reduction in counting accuracy. The proposed method provides an automatic and near real-time tracking approach for counting of multiple cotton seedlings in video frames with improved speed and accuracy, which will benefit plant breeding and precision crop management.},
keywords = {Cotton seedling, Counting, Deep convolutional neural network, deep learning, machine learning, object detection, Optical flow},
pubstate = {published},
tppubtype = {article}
}
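The tracker above predicts where each seedling's bounding box should appear in the next frame by estimating camera motion with optical flow, then compares predictions against fresh YOLOv4 detections. A hedged OpenCV sketch of the motion-prediction step follows; Farneback dense flow and the median statistic are stand-ins for the paper's exact formulation.

import cv2
import numpy as np

def predict_box(prev_gray, gray, box):
    """Shift an (x, y, w, h) box by the median dense flow inside it.

    prev_gray, gray: consecutive grayscale frames as uint8 arrays.
    The median flow within the box approximates the camera-induced shift.
    """
    flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    x, y, w, h = box
    dx = float(np.median(flow[y:y + h, x:x + w, 0]))
    dy = float(np.median(flow[y:y + h, x:x + w, 1]))
    return (int(round(x + dx)), int(round(y + dy)), w, h)

A full tracker would then match each predicted box to the detector's boxes in the new frame (for example by IoU) and count unmatched detections as new seedlings.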
Petti, Daniel; Li, Changying
Weakly-supervised learning to automatically count cotton flowers from aerial imagery Journal Article
In: Computers and Electronics in Agriculture, vol. 194, pp. 106734, 2022, ISSN: 0168-1699.
@article{Petti2022,
title = {Weakly-supervised learning to automatically count cotton flowers from aerial imagery},
author = {Daniel Petti and Changying Li},
url = {https://www.sciencedirect.com/science/article/pii/S0168169922000515},
doi = {10.1016/j.compag.2022.106734},
issn = {0168-1699},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Computers and Electronics in Agriculture},
volume = {194},
pages = {106734},
abstract = {Counting plant flowers is a common task with applications for estimating crop yields and selecting favorable genotypes. Typically, this requires a laborious manual process, rendering it impractical to obtain accurate flower counts throughout the growing season. The model proposed in this study, based on Convolutional Neural Networks (CNNs), uses weak supervision to automate the counting task for cotton flowers using imagery collected from an unmanned aerial vehicle (UAV). Furthermore, the model is trained using Multiple Instance Learning (MIL) in order to reduce the required amount of annotated data. MIL is a binary classification task in which any image with at least one flower falls into the positive class, and all others are negative. In the process, a novel loss function was developed that is designed to improve the performance of image-processing models that use MIL. The model is trained on a large dataset of cotton plant imagery that was collected over several years and will be made publicly available. Additionally, an active-learning-based approach is employed in order to generate the annotations for the dataset while minimizing the required amount of human intervention. Despite having minimal supervision, the model still demonstrates good performance on the testing dataset. Multiple models were tested with different numbers of parameters and input sizes, achieving a minimum average absolute count error of 2.43. Overall, this study demonstrates that a weakly-supervised model is a promising method for solving the flower counting problem while minimizing the human labeling effort.},
keywords = {Active learning, deep learning, High-throughput phenotyping, machine learning, Multiple-instance learning, Object counting},
pubstate = {published},
tppubtype = {article}
}
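In the MIL formulation above, an image is a bag of pixels and the only label is whether the bag contains at least one flower, so training reduces to aggregating a per-pixel score map into a single bag score. The PyTorch sketch below uses a plain spatial max with binary cross-entropy; it illustrates the MIL setup only and does not reproduce the paper's novel loss function.

import torch
import torch.nn.functional as F

def mil_bag_loss(score_map, bag_label):
    """Binary MIL loss over per-pixel logit maps.

    score_map: (B, H, W) logits from a counting network.
    bag_label: (B,) floats, 1.0 if the image contains at least one flower.
    """
    # Max over space: the bag is positive if any pixel fires
    bag_logit = score_map.flatten(1).max(dim=1).values
    return F.binary_cross_entropy_with_logits(bag_logit, bag_label)

# Usage with random stand-ins
logits = torch.randn(4, 64, 64)
labels = torch.tensor([1.0, 0.0, 1.0, 1.0])
print(mil_bag_loss(logits, labels))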
2021
Ni, Xueping; Li, Changying; Jiang, Huanyu; Takeda, Fumiomi
Three-dimensional photogrammetry with deep learning instance segmentation to extract berry fruit harvestability traits Journal Article
In: ISPRS Journal of Photogrammetry and Remote Sensing, vol. 171, pp. 297-309, 2021, ISSN: 0924-2716.
@article{NI2021297,
title = {Three-dimensional photogrammetry with deep learning instance segmentation to extract berry fruit harvestability traits},
author = {Xueping Ni and Changying Li and Huanyu Jiang and Fumiomi Takeda},
url = {https://www.sciencedirect.com/science/article/pii/S0924271620303178},
doi = {10.1016/j.isprsjprs.2020.11.010},
issn = {0924-2716},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
volume = {171},
pages = {297-309},
abstract = {Fruit cluster characteristics such as compactness, maturity, berry number, and berry size are important phenotypic traits associated with harvestability and yield of blueberry genotypes and can be used to monitor berry development and improve crop management. The goal of this study was to develop a complete framework of 3D segmentation for individual blueberries as they develop in clusters and to extract blueberry cluster traits. To achieve this goal, an image-capturing system was developed to capture blueberry images to facilitate 3D reconstruction and a 2D-3D projection-based photogrammetric pipeline was proposed to extract berry cluster traits. The reconstruction was performed for four southern highbush blueberry cultivars (‘Emerald’, ‘Farthing’, ‘Meadowlark’ and ‘Star’) with 10 cluster samples for each cultivar based on photogrammetry. A minimum bounding box was created to surround a 3D blueberry cluster to calculate compactness as the ratio of berry volume and minimum bounding box volume. Mask R-CNN was used to segment individual blueberries with the maturity property from 2D images and the instance masks were projected onto 3D point clouds to establish 2D-3D correspondences. The developed trait extraction algorithm was used to segment individual 3D blueberries to obtain berry number, individual berry volume, and berry maturity. Berry maturity was used to calculate cluster maturity as the ratio of the mature berry (blue colored fruit) number and the total berry (blue, reddish, and green colored fruit) number comprising the cluster. The accuracy of determining the fruit number in a cluster is 97.3%. The linear regression for cluster maturity has an R² of 0.908 with an RMSE of 0.068. The cluster berry volume has an RMSE of 2.92 cm³ compared with the ground truth, indicating that the individual berry volume has an error of less than 0.292 cm³ for clusters with a berry number greater than 10. The statistical analyses of the traits for the four cultivars reveal that, in the middle of April, ‘Emerald’ and ‘Farthing’ were more compact than ‘Meadowlark’ and ‘Star’, and the mature berry volume of ‘Farthing’ was greater than that of ‘Emerald’ and ‘Meadowlark’, while ‘Star’ had the smallest mature berry size. This study develops an effective method based on 3D photogrammetry and 2D instance segmentation that can determine blueberry cluster traits accurately from a large number of samples and can be used for fruit development monitoring, yield estimation, and harvest time prediction.},
keywords = {2D-3D projection, 3D reconstruction, Blueberry traits, deep learning, machine learning, mask R-CNN},
pubstate = {published},
tppubtype = {article}
}
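Compactness above is the ratio of berry volume to minimum bounding box volume. The NumPy sketch below computes that ratio under two stated simplifications: berries are treated as spheres, and a PCA-aligned box stands in for the true minimum-volume bounding box used in the paper.

import numpy as np

def cluster_compactness(berry_radii, cluster_points):
    """Compactness = total berry volume / oriented bounding box volume.

    berry_radii: radii of the segmented berries (spheres assumed).
    cluster_points: (N, 3) point cloud of the whole cluster.
    """
    berry_volume = np.sum(4.0 / 3.0 * np.pi * np.asarray(berry_radii) ** 3)
    centered = cluster_points - cluster_points.mean(axis=0)
    # Rotate into the principal axes, then take axis-aligned extents
    _, _, axes = np.linalg.svd(centered, full_matrices=False)
    rotated = centered @ axes.T
    extents = rotated.max(axis=0) - rotated.min(axis=0)
    return berry_volume / np.prod(extents)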
2020
Adke, S.; Von Mogel, K. H.; Jiang, Y.; Li, C.
Instance Segmentation to Estimate Consumption of Corn Ears by Wild Animals for GMO Preference Tests Journal Article
In: Frontiers in Artificial Intelligence, vol. 3, no. 119, 2020.
@article{adke2020instance,
title = {Instance Segmentation to Estimate Consumption of Corn Ears by Wild Animals for GMO Preference Tests},
author = {S. Adke and K.H. Von Mogel and Y. Jiang and C. Li},
url = {https://www.frontiersin.org/articles/10.3389/frai.2020.593622/abstract},
year = {2020},
date = {2020-12-30},
urldate = {2020-12-30},
journal = {Frontiers in Artificial Intelligence},
volume = {3},
number = {119},
keywords = {deep learning, machine learning, mask R-CNN},
pubstate = {published},
tppubtype = {article}
}
Jiang, Y.; Li, C.; Xu, R.; Sun, S.; Robertson, J. S.; Paterson, A. H.
DeepFlower: a deep learning-based approach to characterize flowering patterns of cotton plants in the field Journal Article
In: Plant Methods, vol. 16, no. 156, 2020.
@article{jiang2020deepflower,
title = {DeepFlower: a deep learning-based approach to characterize flowering patterns of cotton plants in the field},
author = {Y. Jiang and C. Li and R. Xu and S. Sun and J. S. Robertson and A.H. Paterson},
url = {https://plantmethods.biomedcentral.com/articles/10.1186/s13007-020-00698-y},
doi = {10.1186/s13007-020-00698-y},
year = {2020},
date = {2020-12-07},
urldate = {2020-12-07},
journal = {Plant Methods},
volume = {16},
number = {156},
keywords = {deep learning, machine learning},
pubstate = {published},
tppubtype = {article}
}
Ni, X.; Li, C.; Jiang, H.; Takeda, F.
Deep learning image segmentation and extraction of blueberry fruit traits associated with harvestability and yield Journal Article
In: Horticulture Research, vol. 7, no. 1, pp. 1-14, 2020.
@article{Ni2020,
title = {Deep learning image segmentation and extraction of blueberry fruit traits associated with harvestability and yield},
author = {X. Ni and C. Li and H. Jiang and F. Takeda},
url = {https://www.nature.com/articles/s41438-020-0323-3},
year = {2020},
date = {2020-07-01},
urldate = {2020-07-01},
journal = {Horticulture Research},
volume = {7},
number = {1},
pages = {1-14},
keywords = {deep learning, machine learning},
pubstate = {published},
tppubtype = {article}
}
Jiang, Yu; Li, Changying
Convolutional neural networks for image-based high throughput plant phenotyping: A review Journal Article
In: Plant Phenomics, vol. 2020, no. 4152816, 2020.
@article{Yu2020,
title = {Convolutional neural networks for image-based high throughput plant phenotyping: A review},
author = {Yu Jiang and Changying Li},
url = {https://spj.sciencemag.org/journals/plantphenomics/2020/4152816/},
doi = {10.34133/2020/4152816},
year = {2020},
date = {2020-02-20},
urldate = {2020-02-20},
journal = {Plant Phenomics},
volume = {2020},
number = {4152816},
keywords = {CNN, deep learning, machine learning, review},
pubstate = {published},
tppubtype = {article}
}
Zhang, M.; Jiang, Y.; Li, C.; Yang, F.
Fully convolutional networks for blueberry bruising and calyx segmentation using hyperspectral transmittance imaging Journal Article
In: Biosystems Engineering, vol. 192, pp. 159-175, 2020.
@article{Zhang2019b,
title = {Fully convolutional networks for blueberry bruising and calyx segmentation using hyperspectral transmittance imaging },
author = {M. Zhang and Y. Jiang and C. Li and F. Yang},
url = {https://www.sciencedirect.com/science/article/pii/S1537511020300301?dgcid=author},
doi = {10.1016/j.biosystemseng.2020.01.018},
year = {2020},
date = {2020-01-27},
urldate = {2020-01-27},
journal = {Biosystems Engineering},
volume = {192},
pages = {159-175},
keywords = {deep learning, hyperspectral, machine learning},
pubstate = {published},
tppubtype = {article}
}
2019
Jiang, Y.; Li, C.; Paterson, A.; Robertson, J.
DeepSeedling: Deep convolutional network and Kalman filter for plant seedling detection and counting in the field Journal Article
In: Plant Methods, vol. 15, no. 1, pp. 141, 2019.
@article{Jiang2019,
title = {DeepSeedling: Deep convolutional network and Kalman filter for plant seedling detection and counting in the field },
author = { Y. Jiang and C. Li and A. Paterson and J. Robertson},
url = {https://plantmethods.biomedcentral.com/articles/10.1186/s13007-019-0528-3#citeas},
doi = {10.1186/s13007-019-0528-3},
year = {2019},
date = {2019-11-23},
urldate = {2019-11-23},
journal = {Plant Methods},
volume = {15},
number = {1},
pages = {141},
keywords = {deep learning, machine learning},
pubstate = {published},
tppubtype = {article}
}
2017
Xu, R.; Li, C.; Paterson, A. H.; Jiang, Y.; Sun, S.; Robertson, J. S.
Aerial Images and Convolutional Neural Network for Cotton Bloom Detection Journal Article
In: Frontiers in Plant Science, vol. 8, pp. 2235, 2017.
@article{Xu2018,
title = {Aerial Images and Convolutional Neural Network for Cotton Bloom Detection},
author = {R. Xu and C. Li and A.H. Paterson and Y. Jiang and S. Sun and J. S. Robertson},
url = {http://sensinglab.engr.uga.edu/wp-content/uploads/2019/11/Aerial-Images-and-Convolutional-Neural-Network-for-Cotton-Bloom-Detection.pdf},
doi = {10.3389/fpls.2017.02235},
year = {2017},
date = {2017-12-19},
urldate = {2017-12-19},
journal = {Frontiers in Plant Science},
volume = {8},
pages = {2235},
abstract = {Monitoring flower development can provide useful information for production management, estimating yield and selecting specific genotypes of crops. The main goal of this study was to develop a methodology to detect and count cotton flowers, or blooms, using color images acquired by an unmanned aerial system. The aerial images were collected from two test fields in 4 days. A convolutional neural network (CNN) was designed and trained to detect cotton blooms in raw images, and their 3D locations were calculated using the dense point cloud constructed from the aerial images with the structure from motion method. The quality of the dense point cloud was analyzed and plots with poor quality were excluded from data analysis. A constrained clustering algorithm was developed to register the same bloom detected from different images based on the 3D location of the bloom. The accuracy and incompleteness of the dense point cloud were analyzed because they affected the accuracy of the 3D location of the blooms and thus the accuracy of the bloom registration result. The constrained clustering algorithm was validated using simulated data, showing good efficiency and accuracy. The bloom count from the proposed method was comparable with the number counted manually with an error of −4 to 3 blooms for the field with a single plant per plot. However, more plots were underestimated in the field with multiple plants per plot due to hidden blooms that were not captured by the aerial images. The proposed methodology provides a high-throughput method to continuously monitor the flowering progress of cotton.},
keywords = {deep learning, High-throughput phenotyping, machine learning},
pubstate = {published},
tppubtype = {article}
}
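The constrained clustering step above registers the same bloom seen in different images by its 3D location, with the constraint that a single image cannot contribute two detections to the same cluster. A greedy, hedged sketch follows; the merge radius and the incremental centroid update are illustrative assumptions, not the paper's algorithm.

import numpy as np

def register_blooms(positions, image_ids, radius=0.05):
    """Greedy constrained clustering of 3D bloom detections.

    positions: (N, 3) bloom locations from the dense point cloud.
    image_ids: length-N source image index for each detection.
    Returns the estimated bloom count (number of clusters).
    """
    clusters = []  # each cluster: {"center": ndarray, "images": set}
    for p, img in zip(positions, image_ids):
        best, best_d = None, radius
        for c in clusters:
            d = np.linalg.norm(p - c["center"])
            # Cannot-link: skip clusters already holding this image
            if d < best_d and img not in c["images"]:
                best, best_d = c, d
        if best is None:
            clusters.append({"center": p.astype(float), "images": {img}})
        else:
            n = len(best["images"])
            best["center"] = (best["center"] * n + p) / (n + 1)
            best["images"].add(img)
    return len(clusters)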
2007
Li, C.; Heinemann, P.
ANN integrated electronic nose system for apple quality evaluation Journal Article
In: Transactions of the ASABE, vol. 50, no. 6, pp. 2285-2294, 2007.
@article{Li2007,
title = {ANN integrated electronic nose system for apple quality evaluation},
author = {C. Li and P. Heinemann},
doi = {10.13031/2013.24081},
year = {2007},
date = {2007-07-12},
urldate = {2007-07-12},
journal = {Transactions of the ASABE},
volume = {50},
number = {6},
pages = {2285-2294},
abstract = {The fresh produce industry generates more than one billion dollars each year in the U.S. market. However, fresh produce departments in grocery stores experience as much as 10% loss because the apples contain undetected defects and deteriorate in quality before they can be sold. Apple defects can create sites for pathogen development, which can cause foodborne illness. It is important to develop a non-destructive system for rapid detection and classification of defective fresh produce. In this study, an artificial neural network (ANN) based electronic nose and zNose™ system was developed to detect physically damaged apples. Principal component analysis was used for clustering plot and feature extraction. The first five principal components were selected for the electronic nose data input, and the first ten principal components were selected for the zNose™ spectrum data. Different ANN models, back-propagation networks (BP), probabilistic neural networks (PNN), and learning vector quantification networks (LVQ), were built and compared based on their classification accuracy, sensitivity and specificity, generalization, and incremental learning performance. For the Enose data, the BP and PNN classification rates of 85.3% and 85.1%, respectively, were better than the LVQ classification rate of 73.7%; for the zNose™ data, the three ANN models had similar performances, which were less favorable than those for the Enose, with classification rates of 77%, 76.8%, and 74.3%. The three ANN models' performances were also measured by their sensitivity, specificity, generalization, and incremental learning.},
keywords = {machine learning},
pubstate = {published},
tppubtype = {article}
}
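The pipeline above feeds principal components of the sensor data to ANN classifiers, one of which is a probabilistic neural network, essentially a Parzen-window classifier. Below is a self-contained sketch on random stand-in data; the sigma value, class count, and dataset sizes are illustrative assumptions.

import numpy as np
from sklearn.decomposition import PCA

class PNN:
    """Minimal probabilistic neural network (Parzen-window classifier)."""
    def __init__(self, sigma=0.5):
        self.sigma = sigma

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        self.groups_ = [X[y == c] for c in self.classes_]
        return self

    def predict(self, X):
        # Each class score is the mean Gaussian kernel response
        # over that class's training samples.
        scores = []
        for g in self.groups_:
            d2 = ((X[:, None, :] - g[None, :, :]) ** 2).sum(-1)
            scores.append(np.exp(-d2 / (2 * self.sigma ** 2)).mean(1))
        return self.classes_[np.argmax(scores, axis=0)]

# PCA front end mirroring the abstract: first five PCs of the Enose data.
X = np.random.rand(120, 32)          # 32 sensor responses per sample (stand-in)
y = np.random.randint(0, 2, 120)     # 0 = undamaged apple, 1 = damaged (stand-in)
feats = PCA(n_components=5).fit_transform(X)
model = PNN().fit(feats[:100], y[:100])
print("hold-out accuracy:", (model.predict(feats[100:]) == y[100:]).mean())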
Li, C.; Heinemann, P.; Sherry, R.
Neural network and Bayesian network fusion models to fuse electronic nose and surface acoustic wave sensor data for apple defect detection Journal Article
In: Sensors and Actuators B: Chemical, vol. 125, no. 1, pp. 301-310, 2007.
@article{Li2007b,
title = {Neural network and Bayesian network fusion models to fuse electronic nose and surface acoustic wave sensor data for apple defect detection},
author = {C. Li and P. Heinemann and R. Sherry},
doi = {10.1016/j.snb.2007.02.027},
year = {2007},
date = {2007-02-26},
urldate = {2007-02-26},
journal = {Sensors and Actuators B: Chemical},
volume = {125},
number = {1},
pages = {301-310},
abstract = {The Cyranose 320 electronic nose (Enose) and zNose™ are two instruments used to detect volatile profiles. In this research, feature level and decision level multisensor data fusion models, combined with covariance matrix adaptation evolutionary strategy (CMAES), were developed to fuse the Enose and zNose data to improve detection and classification performance for damaged apples compared with using the individual instruments alone. Principal component analysis (PCA) was used for feature extraction and probabilistic neural networks (PNN) were developed as the classifier. Three feature-based fusion schemes were compared. Dynamic selective fusion achieved an average 1.8% and a best 0% classification error rate in a total of 30 independent runs. The static selective fusion approach resulted in a 6.1% classification error rate, which was not as good as using individual sensors (4.2% for the Enose and 2.6% for the zNose) if only selected features were applied. Simply adding the Enose and zNose features without selection (non-selective fusion) worsened the classification performance with a 32.5% classification error rate. This indicates that feature selection using the CMAES is an indispensable process in multisensor data fusion, especially if multiple sources of sensors contain much irrelevant or redundant information. At the decision level, Bayesian network fusion achieved better performance than the two individual sensors, with an 11% error rate versus 13% for the Enose and 20% for the zNose. Both feature level fusion with the CMAES optimization algorithm and decision level fusion using a Bayesian network as the classifier improved system classification performance. This methodology can also be applied to other sensor fusion applications.},
keywords = {machine learning},
pubstate = {published},
tppubtype = {article}
}
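Feature-level fusion above amounts to building a single feature vector per sample from both instruments before classification. The sketch below concatenates PCA features from each sensor; the component counts follow the companion Enose paper above, random arrays stand in for the measurements, and the CMA-ES feature selection that the abstract identifies as indispensable is omitted.

import numpy as np
from sklearn.decomposition import PCA

enose = np.random.rand(150, 32)    # 32 Enose sensor responses (stand-in)
znose = np.random.rand(150, 60)    # zNose spectra; dimensionality assumed
fused = np.hstack([
    PCA(n_components=5).fit_transform(enose),
    PCA(n_components=10).fit_transform(znose),
])
print(fused.shape)  # (150, 15) fused vectors, ready for a classifier such as a PNN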
2006
Li, C.; Heinemann, P.; Reed, P.
Using genetic algorithms (GAs) and CMA evolutionary strategy to optimize electronic nose sensor selection Journal Article
In: Transactions of the ASABE, vol. 51, no. 1, pp. 321-330, 2006.
@article{Li2006,
title = {Using genetic algorithms (GAs) and CMA evolutionary strategy to optimize electronic nose sensor selection},
author = {C. Li and P. Heinemann and P. Reed},
doi = {10.13031/2013.21505},
year = {2006},
date = {2006-07-06},
urldate = {2006-07-06},
journal = {Transactions of the ASABE},
volume = {51},
number = {1},
pages = {321-330},
abstract = {The high dimensionality of electronic nose data increases the difficulty of their use in classification models. Reducing this high dimensionality helps reduce variable numbers, improve classification accuracy, and reduce computation time and sensor cost. In this research, the Cyranose 320 electronic nose, which was used for apple defect detection, was optimized by selecting only the most relevant of its internal 32 sensors using different selection methods. Two robust heuristic optimization algorithms, the genetic algorithm (GA) and covariance matrix adaptation evolutionary strategy (CMAES), were applied and compared. Although both algorithms searched for the optimal sensors, resulting in a best classification error rate of 4.4%, the average classification error rate of CMAES over 30 random-seed runs was 5.0% (s.d. = 0.006), which was better than the 5.2% (s.d. = 0.004) from the GA. The final optimal solution sets obtained by the integer GA showed that including more sensors did not guarantee better classification performance. The best reduction in classification error rate was 10% while the number of sensors was reduced by 78%. This study provided a robust and efficient optimization approach to reduce the high dimensionality of electronic nose data, which substantially improved electronic nose performance in apple defect detection while potentially reducing the overall electronic nose cost for future specific applications.},
keywords = {machine learning},
pubstate = {published},
tppubtype = {article}
}
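Sensor selection as framed above is a search over 32-bit masks, scoring each candidate sensor subset by classification performance. Below is a hedged GA sketch using cross-validated accuracy as the fitness; the k-NN classifier, population size, and mutation rate are arbitrary stand-ins, and neither the paper's GA settings nor its CMAES variant is reproduced.

import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(0)
X = rng.random((200, 32))            # 32-sensor Enose readings (stand-in)
y = rng.integers(0, 2, 200)          # defect labels (stand-in)

def fitness(mask):
    """Cross-validated accuracy using only the sensors where mask == 1."""
    if mask.sum() == 0:
        return 0.0
    clf = KNeighborsClassifier(n_neighbors=5)
    return cross_val_score(clf, X[:, mask.astype(bool)], y, cv=3).mean()

# Plain generational GA over sensor masks: tournament selection,
# uniform crossover, bit-flip mutation.
pop = rng.integers(0, 2, (20, 32))
for generation in range(15):
    scores = np.array([fitness(ind) for ind in pop])
    winners = [max(rng.choice(20, 3), key=lambda i: scores[i]) for _ in range(20)]
    parents = pop[winners]
    cross = rng.random((20, 32)) < 0.5
    children = np.where(cross, parents, parents[::-1])
    flip = rng.random((20, 32)) < 0.02
    pop = np.abs(children - flip)    # |a - b| on bits acts as XOR, flipping mutated genes
best = pop[np.argmax([fitness(ind) for ind in pop])]
print("selected sensors:", np.flatnonzero(best))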