|
1 | 1 |
|
| 2 | +@article{buda_systematic_2018, |
| 3 | + title = {A systematic study of the class imbalance problem in convolutional neural networks}, |
| 4 | + volume = {106}, |
| 5 | + issn = {0893-6080}, |
| 6 | + url = {https://www.sciencedirect.com/science/article/pii/S0893608018302107}, |
| 7 | + doi = {10.1016/j.neunet.2018.07.011}, |
| 8 | + abstract = {In this study, we systematically investigate the impact of class imbalance on classification performance of convolutional neural networks ({CNNs}) and compare frequently used methods to address the issue. Class imbalance is a common problem that has been comprehensively studied in classical machine learning, yet very limited systematic research is available in the context of deep learning. In our study, we use three benchmark datasets of increasing complexity, {MNIST}, {CIFAR}-10 and {ImageNet}, to investigate the effects of imbalance on classification and perform an extensive comparison of several methods to address the issue: oversampling, undersampling, two-phase training, and thresholding that compensates for prior class probabilities. Our main evaluation metric is area under the receiver operating characteristic curve ({ROC} {AUC}) adjusted to multi-class tasks since overall accuracy metric is associated with notable difficulties in the context of imbalanced data. Based on results from our experiments we conclude that (i) the effect of class imbalance on classification performance is detrimental; (ii) the method of addressing class imbalance that emerged as dominant in almost all analyzed scenarios was oversampling; (iii) oversampling should be applied to the level that completely eliminates the imbalance, whereas the optimal undersampling ratio depends on the extent of imbalance; (iv) as opposed to some classical machine learning models, oversampling does not cause overfitting of {CNNs}; (v) thresholding should be applied to compensate for prior class probabilities when overall number of properly classified cases is of interest.}, |
| 9 | + pages = {249--259}, |
| 10 | + journaltitle = {Neural Networks}, |
| 11 | + shortjournal = {Neural Networks}, |
| 12 | + author = {Buda, Mateusz and Maki, Atsuto and Mazurowski, Maciej A.}, |
| 13 | + urldate = {2024-02-13}, |
| 14 | + date = {2018-10-01}, |
| 15 | + keywords = {Class imbalance, Convolutional neural networks, Deep learning, Image classification}, |
| 16 | +} |
| 17 | + |
| 18 | +@article{beyan_classifying_2015, |
| 19 | + title = {Classifying imbalanced data sets using similarity based hierarchical decomposition}, |
| 20 | + volume = {48}, |
| 21 | + issn = {0031-3203}, |
| 22 | + url = {https://www.sciencedirect.com/science/article/pii/S003132031400449X}, |
| 23 | + doi = {10.1016/j.patcog.2014.10.032}, |
| 24 | + abstract = {Classification of data is difficult if the data is imbalanced and classes are overlapping. In recent years, more research has started to focus on classification of imbalanced data since real world data is often skewed. Traditional methods are more successful with classifying the class that has the most samples (majority class) compared to the other classes (minority classes). For the classification of imbalanced data sets, different methods are available, although each has some advantages and shortcomings. In this study, we propose a new hierarchical decomposition method for imbalanced data sets which is different from previously proposed solutions to the class imbalance problem. Additionally, it does not require any data pre-processing step as many other solutions need. The new method is based on clustering and outlier detection. The hierarchy is constructed using the similarity of labeled data subsets at each level of the hierarchy with different levels being built by different data and feature subsets. Clustering is used to partition the data while outlier detection is utilized to detect minority class samples. The comparison of the proposed method with state of art the methods using 20 public imbalanced data sets and 181 synthetic data sets showed that the proposed method's classification performance is better than the state of art methods. It is especially successful if the minority class is sparser than the majority class. It has accurate performance even when classes have sub-varieties and minority and majority classes are overlapping. Moreover, its performance is also good when the class imbalance ratio is low, i.e. classes are more imbalanced.}, |
| 25 | + pages = {1653--1672}, |
| 26 | + number = {5}, |
| 27 | + journaltitle = {Pattern Recognition}, |
| 28 | + shortjournal = {Pattern Recognition}, |
| 29 | + author = {Beyan, Cigdem and Fisher, Robert}, |
| 30 | + urldate = {2024-02-13}, |
| 31 | + date = {2015-05-01}, |
| 32 | + keywords = {Class imbalance problem, Clustering, Hierarchical decomposition, Minority–majority classes, Outlier detection}, |
| 33 | +} |
| 34 | + |
| 35 | +@article{haixiang_bpso-adaboost-knn_2016, |
| 36 | + title = {{BPSO}-{Adaboost}-{KNN} ensemble learning algorithm for multi-class imbalanced data classification}, |
| 37 | + volume = {49}, |
| 38 | + issn = {0952-1976}, |
| 39 | + url = {https://www.sciencedirect.com/science/article/pii/S0952197615002110}, |
| 40 | + doi = {10.1016/j.engappai.2015.09.011}, |
| 41 | + abstract = {This paper proposes an ensemble algorithm named of {BPSO}-Adaboost-{KNN} to cope with multi-class imbalanced data classification. The main idea of this algorithm is to integrate feature selection and boosting into ensemble. What’s more, we utilize a novel evaluation metric called {AUCarea} which is especially for multi-class classification. In our model {BPSO} is employed as the feature selection algorithm in which {AUCarea} is chosen as the fitness. For classification, we generate a boosting classifier in which {KNN} is selected as the basic classifier. In order to verify the effectiveness of our method, 19 benchmarks are used in our experiments. The results show that the proposed algorithm improves both the stability and the accuracy of boosting after carrying out feature selection, and the performance of our algorithm is comparable with other state-of-the-art algorithms. In statistical analyses, we apply Bland–Altman analysis to show the consistencies between {AUCarea} and other popular metrics like average G-mean, average F-value etc. Besides, we use linear regression to find deeper correlation between {AUCarea} and other metrics in order to show why {AUCarea} works well in this issue. We also put out a series of statistical studies in order to analyze if there exist significant improvements after feature selection and boosting are employed. At last, the proposed algorithm is applied in oil-bearing of reservoir recognition. The classification precision is up to 99\% in oilsk81-oilsk85 well logging data in Jianghan oilfield of China, which is 20\% higher than {KNN} classifier. Particularly, the proposed algorithm has significant superiority when distinguishing the oil layer from other layers.}, |
| 42 | + pages = {176--193}, |
| 43 | + journaltitle = {Engineering Applications of Artificial Intelligence}, |
| 44 | + shortjournal = {Engineering Applications of Artificial Intelligence}, |
| 45 | + author = {Guo, Haixiang and Li, Yijing and Li, Yanan and Liu, Xiao and Li, Jinling}, |
| 46 | + urldate = {2024-02-13}, |
| 47 | + date = {2016-03-01}, |
| 48 | + keywords = {Classification, Ensemble, Feature selection, Imbalanced data, Oil reservoir}, |
| 49 | +} |
| 50 | + |
| 51 | +@article{gong_rhsboost_2017, |
| 52 | + title = {{RHSBoost}: Improving classification performance in imbalance data}, |
| 53 | + volume = {111}, |
| 54 | + issn = {0167-9473}, |
| 55 | + url = {https://www.sciencedirect.com/science/article/pii/S016794731730018X}, |
| 56 | + doi = {10.1016/j.csda.2017.01.005}, |
| 57 | + shorttitle = {{RHSBoost}}, |
| 58 | + abstract = {Imbalance data are defined as a dataset whose proportion of classes is severely skewed. Classification performance of existing models tends to deteriorate due to class distribution imbalance. In addition, over-representation by majority classes prevents a classifier from paying attention to minority classes, which are generally more interesting. An effective ensemble classification method called {RHSBoost} has been proposed to address the imbalance classification problem. This classification rule uses random undersampling and {ROSE} sampling under a boosting scheme. According to the experimental results, {RHSBoost} appears to be an attractive classification model for imbalance data.}, |
| 59 | + pages = {1--13}, |
| 60 | + journaltitle = {Computational Statistics \& Data Analysis}, |
| 61 | + shortjournal = {Computational Statistics \& Data Analysis}, |
| 62 | + author = {Gong, Joonho and Kim, Hyunjoong}, |
| 63 | + urldate = {2024-02-13}, |
| 64 | + date = {2017-07-01}, |
| 65 | + keywords = {{AUC}, {AdaBoost}, Ensemble, Imbalanced data, {RHSBoost}, Undersampling}, |
| 66 | +} |
| 67 | + |
| 68 | +@article{haixiang_learning_2017, |
| 69 | + title = {Learning from class-imbalanced data: Review of methods and applications}, |
| 70 | + volume = {73}, |
| 71 | + issn = {0957-4174}, |
| 72 | + url = {https://www.sciencedirect.com/science/article/pii/S0957417416307175}, |
| 73 | + doi = {10.1016/j.eswa.2016.12.035}, |
| 74 | + shorttitle = {Learning from class-imbalanced data}, |
| 75 | + abstract = {Rare events, especially those that could potentially negatively impact society, often require humans’ decision-making responses. Detecting rare events can be viewed as a prediction task in data mining and machine learning communities. As these events are rarely observed in daily life, the prediction task suffers from a lack of balanced data. In this paper, we provide an in depth review of rare event detection from an imbalanced learning perspective. Five hundred and seventeen related papers that have been published in the past decade were collected for the study. The initial statistics suggested that rare events detection and imbalanced learning are concerned across a wide range of research areas from management science to engineering. We reviewed all collected papers from both a technical and a practical point of view. Modeling methods discussed include techniques such as data preprocessing, classification algorithms and model evaluation. For applications, we first provide a comprehensive taxonomy of the existing application domains of imbalanced learning, and then we detail the applications for each category. Finally, some suggestions from the reviewed papers are incorporated with our experiences and judgments to offer further research directions for the imbalanced learning and rare event detection fields.}, |
| 76 | + pages = {220--239}, |
| 77 | + journaltitle = {Expert Systems with Applications}, |
| 78 | + shortjournal = {Expert Systems with Applications}, |
| 79 | + author = {Guo, Haixiang and Li, Yijing and Shang, Jennifer and Gu, Mingyun and Huang, Yuanyue and Gong, Bing}, |
| 80 | + urldate = {2023-12-17}, |
| 81 | + date = {2017-05-01}, |
| 82 | + keywords = {Data mining, Imbalanced data, Machine learning, Rare events}, |
| 83 | +} |
| 84 | + |
2 | 85 | @online{tin_kam_ho_random_1995, |
3 | 86 | title = {Random decision forests}, |
4 | 87 | url = {https://ieeexplore.ieee.org/document/598994}, |
@@ -220,23 +303,6 @@ @online{noauthor_quest-ce_nodate |
220 | 303 | langid = {french}, |
221 | 304 | } |
222 | 305 |
|
223 | | -@article{haixiang_learning_2017, |
224 | | - title = {Learning from class-imbalanced data: Review of methods and applications}, |
225 | | - volume = {73}, |
226 | | - issn = {0957-4174}, |
227 | | - url = {https://www.sciencedirect.com/science/article/pii/S0957417416307175}, |
228 | | - doi = {10.1016/j.eswa.2016.12.035}, |
229 | | - shorttitle = {Learning from class-imbalanced data}, |
230 | | - abstract = {Rare events, especially those that could potentially negatively impact society, often require humans’ decision-making responses. Detecting rare events can be viewed as a prediction task in data mining and machine learning communities. As these events are rarely observed in daily life, the prediction task suffers from a lack of balanced data. In this paper, we provide an in depth review of rare event detection from an imbalanced learning perspective. Five hundred and seventeen related papers that have been published in the past decade were collected for the study. The initial statistics suggested that rare events detection and imbalanced learning are concerned across a wide range of research areas from management science to engineering. We reviewed all collected papers from both a technical and a practical point of view. Modeling methods discussed include techniques such as data preprocessing, classification algorithms and model evaluation. For applications, we first provide a comprehensive taxonomy of the existing application domains of imbalanced learning, and then we detail the applications for each category. Finally, some suggestions from the reviewed papers are incorporated with our experiences and judgments to offer further research directions for the imbalanced learning and rare event detection fields.}, |
231 | | - pages = {220--239}, |
232 | | - journaltitle = {Expert Systems with Applications}, |
233 | | - shortjournal = {Expert Systems with Applications}, |
234 | | - author = {Haixiang, Guo and Yijing, Li and Shang, Jennifer and Mingyun, Gu and Yuanyue, Huang and Bing, Gong}, |
235 | | - urldate = {2023-12-17}, |
236 | | - date = {2017-05-01}, |
237 | | - keywords = {Data mining, Imbalanced data, Machine learning, Rare events}, |
238 | | -} |
239 | | - |
240 | 306 | @article{galar_review_2012, |
241 | 307 | title = {A Review on Ensembles for the Class Imbalance Problem: Bagging-, Boosting-, and Hybrid-Based Approaches}, |
242 | 308 | volume = {42}, |
|
0 commit comments