@@ -1,4 +1,43 @@
 
+@inproceedings{tin_kam_ho_random_1995,
+  title = {Random decision forests},
+  url = {https://ieeexplore.ieee.org/document/598994},
+  doi = {10.1109/ICDAR.1995.598994},
+  abstract = {Decision trees are attractive classifiers due to their high execution speed. But trees derived with traditional methods often cannot be grown to arbitrary complexity for possible loss of generalization accuracy on unseen data. The limitation on complexity usually means suboptimal accuracy on training data. Following the principles of stochastic modeling, we propose a method to construct tree-based classifiers whose capacity can be arbitrarily expanded for increases in accuracy for both training and unseen data. The essence of the method is to build multiple trees in randomly selected subspaces of the feature space. Trees in different subspaces generalize their classification in complementary ways, and their combined classification can be monotonically improved. The validity of the method is demonstrated through experiments on the recognition of handwritten digits.},
+  eventtitle = {Proceedings of 3rd International Conference on Document Analysis and Recognition},
+  pages = {278--282},
+  booktitle = {Proceedings of 3rd International Conference on Document Analysis and Recognition},
+  author = {Ho, Tin Kam},
+  urldate = {2023-11-09},
+  date = {1995-08-14},
+  langid = {american},
+}
+
+@inproceedings{hegde_dimensionality_2016,
+  title = {Dimensionality reduction technique for developing undergraduate student dropout model using principal component analysis through R package},
+  url = {https://ieeexplore.ieee.org/abstract/document/7919670},
+  doi = {10.1109/ICCIC.2016.7919670},
+  abstract = {Every educational institute feels proud when its admission closes with the expected number of students. Prospective students enter the campus with many hopes, dreams and expectations; when those expectations are not met, or when they face critical circumstances, they may drop out of their registered program. Predicting undergraduate student dropout is a major challenge in the educational system due to the multidimensionality of the data. This paper focuses on dimensionality reduction of the multi-behavioral attributes of 150 students with 51 attributes to identify the factors that affect early dropout. The dataset dimensionality is reduced through Principal Component Analysis by obtaining the eigenvalues and eigenvectors of the covariance matrix and transforming the original attributes into a new set of attributes without losing information. Visualization is done with the help of the R packages factoextra and {FactoMineR}. The reduced dataset can then be used for classification. The discovered concealed knowledge can be used for better academic planning and early prediction of student dropout.},
+  eventtitle = {2016 {IEEE} International Conference on Computational Intelligence and Computing Research ({ICCIC})},
+  pages = {1--6},
+  booktitle = {2016 {IEEE} International Conference on Computational Intelligence and Computing Research ({ICCIC})},
+  author = {Hegde, Vinayak},
+  urldate = {2024-02-11},
+  date = {2016-12},
+  note = {{ISSN}: 2473-943X},
+  keywords = {Behavioral, Classification, Correlation, Covariance matrices, Education, Eigenvalues and eigenfunctions, {FactoMineR}, Feature extraction, Media, {PCA}, Principal component analysis, Student Survey, dropout, factoextra, undergraduate},
+}
+
+@article{vieira_corrosion_nodate,
+  title = {Corrosion Analysis and Identification Through Integration of Machine Learning and Cyber-Physical Sensors},
+  abstract = {Corrosion poses a significant threat to the integrity and longevity of metal structures, such as plates and buildings, impacting safety, functionality, and economic sustainability. Traditional corrosion detection methods often rely on periodic inspections, which may be time-consuming and prone to human error. Incorporating sensors improves real-time monitoring and data collection for a comprehensive understanding of the corrosion process. Utilizing machine learning algorithms allows for better analysis and prediction of datasets obtained from sensors strategically placed on structures. This aids engineers in making informed decisions on corrosion remediation. This scientific article explores innovative approaches in corrosion analysis and identification, leveraging the synergy of machine learning technologies and cyber-physical sensors.},
+  author = {Vieira, Bruno Froelich Giora},
+  langid = {english},
+}
| 33 | + |
| 34 | +@article{nawfal_autonomous_nodate, |
| 35 | + title = {Autonomous Vehicules in Last-Mile Delivery : Facing Urban Congestion and Sustainability problems}, |
| 36 | + abstract = {This research paper dives into the changing role of autonomous vehicles ({AVs}) in developing last-mile delivery in urban environments. It concerns the important challenges of urban congestion and sustainability, exacerbated by the growing demands of e-commerce. The study exposes the benefits of integrating {AVs} in urban delivery systems and gives a multi-faceted approach on how to successfully achieve this integration despite its challenging complexity. The paper aims to give how we can improve the future of urban transportation and logistics by diving in the implications of {AV} integration in last-mile delivery, from different points of views.}, |
| 37 | + author = {Nawfal, Adil}, |
| 38 | + langid = {english}, |
| 39 | +} |
| 40 | + |
 @online{noauthor_covid-19_nodate,
   title = {{COVID}-19 data {\textbar} {WHO} {COVID}-19 dashboard},
   url = {https://data.who.int/dashboards/covid19/data},
@@ -674,14 +713,6 @@ @article{lee_machine_2019
   keywords = {big data, class-imbalance, dropout, ensemble, machine learning, oversampling},
 }
 
-@online{noauthor_random_nodate,
-  title = {Random decision forests},
-  url = {https://ieeexplore.ieee.org/document/598994},
-  abstract = {Decision trees are attractive classifiers due to their high execution speed. But trees derived with traditional methods often cannot be grown to arbitrary complexity for possible loss of generalization accuracy on unseen data. The limitation on complexity usually means suboptimal accuracy on training data. Following the principles of stochastic modeling, we propose a method to construct tree-based classifiers whose capacity can be arbitrarily expanded for increases in accuracy for both training and unseen data. The essence of the method is to build multiple trees in randomly selected subspaces of the feature space. Trees in, different subspaces generalize their classification in complementary ways, and their combined classification can be monotonically improved. The validity of the method is demonstrated through experiments on the recognition of handwritten digits.},
-  urldate = {2023-11-09},
-  langid = {american},
-}
-
 @article{m_alban_she_is_with_the_faculty_of_engineering_and_applied_sciences_of_the_technical_university_cotopaxi_neural_2019,
   title = {Neural Networks to Predict Dropout at the Universities},
   volume = {9},