@article{heidecker2024criteria,
  abstract      = {The operating environment of a highly automated vehicle is subject to change, e.g., weather, illumination, or the scenario containing different objects and other participants in which the highly automated vehicle has to navigate its passengers safely. These situations must be considered when developing and validating highly automated driving functions. This already poses a problem for training and evaluating deep learning models because without the costly labeling of thousands of recordings, not knowing whether the data contains relevant, interesting data for further model training, it is a guess under which conditions and situations the model performs poorly. For this purpose, we present corner case criteria based on the predictive uncertainty. With our corner case criteria, we are able to detect uncertainty-based corner cases of an object instance segmentation model without relying on ground truth (GT) data. We evaluated each corner case criterion using the COCO and the NuImages dataset to analyze the potential of our approach. We also provide a corner case decision function that allows us to distinguish each object into True Positive (TP), localization and/or classification corner case, or False Positive (FP). We also present our first results of an iterative training cycle that outperforms the baseline and where the data added to the training dataset is selected based on the corner case decision function.},
  archiveprefix = {arXiv},
  author        = {Heidecker, Florian and El-Khateeb, Ahmad and Bieshaar, Maarten and Sick, Bernhard},
  eid           = {arXiv:2404.11266},
  eprint        = {2404.11266},
  interhash     = {ec414ce702660858de2c1af9ff9dfdd9},
  intrahash     = {368806798fb0154e042f3c9de4945219},
  journal       = {arXiv e-prints},
  pages         = {arXiv:2404.11266},
  primaryclass  = {cs.CV},
  title         = {Criteria for Uncertainty-based Corner Cases Detection in Instance Segmentation},
  url           = {https://arxiv.org/abs/2404.11266},
  year          = {2024}
}

@article{heidecker2023sampling,
  abstract      = {The examination of uncertainty in the predictions of machine learning (ML) models is receiving increasing attention. One uncertainty modeling technique used for this purpose is Monte-Carlo (MC)-Dropout, where repeated predictions are generated for a single input. Therefore, clustering is required to describe the resulting uncertainty, but only through efficient clustering is it possible to describe the uncertainty from the model attached to each object. This article uses Bayesian Gaussian Mixture (BGM) to solve this problem. In addition, we investigate different values for the dropout rate and other techniques, such as focal loss and calibration, which we integrate into the Mask-RCNN model to obtain the most accurate uncertainty approximation of each instance and showcase it graphically.},
  archiveprefix = {arXiv},
  author        = {Heidecker, Florian and El-Khateeb, Ahmad and Sick, Bernhard},
  eid           = {arXiv:2305.14977},
  eprint        = {2305.14977},
  interhash     = {76a7df169959d6e09829f48c90d640ed},
  intrahash     = {006ff4a810436da0383fb5966522be02},
  journal       = {arXiv e-prints},
  pages         = {arXiv:2305.14977},
  primaryclass  = {cs.CV},
  title         = {Sampling-based Uncertainty Estimation for an Instance Segmentation Network},
  url           = {https://arxiv.org/abs/2305.14977},
  year          = {2023}
}