@inproceedings{vinh2009information,
  author    = {Vinh, Nguyen Xuan and Epps, Julien and Bailey, James},
  title     = {Information Theoretic Measures for Clusterings Comparison: Is a Correction for Chance Necessary?},
  booktitle = {{ICML} '09: Proceedings of the 26th Annual International Conference on Machine Learning},
  pages     = {1073--1080},
  year      = {2009},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Montr{\'e}al, Qu{\'e}bec, Canada},
  isbn      = {978-1-60558-516-1},
  doi       = {10.1145/1553374.1553511},
  url       = {http://portal.acm.org/citation.cfm?id=1553511},
  abstract  = {Information theoretic based measures form a fundamental class of similarity measures for comparing clusterings, beside the class of pair-counting based and set-matching based measures. In this paper, we discuss the necessity of correction for chance for information theoretic based measures for clusterings comparison. We observe that the baseline for such measures, i.e. average value between random partitions of a data set, does not take on a constant value, and tends to have larger variation when the ratio between the number of data points and the number of clusters is small. This effect is similar in some other non-information theoretic based measures such as the well-known Rand Index. Assuming a hypergeometric model of randomness, we derive the analytical formula for the expected mutual information value between a pair of clusterings, and then propose the adjusted version for several popular information theoretic based measures. Some examples are given to demonstrate the need and usefulness of the adjusted measures.},
}