@inproceedings{conf/wsdm/KohlschutterFN10,
  author    = {Kohlschütter, Christian and Fankhauser, Peter and Nejdl, Wolfgang},
  title     = {Boilerplate Detection using Shallow Text Features},
  booktitle = {Proceedings of the Third ACM International Conference on Web Search and Data Mining (WSDM 2010)},
  address   = {New York City, NY, USA},
  year      = {2010}
}

@article{journals/nle/ZeschG10,
  author  = {Zesch, Torsten and Gurevych, Iryna},
  title   = {Wisdom of crowds versus wisdom of linguists -- measuring the semantic relatedness of words},
  journal = {Natural Language Engineering},
  volume  = {16},
  number  = {1},
  pages   = {25--59},
  year    = {2010},
  doi     = {10.1017/S1351324909990167},
  url     = {http://dblp.uni-trier.de/db/journals/nle/nle16.html#ZeschG10}
}

@inproceedings{conf/icdm/DuBJ10,
  author    = {Du, Lan and Buntine, Wray Lindsay and Jin, Huidong},
  title     = {Sequential Latent Dirichlet Allocation: Discover Underlying Topic Structures within a Document},
  booktitle = {Proceedings of the 10th IEEE International Conference on Data Mining (ICDM 2010)},
  editor    = {Webb, Geoffrey I. and Liu, Bing and Zhang, Chengqi and Gunopulos, Dimitrios and Wu, Xindong},
  publisher = {IEEE Computer Society},
  pages     = {148--157},
  isbn      = {978-0-7695-4256-0},
  year      = {2010},
  doi       = {10.1109/ICDM.2010.51},
  url       = {http://dblp.uni-trier.de/db/conf/icdm/icdm2010.html#DuBJ10}
}

@inproceedings{conf/conll/LevyG14,
  author    = {Levy, Omer and Goldberg, Yoav},
  title     = {Linguistic Regularities in Sparse and Explicit Word Representations},
  booktitle = {Proceedings of the Eighteenth Conference on Computational Natural Language Learning (CoNLL 2014)},
  editor    = {Morante, Roser and Yih, Wen-tau},
  publisher = {ACL},
  pages     = {171--180},
  isbn      = {978-1-941643-02-0},
  year      = {2014},
  ee        = {http://aclweb.org/anthology/W/W14/W14-1618.pdf},
  url       = {http://dblp.uni-trier.de/db/conf/conll/conll2014.html#LevyG14}
}

@inproceedings{mirowski2010dynamic,
  author    = {Mirowski, Piotr and Ranzato, Marc'Aurelio and LeCun, Yann},
  title     = {Dynamic Auto-Encoders for Semantic Indexing},
  booktitle = {Proceedings of the NIPS 2010 Workshop on Deep Learning},
  year      = {2010},
  url       = {http://yann.lecun.com/exdb/publis/pdf/mirowski-nipsdl-10.pdf}
}

@misc{karampatziakis2013discriminative,
  author   = {Karampatziakis, Nikos and Mineiro, Paul},
  title    = {Discriminative Features via Generalized Eigenvectors},
  year     = {2013},
  note     = {arXiv:1310.1934},
  url      = {http://arxiv.org/abs/1310.1934},
  abstract = {Representing examples in a way that is compatible with the underlying classifier can greatly enhance the performance of a learning system. In this paper we investigate scalable techniques for inducing discriminative features by taking advantage of simple second order structure in the data. We focus on multiclass classification and show that features extracted from the generalized eigenvectors of the class conditional second moments lead to classifiers with excellent empirical performance. Moreover, these features have attractive theoretical properties, such as inducing representations that are invariant to linear transformations of the input. We evaluate classifiers built from these features on three different tasks, obtaining state of the art results.}
}

@misc{yu2013largescale,
  author   = {Yu, Hsiang-Fu and Jain, Prateek and Kar, Purushottam and Dhillon, Inderjit S.},
  title    = {Large-scale Multi-label Learning with Missing Labels},
  year     = {2013},
  note     = {arXiv:1307.5101},
  url      = {http://arxiv.org/abs/1307.5101},
  abstract = {The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.}
}