Zesch, T. & Gurevych, I.
(2010):
Wisdom of crowds versus wisdom of linguists - measuring the semantic relatedness of words.
In: Natural Language Engineering,
Vol. 16,
Number: 1,
Year: 2010.
Pages: 25-59.
@article{journals/nle/ZeschG10,
author = {Zesch, Torsten and Gurevych, Iryna},
title = {Wisdom of crowds versus wisdom of linguists - measuring the semantic relatedness of words.},
journal = {Natural Language Engineering},
year = {2010},
volume = {16},
number = {1},
pages = {25-59},
url = {http://dblp.uni-trier.de/db/journals/nle/nle16.html#ZeschG10},
keywords = {datasets, kallimachos, measure, posts, relatedness, semantic}
}
%0 = article
%A = Zesch, Torsten and Gurevych, Iryna
%D = 2010
%T = Wisdom of crowds versus wisdom of linguists - measuring the semantic relatedness of words.
%U = http://dblp.uni-trier.de/db/journals/nle/nle16.html#ZeschG10
Du, L.; Buntine, W. L. & Jin, H.
(2010):
Sequential Latent Dirichlet Allocation: Discover Underlying Topic Structures within a Document.
In: ICDM,
@inproceedings{conf/icdm/DuBJ10,
author = {Du, Lan and Buntine, Wray Lindsay and Jin, Huidong},
title = {Sequential Latent Dirichlet Allocation: Discover Underlying Topic Structures within a Document.},
editor = {Webb, Geoffrey I. and Liu, Bing and Zhang, Chengqi and Gunopulos, Dimitrios and Wu, Xindong},
booktitle = {ICDM},
publisher = {IEEE Computer Society},
year = {2010},
pages = {148-157},
url = {http://dblp.uni-trier.de/db/conf/icdm/icdm2010.html#DuBJ10},
isbn = {978-0-7695-4256-0},
keywords = {genre, kallimachos, plot, toread}
}
%0 = inproceedings
%A = Du, Lan and Buntine, Wray Lindsay and Jin, Huidong
%B = ICDM
%D = 2010
%I = IEEE Computer Society
%T = Sequential Latent Dirichlet Allocation: Discover Underlying Topic Structures within a Document.
%U = http://dblp.uni-trier.de/db/conf/icdm/icdm2010.html#DuBJ10
Levy, O. & Goldberg, Y.
(2014):
Linguistic Regularities in Sparse and Explicit Word Representations.
In: CoNLL,
@inproceedings{conf/conll/LevyG14,
author = {Levy, Omer and Goldberg, Yoav},
title = {Linguistic Regularities in Sparse and Explicit Word Representations.},
editor = {Morante, Roser and Yih, Wen-tau},
booktitle = {CoNLL},
publisher = {ACL},
year = {2014},
pages = {171-180},
url = {http://dblp.uni-trier.de/db/conf/conll/conll2014.html#LevyG14},
isbn = {978-1-941643-02-0},
keywords = {kallimachos, posts, representation, similarity, toread, word}
}
%0 = inproceedings
%A = Levy, Omer and Goldberg, Yoav
%B = CoNLL
%D = 2014
%I = ACL
%T = Linguistic Regularities in Sparse and Explicit Word Representations.
%U = http://dblp.uni-trier.de/db/conf/conll/conll2014.html#LevyG14
Yu, H.-F.; Jain, P.; Kar, P. & Dhillon, I. S.
(2013):
Large-scale Multi-label Learning with Missing Labels.
Abstract: The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.
@misc{yu2013largescale,
author = {Yu, Hsiang-Fu and Jain, Prateek and Kar, Purushottam and Dhillon, Inderjit S.},
title = {Large-scale Multi-label Learning with Missing Labels},
year = {2013},
note = {arXiv:1307.5101},
url = {http://arxiv.org/abs/1307.5101},
keywords = {classification, kallimachos, label, large, learning, multi},
abstract = {The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.}
}
%0 = misc
%A = Yu, Hsiang-Fu and Jain, Prateek and Kar, Purushottam and Dhillon, Inderjit S.
%D = 2013
%T = Large-scale Multi-label Learning with Missing Labels
%U = http://arxiv.org/abs/1307.5101
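The squared-loss ERM with missing labels described in the abstract above admits a compact illustration. The following is a minimal sketch of that idea, not the authors' implementation: it fits a low-rank predictor by gradient descent, scoring the squared loss only on observed label entries. The function name low_rank_multilabel_erm and all parameters are our own, and the explicit rank constraint merely stands in for the trace-norm regularizer analyzed in the paper.

import numpy as np

def low_rank_multilabel_erm(X, Y, mask, rank=10, lam=0.1, lr=0.01, iters=500):
    # Sketch: fit scores Z = X @ U @ V.T with squared loss on observed
    # entries only (mask == 1); the fixed rank plays the role of the
    # low-rank-promoting trace-norm regularization from the abstract.
    n, d = X.shape
    L = Y.shape[1]
    rng = np.random.default_rng(0)
    U = rng.normal(scale=0.01, size=(d, rank))
    V = rng.normal(scale=0.01, size=(L, rank))
    for _ in range(iters):
        R = mask * (X @ U @ V.T - Y)      # residual on observed labels only
        gU = X.T @ (R @ V) / n + lam * U  # gradient w.r.t. U
        gV = R.T @ (X @ U) / n + lam * V  # gradient w.r.t. V
        U -= lr * gU
        V -= lr * gV
    return U, V

# Toy usage: 100 examples, 20 features, 50 labels, ~30% of entries observed.
X = np.random.randn(100, 20)
Y = (np.random.rand(100, 50) < 0.1).astype(float)
mask = (np.random.rand(100, 50) < 0.3).astype(float)
U, V = low_rank_multilabel_erm(X, Y, mask)
scores = X @ U @ V.T  # predicted label scores

Swapping the fixed-rank factorization for proximal steps on the nuclear norm would bring the sketch closer to the trace-norm analysis the paper actually studies.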
Mirowski, P.; Ranzato, M. & LeCun, Y.
(2010):
Dynamic Auto-Encoders for Semantic Indexing.
@inproceedings{mirowski2010dynamic,
author = {Mirowski, Piotr and Ranzato, Marc'Aurelio and LeCun, Yann},
title = {Dynamic Auto-Encoders for Semantic Indexing},
booktitle = {Proceedings of the NIPS 2010 Workshop on Deep Learning},
year = {2010},
url = {http://yann.lecun.com/exdb/publis/pdf/mirowski-nipsdl-10.pdf},
keywords = {deep, kallimachos, lda, learning, model, toread}
}
%0 = inproceedings
%A = Mirowski, Piotr and Ranzato, Marc'Aurelio and LeCun, Yann
%B = Proceedings of the NIPS 2010 Workshop on Deep Learning
%D = 2010
%T = Dynamic Auto-Encoders for Semantic Indexing
%U = http://yann.lecun.com/exdb/publis/pdf/mirowski-nipsdl-10.pdf
Karampatziakis, N. & Mineiro, P.
(2013):
Discriminative Features via Generalized Eigenvectors.
Abstract: Representing examples in a way that is compatible with the underlying classifier can greatly enhance the performance of a learning system. In this paper we investigate scalable techniques for inducing discriminative features by taking advantage of simple second order structure in the data. We focus on multiclass classification and show that features extracted from the generalized eigenvectors of the class conditional second moments lead to classifiers with excellent empirical performance. Moreover, these features have attractive theoretical properties, such as inducing representations that are invariant to linear transformations of the input. We evaluate classifiers built from these features on three different tasks, obtaining state of the art results.
@misc{karampatziakis2013discriminative,
author = {Karampatziakis, Nikos and Mineiro, Paul},
title = {Discriminative Features via Generalized Eigenvectors},
year = {2013},
note = {arXiv:1310.1934},
url = {http://arxiv.org/abs/1310.1934},
keywords = {analysis, eigenvector, feature, kallimachos},
abstract = {Representing examples in a way that is compatible with the underlying classifier can greatly enhance the performance of a learning system. In this paper we investigate scalable techniques for inducing discriminative features by taking advantage of simple second order structure in the data. We focus on multiclass classification and show that features extracted from the generalized eigenvectors of the class conditional second moments lead to classifiers with excellent empirical performance. Moreover, these features have attractive theoretical properties, such as inducing representations that are invariant to linear transformations of the input. We evaluate classifiers built from these features on three different tasks, obtaining state of the art results.}
}
%0 = misc
%A = Karampatziakis, Nikos and Mineiro, Paul
%D = 2013
%T = Discriminative Features via Generalized Eigenvectors
%U = http://arxiv.org/abs/1310.1934
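The construction named in this abstract, features from generalized eigenvectors of class-conditional second moments, can be sketched in a few lines. The snippet below is our own minimal reading, not the authors' code; geev_features and its parameters are hypothetical. For a class pair (a, b) it solves C_a v = lambda C_b v with C_c = E[xx^T | y = c] and uses squared projections onto the top eigenvectors as features.

import numpy as np
from scipy.linalg import eigh

def geev_features(X, y, class_a, class_b, k=5, reg=1e-6):
    # Class-conditional second moments C_c = E[x x^T | y = c].
    Xa, Xb = X[y == class_a], X[y == class_b]
    Ca = Xa.T @ Xa / max(len(Xa), 1)
    Cb = Xb.T @ Xb / max(len(Xb), 1)
    # Generalized eigenproblem C_a v = lambda C_b v; a small ridge keeps
    # C_b positive definite, as the solver requires.
    w, V = eigh(Ca, Cb + reg * np.eye(X.shape[1]))
    # Keep directions where class a has large energy relative to class b.
    top = V[:, np.argsort(w)[::-1][:k]]
    return (X @ top) ** 2  # squared projections as features

# Toy usage: two classes, 20-dimensional inputs.
X = np.random.randn(200, 20)
y = np.random.randint(0, 2, size=200)
F = geev_features(X, y, class_a=0, class_b=1)
print(F.shape)  # (200, 5)

In the multiclass setting the abstract describes, one would repeat this over class pairs and concatenate the resulting features.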
Kohlschütter, C.; Fankhauser, P. & Nejdl, W.
(2010):
Boilerplate Detection using Shallow Text Features.
In: Proc. of the 3rd ACM International Conference on Web Search and Data Mining (WSDM 2010), New York City, NY, USA,
@inproceedings{conf/wsdm/KohlschutterFN10,
author = {Kohlschütter, Christian and Fankhauser, Peter and Nejdl, Wolfgang},
title = {Boilerplate Detection using Shallow Text Features},
booktitle = {Proc. of the 3rd ACM International Conference on Web Search and Data Mining (WSDM 2010), New York City, NY, USA},
year = {2010},
keywords = {features, kallimachos, text, toread}
}
%0 = inproceedings
%A = Kohlschütter, Christian and Fankhauser, Peter and Nejdl, Wolfgang
%B = Proc. of the 3rd ACM International Conference on Web Search and Data Mining (WSDM 2010), New York City, NY, USA
%D = 2010
%T = Boilerplate Detection using Shallow Text Features