Alonso, O.; Rose, D. E. & Stewart, B.
(2008):
Crowdsourcing for relevance evaluation.
In: SIGIR Forum,
Number: 2,
Vol. 42,
Publisher: ACM.
Year: 2008.
Pages: 9-15.
Relevance evaluation is an essential part of the development and maintenance of information retrieval systems. Yet traditional evaluation approaches have several limitations; in particular, conducting new editorial evaluations of a search system can be very expensive. We describe a new approach to evaluation called TERC, based on the crowdsourcing paradigm, in which many online users, drawn from a large community, each performs a small evaluation task.
@article{alonso2008crowdsourcing,
author = {Alonso, Omar and Rose, Daniel E. and Stewart, Benjamin},
title = {Crowdsourcing for relevance evaluation},
journal = {SIGIR Forum},
publisher = {ACM},
address = {New York, NY, USA},
year = {2008},
volume = {42},
number = {2},
pages = {9--15},
url = {http://doi.acm.org/10.1145/1480506.1480508},
doi = {10.1145/1480506.1480508},
issn = {0163-5840},
keywords = {ir, crowdsourcing, relevance, evaluation},
abstract = {Relevance evaluation is an essential part of the development and maintenance of information retrieval systems. Yet traditional evaluation approaches have several limitations; in particular, conducting new editorial evaluations of a search system can be very expensive. We describe a new approach to evaluation called TERC, based on the crowdsourcing paradigm, in which many online users, drawn from a large community, each performs a small evaluation task.}
}
%0 Journal Article
%A Alonso, Omar
%A Rose, Daniel E.
%A Stewart, Benjamin
%T Crowdsourcing for relevance evaluation
%J SIGIR Forum
%V 42
%N 2
%P 9-15
%I ACM
%C New York, NY, USA
%D 2008
%U http://doi.acm.org/10.1145/1480506.1480508