@article{alonso2008crowdsourcing,
  author     = {Alonso, Omar and Rose, Daniel E. and Stewart, Benjamin},
  title      = {Crowdsourcing for relevance evaluation},
  journal    = {SIGIR Forum},
  volume     = {42},
  number     = {2},
  pages      = {9--15},
  numpages   = {7},
  month      = nov,
  year       = {2008},
  issue_date = {December 2008},
  publisher  = {ACM},
  address    = {New York, NY, USA},
  issn       = {0163-5840},
  doi        = {10.1145/1480506.1480508},
  url        = {http://doi.acm.org/10.1145/1480506.1480508},
  acmid      = {1480508},
  abstract   = {Relevance evaluation is an essential part of the development and maintenance of information retrieval systems. Yet traditional evaluation approaches have several limitations; in particular, conducting new editorial evaluations of a search system can be very expensive. We describe a new approach to evaluation called TERC, based on the crowdsourcing paradigm, in which many online users, drawn from a large community, each performs a small evaluation task.}
}