@techreport{ritchie2009citation, abstract = {This thesis investigates taking words from around citations to scientific papers in order to create an enhanced document representation for improved information retrieval. This method parallels how anchor text is commonly used in Web retrieval. In previous work, words from citing documents have been used as an alternative representation of the cited document, but no previous experiment has combined them with a full-text document representation and measured effectiveness in a large-scale evaluation. The contributions of this thesis are twofold: firstly, we present a novel document representation, along with experiments to measure its effect on retrieval effectiveness, and, secondly, we document the construction of a new, realistic test collection of scientific research papers, with references (in the bibliography) and their associated citations (in the running text of the paper) automatically annotated. Our experiments show that the citation-enhanced document representation increases retrieval effectiveness across a range of standard retrieval models and evaluation measures. In Chapter 2, we give the background to our work, discussing the various areas from which we draw together ideas: information retrieval, particularly link structure analysis and anchor text indexing, and bibliometrics, in particular citation analysis. We show that there is a close relatedness of ideas between these areas but that these ideas have not been fully explored experimentally. Chapter 3 discusses the test collection paradigm for evaluation of information retrieval systems and describes how and why we built our test collection. In Chapter 4, we introduce the ACL Anthology, the archive of computational linguistics papers that our test collection is centred around. The archive contains the most prominent publications since the beginning of the field in the early 1960s, consisting of one journal plus conferences and workshops, resulting in over 10,000 papers. Chapter 5 describes how the PDF papers are prepared for our experiments, including identification of references and citations in the papers once they have been converted to plain text, and extraction of citation information into an XML database. Chapter 6 presents our experiments: we show that adding citation terms to the full text of the papers improves retrieval effectiveness by up to 7.4%, that weighting citation terms higher relative to paper terms increases the improvement, and that varying the context from which citation terms are taken has a significant effect on retrieval effectiveness. Our main hypothesis that citation terms enhance a full-text representation of scientific papers is thus proven. There are some limitations to these experiments. The relevance judgements in our test collection are incomplete, but we have experimentally verified that the test collection is, nevertheless, a useful evaluation tool. Using the Lemur toolkit constrained the method that we used to weight citation terms; we would like to experiment with a more realistic implementation of term weighting. Our experiments with different citation contexts did not identify an optimal citation context; we would like to extend the scope of our investigation. Now that our test collection exists, we can address these issues in our experiments and leave the door open for more extensive experimentation.
}, address = {Cambridge, UK}, author = {Ritchie, Anna}, institution = {University of Cambridge}, interhash = {f086fdcd7eb1df44ef67b96f2e91996c}, intrahash = {aa4271a2a958fe2c1a65dbdd508d8de7}, issn = {1476-2986}, month = mar, number = 744, title = {Citation context analysis for information retrieval}, url = {https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-744.pdf}, year = 2009 } @book{koester2006fooca, abstract = {This book deals with Formal Concept Analysis (FCA) and its application to Web Information Retrieval. It explains how Web search results retrieved by major Web search engines such as Google or Yahoo can be conceptualized leading to a human-oriented form of representation. A generalization of Web search results is conducted, leading to an FCA-based introduction of FooCA. FooCA is an application in the field of Conceptual Knowledge Processing and supports the idea of a holistic representation of Web Information Retrieval.}, address = {Mühltal}, author = {Koester, Bjoern}, interhash = {fe53b2b1fa6be34259647954fca36bf8}, intrahash = {5571d950ada3ee1892e5c043ac438271}, publisher = {Verlag Allgemeine Wissenschaft}, series = {Beiträge zur begrifflichen Wissensverarbeitung}, title = {FooCA: web information retrieval with formal concept analysis}, url = {http://www.bjoern-koester.de/fooca/web_information_retrieval_with_formal_concept_analysis.html}, year = 2006 } @inproceedings{joachims2002optimizing, abstract = {This paper presents an approach to automatically optimizing the retrieval quality of search engines using clickthrough data. Intuitively, a good information retrieval system should present relevant documents high in the ranking, with less relevant documents following below. While previous approaches to learning retrieval functions from examples exist, they typically require training data generated from relevance judgments by experts. This makes them difficult and expensive to apply. The goal of this paper is to develop a method that utilizes clickthrough data for training, namely the query-log of the search engine in connection with the log of links the users clicked on in the presented ranking. Such clickthrough data is available in abundance and can be recorded at very low cost. Taking a Support Vector Machine (SVM) approach, this paper presents a method for learning retrieval functions. From a theoretical perspective, this method is shown to be well-founded in a risk minimization framework. Furthermore, it is shown to be feasible even for large sets of queries and features. The theoretical results are verified in a controlled experiment. It shows that the method can effectively adapt the retrieval function of a meta-search engine to a particular group of users, outperforming Google in terms of retrieval quality after only a couple of hundred training examples.}, acmid = {775067}, address = {New York, NY, USA}, author = {Joachims, Thorsten}, booktitle = {Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining}, doi = {10.1145/775047.775067}, interhash = {c78df69370bbf12636eaa5233b1fba83}, intrahash = {656a83f1057c5792506d0d656ae81d26}, isbn = {1-58113-567-X}, location = {Edmonton, Alberta, Canada}, numpages = {10}, pages = {133--142}, publisher = {ACM}, title = {Optimizing search engines using clickthrough data}, url = {http://doi.acm.org/10.1145/775047.775067}, year = 2002 } @techreport{gomes2012creating, abstract = {The web became a mass means of publication that has been replacing printed media. 
However, its information is extremely ephemeral. Currently, most of the information available on the web is less than 1 year old. There are several initiatives worldwide that struggle to archive information from the web before it vanishes. However, search mechanisms to access this information are still limited and do not satisfy their users, who demand performance similar to live-web search engines. This paper presents some of the work developed to create an efficient and effective searchable web archive service, from data acquisition to user interface design. The results of this research were applied in practice to create the Portuguese Web Archive, which has been publicly available since January 2010. It supports full-text search over 1 billion contents archived from 1996 to 2010. The developed software is available as an open source project.}, address = {Portugal}, author = {Gomes, Daniel and Cruz, David and Miranda, João and Costa, Miguel and Fontes, Simão}, institution = {Foundation for National Scientific Computing}, interhash = {b5c01e5cadcc1d8ef44d48b2022144d2}, intrahash = {da5b8a339b2c3d765c3b0a7bd025af82}, month = may, title = {Creating a searchable web archive}, url = {http://web.ist.utl.pt/joaocarvalhomiranda/docs/other/creating-a-searchable-web-archive-relatorio.pdf}, year = 2012 } @article{alonso2008crowdsourcing, abstract = {Relevance evaluation is an essential part of the development and maintenance of information retrieval systems. Yet traditional evaluation approaches have several limitations; in particular, conducting new editorial evaluations of a search system can be very expensive. We describe a new approach to evaluation called TERC, based on the crowdsourcing paradigm, in which many online users, drawn from a large community, each performs a small evaluation task.}, acmid = {1480508}, address = {New York, NY, USA}, author = {Alonso, Omar and Rose, Daniel E. and Stewart, Benjamin}, doi = {10.1145/1480506.1480508}, interhash = {8441d7fed92813634f61fa148ef2b870}, intrahash = {4a47833e85558b740788607cb79ba795}, issn = {0163-5840}, issue_date = {December 2008}, journal = {SIGIR Forum}, month = nov, number = 2, numpages = {7}, pages = {9--15}, publisher = {ACM}, title = {Crowdsourcing for relevance evaluation}, url = {http://doi.acm.org/10.1145/1480506.1480508}, volume = 42, year = 2008 } @article{cha2007comprehensive, abstract = {Distance or similarity measures are essential to solve many pattern recognition problems such as classification, clustering, and retrieval problems. Various distance/similarity measures that are applicable to compare two probability density functions, pdf in short, are reviewed and categorized in both syntactic and semantic relationships. A correlation coefficient and a hierarchical clustering technique are adopted to reveal similarities among numerous distance/similarity measures.}, author = {Cha, Sung-Hyuk}, interhash = {dfaf5e38d33eaab89f3643b242910c81}, intrahash = {69e7c9ba92a049efa4c70f8f0bfdb4ea}, journal = {International Journal of Mathematical Models and Methods in Applied Sciences}, number = 4, pages = {300--307}, title = {Comprehensive Survey on Distance/Similarity Measures between Probability Density Functions}, url = {http://www.gly.fsu.edu/~parker/geostats/Cha.pdf}, volume = 1, year = 2007 } @book{manning2008introduction, abstract = {"Class-tested and coherent, this textbook teaches classical and web information retrieval, including web search and the related areas of text classification and text clustering from basic concepts.
It gives an up-to-date treatment of all aspects of the design and implementation of systems for gathering, indexing, and searching documents; methods for evaluating systems; and an introduction to the use of machine learning methods on text collections. All the important ideas are explained using examples and figures, making it perfect for introductory courses in information retrieval for advanced undergraduates and graduate students in computer science. Based on feedback from extensive classroom experience, the book has been carefully structured in order to make teaching more natural and effective. Slides and additional exercises (with solutions for lecturers) are also available through the book's supporting website to help course instructors prepare their lectures." -- Publisher's description.}, address = {New York}, author = {Manning, Christopher D. and Raghavan, Prabhakar and Schütze, Hinrich}, interhash = {2e574e46b7668a7268e7f02b46f4d9bb}, intrahash = {9f4ab13e07b48b9723113aa74224be65}, isbn = {9780521865715 0521865719}, publisher = {Cambridge University Press}, title = {Introduction to Information Retrieval}, url = {http://www.amazon.com/Introduction-Information-Retrieval-Christopher-Manning/dp/0521865719/ref=sr_1_1?ie=UTF8&qid=1337379279&sr=8-1}, year = 2008 } @inproceedings{poelmans2011mining, abstract = {Formal Concept Analysis (FCA) is an unsupervised clustering technique and many scientific papers are devoted to applying FCA in Information Retrieval (IR) research. We collected 103 papers published between 2003 and 2009 that mention FCA and information retrieval in the abstract, title or keywords. Using a prototype of our FCA-based toolset CORDIET, we converted the PDF files containing the papers to plain text, indexed them with Lucene using a thesaurus containing terms related to FCA research, and then created the concept lattice shown in this paper. We visualized, analyzed and explored the literature with concept lattices and discovered multiple interesting research streams in IR, of which we give an extensive overview. The core contributions of this paper are the innovative application of FCA to the text mining of scientific papers and the survey of FCA-based IR research.}, author = {Poelmans, Jonas and Elzinga, Paul and Viaene, Stijn and Dedene, Guido and Kuznetsov, Sergei O.}, booktitle = {Industrial Conference on Data Mining - Poster and Industry Proceedings}, editor = {Perner, Petra}, interhash = {b44d11ea5b5a4df8ee30a9c572d82051}, intrahash = {164c37be60c1a47d1727ad9b82f01237}, isbn = {978-3-942954-06-4}, pages = {82--96}, publisher = {IBaI Publishing}, title = {Text Mining Scientific Papers: a Survey on {FCA}-based Information Retrieval Research}, url = {http://dblp.uni-trier.de/db/conf/incdm/incdm2011p.html#PoelmansEVDK11}, year = 2011 } @inproceedings{zhou2005document, abstract = {The quality of document content, which is an issue that is usually ignored for the traditional ad hoc retrieval task, is a critical issue for Web search. Web pages have a huge variation in quality relative to, for example, newswire articles. To address this problem, we propose a document quality language model approach that is incorporated into the basic query likelihood retrieval model in the form of a prior probability. Our results demonstrate that, on average, the new model is significantly better than the baseline (query likelihood model) in terms of precision at the top ranks.}, acmid = {1099652}, address = {New York, NY, USA}, author = {Zhou, Yun and Croft, W.
Bruce}, booktitle = {Proceedings of the 14th ACM International Conference on Information and Knowledge Management}, doi = {10.1145/1099554.1099652}, interhash = {01264e5f48959d326724b405d3898337}, intrahash = {d190feee02f804aea11f19979d3642b8}, isbn = {1-59593-140-6}, location = {Bremen, Germany}, numpages = {2}, pages = {331--332}, publisher = {ACM}, series = {CIKM '05}, title = {Document quality models for web ad hoc retrieval}, url = {http://doi.acm.org/10.1145/1099554.1099652}, year = 2005 } @article{jarvelin2002cumulated, abstract = {Modern large retrieval environments tend to overwhelm their users by their large output. Since all documents are not of equal relevance to their users, highly relevant documents should be identified and ranked first for presentation. In order to develop IR techniques in this direction, it is necessary to develop evaluation approaches and methods that credit IR methods for their ability to retrieve highly relevant documents. This can be done by extending traditional evaluation methods, that is, recall and precision based on binary relevance judgments, to graded relevance judgments. Alternatively, novel measures based on graded relevance judgments may be developed. This article proposes several novel measures that compute the cumulative gain the user obtains by examining the retrieval result up to a given ranked position. The first one accumulates the relevance scores of retrieved documents along the ranked result list. The second one is similar but applies a discount factor to the relevance scores in order to devaluate late-retrieved documents. The third one computes the relative-to-the-ideal performance of IR techniques, based on the cumulative gain they are able to yield. These novel measures are defined and discussed, and their use is demonstrated in a case study using TREC data: sample system run results for 20 queries in TREC-7. As a relevance base we used novel graded relevance judgments on a four-point scale. The test results indicate that the proposed measures credit IR methods for their ability to retrieve highly relevant documents and allow testing of statistical significance of effectiveness differences. The graphs based on the measures also provide insight into the performance of IR techniques and allow interpretation, for example, from the user point of view.}, address = {New York, NY, USA}, author = {Järvelin, Kalervo and Kekäläinen, Jaana}, doi = {10.1145/582415.582418}, interhash = {c46348827790803e8e7465ffd1a13376}, intrahash = {12176d90012ed75f57996af0b9240d02}, issn = {1046-8188}, journal = {ACM Transactions on Information Systems}, month = oct, number = 4, pages = {422--446}, publisher = {ACM}, title = {Cumulated gain-based evaluation of IR techniques}, url = {http://portal.acm.org/citation.cfm?id=582418}, volume = 20, year = 2002 } @inproceedings{jarvelin2000ir, abstract = {This paper proposes evaluation methods based on the use of non-dichotomous relevance judgements in IR experiments. It is argued that evaluation methods should credit IR methods for their ability to retrieve highly relevant documents. This is desirable from the user point of view in modern large IR environments. The proposed methods are (1) a novel application of P-R curves and average precision computations based on separate recall bases for documents of different degrees of relevance, and (2) two novel measures computing the cumulative gain the user obtains by examining the retrieval result up to a given ranked position.
We then demonstrate the use of these evaluation methods in a case study on the effectiveness of query types, based on combinations of query structures and expansion, in retrieving documents of various degrees of relevance. The test was run with a best match retrieval system (InQuery) in a text database consisting of newspaper articles. The results indicate that the tested strong query structures are most effective in retrieving highly relevant documents. The differences between the query types are practically essential and statistically significant. More generally, the novel evaluation methods and the case demonstrate that non-dichotomous relevance assessments are applicable in IR experiments, may reveal interesting phenomena, and allow harder testing of IR methods.}, address = {New York, NY, USA}, author = {Järvelin, Kalervo and Kekäläinen, Jaana}, booktitle = {SIGIR '00: Proceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval}, doi = {10.1145/345508.345545}, interhash = {a62a44c48d24acc64cd6713f21111d72}, intrahash = {12592d5f805db5bd127ee5abae1a4325}, isbn = {1-58113-226-3}, location = {Athens, Greece}, pages = {41--48}, publisher = {ACM}, title = {IR evaluation methods for retrieving highly relevant documents}, url = {http://portal.acm.org/citation.cfm?id=345545}, year = 2000 } @book{baeza-yates1999modern, abstract = {This is a rigorous and complete textbook for a first course on information retrieval from the computer science (as opposed to a user-centred) perspective. The advent of the Internet and the enormous increase in volume of electronically stored information generally has led to substantial work on IR from the computer science perspective; this book provides an up-to-date, student-oriented treatment of the subject.}, address = {Boston, MA, USA}, author = {Baeza-Yates, Ricardo A. and Ribeiro-Neto, Berthier}, interhash = {6f78177742b3c836218aacfc7fc4c43c}, intrahash = {ead0b4af17c94074fe1c774d2f267617}, isbn = {020139829X}, publisher = {Addison-Wesley Longman Publishing Co., Inc.}, title = {Modern Information Retrieval}, url = {http://portal.acm.org/citation.cfm?id=553876}, year = 1999 }
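
The pairwise method of joachims2002optimizing reduces learning a retrieval function to classification over feature-vector differences: a clicked result is treated as preferred over results ranked above it that were skipped. A minimal sketch of that reduction, assuming preference pairs have already been extracted from a click log; the subgradient descent on a regularized hinge loss below is a stand-in for the paper's actual SVM solver, not the paper's implementation.

import numpy as np

def train_ranking_svm(preference_pairs, dim, reg=0.1, lr=0.01, epochs=100):
    # Learn w such that np.dot(w, x_preferred) > np.dot(w, x_other)
    # for each click-derived preference pair.
    w = np.zeros(dim)
    for _ in range(epochs):
        for x_pos, x_neg in preference_pairs:
            diff = x_pos - x_neg
            if np.dot(w, diff) < 1.0:
                # Margin violated: hinge-loss subgradient plus L2 penalty.
                w -= lr * (reg * w - diff)
            else:
                w -= lr * reg * w
    return w

# Toy usage: two-feature documents; the clicked one dominates the skipped one.
pairs = [(np.array([0.9, 0.7]), np.array([0.4, 0.2]))]
w = train_ranking_svm(pairs, dim=2)
# Documents are then ranked by the learned linear function np.dot(w, x).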
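
A few of the measures catalogued in cha2007comprehensive, sketched for discrete distributions represented as numpy vectors that sum to 1; the epsilon guards against log 0 and division by 0 are my addition, not part of the survey's definitions.

import numpy as np

def euclidean(p, q):
    # Member of the Minkowski (L_p) family with p = 2.
    return np.sqrt(np.sum((p - q) ** 2))

def kl_divergence(p, q, eps=1e-12):
    # Kullback-Leibler divergence; asymmetric in p and q.
    return np.sum(p * np.log((p + eps) / (q + eps)))

def jensen_shannon(p, q):
    # Symmetrized, smoothed variant of KL via the midpoint distribution.
    m = 0.5 * (p + q)
    return 0.5 * kl_divergence(p, m) + 0.5 * kl_divergence(q, m)

def bhattacharyya(p, q, eps=1e-12):
    # Distance derived from the Bhattacharyya coefficient (fidelity family).
    return -np.log(np.sum(np.sqrt(p * q)) + eps)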
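
zhou2005document folds document quality into query likelihood retrieval as a prior probability, so in log space the rank score is log P(Q|D) + log P(D). A sketch of that combination, assuming a Dirichlet-smoothed query likelihood baseline; quality_prior is a hypothetical stand-in supplied by the caller, since the paper's quality language model is not reproduced here.

import math
from collections import Counter

def score(query_terms, doc_terms, collection_tf, collection_len,
          quality_prior, mu=2000.0):
    # Rank score = log P(Q|D) + log P(D), with P(D) a per-document
    # quality prior in (0, 1] supplied by the caller.
    tf = Counter(doc_terms)
    dlen = len(doc_terms)
    log_likelihood = 0.0
    for t in query_terms:
        p_coll = collection_tf.get(t, 0) / collection_len
        # Dirichlet-smoothed document language model.
        p = (tf.get(t, 0) + mu * p_coll) / (dlen + mu)
        log_likelihood += math.log(p) if p > 0 else float("-inf")
    return log_likelihood + math.log(quality_prior)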
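
The three measures of jarvelin2002cumulated (cumulated gain, discounted cumulated gain, and the relative-to-the-ideal variant now generally known as nDCG) are compact enough to state directly. A sketch over a ranked list of graded relevance scores, using the paper's convention that ranks below the log base b are not discounted.

import math

def cg(gains):
    # Cumulated gain: sum of graded relevance scores down the ranking.
    return sum(gains)

def dcg(gains, b=2):
    # Discounted cumulated gain: the gain at rank i >= b is divided by
    # log_b(i), devaluating late-retrieved documents.
    return sum(g / max(1.0, math.log(i, b))
               for i, g in enumerate(gains, start=1))

def ndcg(gains, b=2):
    # DCG normalized by the ideal (descending-gain) ordering.
    ideal = dcg(sorted(gains, reverse=True), b)
    return dcg(gains, b) / ideal if ideal > 0 else 0.0

# Example: graded judgments on a four-point scale (0-3) for one ranking.
print(ndcg([3, 1, 0, 2]))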