@article{steenweg2004neuere, abstract = {Starting from recent developments in publishing and information services, which will have a considerable influence on future scholarly publishing, the article addresses future access to open-access archives. Bringing about the urgently needed changes requires not only a change of mentality among authors but also the creation of a functioning, comprehensive infrastructure of document servers with search and archiving facilities, in order to gain the necessary acceptance among authors and readers. One way to achieve this would be an OpenRep initiative, a network of modularly structured "Institutional Repositories".}, author = {Steenweg, Helge}, interhash = {8b414acafb896664d8fc05445eec6177}, intrahash = {8ffaeaf3cbc35354380da6a0a7c32aac}, journal = {ABI-Technik}, number = 4, pages = {282--293}, title = {Neuere Entwicklungen im Informations- und Publikationswesen und ihre möglichen Auswirkungen}, url = {http://kobra.bibliothek.uni-kassel.de/bitstream/urn:nbn:de:hebis:34-200602086382/6/abi4-2004-04.pdf}, volume = 24, year = 2004 } @book{manning2008, author = {Manning, Christopher D. and Raghavan, Prabhakar and Schütze, Hinrich}, interhash = {2e574e46b7668a7268e7f02b46f4d9bb}, intrahash = {9f4ab13e07b48b9723113aa74224be65}, publisher = {Cambridge University Press}, title = {Introduction to Information Retrieval}, year = 2008 } @inproceedings{jaschke2013attribute, abstract = {We propose an approach for supporting attribute exploration by web information retrieval, in particular by posing appropriate queries to search engines, crowd sourcing systems, and the linked open data cloud. We discuss underlying general assumptions for this to work and the degree to which these can be taken for granted.}, author = {Jäschke, Robert and Rudolph, Sebastian}, booktitle = {Contributions to the 11th International Conference on Formal Concept Analysis}, editor = {Cellier, Peggy and Distel, Felix and Ganter, Bernhard}, interhash = {000ab7b0ae3ecd1d7d6ceb39de5c11d4}, intrahash = {45e900e280661d775d8da949baee3747}, month = may, organization = {Technische Universität Dresden}, pages = {19--34}, title = {Attribute Exploration on the Web}, url = {http://nbn-resolving.de/urn:nbn:de:bsz:14-qucosa-113133}, urn = {urn:nbn:de:bsz:14-qucosa-113133}, year = 2013 } @techreport{ritchie2009citation, abstract = {This thesis investigates taking words from around citations to scientific papers in order to create an enhanced document representation for improved information retrieval. This method parallels how anchor text is commonly used in Web retrieval. In previous work, words from citing documents have been used as an alternative representation of the cited document but no previous experiment has combined them with a full-text document representation and measured effectiveness in a large scale evaluation. The contributions of this thesis are twofold: firstly, we present a novel document representation, along with experiments to measure its effect on retrieval effectiveness, and, secondly, we document the construction of a new, realistic test collection of scientific research papers, with references (in the bibliography) and their associated citations (in the running text of the paper) automatically annotated. Our experiments show that the citation-enhanced document representation increases retrieval effectiveness across a range of standard retrieval models and evaluation measures.
In Chapter 2, we give the background to our work, discussing the various areas from which we draw together ideas: information retrieval, particularly link structure analysis and anchor text indexing, and bibliometrics, in particular citation analysis. We show that there is a close relatedness of ideas between these areas but that these ideas have not been fully explored experimentally. Chapter 3 discusses the test collection paradigm for evaluation of information retrieval systems and describes how and why we built our test collection. In Chapter 4, we introduce the ACL Anthology, the archive of computational linguistics papers that our test collection is centred around. The archive contains the most prominent publications since the beginning of the field in the early 1960s, consisting of one journal plus conferences and workshops, resulting in over 10,000 papers. Chapter 5 describes how the PDF papers are prepared for our experiments, including identification of references and citations in the papers, once converted to plain text, and extraction of citation information to an XML database. Chapter 6 presents our experiments: we show that adding citation terms to the full-text of the papers improves retrieval effectiveness by up to 7.4%, that weighting citation terms higher relative to paper terms increases the improvement and that varying the context from which citation terms are taken has a significant effect on retrieval effectiveness. Our main hypothesis that citation terms enhance a full-text representation of scientific papers is thus proven. There are some limitations to these experiments. The relevance judgements in our test collection are incomplete but we have experimentally verified that the test collection is, nevertheless, a useful evaluation tool. Using the Lemur toolkit constrained the method that we used to weight citation terms; we would like to experiment with a more realistic implementation of term weighting. Our experiments with different citation contexts did not conclude an optimal citation context; we would like to extend the scope of our investigation. Now that our test collection exists, we can address these issues in our experiments and leave the door open for more extensive experimentation. }, address = {Cambridge, UK}, author = {Ritchie, Anna}, institution = {University of Cambridge}, interhash = {f086fdcd7eb1df44ef67b96f2e91996c}, intrahash = {aa4271a2a958fe2c1a65dbdd508d8de7}, issn = {1476-2986}, month = mar, number = 744, title = {Citation context analysis for information retrieval}, url = {https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-744.pdf}, year = 2009 } @book{manning2008introduction, abstract = {"Class-tested and coherent, this textbook teaches classical and web information retrieval, including web search and the related areas of text classification and text clustering from basic concepts. It gives an up-to-date treatment of all aspects of the design and implementation of systems for gathering, indexing, and searching documents; methods for evaluating systems; and an introduction to the use of machine learning methods on text collections. All the important ideas are explained using examples and figures, making it perfect for introductory courses in information retrieval for advanced undergraduates and graduate students in computer science. Based on feedback from extensive classroom experience, the book has been carefully structured in order to make teaching more natural and effective. 
Slides and additional exercises (with solutions for lecturers) are also available through the book's supporting website to help course instructors prepare their lectures." -- Publisher's description.}, address = {New York}, author = {Manning, Christopher D. and Raghavan, Prabhakar and Schütze, Hinrich}, interhash = {2e574e46b7668a7268e7f02b46f4d9bb}, intrahash = {9f4ab13e07b48b9723113aa74224be65}, isbn = {9780521865715 0521865719}, publisher = {Cambridge University Press}, title = {Introduction to Information Retrieval}, url = {http://www.amazon.com/Introduction-Information-Retrieval-Christopher-Manning/dp/0521865719/ref=sr_1_1?ie=UTF8&qid=1337379279&sr=8-1}, year = 2008 } @book{koester2006fooca, abstract = {This book deals with Formal Concept Analysis (FCA) and its application to Web Information Retrieval. It explains how Web search results retrieved by major Web search engines such as Google or Yahoo can be conceptualized leading to a human-oriented form of representation. A generalization of Web search results is conducted, leading to an FCA-based introduction of FooCA. FooCA is an application in the field of Conceptual Knowledge Processing and supports the idea of a holistic representation of Web Information Retrieval.}, address = {Mühltal}, author = {Koester, Bjoern}, interhash = {fe53b2b1fa6be34259647954fca36bf8}, intrahash = {5571d950ada3ee1892e5c043ac438271}, publisher = {Verlag Allgemeine Wissenschaft}, series = {Beiträge zur begrifflichen Wissensverarbeitung}, title = {FooCA: web information retrieval with formal concept analysis}, url = {http://www.bjoern-koester.de/fooca/web_information_retrieval_with_formal_concept_analysis.html}, year = 2006 } @inproceedings{joachims2002optimizing, abstract = {This paper presents an approach to automatically optimizing the retrieval quality of search engines using clickthrough data. Intuitively, a good information retrieval system should present relevant documents high in the ranking, with less relevant documents following below. While previous approaches to learning retrieval functions from examples exist, they typically require training data generated from relevance judgments by experts. This makes them difficult and expensive to apply. The goal of this paper is to develop a method that utilizes clickthrough data for training, namely the query-log of the search engine in connection with the log of links the users clicked on in the presented ranking. Such clickthrough data is available in abundance and can be recorded at very low cost. Taking a Support Vector Machine (SVM) approach, this paper presents a method for learning retrieval functions. From a theoretical perspective, this method is shown to be well-founded in a risk minimization framework. Furthermore, it is shown to be feasible even for large sets of queries and features. The theoretical results are verified in a controlled experiment. 
It shows that the method can effectively adapt the retrieval function of a meta-search engine to a particular group of users, outperforming Google in terms of retrieval quality after only a couple of hundred training examples.}, acmid = {775067}, address = {New York, NY, USA}, author = {Joachims, Thorsten}, booktitle = {Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining}, doi = {10.1145/775047.775067}, interhash = {c78df69370bbf12636eaa5233b1fba83}, intrahash = {656a83f1057c5792506d0d656ae81d26}, isbn = {1-58113-567-X}, location = {Edmonton, Alberta, Canada}, numpages = {10}, pages = {133--142}, publisher = {ACM}, title = {Optimizing search engines using clickthrough data}, url = {http://doi.acm.org/10.1145/775047.775067}, year = 2002 } @techreport{gomes2012creating, abstract = {The web became a mass means of publication that has been replacing printed media. However, its information is extremely ephemeral. Currently, most of the information available on the web is less than 1 year old. There are several initiatives worldwide that struggle to archive information from the web before it vanishes. However, search mechanisms to access this information are still limited and do not satisfy their users that demand performance similar to live-web search engines. This paper presents some of the work developed to create an efficient and effective searchable web archive service, from data acquisition to user interface design. The results of research were applied in practice to create the Portuguese Web Archive that is publicly available since January 2010. It supports full-text search over 1 billion contents archived from 1996 to 2010. The developed software is available as an open source project.}, address = {Portugal}, author = {Gomes, Daniel and Cruz, David and Miranda, João and Costa, Miguel and Fontes, Simão}, institution = {Foundation for National Scientific Computing}, interhash = {b5c01e5cadcc1d8ef44d48b2022144d2}, intrahash = {da5b8a339b2c3d765c3b0a7bd025af82}, month = may, title = {Creating a searchable web archive}, url = {http://web.ist.utl.pt/joaocarvalhomiranda/docs/other/creating-a-searchable-web-archive-relatorio.pdf}, year = 2012 } @article{alonso2008crowdsourcing, abstract = {Relevance evaluation is an essential part of the development and maintenance of information retrieval systems. Yet traditional evaluation approaches have several limitations; in particular, conducting new editorial evaluations of a search system can be very expensive. We describe a new approach to evaluation called TERC, based on the crowdsourcing paradigm, in which many online users, drawn from a large community, each performs a small evaluation task.}, acmid = {1480508}, address = {New York, NY, USA}, author = {Alonso, Omar and Rose, Daniel E. and Stewart, Benjamin}, doi = {10.1145/1480506.1480508}, interhash = {8441d7fed92813634f61fa148ef2b870}, intrahash = {4a47833e85558b740788607cb79ba795}, issn = {0163-5840}, issue_date = {December 2008}, journal = {SIGIR Forum}, month = nov, number = 2, numpages = {7}, pages = {9--15}, publisher = {ACM}, title = {Crowdsourcing for relevance evaluation}, url = {http://doi.acm.org/10.1145/1480506.1480508}, volume = 42, year = 2008 } @article{cha2007comprehensive, abstract = {Distance or similarity measures are essential to solve many pattern recognition problems such as classification, clustering, and retrieval problems.
Various distance/similarity measures that are applicable to compare two probability density functions, pdf in short, are reviewed and categorized in both syntactic and semantic relationships. A correlation coefficient and a hierarchical clustering technique are adopted to reveal similarities among numerous distance/similarity measures.}, author = {Cha, Sung-Hyuk}, interhash = {dfaf5e38d33eaab89f3643b242910c81}, intrahash = {69e7c9ba92a049efa4c70f8f0bfdb4ea}, journal = {International Journal of Mathematical Models and Methods in Applied Sciences}, number = 4, pages = {300--307}, title = {Comprehensive Survey on Distance/Similarity Measures between Probability Density Functions}, url = {http://www.gly.fsu.edu/~parker/geostats/Cha.pdf}, volume = 1, year = 2007 } @inproceedings{poelmans2011mining, abstract = {Formal Concept Analysis (FCA) is an unsupervised clustering technique and many scientific papers are devoted to applying FCA in Information Retrieval (IR) research. We collected 103 papers published between 2003-2009 which mention FCA and information retrieval in the abstract, title or keywords. Using a prototype of our FCA-based toolset CORDIET, we converted the pdf-files containing the papers to plain text, indexed them with Lucene using a thesaurus containing terms related to FCA research and then created the concept lattice shown in this paper. We visualized, analyzed and explored the literature with concept lattices and discovered multiple interesting research streams in IR of which we give an extensive overview. The core contributions of this paper are the innovative application of FCA to the text mining of scientific papers and the survey of the FCA-based IR research.
}, author = {Poelmans, Jonas and Elzinga, Paul and Viaene, Stijn and Dedene, Guido and Kuznetsov, Sergei O.}, booktitle = {Industrial Conference on Data Mining - Poster and Industry Proceedings}, editor = {Perner, Petra}, interhash = {b44d11ea5b5a4df8ee30a9c572d82051}, intrahash = {164c37be60c1a47d1727ad9b82f01237}, isbn = {978-3-942954-06-4}, pages = {82--96}, publisher = {IBaI Publishing}, title = {Text Mining Scientific Papers: a Survey on {FCA}-based Information Retrieval Research}, url = {http://dblp.uni-trier.de/db/conf/incdm/incdm2011p.html#PoelmansEVDK11}, year = 2011 } @inproceedings{hotho2006folkrank, abstract = {In social bookmark tools users are setting up lightweight conceptual structures called folksonomies. Currently, the information retrieval support is limited. We present a formal model and a new search algorithm for folksonomies, called FolkRank, that exploits the structure of the folksonomy. The proposed algorithm is also applied to find communities within the folksonomy and is used to structure search results. All findings are demonstrated on a large scale dataset. A long version of this paper has been published at the European Semantic Web Conference 2006.}, author = {Hotho, Andreas and Jäschke, Robert and Schmitz, Christoph and Stumme, Gerd}, booktitle = {Proc. FGIR 2006}, interhash = {3468dc3fed17eadf2e7c6ff06fbb34a3}, intrahash = {4d8b4f79814691fbe6db8357d63206a1}, title = {FolkRank: A Ranking Algorithm for Folksonomies}, url = {http://www.kde.cs.uni-kassel.de/stumme/papers/2006/hotho2006folkrank.pdf}, year = 2006 } @proceedings{themenheft2007webmining, editor = {Hotho, Andreas and Stumme, Gerd}, interhash = {83c28b86f2ac897e906660e54e6fffc0}, intrahash = {c73311bb72ad480d74125dbc9d94c450}, journal = {Künstliche Intelligenz}, number = 3, pages = {5--8}, title = {Themenheft Web Mining, Künstliche Intelligenz}, url = {http://www.kuenstliche-intelligenz.de/index.php?id=7758}, year = 2007 } @article{hotho2007mining, author = {Hotho, Andreas and Stumme, Gerd}, interhash = {39f94bf3a1663d9cec6a6cb8354a9bd9}, intrahash = {e9535ec82afa53f44a1b37704aa9a71f}, journal = {Künstliche Intelligenz}, number = 3, pages = {5--8}, title = {Mining the World Wide Web -- Methods, Applications, and Perspectives}, url = {http://www.kuenstliche-intelligenz.de/index.php?id=7758}, year = 2007 } @book{metzler2011featurecentric, asin = {3642228976}, author = {Metzler, Donald}, dewey = {005}, ean = {9783642228971}, edition = 2012, interhash = {4e473a9657c556434612d006a5a21460}, intrahash = {22e5fe8501844167b64a5aed595f4372}, isbn = {3642228976}, publisher = {Springer}, title = {A Feature-Centric View of Information Retrieval}, url = {http://www.amazon.com/Feature-Centric-View-Information-Retrieval/dp/3642228976}, year = 2011 } @inproceedings{zhou2005document, abstract = {The quality of document content, which is an issue that is usually ignored for the traditional ad hoc retrieval task, is a critical issue for Web search. Web pages have a huge variation in quality relative to, for example, newswire articles. To address this problem, we propose a document quality language model approach that is incorporated into the basic query likelihood retrieval model in the form of a prior probability. Our results demonstrate that, on average, the new model is significantly better than the baseline (query likelihood model) in terms of precision at the top ranks.}, acmid = {1099652}, address = {New York, NY, USA}, author = {Zhou, Yun and Croft, W.
Bruce}, booktitle = {Proceedings of the 14th ACM International Conference on Information and Knowledge Management}, doi = {10.1145/1099554.1099652}, interhash = {01264e5f48959d326724b405d3898337}, intrahash = {d190feee02f804aea11f19979d3642b8}, isbn = {1-59593-140-6}, location = {Bremen, Germany}, numpages = {2}, pages = {331--332}, publisher = {ACM}, series = {CIKM '05}, title = {Document quality models for web ad hoc retrieval}, url = {http://doi.acm.org/10.1145/1099554.1099652}, year = 2005 } @article{broder2002taxonomy, author = {Broder, A.}, interhash = {1bfc1fd93c01979b73e05ae519a46bce}, intrahash = {36085c6aefab8fc5bc9903e2ecb96e00}, journal = {ACM SIGIR Forum}, number = 2, pages = {3--10}, title = {A taxonomy of Web search}, volume = 36, year = 2002 } @article{PeSt08, abstract = {Folksonomies in Wissensrepr{\"a}sentation und Information Retrieval. Die popul{\"a}ren Web 2.0-Dienste werden von Prosumern -- Produzenten und gleichsam Konsumenten -- nicht nur dazu genutzt, Inhalte zu produzieren, sondern auch, um sie inhaltlich zu erschlie{\ss}en. Folksonomies erlauben es dem Nutzer, Dokumente mit eigenen Schlagworten, sog. Tags, zu beschreiben, ohne dabei auf gewisse Regeln oder Vorgaben achten zu m{\"u}ssen. Neben einigen Vorteilen zeigen Folksonomies aber auch zahlreiche Schw{\"a}chen (u. a. einen Mangel an Pr{\"a}zision). Um diesen Nachteilen gr{\"o}{\ss}tenteils entgegenzuwirken, schlagen wir eine Interpretation der Tags als nat{\"u}rlichsprachige W{\"o}rter vor. Dadurch ist es uns m{\"o}glich, Methoden des Natural Language Processing (NLP) auf die Tags anzuwenden und so linguistische Probleme der Tags zu beseitigen. Dar{\"u}ber hinaus diskutieren wir Ans{\"a}tze und weitere Vorschl{\"a}ge (Tagverteilungen, Kollaboration und akteurspezifische Aspekte) hinsichtlich eines Relevance Rankings von getaggten Dokumenten. Neben Vorschl{\"a}gen auf {\"a}hnliche Dokumente ({\glqq}more like this!{\grqq}) erlauben Folksonomies auch Hinweise auf verwandte Nutzer und damit auf Communities ({\glqq}more like me!{\grqq}). Folksonomies in Knowledge Representation and Information Retrieval. In Web 2.0 services {\glqq}prosumers{\grqq} -- producers and consumers -- collaborate not only for the purpose of creating content, but to index these pieces of information as well. Folksonomies permit actors to describe documents with subject headings, {\glqq}tags{\grqq}, without regarding any rules. Apart from a lot of benefits folksonomies have many shortcomings (e.g., lack of precision). In order to solve some of the problems we propose interpreting tags as natural language terms. Accordingly, we can introduce methods of NLP to solve the tags' linguistic problems. Additionally, we present criteria for tagged documents to create a ranking by relevance (tag distribution, collaboration and actor-based aspects). Besides recommending similar documents ({\glqq}more like this!{\grqq}) folksonomies can be used for the recommendation of similar users and communities ({\glqq}more like me!{\grqq}).
}, author = {Peters, Isabella and Stock, Wolfgang G.}, interhash = {93b09c0700650150065232180fb23115}, intrahash = {3abe2759f6837cbd247021cb26bcf760}, issn = {1434-4653}, journal = {Information -- Wissenschaft und Praxis}, localfile = {Wissenschaftliche Bibliothek/dokumente/StPe08.pdf}, number = 2, pages = {77--90}, title = {{Folksonomies in Wissensrepr{\"a}sentation und Information Retrieval}}, url = {http://www.phil-fak.uni-duesseldorf.de/infowiss/admin/public_dateien/files/1/1204547968stock212_h.htm}, volume = 59, year = 2008 }