@inproceedings{jaeschke2009testing,
  abstract = {The challenge to provide tag recommendations for collaborative tagging systems has attracted quite some attention of researchers lately. However, most research focused on the evaluation and development of appropriate methods rather than tackling the practical challenges of how to integrate recommendation methods into real tagging systems, record and evaluate their performance. In this paper we describe the tag recommendation framework we developed for our social bookmark and publication sharing system BibSonomy. With the intention to develop, test, and evaluate recommendation algorithms and supporting cooperation with researchers, we designed the framework to be easily extensible, open for a variety of methods, and usable independent from BibSonomy. Furthermore, this paper presents a first evaluation of two exemplarily deployed recommendation methods.},
  address = {New York, NY, USA},
  author = {Jäschke, Robert and Eisterlehner, Folke and Hotho, Andreas and Stumme, Gerd},
  booktitle = {RecSys '09: Proceedings of the Third ACM Conference on Recommender Systems},
  doi = {10.1145/1639714.1639790},
  interhash = {440fafda1eccf4036066f457eb6674a0},
  intrahash = {21fdf612ba6b356fb1b311fc9369f32d},
  isbn = {978-1-60558-435-5},
  location = {New York, NY, USA},
  pages = {369--372},
  publisher = {ACM},
  title = {Testing and Evaluating Tag Recommenders in a Live System},
  url = {http://www.kde.cs.uni-kassel.de/pub/pdf/jaeschke2009testing.pdf},
  vgwort = {15},
  year = 2009
}

@inproceedings{jaeschke2009testingKDML,
  abstract = {The challenge to provide tag recommendations for collaborative tagging systems has attracted quite some attention of researchers lately. However, most research focused on evaluation and development of appropriate methods rather than tackling the practical challenges of how to integrate recommendation methods into real tagging systems, record and evaluate their performance. In this paper we describe the tag recommendation framework we developed for our social bookmark and publication sharing system BibSonomy. With the intention to develop, test, and evaluate recommendation algorithms and supporting cooperation with researchers, we designed the framework to be easily extensible, open for a variety of methods, and usable independent from BibSonomy. Furthermore, this paper presents an evaluation of two exemplarily deployed recommendation methods, demonstrating the power of the framework.},
  author = {Jäschke, Robert and Eisterlehner, Folke and Hotho, Andreas and Stumme, Gerd},
  booktitle = {Workshop on Knowledge Discovery, Data Mining, and Machine Learning},
  editor = {Benz, Dominik and Janssen, Frederik},
  interhash = {440fafda1eccf4036066f457eb6674a0},
  intrahash = {5e8f40e610e723e966676772aa205f80},
  month = sep,
  pages = {44--51},
  title = {Testing and Evaluating Tag Recommenders in a Live System},
  url = {http://lwa09.informatik.tu-darmstadt.de/pub/KDML/WebHome/kdml09_R.Jaeschke_et_al.pdf},
  vgwort = {30},
  year = 2009
}

@inproceedings{cosley2002referee,
  abstract = {Automated recommendation (e.g., personalized product recommendation on an ecommerce web site) is an increasingly valuable service associated with many databases--typically online retail catalogs and web logs. Currently, a major obstacle for evaluating recommendation algorithms is the lack of any standard, public, real-world testbed appropriate for the task. In an attempt to fill this gap, we have created REFEREE, a framework for building recommender systems using ResearchIndex--a huge online digital library of computer science research papers--so that anyone in the research community can develop, deploy, and evaluate recommender systems relatively easily and quickly. ResearchIndex is in many ways ideal for evaluating recommender systems, especially so-called hybrid recommenders that combine information filtering and collaborative filtering techniques. The documents in the database are associated with a wealth of content information (author, title, abstract, full text) and collaborative information (user behaviors), as well as linkage information via the citation structure. Our framework supports more realistic evaluation metrics that assess user buy-in directly, rather than resorting to offline metrics like prediction accuracy that may have little to do with end user utility. The sheer scale of ResearchIndex (over 500,000 documents with thousands of user accesses per hour) will force algorithm designers to make real-world trade-offs that consider performance, not just accuracy. We present our own tradeoff decisions in building an example hybrid recommender called PD-Live. The algorithm uses content-based similarity information to select a set of documents from which to recommend, and collaborative information to rank the documents. PD-Live performs reasonably well compared to other recommenders in ResearchIndex.},
  author = {Cosley, Dan and Lawrence, Steve and Pennock, David M.},
  booktitle = {VLDB '02: Proceedings of the 28th International Conference on Very Large Data Bases},
  interhash = {cd87b54cacc63e242421dc2ecde84926},
  intrahash = {f5008da10f55ecc550525a3d49c45944},
  location = {Hong Kong, China},
  pages = {35--46},
  publisher = {VLDB Endowment},
  title = {REFEREE: an open framework for practical testing of recommender systems using ResearchIndex},
  url = {http://portal.acm.org/citation.cfm?id=1287369.1287374},
  year = 2002
}

@inproceedings{brodsky2008card,
  abstract = {This paper proposes a framework for Composite Alternative Recommendation Development (CARD), which supports composite product and service definitions, top-k decision optimization, and dynamic preference learning. Composite services are characterized by a set of sub-services, which, in turn, can be composite or atomic. Each atomic and composite service is associated with metrics, such as cost, duration, and enjoyment ranking. The framework is based on the Composite Recommender Knowledge Base, which is composed of views, including Service Metric Views that specify services and their metrics; Recommendation Views that specify the ranking definition to balance optimality and diversity; parametric Transformers that specify how service metrics are defined in terms of metrics of its subservices; and learning sets from which the unknown parameters in the transformers are iteratively learned. Also introduced in the paper is the top-k selection criterion that, based on a vector of utility metrics, provides the balance between the optimality of individual metrics and the diversity of recommendations. To exemplify the framework, specific views are developed for a travel package recommender system.},
  address = {New York, NY, USA},
  author = {Brodsky, Alexander and Henshaw, Sylvia Morgan and Whittle, Jon},
  booktitle = {RecSys '08: Proceedings of the 2008 ACM Conference on Recommender Systems},
  doi = {10.1145/1454008.1454037},
  interhash = {c9cd132d4f0763c4fcf094cd738fbd54},
  intrahash = {2938e17e594e801df3e9f07e0f06a513},
  isbn = {978-1-60558-093-7},
  location = {Lausanne, Switzerland},
  pages = {171--178},
  publisher = {ACM},
  title = {CARD: a decision-guidance framework and application for recommending composite alternatives},
  url = {http://portal.acm.org/citation.cfm?id=1454037},
  year = 2008
}