%% Recommender-systems bibliography (BibTeX).
%% NOTE(review): several entries are the same paper under different keys
%% (flagged below via internal-note fields, kept because the keys may be cited).
%% One entry was a hard error — a repeated citation key — and was removed (see
%% the comment before skkr02item).

@inproceedings{sarwar2001itembased,
  acmid     = {372071},
  address   = {New York, NY, USA},
  author    = {Sarwar, Badrul and Karypis, George and Konstan, Joseph and Riedl, John},
  booktitle = {Proceedings of the 10th international conference on World Wide Web},
  doi       = {10.1145/371920.372071},
  interhash = {043d1aaba0f0b8c01d84edd517abedaf},
  intrahash = {16f38785d7829500ed41c610a5eff9a2},
  isbn      = {1-58113-348-0},
  location  = {Hong Kong, Hong Kong},
  numpages  = {11},
  pages     = {285--295},
  publisher = {ACM},
  title     = {Item-based collaborative filtering recommendation algorithms},
  url       = {http://doi.acm.org/10.1145/371920.372071},
  year      = 2001
}

@article{li2011design,
  abstract   = {We examine the use of modern recommender system technology to aid command awareness in complex software applications. We first describe our adaptation of traditional recommender system algorithms to meet the unique requirements presented by the domain of software commands. A user study showed that our item-based collaborative filtering algorithm generates 2.1 times as many good suggestions as existing techniques. Motivated by these positive results, we propose a design space framework and its associated algorithms to support both global and contextual recommendations. To evaluate the algorithms, we developed the CommunityCommands plug-in for AutoCAD. This plug-in enabled us to perform a 6-week user study of real-time, within-application command recommendations in actual working environments. We report and visualize command usage behaviors during the study, and discuss how the recommendations affected users behaviors. In particular, we found that the plug-in successfully exposed users to new commands, as unique commands issued significantly increased.},
  acmid      = {1970380},
  address    = {New York, NY, USA},
  articleno  = {6},
  author     = {Li, Wei and Matejka, Justin and Grossman, Tovi and Konstan, Joseph A. and Fitzmaurice, George},
  doi        = {10.1145/1970378.1970380},
  interhash  = {73e969e2db8875605d0b1fa8a0d6333d},
  intrahash  = {3aec947c38d63f96a4242c95a8c85ee7},
  issn       = {1073-0516},
  issue_date = {June 2011},
  journal    = {ACM Transactions on Computer-Human Interaction},
  month      = jul,
  number     = 2,
  numpages   = {35},
  pages      = {6:1--6:35},
  publisher  = {ACM},
  title      = {Design and evaluation of a command recommendation system for software applications},
  url        = {http://doi.acm.org/10.1145/1970378.1970380},
  volume     = 18,
  year       = 2011
}

@inproceedings{mcnee2002recommending,
  abstract  = {Collaborative filtering has proven to be valuable for recommending items in many different domains. In this paper, we explore the use of collaborative filtering to recommend research papers, using the citation web between papers to create the ratings matrix. Specifically, we tested the ability of collaborative filtering to recommend citations that would be suitable additional references for a target research paper. We investigated six algorithms for selecting citations, evaluating them through offline experiments against a database of over 186,000 research papers contained in ResearchIndex. We also performed an online experiment with over 120 users to gauge user opinion of the effectiveness of the algorithms and of the utility of such recommendations for common research tasks. We found large differences in the accuracy of the algorithms in the offline experiment, especially when balanced for coverage. In the online experiment, users felt they received quality recommendations, and were enthusiastic about the idea of receiving recommendations in this domain.},
  acmid     = {587096},
  address   = {New York, NY, USA},
  author    = {McNee, Sean M. and Albert, Istvan and Cosley, Dan and Gopalkrishnan, Prateep and Lam, Shyong K. and Rashid, Al Mamunur and Konstan, Joseph A. and Riedl, John},
  booktitle = {Proceedings of the 2002 ACM conference on Computer supported cooperative work},
  doi       = {10.1145/587078.587096},
  interhash = {7178849aab57a025dff76e177d64be9b},
  intrahash = {50f94e753fad76222bd33cbe591f9360},
  isbn      = {1-58113-560-2},
  location  = {New Orleans, Louisiana, USA},
  numpages  = {10},
  pages     = {116--125},
  publisher = {ACM},
  series    = {CSCW '02},
  title     = {On the recommending of citations for research papers},
  url       = {http://doi.acm.org/10.1145/587078.587096},
  year      = 2002
}

@inproceedings{McNee:2006:AEA:1125451.1125659,
  abstract  = {Recommender systems have shown great potential to help users find interesting and relevant items from within a large information space. Most research up to this point has focused on improving the accuracy of recommender systems. We believe that not only has this narrow focus been misguided, but has even been detrimental to the field. The recommendations that are most accurate according to the standard metrics are sometimes not the recommendations that are most useful to users. In this paper, we propose informal arguments that the recommender community should move beyond the conventional accuracy metrics and their associated experimental methodologies. We propose new user-centric directions for evaluating recommender systems.},
  acmid     = {1125659},
  address   = {New York, NY, USA},
  author    = {McNee, Sean M. and Riedl, John and Konstan, Joseph A.},
  booktitle = {CHI '06 extended abstracts on Human factors in computing systems},
  doi       = {10.1145/1125451.1125659},
  interhash = {fe396fbce5daacd374196ad688e3f149},
  intrahash = {4b9fddbd766a9247856641989a778b23},
  isbn      = {1-59593-298-4},
  location  = {Montr{\'e}al, Qu{\'e}bec, Canada},
  numpages  = {5},
  pages     = {1097--1101},
  publisher = {ACM},
  series    = {CHI EA '06},
  title     = {Being accurate is not enough: how accuracy metrics have hurt recommender systems},
  url       = {http://doi.acm.org/10.1145/1125451.1125659},
  year      = 2006
}

@inproceedings{ekstrand2010automatically,
  abstract  = {All new researchers face the daunting task of familiarizing themselves with the existing body of research literature in their respective fields. Recommender algorithms could aid in preparing these lists, but most current algorithms do not understand how to rate the importance of a paper within the literature, which might limit their effectiveness in this domain. We explore several methods for augmenting existing collaborative and content-based filtering algorithms with measures of the influence of a paper within the web of citations. We measure influence using well-known algorithms, such as HITS and PageRank, for measuring a node's importance in a graph. Among these augmentation methods is a novel method for using importance scores to influence collaborative filtering. We present a task-centered evaluation, including both an offline analysis and a user study, of the performance of the algorithms. Results from these studies indicate that collaborative filtering outperforms content-based approaches for generating introductory reading lists.},
  acmid     = {1864740},
  address   = {New York, NY, USA},
  author    = {Ekstrand, Michael D. and Kannan, Praveen and Stemper, James A. and Butler, John T. and Konstan, Joseph A. and Riedl, John T.},
  booktitle = {Proceedings of the fourth ACM conference on Recommender systems},
  doi       = {10.1145/1864708.1864740},
  interhash = {71ea85067f7d5f46bbb3a5da7e18ba34},
  intrahash = {fbe0d5fca62781e5156d04e20d324a46},
  isbn      = {978-1-60558-906-0},
  location  = {Barcelona, Spain},
  numpages  = {8},
  pages     = {159--166},
  publisher = {ACM},
  title     = {Automatically building research reading lists},
  url       = {http://doi.acm.org/10.1145/1864708.1864740},
  year      = 2010
}

@inproceedings{McNee:2006:DLS:1180875.1180903,
  abstract  = {If recommenders are to help people be more productive, they need to support a wide variety of real-world information seeking tasks, such as those found when seeking research papers in a digital library. There are many potential pitfalls, including not knowing what tasks to support, generating recommendations for the wrong task, or even failing to generate any meaningful recommendations whatsoever. We posit that different recommender algorithms are better suited to certain information seeking tasks. In this work, we perform a detailed user study with over 130 users to understand these differences between recommender algorithms through an online survey of paper recommendations from the ACM Digital Library. We found that pitfalls are hard to avoid. Two of our algorithms generated 'atypical' recommendations --- recommendations that were unrelated to their input baskets. Users reacted accordingly, providing strong negative results for these algorithms. Results from our 'typical' algorithms show some qualitative differences, but since users were exposed to two algorithms, the results may be biased. We present a wide variety of results, teasing out differences between algorithms. Finally, we succinctly summarize our most striking results as "Don't Look Stupid" in front of users.},
  acmid     = {1180903},
  address   = {New York, NY, USA},
  author    = {McNee, Sean M. and Kapoor, Nishikant and Konstan, Joseph A.},
  booktitle = {Proceedings of the 2006 20th anniversary conference on Computer supported cooperative work},
  doi       = {10.1145/1180875.1180903},
  interhash = {24be686d042a3a4a710d9ff22dee0f2e},
  intrahash = {7775150ca225770019bd94db9be5db40},
  isbn      = {1-59593-249-6},
  location  = {Banff, Alberta, Canada},
  numpages  = {10},
  pages     = {171--180},
  publisher = {ACM},
  series    = {CSCW '06},
  title     = {Don't look stupid: avoiding pitfalls when recommending research papers},
  url       = {http://doi.acm.org/10.1145/1180875.1180903},
  year      = 2006
}

@inproceedings{mcnee2006stupid,
  internal-note = {duplicate of McNee:2006:DLS:1180875.1180903 (same paper, different key)},
  abstract  = {If recommenders are to help people be more productive, they need to support a wide variety of real-world information seeking tasks, such as those found when seeking research papers in a digital library. There are many potential pitfalls, including not knowing what tasks to support, generating recommendations for the wrong task, or even failing to generate any meaningful recommendations whatsoever. We posit that different recommender algorithms are better suited to certain information seeking tasks. In this work, we perform a detailed user study with over 130 users to understand these differences between recommender algorithms through an online survey of paper recommendations from the ACM Digital Library. We found that pitfalls are hard to avoid. Two of our algorithms generated 'atypical' recommendations --- recommendations that were unrelated to their input baskets. Users reacted accordingly, providing strong negative results for these algorithms. Results from our 'typical' algorithms show some qualitative differences, but since users were exposed to two algorithms, the results may be biased. We present a wide variety of results, teasing out differences between algorithms. Finally, we succinctly summarize our most striking results as "Don't Look Stupid" in front of users.},
  acmid     = {1180903},
  address   = {New York, NY, USA},
  author    = {McNee, Sean M. and Kapoor, Nishikant and Konstan, Joseph A.},
  booktitle = {Proceedings of the 2006 20th anniversary conference on Computer supported cooperative work},
  doi       = {10.1145/1180875.1180903},
  interhash = {24be686d042a3a4a710d9ff22dee0f2e},
  intrahash = {7775150ca225770019bd94db9be5db40},
  isbn      = {1-59593-249-6},
  location  = {Banff, Alberta, Canada},
  numpages  = {10},
  pages     = {171--180},
  publisher = {ACM},
  series    = {CSCW '06},
  title     = {Don't look stupid: avoiding pitfalls when recommending research papers},
  url       = {http://doi.acm.org/10.1145/1180875.1180903},
  year      = 2006
}

@article{Herlocker:2004:ECF:963770.963772,
  abstract  = {Recommender systems have been evaluated in many, often incomparable, ways. In this article, we review the key decisions in evaluating collaborative filtering recommender systems: the user tasks being evaluated, the types of analysis and datasets being used, the ways in which prediction quality is measured, the evaluation of prediction attributes other than quality, and the user-based evaluation of the system as a whole. In addition to reviewing the evaluation strategies used by prior researchers, we present empirical results from the analysis of various accuracy metrics on one content domain where all the tested metrics collapsed roughly into three equivalence classes. Metrics within each equivalency class were strongly correlated, while metrics from different equivalency classes were uncorrelated.},
  acmid     = {963772},
  address   = {New York, NY, USA},
  author    = {Herlocker, Jonathan L. and Konstan, Joseph A. and Terveen, Loren G. and Riedl, John T.},
  doi       = {10.1145/963770.963772},
  interhash = {f8a70731d983634ac7105896d101c9d2},
  intrahash = {c3a659108a568db1fba183c680dd1fd2},
  issn      = {1046-8188},
  journal   = {ACM Transactions on Information Systems},
  month     = jan,
  number    = {1},
  numpages  = {49},
  pages     = {5--53},
  privnote  = {bla bla},
  publisher = {ACM},
  title     = {Evaluating collaborative filtering recommender systems},
  url       = {http://doi.acm.org/10.1145/963770.963772},
  volume    = 22,
  year      = 2004
}

@inproceedings{ziegler2005improving,
  abstract         = {In this work we present topic diversification, a novel method designed to balance and diversify personalized recommendation lists in order to reflect the user's complete spectrum of interests. Though being detrimental to average accuracy, we show that our method improves user satisfaction with recommendation lists, in particular for lists generated using the common item-based collaborative filtering algorithm. Our work builds upon prior research on recommender systems, looking at properties of recommendation lists as entities in their own right rather than specifically focusing on the accuracy of individual recommendations. We introduce the intra-list similarity metric to assess the topical diversity of recommendation lists and the topic diversification approach for decreasing the intra-list similarity. We evaluate our method using book recommendation data, including online analysis on 361,349 ratings and an online study involving more than 2,100 subjects.},
  address          = {Chiba, Japan},
  author           = {Ziegler, Cai-Nicolas and McNee, Sean and Konstan, Joseph and Lausen, Georg},
  booktitle        = {Proceedings of the 14th International World Wide Web Conference},
  file             = {ziegler2005improving.pdf:ziegler2005improving.pdf:PDF},
  interhash        = {0a7f89e65c4a0a5e45aa69a54a5600e6},
  intrahash        = {1c70855a788c17e3a94a7ecc00177f6c},
  lastdatemodified = {2006-09-30},
  lastname         = {Ziegler},
  month            = may,
  own              = {notown},
  publisher        = {ACM Press},
  read             = {notread},
  title            = {Improving Recommendation Lists Through Topic Diversification},
  year             = 2005
}

@inproceedings{herlocker2000explaining,
  abstract  = {Automated collaborative filtering (ACF) systems predict a person's affinity for items or information by connecting that person's recorded interests with the recorded interests of a community of people and sharing ratings between like-minded persons. However, current recommender systems are black boxes, providing no transparency into the working of the recommendation. Explanations provide that transparency, exposing the reasoning and data behind a recommendation. In this paper, we address explanation interfaces for ACF systems - how they should be implemented and why they should be implemented. To explore how, we present a model for explanations based on the user's conceptual model of the recommendation process. We then present experimental results demonstrating what components of an explanation are the most compelling. To address why, we present experimental evidence that shows that providing explanations can improve the acceptance of ACF systems. We also describe some initial explorations into measuring how explanations can improve the filtering performance of users.},
  address   = {New York, NY, USA},
  author    = {Herlocker, Jonathan L. and Konstan, Joseph A. and Riedl, John},
  booktitle = {CSCW '00: Proceedings of the 2000 ACM Conference on Computer Supported Cooperative Work},
  doi       = {10.1145/358916.358995},
  interhash = {92273b87585b39bd394cb77f5a81ff1f},
  intrahash = {85b8ec0aa805890a1e82156eebdb079b},
  isbn      = {1-58113-222-0},
  location  = {Philadelphia, Pennsylvania, United States},
  pages     = {241--250},
  publisher = {ACM},
  title     = {Explaining collaborative filtering recommendations},
  url       = {http://portal.acm.org/citation.cfm?id=358995},
  year      = 2000
}

@inproceedings{cosley2003believing,
  abstract  = {Recommender systems use people's opinions about items in an information domain to help people choose other items. These systems have succeeded in domains as diverse as movies, news articles, Web pages, and wines. The psychological literature on conformity suggests that in the course of helping people make choices, these systems probably affect users' opinions of the items. If opinions are influenced by recommendations, they might be less valuable for making recommendations for other users. Further, manipulators who seek to make the system generate artificially high or low recommendations might benefit if their efforts influence users to change the opinions they contribute to the recommender. We study two aspects of recommender system interfaces that may affect users' opinions: the rating scale and the display of predictions at the time users rate items. We find that users rate fairly consistently across rating scales. Users can be manipulated, though, tending to rate toward the prediction the system shows, whether the prediction is accurate or not. However, users can detect systems that manipulate predictions. We discuss how designers of recommender systems might react to these findings.},
  address   = {New York, NY, USA},
  author    = {Cosley, Dan and Lam, Shyong K. and Albert, Istvan and Konstan, Joseph A. and Riedl, John},
  booktitle = {CHI '03: Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
  doi       = {10.1145/642611.642713},
  interhash = {1b7ceacc5ada8aecc41e6684c0852702},
  intrahash = {30230be1037c17a6ff958eb66b45d3a3},
  isbn      = {1-58113-630-7},
  location  = {Ft. Lauderdale, Florida, USA},
  pages     = {585--592},
  publisher = {ACM},
  title     = {Is seeing believing?: how recommender system interfaces affect users' opinions},
  url       = {http://portal.acm.org/citation.cfm?id=642611.642713&type=series},
  year      = 2003
}

@inproceedings{sarwar2001item,
  internal-note = {duplicate of sarwar2001itembased (same paper, different key)},
  abstract  = {Recommender systems apply knowledge discovery techniques to the problem of making personalized recommendations for information, products or services during a live interaction. These systems, especially the k-nearest neighbor collaborative filtering based ones, are achieving widespread success on the Web. The tremendous growth in the amount of available information and the number of visitors to Web sites in recent years poses some key challenges for recommender systems. These are: producing high quality recommendations, performing many recommendations per second for millions of users and items and achieving high coverage in the face of data sparsity. In traditional collaborative filtering systems the amount of work increases with the number of participants in the system. New recommender system technologies are needed that can quickly produce high quality recommendations, even for very large-scale problems. To address these issues we have explored item-based collaborative filtering techniques. Item-based techniques first analyze the user-item matrix to identify relationships between different items, and then use these relationships to indirectly compute recommendations for users. In this paper we analyze different item-based recommendation generation algorithms. We look into different techniques for computing item-item similarities (e.g., item-item correlation vs. cosine similarities between item vectors) and different techniques for obtaining recommendations from them (e.g., weighted sum vs. regression model). Finally, we experimentally evaluate our results and compare them to the basic k-nearest neighbor approach. Our experiments suggest that item-based algorithms provide dramatically better performance than user-based algorithms, while at the same time providing better quality than the best available user-based algorithms.},
  address   = {New York, NY, USA},
  author    = {Sarwar, Badrul and Karypis, George and Konstan, Joseph and Riedl, John},
  booktitle = {WWW '01: Proceedings of the 10th International Conference on World Wide Web},
  doi       = {10.1145/371920.372071},
  interhash = {043d1aaba0f0b8c01d84edd517abedaf},
  intrahash = {a6461157c8102d34b8001c7d33a42684},
  isbn      = {1-58113-348-0},
  location  = {Hong Kong},
  pages     = {285--295},
  publisher = {ACM},
  title     = {Item-based collaborative filtering recommendation algorithms},
  url       = {http://portal.acm.org/citation.cfm?id=372071},
  year      = 2001
}

@inproceedings{conf/www/SarwarKKR01,
  internal-note = {duplicate of sarwar2001itembased (same paper, different key; dblp export)},
  author    = {Sarwar, Badrul M. and Karypis, George and Konstan, Joseph A. and Riedl, John},
  booktitle = {WWW},
  ee        = {http://doi.acm.org/10.1145/371920.372071},
  interhash = {043d1aaba0f0b8c01d84edd517abedaf},
  intrahash = {f349b429624935212ebeed613b89794f},
  pages     = {285--295},
  title     = {Item-based collaborative filtering recommendation algorithms},
  url       = {http://www10.org/cdrom/papers/pdf/p519.pdf},
  year      = 2001
}

@inproceedings{citeulike:1007839,
  internal-note = {duplicate of McNee:2006:DLS:1180875.1180903 (same paper, different key; CiteULike export)},
  abstract  = {If recommenders are to help people be more productive, they need to support a wide variety of real-world information seeking tasks, such as those found when seeking research papers in a digital library. There are many potential pitfalls, including not knowing what tasks to support, generating recommendations for the wrong task, or even failing to generate any meaningful recommendations whatsoever. We posit that different recommender algorithms are better suited to certain information seeking tasks. In this work, we perform a detailed user study with over 130 users to understand these differences between recommender algorithms through an online survey of paper recommendations from the ACM Digital Library. We found that pitfalls are hard to avoid. Two of our algorithms generated 'atypical' recommendations --- recommendations that were unrelated to their input baskets. Users reacted accordingly, providing strong negative results for these algorithms. Results from our 'typical' algorithms show some qualitative differences, but since users were exposed to two algorithms, the results may be biased. We present a wide variety of results, teasing out differences between algorithms. Finally, we succinctly summarize our most striking results as "Don't Look Stupid" in front of users.},
  address   = {New York, NY, USA},
  author    = {McNee, Sean M. and Kapoor, Nishikant and Konstan, Joseph A.},
  booktitle = {CSCW '06: Proceedings of the 2006 20th anniversary conference on Computer supported cooperative work},
  citeulike-article-id = {1007839},
  doi       = {10.1145/1180875.1180903},
  interhash = {24be686d042a3a4a710d9ff22dee0f2e},
  intrahash = {1b3d2da89316b7610ca6ceee7f1b1e7f},
  isbn      = {1595932496},
  pages     = {171--180},
  priority  = {5},
  publisher = {ACM Press},
  title     = {Don't look stupid: avoiding pitfalls when recommending research papers},
  url       = {http://portal.acm.org/citation.cfm?id=1180903},
  year      = 2006
}

@article{herlocker2004evaluating,
  internal-note = {duplicate of Herlocker:2004:ECF:963770.963772 (same paper, different key)},
  abstract  = {Recommender systems have been evaluated in many, often incomparable, ways. In this article, we review the key decisions in evaluating collaborative filtering recommender systems: the user tasks being evaluated, the types of analysis and datasets being used, the ways in which prediction quality is measured, the evaluation of prediction attributes other than quality, and the user-based evaluation of the system as a whole. In addition to reviewing the evaluation strategies used by prior researchers, we present empirical results from the analysis of various accuracy metrics on one content domain where all the tested metrics collapsed roughly into three equivalence classes. Metrics within each equivalency class were strongly correlated, while metrics from different equivalency classes were uncorrelated.},
  address   = {New York, NY, USA},
  author    = {Herlocker, Jonathan L. and Konstan, Joseph A. and Terveen, Loren G. and Riedl, John T.},
  doi       = {10.1145/963770.963772},
  interhash = {f8a70731d983634ac7105896d101c9d2},
  intrahash = {bdd3980bb3c297d1b84ceb0c7729d397},
  issn      = {1046-8188},
  journal   = {ACM Transactions on Information Systems},
  number    = 1,
  pages     = {5--53},
  publisher = {ACM Press},
  title     = {Evaluating collaborative filtering recommender systems},
  url       = {http://portal.acm.org/citation.cfm?id=963770.963772},
  volume    = 22,
  year      = 2004
}

%% NOTE(review): a second entry with the repeated key "conf/www/SarwarKKR01" was
%% removed here — duplicate citation keys are a BibTeX error ("repeated entry").
%% It was byte-identical to the copy above except for its url, which pointed to
%% the dblp index page: http://dblp.uni-trier.de/db/conf/www/www2001.html#SarwarKKR01

@inproceedings{skkr02item,
  internal-note = {duplicate of sarwar2001itembased (same paper, different key)},
  author    = {Sarwar, Badrul M. and Karypis, George and Konstan, Joseph A. and Riedl, John},
  booktitle = {Proceedings of the 10th International WWW Conference},
  ee        = {http://doi.acm.org/10.1145/371920.372071},
  interhash = {043d1aaba0f0b8c01d84edd517abedaf},
  intrahash = {e2a0446da3d69b4d98da6e525e1b363f},
  pages     = {285--295},
  title     = {Item-based collaborative filtering recommendation algorithms},
  url       = {http://dblp.uni-trier.de/db/conf/www/www2001.html#SarwarKKR01},
  year      = 2001
}