@article{Anicich01112014,
  abstract  = {Interpreting scholarly contributions solely on the basis of the number, and not nature, of citations is inherently flawed because contradictory as well as confirmatory findings feed into the same metric, capturing popularity at the expense of precision. I propose a citation and indexing procedure that would conveniently integrate information about research trends while imposing minimal burden on the producers and consumers of research. Under the proposed system, citations appearing in the reference list of research reports would be superscripted with letters corresponding to one of the following six categories: references to findings that are Consistent with the current findings, are Replicated by the current findings, are Inconsistent with the current findings, Failed to be replicated by the current findings, were used to build Theory, or were used to cite Methodologies. I explain how the resulting CRIF-TM data could be summarized and perpetually updated by an online indexing service. I provide an example to demonstrate how these superscripts could be conveniently and unobtrusively presented in the reference list of forthcoming articles. Finally, I examine the anticipated benefits, limitations, and implementation challenges of the proposed citation and indexing procedure.},
  author    = {Anicich, Eric M.},
  doi       = {10.1177/1745691614549772},
  eprint    = {http://pps.sagepub.com/content/9/6/682.full.pdf+html},
  interhash = {af5e16af5f2861d1e53f02d8e58cf221},
  intrahash = {ead9a503ae90b7f74d16739d7e813454},
  journal   = {Perspectives on Psychological Science},
  number    = {6},
  pages     = {682--691},
  title     = {What Lies Within: Superscripting References to Reveal Research Trends},
  url       = {http://pps.sagepub.com/content/9/6/682.abstract},
  volume    = {9},
  year      = {2014},
}

@inproceedings{wang2010claper,
  abstract  = {Classical papers are of great help for beginners to get familiar with a new research area. However, digging them out is a difficult problem. This paper proposes Claper, a novel academic recommendation system based on two proven principles: the Principle of Download Persistence and the Principle of Citation Approaching (we prove them based on real-world datasets). The principle of download persistence indicates that classical papers have few decreasing download frequencies since they were published. The principle of citation approaching indicates that a paper which cites a classical paper is likely to cite citations of that classical paper. Our experimental results based on large-scale real-world datasets illustrate Claper can effectively recommend classical papers of high quality to beginners and thus help them enter their research areas.},
  author    = {Wang, Yonggang and Zhai, Ennan and Hu, Jianbin and Chen, Zhong},
  booktitle = {Proceedings of the seventh International Conference on Fuzzy Systems and Knowledge Discovery},
  doi       = {10.1109/FSKD.2010.5569227},
  interhash = {7180ddaf1c1765a45fd244027bd0bf43},
  intrahash = {7da72bf2f0538afad9377a0d50c263b4},
  month     = aug,
  pages     = {2777--2781},
  publisher = {IEEE},
  title     = {Claper: Recommend classical papers to beginners},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5569227},
  volume    = {6},
  year      = {2010},
}

@inproceedings{he2011citation,
  abstract  = {Automatic recommendation of citations for a manuscript is highly valuable for scholarly activities since it can substantially improve the efficiency and quality of literature search. The prior techniques placed a considerable burden on users, who were required to provide a representative bibliography or to mark passages where citations are needed. In this paper we present a system that considerably reduces this burden: a user simply inputs a query manuscript (without a bibliography) and our system automatically finds locations where citations are needed. We show that na{\"\i}ve approaches do not work well due to massive noise in the document corpus. We produce a successful approach by carefully examining the relevance between segments in a query manuscript and the representative segments extracted from a document corpus. An extensive empirical evaluation using the CiteSeerX data set shows that our approach is effective.},
  acmid     = {1935926},
  address   = {New York, NY, USA},
  author    = {He, Qi and Kifer, Daniel and Pei, Jian and Mitra, Prasenjit and Giles, C. Lee},
  booktitle = {Proceedings of the fourth ACM international conference on Web search and data mining},
  doi       = {10.1145/1935826.1935926},
  interhash = {7e98aaf26a7ed6cc624249a3ab570d7a},
  intrahash = {bbd320f03d13c6cfff4b6f9e6b4630f7},
  isbn      = {978-1-4503-0493-1},
  location  = {Hong Kong, China},
  numpages  = {10},
  pages     = {755--764},
  publisher = {ACM},
  title     = {Citation recommendation without author supervision},
  url       = {http://doi.acm.org/10.1145/1935826.1935926},
  year      = {2011},
}

@inproceedings{bethard2010should,
  abstract  = {Scientists depend on literature search to find prior work that is relevant to their research ideas. We introduce a retrieval model for literature search that incorporates a wide variety of factors important to researchers, and learns the weights of each of these factors by observing citation patterns. We introduce features like topical similarity and author behavioral patterns, and combine these with features from related work like citation count and recency of publication. We present an iterative process for learning weights for these features that alternates between retrieving articles with the current retrieval model, and updating model weights by training a supervised classifier on these articles. We propose a new task for evaluating the resulting retrieval models, where the retrieval system takes only an abstract as its input and must produce as output the list of references at the end of the abstract's article. We evaluate our model on a collection of journal, conference and workshop articles from the ACL Anthology Reference Corpus. Our model achieves a mean average precision of 28.7, a 12.8 point improvement over a term similarity baseline, and a significant improvement both over models using only features from related work and over models without our iterative learning.},
  acmid     = {1871517},
  address   = {New York, NY, USA},
  author    = {Bethard, Steven and Jurafsky, Dan},
  booktitle = {Proceedings of the 19th ACM international conference on Information and knowledge management},
  doi       = {10.1145/1871437.1871517},
  interhash = {1cdf6c7da38af251279e9fb915266af2},
  intrahash = {369206c7472baeaa5ecefef586e16c6a},
  isbn      = {978-1-4503-0099-5},
  location  = {Toronto, ON, Canada},
  numpages  = {10},
  pages     = {609--618},
  publisher = {ACM},
  title     = {Who should {I} cite: learning literature search models from citation behavior},
  url       = {http://doi.acm.org/10.1145/1871437.1871517},
  year      = {2010},
}

@inproceedings{Strohman:2007:RCA:1277741.1277868,
  abstract  = {We approach the problem of academic literature search by considering an unpublished manuscript as a query to a search system. We use the text of previous literature as well as the citation graph that connects it to find relevant related material. We evaluate our technique with manual and automatic evaluation methods, and find an order of magnitude improvement in mean average precision as compared to a text similarity baseline.},
  acmid     = {1277868},
  address   = {New York, NY, USA},
  author    = {Strohman, Trevor and Croft, W. Bruce and Jensen, David},
  booktitle = {Proceedings of the 30th annual international ACM SIGIR conference on Research and development in information retrieval},
  doi       = {10.1145/1277741.1277868},
  interhash = {a34279add7d7a9f3c564735b7b8dcd44},
  intrahash = {7a0b1ff2a40b3989ef8d83daabd91159},
  isbn      = {978-1-59593-597-7},
  location  = {Amsterdam, The Netherlands},
  numpages  = {2},
  pages     = {705--706},
  publisher = {ACM},
  title     = {Recommending citations for academic papers},
  url       = {http://doi.acm.org/10.1145/1277741.1277868},
  year      = {2007},
}

@inproceedings{mcnee2002recommending,
  abstract  = {Collaborative filtering has proven to be valuable for recommending items in many different domains. In this paper, we explore the use of collaborative filtering to recommend research papers, using the citation web between papers to create the ratings matrix. Specifically, we tested the ability of collaborative filtering to recommend citations that would be suitable additional references for a target research paper. We investigated six algorithms for selecting citations, evaluating them through offline experiments against a database of over 186,000 research papers contained in ResearchIndex. We also performed an online experiment with over 120 users to gauge user opinion of the effectiveness of the algorithms and of the utility of such recommendations for common research tasks. We found large differences in the accuracy of the algorithms in the offline experiment, especially when balanced for coverage. In the online experiment, users felt they received quality recommendations, and were enthusiastic about the idea of receiving recommendations in this domain.},
  acmid     = {587096},
  address   = {New York, NY, USA},
  author    = {McNee, Sean M. and Albert, Istvan and Cosley, Dan and Gopalkrishnan, Prateep and Lam, Shyong K. and Rashid, Al Mamunur and Konstan, Joseph A. and Riedl, John},
  booktitle = {Proceedings of the 2002 ACM conference on Computer supported cooperative work},
  doi       = {10.1145/587078.587096},
  interhash = {7178849aab57a025dff76e177d64be9b},
  intrahash = {50f94e753fad76222bd33cbe591f9360},
  isbn      = {1-58113-560-2},
  location  = {New Orleans, Louisiana, USA},
  numpages  = {10},
  pages     = {116--125},
  publisher = {ACM},
  series    = {CSCW '02},
  title     = {On the recommending of citations for research papers},
  url       = {http://doi.acm.org/10.1145/587078.587096},
  year      = {2002},
}

@inproceedings{1271658,
  abstract  = {Bibliometric analysis is used as a measuring activity technique for basic research. There are many country level analyses of trends in scientific publications. These analyses give us an understanding of the macro-scale character of scientific activities. However, it is difficult to capture the qualitative evolution of scientific activities through them. In this regard, a meso-scale analysis of science activities, i.e., analysis of ``research areas'', is suitable for grasping qualitative changes in scientific activities. In this study, we develop a new method for mapping science at the research area level. Our method consists of two parts: constructing research areas from scientific publications and content analysis by experts. Research areas are explored through a co-citation analysis, and a map of science was generated to analyze how research areas relate to each other. This method contributes to endeavours to understand and track the changing nature of science.},
  address   = {Washington, DC, USA},
  author    = {Saka, Ayaka and Igami, Masatsura},
  booktitle = {IV '07: Proceedings of the 11th International Conference Information Visualization},
  doi       = {10.1109/IV.2007.77},
  interhash = {1586085e24335ab7d0f8f5530d32552d},
  intrahash = {a9168950512836c2155af1ed6dc99453},
  isbn      = {0-7695-2900-3},
  pages     = {453--458},
  publisher = {IEEE Computer Society},
  title     = {Mapping Modern Science Using Co-citation Analysis},
  url       = {http://portal.acm.org/citation.cfm?id=1270398.1271658},
  year      = {2007},
}

@article{Butler:2008:Nature:18172465,
  author    = {Butler, D.},
  doi       = {10.1038/451006a},
  interhash = {d4652ec77b4ae09062fac4676cea6bb7},
  intrahash = {d930c99c8e9c7fdf8ded3e9edb0762b0},
  journal   = {Nature},
  month     = jan,
  number    = {7174},
  pages     = {6},
  pmid      = {18172465},
  title     = {Free journal-ranking tool enters citation market},
  url       = {http://www.nature.com/news/2008/080102/full/451006a.html},
  volume    = {451},
  year      = {2008},
}