@article{thijs2006influence,
  abstract    = {In earlier studies by the authors, basic regularities of author self-citations have been analysed. These regularities are related to the ageing, to the relation between self-citations and foreign citations, to the interdependence of self-citations with other bibliometric indicators and to the influence of co-authorship on self-citation behaviour. Although both national and subject specific peculiarities influence the share of self-citations at the macro level, the authors came to the conclusion that - at this level of aggregation - there is practically no need for excluding self-citations. The aim of the present study is to answer the question in how far the influence of author self-citations on bibliometric meso-indicators deviates from that at the macro level, and to what extent national reference standards can be used in bibliometric meso analyses. In order to study the situation at the institutional level, a selection of twelve European universities representing different countries and different research profiles have been made. The results show a quite complex situation at the meso-level, therefore we suggest the usage of both indicators, including and excluding self-citations.},
  affiliation = {Katholieke Universiteit Leuven, Steunpunt O\&O Statistieken Leuven (Belgium) Leuven (Belgium)},
  author      = {Thijs, Bart and Glänzel, Wolfgang},
  doi         = {10.1007/s11192-006-0006-3},
  interhash   = {82ea078d91ba87557fb69d7fba5171bc},
  intrahash   = {c360454b0f49b781ccbbe16840f54b35},
  issn        = {0138-9130},
  journal     = {Scientometrics},
  keywords    = {Informatik},
  number      = {1},
  pages       = {71--80},
  publisher   = {Akadémiai Kiadó, co-published with Springer Science+Business Media B.V., Formerly Kluwer Academic Publishers B.V.},
  title       = {The influence of author self-citations on bibliometric meso-indicators. The case of {European} universities},
  url         = {http://dx.doi.org/10.1007/s11192-006-0006-3},
  volume      = {66},
  year        = {2006}
}
@misc{shuai2012scientific,
  abstract      = {We analyze the online response of the scientific community to the preprint publication of scholarly articles. We employ a cohort of 4,606 scientific articles submitted to the preprint database arXiv.org between October 2010 and April 2011. We study three forms of reactions to these preprints: how they are downloaded on the arXiv.org site, how they are mentioned on the social media site Twitter, and how they are cited in the scholarly record. We perform two analyses. First, we analyze the delay and time span of article downloads and Twitter mentions following submission, to understand the temporal configuration of these reactions and whether significant differences exist between them. Second, we run correlation tests to investigate the relationship between Twitter mentions and both article downloads and article citations. We find that Twitter mentions follow rapidly after article submission and that they are correlated with later article downloads and later article citations, indicating that social media may be an important factor in determining the scientific impact of an article.},
  archiveprefix = {arXiv},
  author        = {Shuai, Xin and Pepe, Alberto and Bollen, Johan},
  eprint        = {1202.2461},
  interhash     = {8331e7736f3cc8296cafd7e6397dc010},
  intrahash     = {6619e035ee8e7e72ccc9aa32f2acea8e},
  title         = {How the Scientific Community Reacts to Newly Submitted Preprints: Article Downloads, {Twitter} Mentions, and Citations},
  url           = {http://arxiv.org/abs/1202.2461},
  year          = {2012}
}
@article{Larsen:2010:Scientometrics:20700371,
  abstract  = {The growth rate of scientific publication has been studied from 1907 to 2007 using available data from a number of literature databases, including Science Citation Index (SCI) and Social Sciences Citation Index (SSCI). Traditional scientific publishing, that is publication in peer-reviewed journals, is still increasing although there are big differences between fields. There are no indications that the growth rate has decreased in the last 50 years. At the same time publication using new channels, for example conference proceedings, open archives and home pages, is growing fast. The growth rate for SCI up to 2007 is smaller than for comparable databases. This means that SCI was covering a decreasing part of the traditional scientific literature. There are also clear indications that the coverage by SCI is especially low in some of the scientific areas with the highest growth rate, including computer science and engineering sciences. The role of conference proceedings, open access archives and publications published on the net is increasing, especially in scientific fields with high growth rates, but this has only partially been reflected in the databases. The new publication channels challenge the use of the big databases in measurements of scientific productivity or output and of the growth rate of science. Because of the declining coverage and this challenge it is problematic that SCI has been used and is used as the dominant source for science indicators based on publication and citation numbers. The limited data available for social sciences show that the growth rate in SSCI was remarkably low and indicate that the coverage by SSCI was declining over time. National Science Indicators from Thomson Reuters is based solely on SCI, SSCI and Arts and Humanities Citation Index (AHCI). Therefore the declining coverage of the citation databases problematizes the use of this source.},
  author    = {Larsen, P. O. and von Ins, M.},
  doi       = {10.1007/s11192-010-0202-z},
  interhash = {cfb4b308f2ca153eaa7540b7d64b3577},
  intrahash = {abdc38dfe051e5b29c8742ab3b950b9c},
  journal   = {Scientometrics},
  month     = sep,
  number    = {3},
  pages     = {575--603},
  pmid      = {20700371},
  title     = {The rate of growth in scientific publication and the decline in coverage provided by {Science Citation Index}},
  url       = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2909426/},
  volume    = {84},
  year      = {2010}
}
@article{ioannidis2014published,
  abstract      = {The presence of web-based communities is a distinctive signature of Web 2.0. The web-based feature means that information propagation within each community is highly facilitated, promoting complex collective dynamics in view of information exchange. In this work, we focus on a community of scientists and study, in particular, how the awareness of a scientific paper is spread. Our work is based on the web usage statistics obtained from the PLoS Article Level Metrics dataset compiled by PLoS. The cumulative number of HTML views was found to follow a long tail distribution which is reasonably well-fitted by a lognormal one. We modeled the diffusion of information by a random multiplicative process, and thus extracted the rates of information spread at different stages after the publication of a paper. We found that the spread of information displays two distinct decay regimes: a rapid downfall in the first month after publication, and a gradual power law decay afterwards. We identified these two regimes with two distinct driving processes: a short-term behavior driven by the fame of a paper, and a long-term behavior consistent with citation statistics. The patterns of information spread were found to be remarkably similar in data from different journals, but there are intrinsic differences for different types of web usage (HTML views and PDF downloads versus XML). These similarities and differences shed light on the theoretical understanding of different complex systems, as well as a better design of the corresponding web applications that is of high potential marketing impact.},
  author        = {Yan, Koon-Kiu and Gerstein, Mark},
  doi           = {10.1371/journal.pone.0019917},
  interhash     = {5ff7675888626380767e22ad7f672279},
  intrahash     = {221dd554089fd1b1918b345fffbd74ce},
  internal-note = {NOTE(review): citation key suggests Ioannidis (2014) but every field describes Yan and Gerstein (2011); the abstract also contained a stray opening paragraph from the Ioannidis article, now removed. Key left unchanged so existing cite commands still resolve -- confirm which work this entry is meant to cite.},
  journal       = {PLoS ONE},
  month         = may,
  number        = {5},
  pages         = {e19917},
  publisher     = {Public Library of Science},
  title         = {The Spread of Scientific Information: Insights from the Web Usage Statistics in {PLoS} Article-Level Metrics},
  url           = {http://dx.doi.org/10.1371/journal.pone.0019917},
  volume        = {6},
  year          = {2011}
}
@inproceedings{peters2011crowdsourcing,
  abstract  = {Qualitative journal evaluation makes use of cumulated content descriptions of single articles. These can either be represented by author-generated keywords, professionally indexed subject headings, automatically extracted terms or by reader-generated tags as used in social bookmarking systems. It is assumed that particularly the users' view on article content differs significantly from the authors' or indexers' perspectives. To verify this assumption, title and abstract terms, author keywords, Inspec subject headings, KeyWords Plus™ and tags are compared by calculating the overlap between the respective datasets. Our approach includes extensive term preprocessing (i.e. stemming, spelling unifications) to gain a homogeneous term collection. When term overlap is calculated for every single document of the dataset, similarity values are low. Thus, the presented study confirms the assumption, that the different types of keywords each reflect a different perspective of the articles' contents and that tags (cumulated across articles) can be used in journal evaluation to represent a reader-specific view on published content.},
  author    = {Peters, Isabella and Haustein, Stefanie and Terliesner, Jens},
  booktitle = {ACM WebSci'11},
  interhash = {def78a2b12565187bcac0cf08089b7a1},
  intrahash = {8e03cf8d57f903da395c07e9a9125f08},
  month     = jun,
  note      = {WebSci Conference 2011},
  pages     = {1--4},
  title     = {Crowdsourcing in Article Evaluation},
  url       = {http://journal.webscience.org/487/},
  year      = {2011}
}
@article{haley2014ranking,
  abstract  = {Recently, Harzing's Publish or Perish software was updated to include Microsoft Academic Search as a second citation database search option for computing various citation-based metrics. This article explores the new search option by scoring 50 top economics and finance journals and comparing them with the results obtained using the original Google Scholar-based search option. The new database delivers significantly smaller scores for all metrics, but the rank correlations across the two databases for the h-index, g-index, AWCR, and e-index are significantly correlated, especially when the time frame is restricted to more recent years. Comparisons are also made to the Article Influence score from eigenfactor.org and to the RePEc h-index, both of which adjust for journal-level self-citations.},
  author    = {Haley, M. Ryan},
  doi       = {10.1002/asi.23080},
  interhash = {4c6796cff62fe5c8a8cf638f9785cd14},
  intrahash = {29feb827b9f64fa5828eb4e6298d38f7},
  issn      = {2330-1643},
  journal   = {Journal of the Association for Information Science and Technology},
  number    = {5},
  pages     = {1079--1084},
  title     = {Ranking top economics and finance journals using {Microsoft Academic Search} versus {Google Scholar}: How does the new {Publish or Perish} option compare?},
  url       = {http://dx.doi.org/10.1002/asi.23080},
  volume    = {65},
  year      = {2014}
}
@article{bornmann2008citation,
  abstract  = {Purpose – The purpose of this paper is to present a narrative review of studies on the citing behavior of scientists, covering mainly research published in the last 15 years. Based on the results of these studies, the paper seeks to answer the question of the extent to which scientists are motivated to cite a publication not only to acknowledge intellectual and cognitive influences of scientific peers, but also for other, possibly non-scientific, reasons. Design/methodology/approach – The review covers research published from the early 1960s up to mid-2005 (approximately 30 studies on citing behavior-reporting results in about 40 publications). Findings – The general tendency of the results of the empirical studies makes it clear that citing behavior is not motivated solely by the wish to acknowledge intellectual and cognitive influences of colleague scientists, since the individual studies reveal also other, in part non-scientific, factors that play a part in the decision to cite. However, the results of the studies must also be deemed scarcely reliable: the studies vary widely in design, and their results can hardly be replicated. Many of the studies have methodological weaknesses. Furthermore, there is evidence that the different motivations of citers are “not so different or ‘randomly given’ to such an extent that the phenomenon of citation would lose its role as a reliable measure of impact”. Originality/value – Given the increasing importance of evaluative bibliometrics in the world of scholarship, the question “What do citation counts measure?” is a particularly relevant and topical issue.},
  author    = {Bornmann, Lutz and Daniel, Hans-Dieter},
  doi       = {10.1108/00220410810844150},
  interhash = {ef016be783f4956817cded258543ece3},
  intrahash = {544d3243f7c7327b946292a80f9b6451},
  journal   = {Journal of Documentation},
  number    = {1},
  pages     = {45--80},
  title     = {What do citation counts measure? A review of studies on citing behavior},
  url       = {http://dx.doi.org/10.1108/00220410810844150},
  volume    = {64},
  year      = {2008}
}
@article{phelan1999compendium,
  abstract  = {This paper examines a number of the criticisms that citation analysis has been subjected to over the years. It is argued that many of these criticisms have been based on only limited examinations of data in particular contexts and it remains unclear how broadly applicable these problems are to research conducted at different levels of analysis, in specific field, and among various national data sets. Relevant evidence is provided from analysis of Australian and international data.},
  author    = {Phelan, Thomas J.},
  doi       = {10.1007/BF02458472},
  interhash = {a8e468c0850ef735517484b121e30630},
  intrahash = {a9d0ef4078c380cb07619a545ed4144d},
  issn      = {0138-9130},
  journal   = {Scientometrics},
  language  = {English},
  number    = {1},
  pages     = {117--136},
  publisher = {Kluwer Academic Publishers},
  title     = {A compendium of issues for citation analysis},
  url       = {http://dx.doi.org/10.1007/BF02458472},
  volume    = {45},
  year      = {1999}
}
@article{bonzi1991motivations,
  abstract  = {The citation motivations among 51 self citing authors in several natural science disciplines were investigated. Results of a survey on reasons for both self citation and citation to others show that there are very few differences in motivation, and that there are plausible intellectual grounds for those differences which are substantial. Analysis of exposure in text reveals virtually no differences between self citations and citations to others. Analysis of individual disciplines also uncover no substantive differences in either motivation or exposure in text.},
  author    = {Bonzi, Susan and Snyder, H. W.},
  doi       = {10.1007/BF02017571},
  interhash = {b531a253fae4751735918d6d5c8b44bd},
  intrahash = {fcd88cce5ca6a7c99cb4726921752a1b},
  issn      = {0138-9130},
  journal   = {Scientometrics},
  language  = {English},
  number    = {2},
  pages     = {245--254},
  publisher = {Kluwer Academic Publishers},
  title     = {Motivations for citation: A comparison of self citation and citation to others},
  url       = {http://dx.doi.org/10.1007/BF02017571},
  volume    = {21},
  year      = {1991}
}
@article{cerinek2015network,
  abstract      = {We analyze the data about works (papers, books) from the time period 1990–2010 that are collected in Zentralblatt MATH database. The data were converted into four 2-mode networks (works …},
  author        = {Cerinšek, Monika and Batagelj, Vladimir},
  doi           = {10.1007/s11192-014-1419-z},
  interhash     = {e65f748684210857bb19dc7f69d65f86},
  internal-note = {NOTE(review): abstract is cut off mid-sentence in the source data; restore the full text from the publisher page if needed.},
  intrahash     = {bcba93fd0e6381289c489cbab20bbec7},
  issn          = {0138-9130},
  journal       = {Scientometrics},
  language      = {English},
  number        = {1},
  pages         = {977--1001},
  publisher     = {Springer Netherlands},
  title         = {Network analysis of {Zentralblatt MATH} data},
  url           = {http://dx.doi.org/10.1007/s11192-014-1419-z},
  volume        = {102},
  year          = {2015}
}
@article{albarrn2011references,
  abstract  = {This article studies massive evidence about references made and citations received after a 5-year citation window by 3.7 million articles published in 1998 to 2002 in 22 scientific fields. We find that the distributions of references made and citations received share a number of basic features across sciences. Reference distributions are rather skewed to the right while citation distributions are even more highly skewed: The mean is about 20 percentage points to the right of the median, and articles with a remarkable or an outstanding number of citations represent about 9% of the total. Moreover, the existence of a power law representing the upper tail of citation distributions cannot be rejected in 17 fields whose articles represent 74.7% of the total. Contrary to the evidence in other contexts, the value of the scale parameter is above 3.5 in 13 of the 17 cases. Finally, power laws are typically small, but capture a considerable proportion of the total citations received.},
  author    = {Albarrán, Pedro and Ruiz-Castillo, Javier},
  doi       = {10.1002/asi.21448},
  interhash = {79502663727fcbd4834a423f4e3212a3},
  intrahash = {f20e50e960696bab3b39b628718dd850},
  issn      = {1532-2890},
  journal   = {Journal of the American Society for Information Science and Technology},
  number    = {1},
  pages     = {40--49},
  publisher = {Wiley Subscription Services, Inc., A Wiley Company},
  title     = {References made and citations received by scientific articles},
  url       = {http://dx.doi.org/10.1002/asi.21448},
  volume    = {62},
  year      = {2011}
}
@article{brzezinski2015power,
  abstract  = {Modeling distributions of citations to scientific papers is crucial for understanding how science develops. However, there is a considerable empirical controversy on which statistical model fits the citation distributions best. This paper is concerned with rigorous empirical detection of power-law behaviour in the distribution of citations received by the most highly cited scientific papers. We have used a large, novel data set on citations to scientific papers published between 1998 and 2002 drawn from Scopus. The power-law model is compared with a number of alternative models using a likelihood ratio test. We have found that the power-law hypothesis is rejected for around half of the Scopus fields of science. For these fields of science, the Yule, power-law with exponential cut-off and log-normal distributions seem to fit the data better than the pure power-law model. On the other hand, when the power-law hypothesis is not rejected, it is usually empirically indistinguishable from most of the alternative models. The pure power-law model seems to be the best model only for the most highly cited papers in “Physics and Astronomy”. Overall, our results seem to support theories implying that the most highly cited scientific papers follow the Yule, power-law with exponential cut-off or log-normal distribution. Our findings suggest also that power laws in citation distributions, when present, account only for a very small fraction of the published papers (less than 1 % for most of science fields) and that the power-law scaling parameter (exponent) is substantially higher (from around 3.2 to around 4.7) than found in the older literature.},
  author    = {Brzezinski, Michal},
  doi       = {10.1007/s11192-014-1524-z},
  interhash = {b162eddb3ff76a9eef5daf450da934c0},
  intrahash = {8ef9a6fbfcca3d599ca500cf4f9a2e39},
  issn      = {0138-9130},
  journal   = {Scientometrics},
  language  = {English},
  number    = {1},
  pages     = {213--228},
  publisher = {Springer Netherlands},
  title     = {Power laws in citation distributions: evidence from {Scopus}},
  url       = {http://dx.doi.org/10.1007/s11192-014-1524-z},
  volume    = {103},
  year      = {2015}
}