@article{bechhofer2013linked, abstract = {Scientific data represents a significant portion of the linked open data cloud and scientists stand to benefit from the data fusion capability this will afford. Publishing linked data into the cloud, however, does not ensure the required reusability. Publishing has requirements of provenance, quality, credit, attribution and methods to provide the reproducibility that enables validation of results. In this paper we make the case for a scientific data publication model on top of linked data and introduce the notion of Research Objects as first class citizens for sharing and publishing.}, author = {Bechhofer, Sean and Buchan, Iain and De Roure, David and Missier, Paolo and Ainsworth, John and Bhagat, Jiten and Couch, Philip and Cruickshank, Don and Delderfield, Mark and Dunlop, Ian and Gamble, Matthew and Michaelides, Danius and Owen, Stuart and Newman, David and Sufi, Shoaib and Goble, Carole}, doi = {10.1016/j.future.2011.08.004}, interhash = {8df8b7069a622aa2eae6d74e5fdc0a6b}, intrahash = {f500b67a045765125183e23c827991d2}, issn = {0167-739X}, journal = {Future Generation Computer Systems}, number = 2, pages = {599--611}, title = {Why linked data is not enough for scientists}, url = {http://www.sciencedirect.com/science/article/pii/S0167739X11001439}, volume = 29, year = 2013 } @inproceedings{vandesompel2010httpbased, abstract = {Dereferencing a URI returns a representation of the current state of the resource identified by that URI. But, on the Web representations of prior states of a resource are also available, for example, as resource versions in Content Management Systems or archival resources in Web Archives such as the Internet Archive. This paper introduces a resource versioning mechanism that is fully based on HTTP and uses datetime as a global version indicator. The approach allows "follow your nose" style navigation both from the current time-generic resource to associated time-specific version resources as well as among version resources. The proposed versioning mechanism is congruent with the Architecture of the World Wide Web, and is based on the Memento framework that extends HTTP with transparent content negotiation in the datetime dimension. The paper shows how the versioning approach applies to Linked Data, and by means of a demonstrator built for DBpedia, it also illustrates how it can be used to conduct a time-series analysis across versions of Linked Data descriptions.}, author = {Van de Sompel, Herbert and Sanderson, Robert and Nelson, Michael L. and Balakireva, Lyudmila L. and Shankar, Harihar and Ainsworth, Scott}, booktitle = {Proceedings of Linked Data on the Web (LDOW2010)}, interhash = {0c517e7799d2c2da3f9b2a0daff27885}, intrahash = {8f9405e8056dd827d9c72a48e229a65a}, number = {1003.3661}, publisher = {arXiv}, series = {cs.DL}, title = {An HTTP-Based Versioning Mechanism for Linked Data}, url = {http://arxiv.org/abs/1003.3661}, year = 2010 } @incollection{rula2012diversity, abstract = {An increasing amount of data is published and consumed on the Web according to the Linked Data paradigm. In consideration of both publishers and consumers, the temporal dimension of data is important. In this paper we investigate the characterisation and availability of temporal information in Linked Data at large scale. Based on an abstract definition of temporal information we conduct experiments to evaluate the availability of such information using the data from the 2011 Billion Triple Challenge (BTC) dataset. 
Focusing in particular on the representation of temporal meta-information, i.e., temporal information associated with RDF statements and graphs, we investigate the approaches proposed in the literature, performing both a quantitative and a qualitative analysis and proposing guidelines for data consumers and publishers. Our experiments show that the amount of temporal information available in the LOD cloud is still very small; several different models have been used on different datasets, with a prevalence of approaches based on the annotation of RDF documents.}, address = {Berlin/Heidelberg}, author = {Rula, Anisa and Palmonari, Matteo and Harth, Andreas and Stadtmüller, Steffen and Maurino, Andrea}, booktitle = {The Semantic Web – ISWC 2012}, doi = {10.1007/978-3-642-35176-1_31}, editor = {Cudré-Mauroux, Philippe and Heflin, Jeff and Sirin, Evren and Tudorache, Tania and Euzenat, Jérôme and Hauswirth, Manfred and Parreira, Josiane Xavier and Hendler, Jim and Schreiber, Guus and Bernstein, Abraham and Blomqvist, Eva}, interhash = {ea17ab98217d3ed32b06425a83fb25ab}, intrahash = {2bf73337f9b2ca5abc5e07d1ee48cc30}, isbn = {978-3-642-35175-4}, pages = {492--507}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {On the Diversity and Availability of Temporal Information in Linked Open Data}, url = {http://dx.doi.org/10.1007/978-3-642-35176-1_31}, volume = 7649, year = 2012 } @article{bernerslee2013readwrite, abstract = {This paper discusses issues that will affect the future development of the Web, either increasing its power and utility, or alternatively suppressing its development. It argues for the importance of the continued development of the Linked Data Web, and describes the use of linked open data as an important component of that. Second, the paper defends the Web as a read–write medium, and goes on to consider how the read–write Linked Data Web could be achieved.}, author = {Berners-Lee, Tim and O’Hara, Kieron}, doi = {10.1098/rsta.2012.0513}, eprint = {http://rsta.royalsocietypublishing.org/content/371/1987/20120513.full.pdf+html}, interhash = {d7441404d63f5e6303e1c17f0aa27a8c}, intrahash = {9ec5e708342fac1e2ea2726cb7e2acd8}, journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences}, number = 1987, title = {The read–write Linked Data Web}, url = {http://rsta.royalsocietypublishing.org/content/371/1987/20120513.abstract}, volume = 371, year = 2013 } @article{baresi2006toward, abstract = {Traditional software development is based on the closed-world assumption that the boundary between system and environment is known and unchanging.
However, this assumption no longer works within today's unpredictable open-world settings, which demand techniques that let software react to changes by self-organizing its structure and self-adapting its behavior.}, acmid = {1175938}, address = {Los Alamitos, CA, USA}, author = {Baresi, Luciano and Di Nitto, Elisabetta and Ghezzi, Carlo}, doi = {10.1109/MC.2006.362}, interhash = {8901685f20a7b2a9334c3637eb33e785}, intrahash = {c9448fcba918899d9e149a4f5babfc2e}, issn = {0018-9162}, issue_date = {October 2006}, journal = {Computer}, month = oct, number = 10, numpages = {8}, pages = {36--43}, publisher = {IEEE Computer Society Press}, title = {Toward Open-World Software: Issues and Challenges}, url = {http://dx.doi.org/10.1109/MC.2006.362}, volume = 39, year = 2006 } @article{bizer2009dbpedia, abstract = {The DBpedia project is a community effort to extract structured information from Wikipedia and to make this information accessible on the Web. The resulting DBpedia knowledge base currently describes over 2.6 million entities. For each of these entities, DBpedia defines a globally unique identifier that can be dereferenced over the Web into a rich RDF description of the entity, including human-readable definitions in 30 languages, relationships to other resources, classifications in four concept hierarchies, various facts as well as data-level links to other Web data sources describing the entity. Over the last year, an increasing number of data publishers have begun to set data-level links to DBpedia resources, making DBpedia a central interlinking hub for the emerging Web of Data. Currently, the Web of interlinked data sources around DBpedia provides approximately 4.7 billion pieces of information and covers domains such as geographic information, people, companies, films, music, genes, drugs, books, and scientific publications. This article describes the extraction of the DBpedia knowledge base, the current status of interlinking DBpedia with other data sources on the Web, and gives an overview of applications that facilitate the Web of Data around DBpedia.}, author = {Bizer, Christian and Lehmann, Jens and Kobilarov, Georgi and Auer, Sören and Becker, Christian and Cyganiak, Richard and Hellmann, Sebastian}, doi = {10.1016/j.websem.2009.07.002}, interhash = {087f766f30469cbc881c83ad156a104a}, intrahash = {560097dc36a8e66b69db5cb22c1fa334}, issn = {1570-8268}, journal = {Web Semantics: Science, Services and Agents on the World Wide Web}, number = 3, pages = {154--165}, title = {DBpedia - A crystallization point for the Web of Data}, url = {http://www.sciencedirect.com/science/article/pii/S1570826809000225}, volume = 7, year = 2009 } @inproceedings{suchanek2007semantic, abstract = {We present YAGO, a light-weight and extensible ontology with high coverage and quality. YAGO builds on entities and relations and currently contains more than 1 million entities and 5 million facts. This includes the Is-A hierarchy as well as non-taxonomic relations between entities (such as HASWONPRIZE). The facts have been automatically extracted from Wikipedia and unified with WordNet, using a carefully designed combination of rule-based and heuristic methods described in this paper. The resulting knowledge base is a major step beyond WordNet: in quality by adding knowledge about individuals like persons, organizations, products, etc. with their semantic relationships - and in quantity by increasing the number of facts by more than an order of magnitude.
Our empirical evaluation of fact correctness shows an accuracy of about 95%. YAGO is based on a logically clean model, which is decidable, extensible, and compatible with RDFS. Finally, we show how YAGO can be further extended by state-of-the-art information extraction techniques.}, acmid = {1242667}, address = {New York, NY, USA}, author = {Suchanek, Fabian M. and Kasneci, Gjergji and Weikum, Gerhard}, booktitle = {Proceedings of the 16th international conference on World Wide Web}, doi = {10.1145/1242572.1242667}, interhash = {1d2c2b23ce2a6754d12c4364e19c574c}, intrahash = {84ae693c0a6dfb6d4b051b0b6dbd3668}, isbn = {978-1-59593-654-7}, location = {Banff, Alberta, Canada}, numpages = {10}, pages = {697--706}, publisher = {ACM}, title = {YAGO: a core of semantic knowledge}, url = {http://doi.acm.org/10.1145/1242572.1242667}, year = 2007 } @incollection{auer2007dbpedia, abstract = {DBpedia is a community effort to extract structured information from Wikipedia and to make this information available on the Web. DBpedia allows you to ask sophisticated queries against datasets derived from Wikipedia and to link other datasets on the Web to Wikipedia data. We describe the extraction of the DBpedia datasets, and how the resulting information is published on the Web for human- and machine-consumption. We describe some emerging applications from the DBpedia community and show how website authors can facilitate DBpedia content within their sites. Finally, we present the current status of interlinking DBpedia with other open datasets on the Web and outline how DBpedia could serve as a nucleus for an emerging Web of open data.}, address = {Berlin/Heidelberg}, author = {Auer, Sören and Bizer, Christian and Kobilarov, Georgi and Lehmann, Jens and Cyganiak, Richard and Ives, Zachary}, booktitle = {The Semantic Web}, doi = {10.1007/978-3-540-76298-0_52}, editor = {Aberer, Karl and Choi, Key-Sun and Noy, Natasha and Allemang, Dean and Lee, Kyung-Il and Nixon, Lyndon and Golbeck, Jennifer and Mika, Peter and Maynard, Diana and Mizoguchi, Riichiro and Schreiber, Guus and Cudré-Mauroux, Philippe}, interhash = {ba9f8a17de78f7864934ddb96afa67df}, intrahash = {b00f9f95ba1970164ad70aa227719c6e}, isbn = {978-3-540-76297-3}, pages = {722--735}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {DBpedia: A Nucleus for a Web of Open Data}, url = {http://dx.doi.org/10.1007/978-3-540-76298-0_52}, volume = 4825, year = 2007 } @article{goodwin2008geographical, abstract = {Ordnance Survey, the national mapping agency of Great Britain, is investigating how semantic web technologies assist its role as a geographical information provider. A major part of this work involves the development of prototype products and datasets in RDF. This article discusses the production of an example dataset for the administrative geography of Great Britain, demonstrating the advantages of explicitly encoding topological relations between geographic entities over traditional spatial queries. 
We also outline how these data can be linked to other datasets on the web of linked data and some of the challenges that this raises.}, author = {Goodwin, John and Dolbear, Catherine and Hart, Glen}, doi = {10.1111/j.1467-9671.2008.01133.x}, interhash = {ea248d549690eceb8e7aa06ccb24e226}, intrahash = {08412bb4afca1e86d0cca0a8a083f2a2}, issn = {1467-9671}, journal = {Transactions in GIS}, pages = {19--30}, publisher = {Blackwell Publishing Ltd}, title = {Geographical Linked Data: The Administrative Geography of Great Britain on the Semantic Web}, url = {http://dx.doi.org/10.1111/j.1467-9671.2008.01133.x}, volume = 12, year = 2008 } @article{haklay2008openstreetmap, abstract = {The OpenStreetMap project is a knowledge collective that provides user-generated street maps. OSM follows the peer production model that created Wikipedia; its aim is to create a set of map data that's free to use, editable, and licensed under new copyright schemes. A considerable number of contributors edit the world map collaboratively using the OSM technical infrastructure, and a core group, estimated at approximately 40 volunteers, dedicate their time to creating and improving OSM's infrastructure, including maintaining the server, writing the core software that handles the transactions with the server, and creating cartographical outputs. There's also a growing community of software developers who develop software tools to make OSM data available for further use across different application domains, software platforms, and hardware devices. The OSM project's hub is the main OSM Web site.}, author = {Haklay, M. and Weber, P.}, doi = {10.1109/MPRV.2008.80}, interhash = {923ccd3197978c219f72a51133875942}, intrahash = {5eba3bf5036162b2fc05d3a6c5ae7faa}, issn = {1536-1268}, journal = {IEEE Pervasive Computing}, month = oct, number = 4, pages = {12--18}, publisher = {IEEE}, title = {OpenStreetMap: User-Generated Street Maps}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4653466&tag=1}, volume = 7, year = 2008 } @inproceedings{guha2004rating, abstract = {In the offline world, we look to the people we trust and those they trust for reliable information. In this paper, we present a computational model of this phenomenon and show how it can be used to identify high quality content in an Open Rating System, i.e., a system in which any user can rate content. We present a case study (Epinions.com) of a system based on this model and describe a new platform called PeopleNet for harnessing this phenomenon in an open distributed fashion.}, author = {Guha, R.}, booktitle = {1st Workshop on Friend of a Friend, Social Networking and the Semantic Web}, interhash = {5c7e0fa8ee4d5a1e204a7153e346e37e}, intrahash = {57ae21fc256d225e99dff9b74ea1e243}, month = sep, title = {Open rating systems}, url = {http://www.w3.org/2001/sw/Europe/events/foaf-galway/papers/fp/open_rating_systems/wot.pdf}, year = 2004 }