@article{bizer2009dbpedia,
  author    = {Bizer, Christian and Lehmann, Jens and Kobilarov, Georgi and Auer, Sören and Becker, Christian and Cyganiak, Richard and Hellmann, Sebastian},
  title     = {{DBpedia} - A crystallization point for the {Web of Data}},
  journal   = {Web Semantics: Science, Services and Agents on the World Wide Web},
  volume    = {7},
  number    = {3},
  pages     = {154--165},
  year      = {2009},
  issn      = {1570-8268},
  doi       = {10.1016/j.websem.2009.07.002},
  url       = {http://www.sciencedirect.com/science/article/pii/S1570826809000225},
  abstract  = {The DBpedia project is a community effort to extract structured information from Wikipedia and to make this information accessible on the Web. The resulting DBpedia knowledge base currently describes over 2.6 million entities. For each of these entities, DBpedia defines a globally unique identifier that can be dereferenced over the Web into a rich RDF description of the entity, including human-readable definitions in 30 languages, relationships to other resources, classifications in four concept hierarchies, various facts as well as data-level links to other Web data sources describing the entity. Over the last year, an increasing number of data publishers have begun to set data-level links to DBpedia resources, making DBpedia a central interlinking hub for the emerging Web of Data. Currently, the Web of interlinked data sources around DBpedia provides approximately 4.7 billion pieces of information and covers domains such as geographic information, people, companies, films, music, genes, drugs, books, and scientific publications. This article describes the extraction of the DBpedia knowledge base, the current status of interlinking DBpedia with other data sources on the Web, and gives an overview of applications that facilitate the Web of Data around DBpedia.},
  interhash = {087f766f30469cbc881c83ad156a104a},
  intrahash = {560097dc36a8e66b69db5cb22c1fa334},
}

@inproceedings{suchanek2007semantic,
  author    = {Suchanek, Fabian M. and Kasneci, Gjergji and Weikum, Gerhard},
  title     = {{YAGO}: a core of semantic knowledge},
  booktitle = {Proceedings of the 16th international conference on World Wide Web},
  pages     = {697--706},
  year      = {2007},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Banff, Alberta, Canada},
  numpages  = {10},
  isbn      = {978-1-59593-654-7},
  doi       = {10.1145/1242572.1242667},
  url       = {http://doi.acm.org/10.1145/1242572.1242667},
  abstract  = {We present YAGO, a light-weight and extensible ontology with high coverage and quality. YAGO builds on entities and relations and currently contains more than 1 million entities and 5 million facts. This includes the Is-A hierarchy as well as non-taxonomic relations between entities (such as HASONEPRIZE). The facts have been automatically extracted from Wikipedia and unified with WordNet, using a carefully designed combination of rule-based and heuristic methods described in this paper. The resulting knowledge base is a major step beyond WordNet: in quality by adding knowledge about individuals like persons, organizations, products, etc. with their semantic relationships - and in quantity by increasing the number of facts by more than an order of magnitude. Our empirical evaluation of fact correctness shows an accuracy of about 95%. YAGO is based on a logically clean model, which is decidable, extensible, and compatible with RDFS. Finally, we show how YAGO can be further extended by state-of-the-art information extraction techniques.},
  acmid     = {1242667},
  interhash = {1d2c2b23ce2a6754d12c4364e19c574c},
  intrahash = {84ae693c0a6dfb6d4b051b0b6dbd3668},
}

@incollection{auer2007dbpedia,
  author    = {Auer, Sören and Bizer, Christian and Kobilarov, Georgi and Lehmann, Jens and Cyganiak, Richard and Ives, Zachary},
  title     = {{DBpedia}: A Nucleus for a Web of Open Data},
  booktitle = {The Semantic Web},
  editor    = {Aberer, Karl and Choi, Key-Sun and Noy, Natasha and Allemang, Dean and Lee, Kyung-Il and Nixon, Lyndon and Golbeck, Jennifer and Mika, Peter and Maynard, Diana and Mizoguchi, Riichiro and Schreiber, Guus and Cudré-Mauroux, Philippe},
  series    = {Lecture Notes in Computer Science},
  volume    = {4825},
  pages     = {722--735},
  year      = {2007},
  publisher = {Springer},
  address   = {Berlin/Heidelberg},
  isbn      = {978-3-540-76297-3},
  doi       = {10.1007/978-3-540-76298-0_52},
  url       = {http://dx.doi.org/10.1007/978-3-540-76298-0_52},
  abstract  = {DBpedia is a community effort to extract structured information from Wikipedia and to make this information available on the Web. DBpedia allows you to ask sophisticated queries against datasets derived from Wikipedia and to link other datasets on the Web to Wikipedia data. We describe the extraction of the DBpedia datasets, and how the resulting information is published on the Web for human- and machine-consumption. We describe some emerging applications from the DBpedia community and show how website authors can facilitate DBpedia content within their sites. Finally, we present the current status of interlinking DBpedia with other open datasets on the Web and outline how DBpedia could serve as a nucleus for an emerging Web of open data.},
  interhash = {ba9f8a17de78f7864934ddb96afa67df},
  intrahash = {b00f9f95ba1970164ad70aa227719c6e},
}

@inproceedings{martins2008extracting,
  author    = {Martins, B. and Manguinhas, H. and Borbinha, J.},
  title     = {Extracting and Exploring the Geo-Temporal Semantics of Textual Resources},
  booktitle = {Proceedings of the International Conference on Semantic Computing},
  pages     = {1--9},
  month     = aug,
  year      = {2008},
  publisher = {IEEE Computer Society},
  doi       = {10.1109/ICSC.2008.86},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4597167},
  abstract  = {Geo-temporal criteria are important for filtering, grouping and prioritizing information resources. This presents techniques for extracting semantic geo-temporal information from text, using simple text mining methods that leverage on a gazetteer. A prototype system, implementing the proposed methods and capable of displaying information over maps and timelines, is described. This prototype can take input in RSS, demonstrating the application to content from many different online sources. Experimental results demonstrate the efficiency and accuracy of the proposed approaches.},
  interhash = {d03fecb6b3261ffa0a5e11789b188883},
  intrahash = {5a889bc7d9e81cb1d294cb83b767bf64},
}

@article{goodwin2008geographical,
  author    = {Goodwin, John and Dolbear, Catherine and Hart, Glen},
  title     = {Geographical Linked Data: The Administrative Geography of {Great Britain} on the {Semantic Web}},
  journal   = {Transactions in GIS},
  volume    = {12},
  pages     = {19--30},
  year      = {2008},
  publisher = {Blackwell Publishing Ltd},
  issn      = {1467-9671},
  doi       = {10.1111/j.1467-9671.2008.01133.x},
  url       = {http://dx.doi.org/10.1111/j.1467-9671.2008.01133.x},
  abstract  = {Ordnance Survey, the national mapping agency of Great Britain, is investigating how semantic web technologies assist its role as a geographical information provider. A major part of this work involves the development of prototype products and datasets in RDF. This article discusses the production of an example dataset for the administrative geography of Great Britain, demonstrating the advantages of explicitly encoding topological relations between geographic entities over traditional spatial queries. We also outline how these data can be linked to other datasets on the web of linked data and some of the challenges that this raises.},
  interhash = {ea248d549690eceb8e7aa06ccb24e226},
  intrahash = {08412bb4afca1e86d0cca0a8a083f2a2},
}

@inproceedings{tramp2010weaving,
  author    = {Tramp, Sebastian and Frischmuth, Philipp and Ermilov, Timofey and Auer, Sören},
  title     = {Weaving a Social Data Web with Semantic {Pingback}},
  booktitle = {Proceedings of the EKAW 2010 - Knowledge Engineering and Knowledge Management by the Masses; 11th October-15th October 2010 - Lisbon, Portugal},
  editor    = {Cimiano, P. and Pinto, H. S.},
  series    = {Lecture Notes in Artificial Intelligence},
  volume    = {6317},
  pages     = {135--149},
  month     = oct,
  year      = {2010},
  publisher = {Springer},
  address   = {Berlin/Heidelberg},
  url       = {http://svn.aksw.org/papers/2010/EKAW_SemanticPingback/public.pdf},
  abstract  = {In this paper we tackle some of the most pressing obstacles of the emerging Linked Data Web, namely the quality, timeliness and coherence as well as direct end user benefits. We present an approach for complementing the Linked Data Web with a social dimension by extending the well-known Pingback mechanism, which is a technological cornerstone of the blogosphere, towards a Semantic Pingback. It is based on the advertising of an RPC service for propagating typed RDF links between Data Web resources. Semantic Pingback is downwards compatible with conventional Pingback implementations, thus allowing to connect and interlink resources on the Social Web with resources on the Data Web. We demonstrate its usefulness by showcasing use cases of the Semantic Pingback implementations in the semantic wiki OntoWiki and the Linked Data interface for database-backed Web applications Triplify.},
  interhash = {c2e23e5a78627b560bb3103ecd52a410},
  intrahash = {0e32b9750ee77fcdf2162f70aaee5622},
  timestamp = {2010.06.16},
}