@comment{Cleaned bibliography: removed the exact same-key duplicate of
  voelker2008aeon; normalised accents to brace-form LaTeX special characters,
  DOIs to bare form, page ranges to double hyphens, month fields to standard
  macros; protected proper nouns in titles; repaired garbled abstract text.
  Entry 1135863 duplicates volkel2006semantic but both keys are kept because
  either may be cited elsewhere (see its internal-note field).}

@inproceedings{volkel2006semantic,
  abstract  = {Wikipedia is the world's largest collaboratively edited source of encyclopaedic knowledge. But in spite of its utility, its contents are barely machine-interpretable. Structural knowledge, e.g. about how concepts are interrelated, can neither be formally stated nor automatically processed. Also the wealth of numerical data is only available as plain text and thus can not be processed by its actual meaning. We provide an extension to be integrated in Wikipedia, that allows the typing of links between articles and the specification of typed data inside the articles in an easy-to-use manner. Enabling even casual users to participate in the creation of an open semantic knowledge base, Wikipedia has the chance to become a resource of semantic statements, hitherto unknown regarding size, scope, openness, and internationalisation. These semantic enhancements bring to Wikipedia benefits of today's semantic technologies: more specific ways of searching and browsing. Also, the RDF export, that gives direct access to the formalised knowledge, opens Wikipedia up to a wide range of external applications, that will be able to use it as a background knowledge base. In this paper, we present the design, implementation, and possible uses of this extension.},
  address   = {New York, NY, USA},
  author    = {V{\"o}lkel, Max and Kr{\"o}tzsch, Markus and Vrande{\v{c}}i{\'c}, Denny and Haller, Heiko and Studer, Rudi},
  booktitle = {WWW '06: Proceedings of the 15th International Conference on World Wide Web},
  doi       = {10.1145/1135777.1135863},
  interhash = {2847e16839b842552f6f495ceda1d5d1},
  intrahash = {a204b4b6cad0255ec900ba59aec73485},
  isbn      = {1-59593-323-9},
  location  = {Edinburgh, Scotland},
  pages     = {585--594},
  publisher = {ACM},
  title     = {Semantic {Wikipedia}},
  url       = {http://portal.acm.org/citation.cfm?id=1135863},
  year      = {2006},
}

@inproceedings{ankolekar2007two,
  abstract  = {A common perception is that there are two competing visions for the future evolution of the Web: the Semantic Web and Web 2.0. A closer look, though, reveals that the core technologies and concerns of these two approaches are complementary and that each field can and must draw from the other's strengths. We believe that future web applications will retain the Web 2.0 focus on community and usability, while drawing on Semantic Web infrastructure to facilitate mashup-like information sharing. However, there are several open issues that must be addressed before such applications can become commonplace. In this paper, we outline a semantic weblogs scenario that illustrates the potential for combining Web 2.0 and Semantic Web technologies, while highlighting the unresolved issues that impede its realization. Nevertheless, we believe that the scenario can be realized in the short-term. We point to recent progress made in resolving each of the issues as well as future research directions for each of the communities.},
  address   = {New York, NY, USA},
  author    = {Ankolekar, Anupriya and Kr{\"o}tzsch, Markus and Tran, Thanh and Vrande{\v{c}}i{\'c}, Denny},
  booktitle = {WWW '07: Proceedings of the 16th International Conference on World Wide Web},
  doi       = {10.1145/1242572.1242684},
  file      = {ankolekar2007two.pdf:ankolekar2007two.pdf:PDF},
  groups    = {public},
  interhash = {1e51bd6cd043142a8de98b93e82b68b1},
  intrahash = {6b493ae653fcff556997f30273d766b9},
  isbn      = {978-1-59593-654-7},
  location  = {Banff, Alberta, Canada},
  pages     = {825--834},
  publisher = {ACM Press},
  timestamp = {2007-08-05 16:27:33},
  title     = {The Two Cultures: Mashing up {Web 2.0} and the {Semantic Web}},
  url       = {http://portal.acm.org/citation.cfm?id=1242684&coll=GUIDE&dl=ACM&CFID=21633871&CFTOKEN=81037701},
  username  = {dbenz},
  year      = {2007},
}

@inproceedings{1135863,
  internal-note = {Duplicate of volkel2006semantic (same paper, different key); kept because this key may be cited elsewhere. Prefer volkel2006semantic for new citations.},
  abstract  = {Wikipedia is the world's largest collaboratively edited source of encyclopaedic knowledge. But in spite of its utility, its contents are barely machine-interpretable. Structural knowledge, e.g. about how concepts are interrelated, can neither be formally stated nor automatically processed. Also the wealth of numerical data is only available as plain text and thus can not be processed by its actual meaning. We provide an extension to be integrated in Wikipedia, that allows the typing of links between articles and the specification of typed data inside the articles in an easy-to-use manner. Enabling even casual users to participate in the creation of an open semantic knowledge base, Wikipedia has the chance to become a resource of semantic statements, hitherto unknown regarding size, scope, openness, and internationalisation. These semantic enhancements bring to Wikipedia benefits of today's semantic technologies: more specific ways of searching and browsing. Also, the RDF export, that gives direct access to the formalised knowledge, opens Wikipedia up to a wide range of external applications, that will be able to use it as a background knowledge base. In this paper, we present the design, implementation, and possible uses of this extension.},
  address   = {New York, NY, USA},
  author    = {V{\"o}lkel, Max and Kr{\"o}tzsch, Markus and Vrande{\v{c}}i{\'c}, Denny and Haller, Heiko and Studer, Rudi},
  booktitle = {WWW '06: Proceedings of the 15th International Conference on World Wide Web},
  doi       = {10.1145/1135777.1135863},
  interhash = {2847e16839b842552f6f495ceda1d5d1},
  intrahash = {a204b4b6cad0255ec900ba59aec73485},
  isbn      = {1-59593-323-9},
  location  = {Edinburgh, Scotland},
  pages     = {585--594},
  publisher = {ACM},
  title     = {Semantic {Wikipedia}},
  url       = {http://portal.acm.org/citation.cfm?id=1135863},
  year      = {2006},
}

@article{voelker2008aeon,
  abstract  = {OntoClean is an approach towards the formal evaluation of taxonomic relations in ontologies. The application of OntoClean consists of two main steps. First, concepts are tagged according to meta-properties known as rigidity, unity, dependency and identity. Second, the tagged concepts are checked according to predefined constraints to discover taxonomic errors. Although OntoClean is well documented in numerous publications, it is still used rather infrequently due to the high costs of application. Especially, the manual tagging of concepts with the correct meta-properties requires substantial efforts of highly experienced ontology engineers. In order to facilitate the use of OntoClean and to enable the evaluation of real-world ontologies, we provide AEON, a tool which automatically tags concepts with appropriate OntoClean meta-properties and performs the constraint checking. We use the Web as an embodiment of world knowledge, where we search for patterns that indicate how to properly tag concepts. We thoroughly evaluated our approach against a manually created gold standard. The evaluation shows the competitiveness of our approach while at the same time significantly lowering the costs. All of our results, i.e. the tool AEON as well as the experiment data, are publicly available.},
  address   = {Amsterdam, The Netherlands, The Netherlands},
  author    = {V{\"o}lker, Johanna and Vrande{\v{c}}i{\'c}, Denny and Sure, York and Hotho, Andreas},
  interhash = {f14794f4961d0127dc50c1938eaef7ea},
  intrahash = {f8f0bb3e3495e7627770b470d1a5f1a3},
  issn      = {1570-5838},
  journal   = {Applied Ontology},
  number    = {1-2},
  pages     = {41--62},
  publisher = {IOS Press},
  title     = {{AEON} -- An Approach to the Automatic Evaluation of Ontologies},
  url       = {http://portal.acm.org/citation.cfm?id=1412422},
  volume    = {3},
  year      = {2008},
}

@inproceedings{voelker1:07:eswc,
  author    = {V{\"o}lker, Johanna and Vrande{\v{c}}i{\'c}, Denny and Sure, York and Hotho, Andreas},
  booktitle = {Proceedings of the European Semantic Web Conference, ESWC2007},
  editor    = {Franconi, Enrico and Kifer, Michael and May, Wolfgang},
  interhash = {5a5b17f5657ccff6fa7fd17dae4ae503},
  intrahash = {c5c43ae4a719e6e935a9ca1a4aca906b},
  month     = jul,
  publisher = {Springer-Verlag},
  series    = {Lecture Notes in Computer Science},
  title     = {Learning Disjointness},
  url       = {http://www.eswc2007.org/pdf/eswc07-voelker1.pdf},
  vgwort    = {26},
  volume    = {4519},
  year      = {2007},
}

@article{1551,
  abstract  = {Wikipedia is the world's largest collaboratively edited source of encyclopaedic knowledge. But in spite of its utility, its content is barely machine-interpretable and only weakly structured. With Semantic MediaWiki we provide an extension that enables wiki-users to semantically annotate wiki pages, based on which the wiki contents can be browsed, searched, and reused in novel ways. In this paper, we give an extended overview of Semantic MediaWiki and discuss experiences regarding performance and current applications.},
  author    = {Kr{\"o}tzsch, Markus and Vrande{\v{c}}i{\'c}, Denny and V{\"o}lkel, Max and Haller, Heiko and Studer, Rudi},
  interhash = {7957ab402fcb10d64e148f499deacba4},
  intrahash = {03d24fef49e40d9dec474d04d0b27000},
  journal   = {Journal of Web Semantics},
  month     = dec,
  note      = {To appear.},
  title     = {Semantic {Wikipedia}},
  url       = {http://korrekt.org/papers/KroetzschVrandecicVoelkelHaller_SemanticMediaWiki_2007.pdf},
  year      = {2007},
}

@inproceedings{semediawiki05,
  author    = {Kr{\"o}tzsch, Markus and Vrande{\v{c}}i{\'c}, Denny and V{\"o}lkel, Max},
  booktitle = {Proceedings of the WikiMania2005},
  interhash = {a6ef1487a785a3353031e3f079a27361},
  intrahash = {f9a54ea36dcb4931c1f323d80b2e7d5b},
  title     = {{Wikipedia} and the {Semantic Web} -- The Missing Links},
  url       = {http://www.aifb.uni-karlsruhe.de/WBS/mak/pub/wikimania.pdf},
  year      = {2005},
}

@article{kmjournaldiligent,
  author    = {Vrande{\v{c}}i{\'c}, Denny and Pinto, H. Sofia and Sure, York and Tempich, Christoph},
  interhash = {0cabab7456df24ce9111c8960af42c5d},
  intrahash = {167b670252215232dc59829364e361a2},
  journal   = {Journal of Knowledge Management},
  month     = oct,
  number    = {5},
  pages     = {85--96},
  title     = {The {DILIGENT} Knowledge Processes},
  url       = {http://www.aifb.uni-karlsruhe.de/WBS/ysu/publications/2005_kmjournal_diligent.pdf},
  volume    = {9},
  year      = {2005},
}