@inproceedings{auer2007what,
  abstract  = {Wikis are established means for the collaborative authoring, versioning and publishing of textual articles. The Wikipedia project, for example, succeeded in creating the by far largest encyclopedia just on the basis of a wiki. Recently, several approaches have been proposed on how to extend wikis to allow the creation of structured and semantically enriched content. However, the means for creating semantically enriched structured content are already available and are, although unconsciously, even used by Wikipedia authors. In this article, we present a method for revealing this structured content by extracting information from template instances. We suggest ways to efficiently query the vast amount of extracted information (e.g. more than 8 million RDF statements for the English Wikipedia version alone), leading to astonishing query answering possibilities (such as for the title question). We analyze the quality of the extracted content, and propose strategies for quality improvements with just minor modifications of the wiki systems being currently used.},
  author    = {Auer, S{\"o}ren and Lehmann, Jens},
  title     = {What Have {Innsbruck} and {Leipzig} in Common? Extracting Semantics from Wiki Content},
  booktitle = {ESWC},
  crossref  = {DBLP:conf/esws/2007},
  pages     = {503--517},
  year      = {2007},
  doi       = {10.1007/978-3-540-72667-8_36},
  url       = {http://www.springerlink.com/content/3131t21p634191n2/},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  file      = {auer2007what.pdf:auer2007what.pdf:PDF},
  groups    = {public},
  interhash = {2b70ab546da1b45f5350d3ff742c4288},
  intrahash = {b8e464b4a672530bf91c9189f17cca73},
  timestamp = {2010-02-23 14:49:49},
  username  = {dbenz},
}

@inproceedings{nazir2008extraction,
  abstract  = {Social aspects are critical in the decision making process for social actors (human beings). Social aspects can be categorized into social interaction, social communities, social groups or any kind of behavior that emerges from interlinking, overlapping or similarities between interests of a society. These social aspects are dynamic and emergent. Therefore, interlinking them in a social structure, based on bipartite affiliation network, may result in isolated graphs. The major reason is that as these correspondences are dynamic and emergent, they should be coupled with more than a single affiliation in order to sustain the interconnections during interest evolutions. In this paper we propose to interlink actors using multiple tripartite graphs rather than a bipartite graph which was the focus of most of the previous social network building techniques. The utmost benefit of using tripartite graphs is that we can have multiple and hierarchical links between social actors. Therefore in this paper we discuss the extraction, plotting and analysis methods of tripartite relations between authors, articles and categories from Wikipedia. Furthermore, we also discuss the advantages of tripartite relationships over bipartite relationships. As a conclusion of this study we argue based on our results that to build useful, robust and dynamic social networks, actors should be interlinked in one or more tripartite networks.},
  author    = {Nazir, F. and Takeda, H.},
  title     = {Extraction and analysis of tripartite relationships from {Wikipedia}},
  booktitle = {IEEE International Symposium on Technology and Society},
  organization = {IEEE},
  pages     = {1--13},
  month     = jun,
  year      = {2008},
  doi       = {10.1109/ISTAS.2008.4559785},
  isbn      = {978-1-4244-1669-1},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4559785},
  file      = {nazir2008extraction.pdf:nazir2008extraction.pdf:PDF},
  groups    = {public},
  interhash = {7d3cb02c1c7774fe43e4303f0d3c37a4},
  intrahash = {c3cca9801ab1e6d2598be1041c19618c},
  timestamp = {2010-02-04 14:24:37},
  username  = {dbenz},
}

@inproceedings{silva2009semiautomatic,
  abstract  = {This paper introduces WikiOnto: a system that assists in the extraction and modeling of topic ontologies in a semi-automatic manner using a preprocessed document corpus derived from Wikipedia. Based on the Wikipedia XML Corpus, we present a three-tiered framework for extracting topic ontologies in quick time and a modeling environment to refine these ontologies. Using natural language processing (NLP) and other machine learning (ML) techniques along with a very rich document corpus, this system proposes a solution to a task that is generally considered extremely cumbersome. The initial results of the prototype suggest strong potential of the system to become highly successful in ontology extraction and modeling and also inspire further research on extracting ontologies from other semi-structured document corpora as well.},
  author    = {De Silva, L. and Jayaratne, L.},
  title     = {Semi-automatic extraction and modeling of ontologies using {Wikipedia} {XML} Corpus},
  booktitle = {Second International Conference on the Applications of Digital Information and Web Technologies ({ICADIWT} '09)},
  pages     = {446--451},
  month     = aug,
  year      = {2009},
  doi       = {10.1109/ICADIWT.2009.5273871},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?isnumber=5273826&arnumber=5273871&count=156&index=116},
  file      = {silva2009semiautomatic.pdf:silva2009semiautomatic.pdf:PDF},
  groups    = {public},
  interhash = {c1996cb9e69de56e2bb2f8e763fe0482},
  intrahash = {66bec053541e521fbe68c0119806ae49},
  timestamp = {2010-02-23 12:54:40},
  username  = {dbenz},
}