@inproceedings{chan2009mathematical,
  abstract  = {Human computation is a technique that makes use of human abilities to solve computational problems. Social games harness the power of Internet game players to solve human computation problems. In previous work, many social games have been proposed and proved quite successful, but no formal framework exists for designing social games in general. A formal framework is important because it lists the design elements of a social game, the characteristics of a human computation problem, and the relationships between them; such a framework simplifies designing a social game for a specific problem. In this paper, our contributions are: (1) we formulate a formal model of social games, (2) we analyze the framework and derive some interesting properties based on the model's interactions, (3) we illustrate how some current social games can be realized with the proposed formal model, and (4) we describe how to design a social game for solving a specific problem using the proposed formal model. This paper presents a set of design guidelines derived from the formal model and demonstrates that the model can help design a social game for solving a specific problem in a formal and structured way.},
  author    = {Chan, Kam Tong and King, Irwin and Yuen, Man-Ching},
  booktitle = {Proceedings of the International Conference on Computational Science and Engineering, CSE '09},
  doi       = {10.1109/CSE.2009.166},
  month     = aug,
  pages     = {1205--1210},
  title     = {Mathematical Modeling of Social Games},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5283086&tag=1},
  volume    = 4,
  year      = 2009
}

@inproceedings{chang2009reading,
  abstract  = {Probabilistic topic models are a popular tool for the unsupervised analysis of text, providing both a predictive model of future text and a latent topic representation of the corpus. Practitioners typically assume that the latent space is semantically meaningful. It is used to check models, summarize the corpus, and guide exploration of its contents. However, whether the latent space is interpretable is in need of quantitative evaluation. In this paper, we present new quantitative methods for measuring semantic meaning in inferred topics. We back these measures with large-scale user studies, showing that they capture aspects of the model that are undetected by previous measures of model quality based on held-out likelihood. Surprisingly, topic models which perform better on held-out likelihood may infer less semantically meaningful topics.},
  author    = {Chang, Jonathan and Boyd-Graber, Jordan L. and Gerrish, Sean and Wang, Chong and Blei, David M.},
  booktitle = {Advances in Neural Information Processing Systems 22 (NIPS 2009)},
  editor    = {Bengio, Yoshua and Schuurmans, Dale and Lafferty, John D. and Williams, Christopher K. I. and Culotta, Aron},
  isbn      = {9781615679119},
  pages     = {288--296},
  publisher = {Curran Associates, Inc.},
  title     = {Reading Tea Leaves: How Humans Interpret Topic Models},
  url       = {http://books.nips.cc/papers/files/nips22/NIPS2009_0125.pdf},
  year      = 2009
}