@article{mueller-2014b, abstract = {The combination of ubiquitous and social computing is an emerging research area which integrates different but complementary methods, techniques and tools. In this paper, we focus on the Ubicon platform, its applications, and a large spectrum of analysis results. Ubicon provides an extensible framework for building and hosting applications targeting both ubiquitous and social environments. We summarize the architecture and exemplify its implementation using four real-world applications built on top of Ubicon. In addition, we discuss several scientific experiments in the context of these applications in order to give a better picture of the potential of the framework, and discuss analysis results using several real-world data sets collected utilizing Ubicon.}, author = {Atzmueller, Martin and Becker, Martin and Kibanov, Mark and Scholz, Christoph and Doerfel, Stephan and Hotho, Andreas and Macek, Bjoern-Elmar and Mitzlaff, Folke and Mueller, Juergen and Stumme, Gerd}, doi = {10.1080/13614568.2013.873488}, interhash = {6364e034fa868644b30618dc887c0270}, intrahash = {d38f1e01e735253b4cad2c98c1027659}, issn = {1361-4568}, journal = {New Review of Hypermedia and Multimedia}, month = mar, number = 1, pages = {53--77}, title = {Ubicon and its Applications for Ubiquitous Social Computing}, url = {http://dx.doi.org/10.1080/13614568.2013.873488}, volume = 20, year = 2014 } @article{atzmueller2014ubicon, abstract = {The combination of ubiquitous and social computing is an emerging research area which integrates different but complementary methods, techniques and tools. In this paper, we focus on the Ubicon platform, its applications, and a large spectrum of analysis results. Ubicon provides an extensible framework for building and hosting applications targeting both ubiquitous and social environments. We summarize the architecture and exemplify its implementation using four real-world applications built on top of Ubicon. 
In addition, we discuss several scientific experiments in the context of these applications in order to give a better picture of the potential of the framework, and discuss analysis results using several real-world data sets collected utilizing Ubicon.}, author = {Atzmueller, Martin and Becker, Martin and Kibanov, Mark and Scholz, Christoph and Doerfel, Stephan and Hotho, Andreas and Macek, Bjoern-Elmar and Mitzlaff, Folke and Mueller, Juergen and Stumme, Gerd}, doi = {10.1080/13614568.2013.873488}, eprint = {http://www.tandfonline.com/doi/pdf/10.1080/13614568.2013.873488}, interhash = {6364e034fa868644b30618dc887c0270}, intrahash = {5d1ed63c337f8473d2b5b3b6c02a5f20}, journal = {New Review of Hypermedia and Multimedia}, number = 1, pages = {53--77}, title = {Ubicon and its applications for ubiquitous social computing}, url = {http://www.tandfonline.com/doi/abs/10.1080/13614568.2013.873488}, volume = 20, year = 2014 } @article{robertson2013programming, abstract = {The aim of ‘programming the global computer’ was identified by Milner and others as one of the grand challenges of computing research. At the time this phrase was coined, it was natural to assume that this objective might be achieved primarily through extending programming and specification languages. The Internet, however, has brought with it a different style of computation that (although harnessing variants of traditional programming languages) operates in a style different to those with which we are familiar. The ‘computer’ on which we are running these computations is a social computer in the sense that many of the elementary functions of the computations it runs are performed by humans, and successful execution of a program often depends on properties of the human society over which the program operates. These sorts of programs are not programmed in a traditional way and may have to be understood in a way that is different from the traditional view of programming. 
This shift in perspective raises new challenges for the science of the Web and for computing in general.}, author = {Robertson, David and Giunchiglia, Fausto}, doi = {10.1098/rsta.2012.0379}, eprint = {http://rsta.royalsocietypublishing.org/content/371/1987/20120379.full.pdf+html}, interhash = {c671d953e4eb09fc3fe67f93ccd2024c}, intrahash = {a802922683b23455f903551ee2b24b42}, journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences}, month = mar, number = 1987, title = {Programming the social computer}, url = {http://rsta.royalsocietypublishing.org/content/371/1987/20120379.abstract}, volume = 371, year = 2013 } @inproceedings{derose2008building, abstract = {The rapid growth of Web communities has motivated many solutions for building community data portals. These solutions follow roughly two approaches. The first approach (e.g., Libra, Citeseer, Cimple) employs semi-automatic methods to extract and integrate data from a multitude of data sources. The second approach (e.g., Wikipedia, Intellipedia) deploys an initial portal in wiki format, then invites community members to revise and add material. In this paper we consider combining the above two approaches to building community portals. The new hybrid machine-human approach brings significant benefits. It can achieve broader and deeper coverage, provide more incentives for users to contribute, and keep the portal more up-to-date with less user effort. In a sense, it enables building "community wikipedias", backed by an underlying structured database that is continuously updated using automatic techniques. We outline our ideas for the new approach, describe its challenges and opportunities, and provide initial solutions. Finally, we describe a real-world implementation and preliminary experiments that demonstrate the utility of the new approach.}, author = {DeRose, P. and Chai, Xiaoyong and Gao, B.J. and Shen, W. and Doan, An Hai and Bohannon, P. 
and Zhu, Xiaojin}, booktitle = {24th International Conference on Data Engineering}, doi = {10.1109/ICDE.2008.4497473}, interhash = {00f45357225b1e75ed93bddb8d456fb7}, intrahash = {38a2e84d3dfd845d9c260d5f15161c6f}, month = apr, pages = {646--655}, publisher = {IEEE}, title = {Building Community Wikipedias: A Machine-Human Partnership Approach}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4497473&tag=1}, year = 2008 } @inproceedings{jeffery2008payasyougo, abstract = {A primary challenge to large-scale data integration is creating semantic equivalences between elements from different data sources that correspond to the same real-world entity or concept. Dataspaces propose a pay-as-you-go approach: automated mechanisms such as schema matching and reference reconciliation provide initial correspondences, termed candidate matches, and then user feedback is used to incrementally confirm these matches. The key to this approach is to determine in what order to solicit user feedback for confirming candidate matches.

In this paper, we develop a decision-theoretic framework for ordering candidate matches for user confirmation using the concept of the value of perfect information (VPI). At the core of this concept is a utility function that quantifies the desirability of a given state; thus, we devise a utility function for dataspaces based on query result quality. We show in practice how to efficiently apply VPI in concert with this utility function to order user confirmations. A detailed experimental evaluation on both real and synthetic datasets shows that the ordering of user feedback produced by this VPI-based approach yields a dataspace with a significantly higher utility than a wide range of other ordering strategies. Finally, we outline the design of Roomba, a system that utilizes this decision-theoretic framework to guide a dataspace in soliciting user feedback in a pay-as-you-go manner.}, acmid = {1376701}, address = {New York, NY, USA}, author = {Jeffery, Shawn R. and Franklin, Michael J. and Halevy, Alon Y.}, booktitle = {Proceedings of the 2008 ACM SIGMOD international conference on Management of data}, doi = {10.1145/1376616.1376701}, interhash = {3ceaf563712b776c1ed97a8cb061f63b}, intrahash = {3bff24fb9eb1e39fa97a524aabb8dee9}, isbn = {978-1-60558-102-6}, location = {Vancouver, Canada}, numpages = {14}, pages = {847--860}, publisher = {ACM}, title = {Pay-as-you-go user feedback for dataspace systems}, url = {http://doi.acm.org/10.1145/1376616.1376701}, year = 2008 } @article{raykar2010learning, abstract = {For many supervised learning tasks it may be infeasible (or very expensive) to obtain objective and reliable labels. Instead, we can collect subjective (possibly noisy) labels from multiple experts or annotators. In practice, there is a substantial amount of disagreement among the annotators, and hence it is of great practical interest to address conventional supervised learning problems in this scenario. 
In this paper we describe a probabilistic approach for supervised learning when we have multiple annotators providing (possibly noisy) labels but no absolute gold standard. The proposed algorithm evaluates the different experts and also gives an estimate of the actual hidden labels. Experimental results indicate that the proposed method is superior to the commonly used majority voting baseline.}, acmid = {1859894}, author = {Raykar, Vikas C. and Yu, Shipeng and Zhao, Linda H. and Valadez, Gerardo Hermosillo and Florin, Charles and Bogoni, Luca and Moy, Linda}, interhash = {8113daf47997fddf48e4c6c79f2eba56}, intrahash = {14220abe8babfab01c0cdd5ebd5e4b7c}, issn = {1532-4435}, issue_date = {3/1/2010}, journal = {Journal of Machine Learning Research}, month = aug, numpages = {26}, pages = {1297--1322}, publisher = {JMLR.org}, title = {Learning From Crowds}, url = {http://dl.acm.org/citation.cfm?id=1756006.1859894}, volume = 11, year = 2010 } @incollection{li2011incorporating, abstract = {In scientific cooperation network, ambiguous author names may occur due to the existence of multiple authors with the same name. Users of these networks usually want to know the exact author of a paper, whereas we do not have any unique identifier to distinguish them. In this paper, we focus ourselves on such problem, we propose a new method that incorporates user feedback into the model for name disambiguation of scientific cooperation network. Perceptron is used as the classifier. Two features and a constraint drawn from user feedback are incorporated into the perceptron to enhance the performance of name disambiguation. Specifically, we construct user feedback as a training stream, and refine the perceptron continuously. 
Experimental results show that the proposed algorithm can learn continuously and significantly outperforms the previous methods without introducing user interactions.}, address = {Berlin/Heidelberg}, affiliation = {Intelligent and Distributed Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, 430074 P.R. China}, author = {Li, Yuhua and Wen, Aiming and Lin, Quan and Li, Ruixuan and Lu, Zhengding}, booktitle = {Web-Age Information Management}, doi = {10.1007/978-3-642-23535-1_39}, editor = {Wang, Haixun and Li, Shijun and Oyama, Satoshi and Hu, Xiaohua and Qian, Tieyun}, interhash = {3baace12cb4481dcceb53c2d47f413b5}, intrahash = {96f2ae8551126527c2dfe69c8fa22f6c}, isbn = {978-3-642-23534-4}, keyword = {Computer Science}, pages = {454--466}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Incorporating User Feedback into Name Disambiguation of Scientific Cooperation Network}, url = {http://dx.doi.org/10.1007/978-3-642-23535-1_39}, volume = 6897, year = 2011 } @article{lofi2012information, abstract = {Recent years brought tremendous advancements in the area of automated information extraction. But still, problem scenarios remain where even state-of-the-art algorithms do not provide a satisfying solution. In these cases, another aspiring recent trend can be exploited to achieve the required extraction quality: explicit crowdsourcing of human intelligence tasks. In this paper, we discuss the synergies between information extraction and crowdsourcing. In particular, we methodically identify and classify the challenges and fallacies that arise when combining both approaches. Furthermore, we argue that for harnessing the full potential of either approach, true hybrid techniques must be considered. 
To demonstrate this point, we showcase such a hybrid technique, which tightly interweaves information extraction with crowdsourcing and machine learning to vastly surpass the abilities of either technique.}, address = {Berlin/Heidelberg}, affiliation = {Institut für Informationssysteme, Technische Universität Braunschweig, Braunschweig, Germany}, author = {Lofi, Christoph and Selke, Joachim and Balke, Wolf-Tilo}, doi = {10.1007/s13222-012-0092-8}, interhash = {941feeaa7bb134e0a5f8b5c0225756b8}, intrahash = {37cc8f1d19105a073544d6594fbbc033}, issn = {1618-2162}, journal = {Datenbank-Spektrum}, keyword = {Computer Science}, number = 2, pages = {109--120}, publisher = {Springer}, title = {Information Extraction Meets Crowdsourcing: A Promising Couple}, url = {http://dx.doi.org/10.1007/s13222-012-0092-8}, volume = 12, year = 2012 } @inproceedings{paton2011feedback, abstract = {User feedback is gaining momentum as a means of addressing the difficulties underlying information integration tasks. It can be used to assist users in building information integration systems and to improve the quality of existing systems, e.g., in dataspaces. Existing proposals in the area are confined to specific integration sub-problems considering a specific kind of feedback sought, in most cases, from a single user. We argue in this paper that, in order to maximize the benefits that can be drawn from user feedback, it should be considered and managed as a first class citizen. Accordingly, we present generic operations that underpin the management of feedback within information integration systems, and that are applicable to feedback of different kinds, potentially supplied by multiple users with different expectations. We present preliminary solutions that can be adopted for realizing such operations, and sketch a research agenda for the information integration community.}, author = {Paton, Norman W. and Fernandes, Alvaro A. A. 
and Hedeler, Cornelia and Embury, Suzanne M.}, booktitle = {Proceedings of the Conference on Innovative Data Systems Research (CIDR)}, interhash = {1874e5c09919244808457021d2d884d1}, intrahash = {cd75210156615616e4f25c91143040c4}, pages = {175--183}, title = {User Feedback as a First Class Citizen in Information Integration Systems}, url = {http://www.cidrdb.org/cidr2011/Papers/CIDR11_Paper21.pdf}, year = 2011 } @inproceedings{marcus2011crowdsourced, abstract = {Amazon's Mechanical Turk (``MTurk'') service allows users to post short tasks (``HITs'') that other users can receive a small amount of money for completing. Common tasks on the system include labelling a collection of images, combining two sets of images to identify people which appear in both, or extracting sentiment from a corpus of text snippets. Designing a workflow of various kinds of HITs for filtering, aggregating, sorting, and joining data sources together is common, and comes with a set of challenges in optimizing the cost per HIT, the overall time to task completion, and the accuracy of MTurk results. We propose Qurk, a novel query system for managing these workflows, allowing crowd-powered processing of relational databases. We describe a number of query execution and optimization challenges, and discuss some potential solutions.}, author = {Marcus, Adam and Wu, Eugene and Madden, Samuel and Miller, Robert C.}, booktitle = {Proceedings of the 5th Biennial Conference on Innovative Data Systems Research}, doi = {1721.1/62827}, interhash = {b6b7d67c3c09259fb2d5df3f52e24c9d}, intrahash = {29723ba38aa6039091769cd2f69a1514}, month = jan, pages = {211--214}, publisher = {CIDR}, title = {Crowdsourced Databases: Query Processing with People}, url = {http://dspace.mit.edu/handle/1721.1/62827}, year = 2011 } @inproceedings{horowitz2010anatomy, abstract = {We present Aardvark, a social search engine. With Aardvark, users ask a question, either by instant message, email, web input, text message, or voice. 
Aardvark then routes the question to the person in the user's extended social network most likely to be able to answer that question. As compared to a traditional web search engine, where the challenge lies in finding the right document to satisfy a user's information need, the challenge in a social search engine like Aardvark lies in finding the right person to satisfy a user's information need. Further, while trust in a traditional search engine is based on authority, in a social search engine like Aardvark, trust is based on intimacy. We describe how these considerations inform the architecture, algorithms, and user interface of Aardvark, and how they are reflected in the behavior of Aardvark users.}, acmid = {1772735}, address = {New York, NY, USA}, author = {Horowitz, Damon and Kamvar, Sepandar D.}, booktitle = {Proceedings of the 19th international conference on World wide web}, doi = {10.1145/1772690.1772735}, interhash = {418d79b49ede3a8d15ef5eb8453094f0}, intrahash = {787ecbd5796ada03f15bdda85497e1fd}, isbn = {978-1-60558-799-8}, location = {Raleigh, North Carolina, USA}, numpages = {10}, pages = {431--440}, publisher = {ACM}, title = {The anatomy of a large-scale social search engine}, url = {http://doi.acm.org/10.1145/1772690.1772735}, year = 2010 } @inproceedings{chan2009mathematical, abstract = {Human computation is a technique that makes use of human abilities for computation to solve problems. Social games use the power of the Internet game players to solve human computation problems. In previous works, many social games were proposed and were quite successful, but no formal framework exists for designing social games in general. A formal framework is important because it lists out the design elements of a social game, the characteristics of a human computation problem, and their relationships. With a formal framework, it simplifies the way to design a social game for a specific problem. 
In this paper, our contributions are: (1) formulate a formal model on social games, (2) analyze the framework and derive some interesting properties based on model's interactions, (3) illustrate how some current social games can be realized with the proposed formal model, and (4) describe how to design a social game for solving a specific problem with the use of the proposed formal model. This paper presents a set of design guidelines derived from the formal model and demonstrates that the model can help to design a social game for solving a specific problem in a formal and structural way.}, author = {Chan, Kam Tong and King, I. and Yuen, Man-Ching}, booktitle = {Proceedings of the International Conference on Computational Science and Engineering, CSE '09}, doi = {10.1109/CSE.2009.166}, interhash = {a54732b662bcb0d763139a38f6525b56}, intrahash = {216d582316e970eb498423ee8448edbe}, month = aug, pages = {1205--1210}, title = {Mathematical Modeling of Social Games}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5283086&tag=1}, volume = 4, year = 2009 } @inproceedings{parameswaran2011answering, abstract = {For some problems, human assistance is needed in addition to automated (algorithmic) computation. In sharp contrast to existing data management approaches, where human input is either ad-hoc or is never used, we describe the design of the first declarative language involving human-computable functions, standard relational operators, as well as algorithmic computation. We consider the challenges involved in optimizing queries posed in this language, in particular, the tradeoffs between uncertainty, cost and performance, as well as combination of human and algorithmic evidence. 
We believe that the vision laid out in this paper can act as a road-map for a new area of data management research where human computation is routinely used in data analytics.}, author = {Parameswaran, Aditya and Polyzotis, Neoklis}, booktitle = {Conference on Innovative Data Systems Research (CIDR 2011)}, interhash = {037601fdcba1c499a3e89b1427235489}, intrahash = {8c11ab0f21767c79cd694a795eddf169}, month = jan, pages = {160--166}, title = {Answering Queries using Humans, Algorithms and Databases}, url = {http://ilpubs.stanford.edu:8090/986/}, year = 2011 } @inproceedings{heymann2011turkalytics, abstract = {We present "Turkalytics," a novel analytics tool for human computation systems. Turkalytics processes and reports logging events from workers in real-time and has been shown to scale to over one hundred thousand logging events per day. We present a state model for worker interaction that covers the Mechanical Turk (the SCRAP model) and a data model that demonstrates the diversity of data collected by Turkalytics. We show that Turkalytics is effective at data collection, in spite of it being unobtrusive. 
Lastly, we describe worker locations, browser environments, activity information, and other examples of data collected by our tool.}, acmid = {1963473}, address = {New York, NY, USA}, author = {Heymann, Paul and Garcia-Molina, Hector}, booktitle = {Proceedings of the 20th international conference on World wide web}, doi = {10.1145/1963405.1963473}, interhash = {6d183b7917745ec2ef531e66e18f4bcd}, intrahash = {9461e2c2c5f0a6304ad6017a56788217}, isbn = {978-1-4503-0632-4}, location = {Hyderabad, India}, numpages = {10}, pages = {477--486}, publisher = {ACM}, title = {Turkalytics: analytics for human computation}, url = {http://doi.acm.org/10.1145/1963405.1963473}, year = 2011 } @article{selke2012pushing, abstract = {By incorporating human workers into the query execution process crowd-enabled databases facilitate intelligent, social capabilities like completing missing data at query time or performing cognitive operators. But despite all their flexibility, crowd-enabled databases still maintain rigid schemas. In this paper, we extend crowd-enabled databases by flexible query-driven schema expansion, allowing the addition of new attributes to the database at query time. However, the number of crowd-sourced mini-tasks to fill in missing values may often be prohibitively large and the resulting data quality is doubtful. Instead of simple crowd-sourcing to obtain all values individually, we leverage the usergenerated data found in the Social Web: By exploiting user ratings we build perceptual spaces, i.e., highly-compressed representations of opinions, impressions, and perceptions of large numbers of users. Using few training samples obtained by expert crowd sourcing, we then can extract all missing data automatically from the perceptual space with high quality and at low costs. 
Extensive experiments show that our approach can boost both performance and quality of crowd-enabled databases, while also providing the flexibility to expand schemas in a query-driven fashion.}, acmid = {2168655}, author = {Selke, Joachim and Lofi, Christoph and Balke, Wolf-Tilo}, interhash = {8d2c0e1e49d00f11fa124deeea4a7dbe}, intrahash = {41224a60badfeefb0fe2cea85f2a4ff0}, issn = {2150-8097}, issue_date = {February 2012}, journal = {Proceedings of the VLDB Endowment}, month = feb, number = 6, numpages = {12}, pages = {538--549}, publisher = {VLDB Endowment}, title = {Pushing the boundaries of crowd-enabled databases with query-driven schema expansion}, url = {http://dl.acm.org/citation.cfm?id=2168651.2168655}, volume = 5, year = 2012 } @techreport{parameswaran2011declarative, abstract = {Crowdsourcing enables programmers to incorporate ``human computation'' as a building block in algorithms that cannot be fully automated, such as text analysis and image recognition. Similarly, humans can be used as a building block in data-intensive applications --- providing, comparing, and verifying data used by applications. Building upon the decades-long success of declarative approaches to conventional data management, we use a similar approach for data-intensive applications that incorporate humans. Specifically, declarative queries are posed over stored relational data as well as data computed on-demand from the crowd, and the underlying system orchestrates the computation of query answers. We present Deco, a database system for declarative crowdsourcing. We describe Deco's data model, query language, and our initial prototype. Deco's data model was designed to be general (it can be instantiated to other proposed models), flexible (it allows methods for uncertainty resolution and external access to be plugged in), and principled (it has a precisely-defined semantics). Syntactically, Deco's query language is a simple extension to SQL. 
Based on Deco's data model, we define a precise semantics for arbitrary queries involving both stored data and data obtained from the crowd. We then describe the Deco query processor, which respects our semantics while coping with the unique combination of latency, monetary cost, and uncertainty introduced in the crowdsourcing environment. Finally, we describe our current system implementation, and we discuss the novel query optimization challenges that form the core of our ongoing work.}, author = {Parameswaran, Aditya and Park, Hyunjung and Garcia-Molina, Hector and Polyzotis, Neoklis and Widom, Jennifer}, institution = {Stanford University}, interhash = {af28066d0b21d87a9ef90f63d7e6095f}, intrahash = {4de5dd97e5466c9f1fc63c0d23b4d90a}, number = 1015, publisher = {Stanford InfoLab}, title = {Deco: Declarative Crowdsourcing}, url = {http://ilpubs.stanford.edu:8090/1015/}, year = 2011 } @inproceedings{minder2011crowdlang, abstract = {Crowdsourcing markets such as Amazon’s Mechanical Turk provide an enormous potential for accomplishing work by combining human and machine computation. Today crowdsourcing is mostly used for massive parallel information processing for a variety of tasks such as image labeling. However, as we move to more sophisticated problem-solving there is little knowledge about managing dependencies between steps and a lack of tools for doing so. As the contribution of this paper, we present a concept of an executable, model-based programming language and a general purpose framework for accomplishing more sophisticated problems. Our approach is inspired by coordination theory and an analysis of emergent collective intelligence. 
We illustrate the applicability of our proposed language by combining machine and human computation based on existing interaction patterns for several general computation problems.}, author = {Minder, Patrick and Bernstein, Abraham}, booktitle = {Proceedings of the 3rd Human Computation Workshop}, interhash = {0f708aa0b0eb867beb89fe42a9e1a068}, intrahash = {fe3477c51c6a2159ec1c72ecf299f1fb}, pages = {103--108}, publisher = {AAAI Press}, series = {AAAI Workshops}, title = {CrowdLang - First Steps Towards Programmable Human Computers for General Computation}, url = {https://www.aaai.org/ocs/index.php/WS/AAAIW11/paper/viewFile/3891/4251}, year = 2011 } @article{malone2010collective, abstract = {A user’s guide to the building blocks of collective intelligence: By recombining CI “genes” according to the work required, managers can design the powerful system they need. }, author = {Malone, Thomas W. and Laubacher, Robert and Dellarocas, Chrysanthos}, interhash = {3453f67171b371fe5fa926edeb09447b}, intrahash = {80e0aad3a7fddd9f4c1102c9f1d19df0}, journal = {Sloan Management Review}, number = 3, pages = {21--31}, publisher = {MIT}, title = {The Collective Intelligence Genome}, url = {http://www.lhstech.com/chair/Articles/malone.pdf}, volume = 51, year = 2010 } @inproceedings{little2010turkit, abstract = {Mechanical Turk (MTurk) provides an on-demand source of human computation. This provides a tremendous opportunity to explore algorithms which incorporate human computation as a function call. However, various systems challenges make this difficult in practice, and most uses of MTurk post large numbers of independent tasks. TurKit is a toolkit for prototyping and exploring algorithmic human computation, while maintaining a straight-forward imperative programming style. We present the crash-and-rerun programming model that makes TurKit possible, along with a variety of applications for human computation algorithms. 
We also present case studies of TurKit used for real experiments across different fields.}, acmid = {1866040}, address = {New York, NY, USA}, author = {Little, Greg and Chilton, Lydia B. and Goldman, Max and Miller, Robert C.}, booktitle = {Proceedings of the 23rd annual ACM symposium on User interface software and technology}, doi = {10.1145/1866029.1866040}, interhash = {a2b44d507345037242e3590eee0ab671}, intrahash = {e364db8e8ee1992a0fdb5f37f425b1a7}, isbn = {978-1-4503-0271-5}, location = {New York, New York, USA}, numpages = {10}, pages = {57--66}, publisher = {ACM}, title = {TurKit: human computation algorithms on mechanical turk}, url = {http://doi.acm.org/10.1145/1866029.1866040}, year = 2010 } @inproceedings{kittur2011crowdforge, abstract = {Micro-task markets such as Amazon's Mechanical Turk represent a new paradigm for accomplishing work, in which employers can tap into a large population of workers around the globe to accomplish tasks in a fraction of the time and money of more traditional methods. However, such markets have been primarily used for simple, independent tasks, such as labeling an image or judging the relevance of a search result. Here we present a general purpose framework for accomplishing complex and interdependent tasks using micro-task markets. 
We describe our framework, a web-based prototype, and case studies on article writing, decision making, and science journalism that demonstrate the benefits and limitations of the approach.}, acmid = {2047202}, address = {New York, NY, USA}, author = {Kittur, Aniket and Smus, Boris and Khamkar, Susheel and Kraut, Robert E.}, booktitle = {Proceedings of the 24th annual ACM symposium on User interface software and technology}, doi = {10.1145/2047196.2047202}, interhash = {96bc968750689063846b513c9dac7a57}, intrahash = {e1022258d8e73b250ff625ce2e10095b}, isbn = {978-1-4503-0716-1}, location = {Santa Barbara, California, USA}, numpages = {10}, pages = {43--52}, publisher = {ACM}, title = {CrowdForge: crowdsourcing complex work}, url = {http://doi.acm.org/10.1145/2047196.2047202}, year = 2011 }