@inproceedings{57e6d7fffa9747bb97264c71d4ccf971,
  title     = {Mini-Crowdsourcing End-User Assessment of Intelligent Assistants: A Cost-Benefit Study},
  abstract  = {Intelligent assistants sometimes handle tasks too important to be trusted implicitly. End users can establish trust via systematic assessment, but such assessment is costly. This paper investigates whether, when, and how bringing a small crowd of end users to bear on the assessment of an intelligent assistant is useful from a cost/benefit perspective. Our results show that a mini-crowd of testers supplied many more benefits than the obvious decrease in workload, but these benefits did not scale linearly as mini-crowd size increased - there was a point of diminishing returns where the cost-benefit ratio became less attractive.},
  keywords  = {crowdsourcing, end-user programming, machine learning, testing},
  author    = {Shinsel, Amber and Kulesza, Todd and Burnett, Margaret and Curran, William and Groce, Alex and Stumpf, Simone and Wong, {Weng Keen}},
  year      = {2011},
  month     = sep,
  doi       = {10.1109/VLHCC.2011.6070377},
  language  = {English (US)},
  isbn      = {9781457712456},
  publisher = {IEEE},
  pages     = {47--54},
  booktitle = {Proceedings - 2011 IEEE Symposium on Visual Languages and Human Centric Computing, VL/HCC 2011},
  note      = {2011 IEEE Symposium on Visual Languages and Human Centric Computing, VL/HCC 2011 ; Conference date: 18-09-2011 Through 22-09-2011},
}