@article{suen2014peer,
  title     = {Peer assessment for massive open online courses (MOOCs)},
  author    = {Suen, Hoi},
  editor    = {McGreal, Rory and Conrad, Dianne},
  journal   = {The International Review of Research in Open and Distributed Learning},
  year      = {2014},
  month     = jul,
  volume    = {15},
  number    = {3},
  pages     = {313-327},
  issn      = {1492-3831},
  keywords  = {peer assessment, MOOC, formative evaluation, credibility index, calibrated peer review},
  abstract  = {The teach-learn-assess cycle in education is broken in a typical massive open online course (MOOC). Without formative assessment and feedback, MOOCs amount to information dump or broadcasting shows, not educational experiences. A number of remedies have been attempted to bring formative assessment back into MOOCs, each with its own limits and problems. The most widely applicable approach for all MOOCs to date is to use peer assessment to provide the necessary feedback. However, unmoderated peer assessment results suffer from a lack of credibility. Several methods are available today to improve on the accuracy of peer assessment results. Some combination of these methods may be necessary to make peer assessment results sufficiently accurate to be useful for formative assessment. Such results can also help to facilitate peer learning, online discussion forums, and may possibly augment summative evaluation for credentialing.},
  refereed  = {yes},
  url       = {http://www.irrodl.org/index.php/irrodl/article/view/1680},
  attachments = {1680-14699-1-PB.pdf},
}