@article{luo2014peergrading,
  title     = {Peer grading in a {MOOC}: Reliability, validity, and perceived effects},
  author    = {Luo, Heng and Robinson, Anthony and Park, Jae-Young},
  journal   = {Online Learning},
  year      = {2014},
  month     = jul,
  volume    = {18},
  number    = {2},
  pages     = {1--14},
  issn      = {2472-5730},
  doi       = {10.24059/olj.v18i2.429},
  url       = {https://olj.onlinelearningconsortium.org/index.php/olj/article/view/429},
  keywords  = {validity, reliability, peer grading, MOOCs, MOOC research},
  refereed  = {yes},
  abstract  = {Peer grading affords a scalable and sustainable way of providing assessment and feedback to a massive student population, and has been used in massive open online courses (MOOCs) on the Coursera platform. However, there is currently little empirical evidence to support the credentials of peer grading as a learning assessment method in the MOOC context. To address this research need, this study examined 1825 peer grading assignments collected from a Coursera MOOC to investigate the reliability and validity of peer grading as well as its perceived effects on students’ MOOC learning experience. The empirical findings showed that aggregating the ratings of student graders produced peer grading scores that were fairly consistent and highly similar to the instructor’s grading scores. Student survey responses also showed that the peer grading activity was well received: the majority of MOOC students believed it was fair, useful, and beneficial, and would recommend including it in future MOOC offerings. Based on the empirical results, this study concludes with a set of principles for designing and implementing peer grading activities in the MOOC context.},
}