@article{Gibson_Luong_Cho_Moh_Zanin_Djatmiko_Aandahl_2022,
  place        = {Melbourne, Australia},
  title        = {Where’s the harm? Screening student evaluations of teaching for offensive, threatening or distressing comments},
  volume       = {38},
  number       = {2},
  journal      = {Australasian Journal of Educational Technology},
  author       = {Gibson, Matthew J. and Luong, Justin and Cho, Hanbit and Moh, Bryan and Zanin, Simone and Djatmiko, Mentari and Aandahl, R. Zach},
  year         = {2022},
  month        = {Apr.},
  pages        = {35--48},
  url          = {https://ajet.org.au/index.php/AJET/article/view/6133},
  DOI          = {10.14742/ajet.6133},
  abstractNote = {Student evaluation surveys provide educational institutions with important feedback regarding the student experience of teaching and courses; however, qualitative comments can contain offensive, insulting or threatening content. Large educational institutions generate thousands of comments per academic term; therefore, manual screening processes to find potentially harmful comments are not generally feasible. We developed a methodology for semi-automated screening of student comments that incorporates a machine learning decision support system and a detailed psychological assessment protocol. In a case study at a large public Australian university, our system identified 4,258 out of 62,049 (6.9%) comments as potentially harmful and requiring further review. Feedback from stakeholders demonstrates that this methodology is useful in reducing staff workload and could be broadly applied to different settings. Implications for practice or policy: (1) Educational institutions can adopt this methodology to dramatically decrease the number of working hours required to screen harmful free-text comments. (2) Researchers can use the proposed psychology-based assessment as an example of how to develop a protocol to categorise comments. (3) Educators and researchers can use this case study to follow best practices to develop their own decision support system that implements free-text comment classifiers.}
}