Human head-eye coordination is a complex behavior, shaped by physiological constraints, psychological context, and gaze intent. Current context-specific gaze models in both psychology and graphics fail to produce plausible head-eye coordination for general patterns of human gaze behavior. In this paper, we: 1) propose and validate an experimental protocol to collect head-eye motion data during sequential look-at tasks in Virtual Reality; 2) identify factors influencing head-eye coordination using this data; and 3) introduce a head-eye coordinated inverse kinematics (IK) gaze model, Head-EyeK, that integrates these insights. Our evaluation of Head-EyeK is threefold: we show the impact of algorithmic parameters on gaze behavior; we show a favorable comparison to prior art, both quantitatively against ground-truth data and qualitatively in a perceptual study; and we show multiple scenarios of complex gaze behavior credibly animated using Head-EyeK.
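The abstract does not spell out how the IK model splits a gaze shift between head and eyes, so the following is only a minimal sketch of the general idea of head-eye coordinated look-at decomposition, not the Head-EyeK model itself. The function name look_at_decomposition and the parameters eye_limit_deg (an assumed oculomotor range) and head_gain (an assumed head-contribution fraction) are illustrative and do not come from the paper.

```python
import numpy as np

def look_at_decomposition(head_pos, head_forward, target_pos,
                          eye_limit_deg=35.0, head_gain=0.6):
    """Split a look-at rotation between head and eyes (illustrative only).

    The eyes absorb the part of the gaze shift that fits inside an assumed
    oculomotor range (eye_limit_deg); the head rotates by a fraction
    (head_gain) of the total angle, but at least enough to keep the
    residual eye-in-head angle within that range.
    """
    to_target = target_pos - head_pos
    to_target = to_target / np.linalg.norm(to_target)
    head_forward = head_forward / np.linalg.norm(head_forward)

    # Total angular distance between the current head direction and the target.
    total_angle = np.degrees(
        np.arccos(np.clip(np.dot(head_forward, to_target), -1.0, 1.0)))

    # Head takes a fraction of the shift, clamped so the eyes stay in range.
    head_angle = max(head_gain * total_angle, total_angle - eye_limit_deg)
    head_angle = float(np.clip(head_angle, 0.0, total_angle))
    eye_angle = total_angle - head_angle
    return head_angle, eye_angle

if __name__ == "__main__":
    # Target 45 degrees to the right of the head's forward axis.
    h, e = look_at_decomposition(np.zeros(3),
                                 np.array([0.0, 0.0, 1.0]),
                                 np.array([1.0, 0.0, 1.0]))
    print(f"head: {h:.1f} deg, eyes: {e:.1f} deg")
```

A fixed head_gain is the simplest possible assumption; the paper's contribution is precisely that this split depends on factors such as task sequence and gaze intent learned from the VR data, which a constant parameter cannot capture.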
@article{pan2025headeyek,
  title      = {Head-{EyeK}: Head-Eye Coordination and Control Learned in Virtual Reality},
  shorttitle = {Head-{EyeK}},
  author     = {Pan, Yifang and Sidenmark, Ludwig and Singh, Karan},
  year       = {2025},
  month      = jul,
  journal    = {IEEE Transactions on Visualization and Computer Graphics},
  number     = {01},
  pages      = {1--11},
  publisher  = {IEEE Computer Society},
  issn       = {1077-2626},
  doi        = {10.1109/TVCG.2025.3589333},
  langid     = {english}
}