@article{lacey-peters-sathian-07_cross-modal-object-recognition-viewpoint,
  title={Cross-modal object recognition is viewpoint-independent},
  author={Simon Lacey and Andrew Peters and K. Sathian},
  journal={PLoS ONE},
  volume={2},
  number={9},
  pages={e890},
  year={2007},
  abstract={Background: Previous research suggests that visual and haptic object recognition are viewpoint-dependent both within- and cross-modally. However, this conclusion may not be generally valid, as it was reached using objects oriented along their extended y-axis, resulting in differential surface processing in vision and touch. In the present study, we removed this differential by presenting objects along the z-axis, thus making all object surfaces more equally available to vision and touch. Methodology/Principal Findings: Participants studied previously unfamiliar objects, in groups of four, using either vision or touch. Subsequently, they performed a four-alternative forced-choice object identification task with the studied objects presented in both unrotated and rotated (180° about the x-, y-, and z-axes) orientations. Rotation impaired within-modal recognition accuracy in both vision and touch, but not cross-modal recognition accuracy. Within-modally, visual recognition accuracy was reduced by rotation about the x- and y-axes more than the z-axis, whilst haptic recognition was equally affected by rotation about all three axes. Cross-modal (but not within-modal) accuracy correlated with spatial (but not object) imagery scores. Conclusions/Significance: The viewpoint-independence of cross-modal object identification points to its mediation by a high-level abstract representation. The correlation between spatial imagery scores and cross-modal performance suggests that construction of this high-level representation is linked to the ability to perform spatial transformations. Within-modal viewpoint-dependence appears to have a different basis in vision than in touch, possibly due to surface occlusion being important in vision but not touch.}
}

@article{norman-norman-04_visual-haptic-perception-of-natural-object-shape,
  title={The visual and haptic perception of natural object shape},
  author={Norman, J. F. and Norman, H. F. and Clayton, A. M. and Lianekhammy, J. and Zielke, G.},
  journal={Attention, Perception, \& Psychophysics},
  volume={66},
  number={2},
  pages={342--351},
  year={2004},
  publisher={Springer},
  abstract={In this study, we evaluated observers’ ability to compare naturally shaped three-dimensional (3-D) objects using their senses of vision and touch. In one experiment, the observers haptically manipulated 1 object and then indicated which of 12 visible objects possessed the same shape. In the second experiment, pairs of objects were presented, and the observers indicated whether their 3-D shape was the same or different. The 2 objects were presented either unimodally (vision–vision or haptic–haptic) or cross-modally (vision–haptic or haptic–vision). In both experiments, the observers were able to compare 3-D shape across modalities with reasonably high levels of accuracy. In Experiment 1, for example, the observers’ matching performance rose to 72% correct (chance performance was 8.3%) after five experimental sessions. In Experiment 2, small (but significant) differences in performance were obtained between the unimodal vision–vision condition and the two cross-modal conditions. Taken together, the results suggest that vision and touch have functionally overlapping, but not necessarily equivalent, representations of 3-D shape.}
}