Under review

bibbase doesn't produce full list from zotero my publications source

Michael Tarr 2 weeks ago · updated by Christian Fritz 2 weeks ago

So bb sees "my publications" from Zotero just fine. It generates the BibTeX file appended below...

But all I get in bb is this single publication:

[screenshot omitted: the single publication as displayed by BibBase]

I tried resetting, relinking, etc., but it continues to do this.
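One way to check whether the exported .bib file itself is the culprit is to parse it locally, count the entries, and look for duplicate citation keys (duplicate keys are a common reason a renderer collapses a list down to one item). A minimal Python sketch, assuming the export is saved as my_publications.bib (a stand-in name) and the bibtexparser package is installed:

from collections import Counter

import bibtexparser

# Parse the Zotero-exported BibTeX file.
with open("my_publications.bib") as f:
    db = bibtexparser.load(f)

print("parsed entries:", len(db.entries))

# Flag duplicate citation keys, since a duplicate key can make
# downstream tools keep only one of the colliding entries.
counts = Counter(entry["ID"] for entry in db.entries)
duplicates = {key: n for key, n in counts.items() if n > 1}
print("duplicate keys:", duplicates or "none")

If that reports all of the entries and no duplicate keys, the file is presumably fine and the problem is on the bb side.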

thanks in advance.

-mike

@article{vuong_rotation_2004, title = {Rotation direction affects object recognition}, volume = {44}, copyright = {All rights reserved}, doi = {10.1016/j.visres.2004.02.002}, abstract = {What role does dynamic information play in object recognition? To address this question, we probed observers' memory for novel objects rotating in depth. Irrespective of object discriminability, performance was affected by an object's rotation direction. This effect was obtained despite the same shape information and views being shown for different rotation directions. This direction effect was eliminated when either static images or animations that did not depict globally coherent rotation were used. Overall, these results suggest that dynamic information, that is, the spatiotemporal ordering of object views, provides information independent of shape or view information to a recognition system.}, language = {eng}, number = {14}, journal = {Vision Res}, author = {Vuong, Quoc C and Tarr, Michael J}, year = {2004}, pmid = {15136006}, note = {Place: England ISBN: 0042-6989}, pages = {1717--1730}, } @article{Leeds2013, title = {Comparing visual representations across human {fMRI} and computational vision.}, volume = {13(13)}, copyright = {All rights reserved}, doi = {10.1167/13.13.25}, abstract = {Feedforward visual object perception recruits a cortical network that is assumed to be hierarchical, progressing from basic visual features to complete object representations. However, the nature of the intermediate features related to this transformation remains poorly understood. Here, we explore how well different computer vision recognition models account for neural object encoding across the human cortical visual pathway as measured using fMRI. These neural data, collected during the viewing of 60 images of real-world objects, were analyzed with a searchlight procedure as in Kriegeskorte, Goebel, and Bandettini (2006): Within each searchlight sphere, the obtained patterns of neural activity for all 60 objects were compared to model responses for each computer recognition algorithm using representational dissimilarity analysis (Kriegeskorte et al., 2008). Although each of the computer vision methods significantly accounted for some of the neural data, among the different models, the scale invariant feature transform (Lowe, 2004), encoding local visual properties gathered from "interest points," was best able to accurately and consistently account for stimulus representations within the ventral pathway. More generally, when present, significance was observed in regions of the ventral-temporal cortex associated with intermediate-level object perception. Differences in model effectiveness and the neural location of significant matches may be attributable to the fact that each model implements a different featural basis for representing objects (e.g., more holistic or more parts-based). 
Overall, we conclude that well-known computer vision recognition systems may serve as viable proxies for theories of intermediate visual object representation}, language = {eng}, number = {25}, journal = {J Vis}, author = {Leeds, Daniel D and Seibert, Darren A and Pyles, John A and Tarr, Michael J}, year = {2013}, pmid = {24273227}, note = {Place: United States ISBN: 1534-7362}, pages = {1--27}, } @incollection{gauthier_visual_2016, title = {Visual {Object} {Recognition}: {Do} {We} ({Finally}) {Know} {More} {Now} {Than} {We} {Did}?}, volume = {2}, copyright = {All rights reserved}, abstract = {How do we recognize objects despite changes in their appearance? The past three decades have been witness to intense debates regarding both whether objects are encoded invariantly with respect to viewing conditions and whether specialized, separable mechanisms are used for the recognition of different object categories. We argue that such dichotomous debates ask the wrong question. Much more important is the nature of object representations: What are features that enable invariance or differential processing between categories? Although the nature of object features is still an unanswered question, new methods for connecting data to models show significant potential for helping us to better understand neural codes for objects. Most prominently, new approaches to analyzing data from functional magnetic resonance imaging, including neural decoding and representational similarity analysis, and new computational models of vision, including convolutional neural networks, have enabled a much more nuanced understanding of visual representation. Convolutional neural networks are particularly intriguing as a tool for studying biological vision in that this class of artificial vision systems, based on biologically plausible deep neural networks, exhibits visual recognition capabilities that are approaching those of human observers. As these models improve in their recognition performance, it appears that they also become more effective in predicting and accounting for neural responses in the ventral cortex. Applying these and other deep models to empirical data shows great promise for enabling future progress in the study of visual recognition.}, booktitle = {Annual {Review} of {Vision} {Science}}, author = {Gauthier, I. and Tarr, M.J.}, year = {2016}, pages = {377--396}, } @incollection{peissig_visual_2007, title = {Visual object recognition: do we know more now than we did 20 years ago?}, volume = {58}, copyright = {All rights reserved}, abstract = {We review the progress made in the field of object recognition over the past two decades. Structural-description models, making their appearance in the early 1980s, inspired a wealth of empirical research. Moving to the 1990s, psychophysical evidence for view-based accounts of recognition challenged some of the fundamental assumptions of structural-description theories. The 1990s also saw increased interest in the neurophysiological study of high-level visual cortex, the results of which provide some constraints on how objects may be represented. By 2000, neuroimaging arose as a viable means for connecting neurons to behavior. One of the most striking fMRI results has been category selectivity, which provided further constraints for models of object recognition. 
Despite this progress, the field is still faced with the challenge of developing a comprehensive theory that integrates this ever-increasing body of results and explains how we perceive and recognize objects.}, language = {eng}, booktitle = {Annu {Rev} {Psychol}}, author = {Peissig, Jessie J and Tarr, Michael J}, year = {2007}, pmid = {16903801}, note = {Place: United States ISBN: 0066-4308}, pages = {75--96}, } @article{williams_structural_1997, title = {Structural processing and implicit memory for possible and impossible figures}, volume = {23}, copyright = {All rights reserved}, number = {6}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, author = {Williams, P and Tarr, M J}, year = {1997}, pages = {1344--1361}, } @article{hayward_spatial_1995, title = {Spatial language and spatial representation}, volume = {55}, copyright = {All rights reserved}, journal = {Cognition}, author = {Hayward, W G and Tarr, M J}, year = {1995}, pages = {39--84}, } @article{Tarr1997, title = {To {What} {Extent} {Do} {Unique} {Parts} {Influence} {Recognition} {Across} {Changes} in {Viewpoint}?}, volume = {8}, copyright = {All rights reserved}, issn = {0956-7976}, doi = {10.1111/j.1467-9280.1997.tb00439.x}, abstract = {We investigated how varying the number of unique parts within an object influences recognition across changes in viewpoint. The stimuli were shaded objects composed of five 3D volumes linked end-to-end with varying connection angles. Of the five volumes, zero, one, three, or five were qualitatively distinct (e.g., brick versus cone), the rest being tubes. Sequential-matching and naming tasks were used to assess the recognition of these stimuli over rotations in depth. Three major results stand out. First, regardless of the number of distinct parts, there was increasingly poorer recognition performance with increasing change in viewpoint. Second, the impact of viewpoint change for objects with one unique part was less than that for the other objects. Third, additional parts beyond a single unique part produced strong viewpoint dependency comparable to that obtained for objects with no distinct parts. Thus, visual recognition may be explained by a view-based theory in which viewpoint-specific representations encode both quantitative and qualitative features.}, number = {4}, journal = {Psychological Science}, author = {Tarr, Michael J. and Bulthoff, Heinrich H and Zabinski, Manon and Blanz, Volker}, year = {1997}, note = {ISBN: 0956-7976}, pages = {282--289}, } @article{gauthier_training_1998, title = {Training 'greeble' experts: a framework for studying expert object recognition processes.}, volume = {38}, copyright = {All rights reserved}, abstract = {Twelve participants were trained to be experts at identifying a set of 'Greebles', novel objects that, like faces, all share a common spatial configuration. Tests comparing expert with novice performance revealed: (1) a surprising mix of generalizability and specificity in expert object recognition processes; and (2) that expertise is a multi-faceted phenomenon, neither adequately described by a single term nor adequately assessed by a single task.
Greeble recognition by a simple neural-network model is also evaluated, and the model is found to account surprisingly well for both generalization and individuation using a single set of processes and representations.}, language = {eng}, number = {15-16}, journal = {Vision Res}, author = {Gauthier, I and Williams, P and Tarr, M J and Tanaka, J}, year = {1998}, pmid = {9798007}, note = {Place: ENGLAND ISBN: 0042-6989}, pages = {2401--2428}, } @article{Gauthier2002, title = {Unraveling mechanisms for expert object recognition: bridging brain activity and behavior.}, volume = {28}, copyright = {All rights reserved}, abstract = {Behavioral sensitivity to object transformations and the response to novel objects (Greebles) in the fusiform face area (FFA) was measured several times during expertise training. Sensitivity to 3 transformations increased with expertise: (a) configural changes in which halves of objects were misaligned, (b) configural changes in which some of the object parts were moved, and (c) the substitution of an object part with a part from a different object. The authors found that holistic-configural effects can arise from object representations that are differentiated in terms of features or parts. Moreover, a holistic-inclusive effect was correlated with changes in the right FFA. Face recognition may not be unique in its reliance on holistic processing, measured in terms of both behavior and brain activation.}, language = {eng}, number = {2}, journal = {J Exp Psychol Hum Percept Perform}, author = {Gauthier, Isabel and Tarr, Michael J}, year = {2002}, pmid = {11999864}, note = {Place: United States ISBN: 0096-1523}, pages = {431--446}, } @article{leeds_exploration_2014, title = {Exploration of complex visual feature spaces for object perception.}, volume = {8}, copyright = {All rights reserved}, doi = {10.3389/fncom.2014.00106}, abstract = {The mid- and high-level visual properties supporting object perception in the ventral visual pathway are poorly understood. In the absence of well-specified theory, many groups have adopted a data-driven approach in which they progressively interrogate neural units to establish each unit's selectivity. Such methods are challenging in that they require search through a wide space of feature models and stimuli using a limited number of samples. To more rapidly identify higher-level features underlying human cortical object perception, we implemented a novel functional magnetic resonance imaging method in which visual stimuli are selected in real-time based on BOLD responses to recently shown stimuli. This work was inspired by earlier primate physiology work, in which neural selectivity for mid-level features in IT was characterized using a simple parametric approach (Hung et al., 2012). To extend such work to human neuroimaging, we used natural and synthetic object stimuli embedded in feature spaces constructed on the basis of the complex visual properties of the objects themselves. During fMRI scanning, we employed a real-time search method to control continuous stimulus selection within each image space. This search was designed to maximize neural responses across a pre-determined 1 cm(3) brain region within ventral cortex. To assess the value of this method for understanding object encoding, we examined both the behavior of the method itself and the complex visual properties the method identified as reliably activating selected brain regions. 
We observed: (1) Regions selective for both holistic and component object features and for a variety of surface properties; (2) Object stimulus pairs near one another in feature space that produce responses at the opposite extremes of the measured activity range. Together, these results suggest that real-time fMRI methods may yield more widely informative measures of selectivity within the broad classes of visual features associated with cortical object representation}, language = {eng}, journal = {Front Comput Neurosci}, author = {Leeds, Daniel D and Pyles, John A and Tarr, Michael J}, year = {2014}, pmid = {25309408}, note = {Place: Switzerland ISBN: 1662-5188}, pages = {106}, } @article{tarr_mental_1989, title = {Mental rotation and orientation-dependence in shape recognition}, volume = {21}, copyright = {All rights reserved}, number = {2}, journal = {Cognitive Psychology}, author = {Tarr, M J and Pinker, S}, year = {1989}, pages = {233--282}, } @article{gauthier_levels_1997, title = {Levels of categorization in visual recognition studied using functional magnetic resonance imaging.}, volume = {7}, copyright = {All rights reserved}, abstract = {BACKGROUND: Recent functional neuroimaging results implicate part of the ventral temporal lobe of the brain in face recognition, and have, together with neurophysiological findings, been used as evidence for a face-specific neural module in the brain. Experimental designs, however, have often failed to distinguish between the class of the object used as the stimulus (face or non-face) and the level of categorization at which the stimulus is recognized (the 'basic' level, such as 'bird', at which familiar objects are first recognized, or more subordinate levels - 'sparrow', for example - which require additional perceptual processing). We have used echo-planar functional magnetic resonance imaging to compare brain activation for the matching of non-face objects with subordinate-level and basic-level descriptors. RESULTS: The additional visual processing required to verify the subordinate level of a picture over its basic level was associated with activation of the fusiform and inferior temporal gyri (FIT) as well as the temporal poles. These areas correspond closely to those previously implicated in the processing of facial images. CONCLUSIONS: Our results indicate that areas of the ventral visual pathway that have been associated with face recognition are sensitive to manipulations of the categorization level of non-face objects. 
This idea offers an alternative to the dominant view that FIT may be organized according to conceptual categories, and our results establish the importance of manipulating task requirements when evaluating a 'neural module' hypothesis.}, language = {eng}, number = {9}, journal = {Curr Biol}, author = {Gauthier, I and Anderson, A W and Tarr, M J and Skudlarski, P and Gore, J C}, year = {1997}, pmid = {9285718}, note = {Place: ENGLAND ISBN: 0960-9822}, pages = {645--651}, } @article{Naor-Raz2003, title = {Is color an intrinsic property of object representation?}, volume = {32}, copyright = {All rights reserved}, journal = {Perception}, author = {Naor-Raz, G and Tarr, M J and Kersten, D}, year = {2003}, pages = {667--680}, } @article{pyles_explicating_2013, title = {Explicating the {Face} {Perception} {Network} with {White} {Matter} {Connectivity}}, volume = {8}, copyright = {All rights reserved}, issn = {19326203}, doi = {10.1371/journal.pone.0061611}, abstract = {A network of multiple brain regions is recruited in face perception. Our understanding of the functional properties of this network can be facilitated by explicating the structural white matter connections that exist between its functional nodes. We accomplished this using functional MRI (fMRI) in combination with fiber tractography on high angular resolution diffusion weighted imaging data. We identified the three nodes of the core face network: the "occipital face area" (OFA), the "fusiform face area" (mid-fusiform gyrus or mFus), and the superior temporal sulcus (STS). Additionally, a region of the anterior temporal lobe (aIT), implicated as being important for face perception was identified. Our data suggest that we can further divide the OFA into multiple anatomically distinct clusters - a partitioning consistent with several recent neuroimaging results. More generally, structural white matter connectivity within this network revealed: 1) Connectivity between aIT and mFus, and between aIT and occipital regions, consistent with studies implicating this posterior to anterior pathway as critical to normal face processing; 2) Strong connectivity between mFus and each of the occipital face-selective regions, suggesting that these three areas may subserve different functional roles; 3) Almost no connectivity between STS and mFus, or between STS and the other face-selective regions. Overall, our findings suggest a re-evaluation of the "core" face network with respect to what functional areas are or are not included in this network. © 2013 Pyles et al.}, number = {4}, journal = {PLoS ONE}, author = {Pyles, J.A. and Verstynen, T.D. and Schneider, W. and Tarr, M.J.}, year = {2013}, } @article{Rossion2002, title = {Expertise training with novel objects leads to left-lateralized facelike electrophysiological responses}, volume = {13}, copyright = {All rights reserved}, abstract = {Scalp event-related potentials (ERPs) in humans indicate that face and object processing differ approximately 170 ms following stimulus presentation, at the point of the N170 occipitotemporal component. The N170 is delayed and enhanced to inverted faces but not to inverted objects. We tested whether this inversion effect reflects early mechanisms exclusive to faces or whether it generalizes to other stimuli as a function of visual expertise. ERPs to upright and inverted faces and novel objects (Greebles) were recorded in 10 participants before and after 2 weeks of expertise training with Greebles. The N170 component was observed for both faces and Greebles.
The results are consistent with previous reports in that the N170 was delayed and enhanced for inverted faces at recording sites in both hemispheres. For Greebles, the same effect of inversion was observed only for experts, primarily in the left hemisphere. These results suggest that the mechanisms underlying the electrophysiological face-inversion effect extend to visually homogeneous nonface object categories, at least in the left hemisphere, but only when such mechanisms are recruited by expertise.}, language = {eng}, number = {3}, journal = {Psychol Sci}, author = {Rossion, B and Gauthier, I and Goffaux, V and Tarr, M J and Crommelinck, M}, year = {2002}, pmid = {12009046}, note = {ISBN: 0956-7976}, pages = {250--257}, } @article{aminoff_applying_2015, title = {Applying artificial vision models to human scene understanding}, volume = {9}, copyright = {All rights reserved}, doi = {10.3389/fncom.2015.00008}, abstract = {How do we understand the complex patterns of neural responses that underlie scene understanding? Studies of the network of brain regions held to be scene-selective – the parahippocampal/lingual region (PPA), the retrosplenial complex (RSC), and the occipital place area (TOS) – have typically focused on single visual dimensions (e.g., size), rather than the high-dimensional feature space in which scenes are likely to be neurally represented. Here we leverage well-specified artificial vision systems to explicate a more complex understanding of how scenes are encoded in this functional network. We correlated similarity matrices within three different scene-spaces arising from: 1) BOLD activity in scene-selective brain regions; 2) behavioral measured judgments of visually-perceived scene similarity; and 3) several different computer vision models. These correlations revealed: 1) models that relied on mid- and high-level scene attributes showed the highest correlations with the patterns of neural activity within the scene-selective network; 2) NEIL and SUN – the models that best accounted for the patterns obtained from PPA and TOS – were different from the GIST model that best accounted for the pattern obtained from RSC; 3) The best performing models outperformed behaviorally-measured judgments of scene similarity in accounting for neural data. One computer vision method – NEIL (“Never-Ending-Image-Learner”), which incorporates visual features learned as statistical regularities across web-scale numbers of scenes – showed significant correlations with neural activity in all three scene-selective regions and was one of the two models best able to account for variance in the PPA and TOS. We suggest that these results are a promising first step in explicating more fine-grained models of neural scene understanding, including developing a clearer picture of the division of labor among the components of the functional scene-selective brain network.}, language = {English}, journal = {Frontiers in Computational Neuroscience}, author = {Aminoff, Elissa Michele and Toneva, Mariya and Shrivastava, Abhinav and Chen, Xinlei and Misra, Ishan and Gupta, Abhinav and Tarr, Michael J}, year = {2015}, note = {Publisher: Frontiers ISBN: 1662-5188}, } @article{tarr_virtual_2002, title = {Virtual reality in behavioral neuroscience and beyond}, volume = {5 Suppl}, copyright = {All rights reserved}, abstract = {Virtual reality (VR) has finally come of age for serious applications in the behavioral neurosciences. 
After capturing the public imagination a decade ago, enthusiasm for VR flagged due to hardware limitations, an absent commercial market and manufacturers who dropped the mass-market products that normally drive technological development. Recently, however, improvements in computer speed, quality of head-mounted displays and wide-area tracking systems have made VR attractive for both research and real-world applications in neuroscience, cognitive science and psychology. New and exciting applications for VR have emerged in research, training, rehabilitation, teleoperation, virtual archeology and tele-immersion.}, journal = {Nat Neurosci}, author = {Tarr, M J and Warren, W H}, year = {2002}, pmid = {12403993}, pages = {1089--1092}, } @article{tarr_viewpoint-dependent_1998, title = {Do viewpoint-dependent mechanisms generalize across members of a class?}, volume = {67}, number = {1-2}, journal = {Cognition}, author = {Tarr, M J and Gauthier, I}, year = {1998}, pages = {71--108}, } @article{foo_humans_2005, title = {Do humans integrate routes into a cognitive map? {Map}- versus landmark-based navigation of novel shortcuts.}, volume = {31}, copyright = {All rights reserved}, doi = {10.1037/0278-7393.31.2.195}, abstract = {Do humans integrate experience on specific routes into metric survey knowledge of the environment, or do they depend on a simpler strategy of landmark navigation? The authors tested this question using a novel shortcut paradigm during walking in a virtual environment. The authors find that participants could not take successful shortcuts in a desert world but could do so with dispersed landmarks in a forest. On catch trials, participants were drawn toward the displaced landmarks whether the landmarks were clustered near the target location or along the shortcut route. However, when landmarks appeared unreliable, participants fell back on coarse survey knowledge. Like honeybees (F. C. Dyer, 1991), humans do not appear to derive accurate cognitive maps from path integration to guide navigation but, instead, depend on landmarks when they are available.}, language = {eng}, number = {2}, journal = {J Exp Psychol Learn Mem Cogn}, author = {Foo, Patrick and Warren, William H and Duchon, Andrew and Tarr, Michael J}, year = {2005}, pmid = {15755239}, note = {Place: United States ISBN: 0278-7393}, pages = {195--215}, } @article{phillips_co-analysis_2012, title = {Co-analysis of {Brain} {Structure} and {Function} using {fMRI} and {Diffusion}-weighted {Imaging}}, volume = {69}, copyright = {All rights reserved}, doi = {10.3791/4125}, number = {e4125}, journal = {J. Vis. Exp.}, author = {Phillips, J S and Greenberg, A S and Pyles, J A and Pathak, S K and Behrmann, M and Schneider, W and Tarr, M J}, year = {2012}, } @article{roos_can_2013, title = {Can singular examples change implicit racial attitudes in the real-world?}, volume = {4}, copyright = {All rights reserved}, doi = {10.3389/fpsyg.2013.00594}, number = {594}, journal = {Front. Psychol.}, author = {Roos, L E and Lebrecht, S and Tanaka, J W and Tarr, M J}, year = {2013}, } @article{aminoff_associative_2015, title = {Associative processing is inherent in scene perception}, volume = {10}, copyright = {All rights reserved}, issn = {1932-6203}, url = {http://dx.plos.org/10.1371/journal.pone.0128840}, doi = {10.1371/journal.pone.0128840}, abstract = {How are complex visual entities such as scenes represented in the human brain? More concretely, along what visual and semantic dimensions are scenes encoded in memory?
One hypothesis is that global spatial properties provide a basis for categorizing the neural response patterns arising from scenes. In contrast, non-spatial properties, such as single objects, also account for variance in neural responses. The list of critical scene dimensions has continued to grow—sometimes in a contradictory manner—coming to encompass properties such as geometric layout, big/small, crowded/sparse, and three-dimensionality. We demonstrate that these dimensions may be better understood within the more general framework of associative properties. That is, across both the perceptual and semantic domains, features of scene representations are related to one another through learned associations. Critically, the components of such associations are consistent with the dimensions that are typically invoked to account for scene understanding and its neural bases. Using fMRI, we show that non-scene stimuli displaying novel associations across identities or locations recruit putatively scene-selective regions of the human brain (the parahippocampal/lingual region, the retrosplenial complex, and the transverse occipital sulcus/occipital place area). Moreover, we find that the voxel-wise neural patterns arising from these associations are significantly correlated with the neural patterns arising from everyday scenes providing critical evidence whether the same encoding principles underlie both types of processing. These neuroimaging results provide evidence for the hypothesis that the neural representation of scenes is better understood within the broader theoretical framework of associative processing. In addition, the results demonstrate a division of labor that arises across scene-selective regions when processing associations and scenes providing better understanding of the functional roles of each region within the cortical network that mediates scene processing.}, number = {6}, journal = {PloS ONE}, author = {Aminoff, E. M. and Tarr, M. J.}, year = {2015}, pmid = {26070142}, note = {ISBN: 10.1371/journal.pone.0128840}, pages = {e0128840}, } @article{gauthier_are_2004, title = {Are {Greebles} like faces? {Using} the neuropsychological exception to test the rule.}, volume = {42}, copyright = {All rights reserved}, doi = {10.1016/j.neuropsychologia.2004.04.025}, abstract = {Which image geometries count as face-like and which do not? Across multiple experiments, novel objects called Greebles have been used to argue that face-specific effects can be obtained with non-face stimuli under certain situations, in particular with expert observers. However, this claim depends on the argument that these non-face stimuli are not a priori treated by the face processing system. To address this question, CK, a neuropsychological patient well-known for exhibiting severe visual object agnosia and dyslexia but intact face processing, was tested with Greebles. CK performed poorly on Greebles, indicating that his intact face-specific abilities do not extend to include Greebles.
These results suggest that insofar as CK is relying on face-specific visual processes, these processes do not a priori treat Greebles as faces.}, language = {eng}, number = {14}, journal = {Neuropsychologia}, author = {Gauthier, Isabel and Behrmann, Marlene and Tarr, Michael J}, year = {2004}, pmid = {15381026}, note = {Place: England ISBN: 0028-3932}, pages = {1961--1970}, } @article{leeds_method_2016, title = {A method for real-time visual stimulus selection in the study of cortical object perception}, volume = {133}, copyright = {All rights reserved}, issn = {10959572}, doi = {10.1016/j.neuroimage.2016.02.071}, abstract = {© 2016 Elsevier Inc. The properties utilized by visual object perception in the mid- and high-level ventral visual pathway are poorly understood. To better establish and explore possible models of these properties, we adopt a data-driven approach in which we repeatedly interrogate neural units using functional Magnetic Resonance Imaging (fMRI) to establish each unit's image selectivity. This approach to imaging necessitates a search through a broad space of stimulus properties using a limited number of samples. To more quickly identify the complex visual features underlying human cortical object perception, we implemented a new functional magnetic resonance imaging protocol in which visual stimuli are selected in real-time based on BOLD responses to recently shown images. Two variations of this protocol were developed, one relying on natural object stimuli and a second based on synthetic object stimuli, both embedded in feature spaces based on the complex visual properties of the objects. During fMRI scanning, we continuously controlled stimulus selection in the context of a real-time search through these image spaces in order to maximize neural responses across pre-determined 1 cm3 brain regions. Elsewhere we have reported the patterns of cortical selectivity revealed by this approach (Leeds et al., 2014). In contrast, here our objective is to present more detailed methods and explore the technical and biological factors influencing the behavior of our real-time stimulus search. We observe that: 1) Searches converged more reliably when exploring a more precisely parameterized space of synthetic objects; 2) real-time estimation of cortical responses to stimuli is reasonably consistent; 3) search behavior was acceptably robust to delays in stimulus displays and subject motion effects. Overall, our results indicate that real-time fMRI methods may provide a valuable platform for continuing study of localized neural selectivity, both for visual object representation and beyond.}, journal = {NeuroImage}, author = {Leeds, D.D. and Tarr, M.J.}, year = {2016}, pages = {529--548}, } @article{Gauthier1999, title = {Activation of the middle fusiform 'face area' increases with expertise in recognizing novel objects.}, volume = {2}, copyright = {All rights reserved}, doi = {10.1038/9224}, abstract = {Part of the ventral temporal lobe is thought to be critical for face perception, but what determines this specialization remains unknown. We present evidence that expertise recruits the fusiform gyrus 'face area'. Functional magnetic resonance imaging (fMRI) was used to measure changes associated with increasing expertise in brain areas selected for their face preference. Acquisition of expertise with novel objects (greebles) led to increased activation in the right hemisphere face areas for matching of upright greebles as compared to matching inverted greebles.
The same areas were also more activated in experts than in novices during passive viewing of greebles. Expertise seems to be one factor that leads to specialization in the face area.}, language = {eng}, number = {6}, journal = {Nat Neurosci}, author = {Gauthier, I and Tarr, M J and Anderson, A W and Skudlarski, P and Gore, J C}, year = {1999}, pmid = {10448223}, note = {Place: UNITED STATES ISBN: 1097-6256}, pages = {568--573}, } @inproceedings{yang_state-space_2016, title = {A state-space model of cross-region dynamic connectivity in {MEG}/{EEG}}, copyright = {All rights reserved}, abstract = {© 2016 NIPS Foundation - All Rights Reserved. Cross-region dynamic connectivity, which describes the spatio-temporal dependence of neural activity among multiple brain regions of interest (ROIs), can provide important information for understanding cognition. For estimating such connectivity, magnetoencephalography (MEG) and electroencephalography (EEG) are well-suited tools because of their millisecond temporal resolution. However, localizing source activity in the brain requires solving an under-determined linear problem. In typical two-step approaches, researchers first solve the linear problem with generic priors assuming independence across ROIs, and secondly quantify cross-region connectivity. In this work, we propose a one-step state-space model to improve estimation of dynamic connectivity. The model treats the mean activity in individual ROIs as the state variable and describes non-stationary dynamic dependence across ROIs using time-varying auto-regression. Compared with a two-step method, which first obtains the commonly used minimum-norm estimates of source activity, and then fits the auto-regressive model, our state-space model yielded smaller estimation errors on simulated data where the model assumptions held. When applied on empirical MEG data from one participant in a scene-processing experiment, our state-space model also demonstrated intriguing preliminary results, indicating leading and lagged linear dependence between the early visual cortex and a higher-level scene-sensitive region, which could reflect feedforward and feedback information flow within the visual cortex during scene processing.}, booktitle = {Advances in {Neural} {Information} {Processing} {Systems}}, author = {Yang, Y. and Aminoff, E.M. and Tarr, M.J. and Kass, R.E.}, year = {2016}, note = {ISSN: 10495258}, } @article{gauthier_can_1999, title = {Can face recognition really be dissociated from object recognition?}, volume = {11}, copyright = {All rights reserved}, abstract = {We argue that the current literature on prosopagnosia fails to demonstrate unequivocal evidence for a disproportionate impairment for faces as compared to nonface objects. Two prosopagnosic subjects were tested for the discrimination of objects from several categories (face as well as nonface) at different levels of categorization (basic, subordinate, and exemplar levels). Several dependent measures were obtained including accuracy, signal detection measures, and response times. The results from Experiments 1 to 4 demonstrate that, in simultaneous-matching tasks, response times may reveal impairments with nonface objects in subjects whose error rates only indicate a face deficit. The results from Experiments 5 and 6 show that, given limited stimulus presentation times for face and nonface objects, the same subjects may demonstrate a deficit for both stimulus categories in sensitivity. 
In Experiments 7, 8 and 9, a match-to-sample task that places greater demands on memory led to comparable recognition sensitivity with both face and nonface objects. Regardless of object category, the prosopagnosic subjects were more affected by manipulations of the level of categorization than normal controls. This result raises questions regarding neuropsychological evidence for the modularity of face recognition, as well as its theoretical and methodological foundations.}, language = {eng}, number = {4}, journal = {J Cogn Neurosci}, author = {Gauthier, I and Behrmann, M and Tarr, M J}, year = {1999}, pmid = {10471845}, note = {Place: UNITED STATES ISBN: 0898-929X}, pages = {349--370}, } @article{Chang2019, title = {{BOLD5000}, a public {fMRI} dataset while viewing 5000 visual images}, volume = {6}, copyright = {All rights reserved}, issn = {2052-4463}, url = {https://doi.org/10.1038/s41597-019-0052-3}, doi = {10.1038/s41597-019-0052-3}, abstract = {Vision science, particularly machine vision, has been revolutionized by introducing large-scale image datasets and statistical learning approaches. Yet, human neuroimaging studies of visual perception still rely on small numbers of images (around 100) due to time-constrained experimental procedures. To apply statistical learning approaches that include neuroscience, the number of images used in neuroimaging must be significantly increased. We present BOLD5000, a human functional MRI (fMRI) study that includes almost 5,000 distinct images depicting real-world scenes. Beyond dramatically increasing image dataset size relative to prior fMRI studies, BOLD5000 also accounts for image diversity, overlapping with standard computer vision datasets by incorporating images from the Scene UNderstanding (SUN), Common Objects in Context (COCO), and ImageNet datasets. The scale and diversity of these image datasets, combined with a slow event-related fMRI design, enables fine-grained exploration into the neural representation of a wide range of visual features, categories, and semantics. Concurrently, BOLD5000 brings us closer to realizing Marr’s dream of a singular vision science–the intertwined study of biological and computer vision.}, number = {1}, journal = {Scientific Data}, author = {Chang, Nadine and Pyles, John A and Marcus, Austin and Gupta, Abhinav and Tarr, Michael J and Aminoff, Elissa M}, year = {2019}, pages = {49}, } @article{bukach_beyond_2006, title = {Beyond faces and modularity: the power of an expertise framework.}, volume = {10}, copyright = {All rights reserved}, doi = {10.1016/j.tics.2006.02.004}, abstract = {Studies of perceptual expertise typically ask whether the mechanisms underlying face recognition are domain specific or domain general. This debate has so dominated the literature that it has masked the more general usefulness of the expertise framework for studying the phenomenon of category specialization. Here we argue that the value of an expertise framework is not solely dependent on its relevance to face recognition. Beyond offering an alternative to domain-specific accounts of face specialization in terms of interactions between experience, task demands, and neural biases, expertise studies reveal principles of perceptual learning that apply to many different domains and forms of expertise. 
As such the expertise framework provides a unique window onto the functional plasticity of the mind and brain.}, language = {eng}, number = {4}, journal = {Trends Cogn Sci}, author = {Bukach, Cindy M and Gauthier, Isabel and Tarr, Michael J}, year = {2006}, pmid = {16516534}, note = {Place: England ISBN: 1364-6613}, pages = {159--166}, } @article{behrmann_behavioral_2005, title = {Behavioral change and its neural correlates in visual agnosia after expertise training.}, volume = {17}, copyright = {All rights reserved}, doi = {10.1162/0898929053467613}, abstract = {Agnosia, the impairment in object and face recognition despite intact vision and intelligence, is one of the most intriguing and debilitating neuropsychological deficits. The goal of this study was to determine whether S.M., an individual with longstanding visual agnosia and concomitant prosopagnosia, can be retrained to perform visual object recognition and, if so, what neural substrates mediate this reacquisition. Additionally, of interest is the extent to which training on one type of visual stimulus generalizes to other visual stimuli, as this informs our understanding of the organization of ventral visual cortex. Greebles were chosen as the stimuli for retraining given that, in neurologically normal individuals, these stimuli can engage the fusiform face area. Posttraining, S.M. showed significant improvement in recognizing Greebles, although he did not attain normal levels of performance. He was also able to recognize untrained Greebles and showed improvement in recognizing common objects. Surprisingly, his performance on face recognition, albeit poor initially, was even more impaired following training. A comparison of pre- and postintervention functional neuroimaging data mirrored the behavioral findings: Face-selective voxels in the fusiform gyrus prior to training were no longer so and were, in fact, more Greeble-selective. The findings indicate potential for experience-dependent dynamic reorganization in agnosia with the possibility that residual neural tissue, with limited capacity, will compete for representations.}, language = {eng}, number = {4}, journal = {J Cogn Neurosci}, author = {Behrmann, Marlene and Marotta, Jonathan and Gauthier, Isabel and Tarr, Michael J and McKeeff, Thomas J}, year = {2005}, pmid = {15829077}, note = {Place: United States ISBN: 0898-929X}, pages = {554--568}, } @article{gauthier_becoming_1997, title = {Becoming a "{Greeble}" expert: exploring mechanisms for face recognition.}, volume = {37}, copyright = {All rights reserved}, abstract = {Sensitivity to configural changes in face processing has been cited as evidence for face-exclusive mechanisms. Alternatively, general mechanisms could be fine-tuned by experience with homogeneous stimuli. We tested sensitivity to configural transformations for novices and experts with nonface stimuli ("Greebles"). Parts of transformed Greebles were identified via forced-choice recognition. Regardless of expertise level, the recognition of parts in the Studied configuration was better than in isolation, suggesting an object advantage. For experts, recognizing Greeble parts in a Transformed configuration was slower than in the Studied configuration, but only at upright. 
Thus, expertise with visually similar objects, not faces per se, may produce configural sensitivity.}, language = {eng}, number = {12}, journal = {Vision Res}, author = {Gauthier, I and Tarr, M J}, year = {1997}, pmid = {9231232}, note = {Place: ENGLAND ISBN: 0042-6989}, pages = {1673--1682}, } @article{bursley_awake_2016, title = {Awake, {Offline} {Processing} during {Associative} {Learning}.}, volume = {11}, copyright = {All rights reserved}, doi = {10.1371/journal.pone.0127522}, abstract = {Offline processing has been shown to strengthen memory traces and enhance learning in the absence of conscious rehearsal or awareness. Here we evaluate whether a brief, two-minute offline processing period can boost associative learning and test a memory reactivation account for these offline processing effects. After encoding paired associates, subjects either completed a distractor task for two minutes or were immediately tested for memory of the pairs in a counterbalanced, within-subjects functional magnetic resonance imaging study. Results showed that brief, awake, offline processing improves memory for associate pairs. Moreover, multi-voxel pattern analysis of the neuroimaging data suggested reactivation of encoded memory representations in dorsolateral prefrontal cortex during offline processing. These results signify the first demonstration of awake, active, offline enhancement of associative memory and suggest that such enhancement is accompanied by the offline reactivation of encoded memory representations}, language = {eng}, number = {4}, journal = {PLoS One}, author = {Bursley, James K and Nestor, Adrian and Tarr, Michael J and Creswell, J David}, year = {2016}, pmid = {27119345}, note = {Place: United States ISBN: 1932-6203}, pages = {e0127522}, } @article{tarr_when_1990, title = {When {Does} {Human} {Object} {Recognition} {Use} a {Viewer}-{Centered} {Reference} {Frame}?}, volume = {1}, copyright = {All rights reserved}, issn = {14679280}, doi = {10.1111/j.1467-9280.1990.tb00209.x}, abstract = {How do people recognize an object in different orientations? One theory is that the visual system describes the object relative to a reference frame centered on the object, resulting in a representation that is invariant across orientations. Chronometric data show that this is true only when an object can be identified uniquely by the arrangement of its parts along a single dimension. When an object can only be distinguished by an arrangement of its parts along more than one dimension, people mentally rotate it to a familiar orientation. This finding suggests that the human visual reference frame is tied to egocentric coordinates. © 1990, Association for Psychological Science. All rights reserved.}, number = {4}, journal = {Psychological Science}, author = {Tarr, M.J. and Pinker, S.}, year = {1990}, } @article{blanz_what_1999, title = {What object attributes determine canonical views?}, volume = {28}, copyright = {All rights reserved}, issn = {03010066}, abstract = {We investigated preferred or canonical views for familiar and three-dimensional non-sense objects using computer-graphics psychophysics. We assessed the canonical views for objects by allowing participants to actively rotate realistically shaded three-dimensional models in real-time. Objects were viewed on a Silicon Graphics workstation and manipulated in virtual space with a three-degree-of-freedom input device. 
In the first experiment, participants adjusted each object to the viewpoint from which they would take a photograph if they planned to use the object to illustrate a brochure. In the second experiment, participants mentally imaged each object on the basis of the name and then adjusted the object to the viewpoint from which they imagined it. In both experiments, there was a large degree of consistency across participants in terms of the preferred view for a given object. Our results provide new insights on the geometrical, experiential, and functional attributes that determine canonical views under ecological conditions.}, number = {5}, journal = {Perception}, author = {Blanz, V. and Tarr, M.J. and Bülthoff, H.H.}, year = {1999}, } @article{tarr_what_2001, title = {What defines a view?}, volume = {41}, copyright = {All rights reserved}, issn = {00426989}, doi = {10.1016/S0042-6989(01)00024-4}, abstract = {At a given instant we see only visible surfaces, not an object's complete 3D appearance. Thus, objects may be represented as discrete 'views' showing only those features visible from a limited range of viewpoints. We address how to define a view using Koenderink's (Koenderink \& Van Doorn, Biol. Cybernet. 32 (1979) 211.) geometric method for enumerating complete sets of stable views as aspect graphs. Using objects with known aspect graphs, five experiments examined whether the perception of orientation is sensitive to the qualitative features that define aspect graphs. Highest sensitivity to viewpoint changes was observed at locations where the theory predicts qualitative transitions, although some transitions did not affect performance. Hypotheses about why humans ignore some transitions offer insights into mechanisms for object representation. © 2001 Elsevier Science Ltd.}, number = {15}, journal = {Vision Research}, author = {Tarr, M.J. and Kriegman, D.J.}, year = {2001}, } @article{tarr_why_1998, title = {Why the visual recognition system might encode the effects of illumination}, volume = {38}, copyright = {All rights reserved}, issn = {00426989}, doi = {10.1016/S0042-6989(98)00041-8}, abstract = {A key problem in recognition is that the image of an object depends on the lighting conditions. We investigated whether recognition is sensitive to illumination using 3-D objects that were lit from either the left or right, varying both the shading and the cast shadows. In experiments 1 and 2 participants judged whether two sequentially presented objects were the same regardless of illumination. Experiment 1 used six objects that were easily discriminated and that were rendered with cast shadows. While no cost was found in sensitivity, there was a response time cost over a change in lighting direction. Experiment 2 included six additional objects that were similar to the original six objects making recognition more difficult. The objects were rendered with cast shadows, no shadows, and as a control, white shadows. With normal shadows a change in lighting direction produced costs in both sensitivity and response times. With white shadows there was a much larger cost in sensitivity and a comparable cost in response times. Without cast shadows there was no cost in either measure, but the overall performance was poorer. Experiment 3 used a naming task in which names were assigned to six objects rendered with cast shadows. Participants practised identifying the objects in two viewpoints lit from a single lighting direction.
Viewpoint and illumination invariance were then tested over new viewpoints and illuminations. Costs in both sensitivity and response time were found for naming the familiar objects in unfamiliar lighting directions regardless of whether the viewpoint was familiar or unfamiliar. Together these results suggest that illumination effects such as shadow edges: (1) affect visual memory; (2) serve the function of making unambiguous the three-dimensional shape; and (3) are modeled with respect to object shape, rather than simply encoded in terms of their effects in the image.}, number = {15-16}, journal = {Vision Research}, author = {Tarr, M.J. and Kersten, D. and Bülthoff, H.H.}, year = {1998}, } @article{Wang2023, title = {Better models of human high-level visual cortex emerge from natural language supervision with a large and diverse dataset}, volume = {5}, copyright = {All rights reserved}, issn = {2522-5839}, url = {https://doi.org/10.1038/s42256-023-00753-y}, doi = {10.1038/s42256-023-00753-y}, abstract = {High-performing neural networks for vision have dramatically advanced our ability to account for neural data in biological systems. Recently, further improvement in performance of these neural networks has been catalysed by joint training on images and natural language, increased dataset sizes and data diversity. We explored whether the same factors (joint training, dataset size and diversity) support similar improvements in the prediction of visual responses in the human brain. We used models pretrained with Contrastive Language-Image Pretraining (CLIP)—which learns image embeddings that best match text embeddings of image captions from diverse, large-scale datasets—to study visual representations. We built voxelwise encoding models based on CLIP image features to predict brain responses to real-world images. We found that ResNet50 with CLIP is a better model of high-level visual cortex, explaining up to R2 = 79\% of variance in voxel responses in held-out test data, a substantial increase from models trained only with image/label pairs (ImageNet trained ResNet) or text (BERT). Comparisons across different model backbones ruled out network architecture as a factor in performance improvements. Comparisons across models that controlled for dataset size and data diversity demonstrated that language feedback along with large and diverse datasets are important factors in explaining neural responses in high-level visual brain regions. Visualizations of model embeddings and principal component analysis revealed that our models capture both global and fine-grained semantic dimensions represented within human visual cortex.}, number = {12}, journal = {Nature Machine Intelligence}, author = {Wang, Aria Y and Kay, Kendrick and Naselaris, Thomas and Tarr, Michael J and Wehbe, Leila}, year = {2023}, pages = {1415--1426}, } @article{henderson_texture_2023, title = {A texture statistics encoding model reveals hierarchical feature selectivity across human visual cortex}, copyright = {All rights reserved}, url = {http://www.jneurosci.org/content/early/2023/04/27/JNEUROSCI.1822-22.2023.abstract}, doi = {10.1523/JNEUROSCI.1822-22.2023}, abstract = {Mid-level features, such as contour and texture, provide a computational link between low- and high-level visual representations.
While the nature of mid-level representations in the brain is not fully understood, past work has suggested a texture statistics model (P-S model; Portilla and Simoncelli, 2000) is a candidate for predicting neural responses in areas V1-V4 as well as human behavioral data. However, it is not currently known how well this model accounts for the responses of higher visual cortex to natural scene images. To examine this, we constructed single-voxel encoding models based on P-S statistics and fit the models to fMRI data from human subjects (both sexes) from the Natural Scenes Dataset (Allen et al., 2021). We demonstrate that the texture statistics encoding model can predict the held-out responses of individual voxels in early retinotopic areas and higher-level category-selective areas. The ability of the model to reliably predict signal in higher visual cortex suggests that the representation of texture statistics features is widespread throughout the brain. Furthermore, using variance partitioning analyses we identify which features are most uniquely predictive of brain responses, and show that the contributions of higher-order texture features increases from early areas to higher areas on the ventral and lateral surfaces. We also demonstrate that patterns of sensitivity to texture statistics can be used to recover broad organizational axes within visual cortex, including dimensions that capture semantic image content. These results provide a key step forward in characterizing how mid-level feature representations emerge hierarchically across the visual system. Significance Statement: Intermediate visual features, like texture, play an important role in cortical computations and may contribute to tasks like object and scene recognition. Here, we used a texture model proposed in past work to construct encoding models that predict the responses of neural populations in human visual cortex (measured with fMRI) to natural scene stimuli. We show that responses of neural populations at multiple levels of the visual system can be predicted by this model, and that the model is able to reveal an increase in the complexity of feature representations from early retinotopic cortex to higher areas of ventral and lateral visual cortex. These results support the idea that texture-like representations may play a broad underlying role in visual processing.}, journal = {The Journal of Neuroscience}, author = {Henderson, Margaret M and Tarr, Michael J and Wehbe, Leila}, month = may, year = {2023}, pages = {JN--RM--1822--22}, } @article{Aminoff2021, title = {Functional {Context} {Affects} {Scene} {Processing}}, volume = {33}, copyright = {All rights reserved}, issn = {0898-929X}, url = {https://doi.org/10.1162/jocn_a_01694}, doi = {10.1162/jocn_a_01694}, abstract = {Rapid visual perception is often viewed as a bottom–up process. Category-preferred neural regions are often characterized as automatic, default processing mechanisms for visual inputs of their categorical preference. To explore the sensitivity of such regions to top–down information, we examined three scene-preferring brain regions, the occipital place area (OPA), the parahippocampal place area (PPA), and the retrosplenial complex (RSC), and tested whether the processing of outdoor scenes is influenced by the functional contexts in which they are seen.
Context was manipulated by presenting real-world landscape images as if being viewed through a window or within a picture frame—manipulations that do not affect scene content but do affect one's functional knowledge regarding the scene. This manipulation influences neural scene processing (as measured by fMRI): The OPA and the PPA exhibited greater neural activity when participants viewed images as if through a window as compared with within a picture frame, whereas the RSC did not show this difference. In a separate behavioral experiment, functional context affected scene memory in predictable directions (boundary extension). Our interpretation is that the window context denotes three-dimensionality, therefore rendering the perceptual experience of viewing landscapes as more realistic. Conversely, the frame context denotes a 2-D image. As such, more spatially biased scene representations in the OPA and the PPA are influenced by differences in top–down, perceptual expectations generated from context. In contrast, more semantically biased scene representations in the RSC are likely to be less affected by top–down signals that carry information about the physical layout of a scene.}, number = {5}, journal = {Journal of Cognitive Neuroscience}, author = {Aminoff, Elissa M and Tarr, Michael J}, month = apr, year = {2021}, pages = {933--945}, } @book{Tarr2016, title = {Can big data help us understand human vision?}, copyright = {All rights reserved}, isbn = {978-1-315-41356-3}, abstract = {© 2017 Taylor \& Francis. Big Data seems to have an ever-increasing impact on our daily lives. Its application to human vision has been no less impactful. In particular, Big Data methods have been applied to both content and data analysis, enabling a new, more fine-grained understanding of how the brain encodes information about the visual environment. With respect to content, the most significant advance has been the use of large-scale, hierarchical models-typically “convolutional neural networks” or “deep networks”-to explicate how high-level visual tasks such as object categorization can be achieved based on learning across millions of images. With respect to data analysis, complex patterns underlying visual behavior can be identified in neural data using modern machine-learning methods or “multi-variate pattern analysis.” In this chapter, we discuss the pros and cons of these applications of Big Data, including limitations in how we can interpret results. In the end, we conclude that Big Data methods hold great promise for pursuing the challenges faced by both vision scientists and, more generally, cognitive neuroscientists.}, author = {Tarr, M.J. and Aminoff, E.M.}, year = {2016}, doi = {10.4324/9781315413570}, note = {Publication Title: Big Data in Cognitive Science}, } @article{Tarr1994, title = {A computational and evolutionary perspective on the role of representation in vision}, volume = {60}, copyright = {All rights reserved}, abstract = {Recently, the assumed goal of computer vision, reconstructing a representation of the scene, has been criticized as unproductive and impractical. Critics have suggested that the reconstructive approach should be supplanted by a new purposive approach that emphasizes functionality and task driven perception at the cost of general vision.
In response to these arguments, we claim that the recovery paradigm central to the reconstructive approach is viable, and, moreover, provides a promising framework for understanding and modeling general purpose vision in humans and machines. An examination of the goals of vision from an evolutionary perspective and a case study involving the recovery of optic flow support this hypothesis. In particular, while we acknowledge that there are instances where the purposive approach may be appropriate, these are insufficient for implementing the wide range of visual tasks exhibited by humans (the kind of flexible vision system presumed to be an end-goal of artificial intelligence). Furthermore, there are instances, such as recent work on the estimation of optic flow, where the recovery paradigm may yield useful and robust results. Thus, contrary to certain claims, the purposive approach does not obviate the need for recovery and reconstruction of flexible representations of the world. © 1994 Academic Press. All rights reserved.}, number = {1}, journal = {Computer Vision, Graphics, and Image Processing: Image Understanding}, author = {Tarr, M.J. J and Black, M.J. J}, year = {1994}, note = {Publisher: Yale University}, pages = {65--73}, } @article{laidlaw_comparing_2005, title = {Comparing {2D} vector field visualization methods: {A} user study}, volume = {11}, copyright = {All rights reserved}, issn = {10772626}, doi = {10.1109/TVCG.2005.4}, abstract = {We present results from a user study that compared six visualization methods for two-dimensional vector data. Users performed three simple but representative tasks using visualizations from each method: 1) locating all critical points in an image, 2) identifying critical point types, and 3) advecting a particle. Visualization methods included two that used different spatial distributions of short arrow icons, two that used different distributions of integral curves, one that used wedges located to suggest flow lines, and line-integral convolution (LIC). Results show different strengths and weaknesses for each method. We found that users performed these tasks better with methods that: 1) showed the sign of vectors within the vector field, 2) visually represented integral curves, and 3) visually represented the locations of critical points. Expert user performance was not statistically different from nonexpert user performance. We used several methods to analyze the data including omnibus analysis of variance, pairwise t-tests, and graphical analysis using inferential confidence intervals. We concluded that using the inferential confidence intervals for displaying the overall pattern of results for each task measure and for performing subsequent pairwise comparisons of the condition means was the best method for analyzing the data in this study. These results provide quantitative support for some of the anecdotal evidence concerning visualization methods. The tasks and testing framework also provide a basis for comparing other visualization methods, for creating more effective methods and for defining additional tasks to further understand the tradeoffs among the methods. In the future, we also envision extending this work to more ambitious comparisons, such as evaluating two-dimensional vectors on two-dimensional surfaces embedded in three-dimensional space and defining analogous tasks for three-dimensional visualization methods.}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {Laidlaw, D.H. 
and Kirby, R.M. and Jackson, C.D. and Davidson, J.S. and Miller, T.S. and Da Silva, M. and Warren, W.H. and Tarr, M.J.}, year = {2005}, } @article{schultz_are_1997, title = {Are face identity and emotion processed automatically?}, volume = {5}, copyright = {All rights reserved}, issn = {10538119}, number = {4 PART II}, journal = {NeuroImage}, author = {Schultz, R.T. and Gauthier, I. and Fulbright, R. and Anderson, A.W. and Lacadie, C. and Skudlarski, P. and Tarr, M.J. and Cohen, D.J. and Gore, J.C.}, year = {1997}, } @article{harrison_geometry_2001, title = {The geometry of "cognitive maps": {Metric} vs. ordinal structure}, volume = {1}, copyright = {All rights reserved}, issn = {15347362}, doi = {10.1167/1.3.137}, abstract = {What geometric properties of the environment are preserved in spatial knowledge for navigation? In the first of a series of studies of this question, we investigate whether human navigation on learned routes relies on metric distances and angles or ordinal relations among junctions and paths. The experiments were conducted during active walking in a 40 × 40 ft virtual environment of a garden or hedge maze, generated on an SGI workstation. Participants wore a head-mounted display (60 deg H × 40 deg V) and head position was measured by a hybrid sonic/inertial tracking system (50 ms latency). Initially, participants learned the layout of ten places in the garden (statue, well, bird bath, fountain, etc.) by free exploration. They were then tested on their ability to walk to specified places from a "home" location while their routes and times were recorded. On random catch trials, the virtual garden was distorted in two ways. To test metric distance, the garden was stretched along its main axis by 160 or 137 percent, such that walking a learned distance would result in turning at the wrong junction. Similarly, to test metric angle, a "radial arm" section of the garden was rotated by 45 deg, such that turning a learned angle would result in taking the wrong path. Results suggest that route navigation relies more on ordinal relations among junctions and paths than on metric distances and angles.}, number = {3}, journal = {Journal of Vision}, author = {Harrison, M.C. and Warren, W.H. and Tarr, M.J.}, year = {2001}, } @article{peissig_contrast_2003, title = {Contrast reversals in faces and objects: {The} effect of albedo}, volume = {3}, copyright = {All rights reserved}, issn = {15347362}, doi = {10.1167/3.9.823}, abstract = {Faces are more difficult to recognize than exemplars of other object categories, e.g., chairs, when viewed in reverse contrast (Subramaniam \& Biederman, 1997). The visual mechanisms underlying this phenomenon, however, are not well understood. One possibility is that faces and objects rely on separate systems - one sensitive and the other insensitive to contrast reversal. Alternatively, the complex pigmentation patterns characteristic of faces may contribute to this phenomenon (Bruce \& Langton, 1994; Liu et al., 1999; Kemp et al., 1996). To date there has not been a direct comparison of the same faces or objects with and without albedo information (here, defined as the reflectance map of a surface independent of shading/shadows). We tested observers in a same/different sequential-matching task using grayscale images of the same face models with or without albedo information. Observers were instructed to base their judgments on face identity, ignoring any changes in contrast.
On trials in which faces were shown in different contrasts (e.g., positive-negative), observers responded more slowly and less accurately as compared to trials in which both faces were shown in the same contrast. This decrement in performance was significantly greater for faces with albedo than for faces without albedo. These results indicate that reported differences between faces and objects across contrast reversal may be attributed to the presence or absence of informative surface information and not to stimulus category. This hypothesis was tested further by comparing the effect of contrast reversal on non-face objects with and without albedo. We conclude that contrast reversal disrupts the recognition of both faces and objects to a greater degree in the presence of informative albedo. That is, another putative dissociation between face and object processing can be accounted for by factors other than the object category per se.}, number = {9}, journal = {Journal of Vision}, author = {Peissig, J.J. and Vuong, Q.C. and Harrison, M.C. and Tarr, M.J.}, year = {2003}, } @article{foo_dependence_2002, title = {Dependence on path integration and landmarks when learning a new environment}, volume = {2}, copyright = {All rights reserved}, issn = {15347362}, doi = {10.1167/2.7.419}, abstract = {Like honeybees (Dyer, 1991), humans do not seem to build a metric "cognitive map" from path integration, but rely on visual landmarks to take novel short-cuts in a known environment (Foo et al., 2001). In the present experiment we investigate dependence on path integration and local landmarks when learning the layout of a new environment. Do people, like ants (Collett et al., 1999), first depend on path integration to learn the locations of landmarks, and then shift to rely on landmarks? Or do they rely on landmarks from the beginning? Participants walked in a 40 × 40 ft virtual environment while head position and orientation were measured with a sonic/inertial tracker. Training: participants learned two legs of a triangle with feedback: the paths from Home to A and Home to B. A configuration of colored posts surrounded the A location, with another cluster placed on the path between H and B. Test: participants walked the learned legs or the novel short-cut between A and B without feedback. On catch trials, one cluster of posts was translated by 2 m, so as to probe reliance on landmarks denoting a place (H-A), a known route (H-B), a route home (B-H), or a short-cut (B-A). Catch trials were initiated immediately after the onset of learning to track changes in the reliance on landmarks vs. path integration. Preliminary results suggest that participants' early use of path integration decreases during learning so they come to rely on landmarks that are associated with targets and routes. Thus, like ants, active navigation becomes dependent on local landmarks with increased familiarity with the environment.}, number = {7}, journal = {Journal of Vision}, author = {Foo, P. and Warren, W. and Tarr, M.}, year = {2002}, } @article{cutzu_canonical_1997, title = {Canonical views of common 3d objects: psychophysical and computational studies}, volume = {38}, copyright = {All rights reserved}, issn = {01460404}, abstract = {Purpose. Subjects consistently rate certain object views as being better or more canonical than others. Since goodness-of-view (GOV) is consistent across SS and experimental tasks it may reflect measurable image or 3D object properties.
We investigated the factors determining GOV for 7 common objects. Methods. Psychophysics: GOV was measured for 12 different viewpoints of each object in a paired comparison task in which SS selected the best view in all possible view pairs. Pairwise view similarities were derived from same/different RT measured in a sequential viewpoint matching paradigm run on all possible view pairs of the same object. 5 SS participated in each experiment. Simulations: Two viewpoint-dependent geometrical properties were measured for each object: visible 3D surface area and the 3D → 2D imaging deformation of visible surfaces. A similarity measure was derived from the distances between object silhouettes extracted from the views. A measure of the information content of an object view was derived from the edge map of the image. Viewpoint stability was numerically estimated for each tested view. Results. GOV correlated significantly with summed perceptual similarity to the other views of the same object. The summed similarity score was interpreted as the typicality of the view. Distances among views measured in silhouette space reproduced the pattern of perceptual similarities. GOV increased with visible 3D surface area and decreased with imaging deformation. GOV was highly correlated (0.85) with total edge length in an edge-detected version of the image, and significantly correlated with contour perimeter. While locally unstable views tend to be subjectively bad, the stable viewpoints were not always good. Conclusions. GOV is interpretable as typicality and while it relates to viewpoint-dependent 3D geometrical properties it is best explained by characteristics of the intensity edges and image silhouette. Support was provided by the Office of Naval Research contract \# N00014-93-1-0305 to MJT.}, number = {4}, journal = {Investigative Ophthalmology and Visual Science}, author = {Cutzu, F. and Edelman, S. and Tarr, M.J.}, year = {1997}, } @book{hayward_visual_2005, title = {Visual perception {II}: {High}-level vision}, copyright = {All rights reserved}, isbn = {978-1-84860-817-7}, author = {Hayward, W.G.
and Tarr, M.J.}, year = {2005}, doi = {10.4135/9781848608177.n2}, note = {Publication Title: Handbook of Cognition}, } @article{Tarr2000, title = {{FFA}: a flexible fusiform area for subordinate-level visual processing automatized by expertise.}, volume = {3}, copyright = {All rights reserved}, doi = {10.1038/77666}, language = {eng}, number = {8}, journal = {Nat Neurosci}, author = {Tarr, M J and Gauthier, I}, year = {2000}, pmid = {10903568}, note = {Place: UNITED STATES ISBN: 1097-6256}, pages = {764--769}, } @article{Lebrecht2012, title = {Can {Neural} {Signals} for {Visual} {Preference} {Predict} {Real}-{World} {Choices}?}, volume = {62}, copyright = {All rights reserved}, doi = {10.1525/bio.2012.62.11.2}, number = {11}, journal = {BioScience}, author = {Lebrecht, Sophie and Tarr, Michael J}, year = {2012}, pages = {937--938}, } @article{tarr_rotating_1995, title = {Rotating objects to recognize them: {A} case study of the role of viewpoint dependency in the recognition of three-dimensional objects}, volume = {2}, copyright = {All rights reserved}, number = {1}, journal = {Psychonomic Bulletin and Review}, author = {Tarr, M J}, year = {1995}, pages = {55--82}, } @misc{righi_category-selective_2013, title = {Category-{Selective} {Recruitment} of the {Fusiform} {Gyrus} with {Chess} {Expertise}}, copyright = {All rights reserved}, publisher = {Psychology Press}, author = {Righi, Giulia and Tarr, Michael J and Kingon, Ashley}, editor = {Staszewski, James J}, year = {2013}, note = {Publication Title: Expertise and Skill Acquisition: The Impact of William G. Chase Place: New York, NY}, } @article{swann_configuration_2004, title = {Configuration {Protection} {Harmonized}}, volume = {94}, copyright = {All rights reserved}, journal = {The Trademark Reporter}, author = {Swann, J B and Tarr, M J}, year = {2004}, pages = {1182}, } @misc{tarr_visual_1994, title = {Visual representation}, copyright = {All rights reserved}, publisher = {Academic Press}, author = {Tarr, M J}, editor = {Ramachandran, V S}, year = {1994}, note = {Pages: 503-512 Publication Title: Encyclopedia of Human Behavior Volume: 4 Place: San Diego}, } @misc{tarr_geon_1994, title = {Geon recognition is viewpoint dependent}, copyright = {All rights reserved}, author = {Tarr, M J and Hayward, W G and Gauthier, I and Williams, P}, year = {1994}, note = {Publication Title: 35th Annual Meeting of the Psychonomic Society Place: St. Louis, MO}, } @article{righi_are_2004, title = {Are chess experts any different from face, bird, or {Greeble} experts?}, volume = {4}, copyright = {All rights reserved}, number = {8}, journal = {Journal of Vision}, author = {Righi, G and Tarr, M J}, year = {2004}, pages = {504a}, } @article{xu_fine-grained_2013, title = {Fine-grained temporal coding of visually-similar categories in the ventral visual pathway and prefrontal cortex}, volume = {4}, copyright = {All rights reserved}, doi = {10.3389/fpsyg.2013.00684}, journal = {Frontiers in Psychology}, author = {Xu, Yang and D'Lauro, Christopher and Pyles, John A and Kass, Robert E and Tarr, Michael J}, year = {2013}, note = {ISBN: 1664-1078}, } @article{gauthier_bold_2002, title = {{BOLD} activity during mental rotation and viewpoint-dependent object recognition.}, volume = {34}, copyright = {All rights reserved}, abstract = {We measured brain activity during mental rotation and object recognition with objects rotated around three different axes. 
Activity in the superior parietal lobe (SPL) increased proportionally to viewpoint disparity during mental rotation, but not during object recognition. In contrast, the fusiform gyrus was preferentially recruited in a viewpoint-dependent manner in recognition as compared to mental rotation. In addition, independent of the effect of viewpoint, object recognition was associated with ventral areas and mental rotation with dorsal areas. These results indicate that the similar behavioral effects of viewpoint obtained in these two tasks are based on different neural substrates. Such findings call into question the hypothesis that mental rotation is used to compensate for changes in viewpoint during object recognition.}, language = {eng}, number = {1}, journal = {Neuron}, author = {Gauthier, Isabel and Hayward, William G and Tarr, Michael J and Anderson, Adam W and Skudlarski, Pawel and Gore, John C}, year = {2002}, pmid = {11931750}, note = {Place: United States ISBN: 0896-6273}, pages = {161--171}, }

Under review

Could you send me the link to your page?

It was a caching issue. I loaded the URL once with &nocache=1 to force-update the cache, and now it's showing. How recently did you add these additional publications to your Zotero?
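For anyone else who hits this: the cache-busting URL looks roughly like the following (a sketch only; YOUR_ZOTERO_URL is a placeholder for whatever value your own bib parameter uses):

https://bibbase.org/show?bib=YOUR_ZOTERO_URL&nocache=1

Loading that once in a browser refreshes the cached copy; after that, the normal URL without &nocache=1 serves the updated list.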

By the way, I recommend against using iframes for embedding.
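If you want to switch, the JavaScript embed looks roughly like this (again a sketch; YOUR_ZOTERO_URL is a placeholder for your own source URL):

<script src="https://bibbase.org/show?bib=YOUR_ZOTERO_URL&jsonp=1"></script>

Unlike an iframe, this injects the publication list directly into your page, so it inherits your page's styling and isn't clipped to a fixed frame height.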
