@article{Navalpakkam_Itti05vr,
  author   = {Navalpakkam, V. and Itti, L.},
  title    = {Modeling the influence of task on attention},
  journal  = {Vision Research},
  volume   = {45},
  number   = {2},
  pages    = {205--231},
  year     = {2005},
  month    = jan,
  abstract = {We propose a computational model for the task-specific guidance of visual attention in real-world scenes. Our model emphasizes four aspects that are important in biological vision: determining task-relevance of an entity, biasing attention for the low-level visual features of desired targets, recognizing these targets using the same low-level features, and incrementally building a visual map of task-relevance at every scene location. Given a task definition in the form of keywords, the model first determines and stores the task-relevant entities in working memory, using prior knowledge stored in long-term memory. It attempts to detect the most relevant entity by biasing its visual attention system with the entity's learned low-level features. It attends to the most salient location in the scene, and attempts to recognize the attended object through hierarchical matching against object representations stored in long-term memory. It updates its working memory with the task-relevance of the recognized entity and updates a topographic task-relevance map with the location and relevance of the recognized entity. The model is tested on three types of tasks: single-target detection in 343 natural and synthetic images, where biasing for the target accelerates target detection over two-fold on average; sequential multiple-target detection in 28 natural images, where biasing, recognition, working memory and long term memory contribute to rapidly finding all targets; and learning a map of likely locations of cars from a video clip filmed while driving on a highway. The model's performance on search for single features and feature conjunctions is consistent with existing psychophysical data. These results of our biologically-motivated architecture suggest that the model may provide a reasonable approximation to many brain processes involved in complex task-driven visual behaviors.},
  keywords = {Attention ; top-down ; bottom-up ; object detection ; recognition ; task-relevance ; scene analysis},
  type     = {bu ; td ; mod ; sc},
  file     = {http://iLab.usc.edu/publications/doc/Navalpakkam_Itti05vr.pdf},
  if       = {2003 impact factor: 1.958},
}

