@inproceedings{12437,
  author    = {Gopalan, R. and Li, Ruonan and Chellappa, Rama},
  title     = {Domain adaptation for object recognition: An unsupervised approach},
  booktitle = {2011 {IEEE} International Conference on Computer Vision ({ICCV})},
  year      = {2011},
  month     = nov,
  pages     = {999--1006},
  publisher = {IEEE},
  abstract  = {Adapting the classifier trained on a source domain to recognize instances from a new target domain is an important problem that is receiving recent attention. In this paper, we present one of the first studies on unsupervised domain adaptation in the context of object recognition, where we have labeled data only from the source domain (and therefore do not have correspondences between object categories across domains). Motivated by incremental learning, we create intermediate representations of data between the two domains by viewing the generative subspaces (of same dimension) created from these domains as points on the Grassmann manifold, and sampling points along the geodesic between them to obtain subspaces that provide a meaningful description of the underlying domain shift. We then obtain the projections of labeled source domain data onto these subspaces, from which a discriminative classifier is learnt to classify projected data from the target domain. We discuss extensions of our approach for semi-supervised adaptation, and for cases with multiple source and target domains, and report competitive results on standard datasets.},
  keywords  = {Data models, data representations, discriminative classifier, Feature extraction, Grassmann manifold, image sampling, incremental learning, labeled source domain, Manifolds, measurement, object category, Object recognition, Principal component analysis, sampling points, semisupervised adaptation, target domain, underlying domain shift, unsupervised approach, unsupervised domain adaptation, Unsupervised learning, vectors},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126344},
}

@inproceedings{12034,
  author    = {Ogale, A. S. and Aloimonos, J.},
  title     = {The influence of shape on image correspondence},
  booktitle = {2nd International Symposium on {3D} Data Processing, Visualization and Transmission ({3DPVT} 2004), Proceedings},
  year      = {2004},
  month     = sep,
  pages     = {945--952},
  publisher = {IEEE},
  abstract  = {We examine the implications of shape on the process of finding dense correspondence and half-occlusions for a stereo pair of images. The desired property of the depth map is that it should be a piecewise continuous function which is consistent with the images and which has the minimum number of discontinuities. To zeroeth order, piecewise continuity becomes piecewise constancy. Using this approximation, we first discuss an approach for dealing with such a fronto-parallel shapeless world, and the problems involved therein. We then introduce horizontal and vertical slant to create a first order approximation to piecewise continuity. We highlight the fact that a horizontally slanted surface (ie. having depth variation in the direction of the separation of the two cameras) appears horizontally stretched in one image as compared to the other image. Thus, while corresponding two images, N pixels on a scanline in one image may correspond to a different number of pixels M in the other image, which has consequences with regard to sampling and occlusion detection. We also discuss the asymmetry between vertical and horizontal slant, and the central role of nonhorizontal edges in the context of vertical slant. Using experiments, we discuss cases where existing algorithms fail, and how the incorporation of new constraints provides correct results.},
  keywords  = {Automation, CAMERAS, Computational modeling, first order approximation, Geometrical optics, hidden feature removal, image sampling, Image segmentation, Layout, occlusion detection, piecewise continuous function, Pixel, SHAPE, Simulated annealing, stereo image processing, surface fitting},
  isbn      = {0-7695-2223-8},
  doi       = {10.1109/TDPVT.2004.1335418},
}

@inproceedings{12055,
  author    = {Neumann, J. and Ferm{\"u}ller, Cornelia and Aloimonos, J.},
  title     = {Polydioptric camera design and {3D} motion estimation},
  booktitle = {2003 {IEEE} Computer Society Conference on Computer Vision and Pattern Recognition ({CVPR} 2003), Proceedings},
  volume    = {2},
  year      = {2003},
  month     = jun,
  pages     = {II-294--II-301},
  publisher = {IEEE},
  abstract  = {Most cameras used in computer vision applications are still based on the pinhole principle inspired by our own eyes. It has been found though that this is not necessarily the optimal image formation principle for processing visual information using a machine. We describe how to find the optimal camera for 3D motion estimation by analyzing the structure of the space formed by the light rays passing through a volume of space. Every camera corresponds to a sampling pattern in light ray space, thus the question of camera design can be rephrased as finding the optimal sampling pattern with regard to a given task. This framework suggests that large field-of-view multi-perspective (polydioptric) cameras are the optimal image sensors for 3D motion estimation. We conclude by proposing design principles for polydioptric cameras and describe an algorithm for such a camera that estimates its 3D motion in a scene independent and robust manner.},
  keywords  = {3D motion estimation, Algorithm design and analysis, Application software, CAMERAS, Computer vision, Eyes, field-of-view camera, Image motion analysis, image sampling, image sensor, Image sensors, Layout, light ray, Motion estimation, multiperspective camera, optimal camera, optimal image formation, optimal sampling pattern, pinhole principle, polydioptric camera design, ray space, scene independent estimation, space structure analysis, stereo image processing, visual information processing},
  isbn      = {0-7695-1900-8},
  doi       = {10.1109/CVPR.2003.1211483},
}

@inproceedings{14188,
  author    = {Baker, P. and Ferm{\"u}ller, Cornelia and Aloimonos, J. and Pless, R.},
  title     = {A spherical eye from multiple cameras (makes better models of the world)},
  booktitle = {Proceedings of the 2001 {IEEE} Computer Society Conference on Computer Vision and Pattern Recognition ({CVPR} 2001)},
  volume    = {1},
  year      = {2001},
  pages     = {I-576--I-583},
  publisher = {IEEE},
  abstract  = {The paper describes an imaging system that has been designed specifically for the purpose of recovering egomotion and structure from video. The system consists of six cameras in a network arranged so that they sample different parts of the visual sphere. This geometric configuration has provable advantages compared to small field of view cameras for the estimation of the system{\textquoteright}s own motion and consequently the estimation of shape models from the individual cameras. The reason is that inherent ambiguities of confusion between translation and rotation disappear. We provide algorithms for the calibration of the system and 3D motion estimation. The calibration is based on a new geometric constraint that relates the images of lines parallel in space to the rotation between the cameras. The 3D motion estimation uses a constraint relating structure directly to image gradients.},
  keywords  = {3D motion estimation, Calibration, camera network, CAMERAS, Computer vision, egomotion recovery, geometric configuration, geometric constraint, image gradients, image sampling, imaging system, Laboratories, Layout, Motion estimation, multiple cameras, Pixel, Robot vision systems, SHAPE, shape models, Space technology, spherical eye, system calibration, video, video cameras, video signal processing, visual sphere sampling},
  isbn      = {0-7695-1272-0},
  doi       = {10.1109/CVPR.2001.990525},
}