@article {20295, title = {Biofilms Comprise a Component of the Annual Cycle of Vibrio cholerae in the Bay of Bengal Estuary}, journal = {mBio}, year = {2018}, month = {Feb-05-2018}, pages = {e00483-18}, abstract = {Vibrio cholerae, an estuarine bacterium, is the causative agent of cholera, a severe diarrheal disease that demonstrates seasonal incidence in Bangladesh. In an extensive study of V. cholerae occurrence in a natural aquatic environment, water and plankton samples were collected biweekly between December 2005 and November 2006 from Mathbaria, an estuarine village of Bangladesh near the mangrove forests of the Sundarbans. Toxigenic V. cholerae exhibited two seasonal growth peaks, one in spring (March to May) and another in autumn (September to November), corresponding to the two annual seasonal outbreaks of cholera in this region. The total numbers of bacteria determined by heterotrophic plate count (HPC), representing culturable bacteria, accounted for 1\% to 2.7\% of the total numbers obtained using acridine orange direct counting (AODC). The highest bacterial culture counts, including toxigenic V. cholerae, were recorded in the spring. The direct fluorescent antibody (DFA) assay was used to detect V. cholerae O1 cells throughout the year, as free-living cells, within clusters, or in association with plankton. V. cholerae O1 varied significantly in morphology, appearing as distinctly rod-shaped cells in the spring months, while small coccoid cells within thick clusters of biofilm were observed during interepidemic periods of the year, notably during the winter months. Toxigenic V. cholerae O1 was culturable in natural water during the spring when the temperature rose sharply. The results of this study confirmed biofilms to be a means of persistence for bacteria and an integral component of the annual life cycle of toxigenic V. cholerae in the estuarine environment of Bangladesh.}, doi = {10.1128/mBio.00483-18}, url = {https://mbio.asm.org/content/9/2/e00483-18}, author = {Sultana, Marzia and Nusrin, Suraia and Hasan, Nur A. and Sadique, Abdus and Ahmed, Kabir U. and Islam, Atiqul and Hossain, Anwar and Longini, Ira and Nizam, Azhar and Huq, Anwar and Siddique, Abul K. and Sack, David A. and Sack, Richard B. 
and Rita R Colwell and Alam, Munirul}, editor = {Vidaver, Anne K.} } @article {20252, title = {Deep-learning-assisted Volume Visualization}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2018}, month = {Jan-01-2018}, pages = {1 - 1}, issn = {1077-2626}, doi = {10.1109/TVCG.2018.2796085}, url = {http://ieeexplore.ieee.org/document/8265023/}, author = {Cheng, Hsueh-Chien and Cardone, Antonio and Jain, Somay and Krokos, Eric and Narayan, Kedar and Subramaniam, Sriram and Varshney, Amitabh} } @article {20320, title = {Application of a paper based device containing a new culture medium to detect Vibrio cholerae in water samples collected in Haiti}, journal = {Journal of Microbiological Methods}, volume = {133}, year = {2017}, month = {Jan-02-2017}, pages = {23 - 31}, issn = {0167-7012}, doi = {10.1016/j.mimet.2016.12.014}, url = {https://www.sciencedirect.com/science/article/pii/S0167701216303578?via\%3Dihub}, author = {Briquaire, Romain and Rita R Colwell and Boncy, Jacques and Rossignol, Emmanuel and Dardy, Aline and Pandini, Isabelle and Villeval, Fran{\c c}ois and Machuron, Jean-Louis and Huq, Anwar and Rashed, Shah and Vandevelde, Thierry and Rozand, Christine} } @proceedings {20156, title = {Deep-learning-assisted visualization for live-cell images}, year = {2017}, month = {09/2017}, publisher = {IEEE}, address = {Beijing, China}, abstract = {Analyzing live-cell images is particularly challenging because cells move at the same time they undergo systematic changes. Visually inspecting live-cell images therefore involves simultaneously tracking individual cells and detecting relevant spatio-temporal changes. The high cognitive burden of such a complex task makes this kind of analysis inefficient and error-prone. In this paper we describe a deep-learning-assisted visualization based on automatically derived high-level features to identify target cell changes in live-cell images. Applying a novel user-mediated color assignment scheme that maps abstract features into corresponding colors, we create color-based visual annotations that facilitate visual reasoning and analysis of complex time-varying live-cell imagery datasets. The visual representations can be used to study temporal changes in cells, such as the morphological changes in cells at various stages of the life cycle.}, keywords = {deep learning, live-cell images, Visualization}, author = {Hsueh-Chien Cheng and Cardone, Antonio and Krokos, Eric and Stoica, Bogdan and Faden, Alan and Varshney, Amitabh} } @conference {20155, title = {Interactive exploration of microstructural features in gigapixel microscopy images}, booktitle = {IEEE International Conference on Image Processing}, year = {2017}, month = {09/2017}, publisher = {IEEE}, organization = {IEEE}, address = {Beijing, China}, abstract = {Modern imaging technologies enable the study of microstructural features, which require capturing the finest details in high-resolution gigapixel images. Nevertheless, the resolution disparity between gigapixel images and megapixel displays presents a challenge to effective visual analysis because subtle texture differences are hardly perceivable at coarser resolutions. In this paper we present a hierarchical segmentation technique based on joint distribution of intensity and noise-resistant local binary patterns to differentiate subtle microstructural textures across various scales.
The coarse-to-fine segmentation procedure subdivides each parent segment into texturally distinct child segments at progressively higher resolutions. The hierarchical structure of segments allows creating intermediate segmentation results interactively. Based on the intermediate results, we highlight regions with texture differences using distinct colors, which provide salient visual hints to users regardless of the current viewing resolution. Our new technique has been validated on large microscopy images and shows promising results.}, keywords = {gigapixel images, Image segmentation}, author = {Hsueh-Chien Cheng and Cardone, Antonio and Varshney, Amitabh} } @article {20329, title = {Climate influence on Vibrio and associated human diseases during the past half-century in the coastal North Atlantic}, journal = {Proceedings of the National Academy of Sciences}, year = {2016}, month = {Nov-08-2017}, pages = {E5062 - E5071}, abstract = {Climate change is having a dramatic impact on marine animal and plant communities but little is known of its influence on marine prokaryotes, which represent the largest living biomass in the world oceans and play a fundamental role in maintaining life on our planet. In this study, for the first time to our knowledge, experimental evidence is provided on the link between multidecadal climatic variability in the temperate North Atlantic and the presence and spread of an important group of marine prokaryotes, the vibrios, which are responsible for several infections in both humans and animals. Using archived formalin-preserved plankton samples collected by the Continuous Plankton Recorder survey over the past half-century (1958{\textendash}2011), we assessed retrospectively the relative abundance of vibrios, including human pathogens, in nine areas of the North Atlantic and North Sea and showed correlation with climate and plankton changes. Generalized additive models revealed that long-term increase in Vibrio abundance is promoted by increasing sea surface temperatures (up to \~{}1.5 {\textdegree}C over the past 54 y) and is positively correlated with the Northern Hemisphere Temperature (NHT) and Atlantic Multidecadal Oscillation (AMO) climatic indices (P < 0.001). Such increases are associated with an unprecedented occurrence of environmentally acquired Vibrio infections in the human population of Northern Europe and the Atlantic coast of the United States in recent years.}, issn = {0027-8424}, doi = {10.1073/pnas.1609157113}, url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1609157113}, author = {Vezzulli, Luigi and Grande, Chiara and Reid, Philip C. and H{\'e}laou{\"e}t, Pierre and Edwards, Martin and H{\"o}fle, Manfred G. and Brettar, Ingrid and Rita R Colwell and Pruzzo, Carla} } @article {20357, title = {Phylodynamic Analysis of Clinical and Environmental Vibrio cholerae Isolates from Haiti Reveals Diversification Driven by Positive Selection}, journal = {mBio}, year = {2014}, month = {Jul-12-2016}, abstract = {Phylodynamic analysis of genome-wide single-nucleotide polymorphism (SNP) data is a powerful tool to investigate underlying evolutionary processes of bacterial epidemics. The method was applied to investigate a collection of 65 clinical and environmental isolates of Vibrio cholerae from Haiti collected between 2010 and 2012. Characterization of isolates recovered from environmental samples identified a total of four toxigenic V. cholerae O1 isolates, four non-O1/O139 isolates, and a novel nontoxigenic V.
cholerae O1 isolate with the classical tcpA gene. Phylogenies of strains were inferred from genome-wide SNPs using coalescent-based demographic models within a Bayesian framework. A close phylogenetic relationship between clinical and environmental toxigenic V. cholerae O1 strains was observed. As cholera spread throughout Haiti between October 2010 and August 2012, the population size initially increased and then fluctuated over time. Selection analysis along internal branches of the phylogeny showed a steady accumulation of synonymous substitutions and a progressive increase of nonsynonymous substitutions over time, suggesting diversification likely was driven by positive selection. Short-term accumulation of nonsynonymous substitutions driven by selection may have significant implications for virulence, transmission dynamics, and even vaccine efficacy.}, doi = {10.1128/mBio.01824-14}, url = {http://mbio.asm.org/lookup/doi/10.1128/mBio.01824-14}, author = {Azarian, Taj and Ali, Afsar and Johnson, Judith A. and Mohr, David and Prosperi, Mattia and Veras, Nazle M. and Jubair, Mohammed and Strickland, Samantha L. and Rashid, Mohammad H. and Alam, Meer T. and Weppelmann, Thomas A. and Katz, Lee S. and Tarr, Cheryl L. and Rita R Colwell and Morris, J. Glenn and Salemi, Marco} } @inbook {19629, title = {Adaptive and Concurrent Secure Computation from New Adaptive, Non-malleable Commitments}, booktitle = {Advances in Cryptology - ASIACRYPT 2013}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {316 - 336}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a unified approach for obtaining general secure computation that achieves adaptive-Universally Composable (UC)-security. Using our approach we essentially obtain all previous results on adaptive concurrent secure computation, both in relaxed models (e.g., quasi-polynomial time simulation), as well as trusted setup models (e.g., the CRS model, the imperfect CRS model). This provides conceptual simplicity and insight into what is required for adaptive and concurrent security, as well as yielding improvements to set-up assumptions and/or computational assumptions in known models. Additionally, we provide the first constructions of concurrent secure computation protocols that are adaptively secure in the timing model, and the non-uniform simulation model. As a corollary we also obtain the first adaptively secure multiparty computation protocol in the plain model that is secure under bounded-concurrency. Conceptually, our approach can be viewed as an adaptive analogue to the recent work of Lin, Pass and Venkitasubramaniam [STOC {\textquoteleft}09], who considered only non-adaptive adversaries. Their main insight was that the non-malleability requirement could be decoupled from the simulation requirement to achieve UC-security. A main conceptual contribution of this work is, quite surprisingly, that it is still the case even when considering adaptive security. A key element in our construction is a commitment scheme that satisfies a strong definition of non-malleability. Our new primitive of concurrent equivocal non-malleable commitments, intuitively, guarantees that even when a man-in-the-middle adversary observes concurrent equivocal commitments and decommitments, the binding property of the commitments continues to hold for commitments made by the adversary. This definition is stronger than previous ones, and may be of independent interest. 
Previous constructions that satisfy our definition have been constructed in setup models, but either require the existence of stronger encryption schemes such as CCA-secure encryption or require independent {\textquotedblleft}trapdoors{\textquotedblright} provided by the setup for every pair of parties to ensure non-malleability. A main technical contribution of this work is to provide a construction that eliminates these requirements and requires only a single trapdoor.}, keywords = {Algorithm Analysis and Problem Complexity, Applications of Mathematics, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-42032-0, 978-3-642-42033-7}, url = {http://link.springer.com/chapter/10.1007/978-3-642-42033-7_17}, author = {Dana Dachman-Soled and Malkin, Tal and Raykova, Mariana and Venkitasubramaniam, Muthuramakrishnan}, editor = {Sako, Kazue and Sarkar, Palash} } @article {19587, title = {From Principles to Practice with Class in the First Year}, journal = {arXiv:1306.4713 [cs]}, year = {2013}, month = {2013/06/19/}, abstract = {We propose a bridge between functional and object-oriented programming in the first-year curriculum. Traditionally, curricula that begin with functional programming transition to a professional, usually object-oriented, language in the second course. This transition poses obstacles for students, and often results in confusing the details of development environments, syntax, and libraries with the fundamentals of OO programming that the course should focus on. Instead, we propose to begin the second course with a sequence of custom teaching languages which minimize the transition from the first course, and allow students to focus on core ideas. After working through the sequence of pedagogical languages, we then transition to Java, at which point students have a strong command of the basic principles. We have 3 years of experience with this course, with notable success.}, keywords = {Computer Science - Programming Languages}, url = {http://arxiv.org/abs/1306.4713}, author = {Tobin-Hochstadt, Sam and David Van Horn} } @article {20364, title = {Identification of bacteria in enrichment cultures of sulfate reducers in the Cariaco Basin water column employing Denaturing Gradient Gel Electrophoresis of 16S ribosomal RNA gene fragments}, journal = {Aquatic Biosystems}, volume = {9}, year = {2013}, month = {Jan-01-2013}, pages = {17}, abstract = {Background: The Cariaco Basin is characterized by pronounced and predictable vertical layering of microbial communities dominated by reduced sulfur species at and below the redox transition zone. Marine water samples were collected in May, 2005 and 2006, at the sampling stations A (10{\textdegree}30' N, 64{\textdegree}40' W), B (10{\textdegree}40' N, 64{\textdegree}45' W) and D (10{\textdegree}43' N, 64{\textdegree}32' W) from different depths, including surface, redox interface, and anoxic zones. In order to enrich for sulfate reducing bacteria (SRB), water samples were inoculated into anaerobic media amended with lactate or acetate as carbon source. To analyze the composition of enrichment cultures, we performed DNA extraction, PCR-DGGE, and sequencing of selected bands.
Results: DGGE results indicate that many bacterial genera were present that are associated with the sulfur cycle, including Desulfovibrio spp., as well as heterotrophs belonging to Vibrio, Enterobacter, Shewanella, Fusobacterium, Marinifilum, Marinilabilia, and Spirochaeta. These bacterial populations are related to sulfur coupling and carbon cycles in an environment of variable redox conditions and oxygen availability. Conclusions: In our studies, we found an association of SRB-like Desulfovibrio with Vibrio species and other genera that have a previously defined relevant role in sulfur transformation and coupling of carbon and sulfur cycles in an environment where there are variable redox conditions and oxygen availability. This study provides new information about microbial species that were culturable on media for SRB under anaerobic conditions at several locations and water depths in the Cariaco Basin.}, issn = {2046-9063}, doi = {10.1186/2046-9063-9-17}, url = {http://aquaticbiosystems.biomedcentral.com/articles/10.1186/2046-9063-9-17}, author = {Bozo-Hurtado, Lorelei and Garc{\'\i}a-Amado, M and Chistoserdov, Andrei and Varela, Ramon and Narvaez, J and Rita R Colwell and Su{\'a}rez, Paula} } @article {20368, title = {Ocean Warming and Spread of Pathogenic Vibrios in the Aquatic Environment}, journal = {Microbial Ecology}, year = {2013}, month = {Jan-05-2013}, pages = {817 - 825}, abstract = {Vibrios are among the most common bacteria that inhabit surface waters throughout the world and are responsible for a number of severe infections both in humans and animals. Several reports recently showed that human Vibrio illnesses are increasing worldwide including fatal acute diarrheal diseases, such as cholera, gastroenteritis, wound infections, and septicemia. Many scientists believe this increase may be associated with global warming and rise in sea surface temperature (SST), although not enough evidence is available to support a causal link between emergence of Vibrio infections and climate warming. The effect of increased SST in promoting spread of vibrios in coastal and brackish waters is considered a causal factor explaining this trend. Field and laboratory studies carried out over the past 40 years supported this hypothesis, clearly showing temperature promotes Vibrio growth and persistence in the aquatic environment. Most recently, a long-term retrospective microbiological study carried out in the coastal waters of the southern North Sea provided the first experimental evidence for a positive and significant relationship between SST and Vibrio occurrence over a multidecadal time scale. As a future challenge, macroecological studies of the effects of ocean warming on Vibrio persistence and spread in the aquatic environment over large spatial and temporal scales would conclusively support evidence acquired to date combined with studies of the impact of global warming on epidemiologically relevant variables, such as host susceptibility and exposure.
Assessing a causal link between ongoing climate change and enhanced growth and spread of vibrios and related illness is expected to improve forecasting and mitigation of future outbreaks associated with these pathogens.}, issn = {0095-3628}, doi = {10.1007/s00248-012-0163-2}, url = {http://link.springer.com/10.1007/s00248-012-0163-2}, author = {Vezzulli, Luigi and Rita R Colwell and Pruzzo, Carla} } @article {19590, title = {Optimizing Abstract Abstract Machines}, journal = {arXiv:1211.3722 [cs]}, year = {2013}, note = {Comment: Proceedings of the International Conference on Functional Programming 2013 (ICFP 2013). Boston, Massachusetts. September, 2013}, month = {2013///}, abstract = {The technique of abstracting abstract machines (AAM) provides a systematic approach for deriving computable approximations of evaluators that are easily proved sound. This article contributes a complementary step-by-step process for subsequently going from a naive analyzer derived under the AAM approach, to an efficient and correct implementation. The end result of the process is a two to three order-of-magnitude improvement over the systematically derived analyzer, making it competitive with hand-optimized implementations that compute fundamentally less precise results.}, keywords = {Computer Science - Programming Languages, F.3.2}, url = {http://arxiv.org/abs/1211.3722}, author = {Johnson, J. Ian and Labich, Nicholas and Might, Matthew and David Van Horn} } @article {19372, title = {Parallel geometric classification of stem cells by their three-dimensional morphology}, journal = {Computational Science \& Discovery}, volume = {6}, year = {2013}, month = {01/2013}, pages = {015007}, doi = {10.1088/1749-4699/6/1/015007}, author = {Juba, Derek and Cardone, Antonio and Ip, Cheuk Yiu and Simon Jr, Carl G and Tison, Christopher K. and Kumar, Girish and Brady, Mary and Varshney, Amitabh} } @inbook {19641, title = {On the Centrality of Off-Line E-Cash to Concrete Partial Information Games}, booktitle = {Security and Cryptography for Networks}, series = {Lecture Notes in Computer Science}, year = {2012}, month = {2012/01/01/}, pages = {264 - 280}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Cryptography has developed numerous protocols for solving {\textquotedblleft}partial information games{\textquotedblright} that are seemingly paradoxical. Some protocols are generic (e.g., secure multi-party computation) and others, due to the importance of the scenario they represent, are designed to solve a concrete problem directly. Designing efficient and secure protocols for (off-line) e-cash, e-voting, and e-auction is among the most heavily researched concrete problems, representing various settings where privacy and correctness of the procedure are highly important. In this work, we initiate the exploration of the relationships among e-cash, e-voting and e-auction in the universal composability (UC) framework, by considering general variants of the three problems. In particular, we first define ideal functionalities for e-cash, e-voting, and e-auction, and then give a construction of a protocol that UC-realizes the e-voting (resp., e-auction) functionality in the e-cash hybrid model. This (black-box) reducibility demonstrates the centrality of off-line e-cash and implies that designing a solution to e-cash may bear fruit in other areas.
Constructing a solution to one protocol problem based on a second protocol problem has been traditional in cryptography, but typically has concentrated on building complex protocols on simple primitives (e.g., secure multi-party computation from Oblivious Transfer, signatures from one-way functions, etc.). The novelty here is reducibility among mature protocols and using the ideal functionality as a design tool in realizing other ideal functionalities. We suggest this new approach, and we only consider the very basic general properties from the various primitives to demonstrate its viability. Namely, we only consider the basic coin e-cash model, e-voting that is correct and private and relies on trusted registration, and e-auction relying on a trusted auctioneer. Naturally, relationships among protocols with further properties (i.e., extended functionalities), using the approach advocated herein, are left as open questions.}, keywords = {Computer Appl. in Administrative Data Processing, Computer Communication Networks, Data Encryption, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-32927-2, 978-3-642-32928-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-32928-9_15}, author = {Choi, Seung Geol and Dana Dachman-Soled and Yung, Moti}, editor = {Visconti, Ivan and Prisco, Roberto De} } @inbook {19634, title = {Efficient Password Authenticated Key Exchange via Oblivious Transfer}, booktitle = {Public Key Cryptography {\textendash} PKC 2012}, series = {Lecture Notes in Computer Science}, year = {2012}, month = {2012/01/01/}, pages = {449 - 466}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a new framework for constructing efficient password authenticated key exchange (PAKE) protocols based on oblivious transfer (OT). Using this framework, we obtain: (1) an efficient and simple UC-secure PAKE protocol that is secure against adaptive corruptions without erasures; and (2) efficient and simple PAKE protocols under the Computational Diffie-Hellman (CDH) assumption and the hardness of factoring. (Previous efficient constructions rely on hash proof systems, which appear to be inherently limited to decisional assumptions.) All of our constructions assume a common reference string (CRS) but do not rely on random oracles.}, keywords = {adaptive security, Algorithm Analysis and Problem Complexity, Computer Communication Networks, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, oblivious transfer, Password Authenticated Key Exchange, search assumptions, Systems and Data Security, UC security}, isbn = {978-3-642-30056-1, 978-3-642-30057-8}, url = {http://link.springer.com/chapter/10.1007/978-3-642-30057-8_27}, author = {Canetti, Ran and Dana Dachman-Soled and Vaikuntanathan, Vinod and Wee, Hoeteck}, editor = {Fischlin, Marc and Buchmann, Johannes and Manulis, Mark} } @article {14484, title = {The first international workshop on entity-oriented search (EOS)}, journal = {SIGIR Forum}, volume = {45}, year = {2012}, month = {2012/01//}, pages = {43 - 50}, abstract = {The First International Workshop on Entity-Oriented Search (EOS) was held on July 28, 2011 in Beijing, China, in conjunction with the 34th Annual International ACM SIGIR Conference (SIGIR 2011).
The objective for the workshop was to bring together academic researchers and industry practitioners working on entity-oriented search to discuss tasks and challenges, and to uncover the next frontiers for academic research on the topic. The workshop program accommodated two invited talks, eleven refereed papers divided into three technical paper sessions, and a group discussion.}, issn = {0163-5840}, doi = {10.1145/2093346.2093353}, url = {http://doi.acm.org/10.1145/2093346.2093353}, author = {Balog, Krisztian and de Vries, Arjen P. and Serdyukov, Pavel and Wen, Ji-Rong} } @article {19718, title = {Genomic insights to SAR86, an abundant and uncultivated marine bacterial lineage}, journal = {The ISME Journal}, volume = {6}, year = {2012}, month = {2012 Jun}, pages = {1186 - 1199}, abstract = {
Bacteria in the 16S rRNA clade SAR86 are among the most abundant uncultivated constituents of microbial assemblages in the surface ocean for which little genomic information is currently available. Bioinformatic techniques were used to assemble two nearly complete genomes from marine metagenomes, and single-cell sequencing provided two more partial genomes. Recruitment of metagenomic data shows that these SAR86 genomes substantially increase our knowledge of non-photosynthetic bacteria in the surface ocean. Phylogenomic analyses establish SAR86 as a basal and divergent lineage of γ-proteobacteria, and the individual genomes display a temperature-dependent distribution. Modestly sized at 1.25-1.7 Mbp, the SAR86 genomes lack several pathways for amino-acid and vitamin synthesis as well as sulfate reduction, trends commonly observed in other abundant marine microbes. SAR86 appears to be an aerobic chemoheterotroph with the potential for proteorhodopsin-based ATP generation, though the apparent lack of a retinal biosynthesis pathway may require it to scavenge exogenously derived pigments to utilize proteorhodopsin. The genomes contain an expanded capacity for the degradation of lipids and carbohydrates acquired using a wealth of TonB-dependent outer membrane receptors. Like the abundant planktonic marine bacterial clade SAR11, SAR86 exhibits metabolic streamlining, but also a distinct carbon compound specialization, possibly avoiding competition.
}, keywords = {Computational Biology, Gammaproteobacteria, Genome, Bacterial, Genomic Library, metagenomics, Oceans and Seas, Phylogeny, plankton, Rhodopsin, RNA, Ribosomal, 16S, Seawater}, issn = {1751-7370}, doi = {10.1038/ismej.2011.189}, author = {Dupont, Chris L and Rusch, Douglas B and Yooseph, Shibu and Lombardo, Mary-Jane and Richter, R Alexander and Valas, Ruben and Novotny, Mark and Yee-Greenbaum, Joyclyn and Jeremy D Selengut and Haft, Dan H and Halpern, Aaron L and Lasken, Roger S and Nealson, Kenneth and Friedman, Robert and Venter, J Craig} } @article {19129, title = {Identification of Coli Surface Antigen 23, a Novel Adhesin of Enterotoxigenic Escherichia coli}, journal = {Infection and Immunity}, volume = {80}, year = {2012}, month = {2012}, pages = {2791 - 2801}, abstract = {Enterotoxigenic Escherichia coli (ETEC) is an important cause of diarrhea, mainly in developing countries. Although there are 25 different ETEC adhesins described in strains affecting humans, between 15\% and 50\% of the clinical isolates from different geographical regions are negative for these adhesins, suggesting that additional unidentified adhesion determinants might be present. Here, we report the discovery of Coli Surface Antigen 23 (CS23), a novel adhesin expressed by an ETEC serogroup O4 strain (ETEC 1766a), which was negative for the previously known ETEC adhesins, although it has the ability to adhere to Caco-2 cells. CS23 is encoded by an 8.8-kb locus which contains 9 open reading frames (ORFs), 7 of them sharing significant identity with genes required for assembly of K88-related fimbriae. This gene locus, named aal (adhesion-associated locus), is required for the adhesion ability of ETEC 1766a and was able to confer this adhesive phenotype to a nonadherent E. coli HB101 strain. The CS23 major structural subunit, AalE, shares limited identity with known pilin proteins, and it is more closely related to the CS13 pilin protein CshE, carried by human ETEC strains. Our data indicate that CS23 is a new member of the diverse adhesin repertoire used by ETEC strains.}, author = {Del Canto, F. and Botkin, D.J. and Valenzuela, P. and Popov, V. and Ruiz-Perez, F. and Nataro, J.P. and Levine, M.M. and Stine, O.C. and Pop, Mihai and Torres, A.G. and others} } @article {19589, title = {Introspective Pushdown Analysis of Higher-Order Programs}, journal = {arXiv:1207.1813 [cs]}, year = {2012}, note = {Comment: Proceedings of the 17th ACM SIGPLAN International Conference on Functional Programming, 2012, ACM}, month = {2012/07/07/}, abstract = {In the static analysis of functional programs, pushdown flow analysis and abstract garbage collection skirt just inside the boundaries of soundness and decidability. Alone, each method reduces analysis times and boosts precision by orders of magnitude. This work illuminates and conquers the theoretical challenges that stand in the way of combining the power of these techniques. The challenge in marrying these techniques is not subtle: computing the reachable control states of a pushdown system relies on limiting access during transition to the top of the stack; abstract garbage collection, on the other hand, needs full access to the entire stack to compute a root set, just as concrete collection does. \emph{Introspective} pushdown systems resolve this conflict.
Introspective pushdown systems provide enough access to the stack to allow abstract garbage collection, but they remain restricted enough to compute control-state reachability, thereby enabling the sound and precise product of pushdown analysis and abstract garbage collection. Experiments reveal synergistic interplay between the techniques, and the fusion demonstrates {\textquotedblleft}better-than-both-worlds{\textquotedblright} precision.}, keywords = {Computer Science - Programming Languages, D.3.4, F.3.2}, url = {http://arxiv.org/abs/1207.1813}, author = {Earl, Christopher and Sergey, Ilya and Might, Matthew and David Van Horn} } @article {18540, title = {Lithium: Event-Driven Network Control}, volume = {GT-CS-12-03}, year = {2012}, month = {2012///}, institution = {Georgia Institute of Technology}, abstract = {This paper introduces event-driven network control, a network control framework that makes networks easier to manage by automating many tasks that must currently be performed by manually modifying low-level, distributed, and complex device configuration. We identify four policy domains that inherently capture many events: time, user, history, and traffic flow. We then present Lithium, an event-driven network control framework that can implement policies expressed using these domains. Lithium can support policies that automatically react to a wide range of events, from fluctuations in traffic volumes to changes in the time of day. Lithium allows network operators to specify networkwide policies in terms of a high-level, event-driven policy model, as opposed to configuring individual network devices with low-level commands. To show that Lithium is practical, general, and applicable in different types of network scenarios, we have deployed Lithium in both a campus network and a home network and used it to implement more flexible and dynamic network policies. We also perform evaluations to show that Lithium introduces negligible overhead beyond a conventional OpenFlow-based control framework.}, url = {http://hdl.handle.net/1853/43377}, author = {Kim, H. and Voellmy, A. and Burnett, S. and Feamster, Nick and Clark, R.} } @article {20381, title = {Long-term effects of ocean warming on the prokaryotic community: evidence from the vibrios}, journal = {The ISME Journal}, volume = {6}, year = {2012}, month = {Jan-01-2012}, pages = {21 - 30}, abstract = {The long-term effects of ocean warming on prokaryotic communities are unknown because of the lack of historical data. We overcame this gap by applying a retrospective molecular analysis to the bacterial community on formalin-fixed samples from the historical Continuous Plankton Recorder archive, which is one of the longest and most geographically extensive collections of marine biological samples in the world. We showed that during the last half century, ubiquitous marine bacteria of the Vibrio genus, including Vibrio cholerae, increased in dominance within the plankton-associated bacterial community of the North Sea, where an unprecedented increase in bathing infections related to these bacteria was recently reported.
Among environmental variables, increased sea surface temperature explained 45\% of the variance in Vibrio data, supporting the view that ocean warming is favouring the spread of vibrios and may be the cause of the globally increasing trend in their associated diseases.}, issn = {1751-7362}, doi = {10.1038/ismej.2011.89}, url = {http://www.nature.com/articles/ismej201189}, author = {Vezzulli, Luigi and Brettar, Ingrid and Pezzati, Elisabetta and Reid, Philip C and Rita R Colwell and H{\"o}fle, Manfred G and Pruzzo, Carla} } @conference {17630, title = {Networking lessons: From computers to water}, booktitle = {2012 Fourth International Conference on Communication Systems and Networks (COMSNETS)}, year = {2012}, month = {2012/01/03/7}, pages = {1 - 6}, publisher = {IEEE}, organization = {IEEE}, abstract = {As an instance of using IT to green non-IT domains, we consider the question whether lessons from computer networking can be applied in water distribution networks to improve their energy footprint and/or efficiency. Our contributions in this work are: (i) we identify several areas where principles from computer networking can be used to better water distribution; (ii) we focus on a specific infrastructure enhancement problem caused by increasing demands on a water utility network and present solutions (similar to those used in computer networks) that optimize both operational expenditure and total cost of ownership. We validate our solutions through simulations and compare their efficacy against techniques that are traditionally used in enhancing water networks. Our results show that lessons from computer networks can help in enhancing water networks.}, keywords = {Biological cells, computer networking, Computer networks, COMPUTERS, energy footprint, Genetic algorithms, green nonIT domains, infrastructure enhancement problem, Internet, network theory (graphs), networking lessons, planning, pricing, water distribution networks, water supply, water utility network}, isbn = {978-1-4673-0296-8}, doi = {10.1109/COMSNETS.2012.6151373}, author = {Narayanan,I. and Sarangan,V. and Vasan, A. and Srinivasan, Aravind and Sivasubramaniam,A.} } @article {20385, title = {Role of GbpA protein, an important virulence-related colonization factor, for Vibrio cholerae{\textquoteright}s survival in the aquatic environment}, journal = {Environmental Microbiology Reports}, year = {2012}, month = {Jan-08-2012}, pages = {439 - 445}, abstract = {Vibrio cholerae N-acetyl glucosamine-binding protein A (GbpA) is a chitin binding protein and a virulence factor involved in the colonization of human intestine. We investigated the distribution and genetic variations of gbpA in 488 V. cholerae strains of environmental and clinical origin, belonging to different serogroups and biotypes. We found that the gene is consistently present and highly conserved including an environmental V. cholerae-related strain of ancestral origin. The gene was also consistently expressed in a number of representative V. cholerae strains cultured in laboratory aquatic microcosms under conditions simulating those found in temperate marine environments. Functional analysis carried out on V. cholerae O1 El Tor N16961 showed that GbpA is not involved in adhesion to inorganic surfaces but promotes interaction with environmental biotic substrates (plankton and bivalve hepatopancreas cells) representing known marine reservoir or host for the bacterium. 
It is suggested that the ability of GbpA to colonize human intestinal cells most probably originated from its primary function in the aquatic environment.}, doi = {10.1111/j.1758-2229.2012.00356.x}, url = {http://doi.wiley.com/10.1111/j.1758-2229.2012.00356.x}, author = {Stauder, Monica and Huq, Anwar and Pezzati, Elisabetta and Grim, Christopher J. and Ramoino, Paola and Pane, Luigi and Rita R Colwell and Pruzzo, Carla and Vezzulli, Luigi} } @article {17929, title = {Speeding Up Particle Trajectory Simulations under Moving Force Fields using GPUs}, journal = {Journal of Computing and Information Science in Engineering}, year = {2012}, month = {2012///}, abstract = {In this paper, we introduce a GPU-based framework for simulating particle trajectories under both static and dynamic force fields. By exploiting the highly parallel nature of the problem and making efficient use of the available hardware, our simulator exhibits a significant speedup over its CPU-based analog. We apply our framework to a specific experimental simulation: the computation of trapping probabilities associated with micron-sized silica beads in optical trapping workbenches. When evaluating large numbers of trajectories (4096), we see an approximately 356-fold speedup of the GPU-based simulator over its CPU-based counterpart.}, author = {Patro, R. and Dickerson, J. P. and Bista, S. and Gupta, S. K. and Varshney, Amitabh} } @article {19717, title = {Whole genome analysis of Leptospira licerasiae provides insight into leptospiral evolution and pathogenicity}, journal = {PLoS Negl Trop Dis}, volume = {6}, year = {2012}, month = {2012}, pages = {e1853}, abstract = {
The whole genome analysis of two strains of the first intermediately pathogenic leptospiral species to be sequenced (Leptospira licerasiae strains VAR010 and MMD0835) provides insight into their pathogenic potential and deepens our understanding of leptospiral evolution. Comparative analysis of eight leptospiral genomes shows the existence of a core leptospiral genome comprising 1547 genes and 452 conserved genes restricted to infectious species (including L. licerasiae) that are likely to be pathogenicity-related. Comparisons of the functional content of the genomes suggest that L. licerasiae retains several proteins related to nitrogen, amino acid and carbohydrate metabolism which might help to explain why these Leptospira grow well in artificial media compared with pathogenic species. L. licerasiae strains VAR010(T) and MMD0835 possess two prophage elements. While one element is circular and shares homology with LE1 of L. biflexa, the second is cryptic and homologous to a previously identified but unnamed region in L. interrogans serovars Copenhageni and Lai. We also report a unique O-antigen locus in L. licerasiae comprised of a 6-gene cluster that is unexpectedly short compared with L. interrogans in which analogous regions may include >90 such genes. Sequence homology searches suggest that these genes were acquired by lateral gene transfer (LGT). Furthermore, seven putative genomic islands ranging in size from 5 to 36 kb are present, also suggestive of antecedent LGT. How Leptospira become naturally competent remains to be determined, but considering the phylogenetic origins of the genes comprising the O-antigen cluster and other putative laterally transferred genes, L. licerasiae must be able to exchange genetic material with non-invasive environmental bacteria. The data presented here demonstrate that L. licerasiae is genetically more closely related to pathogenic than to saprophytic Leptospira and provide insight into the genomic bases for its infectiousness and its unique antigenic characteristics.
}, keywords = {DNA, Bacterial, Evolution, Molecular, Gene Transfer, Horizontal, Genome, Bacterial, Genomic islands, HUMANS, Leptospira, Molecular Sequence Data, Multigene Family, Prophages, Sequence Analysis, DNA, Virulence factors}, issn = {1935-2735}, doi = {10.1371/journal.pntd.0001853}, author = {Ricaldi, Jessica N and Fouts, Derrick E and Jeremy D Selengut and Harkins, Derek M and Patra, Kailash P and Moreno, Angelo and Lehmann, Jason S and Purushe, Janaki and Sanka, Ravi and Torres, Michael and Webster, Nicholas J and Vinetz, Joseph M and Matthias, Michael A} } @article {19583, title = {Abstracting Abstract Machines: A Systematic Approach to Higher-Order Program Analysis}, journal = {arXiv:1105.1743 [cs]}, year = {2011}, note = {Comment: Communications of the ACM, Research Highlight}, month = {2011/05/09/}, abstract = {Predictive models are fundamental to engineering reliable software systems. However, designing conservative, computable approximations for the behavior of programs (static analyses) remains a difficult and error-prone process for modern high-level programming languages. What analysis designers need is a principled method for navigating the gap between semantics and analytic models: analysis designers need a method that tames the interaction of complex language features such as higher-order functions, recursion, exceptions, continuations, objects and dynamic allocation. We contribute a systematic approach to program analysis that yields novel and transparently sound static analyses. Our approach relies on existing derivational techniques to transform high-level language semantics into low-level deterministic state-transition systems (with potentially infinite state spaces). We then perform a series of simple machine refactorings to obtain a sound, computable approximation, which takes the form of a non-deterministic state-transition system with a finite state space. The approach scales up uniformly to enable program analysis of realistic language features, including higher-order functions, tail calls, conditionals, side effects, exceptions, first-class continuations, and even garbage collection.}, keywords = {Computer Science - Programming Languages, F.3.2, F.4.1}, url = {http://arxiv.org/abs/1105.1743}, author = {David Van Horn and Might, Matthew} } @conference {13069, title = {AVSS 2011 demo session: A large-scale benchmark dataset for event recognition in surveillance video}, booktitle = {Advanced Video and Signal-Based Surveillance (AVSS), 2011 8th IEEE International Conference on}, year = {2011}, month = {2011/09/30/2}, pages = {527 - 528}, abstract = {We introduce to the surveillance community the VIRAT Video Dataset[1], which is a new large-scale surveillance video dataset designed to assess the performance of event recognition algorithms in realistic scenes.}, doi = {10.1109/AVSS.2011.6027400}, author = {Oh, Sangmin and Hoogs, Anthony and Perera, Amitha and Cuntoor, Naresh and Chen, Chia-Chih and Lee, Jong Taek and Mukherjee, Saurajit and Aggarwal, JK and Lee, Hyungtae and Davis, Larry S.
and Swears, Eran and Wang, Xiaoyang and Ji, Qiang and Reddy, Kishore and Shah, Mubarak and Vondrick, Carl and Pirsiavash, Hamed and Ramanan, Deva and Yuen, Jenny and Torralba, Antonio and Song, Bi and Fong, Anesco and Roy-Chowdhury, Amit and Desai, Mita} } @article {16247, title = {Bacillus Anthracis Comparative Genome Analysis in Support of the Amerithrax Investigation}, journal = {Proceedings of the National Academy of Sciences}, volume = {108}, year = {2011}, month = {2011/03/22/}, pages = {5027 - 5032}, abstract = {Before the anthrax letter attacks of 2001, the developing field of microbial forensics relied on microbial genotyping schemes based on a small portion of a genome sequence. Amerithrax, the investigation into the anthrax letter attacks, applied high-resolution whole-genome sequencing and comparative genomics to identify key genetic features of the letters{\textquoteright} Bacillus anthracis Ames strain. During systematic microbiological analysis of the spore material from the letters, we identified a number of morphological variants based on phenotypic characteristics and the ability to sporulate. The genomes of these morphological variants were sequenced and compared with that of the B. anthracis Ames ancestor, the progenitor of all B. anthracis Ames strains. Through comparative genomics, we identified four distinct loci with verifiable genetic mutations. Three of the four mutations could be directly linked to sporulation pathways in B. anthracis and more specifically to the regulation of the phosphorylation state of Spo0F, a key regulatory protein in the initiation of the sporulation cascade, thus linking phenotype to genotype. None of these variant genotypes were identified in single-colony environmental B. anthracis Ames isolates associated with the investigation. These genotypes were identified only in B. anthracis morphotypes isolated from the letters, indicating that the variants were not prevalent in the environment, not even the environments associated with the investigation. This study demonstrates the forensic value of systematic microbiological analysis combined with whole-genome sequencing and comparative genomics.}, isbn = {0027-8424, 1091-6490}, doi = {10.1073/pnas.1016657108}, url = {http://www.pnas.org/content/108/12/5027}, author = {Rasko, David A and Worsham, Patricia L and Abshire, Terry G and Stanley, Scott T and Bannan, Jason D and Wilson, Mark R and Langham, Richard J and Decker, R. Scott and Jiang, Lingxia and Read, Timothy D. and Phillippy, Adam M and Salzberg, Steven L. and Pop, Mihai and Van Ert, Matthew N and Kenefic, Leo J and Keim, Paul S and Fraser-Liggett, Claire M and Ravel, Jacques} } @conference {17982, title = {Brief announcement: better speedups for parallel max-flow}, booktitle = {Proceedings of the 23rd ACM symposium on Parallelism in algorithms and architectures}, year = {2011}, month = {2011///}, pages = {131 - 134}, abstract = {We present a parallel solution to the Maximum-Flow (Max-Flow) problem, suitable for a modern many-core architecture. We show that by starting from a PRAM algorithm, following an established {\textquotedblleft}programmer{\textquoteright}s workflow{\textquotedblright} and targeting XMT, a PRAM-inspired many-core architecture, we achieve significantly higher speed-ups than previous approaches. Comparison with the fastest known serial max-flow implementation on a modern CPU demonstrates for the first time potential for orders-of-magnitude performance improvement for Max-Flow.
Using XMT, the PRAM Max-Flow algorithm is also much easier to program than for other parallel platforms, contributing a powerful example toward dual validation of both PRAM algorithmics and XMT.}, author = {Caragea, G.C. and Vishkin, Uzi} } @article {13280, title = {Computing Morse decompositions for triangulated terrains: an analysis and an experimental evaluation}, journal = {Image Analysis and Processing{\textendash}ICIAP 2011}, year = {2011}, month = {2011///}, pages = {565 - 574}, abstract = {We consider the problem of extracting the morphology of a terrain discretized as a triangle mesh. We discuss first how to transpose Morse theory to the discrete case in order to describe the morphology of triangulated terrains. We review algorithms for computing Morse decompositions, which we have adapted and implemented for triangulated terrains. We compare the Morse decompositions produced by them, by considering two different metrics.}, doi = {10.1007/978-3-642-24085-0_58}, author = {Vitali, M. and De Floriani, Leila and Magillo, P.} } @article {18558, title = {Countering Botnets: Anomaly-Based Detection, Comprehensive Analysis, and Efficient Mitigation}, year = {2011}, month = {2011/05//}, institution = {GEORGIA TECH RESEARCH CORP ATLANTA}, abstract = {We cover five general areas in this paper: (1) botnet detection, (2) botnet analysis, (3) botnet mitigation, (4) add-on tasks to the original contract, including the Conficker Working Group Lessons Learned, Layer-8 Exploration of Botnet Organization, and DREN research, and (5) commercialization. We have successfully developed new botnet detection and analysis capabilities in this project. These algorithms have been evaluated using real-world data, and have been put into actual, deployed systems. The most significant technical developments include a new dynamic reputation system for DNS domains, a scalable anomaly detection system for botnet detection in very large networks, and a transparent malware analysis system. In addition, on several occasions we have used our botnet data and analysis to help law enforcement agencies arrest botmasters. We also have had great success transitioning technologies to commercial products that are now used by government agencies, ISPs, and major corporations.}, keywords = {*ELECTRONIC SECURITY, *INFORMATION SECURITY, *INTERNET, *INTRUSION DETECTION(COMPUTERS), algorithms, BGP ROUTE INJECTION, BGP(BORDER GATEWAY PROTOCOLS), BOTNET DETECTION, BOTNET TRACEBACK AND ATTRIBUTION, BOTNETS(MALWARE), CLIENT SERVER SYSTEMS, COMMUNICATIONS PROTOCOLS, COMPUTER PROGRAMMING AND SOFTWARE, COMPUTER SYSTEMS MANAGEMENT AND STANDARDS, CYBER ATTACKS, CYBER SECURITY, CYBERNETICS, CYBERTERRORISM, CYBERWARFARE, DATA PROCESSING SECURITY, DNS BASED MONITORING, DNS BASED REDIRECTION, DNS(DOMAIN NAME SYSTEMS), INFORMATION SCIENCE, INTERNET BROWSERS, ISP(INTERNET SERVICE PROVIDERS), MALWARE, MALWARE ANALYSIS, Online Systems, WUAFRLDHS1BOTN}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA543919}, author = {Lee, Wenke and Dagon, David and Giffin, Jon and Feamster, Nick and Ollman, Gunter and Westby, Jody and Wesson, Rick and Vixie, Paul} } @conference {19441, title = {A Dive into Online Community Properties}, booktitle = {CSCW {\textquoteright}11}, series = {CSCW {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {725 - 728}, publisher = {ACM}, organization = {ACM}, abstract = {As digital communities grow in size, their feature sets also grow with them.
Different users have different experiences with the same tools and communities. Enterprises and other organizations seeking to leverage these communities need a straightforward way to analyze and compare a variety of salient attributes of these communities. We describe a taxonomy and tool for crowd-sourcing user-based evaluations of enterprise-relevant attributes of digital communities and present the results of a small-scale study on its usefulness and stability across multiple raters.}, keywords = {enterprise, Online communities, Taxonomy, Visualization}, isbn = {978-1-4503-0556-3}, url = {http://doi.acm.org/10.1145/1958824.1958955}, author = {Wagstrom, Patrick and Martino, Jacquelyn and von Kaenel, Juerg and Marshini Chetty and Thomas, John and Jones, Lauretta} } @article {19580, title = {A family of abstract interpretations for static analysis of concurrent higher-order programs}, journal = {arXiv:1103.5167 [cs]}, year = {2011}, note = {Comment: The 18th International Static Analysis Symposium (SAS 2011)}, month = {2011/03/26/}, abstract = {We develop a framework for computing two foundational analyses for concurrent higher-order programs: (control-)flow analysis (CFA) and may-happen-in-parallel analysis (MHP). We pay special attention to the unique challenges posed by the unrestricted mixture of first-class continuations and dynamically spawned threads. To set the stage, we formulate a concrete model of concurrent higher-order programs: the P(CEK*)S machine. We find that the systematic abstract interpretation of this machine is capable of computing both flow and MHP analyses. Yet, a closer examination finds that the precision for MHP is poor. As a remedy, we adapt a shape-analytic technique {\textemdash} singleton abstraction {\textemdash} to dynamically spawned threads (as opposed to objects in the heap). We then show that if MHP analysis is not of interest, we can substantially accelerate the computation of flow analysis alone by collapsing thread interleavings with a second layer of abstraction.}, keywords = {Computer Science - Programming Languages}, url = {http://arxiv.org/abs/1103.5167}, author = {Might, Matthew and David Van Horn} } @article {12454, title = {A Fast Bilinear Structure from Motion Algorithm Using a Video Sequence and Inertial Sensors}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {33}, year = {2011}, month = {2011/01//}, pages = {186 - 193}, abstract = {In this paper, we study the benefits of the availability of a specific form of additional information {\textemdash} the vertical direction (gravity) and the height of the camera, both of which can be conveniently measured using inertial sensors {\textemdash} and a monocular video sequence for 3D urban modeling. We show that in the presence of this information, the SfM equations can be rewritten in a bilinear form. This allows us to derive a fast, robust, and scalable SfM algorithm for large-scale applications. The SfM algorithm developed in this paper is experimentally demonstrated to have favorable properties compared to the sparse bundle adjustment algorithm. We provide experimental evidence indicating that the proposed algorithm converges in many cases to solutions with lower error than state-of-the-art implementations of bundle adjustment. We also demonstrate that for the case of large reconstruction problems, the proposed algorithm takes less time to reach its solution compared to bundle adjustment.
We also present SfM results using our algorithm on the Google StreetView research data set.}, keywords = {3D urban modeling, algorithms, Artificial intelligence, CAMERAS, computer vision, Convergence, fast bilinear structure, Google StreetView research data set, Image Interpretation, Computer-Assisted, Image reconstruction, Image sensors, Image sequences, Imaging, Three-Dimensional, inertial sensors, Information Storage and Retrieval, Linear systems, minimization, MOTION, motion algorithm, Motion estimation, multiple view geometry, Pattern Recognition, Automated, Sensors, SfM equations, sparse bundle adjustment algorithm, structure from motion, Three dimensional displays, vertical direction, Video Recording, video sequence, video signal processing}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2010.163}, author = {Ramachandran, M. and Veeraraghavan, A. and Chellappa, Rama} } @conference {17175, title = {From slacktivism to activism: participatory culture in the age of social media}, booktitle = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {819 - 822}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Social networking sites (e.g. Facebook), microblogging services (e.g. Twitter), and content-sharing sites (e.g. YouTube and Flickr) have introduced the opportunity for wide-scale, online social participation. Visibility of national and international priorities such as public health, political unrest, disaster relief, and climate change has increased, yet we know little about the benefits {\textemdash} and possible costs {\textemdash} of engaging in social activism via social media. These powerful social issues introduce a need for scientific research into technology-mediated social participation. What are the actual, tangible benefits of {\textquotedblleft}greening{\textquotedblright} Twitter profile pictures in support of the Iranian elections? Does cartooning a Facebook profile picture really raise awareness of child abuse? Are there unintended negative effects through low-risk, low-cost technology-mediated participation? And, is there a difference {\textemdash} in both outcome and engagement level {\textemdash} between different types of online social activism? This SIG will investigate technology-mediated social participation through a critical lens, discussing both the potential positive and negative outcomes of such participation. Approaches to designing for increased participation, evaluating effects of participation, and next steps in scientific research directions will be discussed.}, keywords = {activism, change, design, participation, slacktivism, social media}, isbn = {978-1-4503-0268-5}, doi = {10.1145/1979742.1979543}, url = {http://doi.acm.org/10.1145/1979742.1979543}, author = {Rotman, Dana and Vieweg, Sarah and Yardi, Sarita and Chi, Ed and Preece, Jenny and Shneiderman, Ben and Pirolli, Peter and Glaisyer, Tom} } @article {14619, title = {Genome-Wide Survey of Natural Selection on Functional, Structural, and Network Properties of Polymorphic Sites in Saccharomyces paradoxus}, journal = {Molecular Biology and Evolution}, volume = {28}, year = {2011}, month = {2011/09/01/}, pages = {2615 - 2627}, abstract = {Background. To characterize the genetic basis of phenotypic evolution, numerous studies have identified individual genes that have likely evolved under natural selection.
However, phenotypic changes may represent the cumulative effect of similar evolutionary forces acting on functionally related groups of genes. Phylogenetic analyses of divergent yeast species have identified functional groups of genes that have evolved at significantly different rates, suggestive of differential selection on the functional properties. However, due to environmental heterogeneity over long evolutionary timescales, selection operating within a single lineage may be dramatically different and may not be detectable via interspecific comparisons alone. Moreover, interspecific studies typically quantify selection on protein-coding regions using the dN/dS ratio, which cannot be extended easily to study selection on noncoding regions or synonymous sites. Population genetics-based analysis of selection operating within a single lineage ameliorates these limitations. Findings. We investigated selection on several properties associated with genes, promoters, or polymorphic sites, by analyzing the derived allele frequency spectrum of single nucleotide polymorphisms (SNPs) in 28 strains of Saccharomyces paradoxus. We found evidence for significant differential selection between many functionally relevant categories of SNPs, underscoring the utility of function-centric approaches for discovering signatures of natural selection. When comparable, our findings are largely consistent with previous studies based on interspecific comparisons, with one notable exception: our study finds that mutations from an ancient amino acid to a relatively new amino acid are selectively disfavored, whereas interspecific comparisons have found selection against ancient amino acids. Several of our findings have not been addressed through prior interspecific studies: we find that synonymous mutations from preferred to unpreferred codons are selected against and that synonymous SNPs in the linker regions of proteins are relatively less constrained than those within protein domains. Conclusions. We present the first global survey of selection acting on various functional properties in S. paradoxus. We found that selection pressures previously detected over long evolutionary timescales have also shaped the evolution of S. paradoxus. Importantly, we also make novel discoveries that are unattainable via conventional interspecific analyses.}, keywords = {derived allele frequency, Evolution, natural selection, yeast}, isbn = {0737-4038, 1537-1719}, doi = {10.1093/molbev/msr085}, url = {http://mbe.oxfordjournals.org/content/28/9/2615}, author = {Vishnoi,Anchal and Sethupathy,Praveen and Simola,Daniel and Plotkin,Joshua B. and Hannenhalli, Sridhar} } @article {19588, title = {Higher-Order Symbolic Execution via Contracts}, journal = {arXiv:1103.1362 [cs]}, year = {2011}, month = {2011/03/07/}, abstract = {We present a new approach to automated reasoning about higher-order programs by extending symbolic execution to use behavioral contracts as symbolic values, enabling symbolic approximation of higher-order behavior. Our approach is based on the idea of an abstract reduction semantics that gives an operational semantics to programs with both concrete and symbolic components. Symbolic components are approximated by their contracts, and our semantics gives an operational interpretation of contracts-as-values. The result is an executable semantics that soundly predicts program behavior, including contract failures, for all possible instantiations of symbolic components.
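The key move, treating an unknown component as its contract, can be pictured concretely: a symbolic value carries the predicates known to hold of it, an application checks the domain contract, and the result is a fresh symbolic value refined by the range contract. A drastically simplified first-order sketch of that idea (plain Python; this is an illustration, not the paper's semantics):

    class Sym:
        """A symbolic value approximated by the predicates it satisfies."""
        def __init__(self, refinements=()):
            self.refinements = set(refinements)

    class SymFn:
        """An unknown function known only by its contract dom -> rng."""
        def __init__(self, dom, rng_pred):
            self.dom, self.rng_pred = dom, rng_pred
        def __call__(self, arg):
            if isinstance(arg, Sym):
                if self.dom not in arg.refinements:
                    raise AssertionError("possible contract violation (blame caller)")
            elif not self.dom(arg):
                raise AssertionError("contract violation (blame caller)")
            # The result is opaque but guaranteed to satisfy the range contract.
            return Sym({self.rng_pred})

    is_int = lambda x: isinstance(x, int)
    is_pos = lambda x: isinstance(x, int) and x > 0

    f = SymFn(is_int, is_pos)       # f: any function with contract is_int -> is_pos
    y = f(42)                       # concrete argument checked directly
    print(is_pos in y.refinements)  # True: y may flow where positivity is required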
We show that our approach scales to an expressive language of contracts including arbitrary programs embedded as predicates, dependent function contracts, and recursive contracts. Supporting this feature-rich language of specifications leads to powerful symbolic reasoning using existing program assertions. We then apply our approach to produce a verifier for contract correctness of components, including a sound and computable approximation to our semantics that facilitates fully automated contract verification. Our implementation is capable of verifying contracts expressed in existing programs, and of justifying valuable contract-elimination optimizations.}, keywords = {Computer Science - Programming Languages}, url = {http://arxiv.org/abs/1103.1362}, author = {Tobin-Hochstadt, Sam and David Van Horn} } @article {18514, title = {How Many Tiers? Pricing in the Internet Transit Market}, journal = {SIGCOMM-Computer Communication Review}, volume = {41}, year = {2011}, month = {2011///}, pages = {194 - 194}, abstract = {ISPs are increasingly selling {\textquotedblleft}tiered{\textquotedblright} contracts, which offer Internet connectivity to wholesale customers in bundles, at rates based on the cost of the links that the traffic in the bundle is traversing. Although providers have already begun to implement and deploy tiered pricing contracts, little is known about how to structure them. Although contracts that sell connectivity on finer granularities improve market efficiency, they are also more costly for ISPs to implement and more difficult for customers to understand. Our goal is to analyze whether current tiered pricing practices in the wholesale transit market yield optimal profits for ISPs and whether better bundling strategies might exist. In the process, we offer two contributions: (1) we develop a novel way of mapping traffic and topology data to a demand and cost model; and (2) we fit this model on three large real-world networks: a European transit ISP, a content distribution network, and an academic research network, and run counterfactuals to evaluate the effects of different bundling strategies. Our results show that the common ISP practice of structuring tiered contracts according to the cost of carrying the traffic flows (e.g., offering a discount for traffic that is local) can be suboptimal and that dividing contracts based on both traffic demand and the cost of carrying it into only three or four tiers yields near-optimal profit for the ISP.}, author = {Valancius,V. and Lumezanu,C. and Feamster, Nick and Johari,R. and Vazirani,V. V} } @conference {13074, title = {A large-scale benchmark dataset for event recognition in surveillance video}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on}, year = {2011}, month = {2011/06//}, pages = {3153 - 3160}, abstract = {We introduce a new large-scale video dataset designed to assess the performance of diverse visual event recognition algorithms with a focus on continuous visual event recognition (CVER) in outdoor areas with wide coverage. Previous datasets for action recognition are unrealistic for real-world surveillance because they consist of short clips showing one action by one individual [15, 8]. Datasets have been developed for movies [11] and sports [12], but these actions and scene conditions do not apply effectively to surveillance videos.
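Benchmarks of this kind are typically scored by matching detected event intervals against ground-truth intervals under a temporal-overlap threshold and reporting precision and recall. A minimal sketch of such a scorer (assumed conventions for illustration, not the benchmark's official protocol):

    def overlap_ratio(a, b):
        """Intersection-over-union of two time intervals (start, end)."""
        inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
        union = max(a[1], b[1]) - min(a[0], b[0])
        return inter / union if union > 0 else 0.0

    def precision_recall(detections, truths, thresh=0.5):
        """Greedy one-to-one matching of detections to ground truth."""
        unmatched = list(truths)
        tp = 0
        for d in detections:
            hit = next((t for t in unmatched if overlap_ratio(d, t) >= thresh), None)
            if hit is not None:
                unmatched.remove(hit)
                tp += 1
        prec = tp / len(detections) if detections else 0.0
        rec = tp / len(truths) if truths else 0.0
        return prec, rec

    print(precision_recall([(0, 10), (20, 25), (40, 50)],
                           [(1, 9), (21, 26), (60, 70)]))  # (0.667, 0.667)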
Our dataset consists of many outdoor scenes with actions occurring naturally by non-actors in continuously captured videos of the real world. The dataset includes large numbers of instances for 23 event types distributed throughout 29 hours of video. This data is accompanied by detailed annotations which include both moving object tracks and event examples, which will provide solid basis for large-scale evaluation. Additionally, we propose different types of evaluation modes for visual recognition tasks and evaluation metrics along with our preliminary experimental results. We believe that this dataset will stimulate diverse aspects of computer vision research and help us to advance the CVER tasks in the years ahead.}, keywords = {algorithm;evaluation, CVER, databases;, databases;video, dataset;moving, event, metrics;large-scale, object, recognition, recognition;diverse, recognition;video, scenes;surveillance, surveillance;visual, tasks;computer, tracks;outdoor, video, video;computer, vision;continuous, vision;image, visual}, doi = {10.1109/CVPR.2011.5995586}, author = {Oh,Sangmin and Hoogs, A. and Perera,A. and Cuntoor, N. and Chen,Chia-Chih and Lee,Jong Taek and Mukherjee,S. and Aggarwal, JK and Lee,Hyungtae and Davis, Larry S. and Swears,E. and Wang,Xioyang and Ji,Qiang and Reddy,K. and Shah,M. and Vondrick,C. and Pirsiavash,H. and Ramanan,D. and Yuen,J. and Torralba,A. and Song,Bi and Fong,A. and Roy-Chowdhury, A. and Desai,M.} } @article {12864, title = {Long-term effects of ocean warming on the prokaryotic community: evidence from the vibrios}, journal = {The ISME Journal}, volume = {6}, year = {2011}, month = {2011/07/14/}, pages = {21 - 30}, abstract = {The long-term effects of ocean warming on prokaryotic communities are unknown because of lack of historical data. We overcame this gap by applying a retrospective molecular analysis to the bacterial community on formalin-fixed samples from the historical Continuous Plankton Recorder archive, which is one of the longest and most geographically extensive collections of marine biological samples in the world. We showed that during the last half century, ubiquitous marine bacteria of the Vibrio genus, including Vibrio cholerae, increased in dominance within the plankton-associated bacterial community of the North Sea, where an unprecedented increase in bathing infections related to these bacteria was recently reported. Among environmental variables, increased sea surface temperature explained 45\% of the variance in Vibrio data, supporting the view that ocean warming is favouring the spread of vibrios and may be the cause of the globally increasing trend in their associated diseases.}, keywords = {ecophysiology, ecosystems, environmental biotechnology, geomicrobiology, ISME J, microbe interactions, microbial communities, microbial ecology, microbial engineering, microbial epidemiology, microbial genomics, microorganisms}, isbn = {1751-7362}, doi = {10.1038/ismej.2011.89}, url = {http://www.nature.com/ismej/journal/v6/n1/full/ismej201189a.html?WT.ec_id=ISMEJ-201201}, author = {Vezzulli,Luigi and Brettar,Ingrid and Pezzati,Elisabetta and Reid,Philip C. and Rita R Colwell and H{\"o}fle,Manfred G. 
and Pruzzo,Carla} } @article {17980, title = {A Low-Overhead Asynchronous Interconnection Network for GALS Chip Multiprocessors}, journal = {Computer-Aided Design of Integrated Circuits and Systems, IEEE Transactions on}, volume = {30}, year = {2011}, month = {2011/04//}, pages = {494 - 507}, abstract = {A new asynchronous interconnection network is introduced for globally-asynchronous locally-synchronous (GALS) chip multiprocessors. The network eliminates the need for global clock distribution, and can interface multiple synchronous timing domains operating at unrelated clock rates. In particular, two new highly-concurrent asynchronous components are introduced which provide simple routing and arbitration/merge functions. Post-layout simulations in identical commercial 90 nm technology indicate that comparable recent synchronous router nodes have 5.6-10.7 more energy per packet and 2.8-6.4 greater area than the new asynchronous nodes. Under random traffic, the network provides significantly lower latency and identical throughput over the entire operating range of the 800 MHz network and through mid-range traffic rates for the 1.36 GHz network, but with degradation at higher traffic rates. Preliminary evaluations are also presented for a mixed-timing (GALS) network in a shared-memory parallel architecture, running both random traffic and parallel benchmark kernels, as well as directions for further improvement.}, keywords = {1.36, 800, 90, architecture;size, architectures;shared, asynchronous, benchmark, chip, chips;multiprocessor, distribution, distribution;frequency, GALS, GHz;frequency, interconnection, kernel;post-layout, layout;clock, locally-synchronous, memory, MHz;globally-asynchronous, multiple, multiprocessor;clock, multiprocessor;interface, network;mixed-timing, network;network, networks;microprocessor, networks;network, nm;circuit, Parallel, routing;network-on-chip;parallel, routing;parallel, simulation;random, synchronous, systems;, timing;low-overhead, traffic;shared-memory}, isbn = {0278-0070}, doi = {10.1109/TCAD.2011.2114970}, author = {Horak,M.N. and Nowick,S.M. and Carlberg,M. and Vishkin, Uzi} } @conference {17904, title = {MDMap: A system for data-driven layout and exploration of molecular dynamics simulations}, booktitle = {Biological Data Visualization (BioVis), 2011 IEEE Symposium on}, year = {2011}, month = {2011/10//}, pages = {111 - 118}, abstract = {Contemporary molecular dynamics simulations result in a glut of simulation data, making analysis and discovery a difficult and burdensome task. We present MDMap, a system designed to summarize long-running molecular dynamics (MD) simulations. We represent a molecular dynamics simulation as a state transition graph over a set of intermediate (stable and semi-stable) states. The transitions amongst the states together with their frequencies represent the flow of a biomolecule through the trajectory space. MDMap automatically determines potential intermediate conformations and the transitions amongst them by analyzing the conformational space explored by the MD simulation. MDMap is an automated system to visualize MD simulations as state-transition diagrams, and can replace the current tedious manual layouts of biomolecular folding landscapes with an automated tool. The layout of the representative states and the corresponding transitions among them is presented to the user as a visual synopsis of the long-running MD simulation. 
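The state-transition-graph summary reduces to a small computation once each trajectory frame has been assigned to an intermediate conformational state (e.g., by clustering): count transitions between consecutive frame labels to obtain edge frequencies. A minimal sketch assuming per-frame labels are already available (hypothetical input, plain Python; the conformational analysis itself is not shown):

    from collections import Counter

    def transition_graph(frame_states):
        """Count transitions between consecutive states in an MD trajectory.
        Returns {(src, dst): relative frequency}, self-loops excluded."""
        edges = Counter(
            (a, b) for a, b in zip(frame_states, frame_states[1:]) if a != b
        )
        total = sum(edges.values())
        return {e: n / total for e, n in edges.items()}

    # Hypothetical labels from clustering a folding trajectory:
    labels = ["U", "U", "I1", "I1", "I1", "U", "I1", "I2", "I2", "F", "F"]
    for (src, dst), freq in sorted(transition_graph(labels).items()):
        print(f"{src} -> {dst}: {freq:.2f}")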
We compare and contrast multiple presentations of the state transition diagrams, such as conformational embedding, and spectral, hierarchical, and force-directed graph layouts. We believe this system could provide a road-map for the visualization of other stochastic time-varying simulations in a variety of different domains.}, keywords = {MDMap, biomolecular folding landscapes, data-driven layout, data exploration, molecular dynamics simulations, state transition graph, trajectory space, stochastic time-varying simulations, stochastic processes, graph theory, biology computing, digital simulation}, doi = {10.1109/BioVis.2011.6094055}, author = {Patro,R. and Ip, Cheuk Yiu and Bista,S. and Cho,S.S. and Thirumalai,D. and Varshney, Amitabh} } @article {18242, title = {Modeling and Analysis of Correlated Binary Fingerprints for Content Identification}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {6}, year = {2011}, month = {2011/09//}, pages = {1146 - 1159}, abstract = {Multimedia identification via content fingerprints is used in many applications, such as content filtering on user-generated content websites, and automatic multimedia identification and tagging. A compact {\textquotedblleft}fingerprint{\textquotedblright} is computed for each multimedia signal that captures robust and unique properties of the perceptual content, which is later used for identifying the multimedia. Several different multimedia fingerprinting schemes have been proposed in the literature and have been evaluated through experiments. To complement these experimental evaluations and provide guidelines for choosing system parameters and designing better schemes, this paper develops models for content fingerprinting and provides an analysis of the identification performance under these models. As a first step, bounds on the identification accuracy and the required fingerprint length for the simplest case when the fingerprint bits are modeled as i.i.d. are summarized. Markov Random Fields are then used to address more realistic settings of fingerprints with correlated components. The optimal likelihood ratio detector is derived and a statistical physics inspired approach for computing the probability of detection and probability of false alarm is described. The analysis shows that the commonly used Hamming distance detection criterion is susceptible to correlations among fingerprint bits, whereas the optimal log-likelihood ratio decision rule yields 5-20\% improvement in the accuracy over a range of correlations. Simulation results demonstrate the validity of the theoretical predictions.}, keywords = {content fingerprinting, content identification, correlated binary fingerprints, multimedia identification, content filtering, user-generated content Websites, Markov random fields, Hamming distance detection criterion, optimal likelihood ratio detector, log-likelihood ratio decision rule, statistical physics inspired approach, detection probability, false alarm probability, multimedia fingerprinting schemes}, isbn = {1556-6013}, doi = {10.1109/TIFS.2011.2152394}, author = {Varna,A.L. and M.
Wu} } @conference {18245, title = {Modeling temporal correlations in content fingerprints}, booktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2011 IEEE International Conference on}, year = {2011}, month = {2011/05//}, pages = {1860 - 1863}, abstract = {Previous analysis of content fingerprints has mainly focused on the case of independent and identically distributed finger prints. Practical fingerprints, however, exhibit correlations between components computed from successive frames. In this paper, a Markov chain based model is used to capture the temporal correlations, and the suitability of this model is evaluated through experiments on a video database. The results indicate that the Markov chain model is a good fit only in a certain regime. A hybrid model is then developed to account for this behavior and a corresponding adaptive detector is derived. The adaptive detector achieves better identification accuracy at a small computational expense.}, keywords = {chain, correlations;video, database;Markov, databases;, databases;video, detection;fingerprint, detector;certain, fingerprints;hybrid, identification;temporal, Markov, model;adaptive, model;temporal, processes;adaptive, regime;content, signal}, doi = {10.1109/ICASSP.2011.5946868}, author = {Varna,A.L. and M. Wu} } @article {18587, title = {Modeling Tiered Pricing in the Internet Transit Market}, journal = {Arxiv preprint arXiv:1112.3740}, year = {2011}, month = {2011/12/16/}, abstract = {ISPs are increasingly selling "tiered" contracts, which offer Internet connectivity to wholesale customers in bundles, at rates based on the cost of the links that the traffic in the bundle is traversing. Although providers have already begun to implement and deploy tiered pricing contracts, little is known about how such pricing affects ISPs and their customers. While contracts that sell connectivity on finer granularities improve market efficiency, they are also more costly for ISPs to implement and more difficult for customers to understand. In this work we present two contributions: (1) we develop a novel way of mapping traffic and topology data to a demand and cost model; and (2) we fit this model on three large real-world networks: an European transit ISP, a content distribution network, and an academic research network, and run counterfactuals to evaluate the effects of different pricing strategies on both the ISP profit and the consumer surplus. We highlight three core findings. First, ISPs gain most of the profits with only three or four pricing tiers and likely have little incentive to increase granularity of pricing even further. Second, we show that consumer surplus follows closely, if not precisely, the increases in ISP profit with more pricing tiers. 
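The underlying trade-off can be illustrated with a toy model: flows have a delivery cost and a value, the ISP partitions flows into k cost-based tiers, prices each tier at its most expensive flow plus a markup, and customers buy a flow only if its value exceeds the price. The sketch below is a hypothetical illustration of why profit saturates quickly as k grows; it is emphatically not the paper's demand or cost model (numpy assumed):

    import numpy as np

    def profit(costs, values, k, markup=0.1):
        """ISP profit when flows are binned into k cost-based tiers."""
        edges = np.quantile(costs, np.linspace(0, 1, k + 1))
        tier = np.clip(np.searchsorted(edges, costs, side="right") - 1, 0, k - 1)
        # Each tier is priced at its most expensive flow plus a markup.
        price = np.array([costs[tier == t].max() for t in range(k)])[tier] + markup
        bought = values >= price          # customers buy only worthwhile flows
        return float(np.sum((price - costs)[bought]))

    rng = np.random.default_rng(0)
    costs = rng.uniform(0.1, 1.0, 1000)
    values = costs + rng.uniform(0.0, 0.5, 1000)   # value loosely tracks cost
    for k in (1, 2, 3, 4, 8, 16):
        print(k, "tiers -> profit", round(profit(costs, values, k), 1))

Under these assumptions most of the gain from finer-grained pricing is realized by three or four tiers, mirroring the paper's qualitative finding.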
Finally, we show that the common ISP practice of structuring tiered contracts according to the cost of carrying the traffic flows (e.g., offering a discount for traffic that is local) can be suboptimal and that dividing contracts based on both traffic demand and the cost of carrying it into only three or four tiers yields near-optimal profit for the ISP.}, keywords = {Computer Science - Networking and Internet Architecture}, url = {http://arxiv.org/abs/1112.3740}, author = {Valancius,Vytautas and Lumezanu,Cristian and Feamster, Nick and Johari,Ramesh and Vazirani,Vijay V.} } @conference {17299, title = {Motivation for Participation in Online Neighborhood Watch Communities: An Empirical Study Involving Invitation Letters}, booktitle = {Privacy, Security, Risk and Trust (PASSAT), 2011 IEEE Third International Conference on and 2011 IEEE Third International Conference on Social Computing (SocialCom)}, year = {2011}, month = {2011/10/09/11}, pages = {760 - 765}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper presents a three-part experiment designed to investigate the motivations of users of a community safety and neighborhood watch social networking website. The experiment centers on an intervention into the invitation system that current users employ to invite nonmembers to join the site, and involves several versions of an invitation email which differ by expressing one of four possible motivations for using such a site. The research presented investigates how potential users{\textquoteright} choice of whether or not to join the site is affected by the use case presented by the invitation. It also includes an investigation of the motivations of current users of the site, as reported in an online survey. The experiment yielded no significant difference in responses to the emails. Overall, invitations that included a specific motivation slightly outperformed those which did not, but not to a statistically significant degree. We conclude that although users have specific motivations for using the site, as reported in the survey, attempting to increase response rates to invitation emails by suggesting use cases of the site is surprisingly unlikely to be successful.}, keywords = {Art, Communities, community safety, Electronic mail, Interviews, invitation email, invitations, motivation, neighborhood watch, Online communities, online neighborhood watch communities, online survey, participation, Safety, Security, social media, Social network services, social networking (online), social networking Website}, isbn = {978-1-4577-1931-8}, doi = {10.1109/PASSAT/SocialCom.2011.108}, author = {Violi,N. and Shneiderman, Ben and Hanson,A. and Rey,P. J} } @conference {18861, title = {Multi-material compliant mechanisms for mobile millirobots}, year = {2011}, month = {2011/05//}, pages = {3169 - 3174}, abstract = {This paper describes a new process for fabricating planar, multi-material, compliant mechanisms, intended for use in small-scale robotics. The process involves laser cutting the mechanism geometry from a rigid material, and refilling the joint areas with a second, elastomeric material. This method allows for a large set of potential materials, with a wide range of material properties, to be used in combination to create mechanisms with highly tailored mechanical properties. These multi-material compliant mechanisms have minimum feature sizes of approximately 100 $\mu$m and have demonstrated long lifetimes, easily surviving 100,000 bending cycles.
We also present the first use of these compliant mechanisms in a 2.5 cm $\times$ 2.5 cm $\times$ 7.5 cm, 6 g hexapod. This hexapod has been demonstrated moving at speeds up to 6 cm/s, with a predicted maximum speed of up to 17 cm/s.}, doi = {10.1109/ICRA.2011.5980543}, author = {Vogtmann,Dana E. and Gupta, Satyandra K. and Bergbreiter,Sarah} } @conference {12449, title = {P2C2: Programmable pixel compressive camera for high speed imaging}, booktitle = {2011 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2011}, month = {2011/06/20/25}, pages = {329 - 336}, publisher = {IEEE}, organization = {IEEE}, abstract = {We describe an imaging architecture for compressive video sensing termed programmable pixel compressive camera (P2C2). P2C2 allows us to capture fast phenomena at frame rates higher than the camera sensor. In P2C2, each pixel has an independent shutter that is modulated at a rate higher than the camera frame-rate. The observed intensity at a pixel is an integration of the incoming light modulated by its specific shutter. We propose a reconstruction algorithm that uses the data from P2C2 along with additional priors about videos to perform temporal super-resolution. We model the spatial redundancy of videos using sparse representations and the temporal redundancy using brightness constancy constraints inferred via optical flow. We show that by modeling such spatio-temporal redundancies in a video volume, one can faithfully recover the underlying high-speed video frames from the observed low speed coded video. The imaging architecture and the reconstruction algorithm allow us to achieve temporal super-resolution without loss in spatial resolution. We implement a prototype of P2C2 using an LCOS modulator and recover several videos at 200 fps using a 25 fps camera.}, keywords = {Brightness, brightness constancy constraint, camera sensor, CAMERAS, compressive video sensing, high speed imaging, high-speed video frames, Image sequences, imaging, imaging architecture, independent shutter, Liquid crystal on silicon, low speed coded video, Modulation, optical flow, P2C2, programmable pixel compressive camera, reconstruction algorithm, sparse representation, Spatial resolution, spatio-temporal redundancies, temporal redundancy, temporal super-resolution, video coding}, isbn = {978-1-4577-0394-2}, doi = {10.1109/CVPR.2011.5995542}, author = {Reddy, D. and Veeraraghavan,A. and Chellapa, Rama} } @conference {13336, title = {The PR-star octree: a spatio-topological data structure for tetrahedral meshes}, booktitle = {Proceedings of the 19th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems}, series = {GIS {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {92 - 101}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We propose the PR-star octree as a combined spatial data structure for performing efficient topological queries on tetrahedral meshes. The PR-star octree augments the Point Region octree (PR Octree) with a list of tetrahedra incident to its indexed vertices, i.e., those in the star of its vertices. Thus, each leaf node encodes the minimal amount of information necessary to locally reconstruct the topological connectivity of its indexed elements. This provides the flexibility to efficiently construct the optimal data structure to solve the task at hand using a fraction of the memory required for a corresponding data structure on the global tetrahedral mesh.
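The essence of the structure is that a leaf stores, for each indexed vertex, the tetrahedra in that vertex's star, which suffices to rebuild local topological connectivity on demand. A minimal sketch of that vertex-to-star index and a derived vertex-adjacency query (plain Python; the octree partitioning and compact encoding are omitted):

    from collections import defaultdict

    def build_star_index(tetrahedra, leaf_vertices):
        """Map each vertex indexed by a leaf to the tetrahedra incident to it."""
        star = defaultdict(list)
        for t, tet in enumerate(tetrahedra):
            for v in tet:
                if v in leaf_vertices:
                    star[v].append(t)
        return star

    def vertex_neighbors(v, star, tetrahedra):
        """Reconstruct the vertices adjacent to v locally from its star."""
        return {u for t in star[v] for u in tetrahedra[t]} - {v}

    tets = [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
    star = build_star_index(tets, leaf_vertices={2, 3})
    print(sorted(star[2]))                       # tetrahedra incident to vertex 2
    print(sorted(vertex_neighbors(2, star, tets)))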
Due to the spatial locality of successive queries in typical GIS applications, the construction costs of these runtime data structures are amortized over multiple accesses while processing each node. We demonstrate the advantages of the PR-star octree representation in several typical GIS applications, including detection of the domain boundaries, computation of local curvature estimates and mesh simplification.}, isbn = {978-1-4503-1031-4}, doi = {10.1145/2093973.2093987}, url = {http://doi.acm.org/10.1145/2093973.2093987}, author = {Weiss,Kenneth and De Floriani, Leila and Fellegara,Riccardo and Velloso,Marcelo} } @article {16033, title = {Research Directions in Data Wrangling: Visualizations and Transformations for Usable and Credible Data}, journal = {Information VisualizationInformation Visualization}, volume = {10}, year = {2011}, month = {2011/10/01/}, pages = {271 - 288}, abstract = {In spite of advances in technologies for working with data, analysts still spend an inordinate amount of time diagnosing data quality issues and manipulating data into a usable form. This process of {\textquoteleft}data wrangling{\textquoteright} often constitutes the most tedious and time-consuming aspect of analysis. Though data cleaning and integration arelongstanding issues in the database community, relatively little research has explored how interactive visualization can advance the state of the art. In this article, we review the challenges and opportunities associated with addressing data quality issues. We argue that analysts might more effectively wrangle data through new interactive systems that integrate data verification, transformation, and visualization. We identify a number of outstanding research questions, including how appropriate visual encodings can facilitate apprehension of missing data, discrepant values, and uncertainty; how interactive visualizations might facilitate data transform specification; and how recorded provenance and social interaction might enable wider reuse, verification, and modification of data transformations.}, keywords = {data cleaning, data quality, data transformation, Uncertainty, Visualization}, isbn = {1473-8716, 1473-8724}, doi = {10.1177/1473871611415994}, url = {http://ivi.sagepub.com/content/10/4/271}, author = {Kandel,Sean and Heer,Jeffrey and Plaisant, Catherine and Kennedy,Jessie and Van Ham,Frank and Riche,Nathalie Henry and Weaver,Chris and Lee,Bongshin and Brodbeck,Dominique and Buono,Paolo} } @article {17920, title = {A robust and rotationally invariant local surface descriptor with applications to non-local mesh processing}, journal = {Graphical Models}, volume = {73}, year = {2011}, month = {2011/09//}, pages = {231 - 242}, abstract = {In recent years, we have witnessed a striking increase in research concerning how to describe a meshed surface. These descriptors are commonly used to encode mesh properties or guide mesh processing, not to augment existing computations by replication. In this work, we first define a robust surface descriptor based on a local height field representation, and present a transformation via the extraction of Zernike moments. Unlike previous work, our local surface descriptor is innately rotationally invariant. Second, equipped with this novel descriptor, we present SAMPLE {\textendash} similarity augmented mesh processing using local exemplars {\textendash} a method which uses feature neighbourhoods to propagate mesh processing done in one part of the mesh, the local exemplar, to many others. 
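Rotational invariance of a local height-field descriptor can be obtained in several ways; Zernike moment magnitudes are the paper's choice. As a simpler stand-in that conveys the idea, the sketch below summarizes a local height-field patch by statistics over concentric rings, which are unchanged by in-plane rotation (numpy assumed; illustrative only, not the published descriptor):

    import numpy as np

    def ring_descriptor(height, n_rings=8):
        """Rotation-invariant summary of a square local height-field patch:
        mean height per concentric ring about the patch center."""
        h, w = height.shape
        yy, xx = np.mgrid[0:h, 0:w]
        r = np.hypot(yy - (h - 1) / 2, xx - (w - 1) / 2)
        ring = np.minimum((r / (r.max() + 1e-9) * n_rings).astype(int), n_rings - 1)
        return np.array([height[ring == k].mean() for k in range(n_rings)])

    # The descriptor is unchanged when the patch is rotated in the plane.
    yy, xx = np.mgrid[0:33, 0:33]
    patch = np.exp(-((yy - 16) ** 2 + (xx - 16) ** 2) / 60.0)
    print(np.allclose(ring_descriptor(patch), ring_descriptor(np.rot90(patch))))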
Finally, we show that SAMPLE can be used in a number of applications, such as detail transfer and parameterization.}, keywords = {Local descriptors, Non-local mesh processing, shape analysis, Similarity processing}, isbn = {1524-0703}, doi = {10.1016/j.gmod.2011.05.002}, url = {http://www.sciencedirect.com/science/article/pii/S1524070311000166}, author = {Maximo, A. and Patro,R. and Varshney, Amitabh and Farias, R.} } @article {15178, title = {Round-optimal password-based authenticated key exchange}, journal = {Theory of Cryptography}, year = {2011}, month = {2011///}, pages = {293 - 310}, abstract = {We show a general framework for constructing password-based authenticated key exchange protocols with optimal round complexity {\textemdash} one message per party, sent simultaneously {\textemdash} in the standard model, assuming a common reference string. When our framework is instantiated using bilinear-map cryptosystems, the resulting protocol is also (reasonably) efficient. Somewhat surprisingly, our framework can be adapted to give protocols in the standard model that are universally composable while still using only one (simultaneous) round.}, doi = {10.1007/978-3-642-19571-6_18}, author = {Katz, Jonathan and Vaikuntanathan,V.} } @article {17965, title = {Saliency-Assisted Navigation of Very Large Landscape Images}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {17}, year = {2011}, month = {2011/12//}, pages = {1737 - 1746}, abstract = {The field of visualization has addressed navigation of very large datasets, usually meshes and volumes. Significantly less attention has been devoted to the issues surrounding navigation of very large images. In the last few years the explosive growth in the resolution of camera sensors and robotic image acquisition techniques has widened the gap between the display and image resolutions to three orders of magnitude or more. This paper presents the first steps towards navigation of very large images, particularly landscape images, from an interactive visualization perspective. The grand challenge in navigation of very large images is identifying regions of potential interest. In this paper we outline a three-step approach. In the first step we use multi-scale saliency to narrow down the potential areas of interest. In the second step we outline a method based on statistical signatures to further cull out regions of high conformity. In the final step we allow a user to interactively identify the exceptional regions of high interest that merit further attention. We show that our approach of progressive elicitation is fast and allows rapid identification of regions of interest. Unlike previous work in this area, our approach is scalable and computationally reasonable on very large images. 
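The first step, multi-scale saliency, can be approximated with the classic center-surround recipe: compare a lightly blurred image against progressively coarser blurs and accumulate the differences. A minimal grayscale sketch (scipy assumed; a generic baseline, not the paper's exact saliency measure):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def multiscale_saliency(image, sigmas=(2, 4, 8, 16)):
        """Center-surround saliency: accumulate |fine - coarse| differences
        across a pyramid of Gaussian blurs."""
        img = image.astype(float)
        center = gaussian_filter(img, 1)
        saliency = np.zeros_like(img)
        for s in sigmas:
            saliency += np.abs(center - gaussian_filter(img, s))
        return saliency / saliency.max()

    # Toy image: flat background with one textured region.
    rng = np.random.default_rng(0)
    img = np.zeros((128, 128))
    img[40:60, 70:100] = rng.standard_normal((20, 30))
    sal = multiscale_saliency(img)
    print(sal[50, 85] > sal[10, 10])   # the textured region is more salient: True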
We validate the results of our approach by comparing them to user-tagged regions of interest on several very large landscape images from the Internet.}, keywords = {acquisition;data, acquisition;saliency, analysis;, assisted, image, images;robotic, Internet;camera, navigation;statistical, processing;image, resolution;image, resolution;interactive, sensors;image, sensors;statistical, signatures;data, visualisation;geophysical, visualization;landscape}, isbn = {1077-2626}, doi = {10.1109/TVCG.2011.231}, author = {Ip, Cheuk Yiu and Varshney, Amitabh} } @article {15190, title = {Secure computation with sublinear amortized work}, year = {2011}, month = {2011///}, institution = {Cryptology ePrint Archive, Report 2011/482}, abstract = {Traditional approaches to secure computation begin by representing the function f beingcomputed as a circuit. For any function f that depends on each of its inputs, this implies a protocol with complexity at least linear in the input size. In fact, linear running time is inherent for secure computation of non-trivial functions, since each party must {\textquotedblleft}touch{\textquotedblright} every bit of their input lest information about other party{\textquoteright}s input be leaked. This seems to rule out many interesting applications of secure computation in scenarios where at least one of the inputs is huge and sublinear-time algorithms can be utilized in the insecure setting; private database search is a prime example. We present an approach to secure two-party computation that yields sublinear-time proto- cols, in an amortized sense, for functions that can be computed in sublinear time on a random access machine (RAM). Furthermore, a party whose input is {\textquotedblleft}small{\textquotedblright} is required to maintain only small state. We provide a generic protocol that achieves the claimed complexity, based on any oblivious RAM and any protocol for secure two-party computation. We then present an optimized version of this protocol, where generic secure two-party computation is used only for evaluating a small number of simple operations. }, author = {Gordon,D. and Katz, Jonathan and Kolesnikov,V. and Malkin,T. and Raykova,M. and Vahlis,Y.} } @conference {18276, title = {Secure video processing: Problems and challenges}, booktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2011 IEEE International Conference on}, year = {2011}, month = {2011/05//}, pages = {5856 - 5859}, abstract = {Secure signal processing is an emerging technology to enable signal processing tasks in a secure and privacy-preserving fashion. It has attracted a great amount of research attention due to the increasing demand to enable rich functionalities for private data stored online. Desirable functionalities may include search, analysis, clustering, etc. In this paper, we discuss the research issues and challenges in secure video processing with focus on the application of secure online video management. Video is different from text due to its large data volume and rich content diversity. To be practical, secure video processing requires efficient solutions that may involve a trade-off between security and complexity. We look at three representative video processing tasks and review existing techniques that can be applied. 
Many of the tasks do not have efficient solutions yet, and we discuss the challenges and research questions that need to be addressed.}, keywords = {data;video, fashion;secure, management;secure, of, online, privacy-preserving, processing;, processing;security, signal, video}, doi = {10.1109/ICASSP.2011.5947693}, author = {Lu,Wenjun and Varna,A. and M. Wu} } @conference {18281, title = {"Seeing" ENF: natural time stamp for digital video via optical sensing and signal processing}, booktitle = {Proceedings of the 19th ACM international conference on Multimedia}, series = {MM {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {23 - 32}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Electric Network Frequency (ENF) fluctuates slightly over time from its nominal value of 50 Hz/60 Hz. The fluctuations in the ENF remain consistent across the entire power grid even when measured at physically distant locations. The near-invisible flickering of fluorescent lights connected to the power mains reflect these fluctuations present in the ENF. In this paper, mechanisms using optical sensors and video cameras to record and validate the presence of the ENF fluctuations in fluorescent lighting are presented. Signal processing techniques are applied to demonstrate a high correlation between the fluctuations in the ENF signal captured from fluorescent lighting and the ENF signal captured directly from power mains supply. The proposed technique is then used to demonstrate the presence of the ENF signal in video recordings taken in various geographical areas. Experimental results show that the ENF signal can be used as a natural timestamp for optical sensor recordings and video surveillance recordings from indoor environments under fluorescent lighting. Application of the ENF signal analysis to tampering detection of surveillance video recordings is also demonstrated.}, keywords = {electric network frequency, information forensics, timestamp, video authentication}, isbn = {978-1-4503-0616-4}, doi = {10.1145/2072298.2072303}, url = {http://doi.acm.org/10.1145/2072298.2072303}, author = {Garg,Ravi and Varna,Avinash L. and M. Wu} } @article {19594, title = {Semantic Solutions to Program Analysis Problems}, journal = {arXiv:1105.0106 [cs]}, year = {2011}, month = {2011/04/30/}, abstract = {Problems in program analysis can be solved by developing novel program semantics and deriving abstractions conventionally. For over thirty years, higher-order program analysis has been sold as a hard problem. Its solutions have required ingenuity and complex models of approximation. We claim that this difficulty is due to premature focus on abstraction and propose a new approach that emphasizes semantics. Its simplicity enables new analyses that are beyond the current state of the art.}, keywords = {Computer Science - Programming Languages}, url = {http://arxiv.org/abs/1105.0106}, author = {Tobin-Hochstadt, Sam and David Van Horn} } @article {17909, title = {Social Snapshot: A System for Temporally Coupled Social Photography}, journal = {Computer Graphics and Applications, IEEE}, volume = {31}, year = {2011}, month = {2011/02//jan}, pages = {74 - 84}, abstract = {Social Snapshot actively acquires and reconstructs temporally dynamic data. The system enables spatiotemporal 3D photography using commodity devices, assisted by their auxiliary sensors and network functionality. 
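For the {\textquotedblleft}Seeing ENF{\textquotedblright} entry above, the core signal-processing step is tracking the dominant spectral peak in a narrow band around the nominal mains frequency, window by window, to obtain a frequency time series usable as a timestamp. A minimal sketch of that step on a synthetic hum (numpy only; real recordings need bandpass filtering and more careful estimation):

    import numpy as np

    def enf_track(signal, fs, nominal=60.0, band=1.0, win_sec=2.0):
        """Per-window estimate of the mains-hum frequency near its nominal value."""
        win = int(fs * win_sec)
        freqs = np.fft.rfftfreq(win, 1 / fs)
        sel = np.flatnonzero((freqs > nominal - band) & (freqs < nominal + band))
        track = []
        for start in range(0, len(signal) - win + 1, win):
            spec = np.abs(np.fft.rfft(signal[start:start + win] * np.hanning(win)))
            k = sel[np.argmax(spec[sel])]
            # Parabolic interpolation around the peak bin for sub-bin accuracy.
            a, b, c = np.log(spec[k - 1:k + 2] + 1e-12)
            delta = 0.5 * (a - c) / (a - 2 * b + c)
            track.append((k + delta) * fs / win)
        return np.array(track)

    # Synthetic 60 Hz hum drifting from 59.9 Hz to 60.1 Hz over 20 seconds.
    fs = 1000
    t = np.arange(0, 20, 1 / fs)
    f_inst = 59.9 + 0.01 * t
    hum = np.sin(2 * np.pi * np.cumsum(f_inst) / fs)
    print(np.round(enf_track(hum, fs), 3))   # estimates drift upward past 60 Hz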
It engages users, making them active rather than passive participants in data acquisition.}, keywords = {3D, acquisition;data, acquisition;photography;social, computing;, coupled, data, photography;data, photography;temporally, reconstruction;social, sciences, snapshot;spatiotemporal, social}, isbn = {0272-1716}, doi = {10.1109/MCG.2010.107}, author = {Patro,R. and Ip, Cheuk Yiu and Bista,S. and Varshney, Amitabh} } @article {12439, title = {Statistical Computations on Grassmann and Stiefel Manifolds for Image and Video-Based Recognition}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {33}, year = {2011}, month = {2011/11//}, pages = {2273 - 2286}, abstract = {In this paper, we examine image and video-based recognition applications where the underlying models have a special structure-the linear subspace structure. We discuss how commonly used parametric models for videos and image sets can be described using the unified framework of Grassmann and Stiefel manifolds. We first show that the parameters of linear dynamic models are finite-dimensional linear subspaces of appropriate dimensions. Unordered image sets as samples from a finite-dimensional linear subspace naturally fall under this framework. We show that an inference over subspaces can be naturally cast as an inference problem on the Grassmann manifold. To perform recognition using subspace-based models, we need tools from the Riemannian geometry of the Grassmann manifold. This involves a study of the geometric properties of the space, appropriate definitions of Riemannian metrics, and definition of geodesics. Further, we derive statistical modeling of inter and intraclass variations that respect the geometry of the space. We apply techniques such as intrinsic and extrinsic statistics to enable maximum-likelihood classification. We also provide algorithms for unsupervised clustering derived from the geometry of the manifold. Finally, we demonstrate the improved performance of these methods in a wide variety of vision applications such as activity recognition, video-based face recognition, object recognition from image sets, and activity-based video clustering.}, keywords = {activity based video clustering, activity recognition, computational geometry, Computational modeling, Data models, face recognition, feature representation, finite dimensional linear subspaces, geometric properties, Geometry, Grassmann Manifolds, Grassmann., HUMANS, Image and video models, image recognition, linear dynamic models, linear subspace structure, Manifolds, maximum likelihood classification, maximum likelihood estimation, Object recognition, Riemannian geometry, Riemannian metrics, SHAPE, statistical computations, statistical models, Stiefel, Stiefel Manifolds, unsupervised clustering, video based face recognition, video based recognition, video signal processing}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2011.52}, author = {Turaga,P. and Veeraraghavan,A. and Srivastava, A. and Chellapa, Rama} } @article {19595, title = {Systematic Abstraction of Abstract Machines}, journal = {arXiv:1107.3539 [cs]}, year = {2011}, month = {2011/07/18/}, abstract = {We describe a derivational approach to abstract interpretation that yields novel and transparently sound static analyses when applied to well-established abstract machines for higher-order and imperative programming languages. 
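The starting point of the derivational approach, a concrete machine such as CEK, is small enough to transcribe directly: a state holds the control expression, the environment, and the continuation, and each transition inspects one of them. A compact Python transcription of the standard CEK machine for the untyped lambda calculus (the abstraction step itself, bounding the store, is not shown here):

    def cek_eval(term, env=None, k=("halt",)):
        """CEK machine: states are (Control, Environment, Kontinuation).
        Terms: ("var", x) | ("lam", x, body) | ("app", fun, arg)."""
        env = env or {}
        while True:
            tag = term[0]
            if tag == "var":
                val = env[term[1]]
            elif tag == "lam":
                val = ("closure", term, env)
            else:  # ("app", f, a): evaluate the operator, remember the operand
                term, env, k = term[1], env, ("arg", term[2], env, k)
                continue
            # Return val to the continuation k.
            if k[0] == "halt":
                return val
            if k[0] == "arg":      # operator done; evaluate the operand next
                _, arg, argenv, k2 = k
                term, env, k = arg, argenv, ("fun", val, k2)
            else:                  # ("fun", closure, k2): perform the call
                _, (_, (_, x, body), cloenv), k2 = k
                term, env, k = body, {**cloenv, x: val}, k2

    identity = ("lam", "x", ("var", "x"))
    const = ("lam", "y", ("lam", "z", ("var", "y")))
    prog = ("app", ("app", const, identity), ("lam", "w", ("var", "w")))
    print(cek_eval(prog)[1])   # ("lam", "x", ("var", "x")): (const id) w = id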
To demonstrate the technique and support our claim, we transform the CEK machine of Felleisen and Friedman, a lazy variant of Krivine{\textquoteright}s machine, and the stack-inspecting CM machine of Clements and Felleisen into abstract interpretations of themselves. The resulting analyses bound temporal ordering of program events; predict return-flow and stack-inspection behavior; and approximate the flow and evaluation of by-need parameters. For all of these machines, we find that a series of well-known concrete machine refactorings, plus a technique of store-allocated continuations, leads to machines that abstract into static analyses simply by bounding their stores. We demonstrate that the technique scales up uniformly to allow static analysis of realistic language features, including tail calls, conditionals, side effects, exceptions, first-class continuations, and even garbage collection. In order to close the gap between formalism and implementation, we provide translations of the mathematics as running Haskell code for the initial development of our method.}, keywords = {Computer Science - Programming Languages}, url = {http://arxiv.org/abs/1107.3539}, author = {David Van Horn and Might, Matthew} } @conference {17979, title = {Toolchain for Programming, Simulating and Studying the XMT Many-Core Architecture}, booktitle = {Parallel and Distributed Processing Workshops and Phd Forum (IPDPSW), 2011 IEEE International Symposium on}, year = {2011}, month = {2011/05//}, pages = {1282 - 1291}, abstract = {The Explicit Multi-Threading (XMT) is a general-purpose many-core computing platform, with the vision of a 1000-core chip that is easy to program but does not compromise on performance. This paper presents a publicly available tool chain for XMT, complete with a highly configurable cycle-accurate simulator and an optimizing compiler. The XMT tool chain has matured and has been validated to a point where its description merits publication. In particular, research and experimentation enabled by the tool chain played a central role in supporting the ease-of-programming and performance aspects of the XMT architecture. The compiler and the simulator are also important milestones for an efficient programmer{\textquoteright}s workflow from PRAM algorithms to programs that run on the shared memory XMT hardware. This workflow is a key component in accomplishing the dual goal of ease-of-programming and performance. The applicability of our tool chain extends beyond specific XMT choices. It can be used to explore the much greater design space of shared memory many-cores by system researchers or by programmers. As the tool chain can practically run on any computer, it provides a supportive environment for teaching parallel algorithmic thinking with a programming component. Unobstructed by techniques such as decomposition-first and programming for locality, this environment may be useful in deferring the teaching of these techniques, when desired, to more advanced or platform-specific courses.}, keywords = {algorithm;XMT, architecture;XMT, architectures;shared, chain;cycle-accurate, compiler;programmer, compilers;parallel, component;shared, computing;optimizing, hardware;shared, many-core, many-core;concurrency, memory, multithreading;general-purpose, PRAM, simulator;ease-of-programming;explicit, systems;, theory;multi-threading;optimising, tool, workflow;programming, XMT}, doi = {10.1109/IPDPS.2011.270}, author = {Keceli,F. and Tzannes,A. and Caragea,G.C. and Barua,R. 
and Vishkin, Uzi} } @article {17981, title = {Using simple abstraction to reinvent computing for parallelism}, journal = {Commun. ACM}, volume = {54}, year = {2011}, month = {2011/01//}, pages = {75 - 85}, abstract = {The ICE abstraction may take CS from serial (single-core) computing to effective parallel (many-core) computing.}, isbn = {0001-0782}, doi = {10.1145/1866739.1866757}, url = {http://doi.acm.org/10.1145/1866739.1866757}, author = {Vishkin, Uzi} } @article {19582, title = {Abstracting Abstract Machines}, journal = {arXiv:1007.4446 [cs]}, year = {2010}, note = {Comment: The 15th ACM SIGPLAN International Conference on Functional Programming (ICFP{\textquoteright}10), Baltimore, Maryland, September, 2010}, month = {2010/07/26/}, abstract = {We describe a derivational approach to abstract interpretation that yields novel and transparently sound static analyses when applied to well-established abstract machines. To demonstrate the technique and support our claim, we transform the CEK machine of Felleisen and Friedman, a lazy variant of Krivine{\textquoteright}s machine, and the stack-inspecting CM machine of Clements and Felleisen into abstract interpretations of themselves. The resulting analyses bound temporal ordering of program events; predict return-flow and stack-inspection behavior; and approximate the flow and evaluation of by-need parameters. For all of these machines, we find that a series of well-known concrete machine refactorings, plus a technique we call store-allocated continuations, leads to machines that abstract into static analyses simply by bounding their stores. We demonstrate that the technique scales up uniformly to allow static analysis of realistic language features, including tail calls, conditionals, side effects, exceptions, first-class continuations, and even garbage collection.}, keywords = {Computer Science - Programming Languages, F.3.2, F.4.1}, url = {http://arxiv.org/abs/1007.4446}, author = {David Van Horn and Might, Matthew} } @inbook {12498, title = {Advances in Video-Based Human Activity Analysis: Challenges and Approaches}, booktitle = {Advances in ComputersAdvances in Computers}, volume = {Volume 80}, year = {2010}, month = {2010///}, pages = {237 - 290}, publisher = {Elsevier}, organization = {Elsevier}, abstract = {Videos play an ever increasing role in our everyday lives with applications ranging from news, entertainment, scientific research, security, and surveillance. Coupled with the fact that cameras and storage media are becoming less expensive, it has resulted in people producing more video content than ever before. Analysis of human activities in video is important for several important applications. Interpretation and identification of human activities requires approaches that address the following questions (a) what are the appropriate atomic primitives for human activities, (b) how to combine primitives to produce complex activities, (c) what are the required invariances for inference algorithms, and (d) how to build computational models for each of these. In this chapter, we provide a broad overview and discussion of these issues. We shall review state-of-the-art computer vision algorithms that address these issues and then provide a unified perspective from which specific algorithms can be derived. 
We will then present supporting experimental results.}, isbn = {0065-2458}, url = {http://www.sciencedirect.com/science/article/pii/S0065245810800075}, author = {Turaga,Pavan and Chellapa, Rama and Veeraraghavan,Ashok}, editor = {Zelkowitz, Marvin V} } @article {16459, title = {BioNav: An Ontology-Based Framework to Discover Semantic Links in the Cloud of Linked Data}, journal = {The Semantic Web: Research and Applications}, year = {2010}, month = {2010///}, pages = {441 - 445}, author = {Vidal,M. E and Raschid, Louiqa and M{\'a}rquez,N. and Rivera,J. and Ruckhaus,E.} } @article {18501, title = {Chipping away at censorship firewalls with user-generated content}, journal = {Proc. 19th USENIX Security Symposium, Washington, DC}, year = {2010}, month = {2010///}, abstract = {Oppressive regimes and even democratic governmentsrestrict Internet access. Existing anti-censorship systems often require users to connect through proxies, but these systems are relatively easy for a censor to discover and block. This paper offers a possible next step in the cen- sorship arms race: rather than relying on a single system or set of proxies to circumvent censorship firewalls, we explore whether the vast deployment of sites that host user-generated content can breach these firewalls. To ex- plore this possibility, we have developed Collage, which allows users to exchange messages through hidden chan- nels in sites that host user-generated content. Collage has two components: a message vector layer for embedding content in cover traffic; and a rendezvous mechanism to allow parties to publish and retrieve messages in the cover traffic. Collage uses user-generated content (e.g., photo-sharing sites) as {\textquotedblleft}drop sites{\textquotedblright} for hidden messages. To send a message, a user embeds it into cover traffic and posts the content on some site, where receivers retrieve this content using a sequence of tasks. Collage makes it difficult for a censor to monitor or block these messages by exploiting the sheer number of sites where users can exchange messages and the variety of ways that a mes- sage can be hidden. Our evaluation of Collage shows that the performance overhead is acceptable for sending small messages (e.g., Web articles, email). We show how Collage can be used to build two applications: a direct messaging application, and a Web content delivery system. }, author = {Burnett,S. and Feamster, Nick and Vempala,S.} } @conference {18556, title = {Circumventing censorship with collage}, booktitle = {Proceedings of the ACM SIGCOMM 2010 conference}, series = {SIGCOMM {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {471 - 472}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Oppressive regimes and even democratic governments restrict Internet access. Existing anti-censorship systems often require users to connect through proxies, but these systems are relatively easy for a censor to discover and block. We explore a possible next step in the censorship arms race: rather than relying on a single system or set of proxies to circumvent censorship firewalls, we use the vast deployment of sites that host user-generated content to breach these firewalls. We have developed Collage, which allows users to exchange messages through hidden channels in sites that host user-generated content. To send a message, a user embeds it into cover traffic and posts the content on some site, where receivers retrieve this content. 
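The message-vector idea, hiding bytes inside innocuous cover content, can be made concrete with the simplest possible embedding: writing message bits into the least-significant bits of image pixels. This toy (numpy assumed) is far weaker than Collage's actual encoding, but it shows the embed/retrieve contract between sender and receiver:

    import numpy as np

    def embed(cover, message):
        """Hide message bytes (length-prefixed) in pixel LSBs."""
        data = len(message).to_bytes(4, "big") + message
        bits = np.unpackbits(np.frombuffer(data, dtype=np.uint8))
        flat = cover.flatten()
        assert bits.size <= flat.size, "cover too small"
        flat[:bits.size] = (flat[:bits.size] & 0xFE) | bits
        return flat.reshape(cover.shape)

    def extract(stego):
        bits = stego.flatten() & 1
        data = np.packbits(bits).tobytes()
        n = int.from_bytes(data[:4], "big")
        return data[4:4 + n]

    rng = np.random.default_rng(0)
    cover = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
    stego = embed(cover, b"meet at the usual drop site")
    print(extract(stego))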
Collage makes it difficult for a censor to monitor or block these messages by exploiting the sheer number of sites where users can exchange messages and the variety of ways that a message can be hidden. We have built a censorship-resistant news reader using Collage that can retrieve from behind a censorship firewall and show Collage{\textquoteright}s effectiveness with a live demonstration of its complete infrastructure.}, keywords = {Availability, censorship}, isbn = {978-1-4503-0201-2}, doi = {10.1145/1851182.1851269}, url = {http://doi.acm.org/10.1145/1851182.1851269}, author = {Burnett,Sam and Feamster, Nick and Vempala,Santosh} } @article {12506, title = {Comment-Gait-Based Human Recognition by Classification of Cyclostationary Processes on Nonlinear Shape Manifolds}, journal = {Journal of the American Statistical Association}, volume = {102}, year = {2010}, month = {2010///}, pages = {1126 - 1126}, author = {Chellapa, Rama and Veeraraghavan,A.} } @article {17983, title = {Computer Memory Architecture for Hybrid Serial and Parallel Computing Systems}, volume = {12/721,252}, year = {2010}, month = {2010/11/11/}, abstract = {In one embodiment, a serial processor is configured to execute software instructions in a software program in serial. A serial memory is configured to store data for use by the serial processor in executing the software instructions in serial. A plurality of parallel processors are configured to execute software instructions in the software program in parallel. A plurality of partitioned memory modules are provided and configured to store data for use by the plurality of parallel processors in executing software instructions in parallel. Accordingly, a processor/memory structure is provided that allows serial programs to use quick local serial memories and parallel programs to use partitioned parallel memories. The system may switch between a serial mode and a parallel mode. The system may incorporate pre-fetching commands of several varieties. For example, towards switching between the serial mode and the parallel mode, the serial processor is configured to send a signal to start...}, url = {http://www.google.com/patents?id=0OPbAAAAEBAJ}, author = {Vishkin, Uzi}, editor = {Xmtt Inc.} } @article {18539, title = {Don{\textquoteright}t Configure the Network, Program It! Domain-Specific Programming Languages for Network Systems}, volume = {YALEU/DCS/RR-1432}, year = {2010}, month = {2010///}, institution = {Yale University}, abstract = {Network operators must configure networks to accomplish critical, complex, and often conflicting requirements: they must ensure good performance while maintaining security, and satisfy contractual obligations while ensuring profitable use of interdomain connections. Unfortunately, today they have no choice but to implement these high-level goals by configuring hundreds of individual network devices. These interact in complex and unexpected ways, often resulting in misconfigurations or downtime. We propose a new approach: rather than configure individual network devices, operators should program the network holistically, according to high-level policies. Towards this goal, we present Nettle, a system for clearly and concisely expressing network requirements together with mechanisms to control the network accordingly. At the lowest level, we rely on OpenFlow switches for programmable network hardware. 
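The report goes on to describe embedded domain-specific languages for expressing policies over this hardware layer. The flavor of such an EDSL can be sketched in miniature: policies are built from predicates over packet fields and combined with ordinary language operators. The Python below is a hypothetical illustration with invented names (Nettle itself is a Haskell EDSL):

    class Policy:
        """A policy maps a packet (a dict of header fields) to a decision."""
        def __init__(self, decide):
            self.decide = decide
        def __or__(self, other):    # first matching policy wins
            return Policy(lambda pkt: self.decide(pkt) or other.decide(pkt))

    def match(**fields):
        """Allow packets whose headers match all given field values."""
        return Policy(lambda pkt: "allow"
                      if all(pkt.get(k) == v for k, v in fields.items())
                      else None)

    def deny_all():
        return Policy(lambda pkt: "deny")

    # High-level intent: web traffic from the trusted subnet, else drop.
    policy = match(src_subnet="10.0.1.0/24", dst_port=80) | deny_all()
    print(policy.decide({"src_subnet": "10.0.1.0/24", "dst_port": 80}))    # allow
    print(policy.decide({"src_subnet": "192.168.0.0/16", "dst_port": 22})) # deny

A compiler would walk such a policy structure and emit the corresponding low-level match-action rules for the switches.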
On top of this layer, we build an extensible family of embedded domain-specific languages (EDSLs), each aimed at different operational concerns and provide convenient ways to sensibly combine expressions in these languages. We present a case study demonstrating a DSL for networks that provides fine-grained, dynamic access control policies.}, author = {Feamster, Nick and Voellmy,A. and Agarwal,A. and Hudak,P. and Burnett,S. and Launchbury,J.} } @article {12880, title = {Environmental reservoirs of Vibrio cholerae and their role in cholera}, journal = {Environmental Microbiology Reports}, volume = {2}, year = {2010}, month = {2010/01/15/}, pages = {27 - 33}, abstract = {In the aquatic environment, Vibrio cholerae has been reported to be associated with a variety of living organisms, including animals with an exoskeleton of chitin, aquatic plants, protozoa, bivalves, waterbirds, as well as abiotic substrates (e.g. sediments). Most of these are well-known or putative environmental reservoirs for the bacterium, defined as places where the pathogen lives over time, with the potential to be released and to cause human infection. Environmental reservoirs also serve as V. cholerae disseminators and vectors. They can be responsible for the start of an epidemic, may be critical to cholera endemicity, and affect the evolution of pathogen virulence. To date, in addition to the generally recognized role of zooplankton as the largest environmental reservoir for V. cholerae, other environmental reservoirs play some role in cholera epidemiology by favouring persistence of the pathogen during inter-epidemic periods. Little is known about the ecological factors affecting V. cholerae survival in association with aquatic substrates. Studies aimed at these aspects, i.e. understanding how environmental reservoirs interact, are affected by climate, and contribute to disease epidemiology, will be useful for understanding global implications of V. cholerae and the disease cholera.}, isbn = {1758-2229}, doi = {10.1111/j.1758-2229.2009.00128.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1758-2229.2009.00128.x/abstract?userIsAuthenticated=false\&deniedAccessCustomisedMessage=}, author = {Vezzulli,Luigi and Pruzzo,Carla and Huq,Anwar and Rita R Colwell} } @article {19585, title = {Evaluating Call-By-Need on the Control Stack}, journal = {arXiv:1009.3174 [cs]}, year = {2010}, note = {Comment: Symposium on Trends in Functional Programming (TFP 2010), Norman, Oklahoma, May 2010}, month = {2010/09/16/}, abstract = {Ariola and Felleisen{\textquoteright}s call-by-need {\lambda}-calculus replaces a variable occurrence with its value at the last possible moment. To support this gradual notion of substitution, function applications-once established-are never discharged. In this paper we show how to translate this notion of reduction into an abstract machine that resolves variable references via the control stack. 
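Call-by-need's last-possible-moment substitution is most commonly realized with memoized thunks: a variable reference forces its thunk once, and the value is cached for later references. The paper's machine instead resolves references through the control stack, but the observable sharing behavior matches this small sketch (plain Python, for orientation only):

    class Thunk:
        """A delayed computation, forced at most once (call-by-need)."""
        def __init__(self, compute):
            self.compute, self.forced = compute, False
        def force(self):
            if not self.forced:
                self.value, self.forced = self.compute(), True
                self.compute = None          # drop the closure once evaluated
            return self.value

    def expensive():
        print("evaluating...")
        return 42

    t = Thunk(expensive)
    print(t.force())   # prints "evaluating..." then 42
    print(t.force())   # prints 42 only: the value was demanded just once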
In particular, the machine uses the static address of a variable occurrence to extract its current value from the dynamic control stack.}, keywords = {Computer Science - Programming Languages, D.3.1}, url = {http://arxiv.org/abs/1009.3174}, author = {Chang, Stephen and David Van Horn and Felleisen, Matthias} } @conference {12483, title = {Fast directional chamfer matching}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2010 IEEE Conference on}, year = {2010}, month = {2010/06//}, pages = {1696 - 1703}, abstract = {We study the object localization problem in images given a single hand-drawn example or a gallery of shapes as the object model. Although many shape matching algorithms have been proposed for the problem over the decades, chamfer matching remains the preferred method when speed and robustness are considered. In this paper, we significantly improve the accuracy of chamfer matching while reducing the computational time from linear to sublinear (shown empirically). Specifically, we incorporate edge orientation information in the matching algorithm such that the resulting cost function is piecewise smooth and the cost variation is tightly bounded. Moreover, we present a sublinear time algorithm for exact computation of the directional chamfer matching score using techniques from 3D distance transforms and directional integral images. In addition, the smooth cost function allows us to bound the cost distribution of large neighborhoods and skip the bad hypotheses within. Experiments show that the proposed approach improves the speed of the original chamfer matching by up to 45{\texttimes}, and it is much faster than many state-of-the-art techniques while the accuracy is comparable.}, keywords = {chamfer matching, object localization, edge orientation, sublinear time algorithm, 3D distance transforms, directional integral images, shape matching}, doi = {10.1109/CVPR.2010.5539837}, author = {Ming-Yu Liu and Tuzel, O. and Veeraraghavan,A. and Chellapa, Rama} } @article {18217, title = {Forensic hash for multimedia information}, journal = {SPIE Media Forensics and Security}, year = {2010}, month = {2010///}, pages = {7541{\textendash}0Y}, abstract = {Digital multimedia such as images and videos are prevalent on today{\textquoteright}s internet and cause significant social impact, which can be evidenced by the proliferation of social networking sites with user-generated content. Due to the ease of generating and modifying images and videos, it is critical to establish trustworthiness for online multimedia information. In this paper, we propose a new framework to perform multimedia forensics by using compact side information to reconstruct the processing history of a multimedia document. We refer to this framework as FASHION, standing for Forensic hASH for informatION assurance. As a first step in the modular design for FASHION, we propose new algorithms based on Radon transform and scale space theory to effectively estimate the parameters of geometric transforms and detect local tampering that an image may have undergone.
The FASHION framework is designed to answer a much broader range of questions regarding the processing history of multimedia data than the simple binary decision provided by robust image hashing, and also offers more efficient and accurate forensic analysis than multimedia forensic techniques that do not use any side information. }, author = {Lu,W. and Varna,A.L. and Wu,M.} } @article {18152, title = {A framework for theoretical analysis of content fingerprinting}, journal = {Proc. of SPIE Media Forensics and Security}, year = {2010}, month = {2010///}, abstract = {The popularity of video sharing platforms such as Youtube has prompted the need for the development of efficient techniques for multimedia identification. Content fingerprinting is a promising solution for this problem, whereby a short {\textquotedblleft}fingerprint{\textquotedblright} that captures robust and unique characteristics of a signal is computed from each multimedia document. This fingerprint is then compared with a database to identify the multimedia. Several fingerprinting techniques have been proposed in the literature and have been evaluated using experiments. To complement these experimental evaluations and gain a deeper understanding, this paper proposes a framework for theoretical modeling and analysis of content fingerprinting schemes. Analysis of some key modules for fingerprint encoding and matching are also presented under this framework. }, author = {Varna,A.L. and Chuang,W.H. and Wu,M.} } @conference {17988, title = {General-purpose vs. gpu: Comparison of many-cores on irregular workloads}, booktitle = {Proceedings of the Second Usenix Workshop on Hot Topics in Parallelism}, year = {2010}, month = {2010///}, pages = {14 - 15}, abstract = {XMT is a general-purpose many-core parallel architecture. The foremost design objective for XMT was to meet the highest standards for ease of parallel programming. GPUs, on the other hand, have acquired a strong reputation on performance, sometimes at the expense of ease-of-programming. The current paper presents a performance comparison on diverse workloads between XMT and an NVIDIA CUDA-enabled GPU. Configured with roughly the same amount of chip resources as the GPU, XMT achieves an average speedup of 6.05x on irregular applications, while incurring an average slowdown of 2.07x on regular ones. Namely, XMT comes ahead for significant applications without having to pay a (possibly worthwhile) price for easier programming. This surprising result suggests a yet untapped opportunity: A high-performance easy-to-program general-purpose 1000-core computer. }, author = {Caragea,G.C. and Keceli,F. and Tzannes,A. and Vishkin, Uzi} } @article {17970, title = {Global Contours}, volume = {CS-TR-4957}, year = {2010}, month = {2010/05/05/}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {We present a multi-scale approach that uses Laplacian eigenvectors to extract globally significant contours from an image. The input images are mapped into the Laplacian space by using Laplacian eigenvectors. This mapping causes globally significant pixels along the contours to expand in the Laplacian space. The measure of the expansion is used to compute the Global Contours. We apply our scheme to real color images and compare it with several other methods that compute image and color saliency. The contours calculated by our method reflect global properties of the image and are complementary to classic center-surround image saliency methods.
We believe that hybrid image saliency algorithms that combine our method of Global Contours with center-surround image saliency algorithms will be able to better characterize the most important regions of images than those from just using contours calculated using bottom-up approaches. }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/10072}, author = {Bista, Sujal and Varshney, Amitabh} } @conference {18218, title = {Gradient descent approach for secure localization in resource constrained wireless sensor networks}, booktitle = {Acoustics Speech and Signal Processing (ICASSP), 2010 IEEE International Conference on}, year = {2010}, month = {2010/03//}, pages = {1854 - 1857}, abstract = {Many sensor network related applications require precise knowledge of the location of constituent nodes. In these applications, it is desirable for the wireless nodes to be able to autonomously determine their locations before they start sensing and transmitting data. Most localization algorithms rely on anchor nodes whose locations are known to determine the positions of the remaining nodes. In an adversarial scenario, some of these anchor nodes could be compromised and used to transmit misleading information aimed at preventing the accurate localization of the remaining sensors. In this paper, a computationally efficient algorithm to determine the location of sensors that can resist such attacks is described. The proposed algorithm combines gradient descent with a selective pruning of inconsistent measurements to achieve good localization accuracy. Simulation results show that the proposed algorithm has performance comparable to existing schemes while requiring less computational resources.}, keywords = {gradient descent, secure localization, resource constrained wireless sensor networks, computational resources}, doi = {10.1109/ICASSP.2010.5495371}, author = {Garg,R. and Varna,A.L. and M. Wu} } @article {15073, title = {A group signature scheme from lattice assumptions}, journal = {Advances in Cryptology-ASIACRYPT 2010}, year = {2010}, month = {2010///}, pages = {395 - 412}, abstract = {Group signature schemes allow users to sign messages on behalf of a group while (1) maintaining anonymity (within that group) with respect to an outside observer, yet (2) ensuring traceability of a signer (by the group manager) when needed. In this work we give the first construction of a group signature scheme based on lattices (more precisely, the learning with errors assumption), in the random oracle model. Towards our goal, we construct a new algorithm for sampling a basis for an orthogonal lattice, together with a trapdoor, that may be of independent interest.}, doi = {10.1007/978-3-642-17373-8_23}, author = {Gordon,S. and Katz, Jonathan and Vaikuntanathan,V.} } @conference {13952, title = {Kernelized R{\'e}nyi distance for speaker recognition}, booktitle = {Acoustics Speech and Signal Processing (ICASSP), 2010 IEEE International Conference on}, year = {2010}, month = {2010/03//}, pages = {4506 - 4509}, abstract = {Speaker recognition systems classify a test signal as a speaker or an imposter by evaluating a matching score between input and reference signals. We propose a new information theoretic approach for computation of the matching score using the R{\'e}nyi entropy.
The proposed entropic distance, the Kernelized R{\'e}nyi distance (KRD), is formulated in a non-parametric way and the resulting measure is efficiently evaluated in a parallelized fashion on a graphical processor. The distance is then adapted as a scoring function and its performance compared with other popular scoring approaches in a speaker identification and speaker verification framework.}, keywords = {R{\'e}nyi entropy, kernelized R{\'e}nyi distance, speaker identification, speaker verification, information theoretic approach, graphics processor}, doi = {10.1109/ICASSP.2010.5495587}, author = {Vasan Srinivasan,B. and Duraiswami, Ramani and Zotkin,Dmitry N} } @conference {17986, title = {Lazy binary-splitting: a run-time adaptive work-stealing scheduler}, booktitle = {Proceedings of the 15th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming}, series = {PPoPP {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {179 - 190}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We present Lazy Binary Splitting (LBS), a user-level scheduler of nested parallelism for shared-memory multiprocessors that builds on existing Eager Binary Splitting work-stealing (EBS) implemented in Intel{\textquoteright}s Threading Building Blocks (TBB), but improves performance and ease-of-programming. In its simplest form (SP), EBS requires manual tuning by repeatedly running the application under carefully controlled conditions to determine a stop-splitting-threshold (sst) for every do-all loop in the code. This threshold limits the parallelism and prevents excessive overheads for fine-grain parallelism. Besides being tedious, this tuning also over-fits the code to some particular dataset, platform and calling context of the do-all loop, resulting in poor performance portability for the code. LBS overcomes both the performance portability and ease-of-programming pitfalls of a manually fixed threshold by adapting dynamically to run-time conditions without requiring tuning. We compare LBS to Auto-Partitioner (AP), the latest default scheduler of TBB, which does not require manual tuning either but lacks context portability, and outperform it by 38.9\% using TBB{\textquoteright}s default AP configuration, and by 16.2\% after we tuned AP to our experimental platform. We also compare LBS to SP by manually finding SP{\textquoteright}s sst using a training dataset and then running both on a different execution dataset. LBS outperforms SP by 19.5\% on average, while allowing for improved performance portability without requiring tedious manual tuning. LBS also outperforms SP with sst=1, its default value when undefined, by 56.7\%, and serializing work-stealing (SWS), another work-stealer, by 54.7\%. Finally, compared to serializing inner parallelism (SI) which has been used by OpenMP, LBS is 54.2\% faster.}, keywords = {Dynamic scheduling, load balancing, nested parallelism, thread scheduling, work stealing}, isbn = {978-1-60558-877-3}, doi = {10.1145/1693453.1693479}, url = {http://doi.acm.org/10.1145/1693453.1693479}, author = {Tzannes,Alexandros and Caragea,George C.
and Barua,Rajeev and Vishkin, Uzi} } @article {14820, title = {Mesh saliency and human eye fixations}, journal = {ACM Transactions on Applied Perception (TAP)}, volume = {7}, year = {2010}, month = {2010/02//}, pages = {12:1{\textendash}12:13}, abstract = {Mesh saliency has been proposed as a computational model of perceptual importance for meshes, and it has been used in graphics for abstraction, simplification, segmentation, illumination, rendering, and illustration. Even though this technique is inspired by models of low-level human vision, it has not yet been validated with respect to human performance. Here, we present a user study that compares the previous mesh saliency approaches with human eye movements. To quantify the correlation between mesh saliency and fixation locations for 3D rendered images, we introduce the normalized chance-adjusted saliency by improving the previous chance-adjusted saliency measure. Our results show that the current computational model of mesh saliency can model human eye movements significantly better than a purely random model or a curvature-based model.}, keywords = {eye-tracker, mesh saliency, Visual perception}, isbn = {1544-3558}, doi = {10.1145/1670671.1670676}, url = {http://doi.acm.org/10.1145/1670671.1670676}, author = {Kim,Youngmin and Varshney, Amitabh and Jacobs, David W. and Guimbreti{\`e}re,Fran{\c c}ois} } @conference {13301, title = {Modeling and generalization of discrete Morse terrain decompositions}, booktitle = {Proc. 20th Int. Conf. on Pattern Recognition, ICPR}, volume = {10}, year = {2010}, month = {2010///}, pages = {999 - 1002}, abstract = {We address the problem of morphological analysis of real terrains. We describe a morphological model for a terrain by considering extensions of Morse theory to the discrete case. We propose a two-level model of the morphology of a terrain based on a graph joining the critical points of the terrain through integral lines. We present a new set of generalization operators specific for discrete piece-wise linear terrain models, which are used to reduce noise and the size of the morphological representation. We show results of our approach on real terrains. }, author = {De Floriani, Leila and Magillo,P. and Vitali,M.} } @conference {13315, title = {Multiresolution Morse triangulations}, booktitle = {Proceedings of the 14th ACM Symposium on Solid and Physical Modeling}, series = {SPM {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {183 - 188}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We address the problem of representing the geometry and the morphology of a triangulated surface endowed with a scalar field in a combined geometric and topological multiresolution model. The model, called a Multiresolution Morse Triangulation (MMT), is composed of a multiresolution triangle mesh, and of a multiresolution Morse complex describing the morphology of the field.
The MMT is built through a combined morphological and geometrical generalization, and supports queries to extract consistent geometrical and morphological representations of the field at both uniform and variable resolutions.}, isbn = {978-1-60558-984-8}, doi = {10.1145/1839778.1839806}, url = {http://doi.acm.org/10.1145/1839778.1839806}, author = {Danovaro,Emanuele and De Floriani, Leila and Magillo,Paola and Vitali,Maria} } @conference {18641, title = {Nfsight: netflow-based network awareness tool}, year = {2010}, month = {2010///}, abstract = {Network awareness is highly critical for network and security administrators. It enables informed planning and management of network resources, as well as detection and a comprehensive understanding of malicious activity. It requires a set of tools to efficiently collect, process, and represent network data. While many such tools already exist, there is no flexible and practical solution for visualizing network activity at various granularities, and quickly gaining insights about the status of network assets. To address this issue, we developed Nfsight, a NetFlow processing and visualization application designed to offer a comprehensive network awareness solution. Nfsight constructs bidirectional flows out of the unidirectional NetFlow flows and leverages these bidirectional flows to provide client/server identification and intrusion detection capabilities. We present in this paper the internal architecture of Nfsight, the evaluation of the service, and intrusion detection algorithms. We illustrate the contributions of Nfsight through several case studies conducted by security administrators on a large university network. }, url = {http://www.usenix.org/event/lisa10/tech/full_papers/Berthier.pdf}, author = {Berthier,R. and Michel Cukier and Hiltunen,M. and Kormann,D. and Vesonder,G. and Sheleheda,D.} } @conference {15104, title = {Overcoming the Hole in the Bucket: Public-Key Cryptography Resilient to Continual Memory Leakage}, booktitle = {Foundations of Computer Science (FOCS), 2010 51st Annual IEEE Symposium on}, year = {2010}, month = {2010/10//}, pages = {501 - 510}, abstract = {In recent years, there has been a major effort to design cryptographic schemes that remain secure even when arbitrary information about the secret key is leaked (e.g., via side-channel attacks). We explore the possibility of achieving security under continual leakage from the entire secret key by designing schemes in which the secret key is updated over time. In this model, we construct public-key encryption schemes, digital signatures, and identity-based encryption schemes that remain secure even if an attacker can leak a constant fraction of the secret memory (including the secret key) in each time period between key updates. We also consider attackers who may probe the secret memory during the updates themselves. We stress that we allow unrestricted leakage, without the assumption that {\textquotedblleft}only computation leaks information{\textquotedblright}. Prior to this work, constructions of public-key encryption schemes secure under continual leakage were not known even under this assumption.}, keywords = {public-key cryptography, continual memory leakage, cryptographic schemes, digital signatures, identity-based encryption, secret key leakage}, doi = {10.1109/FOCS.2010.55}, author = {Brakerski,Z. and Kalai,Y.T.
and Katz, Jonathan and Vaikuntanathan,V.} } @article {18260, title = {Performance impact of ordinal ranking on content fingerprinting}, journal = {IEEE Int. Conf. on Image Processing}, year = {2010}, month = {2010///}, abstract = {Content fingerprinting provides a compact representation of multimedia objects for copy identification. This paper analyzes the impact of the ordinal-ranking based feature encoding on the performance of content fingerprinting. Expressions are derived for the identification performance of a fingerprinting system with and without ordinal ranking. The analysis indicates that when the number of features is moderately large, ordinal ranking can improve the robustness of the fingerprinting system to large distortions of the features and significantly increase the probability of detection. These results enhance the understanding of ordinal ranking and provide design guidelines for choosing different system parameters to achieve a desired identification accuracy. }, author = {Chuang,W.H. and Varna,A.L. and Wu,M.} } @article {17985, title = {Plasmonic Systems and Devices Utilizing Surface Plasmon Polariton}, volume = {12/697,595}, year = {2010}, month = {2010/05/27/}, abstract = {Plasmonic systems and devices that utilize surface plasmon polaritons (or {\textquotedblleft}plasmons{\textquotedblright}) for inter-chip and/or intra-chip communications are provided. A plasmonic system includes a microchip that has an integrated circuit module and a plasmonic device configured to interface with the integrated circuit module. The plasmonic device includes a first electrode, a second electrode positioned at a non-contact distance from the first electrode, and a tunneling-junction configured to create a plasmon when a potential difference is created between the first electrode and the second electrode.}, url = {http://www.google.com/patents?id=2VnRAAAAEBAJ}, author = {Smolyaninov,Igor I. and Vishkin, Uzi and Davis,Christopher C.} } @conference {12486, title = {Pose estimation in heavy clutter using a multi-flash camera}, booktitle = {Robotics and Automation (ICRA), 2010 IEEE International Conference on}, year = {2010}, month = {2010/05//}, pages = {2028 - 2035}, abstract = {We propose a novel solution to object detection, localization and pose estimation with applications in robot vision. The proposed method is especially applicable when the objects of interest may not be richly textured and are immersed in heavy clutter. We show that a multi-flash camera (MFC) provides accurate separation of depth edges and texture edges in such scenes. Then, we reformulate the problem as one of finding matches between the depth edges obtained in one or more MFC images to the rendered depth edges that are computed offline using the 3D CAD model of the objects. In order to facilitate accurate matching of these binary depth edge maps, we introduce a novel cost function that respects both the position and the local orientation of each edge pixel. This cost function is significantly superior to traditional Chamfer cost and leads to accurate matching even in heavily cluttered scenes where traditional methods are unreliable. We present a sub-linear time algorithm to compute the cost function using techniques from 3D distance transforms and integral images. Finally, we also propose a multi-view based pose-refinement algorithm to improve the estimated pose.
We implemented the algorithm on an industrial robot arm and obtained location and angular estimation accuracy of the order of 1 mm and 2{\textdegree}, respectively, for a variety of parts with minimal texture.}, keywords = {pose estimation, object detection, object localization, multi-flash camera, depth edges, 3D distance transforms, integral images, robot vision}, doi = {10.1109/ROBOT.2010.5509897}, author = {Ming-Yu Liu and Tuzel, O. and Veeraraghavan,A. and Chellapa, Rama and Agrawal,A. and Okuda, H.} } @article {19591, title = {Pushdown Control-Flow Analysis of Higher-Order Programs}, journal = {arXiv:1007.4268 [cs]}, year = {2010}, note = {Comment: The 2010 Workshop on Scheme and Functional Programming}, month = {2010/07/24/}, abstract = {Context-free approaches to static analysis gain precision over classical approaches by perfectly matching returns to call sites---a property that eliminates spurious interprocedural paths. Vardoulakis and Shivers{\textquoteright}s recent formulation of CFA2 showed that it is possible (if expensive) to apply context-free methods to higher-order languages and gain the same boost in precision achieved over first-order programs. To this young body of work on context-free analysis of higher-order programs, we contribute a pushdown control-flow analysis framework, which we derive as an abstract interpretation of a CESK machine with an unbounded stack. One instantiation of this framework marks the first polyvariant pushdown analysis of higher-order programs; another marks the first polynomial-time analysis. In the end, we arrive at a framework for control-flow analysis that can efficiently compute pushdown generalizations of classical control-flow analyses.}, keywords = {Computer Science - Programming Languages, F.3.2, F.4.1}, url = {http://arxiv.org/abs/1007.4268}, author = {Earl, Christopher and Might, Matthew and David Van Horn} } @conference {19593, title = {Resolving and Exploiting the k-CFA Paradox: Illuminating Functional vs. Object-oriented Program Analysis}, booktitle = {PLDI {\textquoteright}10 Proceedings of the 2010 ACM SIGPLAN Conference on Programming Language Design and Implementation}, series = {PLDI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {305 - 315}, publisher = {ACM}, organization = {ACM}, abstract = {Low-level program analysis is a fundamental problem, taking the shape of "flow analysis" in functional languages and "points-to" analysis in imperative and object-oriented languages. Despite the similarities, the vocabulary and results in the two communities remain largely distinct, with limited cross-understanding. One of the few links is Shivers{\textquoteright}s k-CFA work, which has advanced the concept of "context-sensitive analysis" and is widely known in both communities. Recent results indicate that the relationship between the functional and object-oriented incarnations of k-CFA is not as well understood as thought. Van Horn and Mairson proved k-CFA for k >= 1 to be EXPTIME-complete; hence, no polynomial-time algorithm can exist. Yet, there are several polynomial-time formulations of context-sensitive points-to analyses in object-oriented languages. Thus, it seems that functional k-CFA may actually be a profoundly different analysis from object-oriented k-CFA.
We resolve this paradox by showing that the exact same specification of k-CFA is polynomial-time for object-oriented languages yet exponential-time for functional ones: objects and closures are subtly different, in a way that interacts crucially with context-sensitivity and complexity. This illumination leads to an immediate payoff: by projecting the object-oriented treatment of objects onto closures, we derive a polynomial-time hierarchy of context-sensitive CFAs for functional programs.}, keywords = {control-flow analysis, functional, k-cfa, m-cfa, object-oriented, pointer analysis, static analysis}, isbn = {978-1-4503-0019-3}, url = {http://doi.acm.org/10.1145/1806596.1806631}, author = {Might, Matthew and Smaragdakis, Yannis and David Van Horn} } @conference {17984, title = {Resource-Aware Compiler Prefetching for Many-Cores}, booktitle = {Parallel and Distributed Computing (ISPDC), 2010 Ninth International Symposium on}, year = {2010}, month = {2010/07//}, pages = {133 - 140}, abstract = {Super-scalar, out-of-order processors that can have tens of read and write requests in the execution window place significant demands on Memory Level Parallelism (MLP). Multi- and many-cores with shared parallel caches further increase MLP demand. Current cache hierarchies, however, have been unable to keep up with this trend, with modern designs allowing only 4-16 concurrent cache misses. This disconnect is exacerbated by recent highly parallel architectures (e.g. GPUs) where power and area per-core budget favor lighter cores with fewer resources. Support for hardware and software prefetch increases MLP pressure since these techniques overlap multiple memory requests with existing computation. In this paper, we propose and evaluate a novel Resource-Aware Prefetching (RAP) compiler algorithm that is aware of the number of simultaneous prefetches supported, and optimized for the same. We show that in situations where not enough resources are available to issue prefetch instructions for all references in a loop, it is more beneficial to decrease the prefetch distance and prefetch for as many references as possible, rather than use a fixed prefetch distance and skip prefetching for some references, as in current approaches. We implemented our algorithm in a GCC-derived compiler and evaluated its performance using an emerging fine-grained many-core architecture. Our results show that the RAP algorithm outperforms a well-known loop prefetching algorithm by up to 40.15\% and the state-of-the-art GCC implementation by up to 34.79\%. Moreover, we compare the RAP algorithm with a simple hardware prefetching mechanism, and show improvements of up to 24.61\%.}, keywords = {resource-aware prefetching, compiler algorithm, memory level parallelism, many-core architecture, shared parallel caches, loop prefetching, GCC-derived compiler}, doi = {10.1109/ISPDC.2010.16}, author = {Caragea,G.C. and Tzannes,A. and Keceli,F. and Barua,R.
and Vishkin, Uzi} } @conference {12492, title = {Robust regression using sparse learning for high dimensional parameter estimation problems}, booktitle = {Acoustics Speech and Signal Processing (ICASSP), 2010 IEEE International Conference on}, year = {2010}, month = {2010/03//}, pages = {3846 - 3849}, abstract = {Algorithms such as Least Median of Squares (LMedS) and Random Sample Consensus (RANSAC) have been very successful for low-dimensional robust regression problems. However, the combinatorial nature of these algorithms makes them practically unusable for high-dimensional applications. In this paper, we introduce algorithms that have cubic time complexity in the dimension of the problem, which make them computationally efficient for high-dimensional problems. We formulate the robust regression problem by projecting the dependent variable onto the null space of the independent variables which receives significant contributions only from the outliers. We then identify the outliers using sparse representation/learning based algorithms. Under certain conditions that follow from the theory of sparse representation, these polynomial algorithms can accurately solve the robust regression problem which is, in general, a combinatorial problem. We present experimental results that demonstrate the efficacy of the proposed algorithms. We also analyze the intrinsic parameter space of robust regression and identify an efficient and accurate class of algorithms for different operating conditions. An application to facial age estimation is presented.}, keywords = {robust regression, sparse learning, sparse representation, least median of squares, random sample consensus, parameter estimation, computational complexity}, doi = {10.1109/ICASSP.2010.5495830}, author = {Mitra, K. and Veeraraghavan,A. and Chellapa, Rama} } @conference {12482, title = {Robust RVM regression using sparse outlier model}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2010 IEEE Conference on}, year = {2010}, month = {2010/06//}, pages = {1887 - 1894}, abstract = {Kernel regression techniques such as Relevance Vector Machine (RVM) regression, Support Vector Regression and Gaussian processes are widely used for solving many computer vision problems such as age, head pose, 3D human pose and lighting estimation. However, the presence of outliers in the training dataset makes the estimates from these regression techniques unreliable. In this paper, we propose robust versions of the RVM regression that can handle outliers in the training dataset. We decompose the noise term in the RVM formulation into a (sparse) outlier noise term and a Gaussian noise term. We then estimate the outlier noise along with the model parameters. We present two approaches for solving this estimation problem: (1) a Bayesian approach, which essentially follows the RVM framework and (2) an optimization approach based on Basis Pursuit Denoising. In the Bayesian approach, the robust RVM problem essentially becomes a bigger RVM problem with the advantage that it can be solved efficiently by a fast algorithm.
Empirical evaluations and real experiments on image de-noising and age estimation demonstrate the better performance of the robust RVM algorithms over that of the RVM regression.}, keywords = {relevance vector machine, robust regression, sparse outlier model, Gaussian noise, basis pursuit denoising, Bayesian approach, image denoising, computer vision}, doi = {10.1109/CVPR.2010.5539861}, author = {Mitra, K. and Veeraraghavan,A. and Chellapa, Rama} } @article {17950, title = {Saliency Guided Summarization of Molecular Dynamics Simulations}, journal = {Scientific Visualization: Advanced Concepts}, volume = {1}, year = {2010}, month = {2010///}, pages = {321 - 335}, abstract = {We present a novel method to measure saliency in molecular dynamics simulation data. This saliency measure is based on a multiscale center-surround mechanism, which is fast and efficient to compute. We explore the use of the saliency function to guide the selection of representative and anomalous timesteps for summarization of simulations. To this end, we also introduce a multiscale keyframe selection procedure which automatically provides keyframes representing the simulation at varying levels of coarseness. We compare our saliency guided keyframe approach against other methods, and show that it consistently selects superior keyframes as measured by their predictive power in reconstructing the simulation.}, author = {Patro,R. and Ip,C. Y and Varshney, Amitabh and Hagen,H.} } @article {18277, title = {Security analysis for privacy preserving search of multimedia}, journal = {Image Processing (ICIP)}, year = {2010}, month = {2010///}, pages = {26 - 29}, abstract = {With the increasing popularity of digital multimedia such as images and videos and the advent of the cloud computing paradigm, a fast growing amount of private and sensitive multimedia data are being stored and managed over the network cloud. To provide enhanced security and privacy protection beyond traditional access control techniques, privacy preserving multimedia retrieval techniques have been proposed recently to allow content-based multimedia retrieval directly over encrypted databases and achieve accurate retrieval comparable to conventional retrieval schemes. In this paper, we introduce a security definition for the privacy preserving retrieval scenario and show that the recently proposed schemes are secure under the proposed security definition. }, author = {Lu,W. and Varna,A.L. and Wu,M.} } @article {13087, title = {Special Section on Shape Analysis and Its Applications in Image Understanding}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {32}, year = {2010}, month = {2010///}, issn = {0162-8828}, author = {Srivastava, A. and Damon,J.N. and Dryden,I.L. and Jermyn,I.H. and Das,S. and Vaswani, N. and Huckemann,S. and Hotz,T. and Munk,A. and Lin,Z.
and others} } @inbook {12497, title = {Statistical Analysis on Manifolds and Its Applications to Video Analysis}, booktitle = {Video Search and Mining}, series = {Studies in Computational Intelligence}, volume = {287}, year = {2010}, month = {2010///}, pages = {115 - 144}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The analysis and interpretation of video data is an important component of modern vision applications such as biometrics, surveillance, motion synthesis and web-based user interfaces. A common requirement among these very different applications is the ability to learn statistical models of appearance and motion from a collection of videos, and then use them for recognizing actions or persons in a new video. These applications in video analysis require statistical inference methods to be devised on non-Euclidean spaces or more formally on manifolds. This chapter outlines a broad survey of applications in video analysis that involve manifolds. We develop the required mathematical tools needed to perform statistical inference on manifolds and show their effectiveness in real video-understanding applications.}, isbn = {978-3-642-12899-8}, url = {http://dx.doi.org/10.1007/978-3-642-12900-1_5}, author = {Turaga,Pavan and Veeraraghavan,Ashok and Srivastava,Anuj and Chellapa, Rama}, editor = {Schonfeld,Dan and Shan,Caifeng and Tao,Dacheng and Wang,Liang} } @book {12505, title = {Statistical Methods and Models for Video-Based Tracking, Modeling, and Recognition}, year = {2010}, month = {2010///}, publisher = {Now Publishers Inc}, organization = {Now Publishers Inc}, abstract = {Computer vision systems attempt to understand a scene and its components from mostly visual information. The geometry exhibited by the real world, the influence of material properties on scattering of incident light, and the process of imaging introduce constraints and properties that are key to solving some of these tasks. In the presence of noisy observations and other uncertainties, the algorithms make use of statistical methods for robust inference. Statistical Methods and Models for Video-based Tracking, Modeling, and Recognition highlights the role of geometric constraints in statistical estimation methods, and how the interplay of geometry and statistics leads to the choice and design of algorithms. In particular, it illustrates the role of imaging, illumination, and motion constraints in classical vision problems such as tracking, structure from motion, metrology, activity analysis and recognition, and appropriate statistical methods used in each of these problems.}, keywords = {Computers / Computer Vision \& Pattern Recognition, Computers / Image Processing, Technology \& Engineering / Electronics / General, Technology \& Engineering / General}, isbn = {9781601983145}, author = {Chellapa, Rama and Sankaranarayanan,Aswin C. and Veeraraghavan,Ashok} } @article {18284, title = {System and Method for Confidentiality-Preserving Rank-Ordered Search}, volume = {12/608,724}, year = {2010}, month = {2010/06/10/}, abstract = {A confidentiality preserving system and method for performing a rank-ordered search and retrieval of contents of a data collection. The system includes at least one computer system including a search and retrieval algorithm using term frequency and/or similar features for rank-ordering selective contents of the data collection, and enabling secure retrieval of the selective contents based on the rank-order.
The search and retrieval algorithm includes a baseline algorithm, a partially server oriented algorithm, and/or a fully server oriented algorithm. The partially and/or fully server oriented algorithms use homomorphic and/or order preserving encryption for enabling search capability from a user other than an owner of the contents of the data collection. The confidentiality preserving method includes using term frequency for rank-ordering selective contents of the data collection, and retrieving the selective contents based on the rank-order.}, url = {http://www.google.com/patents?id=kInVAAAAEBAJ}, author = {Swaminathan,Ashwin and Mao,Yinian and Su,Guan-Ming and Gou,Hongmei and Varna,Avinash L. and He,Shan and M. Wu and Oard, Douglas} } @conference {17987, title = {Is teaching parallel algorithmic thinking to high school students possible?: one teacher{\textquoteright}s experience}, booktitle = {Proceedings of the 41st ACM technical symposium on Computer science education}, series = {SIGCSE {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {290 - 294}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {All students at our high school are required to take at least one course in Computer Science prior to their junior year. They are also required to complete a year-long senior project associated with a specific in-house laboratory, one of which is the Computer Systems Lab. To prepare students for this experience the lab offers elective courses at the post-AP Computer Science level. Since the early 1990s one of these electives has focused on parallel computing. The course enrolls approximately 40 students each year for two semesters of instruction. The lead programming language is C and topics include a wide array of industry-standard and experimental tools. Since the 2007-2008 school year we have included a unit on parallel algorithmic thinking (PAT) using the Explicit Multi-Threading (XMT) system. We describe our experiences using this system after self-studying the approach from a publicly available tutorial. Overall, this article provides significant evidence regarding the unique teachability of the XMT PAT approach, and advocates using it broadly in Computer Science education.}, keywords = {high school, parallel algorithmic thinking, pram algorithms, XMT}, isbn = {978-1-4503-0006-3}, doi = {10.1145/1734263.1734363}, url = {http://doi.acm.org/10.1145/1734263.1734363}, author = {Torbert,Shane and Vishkin, Uzi and Tzur,Ron and Ellison,David J.} } @article {18607, title = {Transit portal: BGP connectivity as a service}, journal = {ACM SIGCOMM Computer Communication Review}, volume = {40}, year = {2010}, month = {2010/08//}, pages = {463 - 464}, keywords = {bgp-mux, transit portal}, isbn = {0146-4833}, doi = {10.1145/1851275.1851265}, url = {http://doi.acm.org/10.1145/1851275.1851265}, author = {Valancius,Vytautas and Kim,Hyojoon and Feamster, Nick} } @conference {18613, title = {Wide-area route control for distributed services}, booktitle = {Proceedings of the 2010 USENIX conference on USENIX annual technical conference}, series = {USENIXATC{\textquoteright}10}, year = {2010}, month = {2010///}, pages = {2 - 2}, publisher = {USENIX Association}, organization = {USENIX Association}, address = {Berkeley, CA, USA}, abstract = {Many distributed services would benefit from control over the flow of traffic to and from their users, to offer better performance and higher reliability at a reasonable cost. 
Unfortunately, although today{\textquoteright}s cloud-computing platforms offer elastic computing and bandwidth resources, they do not give services control over wide-area routing. We propose replacing the data center{\textquoteright}s border router with a Transit Portal (TP) that gives each service the illusion of direct connectivity to upstream ISPs, without requiring each service to deploy hardware, acquire IP address space, or negotiate contracts with ISPs. Our TP prototype supports many layer-two connectivity mechanisms, amortizes memory and message overhead over multiple services, and protects the rest of the Internet from misconfigured and malicious applications. Our implementation extends and synthesizes open-source software components such as the Linux kernel and the Quagga routing daemon. We also implement a management plane based on the GENI control framework and couple this with our four-site TP deployment and Amazon EC2 facilities. Experiments with an anycast DNS application demonstrate the benefits the TP offers to distributed services.}, url = {http://dl.acm.org/citation.cfm?id=1855840.1855842}, author = {Valancius,Vytautas and Feamster, Nick and Rexford,Jennifer and Nakao,Akihiro} } @article {14650, title = {Young Proteins Experience More Variable Selection Pressures Than Old Proteins}, journal = {Genome Research}, volume = {20}, year = {2010}, month = {2010/11/01/}, pages = {1574 - 1581}, abstract = {It is well known that young proteins tend to experience weaker purifying selection and evolve more quickly than old proteins. Here, we show that, in addition, young proteins tend to experience more variable selection pressures over time than old proteins. We demonstrate this pattern in three independent taxonomic groups: yeast, Drosophila, and mammals. The increased variability of selection pressures on young proteins is highly significant even after controlling for the fact that young proteins are typically shorter and experience weaker purifying selection than old proteins. The majority of our results are consistent with the hypothesis that the function of a young gene tends to change over time more readily than that of an old gene. At the same time, our results may be caused in part by young genes that serve constant functions over time, but nevertheless appear to evolve under changing selection pressures due to depletion of adaptive mutations. In either case, our results imply that the evolution of a protein-coding sequence is partly determined by its age and origin, and not only by the phenotypic properties of the encoded protein. We discuss, via specific examples, the consequences of these findings for understanding of the sources of evolutionary novelty.}, isbn = {1088-9051, 1549-5469}, doi = {10.1101/gr.109595.110}, url = {http://genome.cshlp.org/content/20/11/1574}, author = {Vishnoi,Anchal and Kryazhimskiy,Sergey and Bazykin,Georgii A and Hannenhalli, Sridhar and Plotkin,Joshua B.} } @conference {17989, title = {Algorithmic approach to designing an easy-to-program system: Can it lead to a HW-enhanced programmer{\textquoteright}s workflow add-on?}, booktitle = {Computer Design, 2009. ICCD 2009. IEEE International Conference on}, year = {2009}, month = {2009/10//}, pages = {60 - 63}, abstract = {Our earlier parallel algorithmics work on the parallel random-access-machine/model (PRAM) computation model led us to a PRAM-On-Chip vision: a comprehensive many-core system that can look to the programmer like the abstract PRAM model.
We introduced the eXplicit MultiThreaded (XMT) design and prototyped it in hardware and software. XMT comprises a programmer{\textquoteright}s workflow that advances from work-depth, a standard PRAM theory abstraction, to an XMT program, and, if desired, to its performance tuning. XMT provides strong performance for programs developed this way due to its hardware support of very fine-grained threads and the low overhead of handling them. XMT has also shown unique promise when it comes to ease-of-programming, the biggest problem that has limited the impact of all parallel systems to date. For example, teachability of XMT programming has been demonstrated at various levels from rising 6th graders to graduate students, and students in a freshman class were able to program 3 parallel sorting algorithms. The main purpose of the current paper is to stimulate discussion on the following somewhat open-ended question. Now that we have made significant progress on a system devoted to supporting PRAM-like programming, is it possible to incorporate our hardware support as an add-on into other current and future many-core systems? The paper considers a concrete proposal for doing that: recasting our work as a hardware-enhanced programmer{\textquoteright}s workflow {\textquotedblleft}module{\textquotedblright} that can then be essentially imported into the other systems.}, keywords = {PRAM-On-Chip, explicit multi-threading, parallel random-access machine, fine-grained threads, many-core systems, parallel sorting algorithms, easy-to-program system}, doi = {10.1109/ICCD.2009.5413174}, author = {Vishkin, Uzi} } @conference {17994, title = {Brief announcement: performance potential of an easy-to-program PRAM-on-chip prototype versus state-of-the-art processor}, booktitle = {Proceedings of the twenty-first annual symposium on Parallelism in algorithms and architectures}, year = {2009}, month = {2009///}, pages = {163 - 165}, author = {Caragea,G.C. and Saybasili,A. B and Wen,X. and Vishkin, Uzi} } @article {15077, title = {Collusion-free multiparty computation in the mediated model}, journal = {Advances in Cryptology-CRYPTO 2009}, year = {2009}, month = {2009///}, pages = {524 - 540}, abstract = {Collusion-free protocols prevent subliminal communication (i.e., covert channels) between parties running the protocol. In the standard communication model, if one-way functions exist, then protocols satisfying any reasonable degree of privacy cannot be collusion-free. To circumvent this impossibility, Alwen, shelat and Visconti (CRYPTO 2008) recently suggested the mediated model where all communication passes through a mediator. The goal is to design protocols where collusion-freeness is guaranteed as long as the mediator is honest, while standard security guarantees hold if the mediator is dishonest. In this model, they gave constructions of collusion-free protocols for commitments and zero-knowledge proofs in the two-party setting. We strengthen the definition of Alwen et al., and resolve the main open questions in this area by showing a collusion-free protocol (in the mediated model) for computing any multi-party functionality. }, doi = {10.1007/978-3-642-03356-8_31}, author = {Alwen,J. and Katz, Jonathan and Lindell,Y. and Persiano,G. and Shelat,A.
and Visconti,I.} } @conference {13095, title = {Combining multiple kernels for efficient image classification}, booktitle = {Applications of Computer Vision (WACV), 2009 Workshop on}, year = {2009}, month = {2009/12//}, pages = {1 - 8}, abstract = {We investigate the problem of combining multiple feature channels for the purpose of efficient image classification. Discriminative kernel based methods, such as SVMs, have been shown to be quite effective for image classification. To use these methods with several feature channels, one needs to combine base kernels computed from them. Multiple kernel learning is an effective method for combining the base kernels. However, the cost of computing the kernel similarities of a test image with each of the support vectors for all feature channels is extremely high. We propose an alternate method, where training data instances are selected, using AdaBoost, for each of the base kernels. A composite decision function, which can be evaluated by computing kernel similarities with respect to only these chosen instances, is learnt. This method significantly reduces the number of kernel computations required during testing. Experimental results on the benchmark UCI datasets, as well as on a challenging painting dataset, are included to demonstrate the effectiveness of our method.}, keywords = {multiple kernel learning, image classification, AdaBoost, base kernels, composite decision function, kernel similarity, support vector machines}, doi = {10.1109/WACV.2009.5403040}, author = {Siddiquie,B. and Vitaladevuni,S.N. and Davis, Larry S.} } @article {14482, title = {Common effect of antipsychotics on the biosynthesis and regulation of fatty acids and cholesterol supports a key role of lipid homeostasis in schizophrenia}, journal = {Schizophrenia Research}, volume = {108}, year = {2009}, month = {2009/03//}, pages = {134 - 142}, abstract = {For decades, the dopamine hypothesis has gained the most attention in an attempt to explain the origin and the symptoms of schizophrenia. While this hypothesis offers an explanation for the relationship between psychotic symptoms and dopamine kinetics, it does not provide a direct explanation of the etiology of schizophrenia, which remains poorly understood. Consequently, current antipsychotics that target neurotransmitter receptors have limited and inconsistent efficacy. To gain insights into the mechanism of action of these drugs, we studied the expression profile of 12,490 human genes in a cell line treated with 18 antipsychotics, and compared it to that of a library of 448 other compounds used in a variety of disorders. Analysis reveals a common effect of antipsychotics on the biosynthesis and regulation of fatty acids and cholesterol, which is discussed in the context of a lipid hypothesis where alterations in lipid homeostasis might underlie the pathogenesis of schizophrenia. This finding may help research aimed at the development of novel treatments for this devastating disease.}, keywords = {Antipsychotic action, Gene expression, Lipid homeostasis, Pathogenesis}, isbn = {0920-9964}, doi = {10.1016/j.schres.2008.11.025}, url = {http://www.sciencedirect.com/science/article/pii/S0920996408005306}, author = {Polymeropoulos,Mihael H. and Licamele,Louis and Volpi,Simona and Mack,Kendra and Mitkus,Shruti N. and Carstea,Eugene D.
and Getoor, Lise and Thompson,Andrew and Lavedan,Christian} } @mastersthesis {19596, title = {The complexity of flow analysis in higher-order languages}, year = {2009}, month = {2009/09//}, school = {BRANDEIS UNIVERSITY}, keywords = {Computer science, flow analysis, higher-order languages}, url = {http://gradworks.umi.com/33/69/3369445.html}, author = {David Van Horn} } @article {14603, title = {CTCF binding site classes exhibit distinct evolutionary, genomic, epigenomic and transcriptomic features}, journal = {Genome Biology}, volume = {10}, year = {2009}, month = {2009/11/18/}, pages = {R131 - R131}, abstract = {CTCF (CCCTC-binding factor) is an evolutionarily conserved zinc finger protein involved in diverse functions ranging from negative regulation of MYC, to chromatin insulation of the beta-globin gene cluster, to imprinting of the Igf2 locus. The 11 zinc fingers of CTCF are known to differentially contribute to the CTCF-DNA interaction at different binding sites. It is possible that the differences in CTCF-DNA conformation at different binding sites underlie CTCF{\textquoteright}s functional diversity. If so, the CTCF binding sites may belong to distinct classes, each compatible with a specific functional role.}, isbn = {1465-6906}, doi = {10.1186/gb-2009-10-11-r131}, url = {http://genomebiology.com/2009/10/11/R131}, author = {Essien,Kobby and Vigneau,Sebastien and Apreleva,Sofia and Singh,Larry N. and Bartolomei,Marisa S. and Hannenhalli, Sridhar} } @inbook {19635, title = {Efficient Robust Private Set Intersection}, booktitle = {Applied Cryptography and Network Security}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {125 - 142}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Computing Set Intersection privately and efficiently between two mutually mistrusting parties is an important basic procedure in the area of private data mining. Assuring robustness, namely, coping with potentially arbitrarily misbehaving (i.e., malicious) parties, while retaining protocol efficiency (rather than employing costly generic techniques) is an open problem. In this work the first solution to this problem is presented.}, keywords = {Coding and Information Theory, Computer Communication Networks, Cryptographic protocols, Data Encryption, Data Structures, Cryptology and Information Theory, Information Systems Applications (incl.Internet), Privacy Preserving Data Mining, Secure Two-party Computation, Set Intersection, Systems and Data Security}, isbn = {978-3-642-01956-2, 978-3-642-01957-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-01957-9_8}, author = {Dana Dachman-Soled and Malkin, Tal and Raykova, Mariana and Yung, Moti}, editor = {Abdalla, Michel and Pointcheval, David and Fouque, Pierre-Alain and Vergnaud, Damien} } @article {18209, title = {Enabling search over encrypted multimedia databases}, journal = {SPIE/IS\&T Media Forensics and Security}, year = {2009}, month = {2009///}, pages = {7254 - 18}, abstract = {Performing information retrieval tasks while preserving data confidentiality is a desirable capability when a database is stored on a server maintained by a third-party service provider. This paper addresses the problem of enabling content-based retrieval over encrypted multimedia databases.
Search indexes, along with multimedia documents, are first encrypted by the content owner and then stored onto the server. Through jointly applying cryptographic techniques, such as order preserving encryption and randomized hash functions, with image processing and information retrieval techniques, secure indexing schemes are designed to provide both privacy protection and rank-ordered search capability. Retrieval results on an encrypted color image database and security analysis of the secure indexing schemes under different attack models show that data confidentiality can be preserved while retaining very good retrieval performance. This work has promising applications in secure multimedia management. }, author = {Lu,W. and Swaminathan,A. and Varna,A.L. and Wu,M.} } @conference {18941, title = {Energy loss in MEMS resonators and the impact on inertial and RF devices}, year = {2009}, month = {2009/06//}, pages = {688 - 695}, abstract = {In this paper, we review the current understanding of energy loss mechanisms in micromachined (MEMS and NEMS) devices. We describe the importance of high quality factor (Q) to the performance of MEMS gyros and MEMS resonators used in radio-frequency applications.}, keywords = {energy loss, gyroscopes, inertial devices, MEMS gyros, MEMS resonators, micromachined devices, micromechanical resonators, NEMS devices, Q-factor, quality factor, radiofrequency applications, RF devices}, doi = {10.1109/SENSOR.2009.5285418}, author = {Weinberg,M. and Candler,R. and Chandorkar,S. and Varsanik,J. and Kenny,T. and Duwel,A.} } @article {18213, title = {Fingerprinting Compressed Multimedia Signals}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {4}, year = {2009}, month = {2009/09//}, pages = {330 - 345}, abstract = {Digital fingerprinting is a technique to deter unauthorized redistribution of multimedia content by embedding a unique identifying signal in each legally distributed copy. The embedded fingerprint can later be extracted and used to trace the originator of an unauthorized copy. A group of users may collude and attempt to create a version of the content that cannot be traced back to any of them. As multimedia data is commonly stored in compressed form, this paper addresses the problem of fingerprinting compressed signals. Analysis is carried out to show that due to the quantized nature of the host signal and the embedded fingerprint, directly extending traditional fingerprinting techniques for uncompressed signals to the compressed case leads to low collusion resistance. To overcome this problem and improve the collusion resistance, a new technique for fingerprinting compressed signals called Anti-Collusion Dither (ACD) is proposed, whereby a random dither signal is added to the compressed host before embedding so as to make the effective host signal appear more continuous. The proposed technique is shown to reduce the accuracy with which attackers can estimate the host signal, and from an information theoretic perspective, the proposed ACD technique increases the maximum number of users that can be supported by the fingerprinting system under a given attack.
Both analytical and experimental studies confirm that the proposed technique increases the probability of identifying a guilty user and can approximately quadruple the collusion resistance compared to conventional Gaussian fingerprinting.}, keywords = {anti-collusion dither; collusion resistance; compressed domain fingerprinting; copy protection; data compression; digital fingerprinting; digital rights management; Gaussian fingerprinting; multimedia communication; video coding}, isbn = {1556-6013}, doi = {10.1109/TIFS.2009.2025860}, author = {Varna,A.L. and He,Shan and Swaminathan,A. and M. Wu} } @conference {16485, title = {Flexible and efficient querying and ranking on hyperlinked data sources}, booktitle = {Proceedings of the 12th International Conference on Extending Database Technology: Advances in Database Technology}, year = {2009}, month = {2009///}, pages = {553 - 564}, author = {Varadarajan,R. and Hristidis,V. and Raschid, Louiqa and Vidal,M. E and Ib{\'a}{\~n}ez,L. and Rodr{\'\i}guez-Drumond,H.} } @inbook {12536, title = {Knowledge Extraction from Surveillance Sensors}, booktitle = {Wiley Handbook of Science and Technology for Homeland Security}, year = {2009}, month = {2009///}, publisher = {John Wiley \& Sons, Inc.}, organization = {John Wiley \& Sons, Inc.}, keywords = {multi-camera tracking, multi-modal fusion, recognition, sensor networks, Surveillance}, isbn = {9780470087923}, url = {http://onlinelibrary.wiley.com/doi/10.1002/9780470087923.hhs510/abstract;jsessionid=D0D752EF5D04327BE003BDFBD2F96134.d01t03}, author = {Chellapa, Rama and Veeraraghavan,Ashok and Sankaranarayanan,Aswin C.} } @article {12146, title = {Maturing Software Engineering Knowledge through Classifications: A Case Study on Unit Testing Techniques}, journal = {Software Engineering, IEEE Transactions on}, volume = {35}, year = {2009}, month = {2009/08//july}, pages = {551 - 565}, abstract = {Classification makes a significant contribution to advancing knowledge in both science and engineering. It is a way of investigating the relationships between the objects to be classified and identifies gaps in knowledge. Classification in engineering also has a practical application; it supports object selection. Classifications can help mature software engineering knowledge, as they constitute an organized structure of knowledge items. To date, there have been few attempts at classification in software engineering. In this research, we examine how useful classifications in software engineering are for advancing knowledge by trying to classify testing techniques. The paper presents a preliminary classification of a set of unit testing techniques. To obtain this classification, we enacted a generic process for developing useful software engineering classifications.
The proposed classification has been proven useful for maturing knowledge about testing techniques, and therefore software engineering (SE), as it helps to: 1) provide a systematic description of the techniques, 2) understand testing techniques by studying the relationships among techniques (measured in terms of differences and similarities), 3) identify potentially useful techniques that do not yet exist by analyzing gaps in the classification, and 4) support practitioners in testing technique selection by matching technique characteristics to project characteristics.}, keywords = {testing technique classification; matching technique characteristic; project characteristic; software engineering knowledge; software testing techniques; program testing; unit testing; software engineering}, isbn = {0098-5589}, doi = {10.1109/TSE.2009.13}, author = {Vegas,S. and Juristo,N. and Basili, Victor R.} } @article {17990, title = {Mesh-of-Trees and Alternative Interconnection Networks for Single-Chip Parallelism}, journal = {Very Large Scale Integration (VLSI) Systems, IEEE Transactions on}, volume = {17}, year = {2009}, month = {2009/10//}, pages = {1419 - 1432}, abstract = {In single-chip parallel processors, it is crucial to implement a high-throughput low-latency interconnection network to connect the on-chip components, especially the processing units and the memory units. In this paper, we propose a new mesh of trees (MoT) implementation of the interconnection network and evaluate it relative to metrics such as wire complexity, total register count, single switch delay, maximum throughput, tradeoffs between throughput and latency, and post-layout performance. We show that on-chip interconnection networks can provide higher bandwidth between processors and shared first-level cache than previously considered possible, facilitating greater scalability of memory architectures that require such bandwidth. MoT is also compared, both analytically and experimentally, to some other traditional network topologies, such as hypercube, butterfly, fat trees and butterfly fat trees. When we evaluate a 64-terminal MoT network at 90-nm technology, concrete results show that MoT provides higher throughput and lower latency especially when the input traffic (or the on-chip parallelism) is high, at comparable area. A recurring problem in networking and communication is that of achieving good sustained throughput in contrast to just high theoretical peak performance that does not materialize for typical workloads. Our quantitative results demonstrate a clear advantage of the proposed MoT network in the context of single-chip parallel processing.}, keywords = {90 nm; wire complexity; multiprocessor interconnection networks; network-on-chip; parallel processing; single-chip parallel processor; single switch delay; shared first-level cache; memory units; mesh-of-trees; network topologies; on-chip parallelism; high-throughput low-latency interconnection network}, isbn = {1063-8210}, doi = {10.1109/TVLSI.2008.2003999}, author = {Balkan,A.O. and Gang Qu and Vishkin, Uzi} } @conference {18241, title = {Modeling and analysis of content identification}, booktitle = {Multimedia and Expo, 2009. ICME 2009. IEEE International Conference on}, year = {2009}, month = {2009/07/28/3}, pages = {1528 - 1531}, abstract = {Content fingerprinting provides a compact content-based representation of a multimedia document. An important application of fingerprinting is the identification of modified copies of the original media content.
These modifications may be incidental changes that occur during the usage of multimedia, or intentional modifications made by an adversary to avoid detection. Currently, the effectiveness of content identification techniques is often assessed through benchmark databases. To complement these experimental performance evaluations, this paper develops a theoretical framework for analyzing content identification techniques. Tools from decision theory and game theory are exploited to gain insights toward optimal system design and parameter selection.}, keywords = {benchmark databases; content identification techniques; content-based representation; decision theory; game theory; fingerprinting; fingerprint identification; multimedia document; optimal system design; decision systems}, doi = {10.1109/ICME.2009.5202795}, author = {Varna,A.L. and M. Wu} } @conference {18243, title = {Modeling and analysis of ordinal ranking in content fingerprinting}, booktitle = {Information Forensics and Security, 2009. WIFS 2009. First IEEE International Workshop on}, year = {2009}, month = {2009/12//}, pages = {116 - 120}, abstract = {Content fingerprinting provides a compact representation of multimedia objects for copy detection. This paper analyzes the robustness of the ordinal ranking module frequently used in content fingerprinting by examining the changes in ranks as local variations are introduced in feature values. Closed-form expressions to measure such sensitivity are derived when feature values are jointly Gaussian-distributed. The results show that sensitivity depends on the strength of local variation, the total number of blocks, and the correlations among block-based feature values. Experiments with both synthesized data and image data validate the analysis and provide interesting insights, inspiring an approach to reduce the sensitivity.}, keywords = {content fingerprinting; copy detection; Gaussian-distributed features; Gaussian processes; fingerprint identification; multimedia objects; multimedia systems; object detection; ordinal ranking}, doi = {10.1109/WIFS.2009.5386472}, author = {Chuang,Wei-Hong and Varna,A.L. and M. Wu} } @article {17917, title = {Modeling and visualization of human activities for multicamera networks}, journal = {EURASIP Journal on Image and Video Processing}, volume = {2009}, year = {2009}, month = {2009///}, author = {Sankaranarayanan,A. C and Patro,R. and Turaga,P. and Varshney, Amitabh and Chellapa, Rama} } @conference {18244, title = {Modeling content fingerprints using Markov random fields}, booktitle = {Information Forensics and Security, 2009. WIFS 2009. First IEEE International Workshop on}, year = {2009}, month = {2009/12//}, pages = {111 - 115}, abstract = {Content fingerprints are widely employed for identifying multimedia in various applications. A {\textquotedblleft}fingerprint{\textquotedblright} of a video or audio is a short signature that captures unique characteristics of the signal and can be used to perform robust identification. Several fingerprinting techniques have been proposed in the literature and are often evaluated using benchmark databases. To complement these experimental evaluations, this paper develops a theoretical model for content fingerprints and evaluates the identification accuracy. Fingerprints and the noise are modeled as Markov random fields and the optimal decision rule for matching is derived.
An algorithm to compute the probability of correct detection and the false alarm rate by estimating the density of states is described. Numerical results are provided for a model of a block based binary fingerprinting scheme and the influence of the fingerprint correlation and the noise on the detection accuracy is studied.}, keywords = {Markov random field; block based binary fingerprinting; content fingerprint; fingerprint correlation; multimedia; noise; optimal decision rule; Markov processes; multimedia systems; probability; security of data}, doi = {10.1109/WIFS.2009.5386471}, author = {Varna,A.L. and M. Wu} } @article {17993, title = {Optical interconnect structure in a computer system and method of transporting data between processing elements and memory through the optical interconnect structure}, volume = {10/529,310}, year = {2009}, month = {2009/03/17/}, abstract = {A multi-chip processor/memory arrangement replacing a large computer chip includes a number of modules each including processing elements, registers, and/or memories interconnected by an optical interconnection fabric providing an all-to-all interconnection between the chips, so that the memory cells on each chip represent a portion of shared memory. The optical interconnect fabric is responsible for transporting data between the chips while processing elements on each chip dominate processing. Each chip is manufactured in mass production so that the entire processor/memory arrangement is fabricated in an inexpensive and simplified technology process. The optical communication fabric is based on waveguide technology and includes a number of waveguides, the layout of which follows certain constraints. The waveguides can intersect each other in the single plane, or alternatively, a double layer of waveguide structures and bent over approach may be used. Specific layout patterns of...}, url = {http://www.google.com/patents?id=wgS6AAAAEBAJ}, author = {Vishkin, Uzi}, editor = {University of Maryland} } @article {14588, title = {PTM-Switchboard--a database of posttranslational modifications of transcription factors, the mediating enzymes and target genes}, journal = {Nucleic Acids Research}, volume = {37}, year = {2009}, month = {2009/01//}, pages = {D66 - D71}, isbn = {0305-1048}, doi = {10.1093/nar/gkn731}, url = {http://nar.oxfordjournals.org/content/37/suppl_1/D66.short}, author = {Everett,L. and Vo,A. and Hannenhalli, Sridhar} } @article {12515, title = {Rate-Invariant Recognition of Humans and Their Activities}, journal = {Image Processing, IEEE Transactions on}, volume = {18}, year = {2009}, month = {2009/06//}, pages = {1326 - 1339}, abstract = {Pattern recognition in video is a challenging task because of the multitude of spatio-temporal variations that occur in different videos capturing the exact same event. While traditional pattern-theoretic approaches account for the spatial changes that occur due to lighting and pose, very little has been done to address the effect of temporal rate changes in the executions of an event. In this paper, we provide a systematic model-based approach to learn the nature of such temporal variations (time warps) while simultaneously allowing for the spatial variations in the descriptors. We illustrate our approach for the problem of action recognition and provide experimental justification for the importance of accounting for rate variations in action recognition.
The model is composed of a nominal activity trajectory and a function space capturing the probability distribution of activity-specific time warping transformations. We use the square-root parameterization of time warps to derive geodesics, distance measures, and probability distributions on the space of time warping functions. We then design a Bayesian algorithm which treats the execution rate function as a nuisance variable and integrates it out using Monte Carlo sampling, to generate estimates of class posteriors. This approach allows us to learn the space of time warps for each activity while simultaneously capturing other intra- and interclass variations. Next, we discuss a special case of this approach which assumes a uniform distribution on the space of time warping functions and show how computationally efficient inference algorithms may be derived for this special case. We discuss the relative advantages and disadvantages of both approaches and show their efficacy using experiments on gait-based person identification and activity recognition.}, keywords = {Bayesian algorithm; Monte Carlo sampling; action recognition; activity-specific time warping transformations; computationally efficient inference algorithms; distance measures; execution rate function; function space; gait-based person identification; geodesics; Models, Statistical; Monte Carlo Method; Movement; Pattern Recognition, Automated; Video Recording}, isbn = {1057-7149}, doi = {10.1109/TIP.2009.2017143}, author = {Veeraraghavan,A. and Srivastava, A. and Roy-Chowdhury, A.K. and Chellapa, Rama} } @article {17926, title = {Saliency-guided lighting}, journal = {IEICE TRANSACTIONS on Information and Systems}, volume = {2}, year = {2009}, month = {2009///}, pages = {369 - 373}, author = {Lee,C. H and Kim,Y. and Varshney, Amitabh} } @conference {17911, title = {Salient Clustering for View-dependent Multiresolution Rendering}, booktitle = {Computer Graphics and Image Processing (SIBGRAPI), 2009 XXII Brazilian Symposium on}, year = {2009}, month = {2009/10//}, pages = {56 - 63}, abstract = {Perceptual information is quickly gaining importance in mesh representation, analysis and rendering. User studies, eye tracking and other techniques are able to provide ever more useful insights for many user-centric systems, which form the bulk of computer graphics applications. In this work we build upon the concept of Mesh Saliency - an automatic measure of visual importance for triangle meshes based on models of low-level human visual attention - applying it to the problem of mesh segmentation and view-dependent rendering. We introduce a technique for segmentation that partitions an object into a set of face clusters, each encompassing a group of locally interesting features; Mesh Saliency is incorporated in a propagative mesh clustering framework, guiding cluster seed selection and triangle propagation costs and leading to a convergence of face clusters around perceptually important features. We compare our technique with different fully automatic segmentation algorithms, showing that it provides similar or better segmentation without the need for user input.
We illustrate application of our clustering results through a saliency-guided view-dependent rendering system, achieving significant frame rate increases with little loss of visual detail.}, keywords = {automatic segmentation algorithms; cluster analysis; computer graphics; face clusters; low-level human visual attention; mesh representation; mesh saliency; mesh segmentation; multiresolution rendering; pattern clustering; perceptual information; propagative mesh clustering framework; rendering (computer graphics); salient clustering; seed selection; user centred design; user-centric systems; view dependent rendering; image resolution; image segmentation}, doi = {10.1109/SIBGRAPI.2009.34}, author = {Barni,R. and Comba,J. and Varshney, Amitabh} } @conference {18275, title = {Secure image retrieval through feature protection}, booktitle = {Acoustics, Speech and Signal Processing, 2009. ICASSP 2009. IEEE International Conference on}, year = {2009}, month = {2009/04//}, pages = {1533 - 1536}, abstract = {This paper addresses the problem of image retrieval from an encrypted database, where data confidentiality is preserved both in the storage and retrieval process. The paper focuses on image feature protection techniques which enable similarity comparison among protected features. By utilizing both signal processing and cryptographic techniques, three schemes are investigated and compared, including bit-plane randomization, random projection, and randomized unary encoding. Experimental results show that secure image retrieval can achieve comparable retrieval performance to conventional image retrieval techniques without revealing information about image content. This work enriches the area of secure information retrieval and can find applications in secure online services for images and videos.}, keywords = {bit-plane randomization; content-based retrieval; cryptographic techniques; cryptography; data confidentiality; data storage; encrypted database; feature extraction; image databases; image feature protection; image retrieval; random projection; randomized unary encoding; signal processing}, doi = {10.1109/ICASSP.2009.4959888}, author = {Lu,Wenjun and Varna,A.L. and Swaminathan,A. and M. Wu} } @article {15082, title = {Signature schemes with bounded leakage resilience}, journal = {Advances in Cryptology{\textendash}ASIACRYPT 2009}, year = {2009}, month = {2009///}, pages = {703 - 720}, abstract = {A leakage-resilient cryptosystem remains secure even if arbitrary, but bounded, information about the secret key (and possibly other internal state information) is leaked to an adversary. Denote the length of the secret key by n. We show: {\textbullet} A full-fledged signature scheme tolerating leakage of n - n^ε bits of information about the secret key (for any constant ε > 0), based on general assumptions. {\textbullet} A one-time signature scheme, based on the minimal assumption of one-way functions, tolerating leakage of (1/4 - ε)n bits of information about the signer{\textquoteright}s entire state. {\textbullet} A more efficient one-time signature scheme, which can be based on several specific assumptions, tolerating leakage of (1/2 - ε)n bits of information about the signer{\textquoteright}s entire state. The latter two constructions extend to give leakage-resilient t-time signature schemes. All the above constructions are in the standard model.
}, doi = {10.1007/978-3-642-10366-7_41}, author = {Katz, Jonathan and Vaikuntanathan,V.} } @article {15083, title = {Smooth projective hashing and password-based authenticated key exchange from lattices}, journal = {Advances in Cryptology{\textendash}ASIACRYPT 2009}, year = {2009}, month = {2009///}, pages = {636 - 652}, abstract = {We describe a public-key encryption scheme based on lattices {\textemdash} specifically, based on the hardness of the learning with errors (LWE) problem {\textemdash} that is secure against chosen-ciphertext attacks while admitting (a variant of) smooth projective hashing. This encryption scheme suffices to construct a protocol for password-based authenticated key exchange (PAKE) that can be proven secure based on the LWE assumption in the standard model. We thus obtain the first PAKE protocol whose security relies on a lattice-based assumption.}, doi = {10.1007/978-3-642-10366-7_37}, author = {Katz, Jonathan and Vaikuntanathan,V.} } @article {17992, title = {Spawn-join instruction set architecture for providing explicit multithreading}, volume = {10/236,934}, year = {2009}, month = {2009/04/21/}, abstract = {The invention presents a unique computational paradigm that provides the tools to take advantage of the parallelism inherent in parallel algorithms to the full spectrum from algorithms through architecture to implementation. The invention provides a new processing architecture that extends the standard instruction set of the conventional uniprocessor architecture. The architecture used to implement this new computational paradigm includes a thread control unit (34), a spawn control unit (38), and an enabled instruction memory (50). The architecture initiates multiple threads and executes them in parallel. Control of the threads is provided such that the threads may be suspended or allowed to execute each at its own pace.}, url = {http://www.google.com/patents?id=YQTDAAAAEBAJ}, author = {Vishkin, Uzi} } @article {16062, title = {The Story of One: Humanity scholarship with visualization and text analysis}, journal = {Relation}, volume = {10}, year = {2009}, month = {2009///}, pages = {8485}, author = {Clement,T. and Plaisant, Catherine and Vuillemot,R.} } @conference {13048, title = {Streamed learning: one-pass SVMs}, booktitle = {Proceedings of the 21st international joint conference on Artificial intelligence}, year = {2009}, month = {2009///}, pages = {1211 - 1216}, author = {Rai,P. and Daum{\'e}, Hal and Venkatasubramanian,S.} } @conference {13038, title = {Streaming for large scale NLP: Language modeling}, booktitle = {Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics}, year = {2009}, month = {2009///}, pages = {512 - 520}, author = {Goyal,A. and Daum{\'e}, Hal and Venkatasubramanian,S.} } @article {11909, title = {System and Method for Spatio-Temporal-Context Aware Interaction of Users ...}, volume = {12/267,921}, year = {2009}, month = {2009/05/14/}, abstract = {A multifunctional interaction system which is capable of spatio-temporal context localization of users and of communication of audio/video streams to an entity of interest defined by the user, includes a communication domain supporting a predefined localization service, a server associated with the communication domain, client devices, and a dynamically changing context database which is customized in accord with the dynamics of interaction sessions of client devices with the server.
The client communicates with the system to either request services therefrom or to send a message to the entity of interest. The system is provided with a panic alert mechanism which, upon actuation, transmits an audio/video data stream along with the client location tag, time stamp, and client ID, to a police precinct for prompt action.}, url = {http://www.google.com/patents?id=nZbGAAAAEBAJ}, author = {Agrawala, Ashok K. and Varshney, Amitabh and Almazan,Christian B.}, editor = {University of Maryland} } @article {17991, title = {SYSTEM AND METHOD FOR THREAD HANDLING IN MULTITHREADED PARALLEL COMPUTING OF NESTED THREADS}, volume = {12/158,004}, year = {2009}, month = {2009/05/14/}, abstract = {An Explicit Multi-Threading (XMT) system and method is provided for processing multiple spawned threads associated with SPAWN-type commands of an XMT program. The method includes executing a plurality of child threads by a plurality of TCUs including a first TCU executing a child thread which is allocated to it; completing execution of the child thread by the first TCU; announcing that the first TCU is available to execute another child thread; executing by a second TCU a parent child thread that includes a nested spawn-type command for spawning additional child threads of the plurality of child threads, wherein the parent child thread is related in a parent-child relationship to the child threads that are spawned in conjunction with the nested spawn-type command; assigning a thread ID (TID) to each child thread, wherein the TID is unique with respect to the other TIDs; and allocating a new child thread to the first TCU.}, url = {http://www.google.com/patents?id=qJjGAAAAEBAJ}, author = {Wen,Xingzhi and Vishkin, Uzi} } @article {18529, title = {Towards an Internet Connectivity Market}, volume = {GT-CS-09-01}, year = {2009}, month = {2009///}, institution = {Georgia Institute of Technology}, abstract = {Today{\textquoteright}s Internet achieves end-to-end connectivity through bilateral contracts between neighboring networks; unfortunately, this {\textquotedblleft}one size fits all{\textquotedblright} connectivity results in less efficient paths, unsold capacity and unmet demand, and sometimes catastrophic market failures that result in global disconnectivity. This paper presents the design and evaluation of MINT, a Market for Internet Transit. MINT is a connectivity market and corresponding set of protocols that allows ISPs to offer path segments on an open market. Edge networks bid for end-to-end paths, and a mediator matches bids for paths to collections of path segments that form end-to-end paths. MINT can be deployed using protocols that are present in today{\textquoteright}s routers, and it operates in parallel with the existing routing infrastructure and connectivity market. We present MINT{\textquoteright}s market model and protocol design; evaluate how MINT improves efficiency, the utility of edge networks, and the profits of transit networks; and how MINT can operate at Internet scale.}, url = {http://hdl.handle.net/1853/30622}, author = {Feamster, Nick and Hassan,U. and Sundaresan,S. and Valancius,V. and Johari,R. 
and Vazirani,V.} } @article {12523, title = {Unsupervised view and rate invariant clustering of video sequences}, journal = {Computer Vision and Image Understanding}, volume = {113}, year = {2009}, month = {2009/03//}, pages = {353 - 371}, abstract = {Videos play an ever increasing role in our everyday lives with applications ranging from news, entertainment, scientific research, security and surveillance. Coupled with the fact that cameras and storage media are becoming less expensive, this has resulted in people producing more video content than ever before. This necessitates the development of efficient indexing and retrieval algorithms for video data. Most state-of-the-art techniques index videos according to the global content in the scene such as color, texture, brightness, etc. In this paper, we discuss the problem of activity-based indexing of videos. To address the problem, first we describe activities as a cascade of dynamical systems, which significantly enhances the expressive power of the model while retaining many of the computational advantages of using dynamical models. Second, we also derive methods to incorporate view and rate-invariance into these models so that similar actions are clustered together irrespective of the viewpoint or the rate of execution of the activity. We also derive algorithms to learn the model parameters from a video stream and demonstrate how a single video sequence may be clustered into different clusters where each cluster represents an activity. Experimental results for five different databases show that the clusters found by the algorithm correspond to semantically meaningful activities.}, keywords = {Affine invariance, Cascade of linear dynamical systems, Rate invariance, Summarization, Surveillance, Video clustering, View invariance}, isbn = {1077-3142}, doi = {10.1016/j.cviu.2008.08.009}, url = {http://www.sciencedirect.com/science/article/pii/S1077314208001367}, author = {Turaga,Pavan and Veeraraghavan,Ashok and Chellapa, Rama} } @article {12846, title = {Using formal specifications to support testing}, journal = {ACM Computing Surveys}, volume = {41}, year = {2009}, month = {2009/02/01/}, pages = {1 - 76}, isbn = {03600300}, doi = {10.1145/1459352.1459354}, url = {http://dl.acm.org/citation.cfm?id=1459352.1459354}, author = {Hierons,Robert M. and Krause,Paul and L{\"u}ttgen,Gerald and Simons,Anthony J. H. and Vilkomir,Sergiy and Woodward,Martin R. and Zedan,Hussein and Bogdanov,Kirill and Bowen,Jonathan P. and Cleaveland, Rance and Derrick,John and Dick,Jeremy and Gheorghe,Marian and Harman,Mark and Kapoor,Kalpesh} } @article {17916, title = {Using Graphics Processors for High-Performance Computation and Visualization of Plasma Turbulence}, journal = {Computing in Science Engineering}, volume = {11}, year = {2009}, month = {2009/04//march}, pages = {52 - 59}, abstract = {Direct numerical simulation (DNS) of turbulence is computationally intensive and typically relies on some form of parallel processing.
The authors present techniques to map DNS computations to modern graphics processing units (GPUs), which are characterized by very high memory bandwidth and hundreds of SPMD (single-program-multiple-data) processors.}, keywords = {direct numerical simulation; graphics processors; data visualisation; high-performance computation; multiprocessing systems; nuclear engineering computing; numerical analysis; parallel processing; plasma turbulence; single-program-multiple-data processing units; turbulence visualization}, isbn = {1521-9615}, doi = {10.1109/MCSE.2009.42}, author = {Stantchev,G. and Juba,D. and Dorland,W. and Varshney, Amitabh} } @conference {16057, title = {What{\textquoteright}s being said near "Martha"? Exploring name entities in literary text collections}, booktitle = {Visual Analytics Science and Technology, 2009. VAST 2009. IEEE Symposium on}, year = {2009}, month = {2009/10//}, pages = {107 - 114}, abstract = {A common task in literary analysis is to study characters in a novel or collection. Automatic entity extraction, text analysis and effective user interfaces facilitate character analysis. Using our interface, called POSvis, the scholar uses word clouds and self-organizing graphs to review vocabulary, to filter by part of speech, and to explore the network of characters located near characters under review. Further, visualizations show word usages within an analysis window (i.e. a book chapter), which can be compared with a reference window (i.e. the whole book). We describe the interface and report on an early case study with a humanities scholar.}, keywords = {POSvis; automatic entity extraction; character analysis; data visualisation; humanities scholar; information filtering; linguistics; literary analysis; literary text collection; name entity; part-of-speech filtering; self-organizing graph; text analysis; user interfaces; vocabulary; word clouds; word usage}, doi = {10.1109/VAST.2009.5333248}, author = {Vuillemot,R. and Clement,T. and Plaisant, Catherine and Kumar,A.} } @conference {13121, title = {Action recognition using ballistic dynamics}, booktitle = {Computer Vision and Pattern Recognition, 2008. CVPR 2008. IEEE Conference on}, year = {2008}, month = {2008/06//}, pages = {1 - 8}, abstract = {We present a Bayesian framework for action recognition through ballistic dynamics. Psycho-kinesiological studies indicate that ballistic movements form the natural units for human movement planning. The framework leads to an efficient and robust algorithm for temporally segmenting videos into atomic movements. Individual movements are annotated with person-centric morphological labels called ballistic verbs. This is tested on a dataset of interactive movements, achieving high recognition rates. The approach is also applied to a gesture recognition task, improving a previously reported recognition rate from 84\% to 92\%. Consideration of ballistic dynamics enhances the performance of the popular Motion History Image feature. We also illustrate the approach{\textquoteright}s general utility on real-world videos.
Experiments indicate that the method is robust to view, style and appearance variations.}, keywords = {action recognition; ballistic dynamics; Bayesian framework; gesture recognition; human movement planning; image motion analysis; image recognition; image segmentation; interactive movements; Motion History Image feature; person-centric morphological labels; psycho-kinesiological studies; video signal processing}, doi = {10.1109/CVPR.2008.4587806}, author = {Vitaladevuni,S.N. and Kellokumpu,V. and Davis, Larry S.} } @conference {18546, title = {Answering what-if deployment and configuration questions with wise}, booktitle = {Proceedings of the ACM SIGCOMM 2008 conference on Data communication}, series = {SIGCOMM {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {99 - 110}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Designers of content distribution networks often need to determine how changes to infrastructure deployment and configuration affect service response times when they deploy a new data center, change ISP peering, or change the mapping of clients to servers. Today, the designers use coarse, back-of-the-envelope calculations, or costly field deployments; they need better ways to evaluate the effects of such hypothetical "what-if" questions before the actual deployments. This paper presents What-If Scenario Evaluator (WISE), a tool that predicts the effects of possible configuration and deployment changes in content distribution networks. WISE makes three contributions: (1) an algorithm that uses traces from existing deployments to learn causality among factors that affect service response-time distributions; (2) an algorithm that uses the learned causal structure to estimate a dataset that is representative of the hypothetical scenario that a designer may wish to evaluate, and uses these datasets to predict future response-time distributions; (3) a scenario specification language that allows a network designer to easily express hypothetical deployment scenarios without being cognizant of the dependencies between variables that affect service response times. Our evaluation, both in a controlled setting and in a real-world field deployment at a large, global CDN, shows that WISE can quickly and accurately predict service response-time distributions for many practical What-If scenarios.}, keywords = {content distribution networks, performance modeling, what-if scenario evaluation}, isbn = {978-1-60558-175-0}, doi = {10.1145/1402958.1402971}, url = {http://doi.acm.org/10.1145/1402958.1402971}, author = {Tariq,Mukarram and Zeitoun,Amgad and Valancius,Vytautas and Feamster, Nick and Ammar,Mostafa} } @conference {17998, title = {An area-efficient high-throughput hybrid interconnection network for single-chip parallel processing}, booktitle = {Proceedings of the 45th annual Design Automation Conference}, series = {DAC {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {435 - 440}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Single-chip parallel processing requires high bandwidth between processors and on-chip memory modules. A recently proposed Mesh-of-Trees (MoT) network provides high throughput and low latency at relatively high area cost. In this paper, we introduce a hybrid MoT-BF network that combines the MoT network with the area-efficient butterfly network. We prove that the hybrid network reduces the MoT network{\textquoteright}s area cost.
Cycle-accurate simulation and post-layout results all show that significant area reduction can be achieved with negligible performance degradation, when operating at the same clock rate.}, keywords = {hybrid networks, mesh-of-trees, on-chip networks}, isbn = {978-1-60558-115-6}, doi = {10.1145/1391469.1391583}, url = {http://doi.acm.org/10.1145/1391469.1391583}, author = {Balkan,Aydin O. and Gang Qu and Vishkin, Uzi} } @article {13052, title = {A Bayesian statistics approach to multiscale coarse graining}, journal = {The Journal of chemical physics}, volume = {129}, year = {2008}, month = {2008///}, pages = {214114}, author = {Liu,P. and Shi,Q. and Daum{\'e}, Hal and Voth,G. A} } @conference {18681, title = {On the Comparison of Network Attack Datasets: An Empirical Analysis}, year = {2008}, month = {2008/12//}, pages = {39 - 48}, abstract = {Network malicious activity can be collected and reported by various sources using different attack detection solutions. The granularity of these solutions provides either very detailed information (intrusion detection systems, honeypots) or high-level trends (CAIDA, SANS). The problem for network security operators is often to select the sources of information to better protect their network. How much information from these sources is redundant and how much is unique? The goal of this paper is to show empirically that while some global attack events can be correlated across various sensors, the majority of incoming malicious activity has local specificities. This study presents a comparative analysis of four different attack datasets offering three different levels of granularity: 1) two high interaction honeynets deployed at two different locations (i.e., a corporate and an academic environment); 2) ATLAS which is a distributed network telescope from Arbor; and 3) Internet Protect{\texttrademark} which is a global alerting service from AT\&T.}, keywords = {ATLAS, distributed network telescope, Internet, intrusion detection systems, network attack datasets, network malicious activity, network security operators, security of data}, doi = {10.1109/HASE.2008.50}, author = {Berthier,R. and Korman,D. and Michel Cukier and Hiltunen,M. and Vesonder,G. and Sheleheda,D.} } @article {17932, title = {Confluent Volumetric Visualization of Gyrokinetic Turbulence}, journal = {Plasma Science, IEEE Transactions on}, volume = {36}, year = {2008}, month = {2008/08//}, pages = {1112 - 1113}, abstract = {Data from gyrokinetic turbulence codes are often difficult to visualize due to their high dimensionality, the nontrivial geometry of the underlying grids, and the vast range of spatial scales. We present an interactive visualization framework that attempts to address these issues. Images from a nonlinear gyrokinetic simulation are presented.}, keywords = {flow visualisation; gyrokinetic turbulence; nonlinear simulation; nontrivial geometry; plasma flow; plasma simulation; plasma turbulence; volumetric visualization}, isbn = {0093-3813}, doi = {10.1109/TPS.2008.924509}, author = {Stantchev,G. and Juba,D. and Dorland,W.
and Varshney, Amitabh} } @conference {19584, title = {Deciding kCFA is Complete for EXPTIME}, booktitle = {ICFP {\textquoteright}08 Proceedings of the 13th ACM SIGPLAN International Conference on Functional Programming}, series = {ICFP {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {275 - 282}, publisher = {ACM}, organization = {ACM}, abstract = {We give an exact characterization of the computational complexity of the kCFA hierarchy. For any k > 0, we prove that the control flow decision problem is complete for deterministic exponential time. This theorem validates empirical observations that such control flow analysis is intractable. It also provides more general insight into the complexity of abstract interpretation.}, keywords = {complexity, flow analysis}, isbn = {978-1-59593-919-7}, url = {http://doi.acm.org/10.1145/1411204.1411243}, author = {David Van Horn and Mairson, Harry G.} } @conference {18151, title = {A decision theoretic framework for analyzing binary hash-based content identification systems}, booktitle = {Proceedings of the 8th ACM workshop on Digital rights management}, series = {DRM {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {67 - 76}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Content identification has many applications, ranging from preventing illegal sharing of copyrighted content on video sharing websites, to automatic identification and tagging of content. Several content identification techniques based on watermarking or robust hashes have been proposed in the literature, but they have mostly been evaluated through experiments. This paper analyzes binary hash-based content identification schemes under a decision theoretic framework and presents a lower bound on the length of the hash required to correctly identify multimedia content that may have undergone modifications. A practical scheme for content identification is evaluated under the proposed framework. The results obtained through experiments agree very well with the performance suggested by the theoretical analysis.}, keywords = {content fingerprinting, content identification, decision theory}, isbn = {978-1-60558-290-0}, doi = {10.1145/1456520.1456532}, url = {http://doi.acm.org/10.1145/1456520.1456532}, author = {Varna,Avinash L. and Swaminathan,Ashwin and M. Wu} } @article {12904, title = {Dual role colonization factors connecting Vibrio cholerae{\textquoteright}s lifestyles in human and aquatic environments open new perspectives for combating infectious diseases}, journal = {Current Opinion in Biotechnology}, volume = {19}, year = {2008}, month = {2008/06//}, pages = {254 - 259}, abstract = {Vibrio cholerae exhibits two distinctive lifestyles, one inside the milieu of the human intestine and the other in the aquatic environment. Recently, the existence of V. cholerae ligands involved in colonization of both human intestine and environmental chitin surfaces via the same binding specificity has been shown. Such molecules, here named {\textquoteleft}dual role colonization factors (DRCFs){\textquoteright}, are an example of a tight connection between V. cholerae{\textquoteright}s two lifestyles. It is suggested that DRCFs and, more generally, bacterial factors and pathways having roles in pathogenesis and in life outside the human body may be promising targets for development of novel prophylactic or therapeutic interventions that may also affect V.
cholerae fitness in its environmental reservoirs.}, isbn = {0958-1669}, doi = {10.1016/j.copbio.2008.04.002}, url = {http://www.sciencedirect.com/science/article/pii/S0958166908000426}, author = {Vezzulli,Luigi and Guzm{\'a}n,Carlos A and Rita R Colwell and Pruzzo,Carla} } @article {18518, title = {A Dynamic Reputation Service for Spotting Spammers}, volume = {GT-CS-08-09}, year = {2008}, month = {2008///}, institution = {School of Computer Science, Georgia Tech}, abstract = {This paper presents the design, implementation, evaluation, and initial deployment of SpamSpotter, the first open, large-scale, real-time reputation system for filtering spam. Existing blacklists (e.g., SpamHaus) have trouble keeping pace with spammers{\textquoteright} increasing ability to send spam from {\textquotedblleft}fresh{\textquotedblright} IP addresses, and filters based purely on content are easily evadable. In contrast, SpamSpotter dynamically classifies email senders in real time based on their global sending behavior, rather than based on ephemeral features such as an IP address or the content of the message. In implementing SpamSpotter, we address significant challenges involving both dynamism (i.e., determining when to {\textquotedblleft}retrain{\textquotedblright} our dynamic classification algorithms) and scale (i.e., maintaining fast, accurate performance in the face of tremendous email message volume). We have evaluated the performance and accuracy of SpamSpotter using traces from a large email-hosting provider and a spam appliance vendor that receives 300 million messages a day. Our evaluation shows that SpamSpotter is scalable, fast, and accurate. SpamSpotter is also operational today: it will currently answer queries from existing spam filtering software (e.g., SpamAssassin) with only minor configuration changes.}, author = {Ramachandran,A. and Hao,S. and Khandelwal,H. and Feamster, Nick and Vempala,S.} } @conference {18567, title = {Fast monitoring of traffic subpopulations}, booktitle = {Proceedings of the 8th ACM SIGCOMM conference on Internet measurement}, series = {IMC {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {257 - 270}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Network accounting, forensics, security, and performance monitoring applications often need to examine detailed traces from subsets of flows ("subpopulations"), where the application desires flexibility in specifying the subpopulation (e.g., to detect a portscan, the application must observe many packets between a source and a destination with one packet to each port). However, the dynamism and volume of network traffic on many high-speed links necessitates traffic sampling, which adversely affects subpopulation monitoring: because many subpopulations of interest to operators are low-volume flows, conventional sampling schemes (e.g., uniform random sampling) miss much of the subpopulation{\textquoteright}s traffic. Today{\textquoteright}s routers and network devices provide scant support for monitoring specific traffic subpopulations. This paper presents the design, implementation, and evaluation of FlexSample, a traffic monitoring engine that dynamically extracts traffic from subpopulations that operators define using conditions on packet header fields. FlexSample uses a fast, flexible counter array to provide rough estimates of packets{\textquoteright} membership in respective subpopulations.
Based on these coarse estimates, FlexSample then makes per-packet sampling decisions to sample proportionately from each subpopulation (as specified by a network operator), subject to an overall sampling constraint. We apply FlexSample to extract subpopulations such as port scans and traffic to high-degree nodes and find that it is able to capture significantly more packets from these subpopulations than conventional approaches.}, keywords = {counters, flexsample, sampling, traffic statistics, traffic subpopulations}, isbn = {978-1-60558-334-1}, doi = {10.1145/1452520.1452551}, url = {http://doi.acm.org/10.1145/1452520.1452551}, author = {Ramachandran,Anirudh and Seetharaman,Srinivasan and Feamster, Nick and Vazirani,Vijay} } @article {18517, title = {Fishing for Phishing from the Network Stream}, volume = {GT-CS-08-08}, year = {2008}, month = {2008///}, institution = {Georgia Institute of Technology}, abstract = {Phishing is an increasingly prevalent social-engineering attack that attempts identity theft using spoofed Web pages of legitimate organizations. Unfortunately, current phishing detection methods are neither complete nor responsive because they rely on user reports, and many also require client-side software. Anti-phishing techniques could be more effective if they (1) could detect phishing attacks automatically from the network traffic; (2) could operate without cooperation from end-users. This paper performs a preliminary study to determine the feasibility of detecting phishing attacks in real-time, from the network traffic stream itself. We develop a model to identify the stages where in-network phishing detection is feasible and the data sources that can be analyzed to provide relevant information at each stage. Based on this model, we develop and evaluate a detection method based on features that exist in the network traffic itself and are correlated with confirmed phishing attacks.}, url = {http://hdl.handle.net/1853/25463}, author = {Ramachandran,A. and Feamster, Nick and Krishnamurthy,B. and Spatscheck,O. and Van der Merwe,J.} } @inbook {19586, title = {Flow Analysis, Linearity, and PTIME}, booktitle = {Static Analysis}, series = {Lecture Notes in Computer Science}, year = {2008}, month = {2008/01/01/}, pages = {255 - 269}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Flow analysis is a ubiquitous and much-studied component of compiler technology{\textemdash}and its variations abound. Amongst the most well known is Shivers{\textquoteright} 0CFA; however, the best known algorithm for 0CFA requires time cubic in the size of the analyzed program and is unlikely to be improved. Consequently, several analyses have been designed to approximate 0CFA by trading precision for faster computation. Henglein{\textquoteright}s simple closure analysis, for example, forfeits the notion of directionality in flows and enjoys an {\textquotedblleft}almost linear{\textquotedblright} time algorithm. But in making trade-offs between precision and complexity, what has been given up and what has been gained? Where do these analyses differ and where do they coincide? We identify a core language{\textemdash}the linear λ-calculus{\textemdash}where 0CFA, simple closure analysis, and many other known approximations or restrictions to 0CFA are rendered identical. Moreover, for this core language, analysis corresponds with (instrumented) evaluation.
Because analysis faithfully captures evaluation, and because the linear λ-calculus is complete for ptime, we derive ptime-completeness results for all of these analyses.}, keywords = {Logics and Meanings of Programs, Mathematical Logic and Formal Languages, Programming Languages, Compilers, Interpreters, Programming Techniques, software engineering}, isbn = {978-3-540-69163-1, 978-3-540-69166-2}, url = {http://link.springer.com/chapter/10.1007/978-3-540-69166-2_17}, author = {David Van Horn and Mairson, Harry G.}, editor = {Alpuente, Mar{\'\i}a and Vidal, Germ{\'a}n} } @conference {17996, title = {FPGA-based prototype of a PRAM-on-chip processor}, booktitle = {Proceedings of the 5th conference on Computing frontiers}, series = {CF {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {55 - 66}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {PRAM (Parallel Random Access Model) has been widely regarded as a desirable parallel machine model for many years, but it is also believed to be "impossible in reality." As the new billion-transistor processor era begins, the eXplicit Multi-Threading (XMT) PRAM-On-Chip project is attempting to design an on-chip parallel processor that efficiently supports PRAM algorithms. This paper presents the first prototype of the XMT architecture that incorporates 64 simple in-order processors operating at 75MHz. The microarchitecture of the prototype is described and the performance is studied with respect to some micro-benchmarks. Using cycle accurate emulation, the projected performance of an 800MHz XMT ASIC processor is compared with AMD Opteron 2.6GHz, which uses similar area as would a 64-processor ASIC version of the XMT prototype. The results suggest that a mere 800MHz XMT ASIC system outperforms AMD Opteron 2.6GHz, with speedups ranging between 1.57 and 8.56.}, keywords = {ease-of-programming, explicit multi-threading, on-chip parallel processor, Parallel algorithms, PRAM, XMT}, isbn = {978-1-60558-077-7}, doi = {10.1145/1366230.1366240}, url = {http://doi.acm.org/10.1145/1366230.1366240}, author = {Wen,Xingzhi and Vishkin, Uzi} } @article {12563, title = {From Videos to Verbs: Mining Videos for Activities using a cascade of dynamical systems (Supplemental Material)}, journal = {Relation}, volume = {10}, year = {2008}, month = {2008///}, pages = {9944}, author = {Turaga, P.K. and Veeraraghavan,A. and Chellapa, Rama} } @article {12903, title = {Global impact of Vibrio cholerae interactions with chitin}, journal = {Environmental Microbiology}, volume = {10}, year = {2008}, month = {2008/06/01/}, pages = {1400 - 1410}, abstract = {The interaction of Vibrio cholerae with chitin exemplifies for microbial ecology a successful bacteria{\textendash}substrate interaction with complex and significant influence on the lifestyle of the bacterium. Chitin is one of the most abundant polymers on earth and possibly the most abundant in the aquatic environment, where its association with V.~cholerae has provided the microorganism with a number of advantages, including food availability, adaptation to environmental nutrient gradients, tolerance to stress and protection from predators. Emergent properties of V.~cholerae{\textendash}chitin interactions occur at multiple hierarchical levels in the environment and include cell metabolic and physiological responses e.g.
chemotaxis, cell multiplication, induction of competence, biofilm formation, commensal and symbiotic relationship with higher organisms, cycling of nutrients, and pathogenicity for humans and aquatic animals. As factors mediating virulence of V.~cholerae for humans and aquatic animals derive from mechanisms of adaptation to its environment, at different levels of hierarchical scale, V.~cholerae interactions with chitin represent a useful model for examination of the role of primary habitat selection in the development of traits that have been identified as virulence factors in human disease.}, isbn = {1462-2920}, doi = {10.1111/j.1462-2920.2007.01559.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1462-2920.2007.01559.x/full}, author = {Pruzzo,Carla and Vezzulli,Luigi and Rita R Colwell} } @article {18492, title = {Hosting virtual networks on commodity hardware}, volume = {GT-CS-07-10}, year = {2008}, month = {2008///}, institution = {Georgia Institute of Technology}, abstract = {This paper describes Trellis, a software platform for hosting multiple virtual networks on shared commodity hardware. Trellis allows each virtual network to define its own topology, control protocols, and forwarding tables, which lowers the barrier for deploying custom services on an isolated, reconfigurable, and programmable network, while amortizing costs by sharing the physical infrastructure. Trellis synthesizes two container-based virtualization technologies, VServer and NetNS, as well as a new tunneling mechanism, EGRE, into a coherent platform that enables high-speed virtual networks. We describe the design and implementation of Trellis, including kernel-level performance optimizations, and evaluate its supported packet-forwarding rates against other virtualization technologies. We are in the process of upgrading the VINI facility to use Trellis. We also plan to release Trellis as part of MyVINI, a standalone software distribution that allows researchers and application developers to deploy their own virtual network hosting platforms.}, author = {Bhatia,S. and Motiwala,M. and Muhlbauer,W. and Valancius,V. and Bavier,A. and Feamster, Nick and Peterson,L. and Rexford,J.} } @article {17995, title = {An Immediate Concurrent Execution (ICE) Abstraction Proposal for Many-Cores}, journal = {Computer Science Research Works}, year = {2008}, month = {2008/12//}, abstract = {Settling on a simple abstraction that programmers aim at, and hardware and software systems people enable and support, is an important step towards convergence to a robust many-core platform. The current paper: (i) advocates incorporating a quest for the simplest possible abstraction in the debate on the future of many-core computers, (ii) suggests {\textquotedblleft}immediate concurrent execution (ICE){\textquotedblright} as a new abstraction, and (iii) argues that an XMT architecture is one possible demonstration of ICE providing an easy-to-program general-purpose many-core platform.
}, keywords = {abstraction, many-cores, parallelism, XMT architecture}, url = {http://drum.lib.umd.edu/handle/1903/8694}, author = {Vishkin, Uzi} } @conference {18585, title = {MINT: a Market for INternet Transit}, booktitle = {Proceedings of the 2008 ACM CoNEXT Conference}, series = {CoNEXT {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {70:1 - 70:6}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Today{\textquoteright}s Internet{\textquoteright}s routing paths are inefficient with respect to both connectivity and the market for interconnection. The former manifests itself via needlessly long paths, de-peering, etc. The latter arises because of a primitive market structure that results in unfulfilled demand and unused capacity. Today{\textquoteright}s networks make pairwise, myopic interconnection decisions based on business considerations that may not mirror considerations of the edge networks (or end systems) that would benefit from the existence of a particular interconnection. These bilateral contracts are also complex and difficult to enforce. This paper proposes MINT, a market structure and routing protocol suite that facilitates the sale and purchase of end-to-end Internet paths. We present MINT{\textquoteright}s structure, explain how it improves connectivity and market efficiency, explore the types of connectivity that might be exchanged (vs. today{\textquoteright}s "best effort" connectivity), and argue that MINT{\textquoteright}s deployment is beneficial to both stub networks and transit providers. We discuss research challenges, including the design both of the protocol that maintains information about connectivity and of the market clearing algorithms. Our preliminary evaluation shows that such a market quickly reaches equilibrium and exhibits price stability.}, isbn = {978-1-60558-210-8}, doi = {10.1145/1544012.1544082}, url = {http://doi.acm.org/10.1145/1544012.1544082}, author = {Valancius,Vytautas and Feamster, Nick and Johari,Ramesh and Vazirani,Vijay} } @article {12557, title = {Mixed state models for automatic target recognition and behavior analysis in video sequences}, journal = {Proceedings of SPIE}, volume = {6967}, year = {2008}, month = {2008/04/03/}, pages = {69670Q - 69670Q-11}, abstract = {Mixed state or hybrid state space systems are useful tools for various problems in computer vision. These systems model complicated system dynamics as a mixture of inherently simple sub-systems, with an additional mechanism to switch between the sub-systems. This approach of modeling using simpler systems allows for ease in learning the parameters of the system and in solving the inference problem. In this paper, we study the use of such mixed state space systems for problems in recognition and behavior analysis in video sequences. We begin with a dynamical system formulation for recognition of faces from a video. This system is used to introduce the simultaneous tracking and recognition paradigm that allows for improved performance in both tracking and recognition. We extend this framework to design a second system for verification of vehicles across non-overlapping views using structural and textural fingerprints for characterizing the identity of the target.
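A rough illustration of the mixed-state idea described in the abstract above, in which a discrete Markov switch selects among inherently simple continuous sub-systems: the following minimal Python sketch simulates such a system. The sub-system matrices, switch probabilities, and noise level are invented for illustration and are not taken from the paper.

import numpy as np

rng = np.random.default_rng(0)

# Two simple linear sub-systems x_{t+1} = A[k] @ x_t + noise.
A = [np.array([[0.99, -0.10], [0.10, 0.99]]),   # slow rotation
     np.array([[0.90,  0.00], [0.00, 0.90]])]   # contraction toward the origin
P = np.array([[0.95, 0.05],                     # Markov switching matrix
              [0.05, 0.95]])

x, k = np.array([1.0, 0.0]), 0
states, switches = [], []
for _ in range(200):
    k = rng.choice(2, p=P[k])                    # discrete state: pick a sub-system
    x = A[k] @ x + 0.01 * rng.standard_normal(2) # continuous state update
    states.append(x.copy())
    switches.append(k)

print("fraction of time in sub-system 0:", switches.count(0) / len(switches))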
Finally, we show the use of such modeling for tracking and behavior analysis of bees from video.}, isbn = {0277786X}, doi = {10.1117/12.777766}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/6967/1/69670Q_1?isAuthorized=no}, author = {Chellapa, Rama and Sankaranarayanan,Aswin C. and Veeraraghavan,Ashok} } @conference {13117, title = {Multi-resolution Tracking in Space and Time}, booktitle = {Computer Vision, Graphics \& Image Processing, 2008. ICVGIP {\textquoteright}08. Sixth Indian Conference on}, year = {2008}, month = {2008/12//}, pages = {352 - 358}, abstract = {This paper proposes efficient and robust methods for tracking a moving object at multiple spatial and temporal resolution levels. The efficiency comes from optimising the amounts of spatial and temporal data processed. The robustness results from multi-level coarse-to-fine state-space searching. Tracking across resolution levels incurs an accuracy-versus-speed trade-off. For example, tracking at higher resolutions incurs greater processing cost, while maintaining higher accuracy in estimating the position of the moving object. We propose a novel spatial multi-scale tracker that tracks at the optimal accuracy-versus-speed operating point. Next, we relax this requirement to propose a multi-resolution tracker that operates at a minimum acceptable performance level. Finally, we extend these ideas to a multi-resolution spatio-temporal tracker. We show results of extensive experimentation in support of the proposed approaches.}, keywords = {moving object tracking; multiresolution coarse-to-fine state-space searching; spatial multiscale tracker; spatio-temporal tracker; multilevel tracking; position estimation; motion analysis; image resolution; object detection}, doi = {10.1109/ICVGIP.2008.74}, author = {Roy,S.D. and Tran,S.D. and Davis, Larry S. and Vikram,B.S.} } @article {18707, title = {Mutations in the Hydrophobic Core of Ubiquitin Differentially Affect Its Recognition by Receptor Proteins}, journal = {Journal of Molecular Biology}, volume = {375}, year = {2008}, month = {2008/01/25/}, pages = {979 - 996}, abstract = {Ubiquitin (Ub) is one of the most highly conserved signaling proteins in eukaryotes. In carrying out its myriad functions, Ub conjugated to substrate proteins interacts with dozens of receptor proteins that link the Ub signal to various biological outcomes. Here we report mutations in conserved residues of Ub{\textquoteright}s hydrophobic core that have surprisingly potent and specific effects on molecular recognition. Mutant Ubs bind tightly to the Ub-associated domain of the receptor proteins Rad23 and hHR23A but fail to bind the Ub-interacting motif present in the receptors Rpn10 and S5a. Moreover, chains assembled on target substrates with mutant Ubs are unable to support substrate degradation by the proteasome in vitro or sustain viability of yeast cells. The mutations have relatively little effect on Ub{\textquoteright}s overall structure but reduce its rigidity and cause a slight displacement of the C-terminal β-sheet, thereby compromising association with the Ub-interacting motif but not with Ub-associated domains.
These studies emphasize an unexpected role for Ub{\textquoteright}s core in molecular recognition and suggest that the diversity of protein{\textendash}protein interactions in which Ub engages placed enormous constraints on its evolvability.}, keywords = {hydrophobic core mutation, molecular recognition, proteasomal degradation, ubiquitin, ubiquitin receptors}, isbn = {0022-2836}, doi = {10.1016/j.jmb.2007.11.016}, url = {http://www.sciencedirect.com/science/article/pii/S0022283607014763}, author = {Haririnia,Aydin and Verma,Rati and Purohit,Nisha and Twarog,Michael Z. and Deshaies,Raymond J. and Bolon,Dan and Fushman, David} } @article {12545, title = {Object Detection, Tracking and Recognition for Multiple Smart Cameras}, journal = {Proceedings of the IEEE}, volume = {96}, year = {2008}, month = {2008/10//}, pages = {1606 - 1624}, abstract = {Video cameras are among the most commonly used sensors in a large number of applications, ranging from surveillance to smart rooms for videoconferencing. There is a need to develop algorithms for tasks such as detection, tracking, and recognition of objects, specifically using distributed networks of cameras. The projective nature of imaging sensors provides ample challenges for data association across cameras. We first discuss the nature of these challenges in the context of visual sensor networks. Then, we show how real-world constraints can be favorably exploited in order to tackle these challenges. Examples of real-world constraints are (a) the presence of a world plane, (b) the presence of a three-dimensional scene model, (c) consistency of motion across cameras, and (d) color and texture properties. In this regard, the main focus of this paper is towards highlighting the efficient use of the geometric constraints induced by the imaging devices to derive distributed algorithms for target detection, tracking, and recognition. Our discussions are supported by several examples drawn from real applications. Lastly, we also describe several potential research problems that remain to be addressed.}, keywords = {multiple smart cameras; visual sensor networks; distributed cameras; distributed algorithms; geometric constraints; imaging sensors; data association; object detection; object tracking; object recognition; target detection; target tracking; sensor fusion; three-dimensional scene model; video cameras; colour texture; intelligent sensors}, isbn = {0018-9219}, doi = {10.1109/JPROC.2008.928758}, author = {Sankaranarayanan,A. C and Veeraraghavan,A. and Chellapa, Rama} } @article {15013, title = {Parallel Algorithms for Volumetric Surface Construction}, year = {2008}, month = {2008///}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Large scale scientific data sets are appearing at an increasing rate whose sizes can range from hundreds of gigabytes to tens of terabytes. Isosurface extraction and rendering is an important visualization technique that enables the visual exploration of such data sets using surfaces. However, the computational requirements of this approach are substantial, which in general prevent the interactive rendering of isosurfaces for large data sets. Therefore, parallel and distributed computing techniques offer a promising direction to deal with the corresponding computational challenges.
In this chapter, we give a brief historical perspective of the isosurface visualization approach, and describe the basic sequential and parallel techniques used to extract and render isosurfaces with a particular focus on out-of-core techniques. For parallel algorithms, we assume a distributed memory model in which each processor has its own local disk, and processors communicate and exchange data through an interconnection network. We present a general framework for evaluating parallel isosurface extraction algorithms and describe the related best known parallel algorithms. We also describe the main parallel strategies used to handle isosurface rendering, pointing out the limitations of these strategies.}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.122.4472}, author = {JaJa, Joseph F. and Shi,Qingmin and Varshney, Amitabh} } @article {17930, title = {Parallel, stochastic measurement of molecular surface area}, journal = {Journal of Molecular Graphics and Modelling}, volume = {27}, year = {2008}, month = {2008/08//}, pages = {82 - 87}, abstract = {Biochemists often wish to compute surface areas of proteins. A variety of algorithms have been developed for this task, but they are designed for traditional single-processor architectures. The current trend in computer hardware is towards increasingly parallel architectures for which these algorithms are not well suited. We describe a parallel, stochastic algorithm for molecular surface area computation that maps well to the emerging multi-core architectures. Our algorithm is also progressive, providing a rough estimate of surface area immediately and refining this estimate as time goes on. Furthermore, the algorithm generates points on the molecular surface which can be used for point-based rendering. We demonstrate a GPU implementation of our algorithm and show that it compares favorably with several existing molecular surface computation programs, giving fast estimates of the molecular surface area with good accuracy.}, keywords = {gpu, Molecular surface, Parallel, Progressive, Quasi-random, Stochastic}, isbn = {1093-3263}, doi = {10.1016/j.jmgm.2008.03.001}, url = {http://www.sciencedirect.com/science/article/pii/S1093326308000387}, author = {Juba,Derek and Varshney, Amitabh} } @article {18592, title = {Path splicing}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {38}, year = {2008}, month = {2008/08//}, pages = {27 - 38}, abstract = {We present path splicing, a new routing primitive that allows network paths to be constructed by combining multiple routing trees ("slices") to each destination over a single network topology. Path splicing allows traffic to switch trees at any hop en route to the destination. End systems can change the path on which traffic is forwarded by changing a small number of additional bits in the packet header. We evaluate path splicing for intradomain routing using slices generated from perturbed link weights and find that splicing achieves reliability that approaches the best possible using a small number of slices, for only a small increase in latency and no adverse effects on traffic in the network. In the case of interdomain routing, where splicing derives multiple trees from edges in alternate backup routes, path splicing achieves near-optimal reliability and can provide significant benefits even when only a fraction of ASes deploy it.
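The path-splicing abstract above describes a concrete mechanism: several routing trees ("slices") are computed per destination from independently perturbed link weights, and a few extra bits in the packet header pick which slice to follow at each hop. The following minimal Python sketch captures that idea under simplifying assumptions; the toy topology and the helper names (next_hops, make_slices, forward) are hypothetical, not from the paper.

import heapq, random

def next_hops(graph, dst):
    """Next hop toward dst for every node (Dijkstra run outward from dst);
    graph is {u: {v: weight}} with entries for both directions."""
    dist, hop = {dst: 0.0}, {}
    pq = [(0.0, dst, None)]
    while pq:
        d, u, via = heapq.heappop(pq)
        if d > dist.get(u, float("inf")):
            continue                      # stale queue entry
        if via is not None:
            hop[u] = via                  # first hop on u's path to dst
        for v, w in graph[u].items():
            nd = d + w
            if nd < dist.get(v, float("inf")):
                dist[v] = nd
                heapq.heappush(pq, (nd, v, u))
    return hop

def make_slices(graph, dst, k, jitter=0.5):
    """k routing trees toward dst, each built from perturbed link weights."""
    slices = []
    for _ in range(k):
        g = {u: {v: w * (1 + random.random() * jitter)
                 for v, w in nbrs.items()} for u, nbrs in graph.items()}
        slices.append(next_hops(g, dst))
    return slices

def forward(slices, src, dst, splice_bits):
    """Follow, at every hop, the slice named by the packet's splicing bits."""
    node, path = src, [src]
    for s in splice_bits:
        if node == dst:
            break
        node = slices[s][node]
        path.append(node)
    return path

random.seed(1)
g = {"a": {"b": 1, "c": 2}, "b": {"a": 1, "d": 1},
     "c": {"a": 2, "d": 1}, "d": {"b": 1, "c": 1}}
print(forward(make_slices(g, "d", k=3), "a", "d", splice_bits=[0, 2, 1, 0]))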
We also describe several other applications of path splicing, as well as various possible deployment paths.}, keywords = {multi-path routing, path diversity, path splicing}, isbn = {0146-4833}, doi = {10.1145/1402946.1402963}, url = {http://doi.acm.org/10.1145/1402946.1402963}, author = {Motiwala,Murtaza and Elmore,Megan and Feamster, Nick and Vempala,Santosh} } @article {17967, title = {Persuading Visual Attention through Geometry}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {14}, year = {2008}, month = {2008/08//july}, pages = {772 - 782}, abstract = {Artists, illustrators, photographers, and cinematographers have long used the principles of contrast and composition to guide visual attention. In this paper, we introduce geometry modification as a tool to persuasively direct visual attention. We build upon recent advances in mesh saliency to develop techniques to alter geometry to elicit greater visual attention. Eye-tracking-based user studies show that our approach successfully guides user attention in a statistically significant manner. Our approach operates directly on geometry and, therefore, produces view-independent results that can be used with existing view-dependent techniques of visual persuasion.}, keywords = {visual attention; geometry modification; mesh saliency; mesh generation; visual persuasion; visual perception; art; Attention; Awareness; Computer Graphics; Cues; Humans; Photic Stimulation; User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2007.70624}, author = {Kim,Youngmin and Varshney, Amitabh} } @article {12109, title = {A pilot study to compare programming effort for two parallel programming models}, journal = {Journal of Systems and Software}, volume = {81}, year = {2008}, month = {2008/11//}, pages = {1920 - 1930}, abstract = {Context: Writing software for the current generation of parallel systems requires significant programmer effort, and the community is seeking alternatives that reduce effort while still achieving good performance. Objective: Measure the effect of parallel programming models (message-passing vs. PRAM-like) on programmer effort. Design, setting, and subjects: One group of subjects implemented sparse-matrix dense-vector multiplication using message-passing (MPI), and a second group solved the same problem using a PRAM-like model (XMTC). The subjects were students in two graduate-level classes: one class was taught MPI and the other was taught XMTC. Main outcome measures: Development time, program correctness. Results: Mean XMTC development time was 4.8 h less than mean MPI development time (95\% confidence interval, 2.0{\textendash}7.7), a 46\% reduction. XMTC programs were more likely to be correct, but the difference in correctness rates was not statistically significant (p = .16). Conclusions: XMTC solutions for this particular problem required less effort than MPI equivalents, but further studies are necessary which examine different types of problems and different levels of programmer experience.}, keywords = {effort, empirical study, Message-passing, MPI, parallel programming, PRAM, XMT}, isbn = {0164-1212}, doi = {10.1016/j.jss.2007.12.798}, url = {http://www.sciencedirect.com/science/article/pii/S0164121208000125}, author = {Hochstein, Lorin and Basili, Victor R.
and Vishkin, Uzi and Gilbert,John} } @inbook {13396, title = {Predictive Modeling-Based Data Collection in Wireless Sensor Networks}, booktitle = {Wireless Sensor Networks}, series = {Lecture Notes in Computer Science}, volume = {4913}, year = {2008}, month = {2008///}, pages = {34 - 51}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We address the problem of designing practical, energy-efficient protocols for data collection in wireless sensor networks using predictive modeling. Prior work has suggested several approaches to capture and exploit the rich spatio-temporal correlations prevalent in WSNs during data collection. Although shown to be effective in reducing the data collection cost, those approaches use simplistic correlation models and, further, ignore many idiosyncrasies of WSNs, in particular the broadcast nature of communication. Our proposed approach is based on approximating the joint probability distribution over the sensors using undirected graphical models, ideally suited to exploit both the spatial correlations and the broadcast nature of communication. We present algorithms for optimally using such a model for data collection under different communication models, and for identifying an appropriate model to use for a given sensor network. Experiments over synthetic and real-world datasets show that our approach significantly reduces the data collection cost.}, isbn = {978-3-540-77689-5}, url = {http://dx.doi.org/10.1007/978-3-540-77690-1_3}, author = {Wang,Lidan and Deshpande, Amol}, editor = {Verdone,Roberto} } @article {12564, title = {Recognition of Humans and their Activities using Statistical analysis on Stiefel and Grassmann Manifolds}, journal = {Red}, volume = {7}, year = {2008}, month = {2008///}, pages = {643 - 643}, abstract = {Many applications in computer vision involve learning and recognition of patterns from exemplars which lie on certain manifolds. Given a database of examples and a query, the following two questions are usually addressed {\textendash} a) what is the {\textquoteleft}closest{\textquoteright} example to the query in the database? b) what is the {\textquoteleft}most probable{\textquoteright} class to which the query belongs? The answer to the first question involves study of the geometric properties of the manifold, which then leads to appropriate definitions of distance metrics on the manifold (geodesics etc). The answer to the second question involves statistical modeling of inter- and intra-class variations on the manifold. In this paper, we concern ourselves with two related manifolds that often appear in several vision applications {\textendash} the Stiefel Manifold and the Grassmann Manifold. We describe statistical modeling and inference tools on these manifolds which result in significant improvements in performance over traditional distance-based classifiers. We illustrate applications to video-based face recognition and activity recognition.}, author = {Turaga,P. and Veeraraghavan,A. and Chellapa, Rama} } @article {12559, title = {Shape-and-Behavior Encoded Tracking of Bee Dances}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {30}, year = {2008}, month = {2008/03//}, pages = {463 - 476}, abstract = {Behavior analysis of social insects has garnered impetus in recent years and has led to some advances in fields like control systems and flight navigation.
Manual labeling of the insect motions required for analyzing insect behaviors demands a significant investment of time and effort. In this paper, we propose certain general principles that help in simultaneous automatic tracking and behavior analysis, with applications in tracking bees and recognizing specific behaviors that they exhibit. The state space for tracking is defined using the position, orientation, and current behavior of the insect being tracked. The position and the orientation are parameterized using a shape model, whereas the behavior is explicitly modeled using a three-tier hierarchical motion model. The first tier (dynamics) models the local motions exhibited, and the models built in this tier act as a vocabulary for behavior modeling. The second tier is a Markov motion model built on top of the local motion vocabulary, which serves as the behavior model. The third tier of the hierarchy models the switching between behaviors, and this is also modeled as a Markov model. We address issues in learning the three-tier behavioral model, in discriminating between models, and in detecting and modeling abnormal behaviors. Another important aspect of this work is that it leads to joint tracking and behavior analysis instead of the traditional "track-and-then-recognize" approach. We apply these principles for tracking bees in a hive while they are executing the waggle dance and the round dance.}, keywords = {Markov model; bee dance; behavior analysis; shape model; social insect; three-tier hierarchical motion model; tracking; video sequence; waggle dance; Markov processes; image sequences; optical tracking; video signal processing; zoology; Algorithms; Animal Communication; Computer-Assisted Imaging; Three-Dimensional Imaging; Information Storage and Retrieval; Male; Automated Pattern Recognition; Reproducibility of Results; Sensitivity and Specificity; Social Behavior}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2007.70707}, author = {Veeraraghavan,A. and Chellapa, Rama and Srinivasan, M.} } @article {15557, title = {Space-time tradeoffs for proximity searching in doubling spaces}, journal = {Algorithms-ESA 2008}, year = {2008}, month = {2008///}, pages = {112 - 123}, abstract = {We consider approximate nearest neighbor searching in metric spaces of constant doubling dimension. More formally, we are given a set S of n points and an error bound ε > 0. The objective is to build a data structure so that given any query point q in the space, it is possible to efficiently determine a point of S whose distance from q is within a factor of (1 + ε) of the distance between q and its nearest neighbor in S. In this paper we obtain the following space-time tradeoffs. Given a parameter γ ∈ [2, 1/ε], we show how to construct a data structure of space nγ^{O(dim)} log(1/ε) that can answer queries in time O(log(nγ)) + (1/(εγ))^{O(dim)}. This is the first result that offers space-time tradeoffs for approximate nearest neighbor queries in doubling spaces. At one extreme it nearly matches the best result currently known for doubling spaces, and at the other extreme it results in a data structure that can answer queries in time O(log(n/ε)), which matches the best query times in Euclidean space. Our approach involves a novel generalization of the AVD data structure from Euclidean space to doubling space.}, doi = {10.1007/978-3-540-87744-8_10}, author = {Arya,S. and Mount, Dave and Vigneron,A.
and Xia,J.} } @conference {12551, title = {Statistical analysis on Stiefel and Grassmann manifolds with applications in computer vision}, booktitle = {Computer Vision and Pattern Recognition, 2008. CVPR 2008. IEEE Conference on}, year = {2008}, month = {2008/06//}, pages = {1 - 8}, abstract = {Many applications in computer vision and pattern recognition involve drawing inferences on certain manifold-valued parameters. In order to develop accurate inference algorithms on these manifolds we need to a) understand the geometric structure of these manifolds, b) derive appropriate distance measures, and c) develop probability distribution functions (pdf) and estimation techniques that are consistent with the geometric structure of these manifolds. In this paper, we consider two related manifolds - the Stiefel manifold and the Grassmann manifold, which arise naturally in several vision applications such as spatio-temporal modeling, affine invariant shape analysis, image matching and learning theory. We show how accurate statistical characterization that reflects the geometry of these manifolds allows us to design efficient algorithms that compare favorably to the state of the art in these very different applications. In particular, we describe appropriate distance measures and parametric and non-parametric density estimators on these manifolds. These methods are then used to learn class conditional densities for applications such as activity recognition, video based face recognition and shape classification.}, keywords = {Stiefel manifold; Grassmann manifold; manifold-valued parameters; geometric structure; distance measures; probability distribution functions; estimation techniques; statistical analysis; statistical modeling; spatio-temporal modeling; affine invariant shape analysis; image matching; learning theory; inference algorithms; activity recognition; video based face recognition; shape classification; computer vision; pattern recognition}, doi = {10.1109/CVPR.2008.4587733}, author = {Turaga,P. and Veeraraghavan,A. and Chellapa, Rama} } @article {17997, title = {Toward Realizing a PRAM-on-a-Chip Vision}, journal = {Lecture Notes in Computer Science}, volume = {4854}, year = {2008}, month = {2008///}, pages = {5 - 6}, abstract = {Serial computing has become largely irrelevant for growth in computing performance since around 2003. Having already concluded that to maintain past performance growth rates, general-purpose computing must be overhauled to incorporate parallel computing at all levels of a computer system {\textendash} including the programming model {\textendash} all processor vendors put forward many-core roadmaps. They all expect exponential increase in the number of cores over at least a decade. This welcome development is also a cause for apprehension. The whole world of computing is now facing the same general-purpose parallel computing challenge that eluded computer science for so many years and the clock is ticking. It is becoming common knowledge that if you want your program to run faster you will have to program for parallelism, but the vendors who set up the rules have not yet provided clear and effective means (e.g., programming models and languages) for doing that.
How can application software vendors be expected to make a large investment in new software developments, when they know that in a few years they are likely to have a whole new set of options for getting much better performance?! Namely, we are already in a problematic transition stage that slows down performance growth, and may cause a recession if it lasts too long. Unfortunately, some industry leaders are already predicting that the transition period can last a full decade.}, author = {Vishkin, Uzi} } @conference {18608, title = {Trellis: a platform for building flexible, fast virtual networks on commodity hardware}, booktitle = {Proceedings of the 2008 ACM CoNEXT Conference}, series = {CoNEXT {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {72:1 - 72:6}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We describe Trellis, a platform for hosting virtual networks on shared commodity hardware. Trellis allows each virtual network to define its own topology, control protocols, and forwarding tables, while amortizing costs by sharing the physical infrastructure. Trellis synthesizes two container-based virtualization technologies, VServer and NetNS, as well as a new tunneling mechanism, EGRE, into a coherent platform that enables high-speed virtual networks. We describe the design and implementation of Trellis and evaluate its packet-forwarding rates relative to other virtualization technologies and native kernel forwarding performance.}, isbn = {978-1-60558-210-8}, doi = {10.1145/1544012.1544084}, url = {http://doi.acm.org/10.1145/1544012.1544084}, author = {Bhatia,Sapan and Motiwala,Murtaza and Muhlbauer,Wolfgang and Mundada,Yogesh and Valancius,Vytautas and Bavier,Andy and Feamster, Nick and Peterson,Larry and Rexford,Jennifer} } @article {12566, title = {Video stabilization and mosaicing}, journal = {The Essential Guide to Video Processing}, year = {2008}, month = {2008///}, pages = {109 - 138}, author = {Ramachandran, M. and Veeraraghavan,A. and Chellapa, Rama} } @conference {18165, title = {Analysis of Nonlinear Collusion Attacks on Fingerprinting Systems for Compressed Multimedia}, booktitle = {Image Processing, 2007. ICIP 2007. IEEE International Conference on}, volume = {2}, year = {2007}, month = {2007/10/16/19}, pages = {II-133 - II-136}, abstract = {In this paper, we analyze the effect of various collusion attacks on fingerprinting systems for compressed multimedia. We evaluate the effectiveness of the collusion attacks in terms of the probability of detection and accuracy in estimating the host signal. Our analysis shows that applying averaging collusion on copies of moderately compressed content gives a highly accurate estimation of the host, and can effectively remove the embedded fingerprints. Averaging is thus the best choice for an attacker as the probability of detection and the distortion introduced are the lowest.}, keywords = {nonlinear collusion attacks; fingerprinting systems; compressed multimedia; data compression; image coding; multimedia coding; security of data; watermarking}, doi = {10.1109/ICIP.2007.4379110}, author = {Varna,A.L. and He,Shan and Swaminathan,A. and M. Wu} } @article {18507, title = {Building a Better Mousetrap}, volume = {GIT-CSS-07-01}, year = {2007}, month = {2007///}, institution = {Georgia Institute of Technology.
College of Computing}, abstract = {Routers in the network core are unable to maintain detailed statistics for every packet; thus, traffic statistics are often based on packet sampling, which reduces accuracy. Because tracking large ("heavy-hitter") traffic flows is important both for pricing and for traffic engineering, much attention has focused on maintaining accurate statistics for such flows, often at the expense of small-volume flows. Eradicating these smaller flows makes it difficult to observe communication structure, which is sometimes more important than maintaining statistics about flow sizes. This paper presents FlexSample, a sampling framework that allows network operators to get the best of both worlds: For a fixed sampling budget, FlexSample can capture significantly more small-volume flows for only a small increase in relative error of large traffic flows. FlexSample uses a fast, lightweight counter array that provides a coarse estimate of the size ("class") of each traffic flow; a router then can sample at different rates according to the class of the traffic using any existing sampling strategy. Given a fixed sampling rate and a target fraction of sampled packets to allocate across traffic classes, FlexSample computes packet sampling rates for each class that achieve these allocations online. Through analysis and trace-based experiments, we find that FlexSample captures at least 50\% more mouse flows than strategies that do not perform class-dependent packet sampling. We also show how FlexSample can be used to capture unique flows for specific applications.}, url = {http://hdl.handle.net/1853/14350}, author = {Ramachandran,A. and Seetharaman,S. and Feamster, Nick and Vazirani,V.} } @conference {18178, title = {Collusion-Resistant Fingerprinting for Compressed Multimedia Signals}, booktitle = {Acoustics, Speech and Signal Processing, 2007. ICASSP 2007. IEEE International Conference on}, volume = {2}, year = {2007}, month = {2007/04//}, pages = {II-165 - II-168}, abstract = {Most existing collusion-resistant fingerprinting techniques are for fingerprinting uncompressed signals. In this paper, we first study the performance of the traditional Gaussian based spread spectrum sequences for fingerprinting compressed signals and show that the system can be easily defeated by averaging or taking the median of a few copies. To overcome the collusion problem for compressed multimedia host signals, we propose a technique called anti-collusion dithering to mimic an uncompressed signal. Results show higher probability of catching a colluder using the proposed scheme compared to using Gaussian based fingerprints.}, keywords = {collusion-resistant fingerprinting; anti-collusion dithering; Gaussian based spread spectrum sequences; Gaussian processes; multimedia compression; data compression; multimedia signals; multimedia systems}, doi = {10.1109/ICASSP.2007.366198}, author = {Varna,A.L. and He,Shan and Swaminathan,A. and M. Wu and Lu,Haiming and Lu,Zengxiang} } @conference {12351, title = {Compact, low power wireless sensor network system for line crossing recognition}, booktitle = {Circuits and Systems, 2007. ISCAS 2007. IEEE International Symposium on}, year = {2007}, month = {2007///}, pages = {2506 - 2509}, author = {Shen,C. C and Kupershtok,R. and Yang,B. and Vanin,F. M and Shao,X. and Sheth,D. and Goldsman,N. and Balzano,Q.
and Bhattacharyya, Shuvra S.} } @article {14600, title = {COMPUTATIONAL BIOLOGY}, journal = {Nucleic acids research}, volume = {35}, year = {2007}, month = {2007///}, pages = {3163 - 3163}, author = {Leparc,GG and Mitra,RD and Vardhanabhuti,S. and Wang,J. and Hannenhalli, Sridhar and Smit,S. and Widmann,J. and Knight,R. and Wu,S. and Zhang,Y. and others} } @conference {15904, title = {Confidentiality-preserving rank-ordered search}, booktitle = {Proceedings of the 2007 ACM workshop on Storage security and survivability}, series = {StorageSS {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {7 - 12}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper introduces a new framework for confidentiality preserving rank-ordered search and retrieval over large document collections. The proposed framework not only protects document/query confidentiality against an outside intruder, but also prevents an untrusted data center from learning information about the query and the document collection. We present practical techniques for proper integration of relevance scoring methods and cryptographic techniques, such as order preserving encryption, to protect data collections and indices and provide efficient and accurate search capabilities to securely rank-order documents in response to a query. Experimental results on the W3C collection show that these techniques have comparable performance to conventional search systems designed for non-encrypted data in terms of search accuracy. The proposed methods thus form the first steps to bring together advanced information retrieval and secure search capabilities for a wide range of applications including managing data in government and business operations, enabling scholarly study of sensitive data, and facilitating the document discovery process in litigation.}, keywords = {encrypted domain search, Ranked retrieval, secure index}, isbn = {978-1-59593-891-6}, doi = {10.1145/1314313.1314316}, url = {http://doi.acm.org/10.1145/1314313.1314316}, author = {Swaminathan,Ashwin and Mao,Yinian and Su,Guan-Ming and Gou,Hongmei and Varna,Avinash L. and He,Shan and M. Wu and Oard, Douglas} } @article {18697, title = {Crystal Structure and Solution NMR Studies of Lys48-linked Tetraubiquitin at Neutral pH}, journal = {Journal of Molecular Biology}, volume = {367}, year = {2007}, month = {2007/03/16/}, pages = {204 - 211}, abstract = {Ubiquitin modification of proteins is used as a signal in many cellular processes. Lysine side-chains can be modified by a single ubiquitin or by a polyubiquitin chain, which is defined by an isopeptide bond between the C terminus of one ubiquitin and a specific lysine in a neighboring ubiquitin. Polyubiquitin conformations that result from different lysine linkages presumably differentiate their roles and ability to bind specific targets and enzymes. However, conflicting results have been obtained regarding the precise conformation of Lys48-linked tetraubiquitin. We report the crystal structure of Lys48-linked tetraubiquitin at near-neutral pH. The two tetraubiquitin complexes in the asymmetric unit show the complete connectivity of the chain and the molecular details of the interactions. This tetraubiquitin conformation is consistent with our NMR data as well as with previous studies of diubiquitin and tetraubiquitin in solution at neutral pH. 
The structure provides a basis for understanding Lys48-linked polyubiquitin recognition under physiological conditions.}, keywords = {crystal structure, Lys48-linked, polyubiquitin chains, tetraubiquitin, ubiquitin}, isbn = {0022-2836}, doi = {10.1016/j.jmb.2006.12.065}, url = {http://www.sciencedirect.com/science/article/pii/S0022283606017554}, author = {Eddins,Michael J. and Varadan,Ranjani and Fushman, David and Pickart,Cecile M. and Wolberger,Cynthia} } @article {18700, title = {Effects of cyclization on conformational dynamics and binding properties of Lys48-linked di-ubiquitin}, journal = {Protein Science}, volume = {16}, year = {2007}, month = {2007///}, pages = {369 - 378}, abstract = {In solution, Lys48-linked di-ubiquitin exists in dynamic equilibrium between closed and open conformations. To understand the effect of interdomain motion in polyubiquitin chains on their ability to bind ligands, we cyclized di-ubiquitin by cross-linking the free C terminus of the proximal ubiquitin with the side chain of residue 48 in the distal ubiquitin, using a chemical cross-linker, 1,6-Hexane-bis-vinylsulfone. Our NMR studies confirm that the cyclization affects conformational dynamics in di-ubiquitin by restricting opening of the interface and shifting the conformational equilibrium toward closed conformations. The cyclization, however, did not rigidly lock di-ubiquitin in a single closed conformation: The chain undergoes slow exchange between at least two closed conformations, characterized by interdomain contacts involving the same hydrophobic patch residues (Leu8-Ile44-Val70) as in the uncyclized di-ubiquitin. Lowering the pH changes the relative populations of these conformations, but in contrast with the uncyclized di-ubiquitin, does not lead to opening of the interface. This restriction of domain motions inhibits direct access of protein molecules to the hydrophobic patch residues located at the very center of the interdomain interface in di-ubiquitin, although the residual motions are sufficient to allow access of small molecules to the interface. This renders di-ubiquitin unable to bind protein molecules (e.g., UBA2 domain) in the normal manner, and thus could interfere with Ub2 recognition by various downstream effectors. These results emphasize the importance of the opening/closing domain motions for the recognition and function of di-ubiquitin and possibly longer polyubiquitin chains.}, keywords = {cyclization, di-ubiquitin, interdomain dynamics, Lys48-linked ubiquitin chain, UBA domain}, isbn = {1469-896X}, doi = {10.1110/ps.062508007}, url = {http://onlinelibrary.wiley.com/doi/10.1110/ps.062508007/abstract}, author = {Dickinson,Bryan C. and Varadan,Ranjani and Fushman, David} } @article {14938, title = {An efficient and scalable parallel algorithm for out-of-core isosurface extraction and rendering}, journal = {Journal of Parallel and Distributed Computing}, volume = {67}, year = {2007}, month = {2007/05//}, pages = {592 - 603}, abstract = {We consider the problem of isosurface extraction and rendering for large scale time-varying data. Such data sets have been appearing at an increasing rate especially from physics-based simulations, and can range in size from hundreds of gigabytes to tens of terabytes. Isosurface extraction and rendering is one of the most widely used visualization techniques to explore and analyze such data sets. 
A common strategy for isosurface extraction involves the determination of the so-called active cells followed by a triangulation of these cells based on linear interpolation, and ending with a rendering of the triangular mesh. We develop a new simple indexing scheme for out-of-core processing of large scale data sets, which enables the identification of the active cells extremely quickly, using a more compact indexing structure and more effective bulk data movement than previous schemes. Moreover, our scheme leads to an efficient and scalable implementation on multiprocessor environments in which each processor has access to its own local disk. In particular, our parallel algorithm provably achieves load balancing across the processors independent of the isovalue, with almost no overhead in the total amount of work relative to the sequential algorithm. We conduct a large number of experimental tests on the University of Maryland Visualization Cluster using the Richtmyer{\textendash}Meshkov instability data set, and obtain results that consistently validate the efficiency and the scalability of our algorithm.}, keywords = {Parallel isosurface extraction, scientific visualization}, isbn = {0743-7315}, doi = {10.1016/j.jpdc.2006.12.007}, url = {http://www.sciencedirect.com/science/article/pii/S0743731506002450}, author = {Wang,Qin and JaJa, Joseph F. and Varshney, Amitabh} } @article {18006, title = {Electron beam and optical proximity effect reduction for nanolithography: New results}, journal = {Journal of Vacuum Science \& Technology B}, volume = {25}, year = {2007}, month = {2007///}, pages = {2288 - 2294}, abstract = {Proximity effect correction by dose modulation is widely practiced in electron-beam lithography. Optical proximity control is also possible using a combination of shape adjustment and phase control. Assigning {\textquotedblleft}the right{\textquotedblright} dose (or fill factor and phase for optics) is a well known mathematical inverse problem. Linear programming, by definition, is the appropriate method for determining dose. In the past, the technique was too slow for full-scale implementation in mask making. Here, the authors discuss how recent developments in computer speed and architecture have improved the prospects for full-scale implementation. In addition, the authors discuss some numerical techniques, analogous to gridding and relaxation, that make linear programming more attractive in mask making.}, keywords = {electron beam lithography, Linear programming, masks, nanolithography, proximity effect (lithography)}, doi = {10.1116/1.2806967}, url = {http://link.aip.org/link/?JVB/25/2288/1}, author = {Peckerar,Martin and Sander,David and Srivastava,Ankur and Foli,Adakou and Vishkin, Uzi} } @article {12118, title = {An Environment of Conducting Families of Software Engineering Experiments}, year = {2007}, month = {2007/05//}, institution = {University of Maryland, College Park}, abstract = {The classroom is a valuable resource for conducting software engineering experiments. However, coordinating a family of experiments in classroom environments presents a number of challenges to researchers. This paper describes an environment that simplifies the process of collecting, managing and sanitizing data from classroom experiments, while minimizing disruption to natural subject behavior.
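The out-of-core isosurface entry above hinges on identifying the "active cells", the cells whose value range spans the isovalue, without scanning the raw data. The following NumPy sketch shows that interval test on a small in-memory volume; the paper's actual indexing scheme is out-of-core and considerably more elaborate, and all names here are hypothetical.

import numpy as np

def cell_intervals(volume):
    """Per-cell (min, max) over the 8 corner samples of every cubic cell."""
    corners = [volume[x:volume.shape[0] - 1 + x,
                      y:volume.shape[1] - 1 + y,
                      z:volume.shape[2] - 1 + z]
               for x in (0, 1) for y in (0, 1) for z in (0, 1)]
    stacked = np.stack(corners)
    return stacked.min(axis=0), stacked.max(axis=0)

def active_cells(cmin, cmax, isovalue):
    """Indices of cells whose [min, max] interval contains the isovalue."""
    return np.argwhere((cmin <= isovalue) & (isovalue <= cmax))

rng = np.random.default_rng(0)
volume = rng.random((32, 32, 32))
cmin, cmax = cell_intervals(volume)
cells = active_cells(cmin, cmax, isovalue=0.5)
print(f"{len(cells)} active cells out of {cmin.size}")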
We have successfully used this environment to study the impact of parallel programming languages on programmer productivity at multiple universities across the United States.}, keywords = {collecting data, managing data, parallel programming languages, sanitizing data, software engineering experiments, universities}, url = {http://drum.lib.umd.edu/handle/1903/7545}, author = {Hochstein, Lorin and Nakamura,Taiga and Shull, Forrest and Zazworka, Nico and Voelp,Martin and Zelkowitz, Marvin V and Basili, Victor R.} } @article {16261, title = {Evolution of genes and genomes on the Drosophila phylogeny}, journal = {Nature}, volume = {450}, year = {2007}, month = {2007/11/08/}, pages = {203 - 218}, abstract = {Comparative analysis of multiple genomes in a phylogenetic framework dramatically improves the precision and sensitivity of evolutionary inference, producing more robust results than single-genome analyses can provide. The genomes of 12 Drosophila species, ten of which are presented here for the first time (sechellia, simulans, yakuba, erecta, ananassae, persimilis, willistoni, mojavensis, virilis and grimshawi), illustrate how rates and patterns of sequence divergence across taxa can illuminate evolutionary processes on a genomic scale. These genome sequences augment the formidable genetic tools that have made Drosophila melanogaster a pre-eminent model for animal genetics, and will further catalyse fundamental research on mechanisms of development, cell biology, genetics, disease, neurobiology, behaviour, physiology and evolution. Despite remarkable similarities among these Drosophila species, we identified many putatively non-neutral changes in protein-coding genes, non-coding RNA genes, and cis-regulatory regions. These may prove to underlie differences in the ecology and behaviour of these diverse species.}, isbn = {0028-0836}, doi = {10.1038/nature06341}, url = {http://www.nature.com/nature/journal/v450/n7167/full/nature06341.html}, author = {Clark,Andrew G. and Eisen,Michael B. and Smith,Douglas R. and Bergman,Casey M. and Oliver,Brian and Markow,Therese A. and Kaufman,Thomas C. and Kellis,Manolis and Gelbart,William and Iyer,Venky N. and Pollard,Daniel A. and Sackton,Timothy B. and Larracuente,Amanda M. and Singh,Nadia D. and Abad,Jose P. and Abt,Dawn N. and Adryan,Boris and Aguade,Montserrat and Akashi,Hiroshi and Anderson,Wyatt W. and Aquadro,Charles F. and Ardell,David H. and Arguello,Roman and Artieri,Carlo G. and Barbash,Daniel A. and Barker,Daniel and Barsanti,Paolo and Batterham,Phil and Batzoglou,Serafim and Begun,Dave and Bhutkar,Arjun and Blanco,Enrico and Bosak,Stephanie A. and Bradley,Robert K. and Brand,Adrianne D. and Brent,Michael R. and Brooks,Angela N. and Brown,Randall H. and Butlin,Roger K. and Caggese,Corrado and Calvi,Brian R. and Carvalho,A. Bernardo de and Caspi,Anat and Castrezana,Sergio and Celniker,Susan E. and Chang,Jean L. and Chapple,Charles and Chatterji,Sourav and Chinwalla,Asif and Civetta,Alberto and Clifton,Sandra W. and Comeron,Josep M. and Costello,James C. and Coyne,Jerry A. and Daub,Jennifer and David,Robert G. and Delcher,Arthur L. and Delehaunty,Kim and Do,Chuong B. and Ebling,Heather and Edwards,Kevin and Eickbush,Thomas and Evans,Jay D. and Filipski,Alan and Findei{\ss},Sven and Freyhult,Eva and Fulton,Lucinda and Fulton,Robert and Garcia,Ana C. L. and Gardiner,Anastasia and Garfield,David A. and Garvin,Barry E.
and Gibson,Greg and Gilbert,Don and Gnerre,Sante and Godfrey,Jennifer and Good,Robert and Gotea,Valer and Gravely,Brenton and Greenberg,Anthony J. and Griffiths-Jones,Sam and Gross,Samuel and Guigo,Roderic and Gustafson,Erik A. and Haerty,Wilfried and Hahn,Matthew W. and Halligan,Daniel L. and Halpern,Aaron L. and Halter,Gillian M. and Han,Mira V. and Heger,Andreas and Hillier,LaDeana and Hinrichs,Angie S. and Holmes,Ian and Hoskins,Roger A. and Hubisz,Melissa J. and Hultmark,Dan and Huntley,Melanie A. and Jaffe,David B. and Jagadeeshan,Santosh and Jeck,William R. and Johnson,Justin and Jones,Corbin D. and Jordan,William C. and Karpen,Gary H. and Kataoka,Eiko and Keightley,Peter D. and Kheradpour,Pouya and Kirkness,Ewen F. and Koerich,Leonardo B. and Kristiansen,Karsten and Kudrna,Dave and Kulathinal,Rob J. and Kumar,Sudhir and Kwok,Roberta and Lander,Eric and Langley,Charles H. and Lapoint,Richard and Lazzaro,Brian P. and Lee,So-Jeong and Levesque,Lisa and Li,Ruiqiang and Lin,Chiao-Feng and Lin,Michael F. and Lindblad-Toh,Kerstin and Llopart,Ana and Long,Manyuan and Low,Lloyd and Lozovsky,Elena and Lu,Jian and Luo,Meizhong and Machado,Carlos A. and Makalowski,Wojciech and Marzo,Mar and Matsuda,Muneo and Matzkin,Luciano and McAllister,Bryant and McBride,Carolyn S. and McKernan,Brendan and McKernan,Kevin and Mendez-Lago,Maria and Minx,Patrick and Mollenhauer,Michael U. and Montooth,Kristi and Mount, Stephen M. and Mu,Xu and Myers,Eugene and Negre,Barbara and Newfeld,Stuart and Nielsen,Rasmus and Noor,Mohamed A. F. and O{\textquoteright}Grady,Patrick and Pachter,Lior and Papaceit,Montserrat and Parisi,Matthew J. and Parisi,Michael and Parts,Leopold and Pedersen,Jakob S. and Pesole,Graziano and Phillippy,Adam M and Ponting,Chris P. and Pop, Mihai and Porcelli,Damiano and Powell,Jeffrey R. and Prohaska,Sonja and Pruitt,Kim and Puig,Marta and Quesneville,Hadi and Ram,Kristipati Ravi and Rand,David and Rasmussen,Matthew D. and Reed,Laura K. and Reenan,Robert and Reily,Amy and Remington,Karin A. and Rieger,Tania T. and Ritchie,Michael G. and Robin,Charles and Rogers,Yu-Hui and Rohde,Claudia and Rozas,Julio and Rubenfield,Marc J. and Ruiz,Alfredo and Russo,Susan and Salzberg,Steven L. and Sanchez-Gracia,Alejandro and Saranga,David J. and Sato,Hajime and Schaeffer,Stephen W. and Schatz,Michael C and Schlenke,Todd and Schwartz,Russell and Segarra,Carmen and Singh,Rama S. and Sirot,Laura and Sirota,Marina and Sisneros,Nicholas B. and Smith,Chris D. and Smith,Temple F. and Spieth,John and Stage,Deborah E. and Stark,Alexander and Stephan,Wolfgang and Strausberg,Robert L. and Strempel,Sebastian and Sturgill,David and Sutton,Granger and Sutton,Granger G. and Tao,Wei and Teichmann,Sarah and Tobari,Yoshiko N. and Tomimura,Yoshihiko and Tsolas,Jason M. and Valente,Vera L. S. and Venter,Eli and Venter,J. Craig and Vicario,Saverio and Vieira,Filipe G. and Vilella,Albert J. and Villasante,Alfredo and Walenz,Brian and Wang,Jun and Wasserman,Marvin and Watts,Thomas and Wilson,Derek and Wilson,Richard K. and Wing,Rod A. and Wolfner,Mariana F. and Wong,Alex and Wong,Gane Ka-Shu and Wu,Chung-I and Wu,Gabriel and Yamamoto,Daisuke and Yang,Hsiao-Pei and Yang,Shiaw-Pyng and Yorke,James A. and Yoshida,Kiyohito and Zdobnov,Evgeny and Zhang,Peili and Zhang,Yu and Zimin,Aleksey V. 
and Baldwin,Jennifer and Abdouelleil,Amr and Abdulkadir,Jamal and Abebe,Adal and Abera,Brikti and Abreu,Justin and Acer,St Christophe and Aftuck,Lynne and Alexander,Allen and An,Peter and Anderson,Erica and Anderson,Scott and Arachi,Harindra and Azer,Marc and Bachantsang,Pasang and Barry,Andrew and Bayul,Tashi and Berlin,Aaron and Bessette,Daniel and Bloom,Toby and Blye,Jason and Boguslavskiy,Leonid and Bonnet,Claude and Boukhgalter,Boris and Bourzgui,Imane and Brown,Adam and Cahill,Patrick and Channer,Sheridon and Cheshatsang,Yama and Chuda,Lisa and Citroen,Mieke and Collymore,Alville and Cooke,Patrick and Costello,Maura and D{\textquoteright}Aco,Katie and Daza,Riza and Haan,Georgius De and DeGray,Stuart and DeMaso,Christina and Dhargay,Norbu and Dooley,Kimberly and Dooley,Erin and Doricent,Missole and Dorje,Passang and Dorjee,Kunsang and Dupes,Alan and Elong,Richard and Falk,Jill and Farina,Abderrahim and Faro,Susan and Ferguson,Diallo and Fisher,Sheila and Foley,Chelsea D. and Franke,Alicia and Friedrich,Dennis and Gadbois,Loryn and Gearin,Gary and Gearin,Christina R. and Giannoukos,Georgia and Goode,Tina and Graham,Joseph and Grandbois,Edward and Grewal,Sharleen and Gyaltsen,Kunsang and Hafez,Nabil and Hagos,Birhane and Hall,Jennifer and Henson,Charlotte and Hollinger,Andrew and Honan,Tracey and Huard,Monika D. and Hughes,Leanne and Hurhula,Brian and Husby,M Erii and Kamat,Asha and Kanga,Ben and Kashin,Seva and Khazanovich,Dmitry and Kisner,Peter and Lance,Krista and Lara,Marcia and Lee,William and Lennon,Niall and Letendre,Frances and LeVine,Rosie and Lipovsky,Alex and Liu,Xiaohong and Liu,Jinlei and Liu,Shangtao and Lokyitsang,Tashi and Lokyitsang,Yeshi and Lubonja,Rakela and Lui,Annie and MacDonald,Pen and Magnisalis,Vasilia and Maru,Kebede and Matthews,Charles and McCusker,William and McDonough,Susan and Mehta,Teena and Meldrim,James and Meneus,Louis and Mihai,Oana and Mihalev,Atanas and Mihova,Tanya and Mittelman,Rachel and Mlenga,Valentine and Montmayeur,Anna and Mulrain,Leonidas and Navidi,Adam and Naylor,Jerome and Negash,Tamrat and Nguyen,Thu and Nguyen,Nga and Nicol,Robert and Norbu,Choe and Norbu,Nyima and Novod,Nathaniel and O{\textquoteright}Neill,Barry and Osman,Sahal and Markiewicz,Eva and Oyono,Otero L. and Patti,Christopher and Phunkhang,Pema and Pierre,Fritz and Priest,Margaret and Raghuraman,Sujaa and Rege,Filip and Reyes,Rebecca and Rise,Cecil and Rogov,Peter and Ross,Keenan and Ryan,Elizabeth and Settipalli,Sampath and Shea,Terry and Sherpa,Ngawang and Shi,Lu and Shih,Diana and Sparrow,Todd and Spaulding,Jessica and Stalker,John and Stange-Thomann,Nicole and Stavropoulos,Sharon and Stone,Catherine and Strader,Christopher and Tesfaye,Senait and Thomson,Talene and Thoulutsang,Yama and Thoulutsang,Dawa and Topham,Kerri and Topping,Ira and Tsamla,Tsamla and Vassiliev,Helen and Vo,Andy and Wangchuk,Tsering and Wangdi,Tsering and Weiand,Michael and Wilkinson,Jane and Wilson,Adam and Yadav,Shailendra and Young,Geneva and Yu,Qing and Zembek,Lisa and Zhong,Danni and Zimmer,Andrew and Zwirko,Zac and Jaffe,David B. 
and Alvarez,Pablo and Brockman,Will and Butler,Jonathan and Chin,CheeWhye and Gnerre,Sante and Grabherr,Manfred and Kleber,Michael and Mauceli,Evan and MacCallum,Iain} } @article {17915, title = {A fast all nearest neighbor algorithm for applications involving large point-clouds}, journal = {Computers \& Graphics}, volume = {31}, year = {2007}, month = {2007/04//}, pages = {157 - 174}, abstract = {Algorithms that use point-cloud models make heavy use of the neighborhoods of the points. These neighborhoods are used to compute the surface normals for each point, mollification, and noise removal. All of these primitive operations require the seemingly repetitive process of finding the k nearest neighbors (kNNs) of each point. These algorithms are primarily designed to run in main memory. However, rapid advances in scanning technologies have made available point-cloud models that are too large to fit in the main memory of a computer. This calls for more efficient methods of computing the kNNs of a large collection of points many of which are already in close proximity. A fast kNN algorithm is presented that makes use of the locality of successive points whose k nearest neighbors are sought to reduce significantly the time needed to compute the neighborhood needed for the primitive operation as well as enable it to operate in an environment where the data is on disk. Results of experiments demonstrate an order of magnitude improvement in the time to perform the algorithm and several orders of magnitude improvement in work efficiency when compared with several prominent existing methods.}, keywords = {All nearest neighbor algorithm, Disk-based data structures, Incremental neighbor finding algorithm, k Nearest neighbors, kNN Algorithm, Locality, Neighbor finding, Neighborhood, Point-cloud graphics, Point-cloud operations}, isbn = {0097-8493}, doi = {10.1016/j.cag.2006.11.011}, url = {http://www.sciencedirect.com/science/article/pii/S0097849306002378}, author = {Sankaranarayanan,Jagan and Samet, Hanan and Varshney, Amitabh} } @conference {12576, title = {Fast Bilinear SfM with Side Information}, booktitle = {Computer Vision, 2007. ICCV 2007. IEEE 11th International Conference on}, year = {2007}, month = {2007/10//}, pages = {1 - 8}, abstract = {We study the beneficial effect of side information on the Structure from Motion (SfM) estimation problem. The side information that we consider is measurement of a {\textquoteright}reference vector{\textquoteright} and distance from fixed plane perpendicular to that reference vector. Firstly, we show that in the presence of this information, the SfM equations can be rewritten similar to a bilinear form in its unknowns. Secondly, we describe a fast iterative estimation procedure to recover the structure of both stationary scenes and moving objects that capitalizes on this information. We also provide a refinement procedure in order to tackle incomplete or noisy side information. We characterize the algorithm with respect to its reconstruction accuracy, memory requirements and stability. Finally, we describe two classes of commonly occurring real-world scenarios in which this algorithm will be effective: (a) presence of a dominant ground plane in the scene and (b) presence of an inertial measurement unit on board. 
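The SfM abstract just above turns on the observation that, given the side information, the estimation equations become close to a bilinear form in the unknowns, so fixing one factor makes the problem linear in the other. The following generic alternating-least-squares sketch illustrates that pattern on a synthetic low-rank problem; it is not the paper's actual SfM formulation, and all data here are invented.

import numpy as np

rng = np.random.default_rng(0)
A_true = rng.standard_normal((20, 3))
B_true = rng.standard_normal((3, 30))
M = A_true @ B_true + 0.01 * rng.standard_normal((20, 30))  # noisy bilinear data

B = rng.standard_normal((3, 30))       # random initialization
for _ in range(50):
    A = M @ np.linalg.pinv(B)          # least-squares solve for A with B fixed
    B = np.linalg.pinv(A) @ M          # least-squares solve for B with A fixed

print("relative residual:", np.linalg.norm(M - A @ B) / np.linalg.norm(M))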
Experiments using both real data and rigorous simulations show the efficacy of the algorithm.}, keywords = {bilinear estimation; iterative estimation procedure; motion estimation; image reconstruction; image recovery; reference vector; refinement procedure; side information; structure from motion}, doi = {10.1109/ICCV.2007.4408874}, author = {Ramachandran, M. and Veeraraghavan,A. and Chellapa, Rama} } @conference {18570, title = {Filtering spam with behavioral blacklisting}, booktitle = {Proceedings of the 14th ACM conference on Computer and communications security}, series = {CCS {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {342 - 351}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Spam filters often use the reputation of an IP address (or IP address range) to classify email senders. This approach worked well when most spam originated from senders with fixed IP addresses, but spam today is also sent from IP addresses for which blacklist maintainers have outdated or inaccurate information (or no information at all). Spam campaigns also involve many senders, reducing the amount of spam any particular IP address sends to a single domain; this method allows spammers to stay "under the radar". The dynamism of any particular IP address begs for blacklisting techniques that automatically adapt as the senders of spam change. This paper presents SpamTracker, a spam filtering system that uses a new technique called behavioral blacklisting to classify email senders based on their sending behavior rather than their identity. Spammers cannot evade SpamTracker merely by using "fresh" IP addresses because blacklisting decisions are based on sending patterns, which tend to remain more invariant. SpamTracker uses fast clustering algorithms that react quickly to changes in sending patterns. We evaluate SpamTracker{\textquoteright}s ability to classify spammers using email logs for over 115 email domains; we find that SpamTracker can correctly classify many spammers missed by current filtering techniques. Although our current datasets prevent us from confirming SpamTracker{\textquoteright}s ability to completely distinguish spammers from legitimate senders, our evaluation shows that SpamTracker can identify a significant fraction of spammers that current IP-based blacklists miss. SpamTracker{\textquoteright}s ability to identify spammers before existing blacklists suggests that it can be used in conjunction with existing techniques (e.g., as an input to greylisting). SpamTracker is inherently distributed and can be easily replicated; incorporating it into existing email filtering infrastructures requires only small modifications to mail server configurations.}, keywords = {blacklists, botnets, clustering, Security, spam}, isbn = {978-1-59593-703-2}, doi = {10.1145/1315245.1315288}, url = {http://doi.acm.org/10.1145/1315245.1315288}, author = {Ramachandran,Anirudh and Feamster, Nick and Vempala,Santosh} } @conference {12583, title = {From Videos to Verbs: Mining Videos for Activities using a Cascade of Dynamical Systems}, booktitle = {Computer Vision and Pattern Recognition, 2007. CVPR {\textquoteright}07. IEEE Conference on}, year = {2007}, month = {2007/06//}, pages = {1 - 8}, abstract = {Clustering video sequences in order to infer and extract activities from a single video stream is an extremely important problem and has significant potential in video indexing, surveillance, activity discovery and event recognition.
Clustering a video sequence into activities requires one to simultaneously recognize activity boundaries (activity consistent subsequences) and cluster these activity subsequences. In order to do this, we build a generative model for activities (in video) using a cascade of dynamical systems and show that this model is able to capture and represent a diverse class of activities. We then derive algorithms to learn the model parameters from a video stream and also show how a single video sequence may be clustered into different clusters where each cluster represents an activity. We also propose a novel technique to build affine, view, rate invariance of the activity into the distance metric for clustering. Experiments show that the clusters found by the algorithm correspond to semantically meaningful activities.}, keywords = {activity clustering; dynamical systems; feature extraction; image sequences; pattern clustering; single video stream; video mining; video signal processing}, doi = {10.1109/CVPR.2007.383170}, author = {Turaga, P.K. and Veeraraghavan,A. and Chellapa, Rama} } @article {14618, title = {Genome-wide expression profiling and bioinformatics analysis of diurnally regulated genes in the mouse prefrontal cortex}, journal = {Genome Biology}, volume = {8}, year = {2007}, month = {2007/11/20/}, pages = {R247 - R247}, abstract = {The prefrontal cortex is important in regulating sleep and mood. Diurnally regulated genes in the prefrontal cortex may be controlled by the circadian system, by sleep:wake states, or by cellular metabolism or environmental responses. Bioinformatics analysis of these genes will provide insights into a wide range of pathways that are involved in the pathophysiology of sleep disorders and psychiatric disorders with sleep disturbances.}, isbn = {1465-6906}, doi = {10.1186/gb-2007-8-11-r247}, url = {http://genomebiology.com/2007/8/11/R247}, author = {Yang,Shuzhang and Wang,Kai and Valladares,Otto and Hannenhalli, Sridhar and Bucan,Maja} } @article {17905, title = {High-throughput sequence alignment using Graphics Processing Units}, journal = {BMC Bioinformatics}, volume = {8}, year = {2007}, month = {2007///}, pages = {474 - 474}, isbn = {1471-2105}, doi = {10.1186/1471-2105-8-474}, url = {http://www.biomedcentral.com/1471-2105/8/474}, author = {Schatz,Michael C and Trapnell,Cole and Delcher,Arthur L. and Varshney, Amitabh} } @article {13156, title = {On implementing graph cuts on cuda}, journal = {First Workshop on General Purpose Processing on Graphics Processing Units}, year = {2007}, month = {2007///}, abstract = {The Compute Unified Device Architecture (CUDA) has enabled graphics processors to be explicitly programmed as general-purpose shared-memory multi-core processors with a high level of parallelism. In this paper, we present our preliminary results of implementing the Graph Cuts algorithm on CUDA. Our primary focus is on implementing Graph Cuts on grid graphs, which are extensively used in imaging applications. We first explain our implementation of breadth first search (BFS) graph traversal on CUDA, which is extensively used in our Graph Cuts implementation. We then present a basic implementation of Graph Cuts that succeeds in achieving absolute and relative speedups when used for foreground-background segmentation on synthesized images. Finally, we introduce two optimizations that utilize the special structure of grid graphs. The first one is lockstep BFS, which is used to reduce the overhead of BFS traversals.
The second is cache emulation, which is a general technique to regularize memory access patterns and hence enhance memory access throughput. We experimentally show how each of the two optimizations can enhance the performance of the basic implementation on the image segmentation application. }, author = {Hussein,M. and Varshney, Amitabh and Davis, Larry S.} } @conference {18000, title = {Layout-Accurate Design and Implementation of a High-Throughput Interconnection Network for Single-Chip Parallel Processing}, booktitle = {High-Performance Interconnects, 2007. HOTI 2007. 15th Annual IEEE Symposium on}, year = {2007}, month = {2007/08//}, pages = {21 - 28}, abstract = {A mesh of trees (MoT) on-chip interconnection network has been proposed recently to provide high throughput between memory units and processors for single-chip parallel processing (Balkan et al., 2006). In this paper, we report our findings in bringing this concept to silicon. Specifically, we conduct cycle-accurate Verilog simulations to verify the analytical results claimed in (Balkan et al., 2006). We synthesize and obtain the layout of the MoT interconnection networks of various sizes. To further improve throughput, we investigate different arbitration primitives to handle load and store, the two most common memory operations. We also study the use of pipeline registers in large networks when there are long wires. Simulation based on full network layout demonstrates that significant throughput improvement can be achieved over the originally proposed MoT interconnection network. The importance of this work lies in its validation of performance features of the MoT interconnection network, as they were previously shown to be competitive with traditional network solutions. The MoT network is currently used in an eXplicit multi-threading (XMT) on-chip parallel processor, which is engineered to support parallel programming. In that context, a 32-terminal MoT network could support up to 512 on-chip XMT processors. Our 8-terminal network, which could serve 8 processor clusters (or 128 total processors), was also recently accepted for fabrication.}, keywords = {hardware description languages; multi-threading; multiprocessor interconnection networks; parallel processing; parallel programming; mesh of trees; MoT on-chip network; Verilog simulations; layout-accurate design; eXplicit multi-threading; XMT processor; on-chip parallel processing; pipeline registers; single-chip parallel processing}, doi = {10.1109/HOTI.2007.11}, author = {Balkan,A.O. and Horak,M.N. and Gang Qu and Vishkin, Uzi} } @article {17974, title = {Modelling and rendering large volume data with gaussian radial basis functions}, volume = {UMIACS-TR-2007-22}, year = {2007}, month = {2007///}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Implicit representations have the potential to represent large volumes succinctly. In this paper we present a multiresolution and progressive implicit representation of scalar volumetric data using anisotropic Gaussian radial basis functions (RBFs) defined over an octree. Our representation lends itself well to progressive level-of-detail representations. Our RBF encoding algorithm based on a Maximum Likelihood Estimation (MLE) calculation is non-iterative, scales in an O(n log n) manner, and operates in a memory-friendly manner on very large datasets by processing small blocks at a time.
We also present a GPU-based ray-casting algorithm for direct rendering from implicit volumes. Our GPU-based implicit volume rendering algorithm is accelerated by early-ray termination and empty-space skipping for implicit volumes and can render volumes encoded with 16 million RBFs at 1 to 3 frames/second. The octree hierarchy enables the GPU-based ray-casting algorithm to traverse efficiently using location codes and is also suitable for view-dependent level-of-detail-based rendering. }, author = {Juba,D. and Varshney, Amitabh} } @conference {18002, title = {Models for advancing PRAM and other algorithms into parallel programs for a PRAM-On-Chip platform}, booktitle = {IN HANDBOOK OF PARALLEL COMPUTING: MODELS, ALGORITHMS AND APPLICATIONS, EDITORS}, year = {2007}, month = {2007///}, publisher = {CRC Press}, organization = {CRC Press}, abstract = {A bold vision that guided this work is as follows: (i) a parallel algorithms and programming course could become a standard course in every undergraduate computer science program, and (ii) this course could be coupled with a so-called PRAM-On-Chip architecture {\textemdash} a commodity high-end multi-core computer architecture. In fact, the current paper is a tutorial on how to convert PRAM algorithms into efficient PRAM-On-Chip programs. Coupled with a text on PRAM algorithms as well as an available PRAM-On-Chip tool-chain, comprising a compiler and a simulator, the paper provides the missing link for upgrading a standard theoretical PRAM algorithms class to a parallel algorithms and programming class. Having demonstrated that such a course could cover similar programming projects and material to what is covered by a typical first serial algorithms and programming course, the paper suggests that parallel programming in the emerging multi-core era does not need to be more difficult than serial programming. If true, a powerful answer to the so-called parallel programming open problem is being provided. This open problem is currently the main stumbling block for the industry in getting the upcoming generation of multi-core architectures to improve single task completion time using easy-to-program application programmer interfaces. Known constraints of this open problem, such as backwards compatibility on serial code, are also addressed by the overall approach. More concretely, a widely used methodology for advancing parallel algorithmic thinking into parallel algorithms is revisited, and is extended into a methodology for advancing parallel algorithms to PRAM-On-Chip programs. A performance cost model for the PRAM-On-Chip is also presented. It uses as complexity metrics the length of sequence of round trips to memory (LSRTM) and queuing delay (QD) from memory access queues, in addition to standard PRAM computation costs. Highlighting the importance of LSRTM in determining performance is another contribution of the paper. Finally, some alternatives to PRAM algorithms, which, on one hand, are easier-to-think, but, on the other hand, suppress more architecture details, are also discussed. }, author = {Vishkin, Uzi and Caragea,George C.
and Lee,Bryant} } @conference {18588, title = {Multiplexing BGP sessions with BGP-Mux}, booktitle = {Proceedings of the 2007 ACM CoNEXT conference}, series = {CoNEXT {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {44:1 - 44:2}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper describes a BGP-session multiplexer called BGP-Mux, which provides stable, on-demand access to global BGP route feeds. This gateway allows arbitrary and even transient client BGP connections to be provisioned and torn down on demand without affecting globally visible BGP sessions. BGP-Mux provides two capabilities: (1) the ability for a client network to receive multiple unfiltered routes per destination from a set of upstream ASes; and (2) the ability to provision BGP sessions without introducing global instability. Several applications could benefit from these features:}, isbn = {978-1-59593-770-4}, doi = {10.1145/1364654.1364707}, url = {http://doi.acm.org/10.1145/1364654.1364707}, author = {Valancius,Vytautas and Feamster, Nick} } @conference {13309, title = {Multi-resolution Morse-Smale Complexes for Terrain Modeling}, booktitle = {Image Analysis and Processing, 2007. ICIAP 2007. 14th International Conference on}, year = {2007}, month = {2007/09//}, pages = {337 - 342}, abstract = {We propose a hierarchical representation for the morphology of a terrain. The basis of our morphological model is a decomposition of the terrain model, composed of the stable and unstable manifolds defined by its critical points, called a Morse-Smale complex. We propose a compact dual representation of the Morse-Smale complex and we define new simplification operators of the terrain morphology, which act on such representation. Based on these operators, we define a hierarchical morphology-based representation for a terrain, that we call a Multi-resolution Morse-Smale Complex (MMSC). Results from our implementation of the MMSC are shown.}, keywords = {Morse-Smale complex; hierarchical morphology-based representation; terrain modeling; terrain mapping; image morphology; image representation; multiresolution; mathematical morphology; solid modelling}, doi = {10.1109/ICIAP.2007.4362801}, author = {Danovaro,E. and De Floriani, Leila and Vitali,M.} } @conference {13312, title = {Multi-scale dual morse complexes for representing terrain morphology}, booktitle = {Proceedings of the 15th annual ACM international symposium on Advances in geographic information systems}, series = {GIS {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {29:1 - 29:8}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We propose a new multi-scale terrain model, based on a hierarchical representation for the morphology of a terrain. The basis of our morphological model is a dual Morse decomposition of the terrain, composed of the stable and unstable manifolds defined by its critical points and its integral lines. We propose a two-level representation of the dual Morse decomposition and we define new simplification operators for the Morse decomposition which act on such representation. Based on these operators, we define a hierarchical morphology-based representation, that we call a Multi-scale Morse Complex (MMC).
Results from our implementation of the MMC are presented.}, keywords = {Morphology, multi-scale representations, terrain modeling}, isbn = {978-1-59593-914-2}, doi = {10.1145/1341012.1341050}, url = {http://doi.acm.org/10.1145/1341012.1341050}, author = {Danovaro,Emanuele and De Floriani, Leila and Vitali,Maria and Magillo,Paola} } @article {18498, title = {Path splicing: Reliable connectivity with rapid recovery}, journal = {ACM SIGCOMM HotNets VI}, year = {2007}, month = {2007///}, abstract = {We present path splicing, a primitive that constructs network paths from multiple independent routing processes that run over a single network topology. The routing processes compute distinct routing trees using randomly perturbed link weights. A few additional bits in packet headers give end systems access to a large number of paths. By changing these bits, nodes can redirect traffic without detailed knowledge of network paths. Assembling paths by {\textquotedblleft}splicing{\textquotedblright} segments can yield up to an exponential improvement in path diversity for only a linear increase in storage and message complexity. We present randomized approaches for slice construction and failure recovery that achieve near-optimal performance and are extremely simple to configure. Our evaluation of path splicing on realistic ISP topologies demonstrates a dramatic increase in reliability that approaches the best possible using only a small number of slices and for only a small increase in latency. }, author = {Motiwala,M. and Feamster, Nick and Vempala,S.} } @conference {18516, title = {Path splicing with network slicing}, booktitle = {Proc. ACM SIGCOMM Hot-Nets}, year = {2007}, month = {2007///}, author = {Feamster, Nick and Motiwala,M. and Vempala,S.} } @article {17906, title = {Plasma Turbulence Simulation and Visualization on Graphics Processors: Efficient Parallel Computing on the Desktop}, journal = {Bulletin of the American Physical Society}, volume = {Volume 52, Number 11}, year = {2007}, month = {2007/11/12/}, abstract = {Direct numerical simulation (DNS) of turbulence is computationally very intensive and typically relies on some form of parallel processing. Spectral kernels used for spatial discretization are a common computational bottleneck on distributed memory architectures. One way to increase the efficiency of DNS algorithms is to parallelize spectral kernels using tightly-coupled SPMD multiprocessor hardware architecture with minimal inter-processor communication latency. In this poster we present techniques to take advantage of the recent programmable interfaces for modern Graphics Processing Units (GPUs) to carefully map DNS computations to GPU architectures that are characterized by a very high memory bandwidth and hundreds of SPMD processors. We compare and contrast the performance of our parallel algorithm on a modern GPU versus a CPU implementation of several turbulence simulation codes.
We also demonstrate a prototype of a scalable computational steering framework based on turbulence simulation and visualization coupling on the GPU.}, url = {http://meetings.aps.org/Meeting/DPP07/Event/70114}, author = {Stantchev,George and Juba,Derek and Dorland,William and Varshney, Amitabh} } @conference {18003, title = {Plasmonics and the parallel programming problem}, booktitle = {Society of Photo-Optical Instrumentation Engineers (SPIE) Conference Series}, volume = {6477}, year = {2007}, month = {2007///}, pages = {19 - 19}, abstract = {While many parallel computers have been built, it has generally been too difficult to program them. Now, all computers are effectively becoming parallel machines. Biannual doubling in the number of cores on a single chip, or faster, over the coming decade is planned by most computer vendors. Thus, the parallel programming problem is becoming more critical. The only known solution to the parallel programming problem in the theory of computer science is through a parallel algorithmic theory called PRAM. Unfortunately, some of the PRAM theory assumptions regarding the bandwidth between processors and memories did not properly reflect a parallel computer that could be built in previous decades. Reaching memories, or other processors in a multi-processor organization, required off-chip connections through pins on the boundary of each electric chip. Using the number of transistors that is becoming available on chip, on-chip architectures that adequately support the PRAM are becoming possible. However, the bandwidth of off-chip connections remains insufficient and the latency remains too high. This creates a bottleneck at the boundary of the chip for a PRAM-On-Chip architecture. This also prevents scalability to larger {\textquotedblleft}supercomputing{\textquotedblright} organizations spanning across many processing chips that can handle massive amounts of data. Instead of connections through pins and wires, power-efficient CMOS-compatible on-chip conversion to plasmonic nanowaveguides is introduced for improved latency and bandwidth. Proper incorporation of our ideas offers exciting avenues to resolving the parallel programming problem, and an alternative way for building faster, more useable and much more compact supercomputers. }, author = {Vishkin, Uzi and Smolyaninov,I. and Davis,C.} } @article {14582, title = {Position and distance specificity are important determinants of cis-regulatory motifs in addition to evolutionary conservation}, journal = {Nucleic Acids Research}, volume = {35}, year = {2007}, month = {2007/05/01/}, pages = {3203 - 3213}, abstract = {Computational discovery of cis-regulatory elements remains challenging. To cope with the high false-positive rate, evolutionary conservation is routinely used. However, conservation is only one of the attributes of cis-regulatory elements and is neither necessary nor sufficient. Here, we assess two additional attributes{\textemdash}positional and inter-motif distance specificity{\textemdash}that are critical for interactions between transcription factors. We first show that for a greater than expected fraction of known motifs, the genes that contain the motifs in their promoters in a position-specific or distance-specific manner are related, both in function and/or in expression pattern. We then use the position and distance specificity to discover novel motifs.
Our work highlights the importance of distance and position specificity, in addition to the evolutionary conservation, in discovering cis-regulatory motifs.}, doi = {10.1093/nar/gkm201}, url = {http://nar.oxfordjournals.org/content/35/10/3203.abstract}, author = {Vardhanabhuti,Saran and Wang,Junwen and Hannenhalli, Sridhar} } @conference {18001, title = {PRAM-on-chip: first commitment to silicon}, booktitle = {Proceedings of the nineteenth annual ACM symposium on Parallel algorithms and architectures}, series = {SPAA {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {301 - 302}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {ease-of-programming, explicit multi-threading, on-chip parallel processor, Parallel algorithms, PRAM, XMT}, isbn = {978-1-59593-667-7}, doi = {10.1145/1248377.1248427}, url = {http://doi.acm.org/10.1145/1248377.1248427}, author = {Wen,Xingzhi and Vishkin, Uzi} } @conference {19592, title = {Relating Complexity and Precision in Control Flow Analysis}, booktitle = {ICFP {\textquoteright}07 Proceedings of the 12th ACM SIGPLAN International Conference on Functional Programming}, series = {ICFP {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {85 - 96}, publisher = {ACM}, organization = {ACM}, abstract = {We analyze the computational complexity of kCFA, a hierarchy of control flow analyses that determine which functions may be applied at a given call-site. This hierarchy specifies related decision problems, quite apart from any algorithms that may implement their solutions. We identify a simple decision problem answered by this analysis and prove that in the 0CFA case, the problem is complete for polynomial time. The proof is based on a nonstandard, symmetric implementation of Boolean logic within multiplicative linear logic (MLL). We also identify a simpler version of 0CFA related to η-expansion, and prove that it is complete for logarithmic space, using arguments based on computing paths and permutations. For any fixed k>0, it is known that kCFA (and the analogous decision problem) can be computed in time exponential in the program size. For k=1, we show that the decision problem is NP-hard, and sketch why this remains true for larger fixed values of k. The proof technique depends on using the approximation of CFA as an essentially nondeterministic computing mechanism, as distinct from the exactness of normalization. When k=n, so that the "depth" of the control flow analysis grows linearly in the program length, we show that the decision problem is complete for exponential time. In addition, we sketch how the analysis presented here may be extended naturally to languages with control operators. 
All of the insights presented give clear examples of how straightforward observations about linearity, and linear logic, may in turn be used to give a greater understanding of functional programming and program analysis.}, keywords = {complexity, continuation, control flow analysis, eta expansion, geometry of interaction, linear logic, normalization, proofnet, static analysis}, isbn = {978-1-59593-815-2}, url = {http://doi.acm.org/10.1145/1291151.1291166}, author = {David Van Horn and Mairson, Harry G.} } @article {12572, title = {Signal Processing for Biometric Systems [DSP Forum]}, journal = {Signal Processing Magazine, IEEE}, volume = {24}, year = {2007}, month = {2007/11//}, pages = {146 - 152}, abstract = {This IEEE signal processing magazine (SPM) forum discusses signal processing applications, technologies, requirements, and standardization of biometric systems. The forum members bring their expert insights into issues such as biometric security, privacy, and multibiometric and fusion techniques. The invited forum members are Prof. Anil K. Jain of Michigan State University, Prof. Rama Chellappa of the University of Maryland, Dr. Stark C. Draper of the University of Wisconsin in Madison, Prof. Nasir Memon of Polytechnic University, and Dr. P. Jonathon Phillips of the National Institute of Standards and Technology. The moderator of the forum is Dr. Anthony Vetro of Mitsubishi Electric Research Labs, and associate editor of SPM.}, keywords = {biometrics (access control); security; signal processing; biometric security; biometric systems standardization; fusion technique; multibiometric technique; signal processing technology}, isbn = {1053-5888}, doi = {10.1109/MSP.2007.905886}, author = {Jain, A.K. and Chellapa, Rama and Draper, S.C. and Memon, N. and Phillips,P.J. and Vetro, A.} } @article {18005, title = {Thinking in parallel: Some basic data-parallel algorithms and techniques}, journal = {UMIACS, University of Maryland, College Park}, volume = {1993}, year = {2007}, month = {2007///}, author = {Vishkin, Uzi} } @conference {18004, title = {Towards Realizing a PRAM-On-Chip Vision}, booktitle = {Workshop on Highly Parallel Processing on a Chip (HPPC)}, volume = {28}, year = {2007}, month = {2007///}, abstract = {Serial computing became largely irrelevant for growth in computing performance around 2003. Having already concluded that to maintain past performance growth rates, general-purpose computing must be overhauled to incorporate parallel computing at all levels of a computer system{\textemdash}including the programming model{\textemdash}all processor vendors put forward many-core roadmaps. They all expect an exponential increase in the number of cores over at least a decade. This welcome development is also a cause for apprehension. The whole world of computing is now facing the same general-purpose parallel computing challenge that eluded computer science for so many years, and the clock is ticking. It is becoming common knowledge that if you want your program to run faster you will have to program for parallelism, but the vendors who set up the rules have not yet provided clear and effective means (e.g., programming models and languages) for doing that. How can application software vendors be expected to make a large investment in new software developments, when they know that in a few years they are likely to have a whole new set of options for getting much better performance?!
Namely, we are already in a problematic transition stage that slows down performance growth, and may cause a recession if it lasts too long. Unfortunately, some industry leaders are already predicting that the transition period can last a full decade. }, author = {Vishkin, Uzi} } @article {14572, title = {Variola virus topoisomerase: DNA cleavage specificity and distribution of sites in Poxvirus genomes}, journal = {Virology}, volume = {365}, year = {2007}, month = {2007/08/15/}, pages = {60 - 69}, abstract = {Topoisomerase enzymes regulate superhelical tension in DNA resulting from transcription, replication, repair, and other molecular transactions. Poxviruses encode an unusual type IB topoisomerase that acts only at conserved DNA sequences containing the core pentanucleotide 5{\textquoteright}-(T/C)CCTT-3{\textquoteright}. In X-ray structures of the variola virus topoisomerase bound to DNA, protein-DNA contacts were found to extend beyond the core pentanucleotide, indicating that the full recognition site has not yet been fully defined in functional studies. Here we report quantitation of DNA cleavage rates for an optimized 13~bp site and for all possible single base substitutions (40 total sites), with the goals of understanding the molecular mechanism of recognition and mapping topoisomerase sites in poxvirus genome sequences. The data allow a precise definition of enzyme-DNA interactions and the energetic contributions of each. We then used the resulting "action matrix" to show that favorable topoisomerase sites are distributed all along the length of poxvirus DNA sequences, consistent with a requirement for local release of superhelical tension in constrained topological domains. In orthopox genomes, an additional central cluster of sites was also evident. A negative correlation of predicted topoisomerase sites was seen relative to early terminators, but no correlation was seen with early or late promoters. These data define the full variola virus topoisomerase recognition site and provide a new window on topoisomerase function in vivo.}, keywords = {Annotation of topoisomerase sites, Sequence specific recognition, Topoisomerase IB, Variola virus}, isbn = {0042-6822}, doi = {16/j.virol.2007.02.037}, url = {http://www.sciencedirect.com/science/article/pii/S0042682207001225}, author = {Minkah,Nana and Hwang,Young and Perry,Kay and Van Duyne,Gregory D. and Hendrickson,Robert and Lefkowitz,Elliot J. and Hannenhalli, Sridhar and Bushman,Frederic D.} } @article {18008, title = {Bootstrapping free-space optical networks}, journal = {Selected Areas in Communications, IEEE Journal on}, volume = {24}, year = {2006}, month = {2006/12//}, pages = {13 - 22}, abstract = {We introduce a challenging problem in establishing and initially configuring or bootstrapping a Free Space Optical (FSO) network. In such networks, it is assumed that each communication node is a base station, including a router and wireless optical communications hardware, and its number of transceivers is limited. In addition, the FSO networks are characterized by narrow beam, directional links (e.g., operating at 1550 nm) and support up to Gbps data rates. The problem of initially configuring the transceivers to form a connected topology is NP-complete because of the transceiver limitation. What makes this problem even more challenging is the need to configure the transceiver in a "distributed" fashion, because a node can have only direct knowledge of its neighbors. 
We have developed a fully distributed approximation algorithm, which constructs a spanning tree with maximal node degree at most one larger than that in the optimal solution. Due to its distributed nature, this algorithm outperforms known serial algorithms. For a graph with 200 nodes generated in some randomized model, speed-ups greater than 6 have been demonstrated.}, keywords = {approximation algorithm; free-space optical communication; FSO; base station; bootstrapping; communication node; connected topology; distributed algorithms; network configuration; randomized model; spanning tree; optical transceivers; optical router; telecommunication network routing; telecommunication network topology; tree searching; wireless optical communications hardware}, isbn = {0733-8716}, doi = {10.1109/JSAC.2006.258219}, author = {Liu,F. and Vishkin, Uzi and Milner,S.} } @article {18007, title = {A bootstrapping model for directional wireless networks}, journal = {Communications Letters, IEEE}, volume = {10}, year = {2006}, month = {2006/12//}, pages = {840 - 842}, abstract = {Initially configuring or bootstrapping a connected topology in directional wireless networks is a challenging problem, especially when nodes only have local connectivity information and a limited number of transceivers. This paper presents a scalable bootstrapping model which integrates: 1) a distributed bottom-up algorithm that constructs a spanning tree with degree at most one larger than the optimal; 2) a resource discovery algorithm for efficient dissemination of local connectivity information; and 3) synchronization protocols to guarantee the efficient emergence of overall network connectivity from local interactions. We investigate the feasibility and scalability of the proposed model. Results are presented for different network systems, with varying size and signaling data rates.}, keywords = {bootstrapping model; directional wireless networks; distributed bottom-up algorithm; resource discovery algorithm; spanning tree; synchronization protocols; protocols; radio networks; trees (mathematics)}, isbn = {1089-7798}, doi = {10.1109/LCOMM.2006.060808}, author = {Milner,S. and Llorca,J. and Anibha,A. and Vishkin, Uzi} } @article {18012, title = {Case study of gate-level logic simulation on an extremely fine-grained chip multiprocessor}, journal = {Journal of Embedded Computing}, volume = {2}, year = {2006}, month = {2006/01/01/}, pages = {181 - 190}, abstract = {Explicit-multi-threading (XMT) is a parallel programming approach for exploiting on-chip parallelism. Its fine-grained single program multiple data (SPMD) programming model is suitable for many computing-intensive applications. In this paper, we present a parallel gate level logic simulator implemented on an XMT platform and study its performance. Test results show potential for achieving more than a hundred-fold speedup over a serial implementation.
This indicates an interesting possibility for a certain type of single-chip multicore architecture: use an existing easy-to-program API, such as VHDL or Verilog, for reduced application-software development time and better performance over serial performance-driven languages, such as C.}, url = {http://iospress.metapress.com/content/1E5MJUMCQL6VCHJW}, author = {Gu,Pei and Vishkin, Uzi} } @article {17952, title = {Computing and displaying intermolecular negative volume for docking}, journal = {Scientific visualization: the visual extraction of knowledge from data}, volume = {I}, year = {2006}, month = {2006///}, pages = {49 - 64}, abstract = {Protein docking is a Grand Challenge problem that is crucial to our understanding of biochemical processes. Several protein docking algorithms use shape complementarity as the primary criterion for evaluating the docking candidates. The intermolecular volume and area between docked molecules is useful as a measure of the shape complementarity. In this paper we discuss an algorithm for interactively computing intermolecular negative volume and the area of the docking site using graphics hardware. We also present the design considerations for building an interactive 3D visualization tool for visualizing intermolecular negative volumes.}, doi = {10.1007/3-540-30790-7_4}, author = {Lee,C. and Varshney, Amitabh} } @conference {13258, title = {A decomposition-based representation for 3D simplicial complexes}, booktitle = {Proceedings of the fourth Eurographics symposium on Geometry processing}, series = {SGP {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {101 - 110}, publisher = {Eurographics Association}, organization = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, abstract = {We define a new representation for non-manifold 3D shapes described by three-dimensional simplicial complexes, that we call the Double-Level Decomposition (DLD) data structure. The DLD data structure is based on a unique decomposition of the simplicial complex into nearly manifold parts, and encodes the decomposition in an efficient and powerful two-level representation. It is compact, and it supports efficient topological navigation through adjacencies. It also provides a suitable basis for geometric reasoning on non-manifold shapes. We describe an algorithm to decompose a 3D simplicial complex into nearly manifold parts. We discuss how to build the DLD data structure from a description of a 3D complex as a collection of tetrahedra, dangling triangles and wire edges, and we present algorithms for topological navigation. We present a thorough comparison with existing representations for 3D simplicial complexes.}, isbn = {3-905673-36-3}, url = {http://dl.acm.org/citation.cfm?id=1281957.1281970}, author = {Hui,Annie and Vaczlavik,Lucas and De Floriani, Leila} } @article {17931, title = {An Efficient Computational Method for Predicting Rotational Diffusion Tensors of Globular Proteins Using an Ellipsoid Representation}, journal = {Journal of the American Chemical Society}, volume = {128}, year = {2006}, month = {2006///}, pages = {15432 - 15444}, abstract = {We propose a new computational method for predicting rotational diffusion properties of proteins in solution. The method is based on the idea of representing the protein surface as an ellipsoid shell.
In contrast to other existing approaches, this method uses principal component analysis of protein surface coordinates, which results in a substantial increase in the computational efficiency of the method. Direct comparison with the experimental data as well as with the recent computational approach (Garcia de la Torre et al., J. Magn. Reson. 2000, B147, 138-146), based on representation of the protein surface as a set of small spherical friction elements, shows that the method proposed here reproduces experimental data with at least the same level of accuracy and precision as the other approach, while being approximately 500 times faster. Using the new method we investigated the effect of the hydration layer and protein surface topography on the rotational diffusion properties of a protein. We found that a hydration layer constructed of approximately one monolayer of water molecules smoothens the protein surface and effectively doubles the overall tumbling time. We also calculated the rotational diffusion tensors for a set of 841 protein structures representing the known protein folds. Our analysis suggests that an anisotropic rotational diffusion model is generally required for NMR relaxation data analysis in single-domain proteins, and that the axially symmetric model could be sufficient for these purposes in approximately half of the proteins.}, isbn = {0002-7863}, doi = {10.1021/ja062715t}, url = {http://dx.doi.org/10.1021/ja062715t}, author = {Ryabov,Yaroslav E.
and Geraghty,Charles and Varshney, Amitabh and Fushman, David} } @conference {14444, title = {Entity resolution in geospatial data integration}, booktitle = {Proceedings of the 14th annual ACM international symposium on Advances in geographic information systems}, series = {GIS {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {83 - 90}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Due to the growing availability of geospatial data from a wide variety of sources, there is a pressing need for robust, accurate and automatic merging and matching techniques. Geospatial Entity Resolution is the process of determining, from a collection of database sources referring to geospatial locations, a single consolidated collection of {\textquoteright}true{\textquoteright} locations. At the heart of this process is the problem of determining when two location references match{\textemdash}i.e., when they refer to the same underlying location. In this paper, we introduce a novel method for resolving location entities in geospatial data. A typical geospatial database contains heterogeneous features such as location name, spatial coordinates, location type and demographic information. We investigate the use of all of these features in algorithms for geospatial entity resolution. Entity resolution is further complicated by the fact that the different sources may use different vocabularies for describing the location types and a semantic mapping is required. We propose a novel approach which learns how to combine the different features to perform accurate resolutions. We present experimental results showing that methods combining spatial and non-spatial features (e.g., location-name, location-type, etc.) together outperform methods based on spatial or name information alone.}, isbn = {1-59593-529-0}, doi = {10.1145/1183471.1183486}, url = {http://doi.acm.org/10.1145/1183471.1183486}, author = {Sehgal,Vivek and Getoor, Lise and Viechnicki,Peter D} } @article {12135, title = {Experiments to understand HPC time to development}, journal = {CTWatch Quarterly}, year = {2006}, month = {2006///}, abstract = {In order to understand how high performance computing (HPC) programs are developed, a series of experiments, using students in graduate level HPC classes, has been conducted at many universities in the US. In this paper we discuss the general process of conducting those experiments, give some of the early results of those experiments, and describe a web-based process we are developing that will allow us to run additional experiments at other universities and laboratories that will be easier to conduct and generate results that more accurately reflect the process of building HPC programs.}, keywords = {hackystat, HPC, publications-journals}, url = {http://csdl.ics.hawaii.edu/techreports/06-08/06-08.pdf}, author = {Hochstein, Lorin and Nakamura,Taiga and Basili, Victor R. and Asgari, Sima and Zelkowitz, Marvin V and Hollingsworth, Jeffrey K and Shull, Forrest and Carver,Jeffrey and Voelp,Martin and Zazworka, Nico and Johnson,Philip} } @conference {16871, title = {A Fast k-Neighborhood Algorithm for Large Point-Clouds}, booktitle = {Proceedings of the 3rd IEEE/Eurographics Symposium on Point-Based Graphics. ACM, Boston, MA, USA}, year = {2006}, month = {2006///}, abstract = {Algorithms that use point-cloud models make heavy use of the neighborhoods of the points. These neighborhoods are used to compute the surface normals for each point, mollification, and noise removal.
All of these primitive operations require the seemingly repetitive process of finding the k nearest neighbors of each point. These algorithms are primarily designed to run in main memory. However, rapid advances in scanning technologies have made available point-cloud models that are too large to fit in the main memory of a computer. This calls for more efficient methods of computing the k nearest neighbors of a large collection of points, many of which are already in close proximity. A fast k nearest neighbor algorithm is presented that makes use of the locality of successive points whose k nearest neighbors are sought to significantly reduce the time needed to compute the neighborhood needed for the primitive operation as well as enable it to operate in an environment where the data is on disk. Results of experiments demonstrate an order of magnitude improvement in the time to perform the algorithm and several orders of magnitude improvement in work efficiency when compared with several prominent existing methods. }, author = {Sankaranarayanan,J. and Samet, Hanan and Varshney, Amitabh} } @conference {12622, title = {The Function Space of an Activity}, booktitle = {Computer Vision and Pattern Recognition, 2006 IEEE Computer Society Conference on}, volume = {1}, year = {2006}, month = {2006///}, pages = {959 - 968}, abstract = {An activity consists of an actor performing a series of actions in a pre-defined temporal order. An action is an individual atomic unit of an activity. Different instances of the same activity may consist of varying relative speeds at which the various actions are executed, in addition to other intra- and inter-person variabilities. Most existing algorithms for activity recognition are not very robust to intra- and inter-personal changes of the same activity, and are extremely sensitive to warping of the temporal axis due to variations in speed profile. In this paper, we provide a systematic approach to learn the nature of such time warps while simultaneously allowing for the variations in descriptors for actions. For each activity we learn an {\textquotedblleft}average{\textquotedblright} sequence that we denote as the nominal activity trajectory. We also learn a function space of time warpings for each activity separately. The model can be used to learn individual-specific warping patterns so that it may also be used for activity based person identification. The proposed model leads us to algorithms for learning a model for each activity, clustering activity sequences and activity recognition that are robust to temporal, intra- and inter-person variations. We provide experimental results using two datasets.}, doi = {10.1109/CVPR.2006.304}, author = {Veeraraghavan,A. and Chellapa, Rama and Roy-Chowdhury, A.K.} } @article {17968, title = {Geometry-dependent lighting}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {12}, year = {2006}, month = {2006/04//march}, pages = {197 - 207}, abstract = {In this paper, we introduce geometry-dependent lighting that allows lighting parameters to be defined independently and possibly discrepantly over an object or scene based on the local geometry. We present and discuss light collages, a lighting design system with geometry-dependent lights for effective feature-enhanced visualization. Our algorithm segments the objects into local surface patches and places lights that are locally consistent but globally discrepant to enhance the perception of shape.
We use spherical harmonics for efficiently storing and computing light placement and assignment. We also outline a method to find the minimal number of light sources sufficient to illuminate an object well with our globally discrepant lighting approach.}, keywords = {feature-enhanced visualization; geometry-dependent light; light collage; light placement; lighting design system; computational geometry; data visualisation; lighting; Algorithms; Computer Graphics; Image Enhancement; Image Interpretation, Computer-Assisted; Imaging, Three-Dimensional; Information Storage and Retrieval; Lighting; User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.30}, author = {Lee,C. H and Hao,X. and Varshney, Amitabh} } @article {17908, title = {Geometry-guided computation of 3D electrostatics for large biomolecules}, journal = {Computer Aided Geometric Design}, volume = {23}, year = {2006}, month = {2006/08//}, pages = {545 - 557}, abstract = {Electrostatic interactions play a central role in biological processes. Development of fast computational methods to solve the underlying Poisson{\textendash}Boltzmann equation (PBE) is vital for biomolecular modeling and simulation packages. In this paper, we propose new methods for efficiently computing the electrostatic potentials for large molecules by using the geometry of the molecular shapes to guide the computation. The accuracy and stability of the solution to the PBE is quite sensitive to the boundary layer between the solvent and the solute, which defines the molecular surface. In this paper, we present a new interface-layer-focused PBE solver. First, we analytically construct the molecular surface of the molecule and compute a distance field from the surface. We then construct nested iso-surface layers outwards and inwards from the surface using the distance field. We have developed a volume simplification algorithm to adaptively adjust the density of the irregular grid based on the importance to the PBE solution. We have generalized the finite difference methods using Taylor series expansion on the irregular grids. Our algorithm achieves about a threefold speedup in the iterative solution process of the PBE, with more accurate results on an analytically solvable testing case, compared with the popular optimized DelPhi program.}, keywords = {Finite difference methods, Iso-surface generation, Level-of-detail of hierarchy, Poisson{\textendash}Boltzmann equation, Scalar field, Tetrahedron decomposition}, isbn = {0167-8396}, doi = {10.1016/j.cagd.2006.04.003}, url = {http://www.sciencedirect.com/science/article/pii/S0167839606000434}, author = {Hao,Xuejun and Varshney, Amitabh} } @conference {16451, title = {Implementing a bioinformatics pipeline (bip) on a mediator platform: Comparing cost and quality of alternate choices}, booktitle = {Data Engineering Workshops, 2006. Proceedings. 22nd International Conference on}, year = {2006}, month = {2006///}, pages = {67 - 67}, author = {Eckman,B. A and Gaasterland,T. and Lacroix,Z. and Raschid, Louiqa and Snyder,B. and Vidal,M. E} } @article {18013, title = {Issues in writing a parallel compiler starting from a serial compiler}, year = {2006}, month = {2006///}, institution = {draft. Technical report, University of Maryland Institute for Advanced Computer Studies}, author = {Tzannes,A. and Barua,R. and Caragea,G.
and Vishkin, Uzi} } @article {18010, title = {Mesh-of-Trees and Alternative Interconnection Networks for Single Chip Parallel Processing (Extended Abstract)}, year = {2006}, month = {2006/06//}, abstract = {Many applications have stimulated the recent surge of interest in single-chip parallel processing. In such machines, it is crucial to implement a high-throughput low-latency interconnection network to connect the on-chip components, especially the processing units and the memory units. In this paper, we propose a new mesh of trees (MoT) implementation of the interconnection network and evaluate it relative to metrics such as wire area, register count, total switch delay, maximum throughput, latency-throughput relation and delay effects of long wires. We show that on-chip interconnection networks can facilitate higher bandwidth between processors and shared first-level cache than previously considered possible. This has significant impact for chip multiprocessing. MoT is also compared, both analytically and experimentally, to some other traditional network topologies, such as hypercube, butterfly, fat trees and butterfly fat trees. When we evaluate a 64-terminal MoT network at 65nm technology, concrete results show that MoT provides higher throughput and lower latency especially when the input traffic (or the on-chip parallelism) is high, at the cost of larger area. A recurring problem in networking and communication is that of achieving good sustained throughput in contrast to just high theoretical peak performance that does not materialize for typical workloads. Our quantitative results demonstrate a clear advantage of the proposed MoT network in the context of single-chip parallel processing.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/3678}, author = {Balkan,Aydin O. and Gang Qu and Vishkin, Uzi} } @conference {17854, title = {Model-based OpenMP implementation of a 3D facial pose tracking system}, booktitle = {2006 International Conference on Parallel Processing Workshops, 2006. ICPP 2006 Workshops}, year = {2006}, month = {2006///}, pages = {8 pp.-73 - 8 pp.-73}, publisher = {IEEE}, organization = {IEEE}, abstract = {Most image processing applications are characterized by computation-intensive operations, and high memory and performance requirements. Parallelized implementation on shared-memory systems offers an attractive solution to this class of applications. However, we cannot thoroughly exploit the advantages of such architectures without proper modeling and analysis of the application. In this paper, we describe our implementation of a 3D facial pose tracking system using the OpenMP platform. Our implementation is based on a design methodology that uses coarse-grain dataflow graphs to model and schedule the application. We present our modeling approach, details of the implementation that we derived based on this modeling approach, and associated performance results.
The parallelized implementation achieves significant speedup, and meets or exceeds the target frame rate under various configurations.}, keywords = {3D facial pose tracking system, application modeling, application program interfaces, application scheduling, coarse-grain dataflow graphs, Concurrent computing, data flow graphs, Educational institutions, face recognition, IMAGE PROCESSING, image processing applications, Inference algorithms, Message passing, OpenMP platform, parallel implementation, PARALLEL PROCESSING, parallel programming, Particle tracking, Processor scheduling, SHAPE, shared memory systems, shared-memory systems, Solid modeling, tracking}, isbn = {0-7695-2637-3}, doi = {10.1109/ICPPW.2006.55}, author = {Saha,S. and Chung-Ching Shen and Chia-Jui Hsu and Aggarwal,G. and Veeraraghavan,A. and Sussman, Alan and Bhattacharyya, Shuvra S.} } @article {18011, title = {Models for Advancing PRAM and Other Algorithms into Parallel Programs for a PRAM-On-Chip Platform}, year = {2006}, month = {2006/05/18/}, abstract = {A bold vision that guided this work is as follows: (i) a parallel algorithms and programming course could become a standard course in every undergraduate computer science program, and (ii) this course could be coupled with a so-called PRAM-On-Chip architecture{\textemdash}a commodity high-end multi-core computer architecture. In fact, the current paper is a tutorial on how to convert PRAM algorithms into efficient PRAM-On-Chip programs. Coupled with a text on PRAM algorithms as well as an available PRAM-On-Chip tool-chain, comprising a compiler and a simulator, the paper provides the missing link for upgrading a standard theoretical PRAM algorithms class to a parallel algorithms and programming class. Having demonstrated that such a course could cover similar programming projects and material to what is covered by a typical first serial algorithms and programming course, the paper suggests that parallel programming in the emerging multi-core era does not need to be more difficult than serial programming. If true, a powerful answer to the so-called parallel programming open problem is being provided. This open problem is currently the main stumbling block for the industry in getting the upcoming generation of multi-core architectures to improve single task completion time using easy-to-program application programmer interfaces. Known constraints of this open problem, such as backwards compatibility on serial code, are also addressed by the overall approach. More concretely, a widely used methodology for advancing parallel algorithmic thinking into parallel algorithms is revisited, and is extended into a methodology for advancing parallel algorithms to PRAM-On-Chip programs. A performance cost model for the PRAM-On-Chip is also presented. It uses as complexity metrics the length of sequence of round trips to memory (LSRTM) and queuing delay (QD) from memory access queues, in addition to standard PRAM computation costs. Highlighting the importance of LSRTM in determining performance is another contribution of the paper. Finally, some alternatives to PRAM algorithms, which, on one hand, are easier-to-think, but, on the other hand, suppress more architecture details, are also discussed. }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/3338}, author = {Vishkin, Uzi and Caragea,George C.
and Lee,Bryant} } @conference {12616, title = {Motion Based Correspondence for 3D Tracking of Multiple Dim Objects}, booktitle = {Acoustics, Speech and Signal Processing, 2006. ICASSP 2006 Proceedings. 2006 IEEE International Conference on}, volume = {2}, year = {2006}, month = {2006/05//}, pages = {II - II}, abstract = {Tracking multiple objects in a video is a demanding task that is frequently encountered in several systems such as surveillance and motion analysis. The ability to track objects in 3D requires the use of multiple cameras. While tracking multiple objects using multiple video cameras, establishing correspondence between objects in the various cameras is a nontrivial task. Specifically, when the targets are dim or are very far away from the camera, appearance cannot be used in order to establish this correspondence. Here, we propose a technique to establish correspondence across cameras using the motion features extracted from the targets, even when the relative position of the cameras is unknown. Experimental results are provided for the problem of tracking multiple bees in natural flight using two cameras. The reconstructed 3D flight paths of the bees show some interesting flight patterns.}, keywords = {3D tracking, motion analysis, video cameras, feature extraction, motion features, motion based correspondence, multiple dim objects, video signal processing}, doi = {10.1109/ICASSP.2006.1660431}, author = {Veeraraghavan,A. and Srinivasan, M. and Chellapa, Rama and Baird, E. and Lamont, R.} } @article {13266, title = {A multi-resolution representation for terrain morphology}, journal = {Geographic Information Science}, year = {2006}, month = {2006///}, pages = {33 - 46}, abstract = {Mesh-based terrain representations provide accurate descriptions of a terrain, but fail in capturing its morphological structure. The morphology of a terrain is defined by its critical points and by the critical lines joining them, which form a so-called surface network. Because of the large size of current terrain data sets, a multi-resolution representation of the terrain morphology is crucial. Here, we address the problem of representing the morphology of a terrain at different resolutions. The basis of the multi-resolution terrain model, which we call a Multi-resolution Surface Network (MSN), is a generalization operator on a surface network, which produces a simplified representation incrementally. An MSN is combined with a multi-resolution mesh-based terrain model, which encompasses the terrain morphology at different resolutions. We show how variable-resolution representations can be extracted from an MSN, and we also present an implementation of an MSN in a compact encoding data structure.}, doi = {10.1007/11863939_3}, author = {Danovaro,E. and De Floriani, Leila and Papaleo,L. and Vitali,M.} } @article {14631, title = {Patterns of sequence conservation in presynaptic neural genes}, journal = {Genome Biology}, volume = {7}, year = {2006}, month = {2006/11/10/}, pages = {R105 - R105}, abstract = {The neuronal synapse is a fundamental functional unit in the central nervous system of animals.
Because synaptic function is evolutionarily conserved, we reasoned that functional sequences of genes and related genomic elements known to play important roles in neurotransmitter release would also be conserved.}, isbn = {1465-6906}, doi = {10.1186/gb-2006-7-11-r105}, url = {http://genomebiology.com/2006/7/11/R105}, author = {Hadley,Dexter and Murphy,Tara and Valladares,Otto and Hannenhalli, Sridhar and Ungar,Lyle and Kim,Junhyong and Bu{\'c}an,Maja} } @article {12612, title = {Principal components null space analysis for image and video classification}, journal = {Image Processing, IEEE Transactions on}, volume = {15}, year = {2006}, month = {2006/07//}, pages = {1816 - 1830}, abstract = {We present a new classification algorithm, principal components null space analysis (PCNSA), which is designed for classification problems like object recognition where different classes have unequal and nonwhite noise covariance matrices. PCNSA first obtains a principal components subspace (PCA space) for the entire data. In this PCA space, it finds for each class "i", an Mi-dimensional subspace along which the class{\textquoteright} intraclass variance is the smallest. We call this subspace an approximate null space (ANS) since the lowest variance is usually "much smaller" than the highest. A query is classified into class "i" if its distance from the class{\textquoteright} mean in the class{\textquoteright} ANS is a minimum. We derive upper bounds on the classification error probability of PCNSA and use these expressions to compare classification performance of PCNSA with that of subspace linear discriminant analysis (SLDA). We propose a practical modification of PCNSA called progressive-PCNSA that also detects "new" (untrained) classes. Finally, we provide an experimental comparison of PCNSA and progressive PCNSA with SLDA and PCA and also with other classification algorithms (linear SVMs, kernel PCA, kernel discriminant analysis, and kernel SLDA) for object recognition and face recognition under large pose/expression variation. We also show applications of PCNSA to two classification problems in video: an action retrieval problem and abnormal activity detection.}, keywords = {approximate null space, classification error probability, face recognition, image classification, object recognition, principal components null space analysis, subspace linear discriminant analysis, video classification, Principal Component Analysis, Pattern Recognition}, isbn = {1057-7149}, doi = {10.1109/TIP.2006.873449}, author = {Vaswani, N. and Chellapa, Rama} } @article {18009, title = {Programmer{\textquoteright}s Manual for XMTC Language, XMTC Compiler and XMT Simulator}, journal = {Technical Reports from UMIACS, UMIACS-TR-2005-45}, year = {2006}, month = {2006/06//}, abstract = {Explicit Multi-Threading (XMT) is a computing framework developed at the University of Maryland as part of a PRAM-on-chip vision (http://www.umiacs.umd.edu/users/vishkin/XMT). Much in the same way that performance programming of standard computers relies on the C language, XMT performance programming is done using an extension of C called XMTC. This manual presents the second generation of the XMTC programming paradigm. It is intended to be used by an application programmer who is new to XMTC.
In the first part of this technical report (UMIACS-TR 2005-45 Part 1 of 2), we define and describe key concepts, list the limitations and restrictions, and give examples. The second part (UMIACS-TR 2005-45 Part 2 of 2) is a brief tutorial, and it demonstrates the basic programming concepts of the XMTC language with examples and exercises.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/3673}, author = {Balkan,Aydin O. and Vishkin, Uzi} } @conference {16472, title = {Query Rewriting in the Semantic Web}, booktitle = {Data Engineering Workshops, 22nd International Conference on}, year = {2006}, month = {2006///}, pages = {7 - 7}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {The widespread explosion of Web accessible resources has led to a new challenge of locating all relevant resources and identifying the best ones to answer a query. This challenge has to address the difficult task of ranking the resources based on user needs, as well as the more expensive computational task of determining all the solutions to answer a query. In this paper, we define a Top K problem for query rewriting on the Semantic Web. We first introduce a three level data model composed of the ontology level, the physical level of the physical resources, and the data level composed of the entries in the different resources. We present a query language for Top K navigational queries over ontology concepts. We then sketch the outline of an efficient search algorithm to compute an approximation of the Top K rewriting options to produce source paths among the physical resources. We briefly discuss the results of an experimental study.}, isbn = {0-7695-2571-7}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICDEW.2006.124}, author = {Vidal,Maria Esther and Raschid, Louiqa and Marquez,Natalia and Cardenas,Marelis and Yao Wu} } @conference {15150, title = {Reliable broadcast in radio networks: the bounded collision case}, booktitle = {Proceedings of the twenty-fifth annual ACM symposium on Principles of distributed computing}, series = {PODC {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {258 - 264}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We study the problem of achieving global broadcast in a radio network where a node can multicast messages to all of its neighbors (that is, nodes within some given distance r), and up to t nodes in any single neighborhood may be corrupted. Previous work assumes that corrupted nodes can neither cause collisions nor spoof addresses of honest nodes. In this work, we eliminate these assumptions and allow each faulty node to cause a (known) bounded number of collisions and spoof the addresses of arbitrary other nodes. We show that the maximum tolerable t in this case is identical to the maximum tolerable t when collisions and address spoofing are not allowed.
Thus, by causing collisions and spoofing addresses, an adversary may be able to degrade the efficiency of achieving broadcast, but it cannot affect the feasibility of this task.}, keywords = {broadcast, byzantine failure, Fault tolerance, radio networks}, isbn = {1-59593-384-0}, doi = {10.1145/1146381.1146420}, url = {http://doi.acm.org/10.1145/1146381.1146420}, author = {Koo,Chiu-Yuen and Bhandari,Vartika and Katz, Jonathan and Vaidya,Nitin H.} } @article {18506, title = {Revisiting Internet addressing: Back to the future}, volume = {MIT-CSAIL-TR-2006-025}, year = {2006}, month = {2006///}, institution = {Massachusetts Institute of Technology Computer Science and Artificial Intelligence Laboratory}, abstract = {IP prefixes undermine three goals of Internet routing: accurate reflection of network-layer reachability, secure routing messages, and effective traffic control. This paper presents Atomic IP (AIP), a simple change to Internet addressing (which in fact reverts to how addressing once worked) that allows Internet routing to achieve these goals.}, author = {Vutukuru,M. and Feamster, Nick and Walfish,M. and Balakrishnan,H. and Shenker,S.} } @article {17966, title = {Saliency-guided Enhancement for Volume Visualization}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {12}, year = {2006}, month = {2006/10//sept}, pages = {925 - 932}, abstract = {Recent research in visual saliency has established a computational measure of perceptual importance. In this paper we present a visual-saliency-based operator to enhance selected regions of a volume. We show how we use such an operator on a user-specified saliency field to compute an emphasis field. We further discuss how the emphasis field can be integrated into the visualization pipeline through its modifications of regional luminance and chrominance. Finally, we validate our work using an eye-tracking-based user study and show that our new saliency enhancement operator is more effective at eliciting viewer attention than the traditional Gaussian enhancement operator.}, keywords = {saliency-guided enhancement, visual-saliency-based operator, volume rendering, volume visualization, data visualization, rendering (computer graphics), Attention, Eye Movements, User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.174}, author = {Kim,Y. and Varshney, Amitabh} } @conference {17538, title = {A structural approach to latency prediction}, booktitle = {Proceedings of the 6th ACM SIGCOMM conference on Internet measurement}, series = {IMC {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {99 - 104}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Several models have been recently proposed for predicting the latency of end-to-end Internet paths. These models treat the Internet as a black-box, ignoring its internal structure. While these models are simple, they can often fail systematically; for example, the most widely used models use metric embeddings that predict no benefit to detour routes even though half of all Internet routes can benefit from detours. In this paper, we adopt a structural approach that predicts path latency based on measurements of the Internet{\textquoteright}s routing topology, PoP connectivity, and routing policy. We find that our approach outperforms Vivaldi, the most widely used black-box model.
Furthermore, unlike metric embeddings, our approach successfully predicts 65\% of detour routes in the Internet. The number of measurements used in our approach is comparable with that required by black-box techniques, but using traceroutes instead of pings.}, keywords = {internet topology, latency prediction, route measurements}, isbn = {1-59593-561-4}, doi = {10.1145/1177080.1177092}, url = {http://doi.acm.org/10.1145/1177080.1177092}, author = {Madhyastha,Harsha V. and Anderson,Thomas and Krishnamurthy,Arvind and Spring, Neil and Venkataramani,Arun} } @conference {16327, title = {Towards Dependability in Everyday Software Using Software Telemetry}, booktitle = {Engineering of Autonomic and Autonomous Systems, IEEE International Workshop on}, year = {2006}, month = {2006///}, pages = {9 - 18}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Application-level software dependability is difficult to ensure. Thus it is typically used only in custom systems and is achieved using one-of-a-kind, handcrafted solutions. We are interested in understanding whether and how these techniques can be applied to more common, lower-end systems. To this end, we have adapted a condition-based maintenance (CBM) approach called the Multivariate State Estimation Technique (MSET). This approach automatically creates sophisticated statistical models that predict system failure well before failures occur, leading to simpler and more successful recoveries. We have packaged this approach in the Software Dependability Framework (SDF). The SDF consists of instrumentation and data management libraries, a CBM module, performance visualization tools, and a software architecture that supports system designers. Finally, we evaluated our framework on a simple video game application. Our results suggest that we can cheaply and reliably predict impending runtime failures and respond to them in time to improve the system{\textquoteright}s dependability.}, isbn = {0-7695-2544-X}, doi = {http://doi.ieeecomputersociety.org/10.1109/EASE.2006.21}, author = {Gross,Kenny C. and Urmanov,Aleksey and Votta,Lawrence G. and McMaster,Scott and Porter, Adam} } @article {16082, title = {TreePlus: Interactive Exploration of Networks with Enhanced Tree Layouts}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {12}, year = {2006}, month = {2006/12//nov}, pages = {1414 - 1426}, abstract = {Despite extensive research, it is still difficult to produce effective interactive layouts for large graphs. Dense layout and occlusion make food webs, ontologies and social networks difficult to understand and interact with. We propose a new interactive visual analytics component called TreePlus that is based on a tree-style layout. TreePlus reveals the missing graph structure with visualization and interaction while maintaining good readability. To support exploration of the local structure of the graph and gathering of information from the extensive reading of labels, we use a guiding metaphor of "plant a seed and watch it grow." It allows users to start with a node and expand the graph as needed, which complements the classic overview techniques that can be effective at (but often limited to) revealing clusters. We describe our design goals, describe the interface and report on a controlled user study with 28 participants comparing TreePlus with a traditional graph interface for six tasks.
In general, the advantage of TreePlus over the traditional interface increased as the density of the displayed data increased. Participants also reported higher levels of confidence in their answers with TreePlus, and most of them preferred TreePlus.}, keywords = {data visualization, graph structure, graphical user interfaces, interactive visual analytics, occlusion, tree-style layout, hidden feature removal, interactive systems, trees (mathematics)}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.106}, author = {Lee,B. and Parr,C.S. and Plaisant, Catherine and Bederson, Benjamin B. and Veksler,V.D. and Gray,W.D. and Kotfila,C.} } @article {17919, title = {Vertex{\textendash}transformation streams}, journal = {Graphical Models}, volume = {68}, year = {2006}, month = {2006/07//}, pages = {371 - 383}, abstract = {Recent trends in parallel computer architecture strongly suggest the need to improve the arithmetic intensity (the compute to bandwidth ratio) for greater performance in time-critical applications, such as interactive 3D graphics. At the same time, advances in stream programming abstraction for graphics processors (GPUs) have enabled us to use parallel algorithm design methods for GPU programming. Inspired by these developments, this paper explores the interactions between multiple data streams to improve arithmetic intensity and address the input geometry bandwidth bottleneck for interactive 3D graphics applications. We introduce the idea of creating vertex and transformation streams that represent large point datasets via their interaction. We discuss how to factor such point datasets into a set of source vertices and transformation streams by identifying the most common translations amongst vertices. We accomplish this by identifying peaks in the cross-power spectrum of the dataset in the Fourier domain. We validate our approach by integrating it with a view-dependent point rendering system and show significant improvements in input geometry bandwidth requirements as well as rendering frame rates.}, keywords = {Arithmetic intensity, Geometry instancing, Stream programming, Streaming algorithms, Transformation encoding}, isbn = {1524-0703}, doi = {10.1016/j.gmod.2006.03.005}, url = {http://www.sciencedirect.com/science/article/pii/S1524070306000373}, author = {Kim,Youngmin and Lee,Chang Ha and Varshney, Amitabh} } @inbook {12679, title = {3D Facial Pose Tracking in Uncalibrated Videos}, booktitle = {Pattern Recognition and Machine Intelligence}, series = {Lecture Notes in Computer Science}, volume = {3776}, year = {2005}, month = {2005///}, pages = {515 - 520}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper presents a method to recover the 3D configuration of a face in each frame of a video. The 3D configuration consists of the 3 translational parameters and the 3 orientation parameters, which correspond to the yaw, pitch, and roll of the face; recovering this configuration is important for applications like face modeling, recognition, and expression analysis. The approach combines the structural advantages of geometric modeling with the statistical advantages of particle-filter-based inference. The face is modeled as the curved surface of a cylinder which is free to translate and rotate arbitrarily.
The geometric modeling takes care of pose and self-occlusion while the statistical modeling handles moderate occlusion and illumination variations. Experimental results on multiple datasets are provided to show the efficacy of the approach. The insensitivity of our approach to calibration parameters (focal length) is also shown.}, isbn = {978-3-540-30506-4}, url = {http://dx.doi.org/10.1007/11590316_81}, author = {Aggarwal,Gaurav and Veeraraghavan,Ashok and Chellapa, Rama}, editor = {Pal,Sankar and Bandyopadhyay,Sanghamitra and Biswas,Sambhunath} } @conference {14325, title = {Are GSM phones THE solution for localization?}, booktitle = {Mobile Computing Systems and Applications, 2006. WMCSA{\textquoteright}06. Proceedings. 7th IEEE Workshop on}, year = {2005}, month = {2005///}, pages = {34 - 42}, author = {Varshavsky,A. and Chen,M.Y. and de Lara,E. and Jon Froehlich and Haehnel,D. and Hightower,J. and LaMarca,A. and Potter,F. and Sohn,T. and Tang,K. and others} } @conference {19027, title = {BIND: a fine-grained attestation service for secure distributed systems}, year = {2005}, month = {2005}, pages = {154 - 168}, abstract = {In this paper we propose BIND (binding instructions and data), a fine-grained attestation service for securing distributed systems. Code attestation has recently received considerable attention in trusted computing. However, current code attestation technology is relatively immature. First, due to the great variability in software versions and configurations, verification of the hash is difficult. Second, the time-of-use and time-of-attestation discrepancy remains to be addressed, since the code may be correct at the time of the attestation, but it may be compromised by the time of use. The goal of BIND is to address these issues and make code attestation more usable in securing distributed systems. BIND offers the following properties: (1) BIND performs fine-grained attestation. Instead of attesting to the entire memory content, BIND attests only to the piece of code we are concerned about. This greatly simplifies verification. (2) BIND narrows the gap between time-of-attestation and time-of-use. BIND measures a piece of code immediately before it is executed and uses a sandboxing mechanism to protect the execution of the attested code. (3) BIND ties the code attestation with the data that the code produces, such that we can pinpoint what code has been run to generate that data. In addition, by incorporating the verification of input data integrity into the attestation, BIND offers transitive integrity verification, i.e., through one signature, we can vouch for the entire chain of processes that have performed transformations over a piece of data. BIND offers a general solution toward establishing a trusted environment for distributed system designers.}, keywords = {BIND, binding instructions and data, code attestation, data integrity, digital signatures, distributed processing, fine-grained attestation service, input data integrity, program verification, sandboxing mechanism, secure distributed systems, signature, time-of-attestation, time-of-use, transitive integrity verification, trusted computing}, author = {Elaine Shi and Perrig, A. and Van Doorn, L.} } @article {17030, title = {Content Index to Volume 18}, journal = {INTERNATIONAL JOURNAL OF HUMAN{\textendash}COMPUTER INTERACTION}, volume = {18}, year = {2005}, month = {2005///}, pages = {367 - 368}, author = {Kuniavsky,M. and Vaughan,M. and Bederson, Benjamin B. and Shneiderman, Ben and Rau,P.L.P. 
and Lane,D.M. and Napier,H.A. and Peres,S.C. and S{\'a}ndor,A.} } @conference {16482, title = {A data model and query language to explore enhanced links and paths in life science sources}, booktitle = {Proceedings of the ACM SIGMOD Workshop on The Web and Databases (WebDB)}, year = {2005}, month = {2005///}, author = {Mihaila,G. and Naumann,F. and Raschid, Louiqa and Vidal,M. E} } @article {18699, title = {Diverse polyubiquitin interaction properties of ubiquitin-associated domains}, journal = {Nature Structural \& Molecular Biology}, volume = {12}, year = {2005}, month = {2005///}, pages = {708 - 714}, abstract = {The ubiquitin-associated (UBA) domain occurs frequently in proteins involved in ubiquitin-dependent signaling pathways. Although polyubiquitin chain binding is considered to be a defining feature of the UBA domain family, the generality of this property has not been established. Here we have surveyed the polyubiquitin interaction properties of 30 UBA domains, including 16 of 17 occurrences in budding yeast. The UBA domains sort into four classes that include linkage-selective polyubiquitin binders and domains that bind different chains (and monoubiquitin) in a nondiscriminatory manner; one notable class (30\%) did not bind any ubiquitin ligand surveyed. The properties of a given UBA domain are conserved from yeast to mammals. Their functional relevance is further suggested by the ability of an ectopic UBA domain to alter the specificity of a deubiquitylating enzyme in a predictable manner. Conversely, non-UBA sequences can modulate the interaction properties of a UBA domain.}, keywords = {Biochemistry, biophysics, cell biology, molecular biology, molecular interactions, protein degradation, signal transduction, structure and function of proteins}, isbn = {1545-9993}, doi = {10.1038/nsmb962}, url = {http://www.nature.com/nsmb/journal/v12/n8/full/nsmb962.html}, author = {Raasi,Shahri and Varadan,Ranjani and Fushman, David and Pickart,Cecile M.} } @article {18203, title = {Dynamic network resource allocation using multimedia content features and ...}, volume = {09/795,952}, year = {2005}, month = {2005/09/20/}, abstract = {A method for dynamically allocating network resources while transferring multimedia at variable bit-rates in a network extracts first content features from the multimedia to determine renegotiation points and observation periods. Second content features and traffic features are extracted from the multimedia bit stream during the observation periods. The second content features and the traffic features are combined in a neural network to predict the network resources to be allocated at the renegotiation points.}, url = {http://www.google.com/patents?id=dToWAAAAEBAJ}, author = {M. Wu and Joyce,Robert A.
and Vetro,Anthony and Wong,Hau-San and Guan,Ling and Kung,Sun-Yuan}, editor = {Mitsubishi Electric Research Labs, Inc.} } @article {16389, title = {EvoSTOC Contributions}, journal = {Applications of evolutionary computing: EvoWorkshops 2005, EvoBIO, EvoCOMNET, EvoHOT, EvoIASP, EvoMUSART, and EvoSTOC}, year = {2005}, month = {2005///}, author = {Merkle,D. and Middendorf,M. and Scheidler,A. and Avigad,G. and Moshaiov,A. and Brauner,N. and Parsopoulos,K.E. and Vrahatis,M.N. and Rand, William and Riolo,R} } @article {14071, title = {The Genome Sequence of Trypanosoma cruzi, Etiologic Agent of Chagas Disease}, journal = {Science}, volume = {309}, year = {2005}, month = {2005/07/15/}, pages = {409 - 415}, abstract = {Whole-genome sequencing of the protozoan pathogen Trypanosoma cruzi revealed that the diploid genome contains a predicted 22,570 proteins encoded by genes, of which 12,570 represent allelic pairs. Over 50\% of the genome consists of repeated sequences, such as retrotransposons and genes for large families of surface molecules, which include trans-sialidases, mucins, gp63s, and a large novel family (>1300 copies) of mucin-associated surface protein (MASP) genes. Analyses of the T. cruzi, T. brucei, and Leishmania major (Tritryp) genomes imply differences from other eukaryotes in DNA repair and initiation of replication and reflect their unusual mitochondrial DNA. Although the Tritryp lack several classes of signaling molecules, their kinomes contain a large and diverse set of protein kinases and phosphatases; their size and diversity imply previously unknown interactions and regulatory processes, which may be targets for intervention.}, doi = {10.1126/science.1112631}, url = {http://www.sciencemag.org/content/309/5733/409.abstract}, author = {El-Sayed, Najib M. and Myler,Peter J. and Bartholomeu,Daniella C. and Nilsson,Daniel and Aggarwal,Gautam and Tran,Anh-Nhi and Ghedin,Elodie and Worthey,Elizabeth A. and Delcher,Arthur L. and Blandin,Ga{\"e}lle and Westenberger,Scott J. and Caler,Elisabet and Cerqueira,Gustavo C. and Branche,Carole and Haas,Brian and Anupama,Atashi and Arner,Erik and {\r A}slund,Lena and Attipoe,Philip and Bontempi,Esteban and Bringaud,Fr{\'e}d{\'e}ric and Burton,Peter and Cadag,Eithon and Campbell,David A. and Carrington,Mark and Crabtree,Jonathan and Darban,Hamid and da Silveira,Jose Franco and de Jong,Pieter and Edwards,Kimberly and Englund,Paul T. and Fazelina,Gholam and Feldblyum,Tamara and Ferella,Marcela and Frasch,Alberto Carlos and Gull,Keith and Horn,David and Hou,Lihua and Huang,Yiting and Kindlund,Ellen and Klingbeil,Michele and Kluge,Sindy and Koo,Hean and Lacerda,Daniela and Levin,Mariano J. and Lorenzi,Hernan and Louie,Tin and Machado,Carlos Renato and McCulloch,Richard and McKenna,Alan and Mizuno,Yumi and Mottram,Jeremy C. and Nelson,Siri and Ochaya,Stephen and Osoegawa,Kazutoyo and Pai,Grace and Parsons,Marilyn and Pentony,Martin and Pettersson,Ulf and Pop, Mihai and Ramirez,Jose Luis and Rinta,Joel and Robertson,Laura and Salzberg,Steven L. and Sanchez,Daniel O. and Seyler,Amber and Sharma,Reuben and Shetty,Jyoti and Simpson,Anjana J. and Sisk,Ellen and Tammi,Martti T. and Tarleton,Rick and Teixeira,Santuza and Van Aken,Susan and Vogt,Christy and Ward,Pauline N. and Wickstead,Bill and Wortman,Jennifer and White,Owen and Fraser,Claire M. and Stuart,Kenneth D. 
and Andersson,Bj{\"o}rn} } @article {16291, title = {The Genome Sequence of Trypanosoma Cruzi, Etiologic Agent of Chagas Disease}, journal = {ScienceScience}, volume = {309}, year = {2005}, month = {2005/07/15/}, pages = {409 - 415}, abstract = {Whole-genome sequencing of the protozoan pathogen Trypanosoma cruzi revealed that the diploid genome contains a predicted 22,570 proteins encoded by genes, of which 12,570 represent allelic pairs. Over 50\% of the genome consists of repeated sequences, such as retrotransposons and genes for large families of surface molecules, which include trans-sialidases, mucins, gp63s, and a large novel family (>1300 copies) of mucin-associated surface protein (MASP) genes. Analyses of the T. cruzi, T. brucei, and Leishmania major (Tritryp) genomes imply differences from other eukaryotes in DNA repair and initiation of replication and reflect their unusual mitochondrial DNA. Although the Tritryp lack several classes of signaling molecules, their kinomes contain a large and diverse set of protein kinases and phosphatases; their size and diversity imply previously unknown interactions and regulatory processes, which may be targets for intervention.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1112631}, url = {http://www.sciencemag.org/content/309/5733/409}, author = {El-Sayed, Najib M. and Myler,Peter J. and Bartholomeu,Daniella C. and Nilsson,Daniel and Aggarwal,Gautam and Tran,Anh-Nhi and Ghedin,Elodie and Worthey,Elizabeth A. and Delcher,Arthur L. and Blandin,Ga{\"e}lle and Westenberger,Scott J. and Caler,Elisabet and Cerqueira,Gustavo C. and Branche,Carole and Haas,Brian and Anupama,Atashi and Arner,Erik and {\r A}slund,Lena and Attipoe,Philip and Bontempi,Esteban and Bringaud,Fr{\'e}d{\'e}ric and Burton,Peter and Cadag,Eithon and Campbell,David A. and Carrington,Mark and Crabtree,Jonathan and Darban,Hamid and da Silveira,Jose Franco and de Jong,Pieter and Edwards,Kimberly and Englund,Paul T. and Fazelina,Gholam and Feldblyum,Tamara and Ferella,Marcela and Frasch,Alberto Carlos and Gull,Keith and Horn,David and Hou,Lihua and Huang,Yiting and Kindlund,Ellen and Klingbeil,Michele and Kluge,Sindy and Koo,Hean and Lacerda,Daniela and Levin,Mariano J. and Lorenzi,Hernan and Louie,Tin and Machado,Carlos Renato and McCulloch,Richard and McKenna,Alan and Mizuno,Yumi and Mottram,Jeremy C. and Nelson,Siri and Ochaya,Stephen and Osoegawa,Kazutoyo and Pai,Grace and Parsons,Marilyn and Pentony,Martin and Pettersson,Ulf and Pop, Mihai and Ramirez,Jose Luis and Rinta,Joel and Robertson,Laura and Salzberg,Steven L. and Sanchez,Daniel O. and Seyler,Amber and Sharma,Reuben and Shetty,Jyoti and Simpson,Anjana J. and Sisk,Ellen and Tammi,Martti T. and Tarleton,Rick and Teixeira,Santuza and Van Aken,Susan and Vogt,Christy and Ward,Pauline N. and Wickstead,Bill and Wortman,Jennifer and White,Owen and Fraser,Claire M. and Stuart,Kenneth D. and Andersson,Bj{\"o}rn} } @article {14616, title = {Genome-Wide Analysis of Chromosomal Features Repressing Human Immunodeficiency Virus Transcription}, journal = {Journal of VirologyJ. Virol.}, volume = {79}, year = {2005}, month = {2005/06/01/}, pages = {6610 - 6619}, abstract = {We have investigated regulatory sequences in noncoding human DNA that are associated with repression of an integrated human immunodeficiency virus type 1 (HIV-1) promoter. HIV-1 integration results in the formation of precise and homogeneous junctions between viral and host DNA, but integration takes place at many locations. 
Thus, the variation in HIV-1 gene expression at different integration sites reports the activity of regulatory sequences at nearby chromosomal positions. Negative regulation of HIV transcription is of particular interest because of its association with maintaining HIV in a latent state in cells from infected patients. To identify chromosomal regulators of HIV transcription, we infected Jurkat T cells with an HIV-based vector transducing green fluorescent protein (GFP) and separated cells into populations containing well-expressed (GFP-positive) or poorly expressed (GFP-negative) proviruses. We then determined the chromosomal locations of the two classes by sequencing 971 junctions between viral and cellular DNA. Possible effects of endogenous cellular transcription were characterized by transcriptional profiling. Low-level GFP expression correlated with integration in (i) gene deserts, (ii) centromeric heterochromatin, and (iii) very highly expressed cellular genes. These data provide a genome-wide picture of chromosomal features that repress transcription and suggest models for transcriptional latency in cells from HIV-infected patients.}, isbn = {0022-538X, 1098-5514}, doi = {10.1128/JVI.79.11.6610-6619.2005}, url = {http://jvi.asm.org/content/79/11/6610}, author = {Lewinski,M. K and Bisgrove,D. and Shinn,P. and Chen,H. and Hoffmann,C. and Hannenhalli, Sridhar and Verdin,E. and Berry,C. C and Ecker,J. R and Bushman,F. D} } @conference {18571, title = {Geographic locality of IP prefixes}, booktitle = {Proceedings of the 5th ACM SIGCOMM conference on Internet Measurement}, series = {IMC {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {13 - 13}, publisher = {USENIX Association}, organization = {USENIX Association}, address = {Berkeley, CA, USA}, abstract = {Information about the geographic locality of IP prefixes can be useful for understanding the issues related to IP address allocation, aggregation, and BGP routing table growth. In this paper, we use traceroute data and geographic mappings of IP addresses to study the geographic properties of IP prefixes and their implications on Internet routing. We find that (1) IP prefixes may be too coarse-grained for expressing routing policies, (2) address allocation policies and the granularity of routing contribute significantly to routing table size, and (3) not considering the geographic diversity of contiguous prefixes may result in overestimating the opportunities for aggregation in the BGP routing table.}, url = {http://dl.acm.org/citation.cfm?id=1251086.1251099}, author = {Freedman,Michael J. and Vutukuru,Mythili and Feamster, Nick and Balakrishnan,Hari} } @inbook {12677, title = {Image Sequence Stabilization, Mosaicking, and Superresolution}, booktitle = {Handbook of Image and Video Processing (Second Edition)}, year = {2005}, month = {2005///}, pages = {309-VII - 309-VII}, publisher = {Academic Press}, organization = {Academic Press}, address = {Burlington}, abstract = {A sequence of temporal images gathered from a single sensor adds a whole new dimension to two-dimensional (2D) image data. Availability of an image sequence permits the measurement of quantities such as subpixel intensities, camera motion and depth, and detection and tracking of moving objects. In turn, the processing of image sequences necessitates the development of sophisticated techniques to extract this information.
With the recent availability of powerful yet inexpensive computers, data storage systems, and image acquisition devices, image sequence analysis has transitioned from an esoteric research domain to a practical area with significant commercial interest.}, isbn = {978-0-12-119792-6}, url = {http://www.sciencedirect.com/science/article/pii/B9780121197926500826}, author = {Chellapa, Rama and Srinivasan, S. and Aggarwal,G. and Veeraraghavan,A.}, editor = {Al Bovik} } @article {14821, title = {Mesh saliency}, journal = {ACM transactions on graphics}, volume = {24}, year = {2005}, month = {2005/07//}, pages = {659 - 666}, abstract = {Research over the last decade has built a solid mathematical foundation for representation and analysis of 3D meshes in graphics and geometric modeling. Much of this work, however, does not explicitly incorporate models of low-level human visual attention. In this paper we introduce the idea of mesh saliency as a measure of regional importance for graphics meshes. Our notion of saliency is inspired by low-level human visual system cues. We define mesh saliency in a scale-dependent manner using a center-surround operator on Gaussian-weighted mean curvatures. We observe that such a definition of mesh saliency is able to capture what most would classify as visually interesting regions on a mesh. The human-perception-inspired importance measure computed by our mesh saliency operator yields more visually pleasing results in processing and viewing of 3D meshes, compared to using a purely geometric measure of shape, such as curvature. We discuss how mesh saliency can be incorporated in graphics applications such as mesh simplification and viewpoint selection and present examples that show visually appealing results from using mesh saliency.}, keywords = {perception, saliency, simplification, viewpoint selection, visual attention}, isbn = {0730-0301}, doi = {10.1145/1073204.1073244}, url = {http://doi.acm.org/10.1145/1073204.1073244}, author = {Lee,Chang Ha and Varshney, Amitabh and Jacobs, David W.} } @inbook {12670, title = {Moving Object Detection and Compression in IR Sequences}, booktitle = {Computer Vision Beyond the Visible Spectrum}, series = {Advances in Pattern Recognition}, year = {2005}, month = {2005///}, pages = {141 - 165}, publisher = {Springer London}, organization = {Springer London}, abstract = {We consider the problem of remote surveillance using infrared (IR) sensors. The aim is to use IR image sequences to detect moving objects (humans or vehicles), and to transmit a few {\textquotedblleft}best-view images{\textquotedblright} of every new object that is detected. Since the available bandwidth is usually low, if the object chip is big, it needs to be compressed before being transmitted. Due to low computational power of computing devices attached to the sensor, the algorithms should be computationally simple. We present two approaches for object detection {\textemdash} one which specifically solves the more difficult long-range object detection problem, and the other for objects at short range. For objects at short range, we also present techniques for selecting a single best-view object chip and computationally simple techniques for compressing it to very low bit rates due to the channel bandwidth constraint. A fast image chip compression scheme implemented in the wavelet domain by combining a non-iterative zerotree coding method with 2D-DPCM for both low- and high-frequency subbands is presented.
Comparisons with some existing schemes are also included. The object detection and compression algorithms have been implemented in C/C++ and their performance has been evaluated using Hitachi{\textquoteright}s SH4 platform with software simulation.}, isbn = {978-1-84628-065-8}, url = {http://dx.doi.org/10.1007/1-84628-065-6_5}, author = {Vaswani,Namrata and Agrawal,Amit and Qinfen Zheng and Chellapa, Rama}, editor = {Bhanu,Bir and Pavlidis,Ioannis} } @conference {12644, title = {Non-Stationary "Shape Activities"}, booktitle = {Decision and Control, 2005 and 2005 European Control Conference. CDC-ECC {\textquoteright}05. 44th IEEE Conference on}, year = {2005}, month = {2005/12//}, pages = {1521 - 1528}, abstract = {The changing configuration of a group of moving landmarks can be modeled as a moving and deforming shape. The landmarks defining the shape could be moving objects (people/vehicles/robots) or rigid components of an articulated shape like the human body. In past work, the term "shape activity" has been used to denote a particular stochastic model for shape deformation. Dynamical models have been proposed for characterizing stationary shape activities (assume constant mean shape). In this work we define stochastic dynamic models for non-stationary shape activities and show that the stationary shape activity model follows as a special case of this. Most activities performed by a group of moving landmarks (here, objects) are not stationary and hence this more general model is needed. We also define a piecewise stationary model with non-stationary transitions which can be used to segment out and track a sequence of activities. Noisy observations coming from these models can be tracked using a particle filter. We discuss applications of our framework to abnormal activity detection, tracking and activity sequence segmentation.}, doi = {10.1109/CDC.2005.1582374}, author = {Vaswani, N. and Chellapa, Rama} } @article {15071, title = {A pairwise key predistribution scheme for wireless sensor networks}, journal = {ACM Trans. Inf. Syst. Secur.}, volume = {8}, year = {2005}, month = {2005/05//}, pages = {228 - 258}, abstract = {To achieve security in wireless sensor networks, it is important to be able to encrypt and authenticate messages sent between sensor nodes. Before doing so, keys for performing encryption and authentication must be agreed upon by the communicating parties. Due to resource constraints, however, achieving key agreement in wireless sensor networks is nontrivial. Many key agreement schemes used in general networks, such as Diffie-Hellman and other public-key based schemes, are not suitable for wireless sensor networks due to the limited computational abilities of the sensor nodes. Predistribution of secret keys for all pairs of nodes is not viable due to the large amount of memory this requires when the network size is large. In this paper, we provide a framework in which to study the security of key predistribution schemes, propose a new key predistribution scheme which substantially improves the resilience of the network compared to previous schemes, and give an in-depth analysis of our scheme in terms of network resilience and associated overhead. Our scheme exhibits a nice threshold property: when the number of compromised nodes is less than the threshold, the probability that communications between any additional nodes are compromised is close to zero.
This desirable property lowers the initial payoff of smaller-scale network breaches to an adversary, and makes it necessary for the adversary to attack a large fraction of the network before it can achieve any significant gain.}, keywords = {key predistribution, Security, Wireless sensor networks}, isbn = {1094-9224}, doi = {10.1145/1065545.1065548}, url = {http://doi.acm.org/10.1145/1065545.1065548}, author = {Du,Wenliang and Deng,Jing and Han,Yunghsiang S. and Varshney,Pramod K. and Katz, Jonathan and Khalili,Aram} } @inbook {12676, title = {Pattern Recognition in Video}, booktitle = {Pattern Recognition and Machine Intelligence}, series = {Lecture Notes in Computer Science}, volume = {3776}, year = {2005}, month = {2005///}, pages = {11 - 20}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Images constitute data that live in a very high dimensional space, typically of the order of hundred thousand dimensions. Drawing inferences from correlated data of such high dimensions often becomes intractable. Therefore, traditionally, several of these problems, like face recognition, object recognition, and scene understanding, have been approached using techniques in pattern recognition. Such methods in conjunction with methods for dimensionality reduction have been highly popular and successful in tackling several image processing tasks. Of late, the advent of cheap, high-quality video cameras has generated new interest in extending still image-based recognition methodologies to video sequences. The added temporal dimension in these videos makes problems like face and gait-based human recognition, event detection, and activity recognition addressable. Our research has focused on solving several of these problems through a pattern recognition approach. Of course, in video streams patterns refer to both patterns in the spatial structure of image intensities around interest points and temporal patterns that arise either due to camera motion or object motion. In this paper, we discuss the applications of pattern recognition in video to problems like face and gait-based human recognition, behavior classification, activity recognition and activity based person identification.}, isbn = {978-3-540-30506-4}, url = {http://dx.doi.org/10.1007/11590316_2}, author = {Chellapa, Rama and Veeraraghavan,Ashok and Aggarwal,Gaurav}, editor = {Pal,Sankar and Bandyopadhyay,Sanghamitra and Biswas,Sambhunath} } @article {16094, title = {Robotic apparatus and wireless communication system}, volume = {10/085,821}, year = {2005}, month = {2005/05/17/}, abstract = {A robotic apparatus and system adapted to communicate with a wireless sensor. The apparatus may be either physical or virtual in nature and is adapted to communicate physical movements with a wireless sensor. Data received from the sensor and/or robotic apparatus may be reviewed in a real-time mode, or may be saved for review at a later time. In addition, the apparatus may be controlled through an operator that is in local or remote communication with the apparatus. The robotic system may include pre-programmed interactive platforms for enabling communication between a user and the apparatus in a dynamic mode. In addition, the system may allow an operator to program a game/story for use as an interactive platform.
Accordingly, the apparatus and system provide a platform for rehabilitative exercise of a patient as well as an entertainment device.}, url = {http://www.google.com/patents?id=5-0VAAAAEBAJ}, author = {Lathan,Corinna E. and Tracey,Michael R. and Vice,Jack M. and Druin, Allison and Plaisant, Catherine} } @article {12647, title = {"Shape Activity": a continuous-state HMM for moving/deforming shapes with application to abnormal activity detection}, journal = {Image Processing, IEEE Transactions on}, volume = {14}, year = {2005}, month = {2005/10//}, pages = {1603 - 1616}, abstract = {The aim is to model "activity" performed by a group of moving and interacting objects (which can be people, cars, or different rigid components of the human body) and use the models for abnormal activity detection. Previous approaches to modeling group activity include co-occurrence statistics (individual and joint histograms) and dynamic Bayesian networks, neither of which is applicable when the number of interacting objects is large. We treat the objects as point objects (referred to as "landmarks") and propose to model their changing configuration as a moving and deforming "shape" (using Kendall{\textquoteright}s shape theory for discrete landmarks). A continuous-state hidden Markov model is defined for landmark shape dynamics in an activity. The configuration of landmarks at a given time forms the observation vector, and the corresponding shape and the scaled Euclidean motion parameters form the hidden-state vector. An abnormal activity is then defined as a change in the shape activity model, which could be slow or drastic and whose parameters are unknown. Results are shown on a real abnormal activity-detection problem involving multiple moving objects.}, keywords = {abnormal activity detection, activity recognition, co-occurrence statistics, continuous-state hidden Markov model, dynamic Bayesian networks, hidden-state vector, particle filtering, scaled Euclidean motion parameters, shape deformation, belief networks, Markov chains, Pattern Recognition}, isbn = {1057-7149}, doi = {10.1109/TIP.2005.852197}, author = {Vaswani, N. and Roy-Chowdhury, A.K. and Chellapa, Rama} } @article {17902, title = {Statistical geometry representation for efficient transmission and rendering}, journal = {ACM Transactions on Graphics}, volume = {24}, year = {2005}, month = {2005/04//}, pages = {348 - 373}, abstract = {Traditional geometry representations have focused on representing the details of the geometry in a deterministic fashion. In this article we propose a statistical representation of the geometry that leverages local coherence for very large datasets. We show how the statistical analysis of a densely sampled point model can be used to improve the geometry bandwidth bottleneck, both on the system bus and over the network as well as for randomized rendering, without sacrificing visual realism. Our statistical representation is built using a clustering-based hierarchical principal component analysis (PCA) of the point geometry. It gives us a hierarchical partitioning of the geometry into compact local nodes representing attributes such as spatial coordinates, normal, and color. We pack this information into a few bytes using classification and quantization.
This allows our representation to directly render from compressed format for efficient remote as well as local rendering. Our representation supports both view-dependent and on-demand rendering. Our approach renders each node using quasi-random sampling utilizing the probability distribution derived from the PCA analysis. We show many benefits of our approach: (1) several-fold improvement in the storage and transmission complexity of point geometry; (2) direct rendering from compressed data; and (3) support for local and remote rendering on a variety of rendering platforms such as CPUs, GPUs, and PDAs.}, keywords = {network graphics, Point-based rendering, Principal component analysis, programmable GPU, progressive transmission, quasi-random numbers, view-dependent rendering}, isbn = {0730-0301}, doi = {10.1145/1061347.1061356}, url = {http://doi.acm.org/10.1145/1061347.1061356}, author = {Kalaiah,Aravind and Varshney, Amitabh} } @article {18718, title = {Structural Determinants for Selective Recognition of a Lys48-Linked Polyubiquitin Chain by a UBA Domain}, journal = {Molecular Cell}, volume = {18}, year = {2005}, month = {2005/06/10/}, pages = {687 - 698}, abstract = {Although functional diversity in polyubiquitin chain signaling has been ascribed to the ability of differently linked chains to bind in a distinctive manner to effector proteins, structural models of such interactions have been lacking. Here, we use NMR to unveil the structural basis of selective recognition of Lys48-linked di- and tetraubiquitin chains by the UBA2 domain of hHR23A. Although the interaction of UBA2 with Lys48-linked diubiquitin involves the same hydrophobic surface on each ubiquitin unit as that utilized in monoubiquitin:UBA complexes, our results show how the {\textquotedblleft}closed{\textquotedblright} conformation of Lys48-linked diubiquitin is crucial for high-affinity binding. Moreover, recognition of Lys48-linked diubiquitin involves a unique epitope on UBA, which allows the formation of a sandwich-like diubiquitin:UBA complex. Studies of the UBA-tetraubiquitin interaction suggest that this mode of UBA binding to diubiquitin is relevant for longer chains.}, isbn = {1097-2765}, doi = {10.1016/j.molcel.2005.05.013}, url = {http://www.sciencedirect.com/science/article/pii/S1097276505013195}, author = {Varadan,Ranjani and Assfalg,Michael and Raasi,Shahri and Pickart,Cecile and Fushman, David} } @article {19581, title = {A Type and Effect System for Flexible Abstract Interpretation of Java: (Extended Abstract)}, journal = {Electronic Notes in Theoretical Computer Science}, volume = {131}, year = {2005}, month = {2005/05/24/}, pages = {111 - 124}, abstract = {This paper describes a flexible type and effect inference system for Featherweight Java (FJ). The effect terms generated by static type and effect inference embody the abstract interpretation of program event sequences. Flexibility in the analysis is obtained by post-processing of inferred effects, allowing a modular adaptation to extensions of the language.
Several example transformations are discussed, including how inferred effects can be transformed to reflect the impact of exceptions on FJ control flow.}, keywords = {language security, object oriented languages, Type analysis}, isbn = {1571-0661}, url = {http://www.sciencedirect.com/science/article/pii/S1571066105002628}, author = {Skalka, Christian and Smith, Scott and David Van Horn} } @article {17913, title = {Unsupervised learning applied to progressive compression of time-dependent geometry}, journal = {Computers \& Graphics}, volume = {29}, year = {2005}, month = {2005/06//}, pages = {451 - 461}, abstract = {We propose a new approach to progressively compress time-dependent geometry. Our approach exploits correlations in motion vectors to achieve better compression. We use unsupervised learning techniques to detect good clusters of motion vectors. For each detected cluster, we build a hierarchy of motion vectors using pairwise agglomerative clustering, and succinctly encode the hierarchy using entropy encoding. We demonstrate our approach on a client{\textendash}server system that we have built for downloading time-dependent geometry.}, keywords = {Clustering algorithms, Distributed/network graphics, pattern recognition}, isbn = {0097-8493}, doi = {10.1016/j.cag.2005.03.021}, url = {http://www.sciencedirect.com/science/article/pii/S009784930500052X}, author = {Baby,Thomas and Kim,Youngmin and Varshney, Amitabh} } @inbook {18726, title = {Using NMR Spectroscopy to Monitor Ubiquitin Chain Conformation and Interactions with Ubiquitin-Binding Domains}, booktitle = {Ubiquitin and Protein Degradation, Part B}, volume = {Volume 399}, year = {2005}, month = {2005///}, pages = {177 - 192}, publisher = {Academic Press}, organization = {Academic Press}, abstract = {Polyubiquitin (polyUb) chains function as signaling molecules that mediate a diverse set of cellular events. The outcome of polyubiquitination depends on the specific linkage between Ub moieties in the chain, and differently linked chains function as distinct intracellular signals. Although an increasing number of Ub-binding proteins that transmit the regulatory information conferred by (poly)ubiquitination have been identified, the molecular mechanisms of linkage-specific signaling and recognition still remain to be understood. Knowledge of the chain structure is expected to provide insights into the basis of diversity in polyUb signaling. Here we describe several NMR approaches aimed at determining the physiological conformation of polyUb and characterization of the chains{\textquoteright} interactions with ubiquitin-binding proteins.}, isbn = {0076-6879}, url = {http://www.sciencedirect.com/science/article/pii/S0076687905990125}, author = {Varadan,Ranjani and Assfalg,Michael and Fushman, David}, editor = {Deshaies,Raymond J.} } @conference {18017, title = {Arbitrate-and-move primitives for high throughput on-chip interconnection networks}, booktitle = {Circuits and Systems, 2004. ISCAS {\textquoteright}04. Proceedings of the 2004 International Symposium on}, volume = {2}, year = {2004}, month = {2004/05//}, pages = {II - 441-4 Vol.2 - II - 441-4 Vol.2}, abstract = {An n-leaf pipelined balanced binary tree is used for arbitration of order and movement of data from n input ports to one output port. A novel arbitrate-and-move primitive circuit for every node of the tree, which is based on a concept of reduced synchrony that benefits from attractive features of both asynchronous and synchronous designs, is presented. 
The design objective of the pipelined binary tree is to provide a key building block in a high-throughput mesh-of-trees interconnection network for the Explicit Multi-Threading (XMT) architecture, a recently introduced parallel computation framework. The proposed reduced synchrony circuit was compared with asynchronous and synchronous designs of arbitrate-and-move primitives. Simulations with 0.18 $\mu$m technology show that compared to an asynchronous design, the proposed reduced synchrony implementation achieves a higher throughput, up to 2 Giga-Requests per second on an 8-leaf binary tree. Our circuit also consumes less power than the synchronous design, and requires less silicon area than both the synchronous and asynchronous designs.}, keywords = {arbiter circuit, arbitrate-and-move primitive, asynchronous circuits, balanced binary tree, explicit multi-threading, interconnection networks, mesh-of-trees, pipeline processing, power consumption, reduced synchrony, simulation, synchronous circuits, system-on-chip}, doi = {10.1109/ISCAS.2004.1329303}, author = {Balkan,A.O. and Gang Qu and Vishkin, Uzi} } @conference {15466, title = {ASPIRE: automated systematic protocol implementation robustness evaluation}, booktitle = {Software Engineering Conference, 2004. Proceedings. 2004 Australian}, year = {2004}, month = {2004///}, pages = {241 - 250}, abstract = {Network protocol implementations are susceptible to problems caused by their lack of ability to handle invalid inputs. We present ASPIRE: automated systematic protocol implementation robustness evaluation, an automated approach to proactively test protocol implementations by observing their responses to faulty protocol data units (PDUs) or messages. In contrast to existing approaches, we sample the faulty PDU space in a systematic manner, thus allowing us to evaluate protocol implementations in the face of a wider variety of faulty PDUs. We use a pruning strategy to reduce, from exponential, the size of the faulty PDU set to polynomial in the number of fields of a PDU. We have implemented the ASPIRE algorithms and evaluated them on implementations of HTTP (Apache, Google Web Server (GWS), and Microsoft IIS) and SMTP (Sendmail and Microsoft Exchange) protocols. Our results show that Apache, GWS, and IIS, although implementing the same protocol specification, behave differently on faulty HTTP PDUs; Sendmail and Exchange differ in handling our faulty SMTP PDUs.}, keywords = {algorithm, ASPIRE, automated systematic protocol, automated testing, fault tolerant computing, faulty PDU, formal specification, HTTP, implementation robustness evaluation, Internet, network protocol, protocol data unit, protocol specification, robustness testing, SMTP protocol, stateful protocols, stateless protocols, Transport protocols}, doi = {10.1109/ASWEC.2004.1290477}, author = {Vasan,Arunchandar and Memon, Atif M.} } @conference {18020, title = {Bending light for multi-chip virtual PRAMs?}, booktitle = {Proc. 3rd Workshop on Non-Silicon Computation, held in conjunction with the 31st International Symposium on Computer Architecture (ISCA 2004)}, year = {2004}, month = {2004///}, pages = {19 - 23}, abstract = {A new paradigm for an all-to-all optical interconnection network is presented. An interesting modeling aspect is that (limited) bending of optical communication channels is allowed.
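[Editorial aside: a sketch of the pruning idea in the ASPIRE entry above. Instead of the exponential cross-product of faulty values over all PDU fields, each test keeps a valid PDU and makes exactly one field faulty, so the test set grows polynomially (here linearly) with the number of fields. The field names and fault values are invented; ASPIRE's real fault model is richer.]

```python
from itertools import product

# Hypothetical request fields and candidate faulty values (made up; the
# actual PDU grammar and fault classes would come from the protocol spec).
FIELDS = {
    "method":  ["GET", "G\x00T", "A" * 5000],
    "version": ["HTTP/1.1", "HTTP/9.9", ""],
    "host":    ["example.com", "\r\nInjected: x", ""],
}
VALID = {"method": "GET", "version": "HTTP/1.1", "host": "example.com"}

def exhaustive(fields):
    """Exponential: the full cross-product of all candidate values."""
    names = list(fields)
    return [dict(zip(names, combo)) for combo in product(*fields.values())]

def pruned(fields, valid):
    """Polynomial: a valid PDU with exactly one field made faulty at a time."""
    tests = []
    for name, candidates in fields.items():
        for bad in candidates:
            if bad != valid[name]:
                tests.append({**valid, name: bad})
    return tests

print(len(exhaustive(FIELDS)), "exhaustive vs", len(pruned(FIELDS, VALID)), "pruned")
```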
In a computer system, the paradigm could provide part of an interconnection fabric between several tens (e.g., 64) of chips comprising parallel processing elements and the first level of the cache. An optical interconnection network raises an intriguing possibility: obtain both improved performance and significant cost reduction with respect to standard serial computer system models.}, author = {Vishkin, Uzi and Smolyaninov,I.} } @conference {16447, title = {Challenges in selecting paths for navigational queries: Trade-off of benefit of path versus cost of plan}, booktitle = {Proceedings of the 7th International Workshop on the Web and Databases: colocated with ACM SIGMOD/PODS 2004}, year = {2004}, month = {2004///}, pages = {61 - 66}, author = {Vidal,M. E and Raschid, Louiqa and Mestre,J.} } @article {18016, title = {Circuit architecture for reduced-synchrony on-chip interconnect}, volume = {10/166,008}, year = {2004}, month = {2004/07/27/}, abstract = {The invention relates to an interconnect, and to interconnect architecture, for communicating between processing elements and memory modules in a computer system comprising on-chip parallel computation, in order to reduce the tight synchrony that is required by important components of most present computers.}, url = {http://www.google.com/patents?id=RRUSAAAAEBAJ}, author = {Vishkin, Uzi and Nuzman,Joseph F.}, editor = {University of Maryland, College Park} } @conference {12693, title = {Classification probability analysis of principal component null space analysis}, booktitle = {Pattern Recognition, 2004. ICPR 2004. Proceedings of the 17th International Conference on}, volume = {1}, year = {2004}, month = {2004/08//}, pages = {240 - 243 Vol.1}, abstract = {In a previous paper, we have presented a new linear classification algorithm, principal component null space analysis (PCNSA), which is designed for problems like object recognition where different classes have unequal and non-white noise covariance matrices. PCNSA first obtains a principal components space (PCA space) for the entire data and in this PCA space, it finds, for each class "i", an $M_i$-dimensional subspace along which the class{\textquoteright}s intra-class variance is the smallest. We call this subspace an approximate null space (ANS) since the lowest variance is usually "much smaller" than the highest. A query is classified into class "i" if its distance from the class{\textquoteright}s mean in the class{\textquoteright}s ANS is a minimum. In this paper, we discuss the PCNSA algorithm more precisely and derive tight upper bounds on its classification error probability. We use these expressions to compare classification performance of PCNSA with that of subspace linear discriminant analysis (SLDA).}, keywords = {approximate null space, classification error probability, covariance matrices, intraclass variance, linear discriminant analysis, nonwhite noise, object recognition, pattern classification, principal component analysis, subspace statistics}, doi = {10.1109/ICPR.2004.1334068}, author = {Vaswani, N. and Chellapa, Rama} } @book {16304, title = {Detecting and correcting a failure sequence in a computer system before a failure occurs}, year = {2004}, month = {2004/02//}, publisher = {Google Patents}, organization = {Google Patents}, author = {Gross,K. C and Votta,L. G.
and Porter, Adam} } @article {17922, title = {Efficient solution of the Poisson-Boltzmann equation for electrostatics of large molecules}, journal = {High-Performance Computing Symposium (April 2004)}, year = {2004}, month = {2004///}, pages = {71 - 76}, abstract = {Development of fast computational methods to solve the Poisson-Boltzmann equation (PBE) for molecular electrostatics is important because of the central role played by electrostatic interactions in many biological processes. The accuracy and stability of the solution to the PBE is quite sensitive to the boundary layer between the solvent and the solute which defines the solvent-accessible surface. In this paper, we propose a new interface-layer-focused PBE solver for efficiently computing the electrostatic potential for large molecules. Our method analytically constructs the solvent-accessible surface of molecules and then builds nested iso-surface layers outwards and inwards from the surface using the distance field around the surface. We then develop a volume simplification algorithm to adaptively adjust the density of the irregular grid based on the importance to the PBE solution. We also generalize finite difference methods on our irregular grids using Taylor series expansions. Our algorithm achieves about three times speedup in the iterative solution process of PBE, with more accurate results on an analytically solvable test case, compared with the popular optimized DelPhi program. Our approach can also be applied directly to solve partial differential equations arising in other application domains.}, author = {Hao,X. and Varshney, Amitabh} } @conference {16467, title = {Efficient techniques to explore and rank paths in life science data sources}, booktitle = {Data Integration in the Life Sciences}, year = {2004}, month = {2004///}, pages = {187 - 202}, author = {Lacroix,Z. and Raschid, Louiqa and Vidal,M. E} } @conference {16462, title = {Exploiting multiple paths to express scientific queries}, booktitle = {16th International Conference on Scientific and Statistical Database Management, 2004. Proceedings}, year = {2004}, month = {2004/06/21/23}, pages = {357 - 360}, publisher = {IEEE}, organization = {IEEE}, abstract = {The purpose of this demonstration is to present the main features of the BioNavigation system. Scientific data collection needed in various stages of scientific discovery is typically performed manually. For each scientific object of interest (e.g., a gene, a sequence), scientists query a succession of Web resources following links between retrieved entries. Each of the steps provides part of the intended characterization of the scientific object. This process is sometimes partially supported by hard-coded scripts or complex queries that will be evaluated by a mediation-based data integration system or against a data warehouse. These approaches fail in guiding the scientists during the collection process. In contrast, the BioNavigation approach presented in the paper provides the scientists with information on the available alternative resources, their provenance, and the costs of data collection.
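[Editorial aside: a minimal sketch of the path-selection trade-off behind the BioNavigation entries and the "benefit of path versus cost of plan" workshop paper above: enumerate the alternative source paths that can answer a navigational query and rank them by estimated benefit against access cost. The graph, the scores, and the scoring rule are invented for illustration.]

```python
# Hypothetical source graph: nodes are entity classes, edges are links between
# Web resources, each with an estimated benefit and an access cost (made up).
GRAPH = {
    "gene":     [("protein", 0.9, 2.0), ("citation", 0.4, 1.0)],
    "protein":  [("citation", 0.7, 3.0), ("disease", 0.8, 2.5)],
    "citation": [("disease", 0.5, 1.5)],
    "disease":  [],
}

def paths(src, dst, seen=()):
    """Enumerate simple paths src -> dst; benefit multiplies, cost adds."""
    if src == dst:
        yield [src], 1.0, 0.0
        return
    for nxt, benefit, cost in GRAPH[src]:
        if nxt not in seen:
            for tail, tb, tc in paths(nxt, dst, seen + (src,)):
                yield [src] + tail, benefit * tb, cost + tc

# Rank alternative paths by benefit per unit cost, highest first.
for route, b, c in sorted(paths("gene", "disease"),
                          key=lambda p: p[1] / p[2], reverse=True):
    print(" -> ".join(route), f"benefit={b:.2f} cost={c:.1f}")
```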
The BioNavigation system enhances a mediation-based integration system and provides scientists with support for the following: to ask queries at a high conceptual level; to visualize the multiple alternative resources that may be exploited to execute their data collection queries; to choose the final execution path to evaluate their queries.}, keywords = {access protocols, biology computing, BioNavigation system, complex queries, Costs, Data analysis, data handling, Data visualization, data warehouse, Data warehouses, Databases, diseases, distributed databases, hard-coded scripts, information resources, Information retrieval, mediation-based data integration system, multiple paths, query evaluation, Query processing, scientific data collection, scientific discovery, scientific information, scientific information systems, scientific object of interest, scientific queries, sequences, Web resources}, isbn = {0-7695-2146-0}, doi = {10.1109/SSDM.2004.1311231}, author = {Lacroix,Z. and Moths,T. and Parekh,K. and Raschid, Louiqa and Vidal,M. -E} } @conference {12367, title = {Java-through-C compilation: An enabling technology for Java in embedded systems}, booktitle = {Proceedings of the conference on Design, automation and test in Europe-Volume 3}, year = {2004}, month = {2004///}, pages = {30161 - 30161}, author = {Varma,A. and Bhattacharyya, Shuvra S.} } @conference {17947, title = {Light Collages: Lighting Design for Effective Visualization}, booktitle = {Proceedings of the conference on Visualization {\textquoteright}04}, series = {VIS {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {281 - 288}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, abstract = {We introduce Light Collages {\textemdash} a lighting design system for effective visualization based on principles of human perception. Artists and illustrators enhance perception of features with lighting that is locally consistent and globally inconsistent. Inspired by these techniques, we design the placement of light sources to convey a greater sense of realism and better perception of shape with globally inconsistent lighting. Our algorithm segments the objects into local surface patches and uses a number of perceptual heuristics, such as highlights, shadows, and silhouettes, to enhance the perception of shape. We show our results on scientific and sculptured datasets.}, keywords = {inconsistent lighting, light placement, lighting design, proximity shadows, scientific illustration, silhouette enhancement}, isbn = {0-7803-8788-0}, doi = {10.1109/VISUAL.2004.62}, url = {http://dx.doi.org/10.1109/VISUAL.2004.62}, author = {Lee,Chang Ha and Hao,Xuejun and Varshney, Amitabh} } @article {16334, title = {Measuring HPC productivity}, journal = {International Journal of High Performance Computing Applications}, volume = {18}, year = {2004}, month = {2004///}, pages = {459 - 473}, author = {Faulk,S. and Gustafson,J. and Johnson,P. and Porter, Adam and Tichy,W. and Votta,L.} } @article {18015, title = {OPTICAL INTERCONNECT STRUCTURE IN A COMPUTER SYSTEM}, year = {2004}, month = {2004/09/30/}, abstract = {A multi-chip processor/memory arrangement (20) is shown which includes a plurality of modules (22), also referred to herein as chips. The modules (22) are interconnected therebetween by an optical interconnect structure (24) also referred to herein as optical interconnect fabric.
The basic concept underlying the structure of the arrangement (20) is to position the processing elements and memory cells on the small chips (22) which are fabricated in mass production based on inexpensive technology, for example, 0.25 micron technology, and interconnected with the optical interconnect fabric (24). Packaged with the optical interconnect structure (24), a plurality of inexpensive chips (22) provides sufficient performance but for a small fraction of the cost of the processor/memory arrangement implemented on a single large computer chip (0.065 micron chip).}, author = {Vishkin, Uzi} } @inbook {18019, title = {PRAM-On-Chip: A Quest for Not-So-Obvious Non-obviousness}, booktitle = {Mathematical Foundations of Computer Science 2004}, series = {Lecture Notes in Computer Science}, volume = {3153}, year = {2004}, month = {2004///}, pages = {104 - 105}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Consider situations where once you were told about a new technical idea you reacted by saying: {\textquotedblleft}but this is so obvious, I wonder how I missed it{\textquotedblright}. I found out recently that the US patent law has a nice formal way of characterizing such a situation. The US patent law protects inventions that meet three requirements: utility, novelty and non-obviousness. Non-obviousness is considered the most challenging of the three to establish. The talk will try to argue that a possible virtue for a technical contribution is when, in retrospect, its non-obviousness is not too obvious; and since hindsight is always 20/20, one may often need to resort to various types of circumstantial evidence in order to establish non-obviousness. There are two reasons for bringing this issue up in my talk: (i) seeking such a virtue has been an objective of my work over the years, and (ii) issues of taste in research are more legitimate for invited talks; there might be merit in reminding younger researchers that not every {\textquotedblleft}result{\textquotedblright} is necessarily also a {\textquotedblleft}contribution{\textquotedblright}; perhaps the criterion of not-so-obvious non-obviousness could be helpful in some cases to help recognize a contribution. The focus of the second focal point for my talk, the PRAM-On-Chip approach, meets at least one of the standard legal ways to support non-obviousness: {\textquotedblleft}Expressions of disbelief by experts constitute strong evidence of non-obviousness{\textquotedblright}. It is well documented that the whole PRAM algorithmic theory was considered {\textquotedblleft}unrealistic{\textquotedblright} by numerous experts in the field, prior to the PRAM-On-Chip project. In fact, I needed recently to use this documentation in a reply to the U.S. patent office. An introduction of the PRAM-On-Chip approach follows. Many parallel computer system architectures have been proposed and built over the last several decades. The outreach of the few that survived has been severely limited due to their programmability problems. The question of how to think algorithmically in parallel has been the fundamental problem for which these architectures did not have an adequate answer.
A computational model, the Parallel Random Access Model (PRAM), has been developed by numerous (theoretical computer science) algorithm researchers to address this question during the 1980s and 1990s and is considered by many as the easiest known approach to parallel programming. Despite the broad interest the PRAM generated, it had not been possible to build parallel machines that adequately support it using multi-chip multiprocessors, the only multiprocessors that were buildable in the 1990s, since low-overhead coordination was not possible. Our main insight is that this is becoming possible with the increasing amounts of hardware that can be placed on a single chip. From the PRAM, as a starting point, a highly parallel explicit multi-threaded (XMT) on-chip processor architecture that relies on new low-overhead coordination mechanisms and whose performance objective is reducing single task completion time has been conceived and developed. Simulated program executions have shown dramatic performance gains over conventional processor architectures. Namely, in addition to the unique parallel programmability features, which set XMT apart from any other current approach, XMT also provides very competitive performance. If XMT meets expectations, its introduction would greatly enhance the normal rate of improvement of conventional processor architectures, leading to new applications.}, keywords = {Computer, Science}, isbn = {978-3-540-22823-3}, url = {http://dx.doi.org/10.1007/978-3-540-28629-5_5}, author = {Vishkin, Uzi}, editor = {Fiala,Jir{\'\i} and Koubek,V{\'a}clav and Kratochv{\'\i}l,Jan} } @conference {16493, title = {Querying Web-Accessible Life Science Sources: Which paths to choose?}, booktitle = {Proceedings of VLDB Workshop on Information Integration on the Web (IIWeb-2004)}, year = {2004}, month = {2004///}, pages = {83 - 83}, author = {Bleiholder,J. and Naumann,F. and Raschid, Louiqa and Vidal,M. E} } @article {17899, title = {Real-time rendering of translucent meshes}, journal = {ACM Trans. Graph.}, volume = {23}, year = {2004}, month = {2004/04//}, pages = {120 - 142}, abstract = {Subsurface scattering is important for photo-realistic rendering of translucent materials. We make approximations to the BSSRDF model and propose a simple lighting model to simulate the effects on translucent meshes. Our approximations are based on the observation that subsurface scattering is relatively local due to its exponential falloff. In the preprocessing stage we build subsurface scattering neighborhood information, which includes all the vertices within effective scattering range from each vertex. We then modify the traditional local illumination model into a run-time two-stage process. The first stage involves computation of reflection and transmission of light on surface vertices. The second stage bleeds in scattering effects from a vertex{\textquoteright}s neighborhood to generate the final result. We then merge the run-time two-stage process into a run-time single-stage process using precomputed integrals, and reduce the complexity of our run-time algorithm to O(N), where N is the number of vertices. The selection of the optimum set size for precomputed integrals is guided by a standard image-space error metric. Furthermore, we show how to compress the precomputed integrals using spherical harmonics.
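[Editorial aside: a numpy sketch of the run-time two-stage process described in the Hao-Varshney translucent-mesh entry above: stage one computes per-vertex reflection/transmission, stage two gathers scattering from each vertex's neighborhood using precomputed exponential-falloff weights. The kernel, its range, and the blend weights are guesses standing in for the paper's precomputed integrals.]

```python
import numpy as np

rng = np.random.default_rng(0)
V = rng.random((500, 3))                            # toy vertex positions
Nrm = V / np.linalg.norm(V, axis=1, keepdims=True)  # stand-in unit normals
L = np.array([0.0, 0.0, 1.0])                       # directional light

# Preprocess: neighborhood weights with exponential falloff (the kernel and
# range are assumptions standing in for the paper's precomputed integrals).
ell = 0.05
D = np.linalg.norm(V[:, None, :] - V[None, :, :], axis=2)
W = np.where((D > 0) & (D < 3 * ell), np.exp(-D / ell), 0.0)
W /= W.sum(axis=1, keepdims=True) + 1e-12

# Stage 1: reflection/transmission of light at each vertex.
transmitted = np.clip(Nrm @ L, 0.0, 1.0)

# Stage 2: bleed scattered light in from each vertex's neighborhood.
# With per-vertex neighbor lists this gather is O(N), as in the paper.
radiance = 0.5 * transmitted + 0.5 * (W @ transmitted)
print(radiance[:5])
```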
We compensate for the inadequacy of spherical harmonics at high frequencies with a reference-points scheme that stores the high-frequency components of the precomputed integrals explicitly. With this approach, we greatly reduce memory usage without loss of visual quality under a high-frequency lighting environment and achieve interactive frame rates for medium-sized scenes. Our model is able to capture the most important features of subsurface scattering: reflection and transmission due to multiple scattering.}, keywords = {BSSRDF, local illumination, reflection models, subsurface scattering}, isbn = {0730-0301}, doi = {10.1145/990002.990004}, url = {http://doi.acm.org/10.1145/990002.990004}, author = {Hao,Xuejun and Varshney, Amitabh} } @article {18018, title = {Reconfigurable optical wireless sensor networks}, journal = {Proceedings of SPIE}, volume = {5237}, year = {2004}, month = {2004/02/06/}, pages = {136 - 146}, abstract = {Optical wireless networks are emerging as a viable, cost effective technology for rapidly deployable broadband sensor communication infrastructures. The use of directional, narrow beam, optical wireless links provides great promise for secure, extremely high data rate communication between fixed or mobile nodes, very suitable for sensor networks in civil and military contexts. The main challenge is to maintain the quality of such networks, as changing atmospheric and platform conditions critically affect their performance. Topology control is used as the means to achieve survivable optical wireless networking under adverse conditions, based on dynamic and autonomous topology reconfiguration. The topology control process involves tracking and acquisition of nodes, assessment of link-state information, collection and distribution of topology data, and the algorithmic solution of an optimal topology. This paper focuses on the analysis, implementation and evaluation of algorithms and heuristics for selecting the best possible topology in order to optimize a given performance objective while satisfying connectivity constraints. The work done at the physical layer is based on link cost information. A cost measure is defined in terms of bit-error-rate and the heuristics developed seek to form a bi-connected topology which minimizes total network cost. At the network layer a key factor is the traffic matrix, and heuristics were developed in order to minimize congestion, flow-rate or end-to-end delay.}, isbn = {0277786X}, doi = {10.1117/12.511368}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/5237/1/136_1?isAuthorized=no}, author = {Llorca,Jaime and Desai,Aniket and Vishkin, Uzi and Davis,Christopher C. and Milner,Stuart D} } @conference {12696, title = {Role of shape and kinematics in human movement analysis}, booktitle = {Computer Vision and Pattern Recognition, 2004. CVPR 2004. Proceedings of the 2004 IEEE Computer Society Conference on}, volume = {1}, year = {2004}, month = {2004/07/02/june}, pages = {I-730 - I-737 Vol.1}, abstract = {Human gait and activity analysis from video is presently attracting a lot of attention in the computer vision community. In this paper we analyze the role of two of the most important cues in human motion: shape and kinematics. We present an experimental framework whereby it is possible to evaluate the relative importance of these two cues in computer vision based recognition algorithms.
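[Editorial aside: the shape cue in the gait entries above is compared via distances between configurations on Kendall's shape manifold. Below is a standard full-Procrustes shape distance for planar landmark configurations, a minimal sketch that may differ in detail from the papers' choice of metric.]

```python
import numpy as np

def preshape(X):
    """Kendall pre-shape: center the k x 2 landmark configuration and
    normalize it to unit centroid size."""
    X = X - X.mean(axis=0)
    return X / np.linalg.norm(X)

def shape_distance(X, Y):
    """Full Procrustes distance between two planar configurations; the
    optimal rotation comes from the SVD of B^T A (reflections excluded)."""
    A, B = preshape(X), preshape(Y)
    U, s, Vt = np.linalg.svd(B.T @ A)
    cos_rho = s[0] + np.sign(np.linalg.det(U @ Vt)) * s[1]
    return np.sqrt(max(0.0, 1.0 - cos_rho ** 2))

tri = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]])
th = 0.7
R = np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]])
print(shape_distance(tri, 2.5 * tri @ R.T + 3.0))                  # ~0: same shape
print(shape_distance(tri, np.array([[0, 0], [1, 0], [0.5, 3.0]]))) # > 0
```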
In the process, we propose a new gait recognition algorithm by computing the distance between two sequences of shapes that lie on a spherical manifold. In our experiments, shape is represented using Kendall{\textquoteright}s definition of shape. Kinematics is represented using a linear dynamical system. We place particular emphasis on human gait. Our conclusions show that shape plays a role which is more significant than kinematics in current automated gait-based human identification algorithms. As a natural extension we study the role of shape and kinematics in activity recognition. Our experiments indicate that we require models that contain both shape and kinematics in order to perform accurate activity classification. These conclusions also allow us to explain the relative performance of many existing methods in computer-based human activity modeling.}, keywords = {activity recognition, autoregressive moving average processes, computer vision, feature extraction, gait-based human identification, hidden Markov models, image sequences, Kendall shape, linear dynamical systems, spherical manifold}, doi = {10.1109/CVPR.2004.1315104}, author = {Veeraraghavan,A. and Chowdhury, A.R. and Chellapa, Rama} } @article {18713, title = {Solution Conformation of Lys63-linked Di-ubiquitin Chain Provides Clues to Functional Diversity of Polyubiquitin Signaling}, journal = {Journal of Biological Chemistry}, volume = {279}, year = {2004}, month = {2004/02/20/}, pages = {7055 - 7063}, abstract = {Diverse cellular events are regulated by post-translational modification of substrate proteins via covalent attachment of one or a chain of ubiquitin molecules. The outcome of (poly)ubiquitination depends upon the specific lysine residues involved in the formation of polyubiquitin chains. Lys48-linked chains act as a universal signal for proteasomal degradation, whereas Lys63-linked chains act as a specific signal in several non-degradative processes. Although it has been anticipated that functional diversity between alternatively linked polyubiquitin chains relies on linkage-dependent differences in chain conformation/topology, direct structural evidence in support of this model has been lacking. Here we use NMR methods to determine the structure of a Lys63-linked di-ubiquitin chain. The structure is characterized by an extended conformation, with no direct contact between the hydrophobic residues Leu8, Ile44, and Val70 on the ubiquitin units. This structure contrasts with the closed conformation observed for Lys48-linked di-ubiquitin wherein these residues form the interdomain interface (Cook, W. J., Jeffrey, L. C., Carson, M., Zhijian, C., and Pickart, C. M. (1992) J. Biol. Chem. 267, 16467-16471; Varadan, R., Walker, O., Pickart, C., and Fushman, D. (2002) J. Mol. Biol. 324, 637-647). Consistent with the open conformation of the Lys63-linked di-ubiquitin, our binding studies show that both ubiquitin domains in this chain can bind a ubiquitin-associated domain from HHR23A independently and in a mode similar to that for mono-ubiquitin. In contrast, Lys48-linked di-ubiquitin binds in a different, higher affinity mode that has yet to be determined.
This is the first experimental evidence that alternatively linked polyubiquitin chains adopt distinct conformations.}, isbn = {0021-9258, 1083-351X}, doi = {10.1074/jbc.M309184200}, url = {http://www.jbc.org/content/279/8/7055}, author = {Varadan,Ranjani and Assfalg,Michael and Haririnia,Aydin and Raasi,Shahri and Pickart,Cecile and Fushman, David} } @article {18725, title = {Ubistatins Inhibit Proteasome-Dependent Degradation by Binding the Ubiquitin Chain}, journal = {Science}, volume = {306}, year = {2004}, month = {2004/10/01/}, pages = {117 - 120}, abstract = {To identify previously unknown small molecules that inhibit cell cycle machinery, we performed a chemical genetic screen in Xenopus extracts. One class of inhibitors, termed ubistatins, blocked cell cycle progression by inhibiting cyclin B proteolysis and inhibited degradation of ubiquitinated Sic1 by purified proteasomes. Ubistatins blocked the binding of ubiquitinated substrates to the proteasome by targeting the ubiquitin-ubiquitin interface of Lys48-linked chains. The same interface is recognized by ubiquitin-chain receptors of the proteasome, indicating that ubistatins act by disrupting a critical protein-protein interaction in the ubiquitin-proteasome system.}, doi = {10.1126/science.1100946}, url = {http://www.sciencemag.org/cgi/content/abstract/sci;306/5693/117}, author = {Verma,Rati and Peters,Noel R. and D{\textquoteright}Onofrio,Mariapina and Tochtrop,Gregory P. and Sakamoto,Kathleen M. and Varadan,Ranjani and Zhang,Mingsheng and Coffino,Philip and Fushman, David and Deshaies,Raymond J. and King,Randall W.} } @article {12936, title = {Viable but Nonculturable Vibrio Cholerae O1 in the Aquatic Environment of Argentina}, journal = {Applied and Environmental Microbiology}, volume = {70}, year = {2004}, month = {2004/12/01/}, pages = {7481 - 7486}, abstract = {In Argentina, as in other countries of Latin America, cholera has occurred in an epidemic pattern. Vibrio cholerae O1 is native to the aquatic environment, and it occurs in both culturable and viable but nonculturable (VNC) forms, the latter during interepidemic periods. This is the first report of the presence of VNC V. cholerae O1 in the estuarine and marine waters of the R{\'\i}o de la Plata and the Argentine shelf of the Atlantic Ocean, respectively. Employing immunofluorescence and PCR methods, we were able to detect reservoirs of V. cholerae O1 carrying the virulence-associated genes ctxA and tcpA. The VNC forms of V. cholerae O1 were identified in samples of water, phytoplankton, and zooplankton; the latter organisms were mainly the copepods Acartia tonsa, Diaptomus sp., Paracalanus crassirostris, and Paracalanus parvus. We found that under favorable conditions, the VNC form of V. cholerae can revert to the pathogenic, transmissible state. We concluded that V. cholerae O1 is a resident of Argentinean waters, as has been shown to be the case in other geographic regions of the world.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.70.12.7481-7486.2004}, url = {http://aem.asm.org/content/70/12/7481}, author = {Binsztein,Norma and Costagliola,Marcela C. and Pichel,Mariana and Jurquiza,Ver{\'o}nica and Ram{\'\i}rez,Fernando C.
and Akselman,Rut and Vacchino,Marta and Huq,Anwarul and Rita R Colwell} } @article {12745, title = {Activity modeling and recognition using shape theory}, journal = {Behavior Representation in Modeling and Simulation}, year = {2003}, month = {2003///}, abstract = {Understanding activities arising out of the interactions of a configuration of moving objects is an important problem in video understanding, with applications in surveillance and monitoring, animation, medicine, etc. In this paper, we introduce a novel method for activity modeling based on the observation that every activity has with it an associated structure characterized by a non-rigid shape and a dynamic model that characterizes the variations in the structure as the activity unfolds. We propose two mathematical models to characterize the non-rigid shape and its dynamics. In our first approach, we propose to model an activity by the polygonal shape formed by joining the locations of these point masses at any time $t$, and its deformation over time. This uses the statistical shape theory of Kendall. The second approach models the trajectories of each separate class of moving objects in 3D shape space, and thus can identify different kinds of activities. It is based on the factorization theorem for matrices, which has been used before in computer vision for structure estimation. Deviations from the learned normal shape for each activity are used to identify abnormal ones. We demonstrate the applicability of our algorithms using real-life video sequences in an airport surveillance environment. We are able to identify the major activities that take place in that setting and detect abnormal ones.}, author = {Chellapa, Rama and Vaswani, N. and Chowdhury, A.K.R.} } @conference {12729, title = {Activity recognition using the dynamics of the configuration of interacting objects}, booktitle = {Computer Vision and Pattern Recognition, 2003. Proceedings. 2003 IEEE Computer Society Conference on}, volume = {2}, year = {2003}, month = {2003/06//}, pages = {II - 633-40 vol.2}, abstract = {Monitoring activities using video data is an important surveillance problem. A special scenario is to learn the pattern of normal activities and detect abnormal events from a very low resolution video where the moving objects are small enough to be modeled as point objects in a 2D plane. Instead of tracking each point separately, we propose to model an activity by the polygonal {\textquoteright}shape{\textquoteright} of the configuration of these point masses at any time t, and its deformation over time. We learn the mean shape and the dynamics of the shape change using hand-picked location data (no observation noise) and define an abnormality detection statistic for the simple case of a test sequence with negligible observation noise. For the more practical case where observation (point locations) noise is large and cannot be ignored, we use a particle filter to estimate the probability distribution of the shape given the noisy observations up to the current time. Abnormality detection in this case is formulated as a change detection problem. We propose a detection strategy that can detect both {\textquoteright}drastic{\textquoteright} and {\textquoteright}slow{\textquoteright} abnormalities.
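[Editorial aside: a toy 1-D analogue of the change-detection formulation in the activity-recognition entries above: a bootstrap particle filter tracks under the learned normal model, and an abnormality is flagged when the tracking-error statistic exceeds a threshold calibrated on normal frames. The dynamics, noise levels, and threshold rule are invented; the related papers' statistic for slow changes is not reproduced here.]

```python
import numpy as np

rng = np.random.default_rng(1)

def simulate(T, t_change, a=0.95, drift=1.0):
    """AR(1) state; after t_change a drift term appears (the 'abnormality')."""
    xs, ys, x = [], [], 0.0
    for t in range(T):
        x = a * x + (drift if t >= t_change else 0.0) + rng.normal(0, 0.5)
        xs.append(x)
        ys.append(x + rng.normal(0, 0.3))
    return np.array(xs), np.array(ys)

def tracking_error(ys, n=500, a=0.95, obs_sd=0.3):
    """Bootstrap particle filter under the *normal* model; returns the
    per-frame tracking error |y_t - E[x_t | y_1..t]|."""
    p, err = np.zeros(n), []
    for y in ys:
        p = a * p + rng.normal(0, 0.5, n)        # propagate (no drift assumed)
        w = np.exp(-0.5 * ((y - p) / obs_sd) ** 2)
        if w.sum() == 0.0:                       # filter has lost track entirely
            w[:] = 1.0
        w /= w.sum()
        err.append(abs(y - np.sum(w * p)))
        p = p[rng.choice(n, size=n, p=w)]        # resample
    return np.array(err)

xs, ys = simulate(T=200, t_change=120)
err = tracking_error(ys)
thresh = err[:100].mean() + 4 * err[:100].std()  # calibrated on normal frames
alarms = np.nonzero(err > thresh)[0]
print("first alarm at t =", int(alarms[0]) if alarms.size else None)
```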
Our framework can be directly applied to object location data obtained using any type of sensor: visible, radar, infrared or acoustic.}, keywords = {abnormality detection, activity recognition, change detection, computer vision, image motion analysis, interacting objects, mean shape, particle filter, polygonal shape, probability distribution, surveillance, target tracking, video signal processing}, doi = {10.1109/CVPR.2003.1211526}, author = {Vaswani, N. and RoyChowdhury, A. and Chellapa, Rama} } @article {18023, title = {Deterministic Resource Discovery in Distributed Networks}, journal = {Theory of Computing Systems}, volume = {36}, year = {2003}, month = {2003///}, pages = {479 - 495}, abstract = {The resource discovery problem was introduced by Harchol-Balter, Leighton, and Lewin. They developed a number of algorithms for the problem in the weakly connected directed graph model. This model is a directed logical graph that represents the vertices{\textquoteright} knowledge about the topology of the underlying communication network. The current paper proposes a deterministic algorithm for the problem in the same model, with improved time, message, and communication complexities. Each previous algorithm had a complexity that was higher at least in one of the measures. Specifically, previous deterministic solutions required either time linear in the diameter of the initial network, or communication complexity $O(n^3)$ (with message complexity $O(n^2)$), or message complexity $O(|E_0| \log n)$ (where $E_0$ is the arc set of the initial graph $G_0$). Compared with the main randomized algorithm of Harchol-Balter, Leighton, and Lewin, the time complexity is reduced from $O(\log^2 n)$ to $O(\log n)$, the message complexity from $O(n \log^2 n)$ to $O(n \log n)$, and the communication complexity from $O(n^2 \log^3 n)$ to $O(|E_0| \log^2 n)$. Our work significantly extends the connectivity algorithm of Shiloach and Vishkin which was originally given for a parallel model of computation. Our result also confirms a conjecture of Harchol-Balter, Leighton, and Lewin, and addresses an open question due to Lipton.}, keywords = {Computer, Science}, isbn = {1432-4350}, url = {http://dx.doi.org/10.1007/s00224-003-1084-8}, author = {Kutten,Shay and Peleg,David and Vishkin, Uzi} } @article {16289, title = {The Dog Genome: Survey Sequencing and Comparative Analysis}, journal = {Science}, volume = {301}, year = {2003}, month = {2003/09/26/}, pages = {1898 - 1903}, abstract = {A survey of the dog genome sequence (6.22 million sequence reads; 1.5{\texttimes} coverage) demonstrates the power of sample sequencing for comparative analysis of mammalian genomes and the generation of species-specific resources. More than 650 million base pairs (>25\%) of dog sequence align uniquely to the human genome, including fragments of putative orthologs for 18,473 of 24,567 annotated human genes. Mutation rates, conserved synteny, repeat content, and phylogeny can be compared among human, mouse, and dog.
A variety of polymorphic elements are identified that will be valuable for mapping the genetic basis of diseases and traits in the dog.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1086432}, url = {http://www.sciencemag.org/content/301/5641/1898}, author = {Kirkness,Ewen F. and Bafna,Vineet and Halpern,Aaron L. and Levy,Samuel and Remington,Karin and Rusch,Douglas B and Delcher,Arthur L. and Pop, Mihai and Wang,Wei and Fraser,Claire M. and Venter,J. Craig} } @book {12138, title = {Identifying Relevant Information for Testing Technique Selection: An Instantiated Characterization Schema}, year = {2003}, month = {2003/04/01/}, publisher = {Springer}, organization = {Springer}, abstract = {The importance of properly selecting testing techniques is widely accepted in the software engineering community today. However, there are chiefly two reasons why the selections now made by software developers are difficult to evaluate as correct. First, there are several techniques with which the average developer is unfamiliar, often leaving testers with limited knowledge of all the techniques currently available. Second, the available information regarding the different testing techniques is primarily procedural (focused on how to use the technique), rather than pragmatic (focused on the effect and appropriateness of using the technique). The problem addressed in this book is aimed at improving software testing technique selection. Identifying Relevant Information for Testing Technique Selection: An Instantiated Characterization Schema will train its readers how to use the conceptual tool presented here in various ways. Developers will improve their testing technique selection process by systematically and objectively selecting the testing techniques for a software project. Developers will also build a repository containing their own experience with the application of various software testing techniques. Researchers will focus their research on the relevant aspects of testing technique when creating it, and when comparing different techniques. Identifying Relevant Information for Testing Technique Selection: An Instantiated Characterization Schema is designed to meet the needs of a professional audience in software engineering. This book is also suitable for graduate-level students in computer science and engineering.}, keywords = {Business \& Economics / Information Management, Computer software, Computer software - Testing, Computer software/ Testing, Computers / Information Technology, Computers / Internet / Application Development, Computers / Programming / General, Computers / Programming Languages / General, Computers / Software Development \& Engineering / General, Computers / Software Development \& Engineering / Quality Assurance \& Testing, Technology \& Engineering / Materials Science}, isbn = {9781402074356}, author = {Vegas,Sira and Juristo,Natalia and Basili, Victor R.} } @conference {17939, title = {Interactive subsurface scattering for translucent meshes}, booktitle = {Proceedings of the 2003 symposium on Interactive 3D graphics}, series = {I3D {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {75 - 82}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We propose a simple lighting model to incorporate subsurface scattering effects within the local illumination framework. Subsurface scattering is relatively local due to its exponential falloff and has little effect on the appearance of neighboring objects.
These observations have motivated us to approximate the BSSRDF model and to model subsurface scattering effects by using only local illumination. Our model is able to capture the most important features of subsurface scattering: reflection and transmission due to multiple scattering. In our approach we build the neighborhood information as a preprocess and modify the traditional local illumination model into a run-time two-stage process. In the first stage we compute the reflection and transmission of light on the surface. The second stage involves bleeding the scattering effects from a vertex{\textquoteright}s neighborhood to produce the final result. We then show how to merge the run-time two-stage process into a run-time single-stage process using precomputed integrals. The complexity of our run-time algorithm is O(N), where N is the number of vertices. Using this approach, we achieve interactive frame rates with about one to two orders of magnitude speedup compared with the state-of-the-art methods.}, keywords = {BSSRDF, local illumination, reflection models, subsurface scattering}, isbn = {1-58113-645-5}, doi = {10.1145/641480.641497}, url = {http://doi.acm.org/10.1145/641480.641497}, author = {Hao,Xuejun and Baby,Thomas and Varshney, Amitabh} } @article {17923, title = {Modeling and Rendering of Points with Local Geometry}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {9}, year = {2003}, month = {2003///}, pages = {30 - 42}, abstract = {We present a novel rendering primitive that combines the modeling brevity of points with the rasterization efficiency of polygons. The surface is represented by a sampled collection of Differential Points (DP), each with embedded curvature information that captures the local differential geometry in the vicinity of that point. This is a more general point representation that, for the cost of a few additional bytes, packs much more information per point than the traditional point-based models. This information is used to efficiently render the surface as a collection of local geometries. To use hardware acceleration, the DPs are quantized into 256 different types and each sampled point is approximated by the closest quantized DP and is rendered as a normal-mapped rectangle. The advantages of this representation are: 1) The surface can be represented more sparsely compared to other point primitives, 2) it achieves a robust hardware accelerated per-pixel shading{\textemdash}even with no connectivity information, and 3) it offers a novel point-based simplification technique that factors in the complexity of the local geometry. The number of primitives being equal, DPs produce a much better quality of rendering than a pure splat-based approach.
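[Editorial aside: a sketch of the quantization step in the Differential Points entry above: per-point curvature attributes are mapped to one of 256 representative types so the renderer can treat each sample as a quantized, normal-mapped primitive. The uniform 16 x 16 binning of two principal curvatures is an assumption; the paper's quantizer may differ.]

```python
import numpy as np

rng = np.random.default_rng(2)
kappa = rng.normal(0.0, 1.0, (10000, 2))    # per-point principal curvatures
                                            # (stand-ins for measured values)

# A 16 x 16 = 256-entry codebook from uniform bins over each curvature's
# observed range; the paper's actual quantizer may differ.
EDGES = [np.linspace(kappa[:, j].min(), kappa[:, j].max(), 17) for j in (0, 1)]

def quantize(k):
    """Map per-point curvature pairs to one of 256 differential-point types."""
    i = np.clip(np.searchsorted(EDGES[0], k[:, 0]) - 1, 0, 15)
    j = np.clip(np.searchsorted(EDGES[1], k[:, 1]) - 1, 0, 15)
    return i * 16 + j                        # type id in [0, 255]

types = quantize(kappa)
mids = [(e[:-1] + e[1:]) / 2 for e in EDGES]            # bin centers
approx = np.column_stack([mids[0][types // 16], mids[1][types % 16]])
print("codebook types used:", np.unique(types).size,
      "mean abs error:", np.abs(kappa - approx).mean().round(4))
```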
Visual appearances being similar, DPs are about two times faster and require about 75 percent less disk space in comparison to splatting primitives.}, keywords = {differential geometry, per-pixel shading, point sample rendering, simplification}, isbn = {1077-2626}, doi = {http://doi.ieeecomputersociety.org/10.1109/TVCG.2003.1175095}, author = {Kalaiah,Aravind and Varshney, Amitabh} } @article {12737, title = {A particle filtering approach to abnormality detection in nonlinear systems and its application to abnormal activity detection}, journal = {3rd Int{\textquoteright}l Workshop on Statistical and Computational Theories of Vision, Nice, France}, year = {2003}, month = {2003///}, abstract = {We study abnormality detection in partially observed nonlinear dynamic systems tracked using particle filters. An {\textquoteleft}abnormality{\textquoteright} is defined as a change in the system model, which could be drastic or gradual, with the parameters of the changed system unknown. If the change is drastic the particle filter will lose track rapidly and the increase in tracking error can be used to detect the change. In this paper we propose a new statistic for detecting {\textquoteleft}slow{\textquoteright} changes or abnormalities which do not cause the particle filter to lose track for a long time. In a previous work, we have proposed a partially observed nonlinear dynamical system for modeling the configuration dynamics of a group of interacting point objects and formulated abnormal activity detection as a change detection problem. We show here results for abnormal activity detection comparing our proposed change detection strategy with others used in the literature.}, author = {Vaswani, N. and Chellapa, Rama} } @article {18021, title = {Prefix sums and an application thereof}, volume = {09/224,104}, year = {2003}, month = {2003/04/01/}, abstract = {A method for performing prefix sums, by including a prefix sum instruction in the instruction set of a microprocessor. Both general prefix summation, base-zero prefix summation and base-zero suffix summation are included in the scope of the present invention. The prefix sum instruction may be implemented in software, using the instructions of existing instruction sets, or may be implemented in dedicated hardware, for example, as a functional unit of a microprocessor. The hardware implementation is suitable for application to the allocation of computational resources among concurrent tasks. The scope of the present invention includes one such application: guaranteeing conflict-free access to multiple single-ported register files.}, url = {http://www.google.com/patents?id=qCAPAAAAEBAJ}, author = {Vishkin, Uzi}, editor = {Ramot at Tel Aviv University Ltd.} } @book {14916, title = {Special section on perceptual organization in computer vision}, year = {2003}, month = {2003///}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, author = {Jacobs, David W. and Lindenbaum,M. and August,J. and Zucker,SW and Ben-Shahar,O. and Tuytelaars,T. and Turina,A. and Van Gool,L.
and Mahamud,S.} } @conference {17938, title = {Statistical point geometry}, booktitle = {Proceedings of the 2003 Eurographics/ACM SIGGRAPH symposium on Geometry processing}, series = {SGP {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {107 - 115}, publisher = {Eurographics Association}, organization = {Eurographics Association}, address = {Aire-la-Ville, Switzerland, Switzerland}, abstract = {We propose a scheme for modeling point sample geometry with statistical analysis. In our scheme we depart from the current schemes that deterministically represent the attributes of each point sample. We show how the statistical analysis of a densely sampled point model can be used to improve the geometry bandwidth bottleneck and to do randomized rendering without sacrificing visual realism. We first carry out a hierarchical principal component analysis (PCA) of the model. This stage partitions the model into compact local geometries by exploiting local coherence. Our scheme handles vertex coordinates, normals, and color. The input model is reconstructed and rendered using a probability distribution derived from the PCA analysis. We demonstrate the benefits of this approach in all stages of the graphics pipeline: (1) orders of magnitude improvement in the storage and transmission complexity of point geometry, (2) direct rendering from compressed data, and (3) view-dependent randomized rendering.}, isbn = {1-58113-687-0}, url = {http://dl.acm.org/citation.cfm?id=882370.882385}, author = {Kalaiah,Aravind and Varshney, Amitabh} } @conference {12733, title = {Statistical shape theory for activity modeling}, booktitle = {Acoustics, Speech, and Signal Processing, 2003. Proceedings. (ICASSP {\textquoteright}03). 2003 IEEE International Conference on}, volume = {3}, year = {2003}, month = {2003/04//}, pages = {III - 493-6 vol.3}, abstract = {Monitoring activities in a certain region from video data is an important surveillance problem. The goal is to learn the pattern of normal activities and detect unusual ones by identifying activities that deviate appreciably from the typical ones. We propose an approach using statistical shape theory based on the shape model of D.G. Kendall et al. (see "Shape and Shape Theory", John Wiley and Sons, 1999). In a low resolution video, each moving object is best represented as a moving point mass or particle. In this case, an activity can be defined by the interactions of all or some of these moving particles over time. We model this configuration of the particles by a polygonal shape formed from the locations of the points in a frame and the activity by the deformation of the polygons in time. These parameters are learned for each typical activity. Given a test video sequence, an activity is classified as abnormal if the probability for the sequence (represented by the mean shape and the dynamics of the deviations), given the model, is below a certain threshold. The approach gives very encouraging results in surveillance applications using a single camera and is able to identify various kinds of abnormal behavior.}, keywords = {abnormal behavior detection, activity modeling, image sequences, pattern classification, point masses, polygonal shape, statistical shape theory, surveillance, video signal processing}, doi = {10.1109/ICASSP.2003.1199519}, author = {Vaswani, N. and Chowdhury, A.R.
and Chellapa, Rama} } @article {18022, title = {Towards a first vertical prototyping of an extremely fine-grained parallel programming approach}, journal = {Theory of Computing Systems}, volume = {36}, year = {2003}, month = {2003///}, pages = {521 - 552}, abstract = {Explicit multithreading (XMT) is a parallel programming approach for exploiting on-chip parallelism. XMT introduces a computational framework with (1) a simple programming style that relies on fine-grained PRAM-style algorithms; (2) hardware support for low-overhead parallel threads, scalable load balancing, and efficient synchronization. The missing link between the algorithmic-programming level and the architecture level is provided by the first prototype XMT compiler. This paper also takes this new opportunity to evaluate the overall effectiveness of the interaction between the programming model and the hardware, and enhance its performance where needed, incorporating new optimizations into the XMT compiler. We present a wide range of applications, which, written in XMT, obtain significant speedups relative to the best serial programs. We show that XMT is especially useful for more advanced applications with dynamic, irregular access patterns, while for regular computations we demonstrate performance gains that scale up to much higher levels than have been demonstrated before for on-chip systems.}, author = {Naishlos,D. and Nuzman,J. and Tseng,C. W and Vishkin, Uzi} } @conference {12765, title = {3D face reconstruction from video using a generic model}, booktitle = {Multimedia and Expo, 2002. ICME {\textquoteright}02. Proceedings. 2002 IEEE International Conference on}, volume = {1}, year = {2002}, month = {2002///}, pages = {449 - 452 vol.1}, abstract = {Reconstructing a 3D model of a human face from a video sequence is an important problem in computer vision, with applications to recognition, surveillance, multimedia, etc. However, the quality of 3D reconstructions using structure from motion (SfM) algorithms is often not satisfactory. One common method of overcoming this problem is to use a generic model of a face. Existing work using this approach initializes the reconstruction algorithm with this generic model. The problem with this approach is that the algorithm can converge to a solution very close to this initial value, resulting in a reconstruction which resembles the generic model rather than the particular face in the video which needs to be modeled. We propose a method of 3D reconstruction of a human face from video in which the 3D reconstruction algorithm and the generic model are handled separately. A 3D estimate is obtained purely from the video sequence using SfM algorithms without use of the generic model. The final 3D model is obtained after combining the SfM estimate and the generic model using an energy function that corrects for the errors in the estimate by comparing local regions in the two models. The optimization is done using a Markov chain Monte Carlo (MCMC) sampling strategy. The main advantage of our algorithm over others is that it is able to retain the specific features of the face in the video sequence even when these features are different from those of the generic model.
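[Editorial aside: a toy 1-D analogue of the MCMC model combination in the 3D face reconstruction entry above: a Metropolis sampler minimizes an energy that keeps the solution close to the SfM estimate while a weak prior and a smoothness term pull toward the generic model, so distinctive features survive. The energy weights, proposal scale, and the 1-D profile are assumptions standing in for the paper's mesh-based, region-comparing energy.]

```python
import numpy as np

rng = np.random.default_rng(3)
n = 100
generic = np.sin(np.linspace(0, np.pi, n))                 # generic face profile
truth = generic + 0.3 * np.exp(-np.linspace(-4, 4, n)**2)  # distinctive feature
sfm = truth + rng.normal(0, 0.15, n)                       # noisy SfM estimate

def energy(z, lam=4.0, mu=0.2):
    """Data term ties z to the SfM estimate; a weak prior pulls toward the
    generic model; a smoothness term regularizes (all weights guessed)."""
    return (np.sum((z - sfm) ** 2)
            + mu * np.sum((z - generic) ** 2)
            + lam * np.sum(np.diff(z) ** 2))

z, e = sfm.copy(), energy(sfm)
for _ in range(20000):                      # Metropolis-Hastings over vertices
    i = rng.integers(n)
    cand = z.copy()
    cand[i] += rng.normal(0, 0.05)
    ec = energy(cand)
    if ec < e or rng.random() < np.exp(e - ec):   # always downhill, sometimes up
        z, e = cand, ec

print("rms vs truth:", np.sqrt(np.mean((z - truth)**2)).round(4),
      "| sfm alone:", np.sqrt(np.mean((sfm - truth)**2)).round(4))
```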
The evolution of the 3D model through the various stages of the algorithm is presented.}, keywords = {3D face reconstruction, computer vision, generic model, Markov chain Monte Carlo, optimization, structure from motion, video sequences}, doi = {10.1109/ICME.2002.1035815}, author = {Chowdhury, A.R. and Chellapa, Rama and Krishnamurthy, S. and Vo, T.} } @inbook {14598, title = {Combinatorial Algorithms for Design of DNA Arrays}, booktitle = {Chip Technology}, series = {Advances in Biochemical Engineering/Biotechnology}, volume = {77}, year = {2002}, month = {2002///}, pages = {1 - 19}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Optimal design of DNA arrays requires the development of algorithms with two-fold goals: reducing the effects caused by unintended illumination (border length minimization problem) and reducing the complexity of masks (mask decomposition problem). We describe algorithms that reduce the number of rectangles in mask decomposition by 20{\textendash}30\% as compared to a standard array design under the assumption that the arrangement of oligonucleotides on the array is fixed. This algorithm produces a provably optimal solution for all studied real instances of array design. We also address the difficult problem of finding an arrangement which minimizes the border length and come up with a new idea of threading that significantly reduces the border length as compared to standard designs.}, isbn = {978-3-540-43215-9}, url = {http://dx.doi.org/10.1007/3-540-45713-5_1}, author = {Hannenhalli, Sridhar and Hubbell,Earl and Lipshutz,Robert and Pevzner,Pavel}, editor = {Hoheisel,J{\"o}rg and Brazma,A. and B{\"u}ssow,K. and Cantor,C. and Christians,F. and Chui,G. and Diaz,R. and Drmanac,R. and Drmanac,S. and Eickhoff,H. and Fellenberg,K. and Hannenhalli, Sridhar and Hoheisel,J. and Hou,A. and Hubbell,E. and Jin,H. and Jin,P. and Jurinke,C. and Konthur,Z. and K{\"o}ster,H. and Kwon,S. and Lacy,S. and Lehrach,H. and Lipshutz,R. and Little,D. and Lueking,A. and McGall,G. and Moeur,B. and Nordhoff,E. and Nyarsik,L. and Pevzner,P. and Robinson,A. and Sarkans,U. and Shafto,J. and Sohail,M. and Southern,E. and Swanson,D. and Ukrainczyk,T. and van den Boom,D. and Vilo,J. and Vingron,M. and Walter,G. and Xu,C.} } @article {16270, title = {Genome sequence and comparative analysis of the model rodent malaria parasite Plasmodium yoelii yoelii}, journal = {Nature}, volume = {419}, year = {2002}, month = {2002/10/03/}, pages = {512 - 519}, abstract = {Species of malaria parasite that infect rodents have long been used as models for malaria disease research. Here we report the whole-genome shotgun sequence of one species, Plasmodium yoelii yoelii, and comparative studies with the genome of the human malaria parasite Plasmodium falciparum clone 3D7. A synteny map of 2,212 P. y. yoelii contiguous DNA sequences (contigs) aligned to 14 P. falciparum chromosomes reveals marked conservation of gene synteny within the body of each chromosome. Of about 5,300 P. falciparum genes, more than 3,300 P. y. yoelii orthologues of predominantly metabolic function were identified. Over 800 copies of a variant antigen gene located in subtelomeric regions were found.
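[Editorial aside: a minimal sketch of the border-length measure behind the DNA-array chapter above (Hannenhalli et al.): the cost of a placement is the number of positions at which probes on adjacent array sites differ, and arrangements that keep similar probes adjacent reduce it. The probe set and layouts are invented; the chapter's threading heuristic is not implemented here.]

```python
from itertools import product
from random import Random

def border_length(grid):
    """Sum over adjacent array sites of the number of positions at which the
    two probes differ -- a simple proxy for mask border length."""
    rows, cols = len(grid), len(grid[0])
    diff = lambda p, q: sum(a != b for a, b in zip(p, q))
    horiz = sum(diff(grid[r][c], grid[r][c + 1])
                for r in range(rows) for c in range(cols - 1))
    vert = sum(diff(grid[r][c], grid[r + 1][c])
               for r in range(rows - 1) for c in range(cols))
    return horiz + vert

def as_grid(seq, cols=8):
    return [seq[i:i + cols] for i in range(0, len(seq), cols)]

probes = ["".join(p) for p in product("ACGT", repeat=3)]   # 64 length-3 probes
shuffled = probes[:]
Random(0).shuffle(shuffled)
print("lexicographic layout:", border_length(as_grid(probes)),
      "| random layout:", border_length(as_grid(shuffled)))
```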
This is the first genome sequence of a model eukaryotic parasite, and it provides insight into the use of such systems in the modelling of Plasmodium biology and disease.}, isbn = {0028-0836}, doi = {10.1038/nature01099}, url = {http://www.nature.com/nature/journal/v419/n6906/full/nature01099.html}, author = {Carlton,Jane M. and Angiuoli,Samuel V and Suh,Bernard B. and Kooij,Taco W. and Pertea,Mihaela and Silva,Joana C. and Ermolaeva,Maria D. and Allen,Jonathan E and Jeremy D Selengut and Koo,Hean L. and Peterson,Jeremy D. and Pop, Mihai and Kosack,Daniel S. and Shumway,Martin F. and Bidwell,Shelby L. and Shallom,Shamira J. and Aken,Susan E. van and Riedmuller,Steven B. and Feldblyum,Tamara V. and Cho,Jennifer K. and Quackenbush,John and Sedegah,Martha and Shoaibi,Azadeh and Cummings,Leda M. and Florens,Laurence and Yates,John R. and Raine,J. Dale and Sinden,Robert E. and Harris,Michael A. and Cunningham,Deirdre A. and Preiser,Peter R. and Bergman,Lawrence W. and Vaidya,Akhil B. and Lin,Leo H. van and Janse,Chris J. and Waters,Andrew P. and Smith,Hamilton O. and White,Owen R. and Salzberg,Steven L. and Venter,J. Craig and Fraser,Claire M. and Hoffman,Stephen L. and Gardner,Malcolm J. and Carucci,Daniel J.} } @inbook {17604, title = {Improved Approximation Algorithms for the Partial Vertex Cover Problem}, booktitle = {Approximation Algorithms for Combinatorial Optimization}, series = {Lecture Notes in Computer Science}, volume = {2462}, year = {2002}, month = {2002///}, pages = {161 - 174}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The partial vertex cover problem is a generalization of the vertex cover problem: given an undirected graph G = (V, E) and an integer k, we wish to choose a minimum number of vertices such that at least k edges are covered. Just as for vertex cover, 2-approximation algorithms are known for this problem, and it is of interest to see if we can do better than this. The current-best approximation ratio for partial vertex cover, when parameterized by the maximum degree d of G, is $(2 - \Theta(1/d))$. We improve on this by presenting a $\left(2 - \Theta\left(\tfrac{\ln \ln d}{\ln d}\right)\right)$-approximation algorithm for partial vertex cover using semidefinite programming, matching the current-best bound for vertex cover. Our algorithm uses a new rounding technique, which involves a delicate probabilistic analysis.}, isbn = {978-3-540-44186-1}, url = {http://dx.doi.org/10.1007/3-540-45753-4_15}, author = {Halperin,Eran and Srinivasan, Aravind}, editor = {Jansen,Klaus and Leonardi,Stefano and Vazirani,Vijay} } @book {19352, title = {Level of Detail for 3D Graphics}, year = {2002}, month = {2002}, publisher = {Elsevier Science Inc.}, organization = {Elsevier Science Inc.}, address = {New York, NY, USA}, abstract = {Level of detail (LOD) techniques are increasingly used by professional real-time developers to strike the balance between breathtaking virtual worlds and smooth, flowing animation. Level of Detail for 3D Graphics brings together, for the first time, the mechanisms, principles, practices, and theory needed by every graphics developer seeking to apply LOD methods. Continuing advances in level of detail management have brought this powerful technology to the forefront of 3D graphics optimization research.
This book, written by the very researchers and developers who have built LOD technology, is both a state-of-the-art chronicle of LOD advances and a practical sourcebook, which will enable graphics developers from all disciplines to apply these formidable techniques to their own work. }, isbn = {1558608389}, author = {Luebke, David and Watson, Benjamin and Cohen, Jonathan D. and Reddy, Martin and Varshney, Amitabh} } @inbook {14248, title = {Polydioptric Cameras: New Eyes for Structure from Motion}, booktitle = {Pattern Recognition}, series = {Lecture Notes in Computer Science}, volume = {2449}, year = {2002}, month = {2002///}, pages = {618 - 625}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We examine the influence of camera design on the estimation of the motion and structure of a scene from video data. Every camera captures a subset of the light rays passing through some volume in space. By relating the differential structure of the time-varying space of light rays to different known and new camera designs, we can establish a hierarchy of cameras. This hierarchy is based upon the stability and complexity of the computations necessary to estimate structure and motion. At the low end of this hierarchy is the standard planar pinhole camera, for which the structure from motion problem is non-linear and ill-posed. At the high end is a camera, which we call the full field of view polydioptric camera, for which the problem is linear and stable. We develop design suggestions for the polydioptric camera, and based upon this new design we propose a linear algorithm for structure-from-motion estimation, which combines differential motion estimation with differential stereo.}, isbn = {978-3-540-44209-7}, url = {http://dx.doi.org/10.1007/3-540-45783-6_74}, author = {Neumann, Jan and Ferm{\"u}ller, Cornelia and Aloimonos, J.}, editor = {Van Gool,Luc} } @article {17927, title = {A real-time seamless tiled display system for 3D graphics}, journal = {Immersive Projection Technology Symposium of the IEEE Virtual Reality 2002 Conference (VR2002 IPT)}, year = {2002}, month = {2002///}, abstract = {We outline our seamless tiled display system for interactive 3D graphics applications that is low-cost, easy to calibrate, scalable, and portable. Our system achieves geometric alignment in software by pre-warping the 3D space, in contrast with current systems that usually achieve this by 2D image pre-warping. Our system accomplishes this through real-time image capture from a digital camcorder, image segmentation, and derivation of the 3D warping matrices for each 3D graphics pipeline that feeds a projector. Our prototype system demonstrates our results on a 2 {\texttimes} 2 tiled array of projectors. }, author = {Li,Z. and Varshney, Amitabh} } @article {17955, title = {Representing thermal vibrations and uncertainty in molecular surfaces}, journal = {SPIE Conference on Visualization and Data Analysis}, volume = {4665}, year = {2002}, month = {2002///}, pages = {80 - 90}, abstract = {Previous methods for computing smooth molecular surfaces assumed that each atom in a molecule has a fixed position without thermal motion or uncertainty. In the real world, the position of an atom in a molecule is fuzzy because of uncertainty in protein structure determination and the thermal energy of the atom. In this paper, we propose a method to compute the smooth molecular surface for fuzzy atoms.
The Gaussian distribution is used for modeling the fuzziness of each atom, and a p-probability sphere is computed for each atom with a certain confidence level. The smooth molecular surface with fuzzy atoms is computed efficiently from extended-radius p-probability spheres. We have implemented a program for visualizing three-dimensional molecular structures, including the smooth molecular surface with fuzzy atoms, using multi-layered transparent surfaces, where the surface of each layer has a different confidence level and a transparency associated with that confidence level. }, author = {Lee,C. H and Varshney, Amitabh} } @article {17925, title = {State of the art in data representation for visualization}, journal = {IEEE Visualization Tutorial}, year = {2002}, month = {2002///}, author = {Chen,B. and Kaufman,A. and Mueller,K. and Varshney, Amitabh} } @article {18719, title = {Structural Properties of Polyubiquitin Chains in Solution}, journal = {Journal of Molecular Biology}, volume = {324}, year = {2002}, month = {2002/12/06/}, pages = {637 - 647}, abstract = {Because polyubiquitin chain structure modulates Ub-mediated signaling, knowledge of the physiological conformations of chain signals should provide insights into specific recognition. Here, we characterized the solution conformations of K48-linked Ub2 and Ub4 using a combination of NMR techniques, including chemical shift mapping of the interdomain interface, domain orientation measurements on the basis of 15N relaxation and residual dipolar couplings, and solvent accessibility studies. Our data indicate a switch in the conformation of Ub2, from open to closed, with increasing pH. The closed conformation features a well-defined interface that is related to, but distinguishable from, that observed in the Ub2 crystal structure. This interface is dynamic in solution, such that important hydrophobic residues (L8, I44, V70) that are sequestered at the interface in the closed conformation may be accessible for direct interactions with recognition factors. Our results suggest that the distal two units of Ub4, which is the minimum signal for efficient proteasomal degradation, may adopt the closed Ub2 conformation.}, keywords = {chemical shift mapping, domain orientation measurements, polyubiquitin chains, spin relaxation, ubiquitin}, isbn = {0022-2836}, doi = {10.1016/S0022-2836(02)01198-1}, url = {http://www.sciencedirect.com/science/article/pii/S0022283602011981}, author = {Varadan,Ranjani and Walker,Olivier and Pickart,Cecile and Fushman, David} } @article {17973, title = {System and method for entering text in a virtual environment}, volume = {09/364,433}, year = {2002}, month = {2002/06/18/}, abstract = {A system and method for entering text in a virtual environment by sensory gloves. The user enters a key that represents one or more letters by simulating a press of a keyboard in the gloves. The user calibrates the gloves by entering text, during which time the system establishes threshold values that represent simulated presses for each finger. After the initial calibration of the sensory gloves, the user enters text with simulated finger presses. The system distinguishes which movements are intended as simulated finger presses by examining the relative motions of fingers and maintaining dynamic thresholds. Errors are alleviated by providing feedback to the user, such as beeps and a visual display of the fingers and the current text.
Because keys may represent more than one character, the system determines the intended text by probabilistic analysis and the Viterbi algorithm.}, url = {http://www.google.com/patents?id=w0QJAAAAEBAJ}, author = {Evans,Francine and Skiena,Steven and Varshney, Amitabh}, editor = {The Research Foundation of the State University of New York} } @conference {18025, title = {Two techniques for reconciling algorithm parallelism with memory constraints}, booktitle = {Proceedings of the fourteenth annual ACM symposium on Parallel algorithms and architectures}, series = {SPAA {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {95 - 98}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The utility of algorithm parallelism for coping with increased processor-to-memory latencies using "latency hiding" is part of the folklore of parallel computing. Latency hiding techniques increase the traffic to memory and therefore may "hit another wall": limited bandwidth to memory. The current paper attempts to stimulate research in the following general direction: show that algorithm parallelism need not conflict with limited bandwidth. A general technique for using parallel algorithms to enhance serial implementation in the face of processor-memory latency problems is revisited. Two techniques for alleviating memory bandwidth constraints are presented. Both techniques can be incorporated in a compiler. There is often considerable parallelism in many of the algorithms which are known as useful serial algorithms. Interestingly enough, all the examples provided for the use of the two techniques come from such serial algorithms.}, keywords = {memory systems constraints, Parallel algorithms, prefetching}, isbn = {1-58113-529-7}, doi = {10.1145/564870.564884}, url = {http://doi.acm.org/10.1145/564870.564884}, author = {Vishkin, Uzi} } @conference {19653, title = {Automatically Tracking and Analyzing the Behavior of Live Insect Colonies}, booktitle = {AGENTS {\textquoteright}01 Proceedings of the Fifth International Conference on Autonomous Agents}, series = {AGENTS {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {521 - 528}, publisher = {ACM}, organization = {ACM}, abstract = {We introduce the study of {\it live} social insect colonies as a relevant and exciting domain for the development and application of multi-agent systems modeling tools. Social insects provide a rich source of {\it traceable} social behavior for testing multi-agent tracking, prediction and modeling algorithms. An additional benefit of this research is the potential for contributions to experimental biology --- the principled techniques developed for analyzing artificial multi-agent systems can be applied to advance the state of knowledge of insect behavior. We contribute a novel machine vision system that addresses the challenge of tracking hundreds of small animals simultaneously. Fast color-based tracking is combined with movement-based tracking to locate ants in a real-time video stream. We also introduce new methods for analyzing the spatial activity of ant colonies.
The system was validated in experiments with laboratory colonies of {\it Camponotus festinatus}, and several example analyses of the colonies{\textquoteright} spatial behavior are provided.}, isbn = {1-58113-326-X}, url = {http://doi.acm.org/10.1145/375735.376434}, author = {Balch, Tucker and Zia Khan and Veloso, Manuela} } @article {17933, title = {Differential point rendering}, journal = {Proceedings of 12th Eurographics Workshop on Rendering}, year = {2001}, month = {2001///}, pages = {139 - 150}, author = {Varshney, Amitabh and Kalaiah,A.} } @article {18027, title = {Evaluating the XMT parallel programming model}, journal = {High-Level Parallel Programming Models and Supportive Environments}, year = {2001}, month = {2001///}, pages = {95 - 108}, abstract = {Explicit-multithreading (XMT) is a parallel programming model designed for exploiting on-chip parallelism. Its features include a simple thread execution model and an efficient prefix-sum instruction for synchronizing shared data accesses. By taking advantage of low-overhead parallel threads and high on-chip memory bandwidth, the XMT model tries to reduce the burden on programmers by obviating the need for explicit task assignment and thread coarsening. This paper presents features of the XMT programming model, and evaluates their utility through experiments on a prototype XMT compiler and architecture simulator. We find the lack of explicit task assignment has slight effects on performance for the XMT architecture. Despite low thread overhead, thread coarsening is still necessary to some extent, but can usually be automatically applied by the XMT compiler. The prefix-sum instruction provides more scalable synchronization than traditional locks, and the simple run-until-completion thread execution model (no busy-waits) does not impair performance. Finally, the combination of features in XMT can encourage simpler parallel algorithms that may be more efficient than more traditional complex approaches.}, author = {Naishlos,D. and Nuzman,J. and Tseng,C. W and Vishkin, Uzi} } @inbook {14217, title = {Eyes from Eyes}, booktitle = {3D Structure from Images {\textemdash} SMILE 2000}, series = {Lecture Notes in Computer Science}, volume = {2018}, year = {2001}, month = {2001///}, pages = {204 - 217}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We describe a family of new imaging systems, called Argus eyes, that consist of common video cameras arranged in some network. The system we built consists of six cameras arranged so that they sample different parts of the visual sphere. This system has the capability of very accurately estimating its own 3D motion and consequently estimating shape models from the individual videos. The reason is that inherent ambiguities of confusion between translation and rotation disappear in this case.
We provide an algorithm and several experiments using real outdoor or indoor images demonstrating the superiority of the new sensor with regard to 3D motion estimation.}, isbn = {978-3-540-41845-0}, url = {http://dx.doi.org/10.1007/3-540-45296-6_14}, author = {Baker,Patrick and Pless,Robert and Ferm{\"u}ller, Cornelia and Aloimonos, J.}, editor = {Pollefeys,Marc and Van Gool,Luc and Zisserman,Andrew and Fitzgibbon,Andrew} } @article {17924, title = {Guest Editor{\textquoteright}s Introduction: Special Issue on Visualization 2000}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {7}, year = {2001}, month = {2001///}, pages = {193 - 194}, isbn = {1077-2626}, doi = {http://doi.ieeecomputersociety.org/10.1109/TVCG.2001.942687}, author = {Varshney, Amitabh} } @article {17903, title = {Haptic and aural rendering of a virtual milling process}, journal = {ASME 2001 Design Engineering Technical Conference and Computers and Information in Engineering Conference, Pittsburgh, PA}, year = {2001}, month = {2001///}, pages = {105 - 113}, author = {Chang,C. F and Varshney, Amitabh and Ge,Q. J} } @inbook {17921, title = {Hierarchical image-based and polygon-based rendering for large-scale visualizations}, booktitle = {Hierarchical and Geometrical Methods in Scientific Visualization}, year = {2001}, month = {2001///}, publisher = {Springer}, organization = {Springer}, isbn = {9783540433132}, author = {Chang,C. F and Varshney, Amitabh and Ge,Q. J} } @article {16463, title = {Source Selection and Ranking in the WebSemantics Architecture Using Quality of Data Metadata}, journal = {Advances in Computers}, volume = {55}, year = {2001}, month = {2001///}, pages = {89 - 90}, author = {Mihaila,G. A and Raschid, Louiqa and Vidal,M. E} } @conference {16156, title = {Therapeutic play with a storytelling robot}, booktitle = {CHI {\textquoteright}01 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {27 - 28}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We are developing a prototype storytelling robot for use with children in rehabilitation. Children can remotely control a furry robot by using a variety of body sensors adapted to their disability or rehabilitation goal. We believe this robot can motivate children and help them reach their therapy goals through therapeutic play, either by exercising muscles or joints (e.g. for physically challenged children) or by reflecting on the stories (e.g. for children with developmental disabilities).
To develop this technology we use an innovative design methodology involving children as design partners.}, keywords = {children, design process, rehabilitation, robot, therapeutic play, user interface}, isbn = {1-58113-340-5}, doi = {10.1145/634067.634088}, url = {http://doi.acm.org/10.1145/634067.634088}, author = {Lathan,Corinna and Vice,Jack Maxwell and Tracey,Michael and Plaisant, Catherine and Druin, Allison and Edward,Kris and Montemayor,Jaime} } @conference {17937, title = {Variable-precision rendering}, booktitle = {Proceedings of the 2001 symposium on Interactive 3D graphics}, series = {I3D {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {149 - 158}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {hierarchical rendering, levels of detail, variable-precision rendering, view-dependent rendering}, isbn = {1-58113-292-1}, doi = {10.1145/364338.364384}, url = {http://doi.acm.org/10.1145/364338.364384}, author = {Hao,Xuejun and Varshney, Amitabh} } @article {17945, title = {Web-based interactive design of freeform motions}, journal = {Proceedings of the ASME 2001 Design Engineering Technical Conferences and Computers and Information in Engineering Conference}, year = {2001}, month = {2001///}, author = {Li,ST and Ge,Q. J and Varshney, Amitabh} } @conference {18332, title = {An assumptive logic programming methodology for parsing}, booktitle = {Tools with Artificial Intelligence, 2000. ICTAI 2000. Proceedings. 12th IEEE International Conference on}, year = {2000}, month = {2000///}, pages = {11 - 18}, publisher = {IEEE Comput. Soc}, organization = {IEEE Comput. Soc}, abstract = {We show how several novel tools in logic programming for AI (namely, continuation-based linear and timeless assumptions, and datalog grammars) can assist us in producing terse treatments of difficult language processing phenomena. As a proof of concept, we present a concise parser for datalog grammars (logic grammars where strings are represented with numbered word boundaries rather than as lists of words), that uses assumptions and a combination of left-corner parsing and charting. We then study two test cases of this parser{\textquoteright}s application: complete constituent coordination, and error diagnosis and correction.}, isbn = {0-7695-0909-6}, doi = {10.1109/TAI.2000.889840}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=889840}, author = {Voll,K. and Tom Yeh and Dahl,V.} } @conference {18032, title = {Communication complexity of document exchange}, booktitle = {Proceedings of the eleventh annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {197 - 206}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-453-2}, url = {http://dl.acm.org/citation.cfm?id=338219.338252}, author = {Cormode,Graham and Paterson,Mike and Sahinalp,S{\"u}leyman Cenk and Vishkin, Uzi} } @book {12848, title = {A compositional approach to statecharts semantics}, volume = {25}, year = {2000}, month = {2000///}, publisher = {ACM}, organization = {ACM}, author = {L{\"u}ttgen,G. and Von Der Beeck,M.
and Cleaveland, Rance} } @article {17958, title = {Continuously-adaptive haptic rendering}, journal = {Virtual Environments}, volume = {2000}, year = {2000}, month = {2000///}, pages = {135 - 135}, abstract = {Haptic display with force feedback is often necessary in several virtual environments. To enable haptic rendering of large datasets we introduce Continuously-Adaptive Haptic Rendering, a novel approach to reduce the complexity of the rendered dataset. We construct a continuous, multiresolution hierarchy of the model during the pre-processing, and then at run time we use a high-detail representation for regions around the probe pointer and a coarser representation farther away. We achieve this by using a bell-shaped filter centered at the position of the probe pointer. Using our algorithm we are able to haptically render one to two orders of magnitude larger datasets than otherwise possible. Our approach is orthogonal to previous work on accelerating haptic rendering and thus can be used in combination with those techniques. }, author = {El-Sana,J. and Varshney, Amitabh} } @article {17907, title = {Efficiently computing and updating triangle strips for real-time rendering}, journal = {Computer Aided Design}, volume = {32}, year = {2000}, month = {2000///}, pages = {753 - 772}, abstract = {Triangle strips are a widely used hardware-supported data-structure to compactly represent and efficiently render polygonal meshes. In this paper we survey the efficient generation of triangle strips as well as their variants. We present efficient algorithms for partitioning polygonal meshes into triangle strips. Triangle strips have traditionally used a buffer size of two vertices. In this paper we also study the impact of larger buffer sizes and various queuing disciplines on the effectiveness of triangle strips. View-dependent simplification has emerged as a powerful tool for graphics acceleration in visualization of complex environments. However, in a view-dependent framework the triangle mesh connectivity changes at every frame, making it difficult to use triangle strips. In this paper we present a novel data-structure, Skip Strip, that efficiently maintains triangle strips during such view-dependent changes. A Skip Strip stores the vertex hierarchy nodes in a skip-list-like manner with path compression. We anticipate that Skip Strips will provide a road-map to combine rendering acceleration techniques for static datasets, typical of retained-mode graphics applications, with those for dynamic datasets found in immediate-mode applications. }, author = {El-Sana,J. and Evans,F. and Kalaiah,A. and Varshney, Amitabh and Skiena,S. and Azanli,E.} } @conference {16339, title = {Empirical studies of software engineering: a roadmap}, booktitle = {Proceedings of the conference on The future of Software engineering}, year = {2000}, month = {2000///}, pages = {345 - 355}, author = {Perry,D. E. and Porter, Adam and Votta,L. G.} } @conference {18031, title = {Evaluating multi-threading in the prototype XMT environment}, booktitle = {Proc. 4th Workshop on Multi-Threaded Execution, Architecture and Compilation (MTEAC2000)}, year = {2000}, month = {2000///}, abstract = {XMT is a multi-threaded programming model designed to exploit explicit specification of parallel threads. Its main features are a simple thread execution model and an efficient prefix-sum instruction for synchronizing shared data accesses. This paper presents and evaluates the performance of multi-threading in the XMT programming environment.
A prototype XMT compiler converts parallel regions into procedure calls, which are then executed efficiently in XMT hardware. An architecture simulator similar to SimpleScalar is used to evaluate the performance of the XMT system for twelve benchmark codes. Results show the XMT architecture generally succeeds in providing low-overhead parallel threads and uniform access times on-chip. However, compiler optimizations to cluster (coarsen) threads are still needed for very fine-grained threads. }, author = {Naishlos,D. and Nuzman,J. and Tseng,C. W and Vishkin, Uzi} } @article {18028, title = {Experiments with list ranking for explicit multi-threaded (XMT) instruction parallelism}, journal = {J. Exp. Algorithmics}, volume = {5}, year = {2000}, month = {2000/12//}, abstract = {Algorithms for the problem of list ranking are empirically studied with respect to the Explicit Multi-Threaded (XMT) platform for instruction-level parallelism (ILP). The main goal of this study is to understand the differences between XMT and more traditional parallel computing implementation platforms/models as they pertain to the well-studied list ranking problem. The main two findings are: (i) good speedups for much smaller inputs are possible and (ii) in part, the first finding is based on a new variant of a 1984 algorithm, called the No-Cut algorithm. The paper incorporates analytic (non-asymptotic) performance analysis into experimental performance analysis for relatively small inputs. This provides an interesting example where experimental research and theoretical analysis complement one another. Explicit Multi-Threading (XMT) is a fine-grained computation framework introduced in our SPAA{\textquoteright}98 paper. Building on some key ideas of parallel computing, XMT covers the spectrum from algorithms through architecture to implementation; the main implementation-related innovation in XMT was through the incorporation of low-overhead hardware and software mechanisms (for more effective fine-grained parallelism). The reader is referred to that paper for detail on these mechanisms. The XMT platform aims at faster single-task completion time by way of ILP. }, isbn = {1084-6654}, doi = {10.1145/351827.384252}, url = {http://doi.acm.org/10.1145/351827.384252}, author = {Dascal,S. and Vishkin, Uzi} } @article {14980, title = {Kronos: A software system for the processing and retrieval of large-scale AVHRR data sets}, journal = {PE \& RS- Photogrammetric Engineering and Remote Sensing}, volume = {66}, year = {2000}, month = {2000///}, pages = {1073 - 1082}, abstract = {Raw remotely sensed satellite data have to be processed and mapped into a standard projection in order to produce a multi-temporal data set which can then be used for regional or global Earth science studies. However, traditional methods of processing remotely sensed satellite data have inherent limitations because they are based on a fixed processing chain. Different users may need the data in different forms with possibly different processing steps; hence, additional transformations may have to be applied to the processed data, resulting in potentially significant errors. In this paper, we describe a software system, Kronos, for the generation of custom-tailored products from the Advanced Very High Resolution Radiometer (AVHRR) sensor. It allows the generation of a rich set of products that can be easily specified through a simple interface by scientists wishing to carry out Earth system modeling or analysis.
Kronos is based on a flexible methodology and consists of four major components: ingest and preprocessing, indexing and storage, a search and processing engine, and a Java interface. After geo-location and calibration, every pixel is indexed and stored using a combination of data structures. Following the users{\textquoteright} queries, data are selectively retrieved and secondary processing such as atmospheric correction, compositing, and projection are performed as specified. The processing is divided into two stages, the first of which involves the geo-location and calibration of the remotely sensed data and, hence, results in no loss of information. The second stage involves the retrieval of the appropriate data subsets and the application of the secondary processing specified by the user. This scheme allows the indexing and the storage of data from different sensors without any loss of information and, therefore, allows assimilation of data from multiple sensors. User-specified processing can be applied later as needed. }, author = {Zhang,Z. and JaJa, Joseph F. and Bader, D.A. and Kalluri, SNV and Song,H. and El Saleous,N. and Vermote,E. and Townshend,J.R.G.} } @article {14626, title = {Ligand-Receptor Pairing Via Tree Comparison}, journal = {Journal of Computational Biology}, volume = {7}, year = {2000}, month = {2000/02//}, pages = {59 - 70}, abstract = {This paper introduces a novel class of tree comparison problems strongly motivated by an important and cost-intensive step in the drug discovery pipeline, viz., mapping cell-bound receptors to the ligands they bind and vice versa. Tree comparison studies motivated by problems such as virus-host tree comparison, gene-species tree comparison, and the consensus tree problem have been reported. None of these studies are applicable in our context because in all these problems, there is a well-defined mapping of the nodes the trees are built on across the set of trees being compared. A new class of tree comparison problems arises in cases where finding the correspondence among the nodes of the trees being compared is itself the problem. The problem arises while trying to find the interclass correspondence between the members of a pair of coevolving classes, e.g., cell-bound receptors and their ligands. Given the evolution of the two classes, the combinatorial problem is to find a mapping among the leaves of the two trees that optimizes a given cost function. In this work we formulate various combinatorial optimization problems motivated by the aforementioned biological problem for the first time.
We present hardness results, give an efficient algorithm for a restriction of the problem, and demonstrate its applicability.}, isbn = {1066-5277, 1557-8666}, doi = {10.1089/10665270050081388}, url = {http://www.liebertonline.com/doi/abs/10.1089/10665270050081388}, author = {Bafna,Vineet and Hannenhalli, Sridhar and Rice,Ken and Vawter,Lisa} } @conference {13709, title = {A multi-level approach to interlingual machine translation: defining the interface between representational languages}, booktitle = {Natural language processing and knowledge representation}, year = {2000}, month = {2000///}, pages = {207 - 248}, author = {Dorr, Bonnie J and Voss,C.R.} } @conference {18029, title = {A no-busy-wait balanced tree parallel algorithmic paradigm}, booktitle = {Proceedings of the twelfth annual ACM symposium on Parallel algorithms and architectures}, series = {SPAA {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {147 - 155}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Suppose that a parallel algorithm can include any number of parallel threads. Each thread can proceed without ever having to busy-wait for another thread. A thread can proceed till its termination, but no new threads can be formed. What kind of problems can such restrictive algorithms solve and still be competitive in the total number of operations they perform with the fastest serial algorithm for the same problem? Intrigued by this informal question, we considered one of the most elementary parallel algorithmic paradigms, that of balanced binary trees. The main contribution of this paper is a new balanced (not necessarily binary) tree no-busy-wait paradigm for parallel algorithms; applications of the basic paradigm to two problems are presented: building heaps, and executing parallel tree contraction (assuming a preparatory stage); the latter is known to be applicable to evaluating a family of general arithmetic expressions. For putting things in context, we also discuss our {\textquotedblleft}PRAM-on-chip{\textquotedblright} vision (actually a small update to it), presented at SPAA{\textquoteright}98. }, isbn = {1-58113-185-2}, doi = {10.1145/341800.341818}, url = {http://doi.acm.org/10.1145/341800.341818}, author = {Vishkin, Uzi} } @conference {18030, title = {PRAM-On-Chip Vision}, booktitle = {String Processing and Information Retrieval, 2000. SPIRE 2000. Proceedings.
Seventh International Symposium on}, year = {2000}, month = {2000///}, pages = {260 - 260}, author = {Vishkin, Uzi} } @conference {16163, title = {A storytelling robot for pediatric rehabilitation}, booktitle = {Proceedings of the fourth international ACM conference on Assistive technologies}, series = {Assets {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {50 - 55}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {children, design process, rehabilitation, robot, therapeutic play, user interface}, isbn = {1-58113-313-8}, doi = {10.1145/354324.354338}, url = {http://doi.acm.org/10.1145/354324.354338}, author = {Plaisant, Catherine and Druin, Allison and Lathan,Corinna and Dakhane,Kapil and Edwards,Kris and Vice,Jack Maxwell and Montemayor,Jaime} } @conference {18947, title = {Telicity as a cue to temporal and discourse structure in Chinese-English machine translation}, series = {NAACL-ANLP-Interlinguas {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {34 - 41}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {Machine translation between any two languages requires the generation of information that is implicit in the source language. In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Moreover, Chinese multiple-clause sentences may contain inter-clausal relations (temporal or otherwise) that must be explicit in English (e.g., by means of a discourse marker). Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is not present in every sentence. We report on a project to use the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text to suggest tense and discourse structure in the English translation of a Chinese newspaper corpus.}, doi = {10.3115/1117554.1117559}, url = {http://dx.doi.org/10.3115/1117554.1117559}, author = {Olsen,Mari and Traum,David and Van Ess-Dykema,Carol and Weinberg, Amy and Dolan,Ron} } @conference {16505, title = {Using quality of data metadata for source selection and ranking}, booktitle = {Proceedings of the Third International Workshop on the Web and Databases, WebDB}, year = {2000}, month = {2000///}, pages = {93 - 98}, author = {Mihaila,G. A and Raschid, Louiqa and Vidal,M. E} } @book {17972, title = {Visualization 2000}, year = {2000}, month = {2000///}, publisher = {IEEE Computer Society Press}, organization = {IEEE Computer Society Press}, author = {Ertl,T. and Hamann,B. and Varshney, Amitabh} } @conference {16455, title = {Web Query Optimizer}, booktitle = {Data Engineering, International Conference on}, year = {2000}, month = {2000///}, pages = {661 - 661}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {We demonstrate a Web Query Optimizer (WQO) within an architecture of mediators and wrappers, for WebSources of limited capability in a wide area environment. The WQO has several innovative features including a CBR (capability based rewriting) Tool, an enhanced randomized relational optimizer extended to a Web environment, and a WebWrapper cost model that can provide relevant metrics for accessing WebSources. 
The prototype has been tested against a number of WebSources.}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICDE.2000.839484}, author = {Zadorozhny,Vladimir and Bright,Laura and Raschid, Louiqa and Urhan,Tolga and Vidal,Maria Esther} } @article {18034, title = {Experiments with list ranking for Explicit Multi-Threaded (XMT) instruction parallelism}, journal = {Algorithm Engineering}, year = {1999}, month = {1999///}, pages = {43 - 59}, author = {Dascal,S. and Vishkin, Uzi} } @article {16971, title = {An Exploratory Study of Video Browsing, User Interface Designs and Research Methodologies: Effectiveness in Information Seeking Tasks}, journal = {Proceedings of the Annual Meeting - American Society for Information Science}, volume = {36}, year = {1999}, month = {1999///}, pages = {681 - 692}, abstract = {The purpose of this exploratory study is to develop research methods to compare the effectiveness of two video browsing interface designs, or surrogates, one static (storyboard) and one dynamic (slide show), on two distinct information seeking tasks (gist determination and object recognition). Although video data is multimodal, potentially consisting of images, speech, sound, and text, the surrogates tested depend on image data only and use key frames or stills extracted from source video. A test system was developed to determine the effects of different key frame displays on user performance in specified information seeking tasks. The independent variables were interface display and task type. The dependent variables were task accuracy and subjective satisfaction. Covariates included spatial visual ability and time-to-completion. The study used a repeated block factorial 2{\texttimes}2 design; each of 20 participants interacted with all four interface-task combinations. No statistically significant results for task accuracy were found. Statistically significant differences were found, however, for user satisfaction with the display types: users assessed the static display to be "easier" to use than the dynamic display for both task types, even though there were no performance differences. This methodological approach provides a useful way to learn about the relationship between surrogate types and user tasks during video browsing.}, author = {Tse,T. and Vegh,S. and Shneiderman, Ben and Marchionini,G.} } @article {17912, title = {Generalized View-Dependent Simplification}, journal = {Computer Graphics Forum}, volume = {18}, year = {1999}, month = {1999/09/01/}, pages = {83 - 94}, abstract = {We propose a technique for performing view-dependent geometry and topology simplifications for level-of-detail-based renderings of large models. The algorithm proceeds by preprocessing the input dataset into a binary tree, the view-dependence tree of general vertex-pair collapses. A subset of the Delaunay edges is used to limit the number of vertex pairs considered for topology simplification. Dependencies to avoid mesh foldovers in manifold regions of the input object are stored in the view-dependence tree in an implicit fashion. We have observed that this not only reduces the space requirements by a factor of two, it also highly localizes the memory accesses at run time. The view-dependence tree is used at run time to generate the triangles for display.
We also propose a cubic-spline-based distance metric that can be used to unify the geometry and topology simplifications by considering the vertex positions and normals in an integrated manner.}, keywords = {Interactive Cut, Physically Based Modeling, Runge Kutta Method, Soft Tissue, Surgery Simulation, Tetrahedralization, Virtual Scalpel}, isbn = {1467-8659}, doi = {10.1111/1467-8659.00330}, url = {http://onlinelibrary.wiley.com/doi/10.1111/1467-8659.00330/abstract}, author = {El-Sana,Jihad and Varshney, Amitabh} } @conference {14927, title = {A hierarchical data archiving and processing system to generate custom tailored products from AVHRR data}, booktitle = {Geoscience and Remote Sensing Symposium, 1999. IGARSS {\textquoteright}99 Proceedings. IEEE 1999 International}, volume = {5}, year = {1999}, month = {1999///}, pages = {2374 - 2376 vol.5}, abstract = {A novel indexing scheme is described to catalogue satellite data on a pixel basis. The objective of this research is to develop an efficient methodology to archive, retrieve and process satellite data, so that data products can be generated to meet the specific needs of individual scientists. When requesting data, users can specify the spatial and temporal resolution, geographic projection, choice of atmospheric correction, and the data selection methodology. The data processing is done in two stages. Satellite data is calibrated and navigated, and quality flags are appended in the initial processing. This processed data is then indexed and stored. Secondary processing such as atmospheric correction and projection are done after a user requests the data to create custom-made products. Dividing the processing into two stages saves time, since the basic processing tasks such as navigation and calibration, which are common to all requests, are not repeated when different users request satellite data. The indexing scheme described can be extended to allow fusion of data sets from different sensors.}, keywords = {AVHRR, GIS, PACS, custom tailored data products, hierarchical data archiving, indexing scheme, image processing, infrared imaging, land surface mapping, terrain mapping, multispectral remote sensing, optical sensing, geophysical measurement techniques, geophysical signal processing}, doi = {10.1109/IGARSS.1999.771514}, author = {Kalluri, SNV and Zhang,Z. and JaJa, Joseph F. and Bader, D.A. and Song,H. and El Saleous,N. and Vermote,E. and Townshend,J.R.G.} } @article {17209, title = {Human-centered computing, online communities, and virtual environments}, journal = {IEEE Computer Graphics and Applications}, volume = {19}, year = {1999}, month = {1999/12//Nov}, pages = {70 - 74}, abstract = {This report summarizes results of the first EC/NSF joint Advanced Research Workshop, which identified key research challenges and opportunities in information technology. The group agreed that the first joint research workshop should concentrate on the themes of human-centered computing and VEs. Human-centered computing is perceived as an area of strategic importance because of the move towards greater decentralization and decomposition in the location and provision of computation.
The area of VEs is one where increased collaboration should speed progress in solving some of the more intractable problems in building effective applications.}, keywords = {Books, Collaboration, Collaborative work, Conferences, EC/NSF joint Advanced Research Workshop, Feeds, Human computer interaction, human-centered computing, Internet, Joining materials, Laboratories, Online communities, Research initiatives, USA Councils, User interfaces, Virtual environment, virtual environments, Virtual reality}, isbn = {0272-1716}, doi = {10.1109/38.799742}, author = {Brown,J. R and van Dam,A. and Earnshaw,R. and Encarnacao,J. and Guedj,R. and Preece,J. and Shneiderman, Ben and Vince,J.} } @conference {16167, title = {Refining query previews techniques for data with multivalued attributes: the case of NASA EOSDIS}, booktitle = {Research and Technology Advances in Digital Libraries, 1999. ADL {\textquoteright}99. Proceedings. IEEE Forum on}, year = {1999}, month = {1999///}, pages = {50 - 59}, abstract = {Query Previews allow users to rapidly gain an understanding of the content and scope of a digital data collection. These previews present overviews of abstracted metadata, enabling users to rapidly and dynamically avoid undesired data. We present our recent work on developing query previews for a variety of NASA EOSDIS situations. We focus on approaches that successfully address the challenge of multi-valued attribute data. Memory requirements and processing time associated with running these new solutions remain independent of the number of records in the dataset. We describe two techniques and their respective prototypes used to preview NASA Earth science data.}, keywords = {NASA EOSDIS, query previews, multivalued attribute data, abstracted metadata, digital data collection, digital libraries, Earth science data, memory requirements, processing time, geophysics computing}, doi = {10.1109/ADL.1999.777690}, author = {Plaisant, Catherine and Venkatraman,M. and Ngamkajorwiwat,K. and Barth,R. and Harberts,B. and Feng,Wenlan} } @conference {17962, title = {Skip Strips: maintaining triangle strips for view-dependent rendering}, booktitle = {Visualization {\textquoteright}99. Proceedings}, year = {1999}, month = {1999/10//}, pages = {131 - 518}, abstract = {View-dependent simplification has emerged as a powerful tool for graphics acceleration in visualization of complex environments. However, view-dependent simplification techniques have not been able to take full advantage of the underlying graphics hardware. Specifically, triangle strips are a widely used hardware-supported mechanism to compactly represent and efficiently render static triangle meshes. However, in a view-dependent framework, the triangle mesh connectivity changes at every frame, making it difficult to use triangle strips. We present a novel data structure, Skip Strip, that efficiently maintains triangle strips during such view-dependent changes. A Skip Strip stores the vertex hierarchy nodes in a skip-list-like manner with path compression.
We anticipate that Skip Strips will provide a road map to combine rendering acceleration techniques for static datasets, typical of retained-mode graphics applications, with those for dynamic datasets found in immediate-mode applications.}, keywords = {Skip Strips, triangle strips, triangle mesh connectivity, vertex hierarchy nodes, path compression, view-dependent rendering, view-dependent simplification, graphics acceleration, graphics hardware, hardware-supported mechanisms, static datasets, dynamic datasets, retained-mode graphics applications, immediate-mode applications, complex environments, spatial data structures, visualization, computer graphics equipment}, doi = {10.1109/VISUAL.1999.809877}, author = {El-Sana,J. and Azanli,E. and Varshney, Amitabh} } @article {18033, title = {Trade-offs between communication throughput and parallel time}, journal = {Journal of Complexity}, volume = {15}, year = {1999}, month = {1999///}, pages = {148 - 166}, author = {Mansour,Y. and Nisan,N. and Vishkin, Uzi} } @article {17957, title = {View-dependent topology simplification}, journal = {Virtual Environments}, volume = {99}, year = {1999}, month = {1999///}, pages = {11 - 22}, abstract = {We propose a technique for performing view-dependent simplifications for level-of-detail-based renderings of complex models. Our method is based on exploiting frame-to-frame coherence and is tolerant of various commonly found degeneracies in real-life polygonal models. The algorithm proceeds by preprocessing the input dataset into a binary tree of vertex collapses. This tree is used at run time to generate the triangles for display. Dependencies to avoid mesh foldovers in manifold regions of the input object are stored in the tree in an implicit fashion. This obviates the need for any extra storage for dependency pointers and suggests a potential for application to external memory prefetching algorithms. We also propose a distance metric that can be used to unify the geometry and genus simplifications with the view-dependent parameters such as viewpoint, view-frustum, and local illumination. }, author = {El-Sana,J. and Varshney, Amitabh} } @article {18149, title = {XMT-M: A Scalable Decentralized Processor}, volume = {UMIACS-TR-99-55}, year = {1999}, month = {1999/10/09/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {A defining challenge for research in computer science and engineering has been the ongoing quest for reducing the completion time of a single computation task. Even outside the parallel processing communities, there is little doubt that the key to further progress in this quest is to do parallel processing of some kind. A recently proposed parallel processing framework that spans the entire spectrum from (parallel) algorithms to architecture to implementation is the explicit multi-threading (XMT) framework. This framework provides: (i) simple and natural parallel algorithms for essentially every general-purpose application, including notoriously difficult irregular integer applications, and (ii) a multi-threaded programming model for these algorithms which allows an {\textquoteleft}{\textquoteleft}independence-of-order{\textquoteright}{\textquoteright} semantics: every thread can proceed at its own speed, independent of other concurrent threads.
To the extent possible, the XMT framework uses established ideas in parallel processing. This paper presents XMT-M, a microarchitecture implementation of the XMT model that is possible with current technology. XMT-M offers an engineering design point that addresses four concerns: buildability, programmability, performance, and scalability. The XMT-M hardware is geared to execute multiple threads in parallel on a single chip: relying on very few new gadgets, it can execute parallel threads without busy-waits! Existing code can be run on XMT-M as a single thread without any modifications, thereby providing backward compatibility for commercial acceptance. Simulation-based studies of XMT-M demonstrate considerable improvements in performance relative to the best serial processor even for small, and therefore practical, input sizes. (Also cross-referenced as UMIACS-TR-99-55) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/1030}, author = {Berkovich,Efraim and Nuzman,Joseph and Franklin,Manoj and Jacob,Bruce and Vishkin, Uzi} } @inbook {14202, title = {Beyond the Epipolar Constraint: Integrating 3D Motion and Structure Estimation}, booktitle = {3D Structure from Multiple Images of Large-Scale Environments}, series = {Lecture Notes in Computer Science}, volume = {1506}, year = {1998}, month = {1998///}, pages = {109 - 123}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper develops a novel solution to the problem of recovering the structure of a scene given an uncalibrated video sequence depicting the scene. The essence of the technique lies in a method for recovering the rigid transformation between the different views in the image sequence. Knowledge of this 3D motion allows for self-calibration and for subsequent recovery of 3D structure. The introduced method breaks away from applying only the traditionally used epipolar constraint and introduces a new constraint based on the interaction between 3D motion and shape. Up to now, structure from motion algorithms proceeded in two well-defined steps, where the first and most important step is recovering the rigid transformation between two views, and the subsequent step is using this transformation to compute the structure of the scene in view. Here both aforementioned steps are accomplished in a synergistic manner. Existing approaches to 3D motion estimation are mostly based on the use of optic flow, which however poses a problem at the locations of depth discontinuities. If we knew where depth discontinuities were, we could (using a multitude of approaches based on smoothness constraints) accurately estimate flow values for image patches corresponding to smooth scene patches; but to know the discontinuities requires solving the structure from motion problem first. In the past this dilemma has been addressed by improving the estimation of flow through sophisticated optimization techniques, whose performance often depends on the scene in view. In this paper the main idea is based on the interaction between 3D motion and shape, which allows us to estimate the 3D motion while at the same time segmenting the scene. If we use a wrong 3D motion estimate to compute depth, then we obtain a distorted version of the depth function.
The distortion, however, is such that the worse the motion estimate, the more likely we are to obtain depth estimates that are locally unsmooth, i.e., they vary more than the correct ones. Since local variability of depth is due either to the existence of a discontinuity or to a wrong 3D motion estimate, being able to differentiate between these two cases provides the correct motion, which yields the {\textquotedblleft}smoothest{\textquotedblright} estimated depth as well as the image locations of scene discontinuities. Although no optic flow values are computed, we show that our algorithm is very much related to minimizing the epipolar constraint when the scene in view is smooth. When, however, the imaged scene is not smooth, the introduced constraint has in general different properties from the epipolar constraint, and we present experimental results with real sequences where it performs better.}, isbn = {978-3-540-65310-3}, url = {http://dx.doi.org/10.1007/3-540-49437-5_8}, author = {Brodsk{\'y},Tom{\'a}{\v s} and Ferm{\"u}ller, Cornelia and Aloimonos, J.}, editor = {Koch,Reinhard and Van Gool,Luc} } @article {16343, title = {Comparing detection methods for software requirements inspections: A replication using professional subjects}, journal = {Empirical Software Engineering}, volume = {3}, year = {1998}, month = {1998///}, pages = {355 - 379}, author = {Porter, Adam and Votta,L.} } @article {17946, title = {Double quaternions for motion interpolation}, journal = {Proceedings of the ASME Design Engineering Technical Conference}, year = {1998}, month = {1998///}, abstract = {This paper describes the concept of double quaternions, an extension of quaternions, and shows how they can be used for effective three-dimensional motion interpolation. Motion interpolation using double quaternions has several advantages over the method of interpolating rotation and translation independently and then combining the results. First, double quaternions provide a conceptual framework that allows one to handle rotational and translational components in a unified manner. Second, results obtained by using double quaternions are coordinate frame invariant. Third, double quaternions allow a natural way to trade off robustness against accuracy. Fourth, double quaternions, being a straightforward extension of quaternions, can be integrated into several existing systems that currently use quaternions with translational components, with only a small coding effort. }, author = {Ge,Q. J and Varshney, Amitabh and Menon,J. P and Chang,C. F} } @conference {18036, title = {Explicit Multi-Threading (XMT) bridging models for instruction parallelism (extended abstract)}, booktitle = {Proceedings of the tenth annual ACM symposium on Parallel algorithms and architectures}, year = {1998}, month = {1998///}, pages = {140 - 151}, author = {Vishkin, Uzi and Dascal,S. and Berkovich,E.
and Nuzman,J.} } @article {20053, title = {Genetic nomenclature for Trypanosoma and Leishmania.}, journal = {Mol Biochem Parasitol}, volume = {97}, year = {1998}, month = {1998 Nov 30}, pages = {221-4}, keywords = {Animals, Leishmania, Terminology as Topic, Trypanosoma}, issn = {0166-6851}, author = {Clayton, C and Adams, M and Almeida, R and Baltz, T and Barrett, M and Bastien, P and Belli, S and Beverley, S and Biteau, N and Blackwell, J and Blaineau, C and Boshart, M and Bringaud, F and Cross, G and Cruz, A and Degrave, W and Donelson, J and El-Sayed, N and Fu, G and Ersfeld, K and Gibson, W and Gull, K and Ivens, A and Kelly, J and Vanhamme, L} } @inbook {13815, title = {Lexical Allocation in Interlingua-Based Machine Translation of Spatial Expressions}, booktitle = {Representation and processing of spatial expressions}, year = {1998}, month = {1998///}, pages = {125 - 125}, author = {Dorr, Bonnie J and Voss,C.R. and Sencan,M.U.} } @article {18147, title = {Looking to Parallel Algorithms for ILP and Decentralization}, volume = {CS-TR-3921}, year = {1998}, month = {1998/10/15/}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {We introduce explicit multi-threading (XMT), a decentralized architecture that exploits fine-grained SPMD-style programming; an SPMD program can translate directly to MIPS assembly language using three additional instruction primitives. The motivation for XMT is: (i) to define an inherently decentralizable architecture, taking into account that the performance of future integrated circuits will be dominated by wire costs, (ii) to increase available instruction-level parallelism (ILP) by leveraging expertise in the world of parallel algorithms, and (iii) to reduce hardware complexity by alleviating the need to detect ILP at run-time: if parallel algorithms can give us an overabundance of work to do in the form of thread-level parallelism, one can extract instruction-level parallelism with greatly simplified dependence-checking. We show that implementations of such an architecture tend towards decentralization and that, when global communication is necessary, overall performance is relatively insensitive to large on-chip delays. We compare the performance of the design to more traditional parallel architectures and to a high-performance superscalar implementation, but the intent is merely to illustrate the performance behavior of the organization and to stimulate debate on the viability of introducing SPMD to the single-chip processor domain. We cannot offer at this stage hard comparisons with well-researched models of execution. When programming for the SPMD model, the total number of operations that the processor has to perform is often slightly higher. To counter this, we have observed that the length of the critical path through the dynamic execution graph is smaller than in the serial domain, and the amount of ILP is correspondingly larger. Fine-grained SPMD programming connects with a broad knowledge base in parallel algorithms and scales down to provide good performance relative to high-performance superscalar designs even with small input sizes and small numbers of functional units. Keywords: Fine-grained SPMD, parallel algorithms, spawn-join, prefix-sum, instruction-level parallelism, decentralized architecture.
(Also cross-referenced as UMIACS-TR-98-40) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/496}, author = {Berkovich,Efraim and Jacob,Bruce and Nuzman,Joseph and Vishkin, Uzi} } @conference {16478, title = {A Meta-Wrapper for Scaling up to Multiple Autonomous Distributed Information Sources}, booktitle = {Cooperative Information Systems, IFCIS International Conference on}, year = {1998}, month = {1998///}, pages = {148 - 148}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Current mediator and wrapper architectures do not have the flexibility to scale to multiple wrapped sources, where some sources may be redundant, and some sources may provide incomplete answers to a query. We propose a meta-wrapper component which is capable of handling multiple wrapped sources, in a particular domain, where the multiple sources provide related information. The meta-wrapper makes these sources transparent to the mediator, and provides a single meta-wrapper interface for all these sources. Source descriptions specify the content and query capability of the sources. These are used to determine the meta-wrapper interface and to decide which queries from a mediator can be accepted. Sources are partitioned into equivalence classes, based on their descriptions. These equivalence classes are partially ordered, and the lattices that correspond to these orderings are used to identify the relevant sources for a query submitted by the mediator. If there is redundancy of the sources, the meta-wrapper identifies alternate sources for the query. A meta-wrapper cost model is then used to select among alternate relevant sources and choose the best plan.}, keywords = {information resource discovery, integration and interoperability, metadata use and management, wrappers and mediators}, isbn = {0-8186-8380-5}, doi = {http://doi.ieeecomputersociety.org/10.1109/COOPIS.1998.706193}, author = {Vidal,Maria Esther and Raschid, Louiqa and Gruser,Jean Robert} } @conference {16450, title = {Optimization of wrappers and mediators for web accessible data sources (websources)}, booktitle = {Workshop Web Inf. Data Management (WIDM), Washington DC}, year = {1998}, month = {1998///}, author = {Bright,L. and Raschid, Louiqa and Vidal,M. E} } @article {18148, title = {Project for Developing Computer Science Agenda(s) for High-Performance Computing: An Organizer{\textquoteright}s Summary}, volume = {UMIACS-TR-94-129}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Designing a coherent agenda for the implementation of the High-Performance Computing (HPC) program is a nontrivial technical challenge. Many computer science and engineering researchers in the area of HPC, who are affiliated with U.S. institutions, have been invited to contribute their agendas. We have made a considerable effort to give many in that research community the opportunity to write a position paper. This explains why we view the project as placing a mirror in front of the community, and hope that the mirror indeed reflects many of the opinions on the topic. The current paper is an organizer{\textquoteright}s summary and represents his reading of the position papers. This summary is his sole responsibility. It is respectfully submitted to the NSF.
(Also cross-referenced as UMIACS-TR-94-129) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/677}, author = {Vishkin, Uzi} } @article {17733, title = {Rounding Errors in Solving Block Hessenberg Systems}, volume = {UMIACS-TR-94-105}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {A rounding error analysis is presented for a divide-and-conquer algorithm to solve linear systems with block Hessenberg matrices. Conditions are derived under which the algorithm computes a backward stable solution. The algorithm is shown to be stable for diagonally dominant matrices and for M-matrices. (Also cross-referenced as UMIACS-TR-94-105) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/661}, author = {Von Matt,Urs and Stewart, G.W.} } @article {17951, title = {Salient Frame Detection for Molecular Dynamics Simulations}, journal = {Scientific Visualization: Interactions, Features, Metaphors}, volume = {2}, year = {1998}, month = {1998///}, pages = {160 - 175}, abstract = {Saliency-based analysis can be applied to time-varying 3D datasets for the purpose of summarization, abstraction, and motion analysis. As the sizes of time-varying datasets continue to grow, it becomes more and more difficult to comprehend vast amounts of data and information in a short period of time. Automatically generated thumbnail images and previewing of time-varying datasets can help viewers explore and understand the datasets significantly faster as well as provide new insights. In this paper, we introduce a novel method for detecting salient frames for molecular dynamics simulations. Our method effectively detects crucial transitions in a simulated mechanosensitive ion channel (MscS), in agreement with experimental data.}, author = {Kim,Y. and Patro,R. and Ip,C. Y and O{\textquoteright}Leary,D. P and Anishkin,A. and Sukharev,S. and Varshney, Amitabh} } @article {16337, title = {Specification-based Testing of Reactive Software: A Case Study in Technology Transfer}, journal = {Journal of Systems and Software}, volume = {40}, year = {1998}, month = {1998///}, pages = {249 - 262}, author = {Jategaonkar Jagadeesan,L. and Porter, Adam and Puchol,C. and Ramming,J. C and Votta,L. G.} } @article {17963, title = {Topology simplification for polygonal virtual environments}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {4}, year = {1998}, month = {1998/06//apr}, pages = {133 - 144}, abstract = {We present a topology simplifying approach that can be used for genus reductions, removal of protuberances, and repair of cracks in polygonal models in a unified framework. Our work is complementary to the existing work on geometry simplification of polygonal datasets and we demonstrate that using topology and geometry simplifications together yields superior multiresolution hierarchies than is possible by using either of them alone. Our approach can also address the important issue of repair of cracks in polygonal models, as well as for rapid identification and removal of protuberances based on internal accessibility in polygonal models. Our approach is based on identifying holes and cracks by extending the concept of $\alpha$-shapes to polygonal meshes under the $L_\infty$ distance metric.
We then generate valid triangulations to fill them using the intuitive notion of sweeping an $L_\infty$ cube over the identified regions}, keywords = {topology simplification, geometry simplification, topology simplifying approach, genus reductions, protuberance removal, internal accessibility, multiresolution hierarchies, polygonal models, polygonal meshes, polygonal datasets, $\alpha$-shapes, $L_\infty$ distance metric, $L_\infty$ cube, triangulations, unified framework, computational geometry, topology, virtual reality, virtual environments}, isbn = {1077-2626}, doi = {10.1109/2945.694955}, author = {El-Sana,J. and Varshney, Amitabh} } @article {16177, title = {User Interface Reengineering: A Diagnostic Approach}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {User interface technology has advanced rapidly in recent years. Incorporating new developments in existing systems could result in substantial improvements in usability, thereby improving performance and user satisfaction, while shortening training and reducing error rates. Our focus is on low-effort high-payoff improvements to aspects such as data display and entry, consistency, messages, documentation, and system access. This paper provides guidelines for managers and designers responsible for user interface reengineering, based on the experience we gained from six projects, and compiles our observations, recommendations and outcomes. (Also cross-referenced as CAR-TR-767) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/430}, author = {Vanniamparampil,Ajit J and Shneiderman, Ben and Plaisant, Catherine and Rose,Anne} } @article {17914, title = {Walkthroughs of complex environments using image-based simplification}, journal = {Computers \& Graphics}, volume = {22}, year = {1998}, month = {1998/02/25/}, pages = {55 - 69}, abstract = {We present an image-based technique to accelerate the navigation in complex static environments. We perform an image-space simplification of each sample of the scene taken at a particular viewpoint and dynamically combine these simplified samples to produce images for arbitrary viewpoints. Since the scene is converted into a bounded complexity representation in the image space, with the base images rendered beforehand, the rendering speed is relatively insensitive to the complexity of the scene. The proposed method correctly simulates the kinetic depth effect (parallax), occlusion, and can resolve the missing visibility information. This paper describes a suitable representation for the samples, a specific technique for simplifying them, and different morphing methods for combining the sample information to reconstruct the scene. We use hardware texture mapping to implement the image-space warping and hardware affine transformations to compute the view-dependent warping function.}, keywords = {image-based rendering, image morphing, virtual walkthroughs, visibility, image compositing}, isbn = {0097-8493}, doi = {10.1016/S0097-8493(97)00083-6}, url = {http://www.sciencedirect.com/science/article/pii/S0097849397000836}, author = {Darsa,L. and Costa,B. and Varshney, Amitabh} } @conference {16499, title = {Wrapper generation for Web accessible data sources}, booktitle = {3rd IFCIS International Conference on Cooperative Information Systems, 1998.
Proceedings}, year = {1998}, month = {1998/08/22/22}, pages = {14 - 23}, publisher = {IEEE}, organization = {IEEE}, abstract = {There is an increase in the number of data sources that can be queried across the WWW. Such sources typically support HTML forms-based interfaces and search engines query collections of suitably indexed data. The data is displayed via a browser. One drawback to these sources is that there is no standard programming interface suitable for applications to submit queries. Second, the output (answer to a query) is not well structured. Structured objects have to be extracted from the HTML documents which contain irrelevant data and which may be volatile. Third, domain knowledge about the data source is also embedded in HTML documents and must be extracted. To solve these problems, we present technology to define and (automatically) generate wrappers for Web accessible sources. Our contributions are as follows: (1) Defining a wrapper interface to specify the capability of Web accessible data sources. (2) Developing a wrapper generation toolkit of graphical interfaces and specification languages to specify the capability of sources and the functionality of the wrapper. (3) Developing the technology to automatically generate a wrapper appropriate to the Web accessible source, from the specifications.}, keywords = {application program interfaces, data mining, Databases, Educational institutions, Electrical capacitance tomography, HTML, HTML documents, Internet, Query processing, Read only memory, Search engines, Specification languages, Uniform resource locators, World Wide Web, wrapper generation toolkit, WWW}, isbn = {0-8186-8380-5}, doi = {10.1109/COOPIS.1998.706180}, author = {Gruser,J. and Raschid, Louiqa and Vidal,M. E and Bright,L.} } @article {17969, title = {Adaptive real-time level-of-detail based rendering for polygonal models}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {3}, year = {1997}, month = {1997/06//apr}, pages = {171 - 183}, abstract = {We present an algorithm for performing adaptive real-time level-of-detail-based rendering for triangulated polygonal models. The simplifications are dependent on viewing direction, lighting, and visibility and are performed by taking advantage of image-space, object-space, and frame-to-frame coherences. In contrast to the traditional approaches of precomputing a fixed number of level-of-detail representations for a given object, our approach involves statically generating a continuous level-of-detail representation for the object. This representation is then used at run time to guide the selection of appropriate triangles for display. The list of displayed triangles is updated incrementally from one frame to the next. Our approach is more effective than the current level-of-detail-based rendering approaches for most scientific visualization applications, where there are a limited number of highly complex objects that stay relatively close to the viewer.
Our approach is applicable for scalar (such as distance from the viewer) as well as vector (such as normal direction) attributes.}, keywords = {adaptive real-time level-of-detail rendering, triangulated polygonal models, view-dependent simplifications, viewing direction, visibility, selective refinement, displayed triangles, triangle hierarchies, frame-to-frame coherence, image-space coherence, object-space coherence, lighting, multiresolution hierarchies, run time, scalar attributes, vector attributes, scientific visualization, computational geometry, data visualisation, real-time systems, rendering (computer graphics)}, isbn = {1077-2626}, doi = {10.1109/2945.597799}, author = {Xia,J. C and El-Sana,J. and Varshney, Amitabh} } @conference {16296, title = {Anywhere, Anytime Code Inspections: Using the Web to Remove Inspection Bottlenecks in Large-Scale Software Development}, booktitle = {Software Engineering, International Conference on}, year = {1997}, month = {1997///}, pages = {14 - 14}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {The dissemination of critical information and the synchronization of coordinated activities are critical problems in geographically separated, large-scale, software development. While these problems are not insurmountable, their solutions have varying trade-offs in terms of time, cost and effectiveness. Our previous studies have shown that the inspection interval is typically lengthened because of schedule conflicts among inspectors which delay the (usually) required inspection collection meeting. We present and justify a solution using an intranet web that is both timely in its dissemination of information and effective in its coordination of distributed inspectors. First, exploiting a naturally occurring experiment (reported here), we conclude that the asynchronous collection of inspection results is at least as effective as the synchronous collection of those results. Second, exploiting the information dissemination qualities and the on-demand nature of information retrieval of the web, and the platform independence of browsers, we built an inexpensive tool that integrates seamlessly into the current development process. By seamless we mean an identical paper flow that results in an almost identical inspection process. The acceptance of the inspection tool has been excellent. The cost savings just from the reduction in paper work and the time savings from the reduction in distribution interval of the inspection package (sometimes involving international mailings) have been substantial. These savings together with the seamless integration into the existing environment are the major factors for this acceptance. From our viewpoint as experimentalists, the acceptance came too readily. Therefore we lost our opportunity to explore this tool using a series of controlled experiments to isolate the underlying factors of its effectiveness. Nevertheless, by using historical data we can show that the new process is less expensive in terms of cost and at least as effective in terms of quality (defect detection effectiveness).}, keywords = {asynchronous inspections, naturally occurring inspection experiment, automated support for inspections, web-based meetingless code inspections}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICSE.1997.610188}, author = {Perpich,J. M. and Perry,D. E. and Porter, Adam and Votta,L. G. and Wade,M.
W.} } @article {17949, title = {Collabcad: A toolkit for integrated synchronous and asynchronous sharing of cad applications}, journal = {Proceedings TeamCAD: GVU/NIST Workshop on Collaborative Design, Atlanta, GA, USA}, year = {1997}, month = {1997///}, pages = {131 - 137}, abstract = {We are developing CollabCAD, a novel software architecture and toolkit, that supports sharing of arbitrary user-defined objects or applications over intranets and the internet. Developers can use CollabCAD to rapidly re-engineer existing CAD applications to be collaboration-capable or build new collaboration-capable CAD applications. CollabCAD provides the following functionalities: 1. Support for flexible forms of sharing in which users can interact with multiple presentations of CAD applications or objects. 2. Asynchronous and synchronous sharing, as well as a mixture of both. 3. Effective communication between users by supporting direct manipulation of user-defined objects, and a new form of sharing called para-synchronous sharing. 4. Support for asymmetric collaborations between users with (1) different roles, or (2) systems with different input-output, hardware and network capabilities. We plan to test and evaluate CollabCAD by building collaboration systems in several CAD-oriented application areas: these include virtual reality for mechanical, architectural and molecular CAD, and volume visualization and volume graphics for CAD applications.}, author = {Mishra,P. and Varshney, Amitabh and Kaufman,A.} } @conference {17961, title = {Controlled simplification of genus for polygonal models}, booktitle = {Visualization {\textquoteright}97, Proceedings}, year = {1997}, month = {1997/10//}, pages = {403 - 410}, abstract = {Genus-reducing simplifications are important in constructing multiresolution hierarchies for level-of-detail-based rendering, especially for datasets that have several relatively small holes, tunnels, and cavities. We present a genus-reducing simplification approach that is complementary to the existing work on genus-preserving simplifications. We propose a simplification framework in which genus-reducing and genus-preserving simplifications alternate to yield much better multiresolution hierarchies than would have been possible by using either one of them. In our approach we first identify the holes and the concavities by extending the concept of $\alpha$-hulls to polygonal meshes under the $L_\infty$ distance metric and then generate valid triangulations to fill them.}, keywords = {controlled simplification, genus-preserving simplifications, genus-reducing simplifications, level-of-detail-based rendering, multiresolution hierarchies, object representations, polygonal meshes, polygonal models, simplification framework, small holes, tunnels, cavities, triangulations, datasets, $\alpha$-hulls, $L_\infty$ distance metric, computational geometry, data visualisation, rendering (computer graphics)}, doi = {10.1109/VISUAL.1997.663909}, author = {El-Sana,J. and Varshney, Amitabh} } @article {17901, title = {Efficient triangle strips for fast rendering}, journal = {ACM Transactions on Graphics}, year = {1997}, month = {1997///}, author = {Evans,F. and Skiena,S.
and Varshney, Amitabh} } @article {17934, title = {Enabling virtual reality for large-scale mechanical CAD datasets}, journal = {Proceedings of ASME Design Engineering Technical Conferences, Sept}, volume = {14}, year = {1997}, month = {1997///}, pages = {17 - 17}, abstract = {Reconciling scene realism with interactivity has emerged as one of the most important areas in making virtual reality feasible for large-scale mechanical CAD datasets consisting of several millions of primitives. This paper surveys our research and related work for achieving interactivity without sacrificing realism in virtual reality walkthroughs and flythroughs of polygonal CAD datasets. We outline our recent work on efficient generation of triangle strips from polygonal models that takes advantage of compression of connectivity information. This results in substantial savings in rendering, transmission, and storage. We outline our work on genus-reducing simplifications as well as real-time view-dependent simplifications that allow on-the-fly selection amongst multiple levels of detail, based upon lighting and viewing parameters. Our method allows multiple levels of detail to coexist on the same object at different regions and to merge seamlessly without any cracks or shading artifacts. We also present an overview of our work on hardware-assisted image-based rendering that allows interactive exploration of computer-generated scenes.}, author = {Varshney, Amitabh and El-Sana,J. and Evans,F. and Darsa,L. and Costa,B. and Skiena,S.} } @article {16297, title = {An experiment to assess the cost-benefits of code inspections in large scale software development}, journal = {IEEE Transactions on Software Engineering}, volume = {23}, year = {1997}, month = {1997/06//}, pages = {329 - 346}, abstract = {We conducted a long term experiment to compare the costs and benefits of several different software inspection methods. These methods were applied by professional developers to a commercial software product they were creating. Because the laboratory for this experiment was a live development effort, we took special care to minimize cost and risk to the project, while maximizing our ability to gather useful data. The article has several goals: (1) to describe the experiment{\textquoteright}s design and show how we used simulation techniques to optimize it; (2) to present our results and discuss their implications for both software practitioners and researchers; and (3) to discuss several new questions raised by our findings. For each inspection, we randomly assigned three independent variables: (1) the number of reviewers on each inspection team (1, 2, or 4); (2) the number of teams inspecting the code unit (1 or 2); and (3) the requirement that defects be repaired between the first and second team{\textquoteright}s inspections. The reviewers for each inspection were randomly selected without replacement from a pool of 11 experienced software developers. The dependent variables for each inspection included inspection interval (elapsed time), total effort, and the defect detection rate.
Our results showed that these treatments did not significantly influence the defect detection effectiveness, but that certain combinations of changes dramatically increased the inspection interval.}, keywords = {Analysis of variance, code inspection cost benefits, code unit, commercial software product, Computer Society, cost-benefit analysis, Costs, defect detection effectiveness, defect detection rate, Design optimization, experienced software developers, experiment design, independent variables, Inspection, inspection interval, inspection team, Laboratories, large scale software development, Large-scale systems, live development effort, long term experiment, professional aspects, professional developers, Programming, reviewers, simulation techniques, software cost estimation, software inspection methods, software practitioners, Software quality, Switches}, isbn = {0098-5589}, doi = {10.1109/32.601071}, author = {Porter, Adam and Siy,H. P and Toman,C. A and Votta,L. G.} } @conference {18037, title = {From algorithm parallelism to instruction-level parallelism: An encode-decode chain using prefix-sum}, booktitle = {Proceedings of the ninth annual ACM symposium on Parallel algorithms and architectures}, year = {1997}, month = {1997///}, pages = {260 - 271}, author = {Vishkin, Uzi} } @article {17283, title = {Low-effort, high-payoff user interface reengineering}, journal = {IEEE Software}, volume = {14}, year = {1997}, month = {1997/08//Jul}, pages = {66 - 72}, abstract = {Although increasingly sophisticated design methodologies for developing new user interfaces exist, low-effort, high-payoff user interface reengineering represents a new direction and opportunity. Yet reengineering a working system is complex and risky because of the potential disruption to users and managers, their justifiable fear of change, and the lack of guarantees that such changes will be for the better. Our largely positive experiences with the projects described here lead us to believe that user interface reengineering is a viable and important process. Low effort, high-payoff improvement recommendations can probably be made for most existing systems. Nevertheless, a narrowly focused user interface reengineering plan may be inappropriate when the major problems lie outside the scope of the user interface, such as inadequate functionalities, frequent crashes, and network problems. Attempts at improving less severe problems while ignoring deeper ones may be perceived as insensitive by the users. In such cases it is important to consider either making similar short-term improvements for other parts of the systems or postponing short-term user interface reengineering in favour of a more complete system reengineering. Similarly, the need for interface stability might outweigh the benefits of the short-term improvements if a complete reengineering is planned for the near future.
But most likely these proposed diagnostic strategies and opportunities for improvement are only a prelude to the much larger task of business reengineering, which implies extensive user interface reengineering.}, keywords = {Business process re-engineering, complete system reengineering, Design methodology, Error analysis, Hardware, inadequate functionalities, interface stability, iterative methods, low-effort high-payoff user interface reengineering, short-term improvements, short-term user interface reengineering, software engineering, Software testing, System analysis and design, System testing, systems re-engineering, User centered design, user centred design, User interfaces}, isbn = {0740-7459}, doi = {10.1109/52.595958}, author = {Plaisant, Catherine and Rose,A. and Shneiderman, Ben and Vanniamparampil,A. J} } @article {17954, title = {Multiresolution surface modeling}, journal = {SIGGRAPH 1997 Course Notes}, year = {1997}, month = {1997///}, author = {Heckbert,P. and Rossignac,J. and Hoppe,H. and Schroeder,W. and Soucy,M. and Varshney, Amitabh} } @conference {17936, title = {Navigating static environments using image-space simplification and morphing}, booktitle = {Proceedings of the 1997 symposium on Interactive 3D graphics}, series = {I3D {\textquoteright}97}, year = {1997}, month = {1997///}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-884-3}, doi = {10.1145/253284.253298}, url = {http://doi.acm.org/10.1145/253284.253298}, author = {Darsa,Lucia and Costa Silva,Bruno and Varshney, Amitabh} } @conference {16336, title = {Specification-based Testing of Reactive Software: Tools and Experiments}, booktitle = {Software Engineering, International Conference on}, year = {1997}, month = {1997///}, pages = {525 - 525}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Testing commercial software is expensive and time consuming. Automated testing methods promise to save a great deal of time and money throughout the software industry. One approach that is well-suited for the reactive systems found in telephone switching systems is specification-based testing. We have built a set of tools to automatically test software applications for violations of safety properties expressed in temporal logic. Our testing system automatically constructs finite state machine oracles corresponding to safety properties, builds test harnesses, and integrates them with the application. The test harness then generates inputs automatically to test the application. We describe a study examining the feasibility of this approach for testing industrial applications. To conduct this study we formally modeled an Automatic Protection Switching system (APS), which is an application common to many telephony systems. We then asked a number of computer science graduate students to develop several versions of the APS and use our tools to test them. We found that the tools are very effective, save significant amounts of human effort (at the expense of machine resources), and are easy to use. We also discuss improvements that are needed before we can use the tools with professional developers building commercial products.}, keywords = {empirical studies, reactive systems, specification-based testing, temporal logic}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICSE.1997.610373}, author = {Jagadeesan,Lalita Jategaonkar and Porter, Adam and Puchol,Carlos and Ramming,J.
Christopher and Votta,Lawrence G.} } @article {16197, title = {User Interface Reengineering: Low-Effort, High-Payoff Strategies}, journal = {IEEE Software}, volume = {14}, year = {1997}, month = {1997///}, pages = {66 - 72}, abstract = {User interface technology has advanced rapidly in recent years. Incorporating new developments in existing systems could result in substantial improvements in usability, thereby improving performance and user satisfaction, while shortening training and reducing error rates. We describe low-effort, high-payoff strategies that focus attention on improvements to data display and entry, consistency, messages, documentation, system access and additional functionality. We report on experience from six projects, describing observations, recommendations and outcomes. We close with guidance for managers and designers who are responsible for user interface reengineering.}, author = {Plaisant, Catherine and Rose,A. and Shneiderman, Ben and Vanniamparampil,A. J} } @article {12177, title = {A zooming web browser}, journal = {Human Factors in Web Development}, year = {1997}, month = {1997///}, author = {Bederson, Benjamin B. and Hollan,J.D. and Stewart,J. and Rogers,D. and Druin, Allison and Vick,D. and Ring,L. and Grose,E. and Forsythe,C.} } @conference {16460, title = {Answering queries using OQL view expressions}, booktitle = {In Workshop on Materialized Views, in cooperation with ACM SIGMOD}, year = {1996}, month = {1996///}, author = {Florescu,D. and Raschid, Louiqa and Valduriez,P.} } @article {18039, title = {Can parallel algorithms enhance serial implementation?}, journal = {Communications of the ACM}, volume = {39}, year = {1996}, month = {1996///}, pages = {88 - 91}, author = {Vishkin, Uzi} } @article {17964, title = {Controlled topology simplification}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {2}, year = {1996}, month = {1996/06//}, pages = {171 - 184}, abstract = {We present a simple, robust, and practical method for object simplification for applications where gradual elimination of high frequency details is desired. This is accomplished by converting an object into multi resolution volume rasters using a controlled filtering and sampling technique. A multiresolution triangle mesh hierarchy can then be generated by applying the Marching Cubes algorithm. We further propose an adaptive surface generation algorithm to reduce the number of triangles generated by the standard Marching Cubes. Our method simplifies the topology of objects in a controlled fashion. In addition, at each level of detail, multilayered meshes can be used for an efficient antialiased rendering.}, keywords = {controlled topology simplification, object simplification, Marching Cubes algorithm, standard Marching Cubes, adaptive surface generation algorithm, controlled filtering, sampling technique, high frequency details, multi resolution volume rasters, multiresolution triangle mesh hierarchy, multilayered meshes, efficient antialiased rendering, antialiasing, computational geometry, rendering (computer graphics), topology}, isbn = {1077-2626}, doi = {10.1109/2945.506228}, author = {He,Taosong and Hong,Lichan and Varshney, Amitabh and Wang,S.W.} } @conference {17960, title = {Dynamic view-dependent simplification for polygonal models}, booktitle = {Visualization {\textquoteright}96.
Proceedings.}, year = {1996}, month = {1996///}, pages = {327 - 334}, abstract = {Presents an algorithm for performing view-dependent simplifications of a triangulated polygonal model in real-time. The simplifications are dependent on viewing direction, lighting and visibility, and are performed by taking advantage of image-space, object-space and frame-to-frame coherences. A continuous level-of-detail representation for an object is first constructed off-line. This representation is then used at run-time to guide the selection of appropriate triangles for display. The list of displayed triangles is updated incrementally from one frame to the next. Our approach is more effective than the current level-of-detail-based rendering approaches for most scientific visualization applications where there are a limited number of highly complex objects that stay relatively close to the viewer.}, keywords = {dynamic view-dependent simplification, triangulated polygonal model, viewing direction, visibility, lighting, continuous level-of-detail representation, displayed triangle list updating, incremental updating, triangle selection, frame-to-frame coherence, image-space coherence, object-space coherence, real-time coherence, scientific visualization, data visualisation}, doi = {10.1109/VISUAL.1996.568126}, author = {Xia,J. C and Varshney, Amitabh} } @conference {18038, title = {Efficient approximate and dynamic matching of patterns using a labeling paradigm}, booktitle = {Foundations of Computer Science, 1996. Proceedings., 37th Annual Symposium on}, year = {1996}, month = {1996/10//}, pages = {320 - 328}, abstract = {A key approach in string processing algorithmics has been the labeling paradigm which is based on assigning labels to some of the substrings of a given string. If these labels are chosen consistently, they can enable fast comparisons of substrings. Until the first optimal parallel algorithm for suffix tree construction was given by the authors in 1994, the labeling paradigm was considered not to be competitive with other approaches. They show that this general method is also useful for several central problems in the area of string processing: approximate string matching, dynamic dictionary matching, and dynamic text indexing. The approximate string matching problem deals with finding all substrings of a text which match a pattern {\textquotedblleft}approximately{\textquotedblright}, i.e., with at most m differences. The differences can be in the form of inserted, deleted, or replaced characters. The text indexing problem deals with finding all occurrences of a pattern in a text, after the text is preprocessed. In the dynamic text indexing problem, updates to the text in the form of insertions and deletions of substrings are permitted. The dictionary matching problem deals with finding all occurrences of each pattern set of a set of patterns in a text, after the pattern set is preprocessed. In the dynamic dictionary matching problem, insertions and deletions of patterns to the pattern set are permitted}, keywords = {approximate string matching, dynamic dictionary matching, dynamic text indexing, labeling paradigm, optimal parallel algorithm, suffix tree construction, string processing algorithmics, substrings, inserted characters, deleted characters, replaced characters, efficient dynamic matching, efficient pattern matching, computational complexity, indexing, parallel algorithms, pattern matching, string matching, tree data structures, text processing}, doi = {10.1109/SFCS.1996.548491}, author = {Sahinalp,S.
C and Vishkin, Uzi} } @conference {16319, title = {An empirical exploration of code evolution}, booktitle = {International Workshop on Empirical Studies of Software Maintenance}, year = {1996}, month = {1996///}, author = {Karr,A. and Porter, Adam and Votta,L.} } @article {16310, title = {Evaluating workflow and process automation in wide-area software development}, journal = {Software Process Technology}, year = {1996}, month = {1996///}, pages = {188 - 193}, author = {Perry,D. and Porter, Adam and Votta,L. and Wade,M.} } @article {18042, title = {A fast parallel algorithm for finding the convex hull of a sorted point set}, journal = {International Journal of Computational Geometry and Applications}, volume = {6}, year = {1996}, month = {1996///}, pages = {231 - 242}, author = {Berkman,O. and Schieber,B. and Vishkin, Uzi} } @conference {17928, title = {FINESSE: a financial information spreadsheet}, booktitle = {Information Visualization {\textquoteright}96, Proceedings IEEE Symposium on}, year = {1996}, month = {1996/10//}, pages = {70-71, 125}, abstract = {We outline a spreadsheet-based system for visualization of real-time financial information. Our system permits the user to define arithmetic and presentation relationships amongst the various cells of the spreadsheet. The cells contain primitives that can be numbers, text, images, functions and graphics. Presenting financial information in this format allows its intended clients, the financial analysts, to work in the familiar environment of a spreadsheet and allows them the flexibility afforded by the powerful interface of the spreadsheet paradigm. In addition, our system permits real-time visualization of the financial data stream allowing its user to visually track the changing market trends in two and three dimensions.}, keywords = {FINESSE, financial information spreadsheet, spreadsheet programs, spreadsheet cells, arithmetic, numbers, text, images, functions, graphics, data presentation, market trends, real-time visualization, two dimensions, three dimensions, user interface, user interfaces, data visualisation, financial data processing, real-time systems}, doi = {10.1109/INFVIS.1996.559222}, author = {Varshney, Amitabh and Kaufman,A.} } @article {16206, title = {Hybrid network management (communication systems)}, journal = {16th AIAA International Communications Satellite Systems Conference}, year = {1996}, month = {1996///}, abstract = {We describe our collaborative efforts towards the design and implementation of a next-generation integrated network management system for hybrid networks (INMS/HN). We describe the overall software architecture of the system at its current stage of development. This NMS is specifically designed to address issues relevant to complex heterogeneous networks consisting of seamlessly interoperable terrestrial and satellite networks. NMSs are a key element for interoperability in such networks. We describe the integration of configuration management and performance management. The next step in this integration is fault management. In particular, we describe the object model, issues concerning the graphical user interface, browsing tools, performance data graphical widget displays, and management information database organization issues.}, author = {Baras,J. S and Ball,M. and Karne,R. K and Kelley,S. and Jang,K.D. and Plaisant, Catherine and Roussopoulos, Nick and Stathatos,K. and Vakhutinsky,A.
and Valluri,J.} } @article {16202, title = {Integrated network management of hybrid networks}, journal = {AIP Conference Proceedings}, volume = {361}, year = {1996}, month = {1996/03/01/}, pages = {345 - 350}, abstract = {We describe our collaborative efforts towards the design and implementation of a next generation integrated network management system for hybrid networks (INMS/HN). We describe the overall software architecture of the system at its current stage of development. This network management system is specifically designed to address issues relevant for complex heterogeneous networks consisting of seamlessly interoperable terrestrial and satellite networks. Network management systems are a key element for interoperability in such networks. We describe the integration of configuration management and performance management. The next step in this integration is fault management. In particular we describe the object model, issues of the Graphical User Interface (GUI), browsing tools and performance data graphical widget displays, management information database (MIB) organization issues. Several components of the system are being commercialized by Hughes Network Systems. {\textcopyright} 1996 American Institute of Physics.}, isbn = {0094243X}, doi = {10.1063/1.50028}, url = {http://proceedings.aip.org/resource/2/apcpcs/361/1/345_1?isAuthorized=no}, author = {Baras,John S and Ball,Mike and Karne,Ramesh K and Kelley,Steve and Jang,Kap D and Plaisant, Catherine and Roussopoulos, Nick and Stathatos,Kostas and Vakhutinsky,Andrew and Valluri,Jaibharat and Whitefield,David} } @article {16452, title = {A methodology for query reformulation in CIS using semantic knowledge}, journal = {International Journal of Cooperative Information Systems}, volume = {5}, year = {1996}, month = {1996///}, pages = {431 - 468}, author = {Florescu,D. and Raschid, Louiqa and Valduriez,P.} } @article {13710, title = {A Multi-Level Approach to Interlingual MT: Defining the Interface between Representational Languages}, journal = {International Journal of Expert Systems}, volume = {9}, year = {1996}, month = {1996///}, pages = {15 - 51}, abstract = {This paper describes a multi-level design, i.e., a non-uniform approach to interlingual machine translation (MT), in which distinct representational languages are used for different types of knowledge. We demonstrate that a linguistically-motivated {\textquotedblleft}division of labor{\textquotedblright} across multiple representation levels has not complicated, but rather has readily facilitated, the identification and construction of systematic relations at the interface between each level. Our approach assumes an interlingua derived from the lexical semantics and predicate decomposition approaches of Jackendoff (1983; 1990) and Levin and Rappaport-Hovav (1995a; 1995b). We describe a model of interpretation and representation of natural language sentences which has been implemented as part of an interlingual MT system called PRINCITRAN.}, author = {Dorr, Bonnie J and Voss,C.R.} } @conference {17959, title = {Optimizing triangle strips for fast rendering}, booktitle = {Visualization {\textquoteright}96. Proceedings.}, year = {1996}, month = {1996/11/27/1}, pages = {319 - 326}, abstract = {Almost all scientific visualization involving surfaces is currently done via triangles. The speed at which such triangulated surfaces can be displayed is crucial to interactive visualization and is bounded by the rate at which triangulated data can be sent to the graphics subsystem for rendering.
Partitioning polygonal models into triangle strips can significantly reduce rendering times over transmitting each triangle individually. We present new and efficient algorithms for constructing triangle strips from partially triangulated models, and experimental results showing these strips are on average 15\% better than those from previous codes. Further, we study the impact of larger buffer sizes and various queuing disciplines on the effectiveness of triangle strips.}, keywords = {triangle strip optimisation, partially triangulated models, polygonal model partitioning, queuing disciplines, buffer sizes, rendering times, fast rendering, graphics subsystem, interactive visualization, scientific visualization, triangulated data, triangulated surfaces, data visualisation}, doi = {10.1109/VISUAL.1996.568125}, author = {Evans,F. and Skiena,S. and Varshney, Amitabh} } @article {17732, title = {Rounding errors in solving block Hessenberg systems}, journal = {Mathematics of Computation}, volume = {65}, year = {1996}, month = {1996/01//}, pages = {115 - 135}, abstract = {A rounding error analysis is presented for a divide-and-conquer algorithm to solve linear systems with block Hessenberg matrices. Conditions are derived under which the algorithm computes a stable solution. The algorithm is shown to be stable for block diagonally dominant matrices and for M-matrices.}, keywords = {block diagonally dominant matrices, block Hessenberg matrices, Linear systems, M-matrices, rounding error analysis}, isbn = {0025-5718}, doi = {10.1090/S0025-5718-96-00667-9}, url = {http://dx.doi.org/10.1090/S0025-5718-96-00667-9}, author = {Von Matt,Urs and Stewart, G.W.} } @conference {16474, title = {Scaling heterogeneous databases and the design of DISCO}, booktitle = {ICDCS}, year = {1996}, month = {1996///}, pages = {449 - 449}, author = {Tomasic,A. and Raschid, Louiqa and Valduriez,P.} } @conference {17940, title = {Simplification envelopes}, booktitle = {Proceedings of the 23rd annual conference on Computer graphics and interactive techniques}, series = {SIGGRAPH {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {119 - 128}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {geometric modeling, hierarchical approximation, levels-of-detail generation, model simplification, offsets, shape approximation}, isbn = {0-89791-746-4}, doi = {10.1145/237170.237220}, url = {http://doi.acm.org/10.1145/237170.237220}, author = {Cohen,Jonathan and Varshney, Amitabh and Manocha,Dinesh and Turk,Greg and Weber,Hans and Agarwal,Pankaj and Brooks,Frederick and Wright,William} } @article {15025, title = {Sorting strings and constructing digital search trees in parallel}, journal = {Theoretical Computer Science}, volume = {154}, year = {1996}, month = {1996/02/05/}, pages = {225 - 245}, abstract = {We describe two simple optimal-work parallel algorithms for sorting a list $L = (X_1, X_2, \ldots, X_m)$ of $m$ strings over an arbitrary alphabet $\Sigma$, where $\sum_{i=1}^{m} |X_i| = n$ and two elements of $\Sigma$ can be compared in unit time using a single processor. The first algorithm is a deterministic algorithm that runs in $O(\log^2 m/\log\log m)$ time and the second is a randomized algorithm that runs in $O(\log m)$ time. Both algorithms use $O(m \log m + n)$ operations. Compared to the best-known parallel algorithms for sorting strings, our algorithms offer the following improvements. 1.
The total number of operations used by our algorithms is optimal while all previous parallel algorithms use a nonoptimal number of operations. 2. We make no assumption about the alphabet while the previous algorithms assume that the alphabet is restricted to $\{1, 2, \ldots, n^{O(1)}\}$. 3. The computation model assumed by our algorithms is the Common CRCW PRAM unlike the known algorithms that assume the Arbitrary CRCW PRAM. 4. Our algorithms use $O(m \log m + n)$ space, while the previous parallel algorithms use $O(n^{1+\varepsilon})$ space, where $\varepsilon$ is a positive constant. We also present optimal-work parallel algorithms to construct a digital search tree for a given set of strings and to search for a string in a sorted list of strings. We use our parallel sorting algorithms to solve the problem of determining a minimal starting point of a circular string with respect to lexicographic ordering. Our solution improves upon the previous best-known result to solve this problem.}, isbn = {0304-3975}, doi = {10.1016/0304-3975(94)00263-0}, url = {http://www.sciencedirect.com/science/article/pii/0304397594002630}, author = {JaJa, Joseph F. and Ryu,Kwan Woo and Vishkin, Uzi} } @conference {17897, title = {Stripe: a software tool for efficient triangle strips}, booktitle = {ACM SIGGRAPH 96 Visual Proceedings: The art and interdisciplinary programs of SIGGRAPH {\textquoteright}96}, series = {SIGGRAPH {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {153}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-784-7}, doi = {10.1145/253607.253894}, url = {http://doi.acm.org/10.1145/253607.253894}, author = {Evans,Francine and Skiena,Steven and Varshney, Amitabh} } @article {18048, title = {Almost fully-parallel parentheses matching}, journal = {Discrete applied mathematics}, volume = {57}, year = {1995}, month = {1995///}, pages = {11 - 28}, author = {Berkman,O. and Vishkin, Uzi} } @article {17918, title = {Automatic generation of multiresolution for polygonal models}, journal = {First Workshop on Simulation and Interaction in Virtual Environments}, year = {1995}, month = {1995///}, author = {Varshney, Amitabh and Agarwal,P. and Brooks,F. and Wright,W. and Weber,H.} } @article {16314, title = {Comparing detection methods for software requirements inspections: a replicated experiment}, journal = {IEEE Transactions on Software Engineering}, volume = {21}, year = {1995}, month = {1995/06//}, pages = {563 - 575}, abstract = {Software requirements specifications (SRS) are often validated manually. One such process is inspection, in which several reviewers independently analyze all or part of the specification and search for faults. These faults are then collected at a meeting of the reviewers and author(s). Usually, reviewers use Ad Hoc or Checklist methods to uncover faults. These methods force all reviewers to rely on nonsystematic techniques to search for a wide variety of faults. We hypothesize that a Scenario-based method, in which each reviewer uses different, systematic techniques to search for different, specific classes of faults, will have a significantly higher success rate. We evaluated this hypothesis using a $3 \times 2^4$ partial factorial, randomized experimental design. Forty-eight graduate students in computer science participated in the experiment. They were assembled into sixteen three-person teams. Each team inspected two SRS using some combination of Ad Hoc, Checklist or Scenario methods.
For each inspection we performed four measurements: (1) individual fault detection rate, (2) team fault detection rate, (3) percentage of faults first identified at the collection meeting (meeting gain rate), and (4) percentage of faults first identified by an individual, but never reported at the collection meeting (meeting loss rate). The experimental results are that (1) the Scenario method had a higher fault detection rate than either Ad Hoc or Checklist methods, (2) Scenario reviewers were more effective at detecting the faults their scenarios are designed to uncover, and were no less effective at detecting other faults than both Ad Hoc or Checklist reviewers, (3) Checklist reviewers were no more effective than Ad Hoc reviewers, and (4) collection meetings produced no net improvement in the fault detection rate; meeting gains were offset by meeting losses.}, keywords = {Assembly, Computer science, Design for experiments, detection methods, Fault detection, fault detection rate, Fault diagnosis, formal specification, formal verification, Gain measurement, individual fault detection rate, Inspection, Loss measurement, nonsystematic techniques, performance evaluation, Performance gain, replicated experiment, scenario-based method, Software development management, software requirements inspections, software requirements specifications, team fault detection rate}, isbn = {0098-5589}, doi = {10.1109/32.391380}, author = {Porter, Adam and Votta,L. G. and Basili, Victor R.} } @article {18045, title = {Data compression using locally consistent parsing}, journal = {Technical report, University of Maryland Department of Computer Science}, year = {1995}, month = {1995///}, author = {Sahinalp,S. C and Vishkin, Uzi} } @conference {17944, title = {Defining, Computing, and Visualizing Molecular Interfaces}, booktitle = {Proceedings of the 6th conference on Visualization {\textquoteright}95}, series = {VIS {\textquoteright}95}, year = {1995}, month = {1995///}, pages = {36}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, abstract = {A parallel, analytic approach for defining and computing the inter- and intra-molecular interfaces in three dimensions is described. The {\textquotedblleft}molecular interface surfaces{\textquotedblright} are derived from approximations to the power-diagrams over the participating molecular units. For a given molecular interface our approach can generate a family of interface surfaces parametrized by $\alpha$ and $\beta$, where $\alpha$ is the radius of the solvent molecule (also known as the probe-radius) and $\beta$ is the interface radius that defines the size of the molecular interface. Molecular interface surfaces provide biochemists with a powerful tool to study surface complementarity and to efficiently characterize the interactions during a protein-substrate docking. The complexity of our algorithm for molecular environments is $O(nk \log^2 k)$, where $n$ is the number of atoms in the participating molecular units and $k$ is the average number of neighboring atoms -- a constant, given $\alpha$ and $\beta$.}, keywords = {Computational Biochemistry Algorithms Molecular Graphics, Connolly Surfaces, Molecular Interfaces, Molecular Surfaces, Protein-Protein Recognition}, isbn = {0-8186-7187-4}, url = {http://dl.acm.org/citation.cfm?id=832271.833834}, author = {Varshney, Amitabh and Brooks Jr,Frederick P. and Manocha,Dinesh and Wright,William V.
and Richardson,David C.} } @book {13756, title = {Development of interlingual lexical conceptual structures with syntactic markers for machine translation}, year = {1995}, month = {1995///}, publisher = {University of Maryland}, organization = {University of Maryland}, abstract = {This document reports on research conducted at the University of Maryland for the Korean/English Machine Translation (MT) project. Our primary objective was to develop an interlingual representation based on lexical conceptual structure (LCS) and to examine the relation between this representation and a set of linguistically motivated semantic classes. We view the work of the past year as a critical step toward achieving our goal of building a generator: the classification of LCS{\textquoteright}s into a semantic hierarchy provides a systematic mapping between semantic knowledge about verbs and their surface syntactic structures. We have focused on several areas in support of our objectives: (1) investigation of morphological structure including distinctions between Korean and English; (2) porting a fast, message-passing parser to Korean (and to the IBM PC); (3) study of free word order and development of the associated processing algorithm; (4) investigation of the aspectual dimension as it impacts morphology, syntax, and lexical semantics; (5) investigation of the relation between semantic classes and syntactic structure; (6) development of theta-role and lexical-semantic templates through lexical acquisition techniques; (7) definition of a mapping between KR concepts and interlingual representations; (8) formalization of the lexical conceptual structure.}, author = {Dorr, Bonnie J and Lee,J. and Voss,C. and Suh,S.} } @conference {16318, title = {Experimental software engineering: A report on the state of the art}, booktitle = {INTERNATIONAL CONFERENCE ON SOFTWARE ENGINEERING}, volume = {17}, year = {1995}, month = {1995///}, pages = {277 - 277}, author = {Votta,L. G. and Porter, Adam and Perry,D.} } @article {17977, title = {Generating levels of detail for large-scale polygonal models}, volume = {CS-1995-20}, year = {1995}, month = {1995///}, institution = {Department of Computer Science, Duke University, North Carolina}, abstract = {We present an efficient algorithm for generating various levels-of-detail approximations for a given polygonal model. Our algorithm guarantees that all points of an approximation are within a user-specifiable distance from the original model and all points of the original model are within a distance from the approximation. Each approximation attempts to minimize the total number of polygons required to satisfy the previous constraint. We show how the problem of generating levels-of-detail approximations reduces to the classic set partition problem. The various approximations are guaranteed to be topologically consistent with the input polygonal model. The approximations can be constrained by the user to preserve any desired edges of the input model. We also propose a method to compute an estimate of the quality of the approximation generated by our algorithm with respect to the optimal approximation satisfying the same constraints. We have implemented our algorithm and have obtained experimental results of multiresolution hierarchy generation on over a thousand polygonal objects from a CAD model of a notional submarine.}, author = {Varshney, Amitabh and Agarwal,P. K and Brooks Jr,F. P and Wright,W.
V and Weber,H.} } @book {13298, title = {Image Analysis and Processing: 8th International Conference, ICIAP {\textquoteright}95, San Remo, Italy, September 13-15, 1995: Proceedings}, year = {1995}, month = {1995/09/28/}, publisher = {Springer}, organization = {Springer}, abstract = {This book presents the proceedings of the 8th International Conference on Image Analysis and Processing, ICIAP {\textquoteright}95, held in Sanremo, Italy in September 1995 under the sponsorship of the International Association of Pattern Recognition IAPR. The volume presents 108 papers selected from more than 180 submissions together with six invited contributions. The papers are written by a total of 265 contributing authors and give a comprehensive state-of-the-art report on all current issues of image analysis and processing. Theoretical aspects are addressed as well as systems design and advanced applications, particularly in medical imaging.}, keywords = {Artificial intelligence, COMPUTER AIDED DESIGN, Computer Graphics, Computer science, Computer vision, Computers / CAD-CAM, Computers / Computer Graphics, Computers / Computer Science, Computers / Computer Vision \& Pattern Recognition, Computers / Image Processing, Computers / Intelligence (AI) \& Semantics, Computers / Optical Data Processing, Computers / Software Development \& Engineering / General, Electronic books, IMAGE PROCESSING, Image processing/ Congresses, Imaging systems, Optical data processing, Optical pattern recognition, software engineering}, isbn = {9783540602989}, author = {Braccini,Carlo and De Floriani, Leila and Vernazza,Gianni} } @article {13814, title = {Lexical allocation in interlingua-based machine translation of spatial expressions}, journal = {Working Notes for IJCAI-95 Workshop on the Representation and Processing of Spatial Expressions, Montreal, Canada}, year = {1995}, month = {1995///}, abstract = {Given a spatial expression, or its computational semantic form, how is the expression{\textquoteright}s spatial semantics to be allocated lexically, i.e., among entries in the lexicon? In interlingua-based machine translation (MT) research, lexical allocation is the problem of allocating or subdividing a linguistic expression{\textquoteright}s full interlingual (IL) structure into the substructures that are lexical IL forms, i.e., in the lexicon. Here we present our work developing IL forms and an IL lexicon for translating English spatial expressions into Turkish. We examine several co-occurrence patterns between motion verbs (spatial placement and displacement) and directional adpositions (particles in English, postpositions in Turkish) and the lexical allocation of spatial vectors in these patterns.}, author = {Voss,C.R. and Dorr, Bonnie J and Sencan,M.U.} } @conference {18047, title = {A note on reducing parallel model simulations to integer sorting}, booktitle = {Parallel Processing Symposium, 1995. Proceedings., 9th International}, year = {1995}, month = {1995///}, pages = {208 - 212}, author = {Matias,Y. and Vishkin, Uzi} } @conference {18046, title = {Parallel algorithms for database operations and a database operation for parallel algorithms}, booktitle = {Parallel Processing Symposium, 1995. Proceedings., 9th International}, year = {1995}, month = {1995///}, pages = {173 - 179}, author = {Raman,R.
and Vishkin, Uzi} } @conference {18043, title = {On a technique for parsing a string}, booktitle = {Combinatorial Pattern Matching}, year = {1995}, month = {1995///}, pages = {386 - 386}, author = {Vishkin, Uzi} } @article {18044, title = {On a Technique for Parsing a String (Invited Lecture)}, journal = {Lecture Notes in Computer Science}, volume = {937}, year = {1995}, month = {1995///}, pages = {386 - 386}, author = {Vishkin, Uzi} } @article {13877, title = {Toward a lexicalized grammar for interlinguas}, journal = {Machine Translation}, volume = {10}, year = {1995}, month = {1995///}, pages = {143 - 184}, abstract = {In this paper we present one aspect of our research on machine translation (MT): capturing the grammatical and computational relation between (i) the interlingua (IL) as defined declaratively in the lexicon and (ii) the IL as defined procedurally by way of algorithms that compose and decompose pivot IL forms. We begin by examining the interlinguas in the lexicons of a variety of current IL-based approaches to MT. This brief survey makes it clear that no consensus exists among MT researchers on the level of representation for defining the IL. In the section that follows, we explore the consequences of this missing formal framework for MT system builders who develop their own lexical-IL entries. The lack of software tools to support rapid IL respecification and testing greatly hampers their ability to modify representations to handle new data and new domains. Our view is that IL-based MT research needs both (a) the formal framework to specify possible IL grammars and (b) the software support tools to implement and test these grammars. With respect to (a), we propose adopting a lexicalized grammar approach, tapping research results from the study of tree grammars for natural language syntax. With respect to (b), we sketch the design and functional specifications for parts of ILustrate, the set of software tools that we need to implement and test the various IL formalisms that meet the requirements of a lexicalized grammar. In this way, we begin to address a basic issue in MT research, how to define and test an interlingua as a computational language {\textemdash} without building a full MT system for each possible IL formalism that might be proposed.}, isbn = {0922-6567}, url = {http://dx.doi.org/10.1007/BF00997234}, author = {Voss,Clare and Dorr, Bonnie J} } @conference {17943, title = {Voxel based object simplification}, booktitle = {Proceedings of the 6th conference on Visualization {\textquoteright}95}, series = {VIS {\textquoteright}95}, year = {1995}, month = {1995///}, pages = {296}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, abstract = {Presents a simple, robust and practical method for object simplification for applications where gradual elimination of high-frequency details is desired. This is accomplished by sampling and low-pass filtering the object into multi-resolution volume buffers and applying the marching cubes algorithm to generate a multi-resolution triangle-mesh hierarchy. Our method simplifies the genus of objects and can also help existing object simplification algorithms achieve better results.
At each level of detail, a multi-layered mesh can be used for an optional and efficient antialiased rendering.}, keywords = {antialiased rendering, antialiasing, buffer storage, data visualisation, high-frequency detail elimination, low-pass filtering, low-pass filters, marching cubes algorithm, mesh generation, multi-layered mesh, multi-resolution triangle-mesh hierarchy, multi-resolution volume buffers, object genus, Rendering (computer graphics), sampling, Smoothing methods, voxel based object simplification}, isbn = {0-8186-7187-4}, url = {http://dl.acm.org/citation.cfm?id=832271.833850}, author = {He,Taosong and Hong,Lichan and Kaufman,A. and Varshney, Amitabh and Wang,S.} } @article {15213, title = {Biconnectivity approximations and graph carvings}, journal = {Journal of the ACM (JACM)}, volume = {41}, year = {1994}, month = {1994/03//}, pages = {214 - 235}, abstract = {A spanning tree in a graph is the smallest connected spanning subgraph. Given a graph, how does one find the smallest (i.e., least number of edges) 2-connected spanning subgraph (connectivity refers to both edge and vertex connectivity, if not specified)? Unfortunately, the problem is known to be NP-hard. We consider the problem of finding a better approximation to the smallest 2-connected subgraph, by an efficient algorithm. For 2-edge connectivity, our algorithm guarantees a solution that is no more than 3/2 times the optimal. For 2-vertex connectivity, our algorithm guarantees a solution that is no more than 5/3 times the optimal. The previous best approximation factor is 2 for each of these problems. The new algorithms (and their analyses) depend upon a structure called a carving of a graph, which is of independent interest. We show that approximating the optimal solution to within an additive constant is NP-hard as well. We also consider the case where the graph has edge weights. For this case, we show that an approximation factor of 2 is possible in polynomial time for finding a k-edge connected spanning subgraph. This improves an approximation factor of 3 for k = 2, due to Frederickson and J{\'a}J{\'a} [1981], and extends it for any k (with an increased running time though).}, keywords = {biconnectivity, connectivity, sparse subgraphs}, isbn = {0004-5411}, doi = {10.1145/174652.174654}, url = {http://doi.acm.org/10.1145/174652.174654}, author = {Khuller, Samir and Vishkin, Uzi} } @article {13875, title = {The Case for a MT Developers{\textquoteright} Tool with a Two-Component View of the Interlingua}, journal = {Proceedings of the First Conference of the Association for Machine Translation in the Americas}, year = {1994}, month = {1994///}, pages = {40 - 47}, author = {Dorr, Bonnie J and Voss,C.} } @article {17910, title = {Computing smooth molecular surfaces}, journal = {Computer Graphics and Applications, IEEE}, volume = {14}, year = {1994}, month = {1994/09//}, pages = {19 - 25}, abstract = {We consider how we set out to formulate a parallel analytical molecular surface algorithm that has expected linear complexity with respect to the total number of atoms in a molecule. To achieve this goal, we avoided computing the complete 3D regular triangulation over the entire set of atoms, a process that takes time O(n log n), where n is the number of atoms in the molecule.
We aim to compute and display these surfaces at interactive rates, by taking advantage of advances in computational geometry, making further algorithmic improvements and parallelizing the computations.}, keywords = {3D regular triangulation, algorithmic improvements, atoms, computational complexity, computational geometry, interactive rates, linear time, parallel computing, physics computing, smooth molecular surface algorithm, surface triangulation}, isbn = {0272-1716}, doi = {10.1109/38.310720}, author = {Varshney, Amitabh and Brooks, F.P.,Jr. and Wright,W. V} } @article {13742, title = {Concept-based lexical selection}, journal = {Proceedings of the AAAI-94 Fall Symposium on Knowledge Representation for Natural Language Processing in Implemented Systems}, year = {1994}, month = {1994///}, author = {Dorr, Bonnie J and Voss,C. and Peterson,E. and Kiker,M.} } @article {18060, title = {On the detection of robust curves}, journal = {CVGIP: Graphical Models and Image Processing}, volume = {56}, year = {1994}, month = {1994///}, pages = {189 - 204}, author = {Cole,R. and Vishkin, Uzi} } @book {18146, title = {Developing a computer science agenda for high-performance computing}, year = {1994}, month = {1994///}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-678-6}, editor = {Vishkin, Uzi} } @conference {16335, title = {An experiment to assess different defect detection methods for software requirements inspections}, booktitle = {Proceedings of the 16th international conference on Software engineering}, year = {1994}, month = {1994///}, pages = {103 - 112}, author = {Porter, Adam and Votta,L. G.} } @article {18061, title = {Finding level-ancestors in trees}, journal = {Journal of Computer and System Sciences}, volume = {48}, year = {1994}, month = {1994///}, pages = {214 - 230}, author = {Berkman,O. and Vishkin, Uzi} } @conference {13789, title = {ILustrate: a MT Developers{\textquoteright} Tool with a Two-Component View of the Interlingua}, booktitle = {Proceedings of the First AMTA Conference, Columbia MD}, year = {1994}, month = {1994///}, pages = {40 - 47}, abstract = {The interlingua (IL) in machine translation (MT) systems can be defined in terms of two components: (i) \"lexical IL forms\" within language-specific lexicons where each lexical entry has associated with it one or more lexical representations, and (ii) algorithms for creating and decomposing the instantiated \"pivot\" representation. Within this framework, we examine five different approaches to the level of representation for the lexical IL forms and then discuss a tool, ILustrate, we are building to develop and evaluate different IL representations coupled with their corresponding translation algorithms.}, author = {Dorr, Bonnie J and Voss,Clare R.} } @article {13577, title = {Instrument Grasp: A Model and its Effects on Handwritten Strokes}, journal = {Pattern Recognition}, volume = {27}, year = {1994}, month = {1994///}, pages = {233 - 245}, author = {David Doermann and Varma,V. and Rosenfeld, A.} } @book {17244, title = {Interacting with virtual environments}, year = {1994}, month = {1994///}, publisher = {John Wiley \& Sons}, organization = {John Wiley \& Sons}, author = {MacDonald,L. and Vince,J.
and Shneiderman, Ben} } @conference {17948, title = {Interactive visualization of weighted three-dimensional alpha hulls}, booktitle = {Proceedings of the tenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}94}, year = {1994}, month = {1994///}, pages = {395 - 396}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {An interactive visualization of weighted three-dimensional alpha-hulls is presented for static and dynamic spheres. The alpha-hull is analytically computed and represented by a triangulated mesh. The entire surface is computed and displayed in real-time at interactive rates. The weighted three-dimensional alpha-hulls are equivalent to smooth molecular surfaces of biochemistry. Biochemistry applications of interactive computation and display of alpha-hulls or smooth molecular surfaces are outlined.}, isbn = {0-89791-648-4}, doi = {10.1145/177424.178120}, url = {http://doi.acm.org/10.1145/177424.178120}, author = {Varshney, Amitabh and Brooks,Frederick P. and Wright,William V.} } @conference {18059, title = {Optimal parallel approximation for prefix sums and integer sorting}, booktitle = {Proceedings of the fifth annual ACM-SIAM symposium on Discrete algorithms}, year = {1994}, month = {1994///}, pages = {241 - 250}, author = {Goodrich,M. T and Matias,Y. and Vishkin, Uzi} } @conference {18053, title = {Optimal randomized parallel algorithms for computing the row maxima of a totally monotone matrix}, booktitle = {Proceedings of the fifth annual ACM-SIAM symposium on Discrete algorithms}, year = {1994}, month = {1994///}, pages = {613 - 621}, author = {Raman,R. and Vishkin, Uzi} } @article {15214, title = {On the parallel complexity of digraph reachability}, journal = {Information Processing Letters}, volume = {52}, year = {1994}, month = {1994/12/09/}, pages = {239 - 241}, abstract = {We formally show that the directed graph reachability problem can be reduced to several problems using a linear number of processors; hence an efficient parallel algorithm to solve any of these problems would imply an efficient parallel algorithm for the directed graph reachability problem. This formally establishes that all these problems are at least as hard as the s-t reachability problem.}, keywords = {combinatorial problems, Parallel algorithms}, isbn = {0020-0190}, doi = {10.1016/0020-0190(94)00153-7}, url = {http://www.sciencedirect.com/science/article/pii/0020019094001537}, author = {Khuller, Samir and Vishkin, Uzi} } @article {18051, title = {On a parallel-algorithms method for string matching problems}, journal = {Lecture Notes in Computer Science}, volume = {778}, year = {1994}, month = {1994///}, pages = {22 - 32}, author = {Sahinalp,S. C and Vishkin, Uzi} } @inbook {18049, title = {On a parallel-algorithms method for string matching problems (overview)}, booktitle = {Algorithms and Complexity}, series = {Lecture Notes in Computer Science}, volume = {778}, year = {1994}, month = {1994///}, pages = {22 - 32}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, keywords = {Computer Science}, isbn = {978-3-540-57811-6}, url = {http://dx.doi.org/10.1007/3-540-57811-0_3}, author = {Sahinalp,Suleyman and Vishkin, Uzi}, editor = {Bonuccelli,M. and Crescenzi,P. and Petreschi,R.} } @article {18055, title = {Pattern matching in a digitized image}, journal = {Algorithmica}, volume = {12}, year = {1994}, month = {1994///}, pages = {375 - 408}, author = {Landau,G.
M and Vishkin, Uzi} } @article {15212, title = {A primal-dual parallel approximation technique applied to weighted set and vertex covers}, journal = {Journal of Algorithms}, volume = {17}, year = {1994}, month = {1994///}, pages = {280 - 289}, author = {Khuller, Samir and Vishkin, Uzi and Young,N.} } @conference {18050, title = {Symmetry breaking for suffix tree construction}, booktitle = {Proceedings of the twenty-sixth annual ACM symposium on Theory of computing}, year = {1994}, month = {1994///}, pages = {300 - 309}, author = {Sahinalp,S. C and Vishkin, Uzi} } @article {15042, title = {Top-Bottom Routing around a Rectangle is as Easy as Computing Prefix Minima}, journal = {SIAM Journal on Computing}, volume = {23}, year = {1994}, month = {1994///}, pages = {449 - 465}, abstract = {A new parallel algorithm for the prefix minima problem is presented for inputs drawn from the range of integers $[1..s]$. For an input of size $n$, it runs in $O(\log \log \log s)$ time and $O(n)$ work (which is optimal). A faster algorithm is presented for the special case $s = n$; it runs in $O(\log^* n)$ time with optimal work. Both algorithms are for the Priority concurrent-read concurrent-write parallel random access machine (CRCW PRAM). A possibly surprising outcome of this work is that, whenever the range of the input is restricted, the prefix minima problem can be solved significantly faster than the $\Omega (\log \log n)$ time lower bound in a decision model of parallel computation, as described by Valiant [SIAM J. Comput., 4 (1975), pp. 348{\textendash}355]. The top-bottom routing problem, which is an important subproblem of routing wires around a rectangle in two layers, is also considered. It is established that, for parallel (and hence for serial) computation, the problem of top-bottom routing is no harder than the prefix minima problem with $s = n$, thus giving an $O(\log^* n)$ time optimal parallel algorithm for the top-bottom routing problem. This is one of the first nontrivial problems to be given an optimal parallel algorithm that runs in sublogarithmic time.}, keywords = {Parallel algorithms, pram algorithms, prefix minima, VLSI routing}, doi = {10.1137/S0097539791218275}, url = {http://link.aip.org/link/?SMJ/23/449/1}, author = {Berkman,Omer and JaJa, Joseph F. and Krishnamurthy,Sridhar and Thurimella,Ramakrishna and Vishkin, Uzi} } @conference {18054, title = {Trade-offs between communication throughput and parallel time}, booktitle = {Proceedings of the twenty-sixth annual ACM symposium on Theory of computing}, year = {1994}, month = {1994///}, pages = {372 - 381}, author = {Mansour,Y. and Nisan,N. and Vishkin, Uzi} } @article {18063, title = {Advanced parallel prefix-sums, list ranking and connectivity}, journal = {Synthesis of Parallel Algorithms}, year = {1993}, month = {1993///}, pages = {215 - 257}, author = {Vishkin, Uzi} } @conference {18066, title = {Approximate parallel prefix computation and its applications}, booktitle = {Parallel Processing Symposium, 1993., Proceedings of Seventh International}, year = {1993}, month = {1993///}, pages = {318 - 325}, author = {Goodrich,M. T and Matias,Y. and Vishkin, Uzi} } @article {17978, title = {Bounding the number of neighbors in protein molecules}, volume = {UNC-CS-TR-93-039}, year = {1993}, month = {1993///}, institution = {Department of Computer Science, University of North Carolina at Chapel Hill}, author = {Varshney, Amitabh and Wright,W. V and Brooks Jr,F.
P} } @article {18064, title = {A case for the PRAM as a standard programmer{\textquoteright}s model}, journal = {Parallel Architectures and their Efficient Use}, year = {1993}, month = {1993///}, pages = {11 - 19}, author = {Vishkin, Uzi} } @article {13746, title = {Constraints on the Space of MT Divergences}, journal = {AAAI (1993)}, volume = {43}, year = {1993}, month = {1993///}, pages = {53 - 53}, author = {Dorr, Bonnie J and Voss,C.R.} } @article {17953, title = {Evaluating surface intersections in lower dimensions}, journal = {Second International Conference on Curves and Surfaces}, year = {1993}, month = {1993///}, abstract = {We highlight a new algorithm for evaluating the surface intersection curve using a matrix formulation. The projection of the intersection curve is represented as the singular set of a bivariate matrix polynomial. The resulting algorithm for evaluating the intersection curve is based on matrix computations like eigendecomposition and singular value decomposition. Furthermore, at each stage of the algorithm we make use of inverse power iterations to march back to the curve. We also describe the performance of the resulting robust and accurate approach.}, author = {Manocha,D. and Varshney, Amitabh and Weber,H.} } @conference {17942, title = {Fast analytical computation of Richards{\textquoteright}s smooth molecular surface}, booktitle = {Proceedings of the 4th conference on Visualization {\textquoteright}93}, series = {VIS {\textquoteright}93}, year = {1993}, month = {1993///}, pages = {300 - 307}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, abstract = {An algorithm for rapid computation of Richards{\textquoteright}s smooth molecular surface is described. The entire surface is computed analytically, triangulated, and displayed at interactive rates. The faster speeds for our program have been achieved by algorithmic improvements, parallelizing the computations, and by taking advantage of the special geometrical properties of such surfaces. Our algorithm is easily parallelizable and it has a time complexity of O(k log k) over n processors, where n is the number of atoms of the molecule and k is the average number of neighbors per atom.}, isbn = {0-8186-3940-7}, url = {http://dl.acm.org/citation.cfm?id=949845.949900}, author = {Varshney, Amitabh and Brooks,Frederick P.} } @conference {13823, title = {Machine Translation of Spatial Expressions: Defining the Relation between an Interlingua and a Knowledge Representation System}, booktitle = {Proceedings of the Twelfth Conference of the American Association for Artificial Intelligence}, year = {1993}, month = {1993///}, pages = {374 - 379}, abstract = {In this paper we present one aspect of our research on machine translation (MT): defining the relation between the interlingua (IL) and a knowledge representation (KR) within an MT system. Our interest lies in the translation of natural language (NL) sentences where the "message" contains a spatial relation, in particular, where the sentence conveys information about the location or path of physical entities in the real, physical world.
We explore several arguments for clarifying the source of constraints on the particular IL structures needed to translate these sentences.}, author = {Dorr, Bonnie J and Voss,Clare R.} } @article {18068, title = {Optimal doubly logarithmic parallel algorithms based on finding all nearest smaller values}, journal = {Journal of Algorithms}, volume = {14}, year = {1993}, month = {1993///}, pages = {344 - 370}, author = {Berkman,O. and Schieber,B. and Vishkin, Uzi} } @article {18067, title = {On parallel integer merging}, journal = {Information and Computation}, volume = {106}, year = {1993}, month = {1993///}, pages = {266 - 285}, author = {Berkman,O. and Vishkin, Uzi} } @conference {18065, title = {Two dimensional pattern matching in a digitized image}, booktitle = {Combinatorial Pattern Matching}, year = {1993}, month = {1993///}, pages = {134 - 151}, author = {Landau,G. and Vishkin, Uzi} } @conference {18070, title = {Biconnectivity Approximations and Graph Carvings}, booktitle = {Proceedings of the Twenty-fourth Annual ACM Symposium on the Theory of Computing, Victoria, British Columbia, Canada, May 4-6, 1992}, year = {1992}, month = {1992///}, pages = {759 - 759}, author = {Vishkin, Uzi} } @article {13744, title = {Constraining MT divergences: spatial relations in the lexicon and knowledge base}, year = {1992}, month = {1992///}, institution = {University of Maryland at College Park}, address = {College Park, MD, USA}, author = {Dorr, Bonnie J and Voss,Clare} } @article {12987, title = {copia-like retrotransposons are ubiquitous among plants}, journal = {Proc Natl Acad Sci USA}, volume = {89}, year = {1992}, month = {1992/08//}, pages = {7124 - 7128}, abstract = {Transposable genetic elements are assumed to be a feature of all eukaryotic genomes. Their identification, however, has largely been haphazard, limited principally to organisms subjected to molecular or genetic scrutiny. We assessed the phylogenetic distribution of copia-like retrotransposons, a class of transposable element that proliferates by reverse transcription, using a polymerase chain reaction assay designed to detect copia-like element reverse transcriptase sequences. copia-like retrotransposons were identified in 64 plant species as well as the photosynthetic protist Volvox carteri. The plant species included representatives from 9 of 10 plant divisions, including bryophytes, lycopods, ferns, gymnosperms, and angiosperms. DNA sequence analysis of 29 cloned PCR products and of a maize retrotransposon cDNA confirmed the identity of these sequences as copia-like reverse transcriptase sequences, thereby demonstrating that this class of retrotransposons is a ubiquitous component of plant genomes.}, author = {Voytas,D. F and Cummings, Michael P. and Konieczny,A. and Ausubel,F. M and Rodermel,S. R} } @article {18075, title = {Efficient pattern matching with scaling}, journal = {Journal of Algorithms}, volume = {13}, year = {1992}, month = {1992///}, pages = {2 - 32}, author = {Amir, A. and Landau,G. M and Vishkin, Uzi} } @article {17956, title = {An environment projection approach to radiosity for mesh-connected computers}, journal = {Third Eurographics Workshop on Rendering}, year = {1992}, month = {1992///}, pages = {271 - 281}, author = {Varshney, Amitabh and Prins,J. F} } @article {13576, title = {Instrument Grasp: A Model and its Effects on Handwritten Strokes}, volume = {CAR-TR-614}, year = {1992}, month = {1992///}, institution = {University of Maryland, College Park}, author = {David Doermann and Varma,V.
and Rosenfeld, A.} } @article {18072, title = {Methods in parallel algorithmics}, journal = {Mathematical Foundations of Computer Science 1992}, year = {1992}, month = {1992///}, pages = {81 - 81}, author = {Vishkin, Uzi} } @article {18071, title = {Methods in parallel algorithmics and who may need to know them?}, journal = {Algorithms and Computation}, year = {1992}, month = {1992///}, pages = {1 - 4}, author = {Vishkin, Uzi} } @conference {13603, title = {Modeling of a Grasp for Handwriting}, booktitle = {Proceedings of the Twenty-Third Annual Modeling and Simulation Conference}, year = {1992}, month = {1992///}, pages = {2133 - 2140}, address = {Pittsburgh, PA}, author = {Varma,V. and David Doermann} } @article {18069, title = {A parallel blocking flow algorithm for acyclic networks}, journal = {Journal of Algorithms}, volume = {13}, year = {1992}, month = {1992///}, pages = {489 - 501}, author = {Vishkin, Uzi} } @article {15205, title = {Processor efficient parallel algorithms for the two disjoint paths problem and for finding a Kuratowski homeomorph}, journal = {SIAM Journal on Computing}, volume = {21}, year = {1992}, month = {1992///}, pages = {486 - 486}, author = {Khuller, Samir and Mitchell,S. G and Vazirani,V. V} } @article {18074, title = {Randomized range-maxima in nearly-constant parallel time}, journal = {Computational Complexity}, volume = {2}, year = {1992}, month = {1992///}, pages = {350 - 373}, author = {Berkman,O. and Matias,Y. and Vishkin, Uzi} } @conference {17935, title = {Real-time procedural textures}, booktitle = {Proceedings of the 1992 symposium on Interactive 3D graphics}, series = {I3D {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {95 - 100}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-467-8}, doi = {10.1145/147156.147171}, url = {http://doi.acm.org/10.1145/147156.147171}, author = {Rhoades,John and Turk,Greg and Bell,Andrew and State,Andrei and Neumann,Ulrich and Varshney, Amitabh} } @conference {13647, title = {Simulating Pressure Variations in Handwriting}, booktitle = {Proceedings of the Twenty-Third Annual Modeling and Simulation Conference}, year = {1992}, month = {1992///}, pages = {2141 - 2148}, address = {Pittsburgh, PA}, author = {David Doermann and Varma,V.} } @article {17971, title = {Six Generations of Building Walkthrough: Final Technical Report to the National Science Foundation}, year = {1992}, month = {1992///}, institution = {University of North Carolina at Chapel Hill}, address = {Chapel Hill, NC, USA}, author = {Brooks,Frederick P. and Airey,John and Alspaugh,John and Bell,Andrew and Brown,Randolph and Hill,Curtis and Nimscheck,Uwe and Rheingans,Penny and Rohlf,John and Smith,Dana and Turner,Douglass and Varshney, Amitabh and Wang,Yulan and Weber,Hans and Yuan,Xialin} } @article {18080, title = {Approximate parallel scheduling. II. Applications to logarithmic-time optimal parallel graph algorithms}, journal = {Information and Computation}, volume = {92}, year = {1991}, month = {1991///}, pages = {1 - 47}, author = {Cole,R. and Vishkin, Uzi} } @conference {18076, title = {Can parallel algorithms enhance serial implementation?}, booktitle = {Parallel Processing Symposium, 1994.
Proceedings., Eighth International}, year = {1991}, month = {1991///}, pages = {376 - 385}, author = {Vishkin, Uzi} } @conference {18078, title = {Converting high probability into nearly-constant time{\textemdash}with applications to parallel hashing}, booktitle = {Proceedings of the twenty-third annual ACM symposium on Theory of computing}, year = {1991}, month = {1991///}, pages = {307 - 316}, author = {Matias,Y. and Vishkin, Uzi} } @article {18079, title = {On parallel hashing and integer sorting}, journal = {Journal of Algorithms}, volume = {12}, year = {1991}, month = {1991///}, pages = {573 - 606}, author = {Matias,Y. and Vishkin, Uzi} } @article {17976, title = {Parallel radiosity techniques for mesh-connected SIMD computers}, year = {1991}, month = {1991///}, institution = {DTIC Document}, address = {North Carolina Univ at Chapel Hill Dept of Computer Science}, abstract = {This thesis investigates parallel radiosity techniques for highly-parallel, mesh-connected SIMD computers. The approaches studied differ along two orthogonal dimensions: the method of sampling - by ray-casting or by environment-projection - and the method of mapping of objects to processors - by object-space-based methods or by a balanced-load method. The environment-projection approach has been observed to perform better than the ray-casting approaches. For the dataset studied, the balanced-load method appears promising. Spatially subdividing the dataset without taking the potential light interactions into account has been observed to violate the locality property of radiosity. This suggests that object-space-based methods for radiosity must take visibility into account during subdivision to achieve any speedups based on exploiting the locality property of radiosity. This thesis also investigates the reuse patterns of form-factors in perfectly diffuse environments during radiosity iterations. Results indicate that reuse is sparse even when significant convergence is achieved.}, author = {Varshney, Amitabh} } @article {15204, title = {Planar graph coloring is not self-reducible, assuming P $\ne$ NP}, journal = {Theoretical Computer Science}, volume = {88}, year = {1991}, month = {1991/09/30/}, pages = {183 - 189}, abstract = {We show that obtaining the lexicographically first four coloring of a planar graph is NP-hard. This shows that planar graph four-coloring is not self-reducible, assuming P $\ne$ NP. One consequence of our result is that the schema of Jerrum et al. (1986) cannot be used for approximately counting the number of four colorings of a planar graph. These results extend to planar graph k-coloring, for k⩾4.}, isbn = {0304-3975}, doi = {10.1016/0304-3975(91)90081-C}, url = {http://www.sciencedirect.com/science/article/pii/030439759190081C}, author = {Khuller, Samir and Vazirani,Vijay V.} } @article {18077, title = {Structural parallel algorithmics}, journal = {Automata, Languages and Programming}, year = {1991}, month = {1991///}, pages = {363 - 380}, author = {Vishkin, Uzi} } @article {12977, title = {A superfamily of Arabidopsis thaliana retrotransposons}, journal = {Genetics}, volume = {127}, year = {1991}, month = {1991/04//}, pages = {801 - 809}, abstract = {We describe a superfamily of Arabidopsis thaliana retrotransposable elements that consists of at least ten related families designated Ta1-Ta10. The Ta1 family has been described previously. Two genomic clones representing the Ta2 and Ta3 elements were isolated from an A.
thaliana (race Landsberg erecta) lambda library using sequences derived from the reverse transcriptase region of Ta1 as hybridization probes. Nucleotide sequence analysis showed that the Ta1, Ta2 and Ta3 families share greater than 75\% amino acid identity in pairwise comparisons of their reverse transcriptase and RNase H genes. In addition to Ta1, Ta2 and Ta3, we identified seven other related retrotransposon families in Landsberg erecta, Ta4-Ta10, using degenerate primers and the polymerase chain reaction to amplify a highly conserved region of retrotransposon-encoded reverse transcriptase. One to two copies of elements Ta2-Ta10 are present in the genomes of the A. thaliana races Landsberg erecta and Columbia, indicating that the superfamily comprises at least 0.1\% of the A. thaliana genome. The nucleotide sequences of the reverse transcriptase regions of the ten element families place them in the category of copia-like retrotransposons and phylogenetic analysis of the amino acid sequences suggests that horizontal transfer may have played a role in their evolution.}, author = {Konieczny,A and Voytas,D. F and Cummings, Michael P. and Ausubel,F. M} } @conference {18145, title = {Towards a theory of nearly constant time parallel algorithms}, booktitle = {Foundations of Computer Science, 1991. Proceedings., 32nd Annual Symposium on}, year = {1991}, month = {1991/10//}, pages = {698 - 710}, abstract = {It is demonstrated that randomization is an extremely powerful tool for designing very fast and efficient parallel algorithms. Specifically, a running time of O(lg* n) (nearly-constant), with high probability, is achieved using n/lg* n (optimal speedup) processors for a wide range of fundamental problems. Also given is a constant time algorithm which, using n processors, approximates the sum of n positive numbers to within an error which is smaller than the sum by an order of magnitude. A variety of known and new techniques are used. New techniques, which are of independent interest, include estimation of the size of a set in constant time for several settings, and ways for deriving superfast optimal algorithms from superfast nonoptimal ones.}, keywords = {computational complexity, Estimation, nearly constant time parallel algorithms, Parallel algorithms, positive numbers, randomization, running time, superfast optimal algorithms}, doi = {10.1109/SFCS.1991.185438}, author = {Gil,J. and Matias,Y. and Vishkin, Uzi} } @conference {18081, title = {Deterministic sampling{\textemdash}a new technique for fast pattern matching}, booktitle = {Proceedings of the twenty-second annual ACM symposium on Theory of computing}, year = {1990}, month = {1990///}, pages = {170 - 180}, author = {Vishkin, Uzi} } @article {18084, title = {Fast alignment of DNA and protein sequences}, journal = {Methods in Enzymology}, volume = {183}, year = {1990}, month = {1990///}, pages = {487 - 502}, author = {Landau,G. M and Vishkin, Uzi and Nussinov,R.} } @article {18082, title = {Finding all nearest neighbors for convex polygons in parallel: a new lower bound technique and a matching algorithm}, journal = {Discrete Applied Mathematics}, volume = {29}, year = {1990}, month = {1990///}, pages = {97 - 111}, author = {Schieber,B.
and Vishkin, Uzi} } @article {15200, title = {On-line algorithms for weighted matching and stable marriages}, volume = {TR90-1143}, year = {1990}, month = {1990///}, institution = {Cornell University}, abstract = {We give an on-line deterministic algorithm for the bipartite weighted matching problem that achieves a competitive ratio of $O(n)$. In fact, this algorithm is almost optimal - the lower bound on the performance ratio of any deterministic on-line algorithm is $\Omega(n/\sqrt{\log n})$. We also study the stable marriage problem, where we are interested in the number of unstable pairs produced. We show that the simple "first come, first served" deterministic algorithm yields on the average $O(n \log n)$ unstable pairs, but in the worst case no deterministic or randomized on-line algorithm can do better than $\Omega(n^{2})$ unstable pairs.}, author = {Khuller, Samir and Mitchell,S. G and Vazirani,V. V} } @article {13025, title = {The structure, distribution and evolution of the Ta1 retrotransposable element family of Arabidopsis thaliana}, journal = {Genetics}, volume = {126}, year = {1990}, month = {1990/11//}, pages = {713 - 721}, abstract = {The Ta1 elements are a low copy number, copia-like retrotransposable element family of Arabidopsis thaliana. Six Ta1 insertions comprise all of the Ta1 element copies found in three geographically diverse A. thaliana races. These six elements occupy three distinct target sites: Ta1-1 is located on chromosome 5 and is common to all three races (Col-0, Kas-1 and La-0). Ta1-2 is present in two races on chromosome 4 (Kas-1 and La-0), and Ta1-3, also located on chromosome 4, is present only in one race (La-0). The six Ta1 insertions share greater than 96\% nucleotide identity, yet are likely to be incapable of further transposition due to deletions or nucleotide changes that alter either the coding capacity of the elements or conserved protein domains required for retrotransposition. Nucleotide sequence comparisons of these elements and the distribution of Ta1 among 12 additional A. thaliana geographical races suggest that Ta1-1 predated the global dispersal of A. thaliana. As the species spread throughout the world, two additional transposition events occurred which gave rise first to Ta1-2 and finally to Ta1-3.}, author = {Voytas,D. F and Konieczny,A and Cummings, Michael P. and Ausubel,F. M} } @article {18085, title = {Fast parallel and serial approximate string matching}, journal = {Journal of Algorithms}, volume = {10}, year = {1989}, month = {1989///}, pages = {157 - 169}, author = {Landau,G. M and Vishkin, Uzi} } @article {18086, title = {Faster optimal parallel prefix sums and list ranking}, journal = {Information and Computation}, volume = {81}, year = {1989}, month = {1989///}, pages = {334 - 352}, author = {Cole,R. and Vishkin, Uzi} } @article {18097, title = {The accelerated centroid decomposition technique for optimal parallel tree evaluation in logarithmic time}, journal = {Algorithmica}, volume = {3}, year = {1988}, month = {1988///}, pages = {329 - 346}, author = {Cole,R. and Vishkin, Uzi} } @article {18095, title = {Approximate parallel scheduling. Part I: The basic technique with applications to optimal parallel list ranking in logarithmic time}, journal = {SIAM Journal on Computing}, volume = {17}, year = {1988}, month = {1988///}, pages = {128 - 128}, author = {Cole,R.
and Vishkin, Uzi} } @conference {18089, title = {Efficient parallel triconnectivity in logarithmic time}, booktitle = {VLSI Algorithms and Architectures}, year = {1988}, month = {1988///}, pages = {33 - 42}, author = {Ramachandran,V. and Vishkin, Uzi} } @article {18093, title = {Fast string matching with k differences}, journal = {Journal of Computer and System Sciences}, volume = {37}, year = {1988}, month = {1988///}, pages = {63 - 78}, author = {Landau,G. M and Vishkin, Uzi} } @article {18090, title = {On finding a minimum dominating set in a tournament}, journal = {Theoretical Computer Science}, volume = {61}, year = {1988}, month = {1988///}, pages = {307 - 316}, author = {Megiddo,N. and Vishkin, Uzi} } @conference {18088, title = {On finding lowest common ancestors: simplification and parallelization}, booktitle = {VLSI Algorithms and Architectures}, year = {1988}, month = {1988///}, pages = {111 - 123}, author = {Schieber,B. and Vishkin, Uzi} } @article {18091, title = {Locating alignments with k differences for nucleotide and amino acid sequences}, journal = {Computer applications in the biosciences: CABIOS}, volume = {4}, year = {1988}, month = {1988///}, pages = {19 - 19}, author = {Landau,G. M and Vishkin, Uzi and Nussinov,R.} } @article {18094, title = {Matching patterns in strings subject to multi-linear transformations}, journal = {Theoretical Computer Science}, volume = {60}, year = {1988}, month = {1988///}, pages = {231 - 254}, author = {Eilam-Tzoreff,T. and Vishkin, Uzi} } @conference {18096, title = {Optimal parallel algorithms for expression tree evaluation and list ranking}, booktitle = {VLSI Algorithms and Architectures}, year = {1988}, month = {1988///}, pages = {91 - 100}, author = {Cole,R. and Vishkin, Uzi} } @article {18098, title = {Parallel construction of a suffix tree with applications}, journal = {Algorithmica}, volume = {3}, year = {1988}, month = {1988///}, pages = {347 - 365}, author = {Apostolico,A. and Iliopoulos,C. and Landau,G. M and Schieber,B. and Vishkin, Uzi} } @conference {18087, title = {PRAM algorithms: teach and preach}, booktitle = {collection of position papers, IBM-NSF Workshop on \"Opportunities and Constraints of Parallel Computing\", IBM Almaden}, year = {1988}, month = {1988///}, author = {Vishkin, Uzi} } @article {16370, title = {ALEX-an Alexical Programming Language}, volume = {TR87-835}, year = {1987}, month = {1987///}, institution = {Cornell University}, abstract = {ALEX is an experimental language for high-level parallel programming. It is a testbed for exploring various non-traditional ways of expressing algorithmic ideas, making extensive use of high-resolution color graphics. The language itself is not a programming language in the traditional sense, since there is no lexical syntax. This paper discusses the basic design of the ALEX user interface.}, url = {http://hdl.handle.net/1813/6675}, author = {Kozen,D. and Teitelbaum,T. and Chen,W. Z and Field,J. H and Pugh, William and Vander Zanden,B. T} } @article {18101, title = {An efficient string matching algorithm with K substitutions for nucleotide and amino acid sequences}, journal = {Journal of Theoretical Biology}, volume = {126}, year = {1987}, month = {1987///}, pages = {483 - 490}, author = {Landau,G.
M and Vishkin, Uzi and Nussinov,R.} } @inbook {15755, title = {Fine and Medium Grained Parallel Algorithms for Matrix QR Factorization}, booktitle = {Algorithms and Applications on Vector and Parallel Computers}, year = {1987}, month = {1987///}, pages = {347 - 349}, publisher = {Elsevier Science Publishers B.V. (North Holland)}, organization = {Elsevier Science Publishers B.V. (North Holland)}, author = {O{\textquoteright}Leary, Dianne P.}, editor = {Riele,H.J.J. te and Dekker,Th.J. and Vorst,H.A. van der} } @article {18099, title = {An optimal parallel algorithm for selection}, journal = {Parallel and Distributed Computing}, volume = {4}, year = {1987}, month = {1987///}, pages = {79 - 86}, author = {Vishkin, Uzi} } @article {18103, title = {Parallel construction of a suffix tree}, journal = {Automata, Languages and Programming}, year = {1987}, month = {1987///}, pages = {314 - 325}, author = {Landau,G. and Schieber,B. and Vishkin, Uzi} } @article {18100, title = {Randomized parallel speedups for list ranking}, journal = {Journal of Parallel and Distributed Computing}, volume = {4}, year = {1987}, month = {1987///}, pages = {319 - 333}, author = {Vishkin, Uzi} } @article {18104, title = {Tight comparison bounds on the complexity of parallel sorting}, journal = {SIAM Journal on Computing}, volume = {16}, year = {1987}, month = {1987///}, pages = {458 - 464}, author = {Azar,Y. and Vishkin, Uzi} } @article {18115, title = {The accelerated centroid decomposition technique for optimal parallel tree evaluation in logarithmic time, Ultracomputer Note-108}, year = {1986}, month = {1986///}, institution = {TR-242, Department of Computer Science, Courant Institute NYU}, author = {Cole,R. and Vishkin, Uzi} } @conference {18112, title = {Approximate and exact parallel scheduling with applications to list, tree and graph problems}, booktitle = {27th Annual Symposium on Foundations of Computer Science}, year = {1986}, month = {1986///}, pages = {478 - 491}, author = {Cole,R. and Vishkin, Uzi} } @conference {18113, title = {Approximate scheduling, exact scheduling, and applications to parallel algorithms}, booktitle = {Proceedings Symposium on Foundations of Computer Science}, year = {1986}, month = {1986///}, pages = {478 - 491}, author = {Cole,R. and Vishkin, Uzi} } @conference {18114, title = {Deterministic coin tossing and accelerating cascades: micro and macro techniques for designing parallel algorithms}, booktitle = {Proceedings of the eighteenth annual ACM symposium on Theory of computing}, year = {1986}, month = {1986///}, pages = {206 - 219}, author = {Cole,R. and Vishkin, Uzi} } @article {18111, title = {Deterministic coin tossing with applications to optimal parallel list ranking}, journal = {Information and Control}, volume = {70}, year = {1986}, month = {1986///}, pages = {32 - 53}, author = {Cole,R. and Vishkin, Uzi} } @article {18108, title = {Efficient Parallel and Serial String Matching}, journal = {Computer Science Department Technical Report}, volume = {221}, year = {1986}, month = {1986///}, author = {Landau,G. M and Vishkin, Uzi} } @article {18107, title = {An efficient string matching algorithm with k differences for nucleotide and amino acid sequences}, journal = {Nucleic Acids Research}, volume = {14}, year = {1986}, month = {1986///}, pages = {31 - 31}, author = {Landau,G.
M and Vishkin, Uzi and Nussinov,R.} } @article {18109, title = {Efficient string matching with k mismatches}, journal = {Theoretical Computer Science}, volume = {43}, year = {1986}, month = {1986///}, pages = {239 - 249}, author = {Landau,G. M and Vishkin, Uzi} } @conference {18110, title = {Introducing efficient parallelism into approximate string matching and a new serial algorithm}, booktitle = {Proceedings of the eighteenth annual ACM symposium on Theory of computing}, year = {1986}, month = {1986///}, pages = {220 - 230}, author = {Landau,G. M and Vishkin, Uzi} } @article {18105, title = {Parallel ear decomposition search (EDS) and st-numbering in graphs}, journal = {Theoretical Computer Science}, volume = {47}, year = {1986}, month = {1986///}, pages = {277 - 298}, author = {Maon,Y. and Schieber,B. and Vishkin, Uzi} } @conference {18116, title = {Tight complexity bounds for parallel comparison sorting}, booktitle = {27th Annual Symposium on Foundations of Computer Science}, year = {1986}, month = {1986///}, pages = {502 - 510}, author = {Alon,N. and Azar,Y. and Vishkin, Uzi} } @article {18120, title = {Efficient implementation of a shifting algorithm}, journal = {Discrete Applied Mathematics}, volume = {12}, year = {1985}, month = {1985///}, pages = {71 - 80}, author = {Perl,Y. and Vishkin, Uzi} } @article {18119, title = {An efficient parallel biconnectivity algorithm}, journal = {SIAM Journal on Computing}, volume = {14}, year = {1985}, month = {1985///}, pages = {862 - 862}, author = {Tarjan,R. E and Vishkin, Uzi} } @article {18117, title = {On efficient parallel strong orientation}, journal = {Information Processing Letters}, volume = {20}, year = {1985}, month = {1985///}, pages = {235 - 240}, author = {Vishkin, Uzi} } @conference {18121, title = {Efficient string matching in the presence of errors}, booktitle = {Foundations of Computer Science, 1985., 26th Annual Symposium on}, year = {1985}, month = {1985///}, pages = {126 - 136}, author = {Landau,G. M and Vishkin, Uzi} } @article {18123, title = {Optimal parallel generation of a computation tree form}, journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)}, volume = {7}, year = {1985}, month = {1985///}, pages = {348 - 357}, author = {Bar-On,I. and Vishkin, Uzi} } @article {18118, title = {Optimal parallel pattern matching in strings}, journal = {Information and Control}, volume = {67}, year = {1985}, month = {1985///}, pages = {91 - 113}, author = {Vishkin, Uzi} } @article {18122, title = {Solving NP-hard problems in {\textquoteleft}almost trees{\textquoteright}: Vertex cover}, journal = {Discrete Applied Mathematics}, volume = {10}, year = {1985}, month = {1985///}, pages = {27 - 45}, author = {Coppersmith,D. and Vishkin, Uzi} } @conference {18127, title = {Finding biconnected components and computing tree functions in logarithmic parallel time}, booktitle = {Foundations of Computer Science, 1984., 25th Annual Symposium on}, year = {1984}, month = {1984///}, pages = {12 - 12}, author = {Tarjan,R. E and Vishkin, Uzi} } @article {18131, title = {Finding Euler tours in parallel}, journal = {Journal of Computer and System Sciences}, volume = {29}, year = {1984}, month = {1984///}, pages = {330 - 337}, author = {Atallah,M.
and Vishkin, Uzi} } @article {18125, title = {An optimal parallel connectivity algorithm}, journal = {Discrete Applied Mathematics}, volume = {9}, year = {1984}, month = {1984///}, pages = {197 - 207}, author = {Vishkin, Uzi} } @article {18124, title = {A parallel-design distributed-implementation (PDDI) general-purpose computer}, journal = {Theoretical Computer Science}, volume = {32}, year = {1984}, month = {1984///}, pages = {157 - 172}, author = {Vishkin, Uzi} } @article {18129, title = {Randomized and deterministic simulations of PRAMs by parallel machines with restricted granularity of parallel memories}, journal = {Acta Informatica}, volume = {21}, year = {1984}, month = {1984///}, pages = {339 - 374}, author = {Mehlhorn,K. and Vishkin, Uzi} } @conference {18126, title = {Randomized speed-ups in parallel computation}, booktitle = {Proceedings of the sixteenth annual ACM symposium on Theory of computing}, year = {1984}, month = {1984///}, pages = {230 - 239}, author = {Vishkin, Uzi} } @article {18130, title = {Solving NP-hard problems on graphs that are almost trees and an application to facility location problems}, journal = {Journal of the ACM (JACM)}, volume = {31}, year = {1984}, month = {1984///}, pages = {459 - 473}, author = {Gurevich,Y. and Stockmeyer,L. and Vishkin, Uzi} } @article {18133, title = {Dynamic parallel memories}, journal = {Information and Control}, volume = {56}, year = {1983}, month = {1983///}, pages = {174 - 182}, author = {Vishkin, Uzi and Wigderson,A.} } @article {18136, title = {Efficient distributed orientation algorithm}, journal = {IEEE Transactions on Information Theory}, volume = {29}, year = {1983}, month = {1983///}, pages = {624 - 628}, author = {Vishkin, Uzi} } @article {18135, title = {Implementation of simultaneous memory address access in models that forbid it}, journal = {Journal of Algorithms}, volume = {4}, year = {1983}, month = {1983///}, pages = {45 - 50}, author = {Vishkin, Uzi} } @article {18138, title = {Parallel computation on 2-3-trees}, journal = {RAIRO Informatique th{\'e}orique}, volume = {17}, year = {1983}, month = {1983///}, pages = {397 - 404}, author = {Paul,W. and Vishkin, Uzi and Wagener,H.} } @article {18137, title = {Parallel dictionaries on 2{\textendash}3 trees}, journal = {Automata, Languages and Programming}, year = {1983}, month = {1983///}, pages = {597 - 609}, author = {Paul,W. and Vishkin, Uzi and Wagener,H.} } @conference {18132, title = {Trade-offs between depth and width in parallel computation}, booktitle = {Foundations of Computer Science, 1983., 24th Annual Symposium on}, year = {1983}, month = {1983/11//}, pages = {146 - 153}, abstract = {A new technique for proving lower bounds for parallel computation is introduced. This technique enables us to obtain, for the first time, non-trivial tight lower bounds for shared-memory models of parallel computation that allow simultaneous read/write access to the same memory location. The size $m$ of the common memory is called communication width, or width in short. For a wide variety of problems (including parity and majority) we show that the time complexity $T$ (depth) and the communication width $m$ are related by the trade-off curve $mT^2 = \Omega(n)$ (where $n$ is the size of the input). This bound is tight for every $m \leq n/\log^2 n$. We extend our technique to prove an $mT^3 = \Omega(n)$ trade-off for a class of "simpler" functions (including Boolean OR) on a weaker model that forbids simultaneous write access.
This result improves the lower bound of Cook and Dwork [CD-82] when communication is limited.}, doi = {10.1109/SFCS.1983.77}, author = {Vishkin, Uzi and Wigderson,A.} } @article {18143, title = {Complexity of finding k-path-free dominating sets in graphs}, journal = {Information Processing Letters}, volume = {14}, year = {1982}, month = {1982///}, pages = {228 - 232}, author = {Bar-Yehuda,R. and Vishkin, Uzi} } @conference {18142, title = {A complexity theory for unbounded fan-in parallelism}, booktitle = {23rd Annual Symposium on Foundations of Computer Science}, year = {1982}, month = {1982///}, pages = {1 - 13}, author = {Chandra,A. K and Stockmeyer,L. J and Vishkin, Uzi} } @article {18141, title = {An $O(\log n)$ parallel connectivity algorithm}, journal = {Journal of Algorithms}, volume = {3}, year = {1982}, month = {1982///}, pages = {57 - 67}, author = {Shiloach,Y. and Vishkin, Uzi} } @article {18140, title = {An $O(n^2 \log n)$ parallel max-flow algorithm}, journal = {Journal of Algorithms}, volume = {3}, year = {1982}, month = {1982///}, pages = {128 - 146}, author = {Shiloach,Y. and Vishkin, Uzi} } @article {18144, title = {Finding the maximum, merging, and sorting in a parallel computation model}, journal = {Journal of Algorithms}, volume = {2}, year = {1981}, month = {1981///}, pages = {88 - 102}, author = {Shiloach,Y. and Vishkin, Uzi} } @article {15750, title = {Estimating the Largest Eigenvalue of a Positive Definite Matrix}, journal = {Mathematics of Computation}, volume = {33}, year = {1979}, month = {1979///}, pages = {1289 - 1292}, author = {O{\textquoteright}Leary, Dianne P. and Stewart, G.W. and Vandergraft,James S.} }