@conference {19290, title = {Loop transformations for interface-based hierarchies in SDF graphs}, booktitle = {2010 21st IEEE International Conference on Application-specific Systems Architectures and Processors (ASAP)}, year = {2010}, month = {2010}, pages = {341 - 344}, abstract = {Data-flow has proven to be an attractive computation model for programming digital signal processing (DSP) applications. A restricted version of data-flow, termed synchronous data-flow (SDF), offers strong compile-time predictability properties, but has limited expressive power. A new type of hierarchy (Interface-based SDF) has been proposed allowing more expressivity while maintaining its predictability. One of the main problems with this hierarchical SDF model is the lack of trade-off between parallelism and network clustering. This paper presents a systematic method for applying an important class of loop transformation techniques in the context of interface-based SDF semantics. The resulting approach provides novel capabilities for integrating parallelism extraction properties of the targeted loop transformations with the useful modeling, analysis, and code reuse properties provided by SDF.}, keywords = {Application software, code generation, Computer architecture, Computer interfaces, Data-Flow programming, Digital signal processing, Loop parallelization, PARALLEL PROCESSING, Power engineering computing, Power system modeling, Processor scheduling, Programming profession, scheduling, SDF graph, system recovery}, author = {Piat, J. and Bhattacharyya, Shuvra S. and Raulet, M.} } @conference {12479, title = {Moving vistas: Exploiting motion for describing scenes}, booktitle = {2010 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2010}, month = {2010/06/13/18}, pages = {1911 - 1918}, publisher = {IEEE}, organization = {IEEE}, abstract = {Scene recognition in an unconstrained setting is an open and challenging problem with wide applications. In this paper, we study the role of scene dynamics for improved representation of scenes. We subsequently propose dynamic attributes which can be augmented with spatial attributes of a scene for semantically meaningful categorization of dynamic scenes. We further explore accurate and generalizable computational models for characterizing the dynamics of unconstrained scenes. The large intra-class variation due to unconstrained settings and the complex underlying physics present challenging problems in modeling scene dynamics. Motivated by these factors, we propose using the theory of chaotic systems to capture dynamics. Due to the lack of a suitable dataset, we compiled a dataset of {\textquoteleft}in-the-wild{\textquoteright} dynamic scenes. Experimental results show that the proposed framework leads to the best classification rate among other well-known dynamic modeling techniques. We also show how these dynamic features provide a means to describe dynamic scenes with motion-attributes, which then leads to meaningful organization of the video data.}, keywords = {Application software, Automation, Chaos, chaotic system, Computational modeling, Computer vision, dynamic scene categorization, Educational institutions, HUMANS, image recognition, in the wild dynamic scene, Layout, motion attribute, natural scenes, Physics, probability, scene recognition, Snow, video data}, isbn = {978-1-4244-6984-0}, doi = {10.1109/CVPR.2010.5539864}, author = {Shroff, N. and Turaga, P.
and Chellappa, Rama} } @conference {19296, title = {Simulating dynamic communication systems using the core functional dataflow model}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010}, pages = {1538 - 1541}, abstract = {The latest communication technologies invariably consist of modules with dynamic behavior. There exists a number of design tools for communication system design with their foundation in dataflow modeling semantics. These tools must not only support the functional specification of dynamic communication modules and subsystems but also provide accurate estimation of resource requirements for efficient simulation and implementation. We explore this trade-off - between flexible specification of dynamic behavior and accurate estimation of resource requirements - using a representative application employing an adaptive modulation scheme. We propose an approach for precise modeling of such applications based on a recently-introduced form of dynamic dataflow called core functional dataflow. From our proposed modeling approach, we show how parameterized looped schedules can be generated and analyzed to simulate applications with low run-time overhead as well as guaranteed bounded memory execution. We demonstrate our approach using the Advanced Design System from Agilent Technologies, Inc., which is a commercial tool for design and simulation of communication systems.}, keywords = {adaptive modulation, Analytical models, Application software, Computational modeling, core functional dataflow model, Dataflow, dataflow modeling semantics, design tools, Digital signal processing, dynamic communication systems, functional specification, Hardware, modeling and simulation, Power system modeling, Predictive models, Processor scheduling, Production, Signal processing, software tools, wireless communication}, author = {Sane, N. and Chia-Jui Hsu and Pino,J. L and Bhattacharyya, Shuvra S.} } @conference {16470, title = {ApproxRank: Estimating Rank for a Subgraph}, booktitle = {IEEE 25th International Conference on Data Engineering, 2009. ICDE {\textquoteright}09}, year = {2009}, month = {2009/04/29/March}, pages = {54 - 65}, publisher = {IEEE}, organization = {IEEE}, abstract = {Customized semantic query answering, personalized search, focused crawlers and localized search engines frequently focus on ranking the pages contained within a subgraph of the global Web graph. The challenge for these applications is to compute PageRank-style scores efficiently on the subgraph, i.e., the ranking must reflect the global link structure of the Web graph but it must do so without paying the high overhead associated with a global computation. We propose a framework of an exact solution and an approximate solution for computing ranking on a subgraph. The IdealRank algorithm is an exact solution with the assumption that the scores of external pages are known. We prove that the IdealRank scores for pages in the subgraph converge. Since the PageRank-style scores of external pages may not typically be available, we propose the ApproxRank algorithm to estimate scores for the subgraph. Both IdealRank and ApproxRank represent the set of external pages with an external node Λ and extend the subgraph with links to Λ. They also modify the PageRank-style transition matrix with respect to Λ.
We analyze the L1 distance between IdealRank scores and ApproxRank scores of the subgraph and show that it is within a constant factor of the L1 distance of the external pages (e.g., the true PageRank scores and uniform scores assumed by ApproxRank). We compare ApproxRank and a stochastic complementation approach (SC), a current best solution for this problem, on different types of subgraphs. ApproxRank has similar or superior performance to SC and typically improves on the runtime performance of SC by an order of magnitude or better. We demonstrate that ApproxRank provides a good approximation to PageRank for a variety of subgraphs.}, keywords = {Application software, ApproxRank, Computer applications, Crawlers, customized semantic query answering, Data engineering, Educational institutions, Explosions, focused crawlers, global Web graph, graph theory, IdealRank algorithm, Internet, localized search engines, PageRank-style, personalized search, Query processing, Runtime, Search engines, Stochastic processes, Web pages}, isbn = {978-1-4244-3422-0}, doi = {10.1109/ICDE.2009.108}, author = {Yao Wu and Raschid, Louiqa} } @conference {17453, title = {Understanding social computing participation with visual exploration tools}, booktitle = {International Symposium on Collaborative Technologies and Systems, 2009. CTS {\textquoteright}09}, year = {2009}, month = {2009/05/18/22}, pages = {xi - xii}, publisher = {IEEE}, organization = {IEEE}, abstract = {The rapid growth of socio-technical systems, social media and social networking websites has raised the importance of understanding the determinants of their success. The pressure to understand success is increased by the shift from playful discretionary applications to mission critical applications in government, business, and civic settings. These include homeland defense, energy sustainability, environmental conservation, disaster response, and community safety. Information visualization tools and statistical methods can both be helpful, but their utility grows when they are well-integrated. This talk will demonstrate novel tools for network evolution and offer a framework for thinking about motivating technology-mediated social participation.}, keywords = {Application software, Books, Collaborative tools, Computer science, Data visualization, Educational institutions, History, International collaboration, Social network services, Sociotechnical systems}, isbn = {978-1-4244-4584-4}, doi = {10.1109/CTS.2009.5067426}, author = {Shneiderman, Ben} } @conference {19633, title = {Configuration Reasoning and Ontology For Web}, booktitle = {IEEE International Conference on Services Computing, 2007. SCC 2007}, year = {2007}, month = {2007/07//}, pages = {387 - 394}, abstract = {Configuration plays a central role in the deployment and management of Web infrastructures and applications. A configuration often consists of assigning "values" to a pre-defined set of parameters defined in one or more files. Although the task of assigning values to (configuration) parameters looks simple, configuring infrastructures and applications is a very complex process. In this paper we present a framework for defining and analyzing configuration of an Apache server. We define the notion of "configuration space" of an Apache server as a set of possible values that can be assigned to configuration parameters. We then define the notion of an "obstacle" and "forbidden region" in the configuration space that should be avoided.
We model configuration space using a logical framework based on OWL (Web ontology language). The obstacles and forbidden regions in the configuration space are modeled as constraints in the logical framework. These obstacles and forbidden regions are essentially "anti-patterns" that a typical installation should avoid. Given an instance of a configuration (that is, a "point" in the configuration space) we then check if the instance is "obstacle free" using logical reasoning.}, keywords = {Apache server, Application software, configuration management, configuration reasoning, configuration space, File servers, Information security, Internet, knowledge representation languages, logical framework, logical reasoning, ontologies, ontologies (artificial intelligence), Orbital robotics, OWL, path planning, Robot kinematics, Runtime environment, Taxonomy, Web infrastructures management, Web ontology language, Web server}, author = {Dana Dachman-Soled and Sreedhar, V.C.} } @article {12352, title = {Parameterized Looped Schedules for Compact Representation of Execution Sequences in DSP Hardware and Software Implementation}, journal = {IEEE Transactions on Signal Processing}, volume = {55}, year = {2007}, month = {2007/06//}, pages = {3126 - 3138}, abstract = {In this paper, we present a technique for compact representation of execution sequences in terms of efficient looping constructs. Here, by a looping construct, we mean a compact way of specifying a finite repetition of a set of execution primitives. Such compaction, which can be viewed as a form of hierarchical run-length encoding (RLE), has application in many very large scale integration (VLSI) signal processing contexts, including efficient control generation for Kahn processes on field-programmable gate arrays (FPGAs), and software synthesis for static dataflow models of computation. In this paper, we significantly generalize previous models for loop-based code compaction of digital signal processing (DSP) programs to yield a configurable code compression methodology that exhibits a broad range of achievable tradeoffs. Specifically, we formally develop and apply to DSP hardware and software synthesis a parameterizable loop scheduling approach with compact format, dynamic reconfigurability, and low-overhead decompression}, keywords = {Application software, array signal processing, code compression methodology, compact representation, Compaction, data compression, Design automation, Digital signal processing, digital signal processing chips, DSP, DSP hardware, embedded systems, Encoding, Field programmable gate arrays, field-programmable gate arrays (FPGAs), FPGA, Hardware, hierarchical runlength encoding, high-level synthesis, Kahn process, loop-based code compaction, looping construct, parameterized loop schedules, program compilers, reconfigurable design, runlength codes, scheduling, Signal generators, Signal processing, Signal synthesis, software engineering, software implementation, static dataflow models, Very large scale integration, VLSI}, isbn = {1053-587X}, doi = {10.1109/TSP.2007.893964}, author = {Ming-Yung Ko and Zissulescu,C. and Puthenpurayil,S. and Bhattacharyya, Shuvra S. and Kienhuis,B. and Deprettere,E. 
F} } @conference {12347, title = {A Communication Interface for Multiprocessor Signal Processing Systems}, booktitle = {Proceedings of the 2006 IEEE/ACM/IFIP Workshop on Embedded Systems for Real Time Multimedia}, year = {2006}, month = {2006/10//}, pages = {127 - 132}, publisher = {IEEE}, organization = {IEEE}, abstract = {Parallelization of embedded software is often desirable for power/performance-related considerations for computation-intensive applications that frequently occur in the signal-processing domain. Although hardware support for parallel computation is increasingly available in embedded processing platforms, there is a distinct lack of effective software support. One of the most widely known efforts in support of parallel software is the message passing interface (MPI). However, MPI suffers from several drawbacks with regards to customization to specialized parallel processing contexts, and performance degradation for communication-intensive applications. In this paper, we propose a new interface, the signal passing interface (SPI), that is targeted toward signal processing applications and addresses the limitations of MPI for this important domain of embedded software by integrating relevant properties of MPI and coarse-grain dataflow modeling. SPI is much easier and more intuitive to use, and due to its careful specialization, more performance-efficient for the targeted application domain. We present our preliminary version of SPI, along with experiments using SPI on a practical face detection system that demonstrate the capabilities of SPI}, keywords = {application program interfaces, Application software, coarse-grain dataflow modeling, Computer applications, Concurrent computing, Context, data flow graphs, Embedded computing, Embedded software, face detection system, Hardware, Message passing, message passing interface, MPI, Multiprocessing systems, multiprocessor signal processing system, PARALLEL PROCESSING, signal passing interface, Signal processing, SPI}, isbn = {0-7803-9783-5}, doi = {10.1109/ESTMED.2006.321285}, author = {Sankalita Saha and Bhattacharyya, Shuvra S. and Wayne Wolf} } @conference {17887, title = {Comparing the Performance of High-Level Middleware Systems in Shared and Distributed Memory Parallel Environments}, booktitle = {Parallel and Distributed Processing Symposium, 2005. Proceedings. 19th IEEE International}, year = {2005}, month = {2005/04//}, pages = {30 - 30}, publisher = {IEEE}, organization = {IEEE}, abstract = {The utilization of toolkits for writing parallel and/or distributed applications has been shown to greatly enhance developer{\textquoteright}s productivity. Such an approach hides many of the complexities associated with writing these applications, rather than relying solely on programming language aids and parallel library support, such as MPI or PVM. In this work, we evaluate three different middleware systems that have been used to implement a computation and I/O-intensive data analysis application from the domain of computer vision. This study shows the benefits and overheads associated with each of the middleware systems, in different homogeneous computational environments and with different workloads. 
Our results lead the way toward being able to make better decisions for tuning the application environment, for selecting the appropriate middleware, and also for designing more powerful middleware systems to efficiently build and run highly complex applications in both parallel and distributed computing environments.}, keywords = {Application software, Computer science, Computer vision, Data analysis, Distributed computing, distributed computing environment, distributed memory parallel environment, distributed shared memory systems, Educational institutions, high-level middleware system, I/O-intensive data analysis application, Libraries, Middleware, parallel computing environment, parallel library support, parallel memories, programming language, programming languages, Runtime environment, shared memory parallel environment, Writing}, isbn = {0-7695-2312-9}, doi = {10.1109/IPDPS.2005.144}, author = {Kim,Jik-Soo and Andrade,H. and Sussman, Alan} } @conference {14793, title = {EMPS: an environment for memory performance studies}, booktitle = {Parallel and Distributed Processing Symposium, 2005. Proceedings. 19th IEEE International}, year = {2005}, month = {2005/04//}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper describes an overview of environment for memory performance studies (EMPS). EMPS is a framework to allow different data gathering and simulation tools to be composed together to predict the performance of parallel programs on a variety of current and future high end computing (HEC) systems. The framework seeks to combine the automated nature of direct execution simulation with the predictive capabilities of performance modeling.}, keywords = {Application software, cache storage, Computational modeling, Concurrent computing, data gathering, Delay, digital simulation, Economic forecasting, EMP radiation effects, high end computing system, High performance computing, memory architecture, memory performance, Moore{\textquoteright}s Law, PARALLEL PROCESSING, parallel program, Predictive models, simulation tool, Writing}, isbn = {0-7695-2312-9}, doi = {10.1109/IPDPS.2005.196}, author = {Hollingsworth, Jeffrey K and Snavely, A. and Sbaraglia, S. and Ekanadham, K.} } @conference {17288, title = {Meaningful presentations of photo libraries: rationale and applications of bi-level radial quantum layouts}, booktitle = {Proceedings of the 5th ACM/IEEE-CS Joint Conference on Digital Libraries, 2005. JCDL {\textquoteright}05}, year = {2005}, month = {2005/06/07/11}, pages = {188 - 196}, publisher = {IEEE}, organization = {IEEE}, abstract = {Searching photo libraries can be made more satisfying and successful if search results are presented in a way that allows users to gain an overview of the photo categories. Since photo layouts on computer displays are the primary way that users get an overview, we propose a novel approach to show more photos in meaningful groupings. Photo layouts can be linear strips, or zoomable three dimensional arrangements, but the most common form is the two-dimensional grid. This paper introduces a novel bi-level hierarchical layout with motivating examples. In a bilevel hierarchy, one region is designated for primary content - an image, text, or combination. Adjacent to that region, groups of photos are placed radially in an ordered fashion, such that the relationship of the single primary region to its many secondary regions is apparent. 
A compelling aspect is the interactive experience in which the layout is dynamically resized, allowing users to rapidly, incrementally, and reversibly alter the dimensions and content. It can accommodate hundreds of photos in dozens of regions, can be customized in a corner or center layout, and can scale from an element on a web page to a large poster size. On typical displays (1024 {\texttimes} 1280 or 1200 {\texttimes} 1600 pixels), bi-level radial quantum layouts can conveniently accommodate 2-20 regions with tens or hundreds of photos per region}, keywords = {1024 pixel, 1200 pixel, 1280 pixel, 1310720 pixel, 1600 pixel, 1920000 pixel, Application software, bi-level radial quantum layouts, Computer displays, Computer science, digital libraries, Educational institutions, Image retrieval, Layout, layout generation, Lifting equipment, linear strips, Permission, photo layouts, photo library searching, photo management, Photography, Quantum computing, software libraries, Strips, two-dimensional grid, User interfaces, visual databases, visual presentation, zoomable three dimensional arrangements}, isbn = {1-58113-876-8}, doi = {10.1145/1065385.1065431}, author = {Kustanowitz,J. and Shneiderman, Ben} } @conference {19665, title = {Multitarget tracking with split and merged measurements}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2005. CVPR 2005}, volume = {1}, year = {2005}, month = {2005/06//}, pages = {605 - 610 vol. 1}, abstract = {In many multitarget tracking applications in computer vision, a detection algorithm provides locations of potential targets. Subsequently, the measurements are associated with previously estimated target trajectories in a data association step. The output of the detector is often imperfect and the detection data may include multiple, split measurements from a single target or a single merged measurement from several targets. To address this problem, we introduce a multiple hypothesis tracker for interacting targets that generate split and merged measurements. The tracker is based on an efficient Markov chain Monte Carlo (MCMC) based auxiliary variable particle filter. The particle filter is Rao-Blackwellized such that the continuous target state parameters are estimated analytically, and an MCMC sampler generates samples from the large discrete space of data associations. In addition, we include experimental results in a scenario where we track several interacting targets that generate these split and merged measurements.}, keywords = {Application software, Computer vision, Detection algorithms, Detectors, filtering theory, Markov chain Monte Carlo based auxiliary variable particle filter, Markov processes, merged measurements, Monte Carlo methods, multiple hypothesis tracker, multitarget tracking, parameter estimation, Particle filters, Particle tracking, Rao-Blackwellized filter, split measurements, target tracking, Trajectory}, author = {Zia Khan and Balch, T. and Dellaert, F.} } @article {14768, title = {Resource policing to support fine-grain cycle stealing in networks of workstations}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {15}, year = {2004}, month = {2004/10//}, pages = {878 - 892}, abstract = {We present the design, implementation, and performance evaluation of a suite of resource policing mechanisms that allow guest processes to efficiently and unobtrusively exploit otherwise idle workstation resources.
Unlike traditional policies that harvest cycles only from unused machines, we employ fine-grained cycle stealing to exploit resources even from machines that have active users. We developed a suite of kernel extensions that enable these policies to operate without significantly impacting host processes: 1) a new starvation-level CPU priority for guest jobs, 2) a new page replacement policy that imposes hard bounds on physical memory usage by guest processes, and 3) a new I/O scheduling mechanism called rate windows that throttle guest processes{\textquoteright} usage of I/O and network bandwidth. We evaluate both the individual impacts of each mechanism, and their utility for our fine-grain cycle stealing.}, keywords = {Application software, Bandwidth, cluster computing, Computer networks, Computer Society, Concurrent computing, cycle stealing, grid computing, I/O scheduling, Intelligent networks, Kernel, network bandwidth, networks of workstations, page replacement policy, parallel computing, performance evaluation, Processor scheduling, resource allocation, resource scheduling, starvation-level CPU priority, workstation clusters, workstation resources, Workstations}, isbn = {1045-9219}, doi = {10.1109/TPDS.2004.58}, author = {Ryu, K. D and Hollingsworth, Jeffrey K} } @conference {17878, title = {Improving access to multi-dimensional self-describing scientific datasets}, booktitle = {3rd IEEE/ACM International Symposium on Cluster Computing and the Grid, 2003. Proceedings. CCGrid 2003}, year = {2003}, month = {2003/05/12/15}, pages = {172 - 179}, publisher = {IEEE}, organization = {IEEE}, abstract = {Applications that query into very large multidimensional datasets are becoming more common. Many self-describing scientific data file formats have also emerged, which have structural metadata to help navigate the multi-dimensional arrays that are stored in the files. The files may also contain application-specific semantic metadata. In this paper, we discuss efficient methods for performing searches for subsets of multi-dimensional data objects, using semantic information to build multidimensional indexes, and group data items into properly sized chunks to maximize disk I/O bandwidth. This work is the first step in the design and implementation of a generic indexing library that will work with various high-dimension scientific data file formats containing semantic information about the stored data. To validate the approach, we have implemented indexing structures for NASA remote sensing data stored in the HDF format with a specific schema (HDF-EOS), and show the performance improvements that are gained from indexing the datasets, compared to using the existing HDF library for accessing the data.}, keywords = {Application software, application-specific semantic metadata, Bandwidth, Computer science, database indexing, disk I/O bandwidth, distributed databases, Educational institutions, Indexing, indexing structures, Libraries, meta data, Middleware, multidimensional arrays, multidimensional datasets, Multidimensional systems, NASA, NASA remote sensing data, Navigation, query formulation, self-describing scientific data file formats, structural metadata, very large databases}, isbn = {0-7695-1919-9}, doi = {10.1109/CCGRID.2003.1199366}, author = {Nam,B. and Sussman, Alan} } @conference {12055, title = {Polydioptric camera design and 3D motion estimation}, booktitle = {2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003.
Proceedings}, volume = {2}, year = {2003}, month = {2003/06/18/20}, pages = {II-294 - II-301 vol. 2}, publisher = {IEEE}, organization = {IEEE}, abstract = {Most cameras used in computer vision applications are still based on the pinhole principle inspired by our own eyes. It has been found though that this is not necessarily the optimal image formation principle for processing visual information using a machine. We describe how to find the optimal camera for 3D motion estimation by analyzing the structure of the space formed by the light rays passing through a volume of space. Every camera corresponds to a sampling pattern in light ray space, thus the question of camera design can be rephrased as finding the optimal sampling pattern with regard to a given task. This framework suggests that large field-of-view multi-perspective (polydioptric) cameras are the optimal image sensors for 3D motion estimation. We conclude by proposing design principles for polydioptric cameras and describe an algorithm for such a camera that estimates its 3D motion in a scene independent and robust manner.}, keywords = {3D motion estimation, Algorithm design and analysis, Application software, CAMERAS, Computer vision, Eyes, field-of-view camera, Image motion analysis, image sampling, image sensor, Image sensors, Layout, light ray, Motion estimation, multiperspective camera, optimal camera, optimal image formation, optimal sampling pattern, pinhole principle, polydioptric camera design, ray space, scene independent estimation, space structure analysis, stereo image processing, visual information processing}, isbn = {0-7695-1900-8}, doi = {10.1109/CVPR.2003.1211483}, author = {Neumann, J. and Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @conference {14795, title = {Active Harmony: Towards Automated Performance Tuning}, booktitle = {Supercomputing, ACM/IEEE 2002 Conference}, year = {2002}, month = {2002/11//}, pages = {44 - 44}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we present the Active Harmony automated runtime tuning system. We describe the interface used by programs to make applications tunable. We present the Library Specification Layer which helps program library developers expose multiple variations of the same API using different algorithms. The Library Specification Language helps to select the most appropriate program library to tune the overall performance. We also present the optimization algorithm used to adjust parameters in the application and the libraries. Finally, we present results that show how the system is able to tune several real applications. The automated tuning system is able to tune the application parameters to within a few percent of the best value after evaluating only 11 out of over 1,700 possible configurations.}, keywords = {Application software, Automatic control, Computational modeling, Computer science, Computerized monitoring, Control systems, grid computing, Runtime library, software libraries, Specification languages}, isbn = {0-7695-1524-X}, doi = {10.1109/SC.2002.10062}, author = {Tapus, C. and I-Hsin Chung and Hollingsworth, Jeffrey K} } @conference {17583, title = {Dependent rounding in bipartite graphs}, booktitle = {The 43rd Annual IEEE Symposium on Foundations of Computer Science, 2002.
Proceedings}, year = {2002}, month = {2002///}, pages = {323 - 332}, publisher = {IEEE}, organization = {IEEE}, abstract = {We combine the pipage rounding technique of Ageev \& Sviridenko with a recent rounding method developed by Srinivasan (2001), to develop a new randomized rounding approach for fractional vectors defined on the edge-sets of bipartite graphs. We show various ways of combining this technique with other ideas, leading to the following applications: richer random-graph models for graphs with a given degree-sequence; improved approximation algorithms for: (i) throughput-maximization in broadcast scheduling, (ii) delay-minimization in broadcast scheduling, and (iii) capacitated vertex cover; fair scheduling of jobs on unrelated parallel machines. A useful feature of our method is that it lets us prove certain (probabilistic) per-user fairness properties.}, keywords = {Application software, Approximation algorithms, bipartite graph, bipartite graphs, broadcast channels, broadcast scheduling, Broadcasting, capacitated vertex cover, Character generation, computational complexity, Computer science, Delay, edge-sets, Educational institutions, fair scheduling, fractional vectors, graph theory, per-user fairness properties, pipage rounding technique, Processor scheduling, Random variables, random-graph models, randomized rounding approach, rounding method, scheduling, Scheduling algorithm, telecommunication computing, unrelated parallel machines}, isbn = {0-7695-1822-2}, doi = {10.1109/SFCS.2002.1181955}, author = {Gandhi,R. and Khuller, Samir and Parthasarathy,S. and Srinivasan, Aravind} } @conference {17835, title = {Multiple Query Optimization for Data Analysis Applications on Clusters of SMPs}, booktitle = {2nd IEEE/ACM International Symposium on Cluster Computing and the Grid, 2002}, year = {2002}, month = {2002/05/21/24}, pages = {154 - 154}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper is concerned with the efficient execution of multiple query workloads on a cluster of SMPs. We target applications that access and manipulate large scientific datasets. Queries in these applications involve user-defined processing operations and distributed data structures to hold intermediate and final results. Our goal is to implement system components to leverage previously computed query results and to effectively utilize processing power and aggregated I/O bandwidth on SMP nodes so that both single queries and multi-query batches can be efficiently executed.}, keywords = {Aggregates, Application software, Bandwidth, Data analysis, Data structures, Delay, Query processing, scheduling, Subcontracting, Switched-mode power supply}, isbn = {0-7695-1582-7}, doi = {10.1109/CCGRID.2002.1017123}, author = {Andrade,H. and Kurc, T. and Sussman, Alan and Saltz, J.} } @conference {11901, title = {A non-intrusive Kalman filter-based tracker for pursuit eye movement}, booktitle = {American Control Conference, 2002. Proceedings of the 2002}, volume = {2}, year = {2002}, month = {2002///}, pages = {1443 - 1447 vol. 2}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we introduce a new non-intrusive approach to estimating the eye position during pursuit motion of the eye. We introduce a new characterization for the pursuit eye movement. Our characterization is based on the decomposition of the pursuit eye motion into a deterministic component and random component.
We use a discrete Kalman filter to estimate the random component and calculate the deterministic component. We add the two components to obtain an estimate of the eye position. Simulation results are provided to illustrate the eye position estimation.}, keywords = {Application software, characterization, Computer vision, Current measurement, deterministic component, Electric variables measurement, eye position estimation, eye tracking, gaze tracking, Human computer interaction, Kalman filter, Kalman filters, Lenses, Motion estimation, Optical reflection, pursuit eye movement, pursuit motion, random component, Skin, tracking}, isbn = {0-7803-7298-0}, doi = {10.1109/ACC.2002.1023224}, url = {http://ieeexplore.ieee.org/ielx5/7965/22015/01023224.pdf?tp=\&arnumber=1023224\&isnumber=22015}, author = {Abd-Almageed, Wael and Fadali,M. S and Bebis,G.} } @conference {14725, title = {Practical programmable packets}, booktitle = {IEEE INFOCOM 2001. Twentieth Annual Joint Conference of the IEEE Computer and Communications Societies. Proceedings}, volume = {1}, year = {2001}, month = {2001///}, pages = {41 - 50 vol. 1}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present SNAP (safe and nimble active packets), a new scheme for programmable (or active) packets centered around a new low-level packet language. Unlike previous active packet approaches, SNAP is practical: namely, adding significant flexibility over IP without compromising safety and security or efficiency. In this paper we show how to compile from the well-known active packet language PLAN to SNAP, showing that SNAP retains PLAN{\textquoteright}s flexibility; give proof sketches of its novel approach to resource control; and present experimental data showing SNAP attains performance very close to that of a software IP router}, keywords = {active packet language, Application software, compiler, Contracts, Data security, efficiency, Explosives, INFORMATION SCIENCE, Internet, IP, IP networks, low-level packet language, packet switching, performance, PLAN, practical programmable packets, program compilers, Protection, resource control, Resource management, safe and nimble active packets, Safety, Security, SNAP, software IP router, Software performance, telecommunication security, Transport protocols}, isbn = {0-7803-7016-3}, doi = {10.1109/INFCOM.2001.916685}, author = {Moore,J. T and Hicks, Michael W. and Nettles,S.} } @conference {18463, title = {An audio-video front-end for multimedia applications}, booktitle = {2000 IEEE International Conference on Systems, Man, and Cybernetics}, volume = {2}, year = {2000}, month = {2000///}, pages = {786 - 791 vol. 2}, publisher = {IEEE}, organization = {IEEE}, abstract = {Applications such as video gaming, virtual reality, multimodal user interfaces and videoconferencing, require systems that can locate and track persons in a room through a combination of visual and audio cues, enhance the sound that they produce, and perform identification. We describe the development of a particular multimodal sensor fusion system that is portable, runs in real time and achieves these objectives.
The system employs novel algorithms for acoustical source location, video-based person tracking and overall system control, which are also described}, keywords = {Acoustic noise, acoustical source location, Application software, audio cues, audio-video front-end, CAMERAS, Computer vision, Microphones, multimedia applications, multimedia systems, multimodal sensor fusion system, multimodal user interfaces, Position measurement, REAL TIME, Real time systems, real-time systems, sensor fusion, sound, Speech recognition, User interfaces, video cameras, video gaming, video-based person tracking, Videoconference, videoconferencing, Virtual reality, visual cues, Working environment noise}, isbn = {0-7803-6583-6}, doi = {10.1109/ICSMC.2000.885945}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Davis, Larry S. and Haritaoglu,I.} } @conference {15687, title = {Network service selection for distributed multimedia applications}, booktitle = {Third International Conference on Computational Intelligence and Multimedia Applications, 1999. ICCIMA {\textquoteright}99. Proceedings}, year = {1999}, month = {1999///}, pages = {388 - 392}, publisher = {IEEE}, organization = {IEEE}, abstract = {An important question in the development of system support for distributed multimedia is the type of network service offered to applications. This paper compares two network service disciplines: weighted fair queueing (WFQ) and non-preemptive earliest deadline first (NEDF). We show that, for a broad class of high-bandwidth distributed multimedia applications, WFQ outperforms NEDF in terms of network throughput while still providing an application-adequate end-to-end service. This result holds despite the fact that NEDF offers applications far greater flexibility in terms of control over end-to-end delivery delay}, keywords = {Admission control, Application software, application-adequate end-to-end service, Bandwidth, Communication system traffic control, Computer science, Delay, distributed processing, end-to-end delivery delay control, flexibility, high-bandwidth distributed multimedia applications, interactive multimedia, multimedia systems, network service selection, network throughput, nonpreemptive earliest deadline first, queueing theory, Regulators, system support, telecommunication services, Throughput, Traffic control, weighted fair queueing}, isbn = {0-7695-0300-4}, doi = {10.1109/ICCIMA.1999.798561}, author = {Simon,R. and Sood,A. and Mundur, Padma} } @conference {17535, title = {Predicting the CPU availability of time-shared Unix systems on the computational grid}, booktitle = {The Eighth International Symposium on High Performance Distributed Computing, 1999. Proceedings}, year = {1999}, month = {1999///}, pages = {105 - 112}, publisher = {IEEE}, organization = {IEEE}, abstract = {Focuses on the problem of making short- and medium-term forecasts of CPU availability on time-shared Unix systems. We evaluate the accuracy with which availability can be measured using the Unix load average, the Unix utility {\textquotedblleft}vmstat{\textquotedblright} and the Network Weather Service (NWS) CPU sensor that uses both. We also examine the autocorrelation between successive CPU measurements to determine their degree of self-similarity. 
While our observations show a long-range autocorrelation dependence, we demonstrate how this dependence manifests itself in the short- and medium-term predictability of the CPU resources in our study}, keywords = {accuracy, Application software, Autocorrelation, Availability, Central Processing Unit, computational grid, correlation methods, CPU availability prediction, CPU resources predictability, CPU sensor, Dynamic scheduling, grid computing, Load forecasting, long-range autocorrelation dependence, medium-term forecasts, network operating systems, Network Weather Service, NWS, performance evaluation, self-similarity degree, short-term forecasts, successive CPU measurements, Time measurement, Time sharing computer systems, time-shared Unix systems, time-sharing systems, Unix, Unix load average, vmstat utility, Weather forecasting}, isbn = {0-7803-5681-0}, doi = {10.1109/HPDC.1999.805288}, author = {Wolski,R. and Spring, Neil and Hayes,J.} } @conference {14762, title = {Benchmarking a network of PCs running parallel applications}, booktitle = {IEEE International Performance, Computing and Communications, 1998. IPCCC {\textquoteright}98}, year = {1998}, month = {1998/02/16/18}, pages = {1 - 7}, publisher = {IEEE}, organization = {IEEE}, abstract = {Presents a benchmarking study that compares the performance of a network of four PCs connected by a 100 Mbit/s fast Ethernet running three different system software configurations: TCP/IP on Windows NT, TCP/IP on Linux and a lightweight message-passing protocol (U-Net active messages) on Linux. For each configuration, we report results for communication micro-benchmarks and the NAS (Numerical Aerodynamics Simulation) parallel benchmarks. For the NAS benchmarks, the overall running time using Linux TCP/IP was 12-500\% less than the Windows NT TCP/IP configuration. Likewise, the Linux U-Net based message-passing protocol outperformed the Linux TCP/IP version by 5-200\%+. We also show that, by using Linux U-Net, we are able to achieve 125 μs latency between two processes using PVM. Finally, we report that the default mathematical libraries supplied with NT (for both gcc and Visual C++) are substantially slower than the one supplied with Linux}, keywords = {100 Mbit/s, 125 mus, Aerodynamics, Application software, communication micro-benchmarks, default mathematical libraries, Delay, Ethernet, Ethernet networks, gcc, latency, lightweight message-passing protocol, Linux, Local area networks, mathematics computing, Message passing, microcomputer applications, Microsoft Windows NT, NAS parallel benchmarks, network operating systems, Numerical simulation, parallel applications, PARALLEL PROCESSING, PC network benchmarking, performance comparison, performance evaluation, Personal communication networks, Protocols, PVM, running time, software libraries, System software, system software configurations, TCP/IP, TCPIP, Transport protocols, U-Net active messages, Visual C++}, isbn = {0-7803-4468-5}, doi = {10.1109/PCCC.1998.659876}, author = {Hollingsworth, Jeffrey K and Guven, E. and Akinlar, C.} } @conference {17609, title = {Improved bounds and algorithms for hypergraph two-coloring}, booktitle = {39th Annual Symposium on Foundations of Computer Science, 1998. Proceedings}, year = {1998}, month = {1998/11/08/11}, pages = {684 - 693}, publisher = {IEEE}, organization = {IEEE}, abstract = {We show that for all large n, every n-uniform hypergraph with at most 0.7√(n/ln n){\texttimes}2^n edges can be two-colored.
We, in fact, present fast algorithms that output a proper two-coloring with high probability for such hypergraphs. We also derandomize and parallelize these algorithms, to derive NC^1 versions of these results. This makes progress on a problem of Erd{\H o}s (1963), improving the previous-best bound of n^{1/3-o(1)}{\texttimes}2^n due to Beck (1978). We further generalize this to a {\textquotedblleft}local{\textquotedblright} version, improving on one of the first applications of the Lov{\'a}sz Local Lemma}, keywords = {algorithms, Application software, Approximation algorithms, bounds, computational geometry, Computer science, Contracts, Erbium, graph colouring, History, hypergraph two-coloring, Lab-on-a-chip, MATHEMATICS, n-uniform hypergraph, Parallel algorithms, Polynomials, probability}, isbn = {0-8186-9172-7}, doi = {10.1109/SFCS.1998.743519}, author = {Radhakrishnan,J. and Srinivasan, Aravind} } @article {14804, title = {Modeling and evaluating design alternatives for an on-line instrumentation system: a case study}, journal = {IEEE Transactions on Software Engineering}, volume = {24}, year = {1998}, month = {1998/06//}, pages = {451 - 470}, abstract = {This paper demonstrates the use of a model-based evaluation approach for instrumentation systems (ISs). The overall objective of this study is to provide early feedback to tool developers regarding IS overhead and performance; such feedback helps developers make appropriate design decisions about alternative system configurations and task scheduling policies. We consider three types of system architectures: network of workstations (NOW), symmetric multiprocessors (SMP), and massively parallel processing (MPP) systems. We develop a Resource OCCupancy (ROCC) model for an on-line IS for an existing tool and parameterize it for an IBM SP-2 platform. This model is simulated to answer several {\textquotedblleft}what if{\textquotedblright} questions regarding two policies to schedule instrumentation data forwarding: collect-and-forward (CF) and batch-and-forward (BF). In addition, this study investigates two alternatives for forwarding the instrumentation data: direct and binary tree forwarding for an MPP system. Simulation results indicate that the BF policy can significantly reduce the overhead and that the tree forwarding configuration exhibits desirable scalability characteristics for MPP systems. Initial measurement-based testing results indicate more than 60 percent reduction in the direct IS overhead when the BF policy was added to Paradyn parallel performance measurement tool}, keywords = {alternative system configurations, Application software, batch-and-forward, collect-and-forward, Computer aided software engineering, design alternatives, design decisions, Feedback, IBM SP-2 platform, Instruments, massively parallel processing, model-based evaluation approach, Monitoring, multiprocessing programs, on-line instrumentation system, Paradyn parallel performance measurement tool, PARALLEL PROCESSING, Real time systems, scalability characteristics, software metrics, software tools, Space technology, symmetric multiprocessors, system architectures, system monitoring, System testing, task scheduling policies, tool developers, tree forwarding configuration, Workstations}, isbn = {0098-5589}, doi = {10.1109/32.689402}, author = {Waheed, A. and Rover, D.
T and Hollingsworth, Jeffrey K} } @conference {17627, title = {Multicommodity flow and circuit switching}, booktitle = {Proceedings of the Thirty-First Hawaii International Conference on System Sciences, 1998}, volume = {7}, year = {1998}, month = {1998/01/06/09}, pages = {459 - 465 vol. 7}, publisher = {IEEE}, organization = {IEEE}, abstract = {Given a set of request pairs in a network, the problem of routing virtual circuits with low congestion is to connect each pair by a path so that few paths use the same link in the network. We build on an earlier multicommodity flow based approach of Leighton and Rao (1996) to show that short flow paths lead to path selections with low congestion. This shows that such good path selections exist for constant-degree expanders with strong expansion, generalizing a result of (Broder et al., 1994). We also show, for infinitely many n, n-vertex undirected graphs G_n along with a set T of connection requests, such that: T is fractionally realizable using flow-paths that impose a (fractional) congestion of at most 1; but any rounding of such a flow to the given set of flow-paths, leads to a congestion of Ω(log n/log log n). This is progress on a long-standing open problem}, keywords = {Application software, Bandwidth, circuit switching, Computer science, constant-degree expanders, graph theory, High speed integrated circuits, Integrated circuit technology, Laboratories, low congestion, MATHEMATICS, multicommodity flow, National electric code, network routing, path selections, Routing, short flow paths, Switching circuits, switching theory, undirected graphs, virtual circuit routing}, isbn = {0-8186-8255-8}, doi = {10.1109/HICSS.1998.649241}, author = {Leighton,T. and Rao,S. and Srinivasan, Aravind} } @conference {14813, title = {MDL: a language and compiler for dynamic program instrumentation}, booktitle = {1997 International Conference on Parallel Architectures and Compilation Techniques, 1997. Proceedings}, year = {1997}, month = {1997/11/10/14}, pages = {201 - 212}, publisher = {IEEE}, organization = {IEEE}, abstract = {We use a form of dynamic code generation, called dynamic instrumentation, to collect data about the execution of an application program. Dynamic instrumentation allows us to instrument running programs to collect performance and other types of information. The instrumentation code is generated incrementally and can be inserted and removed at any time. Our instrumentation currently runs on the SPARC, PA-RISC, Power 2, Alpha, and x86 architectures. Specification of what data to collect is written in a specialized language called the Metric Description Language, that is part of the Paradyn Parallel Performance Tools. This language allows platform independent descriptions of how to collect performance data. It also provides a concise way to specify how to constrain performance data to particular resources such as modules, procedures, nodes, files, or message channels (or combinations of these resources).
We also describe the details of how we weave instrumentation into a running program}, keywords = {Alpha architecture, application program, application program interfaces, Application software, compiler generators, Computer science, dynamic code generation, Dynamic compiler, dynamic program instrumentation, Educational institutions, files, instrumentation code, Instruments, MDL, measurement, message channels, Message passing, Metric Description Language, modules, nodes, Operating systems, optimising compilers, PA-RISC, Paradyn Parallel Performance Tools, Parallel architectures, parallel programming, performance data, platform independent descriptions, Power 2 architecture, Power generation, procedures, program debugging, Program processors, running programs, Runtime, software metrics, SPARC, Specification languages, x86 architecture}, isbn = {0-8186-8090-3}, doi = {10.1109/PACT.1997.644016}, author = {Hollingsworth, Jeffrey K and Naim, O. and Miller, B. P and Zhichen Xu and Goncalves,M. J.R and Ling Zheng} } @conference {17577, title = {Computing with very weak random sources}, booktitle = {35th Annual Symposium on Foundations of Computer Science, 1994. Proceedings}, year = {1994}, month = {1994/11/20/22}, pages = {264 - 275}, publisher = {IEEE}, organization = {IEEE}, abstract = {For any fixed ε>0, we show how to simulate RP algorithms in time n^{O(log n)} using the output of a δ-source with min-entropy R^ε. Such a weak random source is asked once for R bits; it outputs an R-bit string such that any string has probability at most 2^{-R^ε}. If ε>1-1/(k+1), our BPP simulations take time n^{O(log^{(k)} n)} (log^{(k)} is the logarithm iterated k times). We also give a polynomial-time BPP simulation using Chor-Goldreich sources of min-entropy R^{Ω(1)}, which is optimal. We present applications to time-space tradeoffs, expander constructions, and the hardness of approximation. Also of interest is our randomness-efficient Leftover Hash Lemma, found independently by Goldreich and Wigderson}, keywords = {Application software, BPP simulations, Chor-Goldreich sources, computational complexity, Computational modeling, Computer science, Computer simulation, cryptography, distributed algorithms, expander constructions, hardness, MATHEMATICS, min-entropy, Physics computing, Polynomials, probability, R-bit string, randomness-efficient Leftover Hash Lemma, RP algorithms simulation, Testing, time-space tradeoffs, very weak random sources}, isbn = {0-8186-6580-7}, doi = {10.1109/SFCS.1994.365688}, author = {Srinivasan, Aravind and Zuckerman,D.} } @conference {14760, title = {Dynamic program instrumentation for scalable performance tools}, booktitle = {Proceedings of the Scalable High-Performance Computing Conference, 1994}, year = {1994}, month = {1994/05//}, pages = {841 - 850}, publisher = {IEEE}, organization = {IEEE}, abstract = {Presents a new technique called {\textquoteleft}dynamic instrumentation{\textquoteright} that provides efficient, scalable, yet detailed data collection for large-scale parallel applications. Our approach is unique because it defers inserting any instrumentation until the application is in execution. We can insert or change instrumentation at any time during execution by modifying the application{\textquoteright}s binary image. Only the instrumentation required for the currently selected analysis or visualization is inserted. As a result, our technique collects several orders of magnitude less data than traditional data collection approaches.
We have implemented a prototype of our dynamic instrumentation on the CM-5, and present results for several real applications. In addition, we include recommendations to operating system designers, compiler writers, and computer architects about the features necessary to permit efficient monitoring of large-scale parallel systems}, keywords = {Application software, binary image, compiler writing, Computer architecture, Computer displays, Computerized monitoring, Concurrent computing, data acquisition, data collection, data visualisation, Data visualization, dynamic program instrumentation, efficient monitoring, executing program, Instruments, large-scale parallel applications, Large-scale systems, operating system design, Operating systems, parallel programming, program analysis, program diagnostics, program visualization, Programming profession, Sampling methods, scalable performance tools, software tools}, isbn = {0-8186-5680-8}, doi = {10.1109/SHPCC.1994.296728}, author = {Hollingsworth, Jeffrey K and Miller, B. P and Cargille, J.} } @article {17104, title = {Dynamic queries for visual information seeking}, journal = {IEEE Software}, volume = {11}, year = {1994}, month = {1994/11//}, pages = {70 - 77}, abstract = {Considers how dynamic queries allow users to "fly through" databases by adjusting widgets and viewing the animated results. In studies, users reacted to this approach with an enthusiasm more commonly associated with video games. Adoption requires research into retrieval and display algorithms and user-interface design. The author discusses how experts may benefit from visual interfaces because they will be able to formulate more complex queries and interpret intricate results.}, keywords = {Algorithm design and analysis, animated results, animation, Application software, Command languages, complex queries, database management systems, Databases, display algorithms, Displays, dynamic queries, Educational institutions, Games, Graphical user interfaces, human factors, Query processing, retrieval, Runtime, Technological innovation, user-interface design, visual databases, visual information seeking, visual interfaces, widgets}, isbn = {0740-7459}, doi = {10.1109/52.329404}, author = {Shneiderman, Ben} } @article {12216, title = {A miniature pan-tilt actuator: the spherical pointing motor}, journal = {IEEE Transactions on Robotics and Automation}, volume = {10}, year = {1994}, month = {1994/06//}, pages = {298 - 308}, abstract = {A pan-tilt mechanism is a computer-controlled actuator designed to point an object such as a camera sensor. For applications in active vision, a pan-tilt mechanism should be accurate, fast, small, inexpensive and have low power requirements. The authors have designed and constructed a new type of actuator meeting these requirements, which incorporates both pan and tilt into a single, two-degree-of-freedom device. The spherical pointing motor (SPM) consists of three orthogonal motor windings in a permanent magnetic field, configured to move a small camera mounted on a gimbal. It is an absolute positioning device and is run open-loop. The SPM is capable of panning and tilting a load of 15 grams, for example a CCD image sensor, at rotational velocities of several hundred degrees per second with a repeatability of .15{\textdegree}. The authors have also built a miniature camera consisting of a single CCD sensor chip and miniature lens assembly that fits on the rotor of this motor.
@article {12216, title = {A miniature pan-tilt actuator: the spherical pointing motor}, journal = {IEEE Transactions on Robotics and Automation}, volume = {10}, year = {1994}, month = {1994/06//}, pages = {298 - 308}, abstract = {A pan-tilt mechanism is a computer-controlled actuator designed to point an object such as a camera sensor. For applications in active vision, a pan-tilt mechanism should be accurate, fast, small, inexpensive and have low power requirements. The authors have designed and constructed a new type of actuator meeting these requirements, which incorporates both pan and tilt into a single, two-degree-of-freedom device. The spherical pointing motor (SPM) consists of three orthogonal motor windings in a permanent magnetic field, configured to move a small camera mounted on a gimbal. It is an absolute positioning device and is run open-loop. The SPM is capable of panning and tilting a load of 15 grams, for example a CCD image sensor, at rotational velocities of several hundred degrees per second with a repeatability of 0.15{\textdegree}. The authors have also built a miniature camera consisting of a single CCD sensor chip and miniature lens assembly that fits on the rotor of this motor. In this paper, the authors discuss the theory of the SPM, which includes its basic electromagnetic principles, and derive the relationship between applied currents and resultant motor position. The authors present an automatic calibration procedure and discuss open- and closed-loop control strategies. Finally, the authors present the physical characteristics and results of their prototype.}, keywords = {absolute positioning device, active vision, Actuators, Application software, Assembly, automatic calibration, camera sensor, CAMERAS, CCD image sensors, CCD sensor chip, Charge coupled devices, Charge-coupled image sensors, closed-loop control strategies, computerised control, Lenses, Magnetic fields, miniature lens assembly, miniature pan-tilt actuator, open-loop control strategies, orthogonal motor windings, pan-tilt mechanism, Permanent magnet motors, position control, Scanning probe microscopy, spherical pointing motor, two-degree-of-freedom device}, isbn = {1042-296X}, doi = {10.1109/70.294205}, author = {Bederson, Benjamin B. and Wallace,R.S. and Schwartz,E.L.} }
@article {17430, title = {Touch screens now offer compelling uses}, journal = {IEEE Software}, volume = {8}, year = {1991}, month = {1991/03//}, pages = {93 - 94}, abstract = {Research on improving the user interfaces of touch screen applications is described. The advantages of touch screens are discussed, their current capabilities are examined, and possible future developments are considered.}, keywords = {Application software, Computer aided instruction, Computer displays, Computer science, Cultural differences, Fingers, future developments, information resources, Marketing and sales, Mice, Psychology, touch screen applications, touch sensitive screens, User interfaces}, isbn = {0740-7459}, doi = {10.1109/52.73754}, author = {Shneiderman, Ben} }
@article {16330, title = {Empirically guided software development using metric-based classification trees}, journal = {IEEE Software}, volume = {7}, year = {1990}, month = {1990/03//}, pages = {46 - 54}, abstract = {The identification of high-risk components early in the life cycle is addressed. A solution that casts this as a classification problem is examined. The proposed approach derives models of problematic components, based on their measurable attributes and those of their development processes. The models provide a basis for forecasting which components are likely to share the same high-risk properties, such as being error-prone or having a high development cost. Developers can use these classification techniques to localize the troublesome 20\% of the system. The method for generating the models, called automatic generation of metric-based classification trees, uses metrics from previous releases or projects to identify components that are historically high-risk.}, keywords = {Application software, Area measurement, automatic generation, classification problem, Classification tree analysis, Costs, empirically guided software development, Error correction, life cycle, measurable attributes, metric-based classification trees, Predictive models, Programming, software engineering, Software measurement, software metrics, Software systems}, isbn = {0740-7459}, doi = {10.1109/52.50773}, author = {Porter, Adam and Selby, R. W.} }
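Reader's note: the classification-tree entry above forecasts high-risk components from metrics of past releases. A minimal Python sketch follows, assuming scikit-learn is available; the metric choices, data, and threshold behavior are hypothetical illustrations, not the paper's model.

# Sketch: fit a classification tree on per-component metrics from
# previous releases, then flag components of a new release that are
# likely to be high-risk. Metrics and data are hypothetical.
from sklearn.tree import DecisionTreeClassifier

# Each row: [lines of code, cyclomatic complexity, prior fault fixes]
past_components = [[120, 4, 0], [2400, 38, 9], [300, 7, 1], [1800, 25, 6]]
was_high_risk   = [0, 1, 0, 1]   # 1 = historically error-prone or costly

tree = DecisionTreeClassifier(max_depth=2).fit(past_components, was_high_risk)

# Forecast for two components of the current release.
print(tree.predict([[2000, 30, 4], [150, 5, 0]]))  # e.g. [1 0]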
@article {16445, title = {A Special-Function Unit for Sorting and Sort-Based Database Operations}, journal = {IEEE Transactions on Computers}, volume = {C-35}, year = {1986}, month = {1986/12//}, pages = {1071 - 1077}, abstract = {Achieving efficiency in database management functions is a fundamental problem underlying many computer applications. Efficiency is difficult to achieve using the traditional general-purpose von Neumann processors. Recent advances in microelectronic technologies have prompted many new research activities in the design, implementation, and application of database machines which are tailored for processing database management functions. To build an efficient system, the software algorithms designed for this type of system need to be tailored to take advantage of the hardware characteristics of these machines. Furthermore, special hardware units should be used, if they are cost-effective, to execute or to assist the execution of these software algorithms.}, keywords = {Application software, Computer applications, Database machines, Hardware, hardware sorter, Microelectronics, Software algorithms, Software design, Software systems, sort-based algorithms for database operations, sorting, special-function processor, Technology management}, isbn = {0018-9340}, doi = {10.1109/TC.1986.1676715}, author = {Raschid, Louiqa and Fei,T. and Lam,H. and Su,S. Y.W} }
@article {16788, title = {Timing Requirements for Time-Driven Systems Using Augmented Petri Nets}, journal = {IEEE Transactions on Software Engineering}, volume = {SE-9}, year = {1983}, month = {1983/09//}, pages = {603 - 616}, abstract = {A methodology for the statement of timing requirements is presented for a class of embedded computer systems. The notion of a "time-driven" system is introduced which is formalized using a Petri net model augmented with timing information. Several subclasses of time-driven systems are defined with increasing levels of complexity. By deriving the conditions under which the Petri net model can be proven to be safe in the presence of time, timing requirements for modules in the system can be obtained. Analytical techniques are developed for proving safeness in the presence of time for the net constructions used in the defined subclasses of time-driven systems.}, keywords = {Application software, Concurrent computing, Control systems, Embedded computing, Embedded system, Helium, Modeling methodology, performance specifications, Petri nets, Power system modeling, Real time systems, real-time systems, Timing, timing requirements}, isbn = {0098-5589}, doi = {10.1109/TSE.1983.235261}, author = {Coolahan,J. E. and Roussopoulos, Nick} }
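Reader's note: the augmented-Petri-net entry above hinges on proving a net safe (no place ever holds more than one token). A minimal Python sketch of the untimed safeness check follows; the toy net is hypothetical, and the paper's timing augmentation is omitted for brevity.

# Sketch: a toy Petri net given as (input places, output places) per
# transition, with a brute-force test that no reachable marking puts
# more than one token in any place.
transitions = {
    "start":  ({"idle"}, {"busy"}),
    "finish": ({"busy"}, {"idle"}),
}
initial = {"idle": 1, "busy": 0}

def fire(marking, ins, outs):
    """Consume one token from each input place; add one to each output."""
    m = dict(marking)
    for p in ins:
        m[p] -= 1
    for p in outs:
        m[p] = m.get(p, 0) + 1
    return m

def is_safe(initial, transitions):
    """Explore all reachable markings; safe iff every place stays <= 1."""
    seen, frontier = set(), [tuple(sorted(initial.items()))]
    while frontier:
        state = frontier.pop()
        if state in seen:
            continue
        seen.add(state)
        marking = dict(state)
        if any(tokens > 1 for tokens in marking.values()):
            return False
        for ins, outs in transitions.values():
            if all(marking.get(p, 0) >= 1 for p in ins):
                frontier.append(tuple(sorted(fire(marking, ins, outs).items())))
    return True

print(is_safe(initial, transitions))  # True: this two-state loop is safe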
@article {17301, title = {Multiparty Grammars and Related Features for Defining Interactive Systems}, journal = {IEEE Transactions on Systems, Man and Cybernetics}, volume = {12}, year = {1982}, month = {1982/03//}, pages = {148 - 154}, abstract = {Multiparty grammars are introduced which contain labeled nonterminals to indicate the party that produces the terminal string. For interactive person-computer systems, both the user commands and system responses can be described by the linked BNF grammars. Multiparty grammars may also be used to describe communication among several people (by way of computers or in normal dialogue), network protocols among several machines, or complex interactions involving several people and machines. Visual features such as underlining, reversal, blinking, and color, window declarations, and dynamic operations dependent on cursor movement are also covered.}, keywords = {Application software, Computer aided instruction, Computer displays, Computer languages, Computer networks, Debugging, HUMANS, interactive systems, Protocols, Writing}, isbn = {0018-9472}, doi = {10.1109/TSMC.1982.4308798}, author = {Shneiderman, Ben} }
@conference {17051, title = {Database Program Conversion: A Framework For Research}, booktitle = {Fifth International Conference on Very Large Data Bases, 1979}, year = {1979}, month = {1979/10/03/5}, pages = {299 - 312}, publisher = {IEEE}, organization = {IEEE}, keywords = {Application software, Costs, Data conversion, Data structures, Databases, Delay, Prototypes, Technology planning, US Government, Writing}, doi = {10.1109/VLDB.1979.718145}, author = {Taylor,R. W and Fry,J. P and Shneiderman, Ben and Smith,D. C.P and Su,S. Y.W} }
@article {17195, title = {Human Factors Experiments in Designing Interactive Systems}, journal = {Computer}, volume = {12}, year = {1979}, month = {1979/12//}, pages = {9 - 19}, abstract = {Successful industrial design gracefully unites esthetics and function at minimum cost. However, designers face special problems when they apply their skills to interactive computer systems.}, keywords = {Application software, Computer languages, Design engineering, Home computing, human factors, interactive systems, Process design, Testing}, isbn = {0018-9162}, doi = {10.1109/MC.1979.1658571}, author = {Shneiderman, Ben} }
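Reader's note: the multiparty-grammars entry above (Shneiderman, 1982) labels each nonterminal with the party that produces its terminal string, so one grammar describes both user commands and system responses. A minimal Python sketch follows; the two-party dialogue grammar is a hypothetical illustration, not taken from the paper.

# Sketch: a two-party grammar whose nonterminals carry a party label
# (U = user, C = computer). Deriving the start symbol yields an
# alternating user/computer dialogue. The grammar is hypothetical.
import random

grammar = {
    "<session>":    [["<U:command>", "<C:response>"]],
    "<U:command>":  [["delete file"], ["list directory"]],
    "<C:response>": [["confirm? (y/n)"], ["done"]],
}

def derive(symbol):
    """Expand a labeled nonterminal into the terminal strings it yields."""
    if symbol not in grammar:          # terminal string: emitted as-is
        return [symbol]
    production = random.choice(grammar[symbol])
    return [tok for part in production for tok in derive(part)]

random.seed(0)
for line in derive("<session>"):
    print(line)   # one user turn, then one computer turn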