@article{17238,
  title    = {Integrating Statistics and Visualization for Exploratory Power: From Long-Term Case Studies to Design Guidelines},
  journal  = {IEEE Computer Graphics and Applications},
  volume   = {29},
  year     = {2009},
  month    = may # "/" # jun,
  pages    = {39--51},
  abstract = {Evaluating visual-analytics systems is challenging because laboratory-based controlled experiments might not effectively represent analytical tasks. One such system, SocialAction, integrates statistics and visualization in an interactive exploratory tool for social network analysis. This article describes results from long-term case studies with domain experts and extends established design goals for information visualization.},
  keywords = {case studies, Control systems, Data analysis, data mining, data visualisation, Data visualization, data-mining, design guidelines, Employment, exploration, Filters, Guidelines, Information Visualization, insights, laboratory-based controlled experiments, Performance analysis, social network analysis, Social network services, social networking (online), social networks, SocialAction, statistical analysis, Statistics, visual analytics, visual-analytics systems, Visualization},
  issn     = {0272-1716},
  doi      = {10.1109/MCG.2009.44},
  author   = {Perer, A. and Shneiderman, Ben},
}

@article{17397,
  title    = {Temporal Summaries: Supporting Temporal Categorical Searching, Aggregation and Comparison},
  journal  = {IEEE Transactions on Visualization and Computer Graphics},
  volume   = {15},
  year     = {2009},
  month    = nov # "/" # dec,
  pages    = {1049--1056},
  abstract = {When analyzing thousands of event histories, analysts often want to see the events as an aggregate to detect insights and generate new hypotheses about the data. An analysis tool must emphasize both the prevalence and the temporal ordering of these events. Additionally, the analysis tool must also support flexible comparisons to allow analysts to gather visual evidence. In a previous work, we introduced align, rank, and filter (ARF) to accentuate temporal ordering. In this paper, we present temporal summaries, an interactive visualization technique that highlights the prevalence of event occurrences. Temporal summaries dynamically aggregate events in multiple granularities (year, month, week, day, hour, etc.) for the purpose of spotting trends over time and comparing several groups of records. They provide affordances for analysts to perform temporal range filters. We demonstrate the applicability of this approach in two extensive case studies with analysts who applied temporal summaries to search, filter, and look for patterns in electronic health records and academic records.},
  keywords = {Aggregates, Collaborative work, Computational Biology, Computer Graphics, Data analysis, data visualisation, Data visualization, Databases, Factual, Displays, Event detection, Filters, Heparin, History, Human computer interaction, Human-computer interaction, HUMANS, Information Visualization, Interaction design, interactive visualization technique, Medical Records Systems, Computerized, Pattern Recognition, Automated, Performance analysis, Springs, temporal categorical data visualization, temporal categorical searching, temporal ordering, temporal summaries, Thrombocytopenia, Time factors},
  issn     = {1077-2626},
  doi      = {10.1109/TVCG.2009.187},
  author   = {Wang, T. D. and Plaisant, Catherine and Shneiderman, Ben and Spring, Neil and Roseman, D. and Marchand, G. and Mukherjee, V. and Smith, M.},
}

@inproceedings{15694,
  title        = {Immunity-Based Epidemic Routing in Intermittent Networks},
  booktitle    = {5th Annual IEEE Communications Society Conference on Sensor, Mesh and Ad Hoc Communications and Networks, 2008. SECON {\textquoteright}08},
  year         = {2008},
  month        = jun,
  pages        = {609--611},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {In this research, we propose to modify and extend epidemic routing used in intermittent networks. In particular, we propose to include immunity-based information disseminated in the reverse direction once messages get delivered to their destination. The goal is to design a more efficient routing protocol in terms of resource utilization. The idea is to analyze and evaluate the network performance using an immunity scheme in the context of epidemic routing and its variants. The reverse dissemination of such information requires minimal resources and the tradeoff in timely purging of delivered messages can be significant. We are using ns2 to implement a detailed simulation of the proposed immunity-based epidemic routing.},
  keywords     = {Analytical models, Delay, delivered messages, Disruption tolerant networking, Educational institutions, immunity-based epidemic routing, information dissemination, intermittent networks, Mobile ad hoc networks, Network topology, Performance analysis, Resource management, resource utilization, routing protocol, routing protocols, telecommunication network topology},
  isbn         = {978-1-4244-1777-3},
  doi          = {10.1109/SAHCN.2008.86},
  author       = {Mundur, Padma and Seligman, M. and Lee, Jin Na},
}

@article{17591,
  title    = {Efficient Lookup on Unstructured Topologies},
  journal  = {IEEE Journal on Selected Areas in Communications},
  volume   = {25},
  year     = {2007},
  month    = jan,
  pages    = {62--72},
  abstract = {We present LMS, a protocol for efficient lookup on unstructured networks. Our protocol uses a virtual namespace without imposing specific topologies. It is more efficient than existing lookup protocols for unstructured networks, and thus is an attractive alternative for applications in which the topology cannot be structured as a Distributed Hash Table (DHT). We present analytic bounds for the worst-case performance of LMS. Through detailed simulations (with up to 100,000 nodes), we show that the actual performance on realistic topologies is significantly better. We also show in both simulations and a complete implementation (which includes over five hundred nodes) that our protocol is inherently robust against multiple node failures and can adapt its replication strategy to optimize searches according to a specific heuristic. Moreover, the simulation demonstrates the resilience of LMS to high node turnover rates, and that it can easily adapt to orders of magnitude changes in network size. The overhead incurred by LMS is small, and its performance approaches that of DHTs on networks of similar size.},
  keywords = {Computer science, DHT, distributed algorithms, Distributed computing, distributed hash table, Least squares approximation, LMS, local minima search, lookup protocol, Network topology, node failures, Peer to peer computing, Performance analysis, Protocols, replication strategy, Resilience, Robustness, table lookup, telecommunication network topology, unstructured network topology},
  issn     = {0733-8716},
  doi      = {10.1109/JSAC.2007.07007},
  author   = {Morselli, R. and Bhattacharjee, Bobby and Marsh, M. A. and Srinivasan, Aravind},
}

@article{15678,
  title    = {Class-Based Access Control for Distributed Video-on-Demand Systems},
  journal  = {IEEE Transactions on Circuits and Systems for Video Technology},
  volume   = {15},
  year     = {2005},
  month    = jul,
  pages    = {844--853},
  abstract = {The focus of this paper is the analysis of threshold-based admission control policies for distributed video-on-demand (VoD) systems. Traditionally, admission control methods control access to a resource based on the resource capacity. We have extended that concept to include the significance of an arriving request to the VoD system by enforcing additional threshold restrictions in the admission control process on request classes deemed less significant. We present an analytical model for computing blocking performance of the VoD system under threshold-based admission control. Extending the same methodology to a distributed VoD architecture we show through simulation that the threshold performance conforms to the analytical model. We also show that threshold-based analysis can work in conjunction with other request handling policies and are useful for manipulating the VoD performance since we are able to distinguish between different request classes based on their merit. Enforcing threshold restrictions with the option of downgrading blocked requests in a multirate service environment results in improved performance at the same time providing different levels of quality of service (QoS). In fact, we show that the downgrade option combined with threshold restrictions is a powerful tool for manipulating an incoming request mix over which we have no control into a workload that the VoD system can handle.},
  keywords = {Access control, Admission control, Analytical models, blocking performance, class-based access control, Computational modeling, Computer architecture, Computer science, Distributed control, Distributed video-on-demand (VoD) system, distributed video-on-demand system, multimedia systems, multirate service model, Performance analysis, QoS, quality of service, request handling policy, resource allocation, resource capacity, telecommunication congestion control, threshold-based admission control, video on demand},
  issn     = {1051-8215},
  doi      = {10.1109/TCSVT.2005.848351},
  author   = {Mundur, Padma and Sood, A. K. and Simon, R.},
}

@article{15679,
  title    = {End-to-End Analysis of Distributed Video-on-Demand Systems},
  journal  = {IEEE Transactions on Multimedia},
  volume   = {6},
  year     = {2004},
  month    = feb,
  pages    = {129--141},
  abstract = {The focus of the research presented in this paper is the end-to-end analysis of a distributed Video-on-Demand (VoD) system. We analyze the distributed architecture of a VoD system to design global request handling and admission control strategies and evaluate them using global metrics. The performance evaluation methodology developed in this paper helps in determining efficient ways of using all resources in the VoD architecture within the constraints of providing guaranteed high quality service to each request. For instance, our simulation results show that request handling policies based on limited redirection of blocked requests to other resources perform better than load sharing policies. We also show that request handling policies based on redirection have simpler connection establishment semantics than load sharing policies and, therefore, are easily incorporated into reservation or signaling protocols.},
  keywords = {Admission control, admission control strategies, Analytical models, Computer science, distributed architecture, distributed video-on-demand systems, end-to-end analysis, global request handling, High-speed networks, Network servers, Next generation networking, Performance analysis, performance evaluation methodology, Protocols, request handling, reservation protocols, resource allocation, Resource management, signaling protocols, telecommunication congestion control, video on demand, Video sharing},
  issn     = {1520-9210},
  doi      = {10.1109/TMM.2003.819757},
  author   = {Mundur, Padma and Simon, R. and Sood, A. K.},
}

@article{12754,
  title    = {Performance Analysis of a Simple Vehicle Detection Algorithm},
  journal  = {Image and Vision Computing},
  volume   = {20},
  year     = {2002},
  month    = jan,
  pages    = {1--13},
  abstract = {We have performed an end-to-end analysis of a simple model-based vehicle detection algorithm for aerial parking lot images. We constructed a vehicle detection operator by combining four elongated edge operators designed to collect edge responses from the sides of a vehicle. We derived the detection and localization performance of this algorithm, and verified them by experiments. Performance degradation due to different camera angles and illuminations was also examined using simulated images. Another important aspect of performance characterization {\textemdash} whether and how much prior information about the scene improves performance {\textemdash} was also investigated. As a statistical diagnostic tool for the detection performance, a computational approach employing bootstrap was used.},
  keywords = {Aerial image, Bootstrap, empirical evaluation, Performance analysis, Vehicle detection},
  issn     = {0262-8856},
  doi      = {10.1016/S0262-8856(01)00059-2},
  url      = {http://www.sciencedirect.com/science/article/pii/S0262885601000592},
  author   = {Moon, H. and Chellapa, Rama and Rosenfeld, A.},
}

@article{14805,
  title    = {A Tool to Help Tune Where Computation Is Performed},
  journal  = {IEEE Transactions on Software Engineering},
  volume   = {27},
  year     = {2001},
  month    = jul,
  pages    = {618--629},
  abstract = {We introduce a new performance metric, called load balancing factor (LBF), to assist programmers when evaluating different tuning alternatives. The LBF metric differs from traditional performance metrics since it is intended to measure the performance implications of a specific tuning alternative rather than quantifying where time is spent in the current version of the program. A second unique aspect of the metric is that it provides guidance about moving work within a distributed or parallel program rather than reducing it. A variation of the LBF metric can also be used to predict the performance impact of changing the underlying network. The LBF metric is computed incrementally and online during the execution of the program to be tuned. We also present a case study that shows that our metric can accurately predict the actual performance gains for a test suite of six programs.},
  keywords = {Computational modeling, Current measurement, Distributed computing, distributed program, distributed programming, load balancing factor, Load management, parallel program, parallel programming, Performance analysis, performance evaluation, Performance gain, performance metric, Programming profession, software metrics, software performance evaluation, Testing, Time measurement, tuning},
  issn     = {0098-5589},
  doi      = {10.1109/32.935854},
  author   = {Eom, Hyeonsang and Hollingsworth, Jeffrey K.},
}

@inproceedings{12098,
  title        = {A Trend Analysis of Exploitations},
  booktitle    = {2001 IEEE Symposium on Security and Privacy, 2001. S\&P 2001. Proceedings},
  year         = {2001},
  pages        = {214--229},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {We have conducted an empirical study of a number of computer security exploits and determined that the rates at which incidents involving the exploit are reported to CERT can be modeled using a common mathematical framework. Data associated with three significant exploits involving vulnerabilities in phf, imap, and bind can all be modeled using the formula {$C = I + S \times \sqrt{M}$} where C is the cumulative count of reported incidents, M is the time since the start of the exploit cycle, and I and S are the regression coefficients determined by analysis of the incident report data. Further analysis of two additional exploits involving vulnerabilities in mountd and statd confirm the model. We believe that the models will aid in predicting the severity of subsequent vulnerability exploitations, based on the rate of early incident reports.},
  keywords     = {Computer science, computer security exploits, Data analysis, data mining, Educational institutions, exploitations, Performance analysis, Predictive models, Regression analysis, Risk management, security of data, software engineering, system intrusions, System software, trend analysis, vulnerabilities, vulnerability exploitation},
  isbn         = {0-7695-1046-9},
  doi          = {10.1109/SECPRI.2001.924300},
  author       = {Browne, H. K. and Arbaugh, William A. and McHugh, J. and Fithen, W. L.},
}

@inproceedings{15691,
  title        = {Integrated Admission Control in Hierarchical Video-on-Demand Systems},
  booktitle    = {IEEE International Conference on Multimedia Computing and Systems, 1999},
  volume       = {1},
  year         = {1999},
  month        = jul,
  pages        = {220--225},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {We develop a unified model of a hierarchical video-on-demand (VoD) system by integrating the storage and the network subsystems. Rather than restricting the analysis to an isolated subsystem the performance of the VoD system is analyzed as an end-to-end system. On a system-wide basis, request handling and admission control policies are designed to minimize global performance metrics. Through our simulation, we compare different request handling policies and show that a hierarchical VoD architecture with request handling that allows retrials at more than one resource will minimize overall blocking.},
  keywords     = {Admission control, Bandwidth, blocking, Computer science, Design methodology, end-to-end system, hierarchical video-on-demand systems, integrated admission control, Intelligent networks, Load management, Motion pictures, Network servers, network subsystem, performance, Performance analysis, performance evaluation, quality of service, request handling, resource allocation, Resource management, simulation, storage subsystem, video on demand, video servers},
  isbn         = {0-7695-0253-9},
  doi          = {10.1109/MMCS.1999.779196},
  author       = {Mundur, Padma and Simon, R. and Sood, A.},
}

@inproceedings{16363,
  title        = {Code Generation for Multiple Mappings},
  booktitle    = {Frontiers of Massively Parallel Computation, 1995. Proceedings. Frontiers {\textquoteright}95., Fifth Symposium on the},
  year         = {1995},
  month        = feb,
  pages        = {332--341},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {There has been a great amount of recent work toward unifying iteration reordering transformations. Many of these approaches represent transformations as affine mappings from the original iteration space to a new iteration space. These approaches show a great deal of promise, but they all rely on the ability to generate code that iterates over the points in these new iteration spaces in the appropriate order. This problem has been fairly well-studied in the case where all statements use the same mapping. We have developed an algorithm for the less well-studied case where each statement uses a potentially different mapping. Unlike many other approaches, our algorithm can also generate code from mappings corresponding to loop blocking. We address the important trade-off between reducing control overhead and duplicating code.},
  keywords     = {code generation, Computer science, Concurrent computing, control overhead, Educational institutions, iteration reordering transformations, Law, Legal factors, loop blocking, multiple mappings, optimisation, optimising compilers, Optimizing compilers, PARALLEL PROCESSING, Performance analysis, program compilers},
  isbn         = {0-8186-6965-9},
  doi          = {10.1109/FMPC.1995.380437},
  author       = {Kelly, W. and Pugh, William and Rosser, E.},
}

@inproceedings{12787,
  title        = {Efficient On-the-Fly Model Checking for {CTL$^*$}},
  booktitle    = {Tenth Annual IEEE Symposium on Logic in Computer Science, 1995. LICS {\textquoteright}95. Proceedings},
  year         = {1995},
  month        = jun,
  pages        = {388--397},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {This paper gives an on-the-fly algorithm for determining whether a finite-state system satisfies a formula in the temporal logic CTL*. The time complexity of our algorithm matches that of the best existing {\textquotedblleft}global algorithm{\textquotedblright} for model checking in this logic, and it performs as well as the best known global algorithms for the sublogics CTL and LTL. In contrast with these approaches, however, our routine constructs the state space of the system under consideration in a need-driven fashion and will therefore perform better in practice.},
  keywords     = {Algorithm design and analysis, Automata, computational complexity, Computer science, CTL, Encoding, finite automata, finite-state system, global algorithm, Logic, LTL, on-the-fly model checking, Performance analysis, Safety, State-space methods, sublogic, temporal logic, time complexity},
  isbn         = {0-8186-7050-9},
  doi          = {10.1109/LICS.1995.523273},
  author       = {Bhat, G. and Cleaveland, Rance and Grumberg, O.},
}

@inproceedings{18297,
  title        = {Early Vision Processing Using a Multi-Stage Diffusion Process},
  booktitle    = {1993 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 1993. Proceedings CVPR {\textquoteright}93},
  year         = {1993},
  month        = jun,
  pages        = {41--46},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {The use of a multistage diffusion process in the early processing of range data is examined. The input range data are interpreted as occupying a volume in 3-D space. Each diffusion stage simulates the process of diffusing part of the boundary of the volume into the volume. The outcome of the process can be used for both discontinuity detection and segmentation into shape homogeneous regions. The process is applied to synthetic noise-free and noisy step, roof, and valley edges as well as to real range images.},
  keywords     = {3-D space, Computational modeling, Computer vision, Diffusion processes, discontinuity detection, early vision processing, Educational institutions, Image edge detection, Image segmentation, Laboratories, multistage diffusion process, Noise shaping, noise-free edges, noisy edges, Performance analysis, roof edges, segmentation, SHAPE, shape homogeneous regions, step edges, valley edges},
  isbn         = {0-8186-3880-X},
  doi          = {10.1109/CVPR.1993.341003},
  author       = {Yacoob, Yaser and Davis, Larry S.},
}

@article{16795,
  title    = {Performance Comparison of Three Modern {DBMS} Architectures},
  journal  = {IEEE Transactions on Software Engineering},
  volume   = {19},
  year     = {1993},
  month    = feb,
  pages    = {120--138},
  abstract = {The introduction of powerful workstations connected through local area networks (LANs) inspired new database management system (DBMS) architectures that offer high performance characteristics. The authors examine three such software architecture configurations: client-server (CS), the RAD-UNIFY type of DBMS (RU), and enhanced client-server (ECS). Their specific functional components and design rationales are discussed. Three simulation models are used to provide a performance comparison under different job workloads. Simulation results show that the RU almost always performs slightly better than the CS, especially under light workloads, and that ECS offers significant performance improvement over both CS and RU. Under reasonable update rates, the ECS over CS (or RU) performance ratio is almost proportional to the number of participating clients (for less than 32 clients). The authors also examine the impact of certain key parameters on the performance of the three architectures and show that ECS is more scalable than the other two.},
  keywords = {client-server, Computational modeling, Computer architecture, database management systems, DBMS architectures, design rationales, functional components, Indexes, Local area networks, Military computing, Packaging, Performance analysis, performance evaluation, RAD-UNIFY type, simulation models, simulation results, Software architecture, software architecture configurations, software engineering, Throughput, Workstations},
  issn     = {0098-5589},
  doi      = {10.1109/32.214830},
  author   = {Delis, A. and Roussopoulos, Nick},
}

@article{14791,
  title    = {{IPS-2}: The Second Generation of a Parallel Program Measurement System},
  journal  = {IEEE Transactions on Parallel and Distributed Systems},
  volume   = {1},
  year     = {1990},
  month    = apr,
  pages    = {206--217},
  abstract = {IPS, a performance measurement system for parallel and distributed programs, is currently running on its second implementation. IPS{\textquoteright}s model of parallel programs uses knowledge about the semantics of a program{\textquoteright}s structure to provide two important features. First, IPS provides a large amount of performance data about the execution of a parallel program, and this information is organized so that access to it is easy and intuitive. Secondly, IPS provides performance analysis techniques that help to guide the programmer automatically to the location of program bottlenecks. The first implementation of IPS was a testbed for the basic design concepts, providing experience with a hierarchical program and measurement model, interactive program analysis, and automatic guidance techniques. It was built on the Charlotte distributed operating system. The second implementation, IPS-2, extends the basic system with new instrumentation techniques, an interactive and graphical user interface, and new automatic guidance analysis techniques. This implementation runs on 4.3BSD UNIX systems, on the VAX, DECstation, Sun 4, and Sequent Symmetry multiprocessor.},
  keywords = {4.3BSD UNIX systems, automatic guidance techniques, Automatic testing, Charlotte distributed operating system, CPA, DECstation, design concepts, distributed programs, graphical user interface, Graphical user interfaces, Instruments, interactive program analysis, IPS-2, measurement, message systems, network operating systems, Operating systems, parallel program measurement system, parallel programming, parallel programs, Performance analysis, performance analysis techniques, performance evaluation, performance measurement system, Power system modeling, program bottlenecks, program diagnostics, Programming profession, semantics, Sequent Symmetry multiprocessor, shared-memory systems, software tools, Springs, Sun, Sun 4, Unix, VAX},
  issn     = {1045-9219},
  doi      = {10.1109/71.80132},
  author   = {Miller, B. P. and Clark, M. and Hollingsworth, Jeffrey K. and Kierstead, S. and Lim, S.-S. and Torzewski, T.},
}