@conference {12105, title = {A case study of measuring process risk for early insights into software safety}, booktitle = {Software Engineering (ICSE), 2011 33rd International Conference on}, year = {2011}, month = {2011/05//}, pages = {623 - 632}, abstract = {In this case study, we examine software safety risk in three flight hardware systems in NASA{\textquoteright}s Constellation spaceflight program. We applied our Technical and Process Risk Measurement (TPRM) methodology to the Constellation hazard analysis process to quantify the technical and process risks involving software safety in the early design phase of these projects. We analyzed 154 hazard reports and collected metrics to measure the prevalence of software in hazards and the specificity of descriptions of software causes of hazardous conditions. We found that for 49-70\% of the 154 hazardous conditions, software could be a cause, or software was involved in the prevention of the hazardous condition. We also found that 12-17\% of the 2,013 hazard causes involved software, and that 23-29\% of all causes had a software control. The application of the TPRM methodology identified process risks in the application of the hazard analysis process itself that may lead to software safety risk.}, keywords = {NASA Constellation spaceflight program, TPRM, flight hardware systems, process measurement, software safety, technical and process risk measurement, aerospace computing, risk analysis, software control, software maintenance}, doi = {10.1145/1985793.1985881}, author = {Layman,L. and Basili, Victor R. and Zelkowitz, Marvin V and Fisher,K.L.} } @conference {19295, title = {Buffer management for multi-application image processing on multi-core platforms: Analysis and case study}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010}, pages = {1662 - 1665}, abstract = {Due to the limited amounts of on-chip memory, large volumes of data, and performance and power consumption overhead associated with interprocessor communication, efficient management of buffer memory is critical to multi-core image processing. To address this problem, this paper develops new modeling and analysis techniques based on dataflow representations, and demonstrates these techniques on a multi-core implementation case study involving multiple, concurrently-executing image processing applications. Our techniques are based on careful representation and exploitation of frame- or block-based operations, which involve repeated invocations of the same computations across regularly-arranged subsets of data. 
Using these new approaches to manage block-based image data, this paper demonstrates methods to analyze synchronization overhead and FIFO buffer sizes when mapping image processing applications onto heterogeneous multi-core architectures.}, keywords = {block-based image data, buffer memory management, buffer storage, concurrently-executing image processing application, data flow computing, data flow representation, data subset, Dataflow, Digital signal processing, Energy consumption, Energy management, Engineering management, FIFO buffer sizes, Hardware, Image analysis, IMAGE PROCESSING, image processing application, image representation, interprocessor communication, memory architecture, Memory management, multiapplication image processing, multicore image processing, multiprocessing, on-chip memory, power consumption, power consumption overhead, Runtime, scheduling, set theory, shared memory, storage management chips, synchronization overhead}, author = {Ko,Dong-Ik and Won, N. and Bhattacharyya, Shuvra S.} } @conference {19297, title = {FPGA-based design and implementation of the 3GPP-LTE physical layer using parameterized synchronous dataflow techniques}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010}, pages = {1510 - 1513}, abstract = {Synchronous dataflow (SDF) is a ubiquitous dataflow model of computation that has been studied extensively for efficient simulation and software synthesis of DSP applications. In recent years, parameterized SDF (PSDF) has evolved as a useful framework for modeling SDF graphs in which arbitrary parameters can be changed dynamically. However, the potential to enable efficient hardware synthesis has been treated relatively sparsely in the literature for SDF and even more so for the newer, more general PSDF model. This paper investigates efficient FPGA-based design and implementation of the physical layer for 3GPP-Long Term Evolution (LTE), a next generation cellular standard. To capture the SDF behavior of the functional core of LTE along with higher level dynamics in the standard, we use a novel PSDF-based FPGA architecture framework. We implement our PSDF-based LTE design framework using National Instruments{\textquoteright} LabVIEW FPGA, a recently-introduced commercial platform for reconfigurable hardware implementation. We show that our framework can effectively model the dynamics of the LTE protocol, while also providing a synthesis framework for efficient FPGA implementation.}, keywords = {3G mobile communication, 3GPP-long term evolution, 3GPP-LTE physical layer, 4G communication systems, Computational modeling, data flow analysis, data flow graphs, Dataflow modeling, Digital signal processing, DSP applications, Field programmable gate arrays, FPGA architecture framework, FPGA implementation, FPGA-based design, Hardware, hardware synthesis, Instruments, LabVIEW FPGA, Logic Design, LTE, next generation cellular standard, parameterized synchronous data flow technique, Pervasive computing, Physical layer, Physics computing, Production, PSDF graph, reconfigurable hardware implementation, Runtime, software synthesis, Ubiquitous Computing, ubiquitous data flow model}, author = {Kee, Hojin and Bhattacharyya, Shuvra S. and Wong, I. 
and Yong Rao} } @conference {19286, title = {Methods for efficient implementation of Model Predictive Control on multiprocessor systems}, booktitle = {2010 IEEE International Conference on Control Applications (CCA)}, year = {2010}, month = {2010}, pages = {1357 - 1362}, abstract = {Model Predictive Control (MPC) has been used in a wide range of application areas, including chemical engineering, food processing, automotive engineering, aerospace, and metallurgy. An important limitation on the application of MPC is the difficulty in completing the necessary computations within the sampling interval. Recent trends in computing hardware towards greatly increased parallelism offer a solution to this problem. This paper describes modeling and analysis tools to facilitate implementing the MPC algorithms on parallel computers, thereby greatly reducing the time needed to complete the calculations. The use of these tools is illustrated by an application to a class of MPC problems.}, keywords = {Computational modeling, COMPUTERS, Equations, Hardware, Linear systems, Mathematical model, model predictive control, MPC algorithms, Multiprocessing systems, Multiprocessor systems, parallel computers, predictive control, Program processors}, author = {Gu, Ruirui and Bhattacharyya, Shuvra S. and Levine,W. S} } @conference {19291, title = {Rapid prototyping for digital signal processing systems using Parameterized Synchronous Dataflow graphs}, booktitle = {2010 21st IEEE International Symposium on Rapid System Prototyping (RSP)}, year = {2010}, month = {2010}, pages = {1 - 7}, abstract = {Parameterized Synchronous Dataflow (PSDF) has been used previously for abstract scheduling and as a model for architecting embedded software and FPGA implementations. PSDF has been shown to be attractive for these purposes due to its support for flexible dynamic reconfiguration and efficient quasi-static scheduling. To apply PSDF techniques more deeply in the design flow, support for comprehensive functional simulation and efficient hardware mapping is important. By building on the DIF (Dataflow Interchange Format), which is a design language and associated software package for developing and experimenting with dataflow-based design techniques for signal processing systems, we have developed a tool for functional simulation of PSDF specifications. This simulation tool allows designers to model applications in PSDF and simulate their functionality, including use of the dynamic parameter reconfiguration capabilities offered by PSDF. Based on this simulation tool, we also present a systematic design methodology for applying PSDF to the design and implementation of digital signal processing systems, with emphasis on FPGA-based systems for signal processing. 
We demonstrate the capabilities for rapid and accurate prototyping offered by our proposed design methodology, along with its novel support for PSDF-based FPGA system implementation.}, keywords = {abstract scheduling, Computational modeling, Computer architecture, data flow graphs, dataflow based design, dataflow interchange format, design flow, design language, Digital signal processing, digital signal processing systems, dynamic parameter reconfiguration, Dynamic scheduling, efficient hardware mapping, efficient quasistatic scheduling, Embedded software, embedded systems, Field programmable gate arrays, flexible dynamic reconfiguration, FPGA based systems, FPGA implementations, functional simulation, Hardware, parameterized synchronous dataflow graphs, rapid prototyping, Schedules, scheduling, semantics, simulation tool, software package, systematic design methodology}, author = {Wu, Hsiang-Huang and Kee, Hojin and Sane, N. and Plishker,W. and Bhattacharyya, Shuvra S.} } @conference {19296, title = {Simulating dynamic communication systems using the core functional dataflow model}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010}, pages = {1538 - 1541}, abstract = {The latest communication technologies invariably consist of modules with dynamic behavior. There exist a number of design tools for communication system design with their foundation in dataflow modeling semantics. These tools must not only support the functional specification of dynamic communication modules and subsystems but also provide accurate estimation of resource requirements for efficient simulation and implementation. We explore this trade-off - between flexible specification of dynamic behavior and accurate estimation of resource requirements - using a representative application employing an adaptive modulation scheme. We propose an approach for precise modeling of such applications based on a recently-introduced form of dynamic dataflow called core functional dataflow. From our proposed modeling approach, we show how parameterized looped schedules can be generated and analyzed to simulate applications with low run-time overhead as well as guaranteed bounded memory execution. We demonstrate our approach using the Advanced Design System from Agilent Technologies, Inc., which is a commercial tool for design and simulation of communication systems.}, keywords = {adaptive modulation, Analytical models, Application software, Computational modeling, core functional dataflow model, Dataflow, dataflow modeling semantics, design tools, Digital signal processing, dynamic communication systems, functional specification, Hardware, modeling and simulation, Power system modeling, Predictive models, Processor scheduling, Production, Signal processing, software tools, wireless communication}, author = {Sane, N. and Chia-Jui Hsu and Pino,J. L and Bhattacharyya, Shuvra S.} } @article {12352, title = {Parameterized Looped Schedules for Compact Representation of Execution Sequences in DSP Hardware and Software Implementation}, journal = {IEEE Transactions on Signal Processing}, volume = {55}, year = {2007}, month = {2007/06//}, pages = {3126 - 3138}, abstract = {In this paper, we present a technique for compact representation of execution sequences in terms of efficient looping constructs. Here, by a looping construct, we mean a compact way of specifying a finite repetition of a set of execution primitives. 
Such compaction, which can be viewed as a form of hierarchical run-length encoding (RLE), has application in many very large scale integration (VLSI) signal processing contexts, including efficient control generation for Kahn processes on field-programmable gate arrays (FPGAs), and software synthesis for static dataflow models of computation. In this paper, we significantly generalize previous models for loop-based code compaction of digital signal processing (DSP) programs to yield a configurable code compression methodology that exhibits a broad range of achievable tradeoffs. Specifically, we formally develop and apply to DSP hardware and software synthesis a parameterizable loop scheduling approach with compact format, dynamic reconfigurability, and low-overhead decompression.}, keywords = {Application software, array signal processing, code compression methodology, compact representation, Compaction, data compression, Design automation, Digital signal processing, digital signal processing chips, DSP, DSP hardware, embedded systems, Encoding, Field programmable gate arrays, field-programmable gate arrays (FPGAs), FPGA, Hardware, hierarchical runlength encoding, high-level synthesis, Kahn process, loop-based code compaction, looping construct, parameterized loop schedules, program compilers, reconfigurable design, runlength codes, scheduling, Signal generators, Signal processing, Signal synthesis, software engineering, software implementation, static dataflow models, Very large scale integration, VLSI}, isbn = {1053-587X}, doi = {10.1109/TSP.2007.893964}, author = {Ming-Yung Ko and Zissulescu,C. and Puthenpurayil,S. and Bhattacharyya, Shuvra S. and Kienhuis,B. and Deprettere,E. F} } @conference {12347, title = {A Communication Interface for Multiprocessor Signal Processing Systems}, booktitle = {Proceedings of the 2006 IEEE/ACM/IFIP Workshop on Embedded Systems for Real Time Multimedia}, year = {2006}, month = {2006/10//}, pages = {127 - 132}, publisher = {IEEE}, organization = {IEEE}, abstract = {Parallelization of embedded software is often desirable for power/performance-related considerations for computation-intensive applications that frequently occur in the signal-processing domain. Although hardware support for parallel computation is increasingly available in embedded processing platforms, there is a distinct lack of effective software support. One of the most widely known efforts in support of parallel software is the message passing interface (MPI). However, MPI suffers from several drawbacks with regard to customization to specialized parallel processing contexts, and performance degradation for communication-intensive applications. In this paper, we propose a new interface, the signal passing interface (SPI), that is targeted toward signal processing applications and addresses the limitations of MPI for this important domain of embedded software by integrating relevant properties of MPI and coarse-grain dataflow modeling. SPI is much easier and more intuitive to use and, due to its careful specialization, more performance-efficient for the targeted application domain. 
We present our preliminary version of SPI, along with experiments using SPI on a practical face detection system that demonstrate the capabilities of SPI.}, keywords = {application program interfaces, Application software, coarse-grain dataflow modeling, Computer applications, Concurrent computing, Context, data flow graphs, Embedded computing, Embedded software, face detection system, Hardware, Message passing, message passing interface, MPI, Multiprocessing systems, multiprocessor signal processing system, PARALLEL PROCESSING, signal passing interface, Signal processing, SPI}, isbn = {0-7803-9783-5}, doi = {10.1109/ESTMED.2006.321285}, author = {Sankalita Saha and Bhattacharyya, Shuvra S. and Wayne Wolf} } @conference {19493, title = {Eye of the Beholder: Phone-Based Text-Recognition for the Visually-Impaired}, booktitle = {2006 10th IEEE International Symposium on Wearable Computers}, year = {2006}, month = {2006///}, pages = {145 - 146}, abstract = {Blind and visually-impaired people cannot access essential information in the form of written text in our environment (e.g., on restaurant menus, street signs, door labels, product names and instructions, expiration dates). In this paper, we present and evaluate a mobile text-recognition system capable of extracting written information from a wide variety of sources and communicating it on-demand to the user. The user needs no additional hardware except an ordinary, Internet-enabled mobile camera-phone - a device that many visually-impaired individuals already own. This approach fills a gap in assistive technologies for the visually-impaired because it makes users aware of textual information not available to them through any other means.}, keywords = {CAMERAS, data mining, handicapped aids, Hardware, Image quality, Internet, Land mobile radio cellular systems, Mobile computing, mobile handsets, mobile text-recognition system, Optical character recognition software, phone-based text-recognition, Product safety, Text recognition, visually-impaired}, author = {Tudor Dumitras and Lee, M. and Quinones, P. and Smailagic, A. and Siewiorek, Dan and Narasimhan, P.} } @article {17258, title = {Interactive sonification of choropleth maps}, journal = {IEEE Multimedia}, volume = {12}, year = {2005}, month = {2005/06//April}, pages = {26 - 35}, abstract = {Auditory information is an important channel for the visually impaired. Effective sonification (the use of non-speech audio to convey information) promotes equal working opportunities for people with vision impairments by helping them explore data collections for problem solving and decision making. Interactive sonification systems can make georeferenced data accessible to people with vision impairments. The authors compare methods for using sound to encode georeferenced data patterns and for navigating maps.}, keywords = {audio signal processing, audio user interfaces, Auditory (non-speech) feedback, auditory information, cartography, choropleth maps, data collections, decision making, Evaluation, Feedback, georeferenced data, Guidelines, handicapped aids, Hardware, HUMANS, information resources, interaction style, Interactive sonification, interactive systems, Navigation, nonspeech audio, problem solving, Problem-solving, sound, universal usability, US Government, User interfaces, vision impairments, World Wide Web}, isbn = {1070-986X}, doi = {10.1109/MMUL.2005.28}, author = {Zhao,Haixia and Smith,B. K and Norman,K. 
and Plaisant, Catherine and Shneiderman, Ben} } @conference {11975, title = {Robust Contrast Invariant Stereo Correspondence}, booktitle = {Proceedings of the 2005 IEEE International Conference on Robotics and Automation, 2005. ICRA 2005}, year = {2005}, month = {2005/04/18/22}, pages = {819 - 824}, publisher = {IEEE}, organization = {IEEE}, abstract = {A stereo pair of cameras attached to a robot will inevitably yield images with different contrast. Even if we assume that the camera hardware is identical, due to slightly different points of view, the amount of light entering the two cameras is also different, causing dynamically adjusted internal parameters such as aperture, exposure and gain to be different. Due to the difficulty of obtaining and maintaining precise intensity or color calibration between the two cameras, contrast invariance becomes an extremely desirable property of stereo correspondence algorithms. The problem of achieving point correspondence between a stereo pair of images is often addressed by using the intensity or color differences as a local matching metric, which is sensitive to contrast changes. We present an algorithm for contrast invariant stereo matching which relies on multiple spatial frequency channels for local matching. A fast global framework uses the local matching to compute the correspondences and find the occlusions. We demonstrate that the use of multiple frequency channels allows the algorithm to yield good results even in the presence of significant amounts of noise.}, keywords = {Apertures, Calibration, CAMERAS, Computer science, contrast invariance, diffusion, Educational institutions, Frequency, gabor, Hardware, occlusions, Robot vision systems, Robotics and automation, Robustness, stereo}, isbn = {0-7803-8914-X}, doi = {10.1109/ROBOT.2005.1570218}, author = {Ogale, A. S and Aloimonos, J.} } @conference {12295, title = {Running on the bare metal with GeekOS}, booktitle = {Proceedings of the 35th SIGCSE technical symposium on Computer science education}, series = {SIGCSE {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {315 - 319}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Undergraduate operating systems courses are generally taught using one of two approaches: abstract or concrete. In the abstract approach, students learn the concepts underlying operating systems theory, and perhaps apply them using user-level threads in a host operating system. In the concrete approach, students apply concepts by working on a real operating system kernel. In the purest manifestation of the concrete approach, students implement operating system projects that run on real hardware. GeekOS is an instructional operating system kernel which runs on real hardware. It provides the minimum functionality needed to schedule threads and control essential devices on an x86 PC. On this foundation, we have developed projects in which students build processes, semaphores, a multilevel feedback scheduler, paged virtual memory, a filesystem, and inter-process communication. We use the Bochs emulator for ease of development and debugging. 
While this approach (a tiny kernel run on an emulator) is not new, we believe GeekOS goes further towards the goal of combining realism and simplicity than previous systems have.}, keywords = {education, emulation, Hardware, Operating systems}, isbn = {1-58113-798-2}, doi = {10.1145/971300.971411}, url = {http://doi.acm.org/10.1145/971300.971411}, author = {Hovemeyer, David and Hollingsworth, Jeffrey K and Bhattacharjee, Bobby} } @conference {12381, title = {Exploring the probabilistic design space of multimedia systems}, booktitle = {14th IEEE International Workshop on Rapid Systems Prototyping, 2003. Proceedings}, year = {2003}, month = {2003/06/09/11}, pages = {233 - 240}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we propose the novel concept of probabilistic design for multimedia systems and a methodology to quickly explore such design space at an early design stage. The probabilistic design is motivated by the challenge of how to design, but not over-design, multimedia embedded systems while systematically incorporating such applications{\textquoteright} performance requirements, uncertainties in execution time, and tolerance for reasonable execution failures. Our goal is to bridge the gap between real-time analysis and embedded software implementation for rapid and economic (multimedia) system prototyping. Our method takes advantage of multimedia systems{\textquoteright} unique features mentioned above to relax the rigid hardware requirements for software implementation and eventually avoid over-designing the system.}, keywords = {economic system prototyping, embedded software implementation, Embedded system, Energy consumption, execution time uncertainties, Hardware, multimedia embedded systems, multimedia system prototyping, multimedia systems, performance requirements, probabilistic design space, rapid system prototyping, Real time systems, real-time analysis, reasonable execution failure tolerance, Resource management, software prototyping, Space exploration, Streaming media, systems analysis, Timing, Uncertainty}, isbn = {0-7695-1943-1}, doi = {10.1109/IWRSP.2003.1207053}, author = {Shaoxiong Hua and Gang Qu and Bhattacharyya, Shuvra S.} } @conference {14809, title = {Performance measurement using low perturbation and high precision hardware assists}, booktitle = {The 19th IEEE Real-Time Systems Symposium, 1998. Proceedings}, year = {1998}, month = {1998/12/02/4}, pages = {379 - 388}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present the design and implementation of MultiKron PCI, a hardware performance monitor that can be plugged into any computer with a free PCI bus slot. The monitor provides a series of high-resolution timers, and the ability to monitor the utilization of the PCI bus. We also demonstrate how the monitor can be integrated with online performance monitoring tools such as the Paradyn parallel performance measurement tools to reduce the overhead of key timer operations by a factor of 25. In addition, we present a series of case studies using the MultiKron hardware performance monitor to measure and tune high-performance parallel computing applications. 
By using the monitor, we were able to find and correct a performance bug in a popular implementation of the MPI message passing library that caused some communication primitives to run at one half their potential speed.}, keywords = {Clocks, Computerized monitoring, Counting circuits, Debugging, Hardware, hardware performance monitor, high precision hardware assists, low perturbation, measurement, MPI message passing library, MultiKron hardware performance monitor, MultiKron PCI, NIST, online performance monitoring tools, Paradyn parallel performance measurement tools, PCI bus slot, performance bug, performance evaluation, performance measurement, program debugging, program testing, real-time systems, Runtime, Timing}, isbn = {0-8186-9212-X}, doi = {10.1109/REAL.1998.739771}, author = {Mink, A. and Salamon, W. and Hollingsworth, Jeffrey K and Arunachalam, R.} } @article {16769, title = {Techniques for update handling in the enhanced client-server DBMS}, journal = {IEEE Transactions on Knowledge and Data Engineering}, volume = {10}, year = {1998}, month = {1998/06//May}, pages = {458 - 476}, abstract = {The Client-Server computing paradigm has significantly influenced the way modern Database Management Systems are designed and built. In such systems, clients maintain data pages in their main-memory caches, originating from the server{\textquoteright}s database. The Enhanced Client-Server architecture takes advantage of all the available client resources, including their long-term memory. Clients can cache server data into their own disk units if these data are part of their operational spaces. However, when updates occur at the server, a number of clients may need not only to be notified about these changes, but also to obtain portions of the updates. In this paper, we examine the problem of managing server-imposed updates that affect data cached on client disk managers. We propose a number of server update propagation techniques in the context of the Enhanced Client-Server DBMS architecture, and examine the performance of these strategies through detailed simulation experiments. In addition, we study how the various settings of the network affect the performance of these policies.}, keywords = {client disk managers, client resources, client-server computing paradigm, client-server systems, Computational modeling, Computer architecture, concurrency control, data pages, Database systems, distributed databases, enhanced client-server DBMS, Hardware, Local area networks, long-term memory, main-memory caches, Network servers, operational spaces, Personal communication networks, server update propagation techniques, Transaction databases, update handling, Workstations, Yarn}, isbn = {1041-4347}, doi = {10.1109/69.687978}, author = {Delis,A. and Roussopoulos, Nick} } @article {17283, title = {Low-effort, high-payoff user interface reengineering}, journal = {IEEE Software}, volume = {14}, year = {1997}, month = {1997/08//Jul}, pages = {66 - 72}, abstract = {Although increasingly sophisticated design methodologies for developing new user interfaces exist, low-effort, high-payoff user interface reengineering represents a new direction - and opportunity. Yet reengineering a working system is complex and risky because of the potential disruption to users and managers, their justifiable fear of change, and the lack of guarantees that such changes will be for the better. 
Our largely positive experiences with the projects described here lead us to believe that user interface reengineering is a viable and important process. Low-effort, high-payoff improvement recommendations can probably be made for most existing systems. Nevertheless, a narrowly focused user interface reengineering plan may be inappropriate when the major problems lie outside the scope of the user interface, such as inadequate functionalities, frequent crashes, and network problems. Attempts at improving less severe problems while ignoring deeper ones may be perceived as insensitive by the users. In such cases it is important to consider either making similar short-term improvements for other parts of the systems or postponing short-term user interface reengineering in favour of a more complete system reengineering. Similarly, the need for interface stability might outweigh the benefits of the short-term improvements if a complete reengineering is planned for the near future. But most likely these proposed diagnostic strategies and opportunities for improvement are only a prelude to the much larger task of business reengineering, which implies extensive user interface reengineering.}, keywords = {Business process re-engineering, complete system reengineering, Design methodology, Error analysis, Hardware, inadequate functionalities, interface stability, iterative methods, low-effort high-payoff user interface reengineering, short-term improvements, short-term user interface reengineering, software engineering, Software testing, System analysis and design, System testing, systems re-engineering, User centered design, user centred design, User interfaces}, isbn = {0740-7459}, doi = {10.1109/52.595958}, author = {Plaisant, Catherine and Rose,A. and Shneiderman, Ben and Vanniamparampil,A. J} } @conference {12092, title = {A secure and reliable bootstrap architecture}, booktitle = {1997 IEEE Symposium on Security and Privacy, 1997. Proceedings}, year = {1997}, month = {1997/05/04/7}, pages = {65 - 71}, publisher = {IEEE}, organization = {IEEE}, abstract = {In a computer system, the integrity of lower layers is typically treated as axiomatic by higher layers. Under the presumption that the hardware comprising the machine (the lowest layer) is valid, the integrity of a layer can be guaranteed if and only if: (1) the integrity of the lower layers is checked and (2) transitions to higher layers occur only after integrity checks on them are complete. The resulting integrity {\textquotedblleft}chain{\textquotedblright} inductively guarantees system integrity. When these conditions are not met, as they typically are not in the bootstrapping (initialization) of a computer system, no integrity guarantees can be made, yet these guarantees are increasingly important to diverse applications such as Internet commerce, security systems and {\textquotedblleft}active networks{\textquotedblright}. In this paper, we describe the AEGIS architecture for initializing a computer system. It validates integrity at each layer transition in the bootstrap process. 
AEGIS also includes a recovery process for integrity check failures, and we show how this results in robust systems.}, keywords = {active networks, AEGIS architecture, bootstrap architecture, Computer architecture, computer bootstrapping, data integrity, Distributed computing, Hardware, hardware validity, initialization, integrity chain, integrity check failures, Internet, Internet commerce, IP networks, Laboratories, lower-layer integrity, Microprogramming, Operating systems, recovery process, reliability, robust systems, Robustness, Security, security of data, software reliability, system integrity guarantees, system recovery, transitions, Virtual machining}, isbn = {0-8186-7828-3}, doi = {10.1109/SECPRI.1997.601317}, author = {Arbaugh, William A. and Farber,D. J and Smith,J. M} } @article {16445, title = {A Special-Function Unit for Sorting and Sort-Based Database Operations}, journal = {IEEE Transactions on Computers}, volume = {C-35}, year = {1986}, month = {1986/12//}, pages = {1071 - 1077}, abstract = {Achieving efficiency in database management functions is a fundamental problem underlying many computer applications. Efficiency is difficult to achieve using the traditional general-purpose von Neumann processors. Recent advances in microelectronic technologies have prompted many new research activities in the design, implementation, and application of database machines which are tailored for processing database management functions. To build an efficient system, the software algorithms designed for this type of system need to be tailored to take advantage of the hardware characteristics of these machines. Furthermore, special hardware units should be used, if they are cost-effective, to execute or to assist the execution of these software algorithms.}, keywords = {Application software, Computer applications, Database machines, Hardware, hardware sorter, Microelectronics, Software algorithms, Software design, Software systems, sort-based algorithms for database operations, sorting, special-function processor, Technology management}, isbn = {0018-9340}, doi = {10.1109/TC.1986.1676715}, author = {Raschid, Louiqa and Fei,T. and Lam,H. and Su,S. Y.W} }