@conference {19601,
  title = {Efficient Integrity Checking of Untrusted Network Storage},
  booktitle = {Proceedings of the 4th ACM International Workshop on Storage Security and Survivability (StorageSS {\textquoteright}08)},
  series = {StorageSS {\textquoteright}08},
  year = {2008},
  pages = {43 - 54},
  publisher = {ACM},
  abstract = {Outsourced storage has become increasingly practical in recent years. Users can now store large amounts of data on multiple servers at a relatively low price. An important issue for outsourced storage systems is to design an efficient scheme to assure users that their data stored at remote servers has not been tampered with. This paper presents a general method and a practical prototype application for verifying the integrity of files in an untrusted network storage service. The verification process is managed by an application running in a trusted environment (typically on the client) that stores just one cryptographic hash value of constant size, corresponding to the "digest" of an authenticated data structure. The proposed integrity verification service can work with any storage service since it is transparent to the storage technology used. Experimental results show that our integrity verification method is efficient and practical for network storage systems.},
  keywords = {authenticated data structures, data integrity, Security, untrusted outsourced storage},
  isbn = {978-1-60558-299-3},
  url = {http://doi.acm.org/10.1145/1456469.1456479},
  author = {Heitzmann, Alexander and Palazzi, Bernardo and Papamanthou, Charalampos and Tamassia, Roberto}
}

@conference {19027,
  title = {BIND: a fine-grained attestation service for secure distributed systems},
  booktitle = {Proceedings of the 2005 IEEE Symposium on Security and Privacy},
  year = {2005},
  month = {May},
  pages = {154 - 168},
  abstract = {In this paper we propose BIND (binding instructions and data), a fine-grained attestation service for securing distributed systems. Code attestation has recently received considerable attention in trusted computing. However, current code attestation technology is relatively immature. First, due to the great variability in software versions and configurations, verification of the hash is difficult. Second, the discrepancy between time-of-attestation and time-of-use remains to be addressed, since the code may be correct at the time of attestation but compromised by the time of use. The goal of BIND is to address these issues and make code attestation more usable in securing distributed systems. BIND offers the following properties: (1) BIND performs fine-grained attestation. Instead of attesting to the entire memory content, BIND attests only to the piece of code we are concerned about. This greatly simplifies verification. (2) BIND narrows the gap between time-of-attestation and time-of-use. BIND measures a piece of code immediately before it is executed and uses a sandboxing mechanism to protect the execution of the attested code. (3) BIND ties the code attestation with the data that the code produces, such that we can pinpoint what code has been run to generate that data. In addition, by incorporating the verification of input data integrity into the attestation, BIND offers transitive integrity verification, i.e., through one signature, we can vouch for the entire chain of processes that have performed transformations over a piece of data. BIND offers a general solution toward establishing a trusted environment for distributed system designers.},
  keywords = {BIND, binding instructions and data, code attestation, data integrity, digital signatures, distributed processing, fine-grained attestation service, input data integrity, program verification, sandboxing mechanism, secure distributed systems, signature, time-of-attestation, time-of-use, transitive integrity verification, trusted computing},
  author = {Shi, Elaine and Perrig, A. and Van Doorn, L.}
}

@article {18661,
  title = {AQuA: an adaptive architecture that provides dependable distributed objects},
  journal = {IEEE Transactions on Computers},
  volume = {52},
  year = {2003},
  month = {January},
  pages = {31 - 50},
  abstract = {Building dependable distributed systems from commercial off-the-shelf components is of growing practical importance. For both cost and production reasons, there is interest in approaches and architectures that facilitate building such systems. The AQuA architecture is one such approach; its goal is to provide adaptive fault tolerance to CORBA applications by replicating objects. The AQuA architecture allows application programmers to request desired levels of dependability during applications{\textquoteright} runtimes. It provides fault tolerance mechanisms to ensure that a CORBA client can always obtain reliable services, even if the CORBA server object that provides the desired services suffers from crash failures and value faults. AQuA includes a replicated dependability manager that provides dependability management by configuring the system in response to applications{\textquoteright} requests and changes in system resources due to faults. It uses Maestro/Ensemble to provide group communication services. It contains a gateway to intercept standard CORBA IIOP messages to allow any standard CORBA application to use AQuA. It provides different types of replication schemes to forward messages reliably to the remote replicated objects. All of the replication schemes ensure strong data consistency among replicas. This paper describes the AQuA architecture and presents, in detail, the active replication pass-first scheme. The interface to the dependability manager and the design of the dependability manager replication are also described. Finally, we describe performance measurements that were conducted for the active replication pass-first scheme, and we present results from our study of fault detection, recovery, and blocking times.},
  keywords = {active replication pass-first scheme, adaptive architecture, adaptive fault tolerance, AQuA, CORBA, data consistency, data integrity, dependable distributed objects, distributed object management, performance measurements, quality of service, replicated dependability manager, replication schemes, software fault tolerance, system resources},
  issn = {0018-9340},
  doi = {10.1109/TC.2003.1159752},
  author = {Ren, Yansong and Bakken, D. E. and Courtney, T. and Cukier, Michel and Karr, D. A. and Rubel, P. and Sabnis, C. and Sanders, W. H. and Schantz, R. E. and Seri, M.}
}
@article {18650,
  title = {An adaptive algorithm for tolerating value faults and crash failures},
  journal = {IEEE Transactions on Parallel and Distributed Systems},
  volume = {12},
  year = {2001},
  month = {February},
  pages = {173 - 192},
  abstract = {The AQuA architecture provides adaptive fault tolerance to CORBA applications by replicating objects and providing a high-level method that an application can use to specify its desired level of dependability. This paper presents the algorithms that AQuA uses, when an application{\textquoteright}s dependability requirements can change at runtime, to tolerate both value faults in applications and crash failures simultaneously. In particular, we provide an active replication communication scheme that maintains data consistency among replicas, detects crash failures, collates the messages generated by replicated objects, and delivers the result of each vote. We also present an adaptive majority voting algorithm that enables the correct ongoing vote while both the number of replicas and the majority size dynamically change. Together, these two algorithms form the basis of the mechanism for tolerating and recovering from value faults and crash failures in AQuA.},
  keywords = {active replication communication, adaptive algorithm, adaptive fault tolerance, adaptive majority voting algorithm, AQuA architecture, client-server systems, CORBA, crash failures, data consistency, data integrity, Dependability, distributed object management, fault tolerant computing, objects replication, value faults},
  issn = {1045-9219},
  doi = {10.1109/71.910872},
  author = {Ren, Yansong and Cukier, Michel and Sanders, W. H.}
}

@article {16479,
  title = {Logic-based query optimization for object databases},
  journal = {IEEE Transactions on Knowledge and Data Engineering},
  volume = {12},
  year = {2000},
  month = {July/August},
  pages = {529 - 547},
  abstract = {We present a technique for transferring query optimization techniques, developed for relational databases, into object databases. We demonstrate this technique for ODMG database schemas defined in ODL and object queries expressed in OQL. The object schema is represented using a logical representation (Datalog). Semantic knowledge about the object data model, e.g., class hierarchy information, relationship between objects, etc., as well as semantic knowledge about a particular schema and application domain are expressed as integrity constraints. An OQL object query is represented as a logic query and query optimization is performed in the Datalog representation. We obtain equivalent (optimized) logic queries, and subsequently obtain equivalent (optimized) OQL queries for each equivalent logic query. We present one optimization technique for semantic query optimization (SQO) based on the residue technique of U. S. Chakravarthy et al. (1990; 1986; 1988). We show that our technique generalizes previous research on SQO for object databases. We handle a large class of OQL queries, including queries with constructors and methods. We demonstrate how SQO can be used to eliminate queries which contain contradictions and simplify queries, e.g., by eliminating joins, or by reducing the access scope for evaluating a query to some specific subclass(es). We also demonstrate how the definition of a method, or integrity constraints describing the method, can be used in optimizing a query with a method.},
  keywords = {access scope, application domain, class hierarchy information, Constraint optimization, data integrity, Data models, Datalog, Datalog representation, deductive databases, equivalent logic query, integrity constraints, Lifting equipment, Logic, logic based query optimization, logic programming, logic queries, logic query, logical representation, object data model, object databases, object queries, object schema, object-oriented databases, ODL, ODMG database schemas, optimization technique, optimized OQL queries, OQL object query, query languages, Query optimization, query optimization techniques, Query processing, Relational databases, residue technique, semantic knowledge, semantic query optimization},
  issn = {1041-4347},
  doi = {10.1109/69.868906},
  author = {Grant, J. and Gryz, J. and Minker, Jack and Raschid, Louiqa}
}

@conference {12092,
  title = {A secure and reliable bootstrap architecture},
  booktitle = {Proceedings of the 1997 IEEE Symposium on Security and Privacy},
  year = {1997},
  month = {May},
  pages = {65 - 71},
  publisher = {IEEE},
  abstract = {In a computer system, the integrity of lower layers is typically treated as axiomatic by higher layers. Under the presumption that the hardware comprising the machine (the lowest layer) is valid, the integrity of a layer can be guaranteed if and only if: (1) the integrity of the lower layers is checked, and (2) transitions to higher layers occur only after integrity checks on them are complete. The resulting integrity {\textquotedblleft}chain{\textquotedblright} inductively guarantees system integrity. When these conditions are not met, as they typically are not in the bootstrapping (initialization) of a computer system, no integrity guarantees can be made, yet these guarantees are increasingly important to diverse applications such as Internet commerce, security systems, and {\textquotedblleft}active networks{\textquotedblright}. In this paper, we describe the AEGIS architecture for initializing a computer system. It validates integrity at each layer transition in the bootstrap process. AEGIS also includes a recovery process for integrity check failures, and we show how this results in robust systems.},
  keywords = {active networks, AEGIS architecture, bootstrap architecture, Computer architecture, computer bootstrapping, data integrity, Distributed computing, Hardware, hardware validity, initialization, integrity chain, integrity check failures, Internet, Internet commerce, IP networks, Laboratories, lower-layer integrity, Microprogramming, Operating systems, recovery process, reliability, robust systems, Robustness, Security, security of data, software reliability, system integrity guarantees, system recovery, transitions, Virtual machining},
  isbn = {0-8186-7828-3},
  doi = {10.1109/SECPRI.1997.601317},
  author = {Arbaugh, William A. and Farber, D. J. and Smith, J. M.}
}