@inbook {19631, title = {Can Optimally-Fair Coin Tossing Be Based on One-Way Functions?}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2014}, month = {2014/01/01/}, pages = {217 - 239}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Coin tossing is a basic cryptographic task that allows two distrustful parties to obtain an unbiased random bit in a way that neither party can bias the output by deviating from the protocol or halting the execution. Cleve [STOC{\textquoteright}86] showed that in any r round coin tossing protocol one of the parties can bias the output by Ω(1/r) through a {\textquotedblleft}fail-stop{\textquotedblright} attack; namely, they simply execute the protocol honestly and halt at some chosen point. In addition, relying on an earlier work of Blum [COMPCON{\textquoteright}82], Cleve presented an r-round protocol based on one-way functions that was resilient to bias at most $O(1/\sqrt{r})$. Cleve{\textquoteright}s work left open whether {\textquotedblleft}optimally-fair{\textquotedblright} coin tossing (i.e. achieving bias O(1/r) in r rounds) is possible. Recently Moran, Naor, and Segev [TCC{\textquoteright}09] showed how to construct optimally-fair coin tossing based on oblivious transfer, however, it was left open to find the minimal assumptions necessary for optimally-fair coin tossing. The work of Dachman-Soled et al. [TCC{\textquoteright}11] took a step toward answering this question by showing that any black-box construction of optimally-fair coin tossing based on one-way functions with n-bit input and output needs Ω(n/log n) rounds. 
In this work we take another step towards understanding the complexity of optimally-fair coin-tossing by showing that this task (with an arbitrary number of rounds) cannot be based on one-way functions in a black-box way, as long as the protocol is {\textquotedblleft}oblivious{\textquotedblright} to the implementation of the one-way function. Namely, we consider a natural class of black-box constructions based on one-way functions, called function oblivious, in which the output of the protocol does not depend on the specific implementation of the one-way function and only depends on the randomness of the parties. Other than being a natural notion on its own, the known coin tossing protocols of Blum and Cleve (both based on one-way functions) are indeed function oblivious. Thus, we believe our lower bound for function-oblivious constructions is a meaningful step towards resolving the fundamental open question of the complexity of optimally-fair coin tossing.}, keywords = {Algorithm Analysis and Problem Complexity, black-box separations, Coin-Tossing, Computation by Abstract Devices, Data Encryption, Discrete Mathematics in Computer Science, One-Way Functions, Systems and Data Security}, isbn = {978-3-642-54241-1, 978-3-642-54242-8}, url = {http://link.springer.com/chapter/10.1007/978-3-642-54242-8_10}, author = {Dana Dachman-Soled and Mahmoody, Mohammad and Malkin, Tal}, editor = {Lindell, Yehuda} } @inbook {19639, title = {On Minimal Assumptions for Sender-Deniable Public Key Encryption}, booktitle = {Public-Key Cryptography {\textendash} PKC 2014}, series = {Lecture Notes in Computer Science}, year = {2014}, month = {2014/01/01/}, pages = {574 - 591}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {The primitive of deniable encryption was introduced by Canetti et al. (CRYPTO, 1997). 
Deniable encryption is an encryption scheme with the added feature that after transmitting a message m, both sender and receiver may produce random coins showing that the transmitted ciphertext was an encryption of any message m' in the message space. Deniable encryption is a key tool for constructing incoercible protocols, since it allows a party to send one message and later provide apparent evidence to a coercer that a different message was sent. In addition, deniable encryption may be used to obtain adaptively-secure multiparty computation (MPC) protocols and is secure under selective-opening attacks. Different flavors such as sender-deniable and receiver-deniable encryption, where only the sender or receiver produce fake random coins, have been considered. Recently, over 15 years after the primitive was first introduced, Sahai and Waters (IACR Cryptology ePrint Archive, 2013), gave the first construction of sender-deniable encryption schemes with super-polynomial security, where an adversary has negligible advantage in distinguishing real and fake openings. Their construction is based on the construction of an indistinguishability obfuscator for general programs recently introduced in a breakthrough result of Garg et al. (FOCS, 2013). Although feasibility has now been demonstrated, the question of determining the minimal assumptions necessary for sender-deniable encryption with super-polynomial security remains open. The primitive of simulatable public key encryption (PKE), introduced by Damg{\r a}rd and Nielsen (CRYPTO, 2000), is a public key encryption scheme with additional properties that allow oblivious sampling of public keys and ciphertexts. It is one of the low-level primitives used to construct adaptively-secure MPC protocols and was used by O{\textquoteright}Neill et al. in their construction of bi-deniable encryption in the multi-distributional model (CRYPTO, 2011). 
Moreover, the original construction of sender-deniable encryption with polynomial security given by Canetti et al. can be instantiated with simulatable PKE. Thus, a natural question to ask is whether it is possible to construct sender-deniable encryption with super-polynomial security from simulatable PKE. In this work, we investigate the possibility of constructing sender-deniable public key encryption from simulatable PKE in a black-box manner. We show that there is no black-box construction of sender-deniable public key encryption with super-polynomial security from simulatable PKE. This indicates that improving on the original construction of Canetti et al. requires the use of non-black-box techniques, stronger assumptions, or interaction, thus giving some evidence that strong assumptions such as those used by Sahai and Waters are necessary.}, keywords = {Algorithm Analysis and Problem Complexity, black-box separation, Coding and Information Theory, Data Encryption, sender-deniable encryption, simulatable PKE, Systems and Data Security}, isbn = {978-3-642-54630-3, 978-3-642-54631-0}, url = {http://link.springer.com/chapter/10.1007/978-3-642-54631-0_33}, author = {Dana Dachman-Soled}, editor = {Krawczyk, Hugo} } @inbook {19629, title = {Adaptive and Concurrent Secure Computation from New Adaptive, Non-malleable Commitments}, booktitle = {Advances in Cryptology - ASIACRYPT 2013}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {316 - 336}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a unified approach for obtaining general secure computation that achieves adaptive-Universally Composable (UC)-security. Using our approach we essentially obtain all previous results on adaptive concurrent secure computation, both in relaxed models (e.g., quasi-polynomial time simulation), as well as trusted setup models (e.g., the CRS model, the imperfect CRS model). 
This provides conceptual simplicity and insight into what is required for adaptive and concurrent security, as well as yielding improvements to set-up assumptions and/or computational assumptions in known models. Additionally, we provide the first constructions of concurrent secure computation protocols that are adaptively secure in the timing model, and the non-uniform simulation model. As a corollary we also obtain the first adaptively secure multiparty computation protocol in the plain model that is secure under bounded-concurrency. Conceptually, our approach can be viewed as an adaptive analogue to the recent work of Lin, Pass and Venkitasubramaniam [STOC {\textquoteleft}09], who considered only non-adaptive adversaries. Their main insight was that the non-malleability requirement could be decoupled from the simulation requirement to achieve UC-security. A main conceptual contribution of this work is, quite surprisingly, that it is still the case even when considering adaptive security. A key element in our construction is a commitment scheme that satisfies a strong definition of non-malleability. Our new primitive of concurrent equivocal non-malleable commitments, intuitively, guarantees that even when a man-in-the-middle adversary observes concurrent equivocal commitments and decommitments, the binding property of the commitments continues to hold for commitments made by the adversary. This definition is stronger than previous ones, and may be of independent interest. Previous constructions that satisfy our definition have been constructed in setup models, but either require existence of stronger encryption schemes such as CCA-secure encryption or require independent {\textquotedblleft}trapdoors{\textquotedblright} provided by the setup for every pair of parties to ensure non-malleability. 
A main technical contribution of this work is to provide a construction that eliminates these requirements and requires only a single trapdoor.}, keywords = {Algorithm Analysis and Problem Complexity, Applications of Mathematics, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-42032-0, 978-3-642-42033-7}, url = {http://link.springer.com/chapter/10.1007/978-3-642-42033-7_17}, author = {Dana Dachman-Soled and Malkin, Tal and Raykova, Mariana and Venkitasubramaniam, Muthuramakrishnan}, editor = {Sako, Kazue and Sarkar, Palash} } @inbook {19620, title = {Parallel and Dynamic Searchable Symmetric Encryption}, booktitle = {Financial Cryptography and Data Security}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {258 - 274}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Searchable symmetric encryption (SSE) enables a client to outsource a collection of encrypted documents in the cloud and retain the ability to perform keyword searches without revealing information about the contents of the documents and queries. Although efficient SSE constructions are known, previous solutions are highly sequential. This is mainly due to the fact that, currently, the only method for achieving sub-linear time search is the inverted index approach (Curtmola, Garay, Kamara and Ostrovsky, CCS {\textquoteright}06) which requires the search algorithm to access a sequence of memory locations, each of which is unpredictable and stored at the previous location in the sequence. Motivated by advances in multi-core architectures, we present a new method for constructing sub-linear SSE schemes. Our approach is highly parallelizable and dynamic. 
With roughly a logarithmic number of cores in place, searches for a keyword w in our scheme execute in o(r) parallel time, where r is the number of documents containing keyword w (with more cores, this bound can go down to O(log n), i.e., independent of the result size r). Such time complexity outperforms the optimal Θ(r) sequential search time{\textemdash}a similar bound holds for the updates. Our scheme also achieves the following important properties: (a) it enjoys a strong notion of security, namely security against adaptive chosen-keyword attacks; (b) compared to existing sub-linear dynamic SSE schemes (e.g., Kamara, Papamanthou, Roeder, CCS {\textquoteright}12), updates in our scheme do not leak any information, apart from information that can be inferred from previous search tokens; (c) it can be implemented efficiently in external memory (with logarithmic I/O overhead). Our technique is simple and uses a red-black tree data structure; its security is proven in the random oracle model.}, keywords = {cloud storage, Computer Appl. in Administrative Data Processing, Data Encryption, e-Commerce/e-business, parallel search, Searchable encryption, Systems and Data Security}, isbn = {978-3-642-39883-4, 978-3-642-39884-1}, url = {http://link.springer.com/chapter/10.1007/978-3-642-39884-1_22}, author = {Kamara, Seny and Charalampos Papamanthou}, editor = {Sadeghi, Ahmad-Reza} } @inbook {19603, title = {Signatures of Correct Computation}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {222 - 242}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We introduce Signatures of Correct Computation (SCC), a new model for verifying dynamic computations in cloud settings. In the SCC model, a trusted source outsources a function f to an untrusted server, along with a public key for that function (to be used during verification). 
The server can then produce a succinct signature σ vouching for the correctness of the computation of f, i.e., that some result v is indeed the correct outcome of the function f evaluated on some point a. There are two crucial performance properties that we want to guarantee in an SCC construction: (1) verifying the signature should take asymptotically less time than evaluating the function f; and (2) the public key should be efficiently updated whenever the function changes. We construct SCC schemes (satisfying the above two properties) supporting expressive manipulations over multivariate polynomials, such as polynomial evaluation and differentiation. Our constructions are adaptively secure in the random oracle model and achieve optimal updates, i.e., the function{\textquoteright}s public key can be updated in time proportional to the number of updated coefficients, without performing a linear-time computation (in the size of the polynomial). We also show that signatures of correct computation imply Publicly Verifiable Computation (PVC), a model recently introduced in several concurrent and independent works. Roughly speaking, in the SCC model, any client can verify the signature σ and be convinced of some computation result, whereas in the PVC model only the client that issued a query (or anyone who trusts this client) can verify that the server returned a valid signature (proof) for the answer to the query. 
Our techniques can be readily adapted to construct PVC schemes with adaptive security, efficient updates and without the random oracle model.}, keywords = {Algorithm Analysis and Problem Complexity, Computation by Abstract Devices, Data Encryption, Systems and Data Security}, isbn = {978-3-642-36593-5, 978-3-642-36594-2}, url = {http://link.springer.com/chapter/10.1007/978-3-642-36594-2_13}, author = {Charalampos Papamanthou and Shi, Elaine and Tamassia, Roberto}, editor = {Sahai, Amit} } @inbook {19613, title = {Streaming Authenticated Data Structures}, booktitle = {Advances in Cryptology {\textendash} EUROCRYPT 2013}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {353 - 370}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We consider the problem of streaming verifiable computation, where both a verifier and a prover observe a stream of n elements x 1,x 2,{\textellipsis},x n and the verifier can later delegate some computation over the stream to the prover. The prover must return the output of the computation, along with a cryptographic proof to be used for verifying the correctness of the output. Due to the nature of the streaming setting, the verifier can only keep small local state (e.g., logarithmic) which must be updatable in a streaming manner and with no interaction with the prover. Such constraints make the problem particularly challenging and rule out applying existing verifiable computation schemes. We propose streaming authenticated data structures, a model that enables efficient verification of data structure queries on a stream. 
Compared to previous work, we achieve an exponential improvement in the prover{\textquoteright}s running time: While previous solutions have linear prover complexity (in the size of the stream), even for queries executing in sublinear time (e.g., set membership), we propose a scheme with $O(\log M \log n)$ prover complexity, where n is the size of the stream and M is the size of the universe of elements. Our schemes support a series of expressive queries, such as (non-)membership, successor, range search and frequency queries, over an ordered universe and even in higher dimensions. The central idea of our construction is a new authentication tree, called generalized hash tree. We instantiate our generalized hash tree with a hash function based on lattice assumptions, showing that it enjoys suitable algebraic properties that traditional Merkle trees lack. We exploit such properties to achieve our results.}, keywords = {Algorithm Analysis and Problem Complexity, Data Encryption, Discrete Mathematics in Computer Science, Systems and Data Security}, isbn = {978-3-642-38347-2, 978-3-642-38348-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-38348-9_22}, author = {Charalampos Papamanthou and Shi, Elaine and Tamassia, Roberto and Yi, Ke}, editor = {Johansson, Thomas and Nguyen, Phong Q.} } @inbook {19646, title = {Why {\textquotedblleft}Fiat-Shamir for Proofs{\textquotedblright} Lacks a Proof}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {182 - 201}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {The Fiat-Shamir heuristic [CRYPTO {\textquoteright}86] is used to convert any 3-message public-coin proof or argument system into a non-interactive argument, by hashing the prover{\textquoteright}s first message to select the verifier{\textquoteright}s challenge. 
It is known that this heuristic is sound when the hash function is modeled as a random oracle. On the other hand, the surprising result of Goldwasser and Kalai [FOCS {\textquoteright}03] shows that there exists a computationally sound argument on which the Fiat-Shamir heuristic is never sound, when instantiated with any actual efficient hash function. This leaves us with the following interesting possibility: perhaps we can securely instantiate the Fiat-Shamir heuristic for all 3-message public-coin statistically sound proofs, even if we must fail for some computationally sound arguments. Indeed, this has been conjectured to be the case by Barak, Lindell and Vadhan [FOCS {\textquoteright}03], but we do not have any provably secure instantiation under any {\textquotedblleft}standard assumption{\textquotedblright}. In this work, we give a broad black-box separation result showing that the security of the Fiat-Shamir heuristic for statistically sound proofs cannot be proved under virtually any standard assumption via a black-box reduction. More precisely: {\textendash}If we want to have a {\textquotedblleft}universal{\textquotedblright} instantiation of the Fiat-Shamir heuristic that works for all 3-message public-coin proofs, then we cannot prove its security via a black-box reduction from any assumption that has the format of a {\textquotedblleft}cryptographic game{\textquotedblright}. 
{\textendash}For many concrete proof systems, if we want to have a {\textquotedblleft}specific{\textquotedblright} instantiation of the Fiat-Shamir heuristic for that proof system, then we cannot prove its security via a black box reduction from any {\textquotedblleft}falsifiable assumption{\textquotedblright} that has the format of a cryptographic game with an efficient challenger.}, keywords = {Algorithm Analysis and Problem Complexity, Computation by Abstract Devices, Data Encryption, Systems and Data Security}, isbn = {978-3-642-36593-5, 978-3-642-36594-2}, url = {http://link.springer.com/chapter/10.1007/978-3-642-36594-2_11}, author = {Bitansky, Nir and Dana Dachman-Soled and Garg, Sanjam and Jain, Abhishek and Kalai, Yael Tauman and L{\'o}pez-Alt, Adriana and Wichs, Daniel}, editor = {Sahai, Amit} } @inbook {19641, title = {On the Centrality of Off-Line E-Cash to Concrete Partial Information Games}, booktitle = {Security and Cryptography for Networks}, series = {Lecture Notes in Computer Science}, year = {2012}, month = {2012/01/01/}, pages = {264 - 280}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Cryptography has developed numerous protocols for solving {\textquotedblleft}partial information games{\textquotedblright} that are seemingly paradoxical. Some protocols are generic (e.g., secure multi-party computation) and others, due to the importance of the scenario they represent, are designed to solve a concrete problem directly. Designing efficient and secure protocols for (off-line) e-cash, e-voting, and e-auction are some of the most heavily researched concrete problems, representing various settings where privacy and correctness of the procedure is highly important. In this work, we initiate the exploration of the relationships among e-cash, e-voting and e-auction in the universal composability (UC) framework, by considering general variants of the three problems. 
In particular, we first define ideal functionalities for e-cash, e-voting, and e-auction, and then give a construction of a protocol that UC-realizes the e-voting (resp., e-auction) functionality in the e-cash hybrid model. This (black-box) reducibility demonstrates the centrality of off-line e-cash and implies that designing a solution to e-cash may bear fruits in other areas. Constructing a solution to one protocol problem based on a second protocol problem has been traditional in cryptography, but typically has concentrated on building complex protocols on simple primitives (e.g., secure multi-party computation from Oblivious Transfer, signature from one-way functions, etc.). The novelty here is reducibility among mature protocols and using the ideal functionality as a design tool in realizing other ideal functionalities. We suggest this new approach, and we only consider the very basic general properties from the various primitives to demonstrate its viability. Namely, we only consider the basic coin e-cash model, the e-voting that is correct and private and relies on trusted registration, and e-auction relying on a trusted auctioneer. Naturally, relationships among protocols with further properties (i.e., extended functionalities), using the approach advocated herein, are left as open questions.}, keywords = {Computer Appl. 
in Administrative Data Processing, Computer Communication Networks, Data Encryption, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-32927-2, 978-3-642-32928-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-32928-9_15}, author = {Choi, Seung Geol and Dana Dachman-Soled and Yung, Moti}, editor = {Visconti, Ivan and Prisco, Roberto De} } @inbook {19632, title = {Computational Extractors and Pseudorandomness}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2012}, month = {2012/01/01/}, pages = {383 - 403}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Computational extractors are efficient procedures that map a source of sufficiently high min-entropy to an output that is computationally indistinguishable from uniform. By relaxing the statistical closeness property of traditional randomness extractors one hopes to improve the efficiency and entropy parameters of these extractors, while keeping their utility for cryptographic applications. In this work we investigate computational extractors and consider questions of existence and inherent complexity from the theoretical and practical angles, with particular focus on the relationship to pseudorandomness. An obvious way to build a computational extractor is via the {\textquotedblleft}extract-then-prg{\textquotedblright} method: apply a statistical extractor and use its output to seed a PRG. This approach carries with it the entropy cost inherent to implementing statistical extractors, namely, the source entropy needs to be substantially higher than the PRG{\textquoteright}s seed length. It also requires a PRG and thus relies on one-way functions. 
We study the necessity of one-way functions in the construction of computational extractors and determine matching lower and upper bounds on the {\textquotedblleft}black-box efficiency{\textquotedblright} of generic constructions of computational extractors that use a one-way permutation as an oracle. Under this efficiency measure we prove a direct correspondence between the complexity of computational extractors and that of pseudorandom generators, showing the optimality of the extract-then-prg approach for generic constructions of computational extractors and confirming the intuition that to build a computational extractor via a PRG one needs to make up for the entropy gap intrinsic to statistical extractors. On the other hand, we show that with stronger cryptographic primitives one can have more entropy- and computationally-efficient constructions. In particular, we show a construction of a very practical computational extractor from any weak PRF without resorting to statistical extractors.}, keywords = {Algorithm Analysis and Problem Complexity, Coding and Information Theory, Computer Communication Networks, Data Encryption, Math Applications in Computer Science, Systems and Data Security}, isbn = {978-3-642-28913-2, 978-3-642-28914-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-28914-9_22}, author = {Dana Dachman-Soled and Gennaro, Rosario and Krawczyk, Hugo and Malkin, Tal}, editor = {Cramer, Ronald} } @inbook {19634, title = {Efficient Password Authenticated Key Exchange via Oblivious Transfer}, booktitle = {Public Key Cryptography {\textendash} PKC 2012}, series = {Lecture Notes in Computer Science}, year = {2012}, month = {2012/01/01/}, pages = {449 - 466}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a new framework for constructing efficient password authenticated key exchange (PAKE) protocols based on oblivious transfer (OT). 
Using this framework, we obtain: an efficient and simple UC-secure PAKE protocol that is secure against adaptive corruptions without erasures. efficient and simple PAKE protocols under the Computational Diffie-Hellman (CDH) assumption and the hardness of factoring. (Previous efficient constructions rely on hash proof systems, which appears to be inherently limited to decisional assumptions.) All of our constructions assume a common reference string (CRS) but do not rely on random oracles.}, keywords = {adaptive security, Algorithm Analysis and Problem Complexity, Computer Communication Networks, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, oblivious transfer, Password Authenticated Key Exchange, search assumptions, Systems and Data Security, UC security}, isbn = {978-3-642-30056-1, 978-3-642-30057-8}, url = {http://link.springer.com/chapter/10.1007/978-3-642-30057-8_27}, author = {Canetti, Ran and Dana Dachman-Soled and Vaikuntanathan, Vinod and Wee, Hoeteck}, editor = {Fischlin, Marc and Buchmann, Johannes and Manulis, Mark} } @inbook {19644, title = {Securing Circuits against Constant-Rate Tampering}, booktitle = {Advances in Cryptology {\textendash} CRYPTO 2012}, series = {Lecture Notes in Computer Science}, year = {2012}, month = {2012/01/01/}, pages = {533 - 551}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a compiler that converts any circuit into one that remains secure even if a constant fraction of its wires are tampered with. Following the seminal work of Ishai et. al. (Eurocrypt 2006), we consider adversaries who may choose an arbitrary set of wires to corrupt, and may set each such wire to 0 or to 1, or may toggle with the wire. We prove that such adversaries, who continuously tamper with the circuit, can learn at most logarithmically many bits of secret information (in addition to black-box access to the circuit). 
Our results are information theoretic.}, keywords = {circuit compiler, Computer Communication Networks, computers and society, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, PCP of proximity, side-channel attacks, Systems and Data Security, tampering}, isbn = {978-3-642-32008-8, 978-3-642-32009-5}, url = {http://link.springer.com/chapter/10.1007/978-3-642-32009-5_31}, author = {Dana Dachman-Soled and Kalai, Yael Tauman}, editor = {Safavi-Naini, Reihaneh and Canetti, Ran} } @inbook {19640, title = {On the Black-Box Complexity of Optimally-Fair Coin Tossing}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2011}, month = {2011/01/01/}, pages = {450 - 467}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {A fair two-party coin tossing protocol is one in which both parties output the same bit that is almost uniformly distributed (i.e., it equals 0 and 1 with probability that is at most negligibly far from one half). It is well known that it is impossible to achieve fair coin tossing even in the presence of fail-stop adversaries (Cleve, FOCS 1986). In fact, Cleve showed that for every coin tossing protocol running for r rounds, an efficient fail-stop adversary can bias the output by Ω(1/r). Since this is the best possible, a protocol that limits the bias of any adversary to O(1/r) is called optimally-fair. The only optimally-fair protocol that is known to exist relies on the existence of oblivious transfer, because it uses general secure computation (Moran, Naor and Segev, TCC 2009). However, it is possible to achieve a bias of $O(1/\sqrt{r})$ in r rounds relying only on the assumption that there exist one-way functions. 
In this paper we show that it is impossible to achieve optimally-fair coin tossing via a black-box construction from one-way functions for r that is less than O(n/log n), where n is the input/output length of the one-way function used. An important corollary of this is that it is impossible to construct an optimally-fair coin tossing protocol via a black-box construction from one-way functions whose round complexity is independent of the security parameter n determining the security of the one-way function being used. Informally speaking, the main ingredient of our proof is to eliminate the random-oracle from {\textquotedblleft}secure{\textquotedblright} protocols with {\textquotedblleft}low round-complexity{\textquotedblright} and simulate the protocol securely against semi-honest adversaries in the plain model. We believe our simulation lemma to be of broader interest.}, keywords = {Algorithm Analysis and Problem Complexity, black-box separations, Coding and Information Theory, coin tossing, Computer Communication Networks, Data Encryption, lower-bound, Math Applications in Computer Science, optimally-fair coin tossing, round-complexity, Systems and Data Security}, isbn = {978-3-642-19570-9, 978-3-642-19571-6}, url = {http://link.springer.com/chapter/10.1007/978-3-642-19571-6_27}, author = {Dana Dachman-Soled and Lindell, Yehuda and Mahmoody, Mohammad and Malkin, Tal}, editor = {Ishai, Yuval} } @inbook {19600, title = {Optimal Verification of Operations on Dynamic Sets}, booktitle = {Advances in Cryptology {\textendash} CRYPTO 2011}, series = {Lecture Notes in Computer Science}, year = {2011}, month = {2011/01/01/}, pages = {91 - 110}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We study the design of protocols for set-operation verification, namely the problem of cryptographically checking the correctness of outsourced set operations performed by an untrusted server over a dynamic collection of sets that are 
owned (and updated) by a trusted source. We present new authenticated data structures that allow any entity to publicly verify a proof attesting the correctness of primitive set operations such as intersection, union, subset and set difference. Based on a novel extension of the security properties of bilinear-map accumulators as well as on a primitive called accumulation tree, our protocols achieve optimal verification and proof complexity (i.e., only proportional to the size of the query parameters and the answer), as well as optimal update complexity (i.e., constant), while incurring no extra asymptotic space overhead. The proof construction is also efficient, adding a logarithmic overhead to the computation of the answer of a set-operation query. In contrast, existing schemes entail high communication and verification costs or high storage costs. Applications of interest include efficient verification of keyword search and database queries. The security of our protocols is based on the bilinear q-strong Diffie-Hellman assumption.}, keywords = {Computer Communication Networks, computers and society, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-22791-2, 978-3-642-22792-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-22792-9_6}, author = {Papamanthou, Charalampos and Tamassia, Roberto and Triandopoulos, Nikos}, editor = {Rogaway, Phillip} } @inbook {19643, title = {Secure Efficient Multiparty Computing of Multivariate Polynomials and Applications}, booktitle = {Applied Cryptography and Network Security}, series = {Lecture Notes in Computer Science}, year = {2011}, month = {2011/01/01/}, pages = {130--146}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a robust secure methodology for computing functions that are represented as multivariate polynomials where parties hold different 
variables as private inputs. Our generic efficient protocols are fully black-box and employ threshold additive homomorphic encryption; they do not assume honest majority, yet are robust in detecting any misbehavior. We achieve solutions that take advantage of the algebraic structure of the polynomials, and are polynomial-time in all parameters (security parameter, polynomial size, polynomial degree, number of parties). We further exploit a {\textquotedblleft}round table{\textquotedblright} communication paradigm to reduce the complexity in the number of parties. A large collection of problems are naturally and efficiently represented as multivariate polynomials over a field or a ring: problems from linear algebra, statistics, logic, as well as operations on sets represented as polynomials. In particular, we present a new efficient solution to the multi-party set intersection problem, and a solution to a multi-party variant of the polynomial reconstruction problem.}, keywords = {additive homomorphic encryption, Algorithm Analysis and Problem Complexity, Computer Communication Networks, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, multiparty set intersection, multivariate polynomial evaluation, secret sharing, secure multiparty computation, Systems and Data Security, threshold cryptosystems}, isbn = {978-3-642-21553-7, 978-3-642-21554-4}, url = {http://link.springer.com/chapter/10.1007/978-3-642-21554-4_8}, author = {Dachman-Soled, Dana and Malkin, Tal and Raykova, Mariana and Yung, Moti}, editor = {Lopez, Javier and Tsudik, Gene} } @inbook {19604, title = {Optimal Authenticated Data Structures with Multilinear Forms}, booktitle = {Pairing-Based Cryptography - Pairing 2010}, series = {Lecture Notes in Computer Science}, year = {2010}, month = {2010/01/01/}, pages = {246--264}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Cloud computing and cloud 
storage are becoming increasingly prevalent. In this paradigm, clients outsource their data and computations to third-party service providers. Data integrity in the cloud therefore becomes an important factor for the functionality of these web services. Authenticated data structures, implemented with various cryptographic primitives, have been widely studied as a means of providing efficient solutions to data integrity problems (e.g., Merkle trees). In this paper, we introduce a new authenticated dictionary data structure that employs multilinear forms, a cryptographic primitive proposed by Silverberg and Boneh in 2003 [10], the construction of which, however, remains an open problem to date. Our authenticated dictionary is optimal, that is, it does not add any extra asymptotic cost to the plain dictionary data structure, yielding proofs of constant size, i.e., asymptotically equal to the size of the answer, while maintaining other relevant complexities logarithmic. Instead, solutions based on cryptographic hashing (e.g., Merkle trees) require proofs of logarithmic size [40]. Because multilinear forms are not known to exist yet, our result can be viewed from a different angle: if one could prove that optimal authenticated dictionaries cannot exist in the computational model, irrespectively of cryptographic primitives, then our solution would imply that cryptographically interesting multilinear form generators cannot exist as well (i.e., it can be viewed as a reduction). 
Thus, we provide an alternative avenue towards proving the nonexistence of multilinear form generators in the context of general lower bounds for authenticated data structures [40] and for memory checking [18], a model similar to the authenticated data structures model.}, keywords = {Algorithm Analysis and Problem Complexity, authenticated dictionary, Coding and Information Theory, Computer Communication Networks, Data Encryption, Discrete Mathematics in Computer Science, multilinear forms, Systems and Data Security}, isbn = {978-3-642-17454-4, 978-3-642-17455-1}, url = {http://link.springer.com/chapter/10.1007/978-3-642-17455-1_16}, author = {Papamanthou, Charalampos and Tamassia, Roberto and Triandopoulos, Nikos}, editor = {Joye, Marc and Miyaji, Atsuko and Otsuka, Akira} } @inbook {19635, title = {Efficient Robust Private Set Intersection}, booktitle = {Applied Cryptography and Network Security}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {125--142}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Computing Set Intersection privately and efficiently between two mutually mistrusting parties is an important basic procedure in the area of private data mining. Assuring robustness, namely, coping with potentially arbitrarily misbehaving (i.e., malicious) parties, while retaining protocol efficiency (rather than employing costly generic techniques) is an open problem. 
In this work the first solution to this problem is presented.}, keywords = {Coding and Information Theory, Computer Communication Networks, Cryptographic protocols, Data Encryption, Data Structures, Cryptology and Information Theory, Information Systems Applications (incl.Internet), Privacy Preserving Data Mining, Secure Two-party Computation, Set Intersection, Systems and Data Security}, isbn = {978-3-642-01956-2, 978-3-642-01957-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-01957-9_8}, author = {Dachman-Soled, Dana and Malkin, Tal and Raykova, Mariana and Yung, Moti}, editor = {Abdalla, Michel and Pointcheval, David and Fouque, Pierre-Alain and Vergnaud, Damien} } @inbook {19638, title = {Improved Non-committing Encryption with Applications to Adaptively Secure Protocols}, booktitle = {Advances in Cryptology {\textendash} ASIACRYPT 2009}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {287--302}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a new construction of non-committing encryption schemes. Unlike the previous constructions of Canetti et al. (STOC {\textquoteright}96) and of Damg{\r a}rd and Nielsen (Crypto {\textquoteright}00), our construction achieves all of the following properties: Optimal round complexity. Our encryption scheme is a 2-round protocol, matching the round complexity of Canetti et al. and improving upon that in Damg{\r a}rd and Nielsen. Weaker assumptions. Our construction is based on trapdoor simulatable cryptosystems, a new primitive that we introduce as a relaxation of those used in previous works. We also show how to realize this primitive based on hardness of factoring. Improved efficiency. The amortized complexity of encrypting a single bit is O(1) public key operations on a constant-sized plaintext in the underlying cryptosystem. 
As a result, we obtain the first non-committing public-key encryption schemes under hardness of factoring and worst-case lattice assumptions; previously, such schemes were only known under the CDH and RSA assumptions. Combined with existing work on secure multi-party computation, we obtain protocols for multi-party computation secure against a malicious adversary that may adaptively corrupt an arbitrary number of parties under weaker assumptions than were previously known. Specifically, we obtain the first adaptively secure multi-party protocols based on hardness of factoring in both the stand-alone setting and the UC setting with a common reference string.}, keywords = {adaptive corruption, Algorithm Analysis and Problem Complexity, Applications of Mathematics, Data Encryption, Data Structures, Cryptology and Information Theory, Discrete Mathematics in Computer Science, non-committing encryption, public-key encryption, secure multi-party computation, Systems and Data Security}, isbn = {978-3-642-10365-0, 978-3-642-10366-7}, url = {http://link.springer.com/chapter/10.1007/978-3-642-10366-7_17}, author = {Choi, Seung Geol and Dachman-Soled, Dana and Malkin, Tal and Wee, Hoeteck}, editor = {Matsui, Mitsuru} } @inbook {19645, title = {Simple, Black-Box Constructions of Adaptively Secure Protocols}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {387--402}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a compiler for transforming an oblivious transfer (OT) protocol secure against an adaptive semi-honest adversary into one that is secure against an adaptive malicious adversary. 
Our compiler achieves security in the universal composability framework, assuming access to an ideal commitment functionality, and improves over previous work achieving the same security guarantee in two ways: it uses black-box access to the underlying protocol and achieves a constant multiplicative overhead in the round complexity. As a corollary, we obtain the first constructions of adaptively secure protocols in the stand-alone model using black-box access to a low-level primitive.}, keywords = {Algorithm Analysis and Problem Complexity, computers and society, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-00456-8, 978-3-642-00457-5}, url = {http://link.springer.com/chapter/10.1007/978-3-642-00457-5_23}, author = {Choi, Seung Geol and Dachman-Soled, Dana and Malkin, Tal and Wee, Hoeteck}, editor = {Reingold, Omer} } @inbook {19630, title = {Black-Box Construction of a Non-malleable Encryption Scheme from Any Semantically Secure One}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2008}, month = {2008/01/01/}, pages = {427--444}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We show how to transform any semantically secure encryption scheme into a non-malleable one, with a black-box construction that achieves a quasi-linear blow-up in the size of the ciphertext. This improves upon the previous non-black-box construction of Pass, Shelat and Vaikuntanathan (Crypto {\textquoteright}06). Our construction also extends readily to guarantee non-malleability under a bounded-CCA2 attack, thereby simultaneously improving on both results in the work of Cramer et al. (Asiacrypt {\textquoteright}07). 
Our construction departs from the oft-used paradigm of re-encrypting the same message with different keys and then proving consistency of encryptions; instead, we encrypt an encoding of the message with certain locally testable and self-correcting properties. We exploit the fact that low-degree polynomials are simultaneously good error-correcting codes and a secret-sharing scheme.}, keywords = {Algorithm Analysis and Problem Complexity, black-box constructions, computers and society, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, non-malleability, public-key encryption, semantic security, Systems and Data Security}, isbn = {978-3-540-78523-1, 978-3-540-78524-8}, url = {http://link.springer.com/chapter/10.1007/978-3-540-78524-8_24}, author = {Choi, Seung Geol and Dachman-Soled, Dana and Malkin, Tal and Wee, Hoeteck}, editor = {Canetti, Ran} } @inbook {19622, title = {Time and Space Efficient Algorithms for Two-Party Authenticated Data Structures}, booktitle = {Information and Communications Security}, series = {Lecture Notes in Computer Science}, year = {2007}, month = {2007/01/01/}, pages = {1--15}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Authentication is increasingly relevant to data management. Data is being outsourced to untrusted servers and clients want to securely update and query their data. For example, in database outsourcing, a client{\textquoteright}s database is stored and maintained by an untrusted server. Also, in simple storage systems, clients can store very large amounts of data but at the same time, they want to assure their integrity when they retrieve them. In this paper, we present a model and protocol for two-party authentication of data structures. Namely, a client outsources its data structure and verifies that the answers to the queries have not been tampered with. 
We provide efficient algorithms to securely outsource a skip list with logarithmic time overhead at the server and client and logarithmic communication cost, thus providing an efficient authentication primitive for outsourced data, both structured (e.g., relational databases) and semi-structured (e.g., XML documents). In our technique, the client stores only a constant amount of space, which is optimal. Our two-party authentication framework can be deployed on top of existing storage applications, thus providing an efficient authentication service. Finally, we present experimental results that demonstrate the practical efficiency and scalability of our scheme.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Communication Networks, computers and society, Data Encryption, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-540-77047-3, 978-3-540-77048-0}, url = {http://link.springer.com/chapter/10.1007/978-3-540-77048-0_1}, author = {Papamanthou, Charalampos and Tamassia, Roberto}, editor = {Qing, Sihan and Imai, Hideki and Wang, Guilin} }