@inproceedings{14408,
  author    = {Friedman, Nir and Getoor, Lise and Koller, Daphne and Pfeffer, Avi},
  title     = {Learning Probabilistic Relational Models},
  booktitle = {Proceedings of the Sixteenth International Joint Conference on Artificial Intelligence ({IJCAI})},
  volume    = {16},
  year      = {1999},
  pages     = {1300--1309},
  abstract  = {A large portion of real-world data is stored in commercial relational database systems. In contrast, most statistical learning methods work only with {\textquotedblleft}flat{\textquotedblright} data representations. Thus, to apply these methods, we are forced to convert our data into a flat form, thereby losing much of the relational structure present in our database. This paper builds on the recent work on probabilistic relational models (PRMs), and describes how to learn them from databases. PRMs allow the properties of an object to depend probabilistically both on other properties of that object and on properties of related objects. Although PRMs are significantly more expressive than standard models, such as Bayesian networks, we show how to extend well-known statistical methods for learning Bayesian networks to learn these models. We describe both parameter estimation and structure learning {\textemdash} the automatic induction of the dependency structure in a model. Moreover, we show how the learning procedure can exploit standard database retrieval techniques for efficient learning from large datasets. We present experimental results on both real and synthetic relational databases.},
}