1998.bib

@COMMENT{{Automatically generated - DO NOT MODIFY!}}

@TECHREPORT{Cleary1998,
  AUTHOR = {Cleary, J.G. and Trigg, L.},
  TITLE = {Experience with OB1: an optimal Bayes decision tree learner},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {1998},
  MONTH = {May},
  NUMBER = {98/10},
  ABSTRACT = {Machine learning algorithms for inferring decision trees typically choose a single "best" tree to describe the training data. Recent research has shown that classification performance can be significantly improved by voting predictions of multiple, independently produced decision trees. This paper describes an algorithm, OB1, that makes a weighted sum over many possible models. We describe one instance of OB1 that includes all possible decision trees as well as naive Bayesian models. OB1 is compared with a number of other decision tree and instance-based learning algorithms on some of the data sets from the UCI repository. Both an information gain and an accuracy measure are used for the comparison. On the information gain measure OB1 performs significantly better than all the other algorithms. On the accuracy measure it is significantly better than all the algorithms except naive Bayes, which performs comparably to OB1.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Cleary-Trigg-OB1.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Cleary-Trigg-OB1.ps}
}

@INPROCEEDINGS{FrankW98,
  AUTHOR = {Eibe Frank and
               Ian H. Witten},
  TITLE = {Generating Accurate Rule Sets Without Global Optimization},
  BOOKTITLE = {Proc 15th International Conference on Machine Learning},
  YEAR = {1998},
  PAGES = {144-151},
  ADDRESS = {Madison, Wisconsin},
  PUBLISHER = {Morgan Kaufmann},
  NOTE = {Also available as Working Paper 98/2, Department of Computer Science, University of Waikato, January 1998},
  ABSTRACT = {The two dominant schemes for rule learning, C4.5 and RIPPER, both operate in two stages. First they induce an initial rule set and then they refine it using a rather complex optimization stage that discards (C4.5) or adjusts (RIPPER) individual rules to make them work better together. In contrast, this paper shows how good rule sets can be learned one rule at a time, without any need for global optimization. We present an algorithm for inferring rules by repeatedly generating partial decision trees, thus combining the two major paradigms for rule generation: creating rules from decision trees, and the separate-and-conquer rule-learning technique. The algorithm is straightforward and elegant; despite this, experiments on standard datasets show that it produces rule sets that are as accurate as and of similar size to those generated by C4.5, and more accurate than RIPPER's. Moreover, it operates efficiently, and because it avoids postprocessing, it does not suffer from the extremely slow performance on pathological example sets for which the C4.5 method has been criticized.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Frank-Witten-Generating-Rules.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Frank-Witten-Generating-Rules.ps}
}

@INPROCEEDINGS{FrankW98a,
  AUTHOR = {Eibe Frank and
               Ian H. Witten},
  TITLE = {Using a Permutation Test for Attribute Selection in Decision
               Trees},
  BOOKTITLE = {Proc 15th International Conference on Machine Learning},
  YEAR = {1998},
  PAGES = {152-160},
  PUBLISHER = {Morgan Kaufmann},
  ADDRESS = {Madison, Wisconsin},
  ABSTRACT = {Most techniques for attribute selection in decision trees are biased towards attributes with many values, and several ad hoc solutions to this problem have appeared in the machine learning literature. Statistical tests for the existence of an association with a prespecified significance level provide a well-founded basis for addressing the problem. However, many statistical tests are computed from a chi-squared distribution, which is only a valid approximation to the actual distribution in the large-sample case, and this patently does not hold near the leaves of a decision tree. An exception is the class of permutation tests. We describe how permutation tests can be applied to this problem. We choose one such test for further exploration, and give a novel two-stage method for applying it to select attributes in a decision tree. Results on practical datasets compare favorably with other methods that also adopt a pre-pruning strategy.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Frank-Witten-Permutation98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Frank-Witten-Permutation98.ps}
}

@ARTICLE{FrankWIHW98,
  AUTHOR = {Eibe Frank and
               Yong Wang and
               Stuart Inglis and
               Geoffrey Holmes and
               Ian H. Witten},
  TITLE = {Using Model Trees for Classification},
  JOURNAL = {Machine Learning},
  VOLUME = {32},
  NUMBER = {1},
  YEAR = {1998},
  PAGES = {63-76},
  NOTE = {Also available as Working Paper 97/12, Department of Computer Science, University of Waikato, April 1997},
  ABSTRACT = {Model trees, which are a type of decision tree with linear regression functions at the leaves, form the basis of a recent successful technique for predicting continuous numeric values. They can be applied to classification problems by employing a standard method of transforming a classification problem into a problem of function approximation. Surprisingly, using this simple transformation the model tree inducer M5', based on Quinlan's M5, generates more accurate classifiers than the state-of-the-art decision tree learner C5.0, particularly when most of the attributes are numeric.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Frank-et-al-Model-Trees.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Frank-et-al-Model-Trees.ps}
}

@INPROCEEDINGS{hall98:_pract,
  AUTHOR = {Mark Andrew Hall and Lloyd Smith},
  TITLE = {Practical feature subset selection for machine learning},
  BOOKTITLE = {Proc 21st Australasian Computer Science Conference},
  PAGES = {181-191},
  YEAR = {1998},
  PUBLISHER = {Springer},
  ADDRESS = {Perth, Australia},
  ABSTRACT = {Machine learning algorithms automatically extract knowledge from machine readable information. Unfortunately, their success is usually dependent on the quality of the data that they operate on. If the data is inadequate, or contains extraneous and irrelevant information, machine learning algorithms may produce less accurate and less understandable results, or may fail to discover anything of use at all. Feature subset selectors are algorithms that attempt to identify and remove as much irrelevant and redundant information as possible prior to learning. Feature subset selection can result in enhanced performance, a reduced hypothesis search space, and, in some cases, reduced storage requirements. This paper describes a new feature selection algorithm that uses a correlation-based heuristic to determine the "goodness" of feature subsets, and evaluates its effectiveness with three common machine learning algorithms. Experiments using a number of standard machine learning data sets are presented. Feature subset selection gave significant improvement for all three algorithms.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Hall-Smith98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Hall-Smith98.ps}
}

@ARTICLE{HartR.andLarcombe1998,
  AUTHOR = {Hart, R. and Larcombe, M.T. and Sherlock, R.A. and Smith, L.A.},
  TITLE = {Optimisation techniques for a computer simulation of a pastoral dairy farm},
  JOURNAL = {Computers and Electronics in Agriculture},
  YEAR = {1998},
  VOLUME = {19},
  PAGES = {129-153},
  ABSTRACT = {This paper compares different methods of optimising the management variables in UDDER, a commercially-available computer simulation model of a pastoral dairy farm. The emphasis is on identifying the best optimisation strategy for this complex multi-dimensional system, taking the simulation model as a given constant. The optimisation methods studied are based on significantly different principles, with differing strengths and weaknesses: two hill-climbing algorithms (Nelder-Mead Simplex and Powell's Direction Set), and a genetic algorithm. Rather than examine all facets of dairy farm management, a single problem is optimised: that of maximising milkfat production while maintaining the health of the herd and pasture.\\
\\
The results show that while the genetic algorithm can determine good regions within the search space quickly, it is considerably slower than either hill-climber at finding the optimal point within that region. The hill-climbers, in contrast, are fast but have a tendency to get trapped on local maxima and thus fail to find the true optimum. This led to the development of a hybrid algorithm which utilises the initial global search of the genetic algorithm, followed by the more efficient local search of a hill-climber. This hybrid algorithm discovered near-optimal points much more quickly than the genetic algorithm, and with more reliability than the hill-climber.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Hart-Larcombe-Sherlock-Smith98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Hart-Larcombe-Sherlock-Smith98.ps}
}

@INPROCEEDINGS{HolmesG.andCunningham1998,
  AUTHOR = {Holmes, G. and Cunningham, S.J. and Dela Rue, B.T. and Bollen, A.F.},
  TITLE = {Predicting apple bruising using machine learning},
  BOOKTITLE = {Proc Model-IT Conference, Acta Horticulturae},
  EDITOR = {L.M.M. Tijskens and M.L.A.T.M. Hertog},
  PAGES = {289-296},
  VOLUME = {476},
  ADDRESS = {The Netherlands},
  YEAR = {1998},
  ABSTRACT = {Many models have been used to describe the influence of internal or external factors on apple bruising. Few of these have addressed the application of derived relationships to the evaluation of commercial operations. From an industry perspective, a model must enable fruit to be rejected on the basis of a commercially significant bruise and must also accurately quantify the effects of various combinations of input features (such as cultivar, maturity, size, and so on) on bruise prediction. Input features must in turn have characteristics which are measurable commercially; for example, the measure of force should be impact energy rather than energy absorbed. Further, as the commercial criteria for acceptable damage levels change, the model should be versatile enough to regenerate new bruise thresholds from existing data.\\
\\
Machine learning is a burgeoning technology with a vast range of potential applications, particularly in agriculture, where large amounts of data can be readily collected [1]. The main advantage of using a machine learning method in an application is that the models built for prediction can be viewed and understood by the owner of the data, who is in a position to determine the usefulness of the model, an essential component in a commercial environment.\\
\\
Machine Learning software recently developed at Waikato University [2] offers potential as a prediction tool for the classification of bruising based on historical data. It gives the user the opportunity to select any number of measured input attributes and determine the influence of that combination on a range of bruise size categories. The user may require a high degree of accuracy in the classification and therefore prune the attributes or bruise classes accordingly, or alternatively seek to discover trends in the dataset (in which case a lower level of accuracy often clarifies implicit structures in the data).\\
\\
Models such as the theory of elasticity suggest that impact energy and radius of curvature will have a significant effect on the bruise surface area. Cell structure is also thought to contribute to variation in bruise size. The experiment described in this paper uses the machine learning programs C4.5 [3] and M5' [4] to determine the influence of impact energy, radius of curvature and impact site location on bruise area.},
  NOTE = {Also available as Working Paper 98/7, Department of Computer Science, University of Waikato, April 1998},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/GH-SJC-BTD-AFB-App-Bruise98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/GH-SJC-BTD-AFB-App-Bruise98.ps}
}

@ARTICLE{Humphrey1998,
  AUTHOR = {Humphrey, M. and Cunningham, S.J. and Witten, I.H.},
  TITLE = {Knowledge visualization techniques for machine learning},
  JOURNAL = {Intelligent Data Analysis},
  YEAR = {1998},
  VOLUME = {2},
  NUMBER = {1-4},
  PAGES = {333-347},
  ABSTRACT = {Researchers in machine learning primarily use decision trees, production rules, and decision graphs for visualizing classification data; the graphic form in which a structure is portrayed has a strong influence on comprehensibility. We analyze the questions that, in our experience, end users of machine learning tend to ask of the structures inferred from their empirical data. By mapping these questions onto visualization tasks, we have created new graphical representations that show the flow of examples through a decision structure. The knowledge visualization techniques are particularly appropriate in helping to answer the questions that users typically ask, and we describe their use in discovering new properties of a data set. In the case of decision trees, an automated software tool has been developed to construct the visualizations.}
}

@INPROCEEDINGS{Kusabs1998,
  AUTHOR = {Kusabs, N. and Bollen, F. and Trigg, L. and Holmes, G. and Inglis, S.},
  TITLE = {Objective measurement of mushroom quality},
  BOOKTITLE = {Proc New Zealand Institute of Agricultural Science and the New Zealand Society for Horticultural Science Annual Convention},
  PAGES = {51},
  ADDRESS = {Hawke's Bay, New Zealand},
  YEAR = {1998},
  ABSTRACT = {This paper describes a methodology for establishing an objective measurement of mushroom quality, based on a set of measured physical attributes. These attributes were used by a machine learning tool to model quality based on the classification of professional graders (experts).\\
\\
Four experts visually evaluated 300 mushrooms and graded them into three major and eight subclasses of commercial quality. Weight, firmness and images of the top and bottom of each mushroom were then measured. These physical parameters were used to construct a model of the grades assigned by the four experts. Grader consistency was also assessed by repeated classification (four repetitions) of two 100-mushroom sets. Grader repeatability ranged from 6 to 15\% misclassification.\\
\\
Misclassification by the model increased with grading complexity (16-32\% for the three major grades and 17-63\% for the eight subclasses). This depended heavily on grader classifications and the variables used for training the model (weight, firmness, estimated gill opening and red/green/blue histograms from the image). The accuracy of classification using various combinations of physical attributes, which indicates the relative weight placed on each attribute by the different graders, is detailed in the paper.}
}

@INPROCEEDINGS{McNabR.andWang1998,
  AUTHOR = {McNab, R. and Wang, Y. and Witten, I.H. and Gutwin, C.},
  TITLE = {Predicting query times},
  BOOKTITLE = {Proc ACM SIGIR Conference on Research and Development in Information Retrieval},
  EDITOR = {W.B. Croft and others},
  PAGES = {355-356},
  PUBLISHER = {ACM Press},
  ADDRESS = {Melbourne, Australia},
  YEAR = {1998},
  ABSTRACT = {We outline the need for search engines to provide user feedback on the expected time for a query, describe a scheme for learning a model of query time by observing sample queries, and discuss the results obtained for a set of actual user queries on a document collection using the MG search engine.}
}

@INPROCEEDINGS{McQueen1998,
  AUTHOR = {McQueen, R.J. and Holmes, G.},
  TITLE = {User perceptions of machine learning},
  BOOKTITLE = {Proc Association of Information Systems Conference},
  EDITOR = {E.D. Hoadley and I. Benbasat},
  PAGES = {180-182},
  PUBLISHER = {Association for Information Systems, Atlanta, GA},
  ADDRESS = {Baltimore, Maryland},
  YEAR = {1998},
  ABSTRACT = {Machine learning has potential use in the understanding of information hidden in large datasets, but little is known about users' perceptions of the technology. In this study, a number of datasets were solicited from agricultural researchers and processed using a machine learning workbench. The results were reported to the researchers, and then interviews were conducted with some of them to determine their perceptions of machine learning as an analysis technique in addition to traditional statistical analysis. A number of themes about their satisfaction with this technique were constructed from the interview transcripts, which generally indicate that machine learning may be able to contribute to the analysis and understanding of these kinds of datasets.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/RJM-GH-User-Percept.pdf}
}

@ARTICLE{McQueen1998_2,
  AUTHOR = {McQueen, R.J. and Holmes, G. and Hunt, L.},
  TITLE = {User satisfaction with machine learning as a data analysis method in agricultural research},
  JOURNAL = {New Zealand Journal of Agricultural Research},
  YEAR = {1998},
  VOLUME = {41},
  NUMBER = {4},
  PAGES = {577-584},
  PUBLISHER = {SIR Publications},
  ADDRESS = {Wellington, New Zealand},
  ABSTRACT = {Machine learning has potential use in the understanding of information hidden in large datasets, but little is known about user perceptions of the technology. In this study, a number of datasets were solicited from agricultural researchers and processed using a machine learning workbench. The results were reported to the researchers, and then interviews were conducted with some of them to determine their perceptions of machine learning as an analysis technique in addition to traditional statistical analysis. A number of themes about their satisfaction with this technique were constructed from the interview transcripts, which generally indicate that machine learning may be able to contribute to the analysis and understanding of these kinds of datasets.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/RJM-GH-LH-User-Satisfact.pdf}
}

@INPROCEEDINGS{Nevill-Manning1997,
  AUTHOR = {Nevill-Manning, C.G. and Witten, I.H.},
  TITLE = {Inferring lexical and grammatical structure from sequences},
  BOOKTITLE = {Proc Compression and Complexity of Sequences},
  EDITOR = {B. Carpentieri and et al.},
  PAGES = {265-274},
  PUBLISHER = {IEEE Computer Society Press},
  ADDRESS = {Los Alamitos, CA},
  YEAR = {1997},
  NOTE = {Published as Nevill-Manning and Witten, 1998},
  ABSTRACT = {In a wide variety of sequences from various sources, from music and text to DNA and computer programs, two different but related kinds of structure can be discerned. First, some segments tend to be repeated exactly, such as motifs in music, words or phrases in text, and identifiers and syntactic idioms in computer programs. Second, these segments interact with each other in variable but constrained ways. For example, in English text only certain syntactic word classes can appear after the word 'the'; many parts of speech (such as verbs) are necessarily excluded.\\
\\
This paper shows how these kinds of structure can be inferred automatically from sequences. Let us make clear at the outset what aspects of sequence structure we are not concerned with. We take no account of numerical frequencies other than the 'more than once' that defines repetition. We do not consider any similarity metrics between the individual symbols that make up the sequence, nor between 'similar' subsequences such as transposed or transformed motifs in music. Finally, although we are certainly interested in nested repetitions, we do not analyze recursive structure in sequences, such as self-similarity in fractal sequences. All of these regularities are interesting ones that would be well worth taking into account, but they lie beyond the scope of this paper.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/NM-IHW-Infer-Lexical98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/NM-IHW-Infer-Lexical98.ps}
}

@INPROCEEDINGS{Nevill-Manning1998,
  AUTHOR = {Nevill-Manning, C.G. and Witten, I.H.},
  TITLE = {Phrase hierarchy inference and compression in bounded space},
  BOOKTITLE = {Proc Data Compression Conference},
  EDITOR = {J.A. Storer and M. Cohn},
  PAGES = {179-188},
  PUBLISHER = {IEEE Press},
  ADDRESS = {Los Alamitos, CA},
  YEAR = {1998},
  ABSTRACT = {Text compression by inferring a phrase hierarchy from the input is a recent technique that shows promise both as a compression scheme and as a machine learning method that extracts some comprehensible account of the structure of the input text. Its performance as a data compression scheme outstrips other dictionary schemes, and the structures that it learns from sequences have been put to such eclectic uses as phrase browsing in digital libraries, music analysis, and inferring rules for fractal images.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/98NM-IHW-Phrase.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/98NM-IHW-Phrase.ps}
}

@INPROCEEDINGS{Smith1998,
  AUTHOR = {Smith, T.C. and Peeters, R.},
  TITLE = {Fast convergence with a greedy tag-phrase dictionary},
  BOOKTITLE = {Proc Data Compression Conference},
  EDITOR = {J.A. Storer and M. Cohn},
  PAGES = {33-42},
  PUBLISHER = {IEEE Press},
  ADDRESS = {Los Alamitos, CA},
  YEAR = {1998},
  ABSTRACT = {The best general-purpose compression schemes make their gains by estimating a probability distribution over all possible next symbols given the context established by some number of previous symbols. Such context models typically obtain good compression results for plain text by taking advantage of regularities in character sequences. Frequent words and syllables can be incorporated into the model quickly and thereafter used for reasonably accurate prediction. However, the precise context in which frequent patterns emerge is often extremely varied, and each new word or phrase immediately introduces new contexts which can adversely affect the compression rate.\\
\\
A great deal of the structural regularity in a natural language is given rather more by properties of its grammar than by the orthographic transcription of its phonology. This implies that access to a grammatical abstraction might lead to good compression. While grammatical models have been used successfully for compressing computer programs [4], grammar-based compression of plain text has received little attention, primarily because of the difficulties associated with constructing a suitable natural language grammar. But even without a precise formulation of the syntax of a language, there is a linguistic abstraction which is easily accessed and which demonstrates a high degree of regularity that can be exploited for compression purposes: namely, lexical categories.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/TCSmith-Peeters98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/TCSmith-Peeters98.ps}
}

@INPROCEEDINGS{Smith1998_2,
  AUTHOR = {Smith, T.C.},
  TITLE = {Learning feature-value grammars from plain text},
  BOOKTITLE = {Proc Joint Conference on New Methods in Language Processing and Computational Natural Language Learning},
  EDITOR = {David M.-W. Powers},
  PAGES = {291-294},
  ADDRESS = {Sydney, Australia},
  YEAR = {1998},
  ABSTRACT = {This paper outlines preliminary work on learning feature-value grammars from plain text. Common suffixes are gleaned from a word suffix tree and used to form a first approximation of how regular inflection is marked. Words are generalised into lexical categories according to regularities in how these suffixes appear in trigram context. The categories are expressed as a lexical feature whose value is given by the most frequent suffix for similar trigrams. The trigrams are subsequently used to infer agreement dependencies which are captured through the creation of additional feature structures. Agreement and linear precedence are preserved through the iterative creation of unification rules for pairs of terms.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/TCSmith-Learn-Feature98.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/TCSmith-Learn-Feature98.ps}
}

@INPROCEEDINGS{Ting1998,
  AUTHOR = {Ting, K.M. and Zheng, Z.},
  TITLE = {Boosting trees for cost-sensitive classification},
  BOOKTITLE = {Proc European Conference on Machine Learning},
  PAGES = {190-195},
  SERIES = {LNAI},
  VOLUME = {1398},
  PUBLISHER = {Springer-Verlag},
  ADDRESS = {Berlin},
  YEAR = {1998},
  ABSTRACT = {This paper explores two boosting techniques for cost-sensitive tree classification in situations where misclassification costs change very often. Ideally, one would like to have only one induction, and to use the induced model for different misclassification costs. This demands that the induced model be robust against cost changes. Combining multiple trees gives predictions that are robust against such changes. We demonstrate that, in this situation, the two boosting techniques are good solutions in different respects.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Ting-Zheng.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Ting-Zheng.ps}
}

@TECHREPORT{Trigg1998,
  AUTHOR = {Trigg, L.},
  TITLE = {An entropy gain measure of numeric prediction performance},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {1998},
  MONTH = {May},
  NUMBER = {98/11},
  ABSTRACT = {Categorical classifier performance is typically evaluated with respect to error rate, expressed as a percentage of test instances that were not correctly classified. When a classifier produces multiple classifications for a test instance, the prediction is counted as incorrect (even if the correct class was one of the predictions). Although commonly used in the literature, error rate is a coarse measure of classifier performance, as it is based only on a single prediction offered for a test instance. Since many classifiers can produce a class distribution as a prediction, we should use this to provide a better measure of how much information the classifier is extracting from the domain.\\
\\
Numeric classifiers are a relatively new development in machine learning, and as such there is no single performance measure that has become standard. Typically these machine learning schemes predict a single real number for each test instance, and the error between the predicted and actual value is used to calculate a myriad of performance measures such as correlation coefficient, root mean squared error, mean absolute error, relative absolute error, and root relative squared error. With so many performance measures it is difficult to establish an overall performance evaluation.\\
\\
The next section describes a performance measure for machine learning schemes that attempts to overcome the problems with current measures. In addition, the same evaluation measure is used for both categorical and numeric classifiers.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Trigg-Entropy.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1998/Trigg-Entropy.ps}
}