1996.bib

@COMMENT{{Automatically generated - DO NOT MODIFY!}}

@INPROCEEDINGS{frank96:_activ_learn_soft_rules_system_model,
  AUTHOR = {Eibe Frank and Klaus-Peter Huber},
  TITLE = {Active Learning of Soft Rules for System Modelling},
  BOOKTITLE = {Proc 4th European Congress on Intelligent Techniques and Soft Computing},
  PAGES = {1430-1434},
  YEAR = 1996,
  ADDRESS = {Aachen, Germany},
  PUBLISHER = {Verlag Mainz},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/eufit96.eibe.paper.ps.gz},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/eufit96.eibe.paper.pdf}
}

@ARTICLE{hall96:_comput_model_blues_music_evaluat,
  AUTHOR = {Mark Andrew Hall and Lloyd Smith},
  TITLE = {A Computer Model of Blues Music and its Evaluation},
  JOURNAL = {Journal of the Acoustical Society of America},
  YEAR = 1996,
  VOLUME = 100,
  NUMBER = 2,
  PAGES = {1163-1167}
}

@INPROCEEDINGS{Cleary1996,
  AUTHOR = {Cleary, J.G. and Legg, S. and Witten, I.H.},
  TITLE = {An MDL estimate of the significance of rules},
  BOOKTITLE = {Proc ISIS: Information, Statistics, and Induction in Science},
  PAGES = {43-53},
  ADDRESS = {Melbourne, Australia},
  YEAR = {1996},
  MONTH = {August},
  ABSTRACT = {This paper proposes a new method for measuring the performance of models, whether decision trees or sets of rules, inferred by machine learning methods. Inspired by the minimum description length (MDL) philosophy and theoretically rooted in information theory, the new method measures the complexity of test data with respect to the model. It has been evaluated on rule sets produced by several different machine learning schemes on a large number of standard data sets. When compared with the usual percentage correct measure, it is shown to agree with it in restricted cases. However, in other more general cases taken from real data sets (for example, when rule sets make multiple or no predictions) it disagrees substantially. It is argued that the MDL measure is more reasonable in these cases and represents a better way of assessing the significance of a rule set's performance. The question of the complexity of the rule set itself is not addressed in the paper.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Cleary96-MDL.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Cleary96-MDL.ps}
}
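
The measure described in the abstract above can be made concrete with a small sketch. The coding scheme below is our own minimal illustration of an MDL-style test cost, not the paper's actual scheme, and all names are hypothetical. It charges, per test instance, one flag bit plus the bits needed to single out the true class, and it handles rule sets that propose several classes or none.

import math

def mdl_test_cost(predictions, truths, n_classes):
    # predictions: one set of proposed classes per test instance
    # (possibly empty, possibly several); truths: the actual classes.
    bits = 0.0
    for proposed, truth in zip(predictions, truths):
        if truth in proposed:
            bits += 1.0                       # flag: truth is among those proposed
            bits += math.log2(len(proposed))  # single it out of the proposed set
        else:
            bits += 1.0                       # flag: exception
            bits += math.log2(max(n_classes - len(proposed), 1))
    return bits

Lower cost means the model explains the test data better; unlike percentage correct, the cost degrades gracefully when a rule set proposes multiple classes or abstains.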

@INPROCEEDINGS{ClearyJ.G.andHolmes1996,
  AUTHOR = {Cleary, J.G. and Holmes, G. and Cunningham, S.J. and Witten, I.H.},
  TITLE = {MetaData for database mining},
  BOOKTITLE = {Proc IEEE Metadata Conference},
  ADDRESS = {Silver Spring, MD},
  YEAR = {1996},
  MONTH = {April},
  ABSTRACT = {At present, a machine learning application is accomplished by carefully crafting a single table from an often complex, multi-table database. The metadata necessary to create this table is rarely formally recorded, and is sometimes implicit in the structure of the database or the typing of the attributes. We categorize the types of metadata that we have encountered in our work with machine learning applications in agriculture, and describe a first generation tool that we have built to aid in the recording and use of metadata in database mining.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Cleary-et-al-96-Metadata.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Cleary-et-al-96-Metadata.ps}
}

@TECHREPORT{Cunningham1996,
  AUTHOR = {Cunningham, S.J.},
  TITLE = {Dataset cataloguing metadata for machine learning applications and research},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Computer Science Department, Hamilton, New Zealand},
  YEAR = {1996},
  MONTH = {October},
  NUMBER = {96/26},
  ABSTRACT = {As the field of machine learning (ML) matures, two types of data archives are developing: collections of benchmark data sets used to test the performance of new algorithms, and data stores to which machine learning/data mining algorithms are applied to create scientific or commercial applications. At present, the catalogs of these archives are ad hoc and not tailored to machine learning analysis. This paper considers the cataloging metadata required to support these two types of repositories, and discusses the organizational support necessary for archive catalog maintenance.}
}

@TECHREPORT{Cunningham1996_2,
  AUTHOR = {Cunningham, S.J. and Humphrey, M.C. and Witten, I.H.},
  TITLE = {Understanding what Machine Learning produces Part I: Representations and their comprehensibility},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Computer Science Department, Hamilton, New Zealand},
  YEAR = {1996},
  MONTH = {October},
  NUMBER = {96/21},
  ABSTRACT = {The aim of many machine learning users is to comprehend the structures that are inferred from a dataset, and such users may be far more interested in understanding the structure of their data than in predicting the outcome of new test data. Part I of this paper surveys representations based on decision trees, production rules and decision graphs, that have been developed and used for machine learning. These representations have differing degrees of expressive power, and particular attention is paid to their comprehensibility for non-specialist users. The graphic form in which a structure is portrayed also has a strong effect on comprehensibility, and Part II of this paper develops knowledge visualization techniques that are particularly appropriate to help answer the questions that machine learning users typically ask about the structures produced.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/SJC-MCH-IHW-PartI.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/SJC-MCH-IHW-PartI.ps}
}

@TECHREPORT{Frank1996,
  AUTHOR = {Frank, E. and Witten, I.H.},
  TITLE = {Selecting multiway splits in decision trees},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {1996},
  MONTH = {December},
  NUMBER = {96/31},
  ABSTRACT = {Decision trees in which numeric attributes are split several ways are more comprehensible than the usual binary trees because attributes rarely appear more than once in any path from root to leaf. There are efficient algorithms for finding the optimal multiway split for a numeric attribute, given the number of intervals in which it is to be divided. The problem we tackle is how to choose this number in order to obtain small, accurate trees.\\
\\
We view each multiway decision as a model and a decision tree as a recursive structure of such models. Standard methods of choosing between competing models include resampling techniques (such as cross-validation, holdout, or bootstrap) for estimating the classification error; and minimum description length techniques. However, the recursive situation differs from the usual one, and may call for new model selection methods.\\
\\
This paper introduces a new criterion for model selection: a resampling estimate of the information gain. Empirical results are presented for building multiway decision trees using this new criterion, and compared with criteria adopted by previous authors. The new method generates multiway trees that are both smaller and more accurate than those produced previously, and their performance is comparable with standard binary decision trees.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Frank-Witten96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Frank-Witten96.ps}
}
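
The model-selection criterion in the abstract above, a resampling estimate of the information gain, can be sketched as follows. This is our own illustration under stated assumptions: cut points come from a simple equal-frequency split rather than the optimal multiway-split algorithm the paper builds on, and at least `folds` instances are assumed.

import collections, math, random

def entropy(labels):
    n = len(labels)
    return -sum(c / n * math.log2(c / n)
                for c in collections.Counter(labels).values())

def cut_points(values, k):
    # equal-frequency cuts: a stand-in for the optimal k-way split
    v = sorted(values)
    return [v[i * len(v) // k] for i in range(1, k)]

def interval(x, cuts):
    return sum(x >= c for c in cuts)   # bin index in 0..k-1

def cv_info_gain(values, labels, k, folds=10, seed=0):
    # Cross-validated gain: find cuts on each training fold,
    # then measure the information gain on the held-out fold.
    idx = list(range(len(values)))
    random.Random(seed).shuffle(idx)
    gain = 0.0
    for f in range(folds):
        test = idx[f::folds]
        held_out = set(test)
        cuts = cut_points([values[i] for i in idx if i not in held_out], k)
        held = [labels[i] for i in test]
        by_bin = collections.defaultdict(list)
        for i in test:
            by_bin[interval(values[i], cuts)].append(labels[i])
        cond = sum(len(b) / len(held) * entropy(b) for b in by_bin.values())
        gain += entropy(held) - cond
    return gain / folds

The number of intervals would then be chosen as the k (say, in 2..5) that maximises cv_info_gain, which is the role the new criterion plays in the paper's tree builder.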

@MASTERSTHESIS{Hart1996,
  AUTHOR = {Hart, R.},
  TITLE = {Experimental comparison of optimisation techniques: computer optimisation of dairy farm management},
  SCHOOL = {University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {1996}
}

@TECHREPORT{Humphrey1996,
  AUTHOR = {Humphrey, M.C. and Cunningham, S.J. and Witten, I.H.},
  TITLE = {Understanding what Machine Learning produces Part II: Knowledge visualization techniques},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Computer Science Department, Hamilton, New Zealand},
  YEAR = {1996},
  MONTH = {October},
  NUMBER = {96/22},
  ABSTRACT = {Researchers in machine learning use decision trees, production rules, and decision graphs for visualizing classification data. Part I of this paper surveyed these representations, paying particular attention to their comprehensibility for non-specialist users. Part II turns attention to knowledge visualization: the graphic form in which a structure is portrayed, and its strong influence on comprehensibility. We analyze the questions that, in our experience, end users of machine learning tend to ask of the structures inferred from their empirical data. By mapping these questions onto visualization tasks, we have created new graphical representations that show the flow of examples through a decision structure. These knowledge visualization techniques are particularly appropriate in helping to answer the questions that users typically ask, and we describe their use in discovering new properties of a data set. In the case of decision trees, an automated software tool has been developed to construct the visualizations.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/MCH-SJC-IHW-PartII.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/MCH-SJC-IHW-PartII.ps}
}
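
As a concrete companion to the abstract above, the "flow of examples" through a decision structure can be computed in a few lines. This is a toy reconstruction under our own tree encoding (nested dicts with hypothetical 'name', 'split' and 'branches' keys), not the paper's tool.

def example_flow(node, examples, counts=None):
    # Count how many examples reach each node; a leaf is any node
    # without a 'split' key. Branches are keyed by attribute value.
    if counts is None:
        counts = {}
    counts[node['name']] = counts.get(node['name'], 0) + len(examples)
    if 'split' in node:
        attr = node['split']
        for value, child in node['branches'].items():
            example_flow(child,
                         [e for e in examples if e[attr] == value],
                         counts)
    return counts

For instance, a one-split tree over an 'outlook' attribute with two leaves maps three examples to counts like {'root': 3, 'sunny leaf': 2, 'rainy leaf': 1}, which is exactly the kind of quantity these visualizations display.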

@MASTERSTHESIS{Littin1996,
  AUTHOR = {Littin, J.N.},
  TITLE = {Learning relational ripple-down rules},
  SCHOOL = {University of Waikato},
  ADDRESS = {Computer Science Department, Hamilton, New Zealand},
  YEAR = {1996},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/JLittin96-Thesis.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/JLittin96-Thesis.ps}
}

@ARTICLE{Mitchell1996,
  AUTHOR = {Mitchell, R.S. and Sherlock, R.A. and Smith, L.A.},
  TITLE = {An investigation into the use of machine learning for determining oestrus in cows},
  JOURNAL = {Computers and Electronics in Agriculture},
  YEAR = {1996},
  VOLUME = {15},
  PAGES = {195-215},
  ABSTRACT = {A preliminary investigation of the application of two well-known machine learning schemes, C4.5 and FOIL, to the detection of oestrus in dairy cows has been made. This is a problem of practical economic significance, as each missed opportunity for artificial insemination results in 21 days' lost milk production. Classifications were made on normalised deviations of milk volume production and milking order time series data. The best learning scheme was C4.5, which was able to detect 69\% of oestrus events, albeit with an unacceptably high rate of "false positives" (74\%). Several directions for further work and improvements are identified.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Mitchell96-Oestrus.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Mitchell96-Oestrus.ps}
}
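
The features named in the abstract above, normalised deviations of a time series, can be sketched as follows. The window length and the use of the population standard deviation are our assumptions; the abstract does not fix them.

import statistics

def normalised_deviations(series, window=10):
    # Deviation of each day's value (e.g. milk volume) from the mean of
    # the preceding window, in units of that window's standard deviation.
    out = []
    for i in range(window, len(series)):
        hist = series[i - window:i]
        mu, sd = statistics.mean(hist), statistics.pstdev(hist)
        out.append((series[i] - mu) / sd if sd else 0.0)
    return out

A classifier such as C4.5 would then be trained on these deviations (and the corresponding milking-order features) rather than on the raw volumes.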

@INPROCEEDINGS{Maulsby1996,
  AUTHOR = {Maulsby, D. and Witten, I.H.},
  TITLE = {Machine learning in programming by demonstration: lessons learned from CIMA},
  BOOKTITLE = {Acquisition, learning and demonstration: Automating tasks for users (Proc AAAI Symposium, Stanford)},
  EDITOR = {Y. Gil},
  PAGES = {66-72},
  PUBLISHER = {AAAI Press},
  ADDRESS = {Menlo Park, CA},
  YEAR = {1996},
  ABSTRACT = {Programming-by-demonstration (PBD) systems learn tasks by watching the user perform them. CIMA is an interactive learning system for modeling the data selected and modified by a user as he or she undertakes a task. Part of a PBD system, CIMA is invoked when user actions are matched to find a common description of their "operands." Although the system's interfaces to users and applications are still too rough-hewn to permit field trials, its performance on recorded dialogs between users and a simulated agent meets the design goals established prior to its implementation.\\
\\
The contributions of this work lie in three areas:\\
\\
   (a) a design methodology for PBD systems;\\
   (b) a framework for user actions in PBD;\\
   (c) novel methods of interaction with the user.\\
\\
These are discussed in separate sections below. First, however, it is necessary to convey the flavor of what it is like to use the CIMA system, and this is done in the following section.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Maulsby-IHW-CIMA.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Maulsby-IHW-CIMA.ps}
}

@PHDTHESIS{Nevill-Manning1996,
  AUTHOR = {Nevill-Manning, C.G.},
  TITLE = {Detecting sequential structure},
  SCHOOL = {University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {1996},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/NevMan-Thesis.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/NevMan-Thesis.ps}
}

@INPROCEEDINGS{Nevill-Manning1996_2,
  AUTHOR = {Nevill-Manning, C.G. and Witten, I.H. and Olsen, D.R.},
  TITLE = {Compressing semi-structured text using hierarchical phrase identification},
  BOOKTITLE = {Proc Data Compression Conference},
  EDITOR = {J.A. Storer and M. Cohn},
  PAGES = {63-72},
  PUBLISHER = {IEEE Press},
  ADDRESS = {Los Alamitos, CA},
  YEAR = {1996},
  ABSTRACT = {Many computer files contain highly-structured, predictable information interspersed with information which has less regularity and is therefore less predictable, such as free text. Examples range from word-processing source files, which contain precisely-expressed formatting specifications enclosing tracts of natural-language text, to files containing a sequence of filled-out forms which have a predefined skeleton clothed with relatively unpredictable entries. These represent extreme ends of a spectrum. Word-processing files are dominated by free text, and respond well to general-purpose compression techniques. Forms generally contain database-style information, and are most appropriately compressed by taking into account their special structure. But one frequently encounters intermediate cases. For example, in many email messages the formal header and the informal free-text content are equally voluminous. Short SGML files often contain comparable amounts of formal structure and informal text. Although such files may be compressed quite well by general-purpose adaptive text compression algorithms, which will soon pick up the regular structure during the course of normal adaptation, better compression can often be obtained by methods that are equipped to deal with both formal and informal structure.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Nevill-Manning-et-al-dcc96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Nevill-Manning-et-al-dcc96.ps}
}
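
The hierarchical phrase identification named in the title above can be illustrated with a toy, offline variant: repeatedly replace the most frequent adjacent pair of symbols with a fresh rule symbol. This is our simplification for illustration only; the actual technique works incrementally, and the pair counts here may include overlaps.

import collections

def phrase_hierarchy(seq):
    # Returns the reduced sequence plus a dictionary of rules,
    # each mapping a new symbol to the pair of symbols it replaces.
    rules, next_id = {}, 0
    seq = list(seq)
    while True:
        pairs = collections.Counter(zip(seq, seq[1:]))
        if not pairs or pairs.most_common(1)[0][1] < 2:
            return seq, rules
        pair = pairs.most_common(1)[0][0]
        sym = 'R%d' % next_id
        next_id += 1
        rules[sym] = pair
        out, i = [], 0
        while i < len(seq):
            if i + 1 < len(seq) and (seq[i], seq[i + 1]) == pair:
                out.append(sym)
                i += 2
            else:
                out.append(seq[i])
                i += 1
        seq = out

On 'abcabcabc' this collapses the text to the two symbols R2 R1 via the rules R0 -> (a, b), R1 -> (R0, c) and R2 -> (R1, R1); regular, form-like parts of a file sink into deep rules while free text stays relatively flat.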

@INPROCEEDINGS{Sherlock1996,
  AUTHOR = {Sherlock, R.A. and Smith, L.A. and Mitchell, R.S.},
  TITLE = {Automatic oestrus detection from milking data - a preliminary investigation},
  BOOKTITLE = {Proc NZ Society for Animal Production},
  VOLUME = {56},
  PAGES = {228-229},
  ADDRESS = {Hamilton, New Zealand},
  YEAR = {1996},
  MONTH = {February},
  ABSTRACT = {Efficient oestrus detection is important economically, particularly with seasonal-calving herds employing artificial insemination, as every missed oestrus effectively costs 21 days' milk production for that cow in that season, which is about 8\% of the total. Traditionally, oestrus detection relies mainly on visual observation of animal behaviour: a cow in oestrus will stand and allow herself to be mounted by herdmates. Such events are readily noted when they take place amongst assembled cows, such as at milking times, but may be missed entirely in herds of free-grazing animals which are only brought in for milking twice a day, as is usual in New Zealand-style systems. A common low-cost aid to oestrus detection is the use of "tail-paint" (Macmillan and Curnow, 1977), and although this can be very effective when properly executed, the regular visual inspection and repainting is labour-intensive.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Sherlock-et-al96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Sherlock-et-al96.ps}
}

@INCOLLECTION{Smith1996,
  AUTHOR = {Smith, T.C. and Witten, I.H.},
  TITLE = {Learning language using genetic algorithms},
  BOOKTITLE = {Connectionist, Statistical and Symbolic Approaches to Learning for Natural Language Processing},
  SERIES = {Lecture Notes in Computer Science},
  VOLUME = {1040},
  EDITOR = {S. Wermter and E. Riloff and G. Scheler},
  PUBLISHER = {Springer-Verlag},
  ADDRESS = {New York},
  YEAR = {1996},
  PAGES = {132-145},
  ABSTRACT = {Strict pattern-based methods of grammar induction are often frustrated by the apparently inexhaustible variety of novel word combinations in large corpora. Statistical methods offer a possible solution by allowing frequent well-formed expressions to overwhelm the infrequent ungrammatical ones. They also have the desirable property of being able to construct robust grammars from positive instances alone. Unfortunately, the "zero-frequency" problem entails assigning a small probability to all possible word patterns; thus ungrammatical n-grams become as probable as unseen grammatical ones. Further, such grammars are unable to take advantage of inherent lexical properties that should allow infrequent words to inherit the syntactic properties of the class to which they belong.\\
\\
This paper describes a genetic algorithm (GA) that adapts a population of hypothesis grammars towards a more effective model of language structure. The GA is statistically sensitive in that the utility of frequent patterns is reinforced by the persistence of efficient substructures. It also supports the view of language learning as a "bootstrapping problem," a learning domain where it appears necessary to simultaneously discover a set of categories and a set of rules defined over them. Results from a number of tests indicate that the GA is a robust, fault-tolerant method for inferring grammars from positive examples of natural language.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/TCS-IHW96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/TCS-IHW96.ps}
}
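
The adaptive search the abstract describes can be sketched as a generic genetic-algorithm loop. Everything here is a hedged skeleton: the grammar encoding, fitness function and operators are caller-supplied placeholders, not the paper's representation.

import random

def evolve(population, fitness, crossover, mutate,
           generations=100, elite=2, rate=0.1, seed=0):
    # population: list of hypothesis grammars (any representation);
    # fitness: e.g. parsing coverage of the corpus, penalised by size.
    # Assumes a population of at least four individuals.
    rng = random.Random(seed)
    for _ in range(generations):
        ranked = sorted(population, key=fitness, reverse=True)
        nxt = ranked[:elite]                       # elitism: keep the best
        while len(nxt) < len(population):
            a, b = rng.sample(ranked[:len(ranked) // 2], 2)
            child = crossover(a, b)                # recombine substructures
            if rng.random() < rate:
                child = mutate(child)              # occasional random change
            nxt.append(child)
        population = nxt
    return max(population, key=fitness)

The "statistical sensitivity" noted in the abstract enters through the fitness function: substructures that parse frequent patterns persist across generations, which reinforces them.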

@INPROCEEDINGS{Teahan1996,
  AUTHOR = {Teahan, W.J. and Cleary, J.G.},
  TITLE = {The entropy of English using PPM-based models},
  BOOKTITLE = {Proc Data Compression Conference},
  EDITOR = {J.A. Storer and M. Cohn},
  PAGES = {53-62},
  PUBLISHER = {IEEE Press},
  ADDRESS = {Los Alamitos, CA},
  YEAR = {1996},
  ABSTRACT = {Over 45 years ago, Claude E. Shannon estimated the entropy of English to be about 1 bit per character [16]. He did this by having human subjects guess samples of text, letter by letter. From the number of guesses made by each subject he estimated upper and lower bounds of 1.3 and 0.6 bits per character (bpc) for the entropy of English. Shannon's methodology was not improved upon until 1978, when Cover and King [6] used a gambling approach to estimate the upper bound to be 1.25 bpc from the same text. In the cryptographic community, n-gram analysis suggests 1.5 bpc as the asymptotic limit for 26-letter English (Tilborg [19]).},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Teahan-Cleary-entropy96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Teahan-Cleary-entropy96.ps}
}
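
The entropy estimates discussed in the abstract above come from measuring how many bits an adaptive character model needs per symbol. A minimal stand-in for a PPM model, using a fixed-order context with add-one smoothing instead of PPM's escape mechanism, might look like this:

import collections, math

def bits_per_char(text, order=2):
    # Score each character under the model, then update the model:
    # the average code length is an upper bound on the source entropy.
    ctx_counts = collections.defaultdict(collections.Counter)
    alphabet = sorted(set(text))
    bits, n = 0.0, 0
    for i in range(order, len(text)):
        ctx, ch = text[i - order:i], text[i]
        c = ctx_counts[ctx]
        p = (c[ch] + 1) / (sum(c.values()) + len(alphabet))
        bits += -math.log2(p)
        c[ch] += 1
        n += 1
    return bits / n if n else 0.0

A crude low-order model like this stays well above the bounds quoted in the abstract; the paper's PPM models, which blend contexts of several orders via escape probabilities, get much closer.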

@TECHREPORT{Thomson1996,
  AUTHOR = {Thomson, K. and McQueen, R.J.},
  TITLE = {Machine Learning applied to fourteen agricultural datasets},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Computer Science Department, Hamilton, New Zealand},
  MONTH = {September},
  YEAR = {1996},
  NUMBER = {96/18},
  ABSTRACT = {This document reports on an investigation conducted between November, 1995 and March, 1996 into the use of machine learning on 14 sets of data supplied by agricultural researchers in New Zealand. Our purpose here is to collect together short reports on trials with these datasets using the WEKA machine learning workbench, so that some understanding of the applicability and potential application of machine learning to similar datasets may result.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Thomson-McQueen-96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Thomson-McQueen-96.ps}
}

@INPROCEEDINGS{Ting1996,
  AUTHOR = {Ting, K.M.},
  TITLE = {Decision combination based on the characterization of predictive accuracy},
  BOOKTITLE = {Proc 3rd International Workshop on Multistrategy Learning},
  EDITOR = {R.S. Michalski and J. Wnek},
  PAGES = {191-202},
  ADDRESS = {Fairfax, VA},
  YEAR = {1996}
}

@INPROCEEDINGS{Ting1996_2,
  AUTHOR = {Ting, K.M.},
  TITLE = {The characterization of predictive accuracy and decision combination},
  BOOKTITLE = {Proc 13th International Conference on Machine Learning},
  PAGES = {498-506},
  ADDRESS = {Bari, Italy},
  YEAR = {1996},
  ABSTRACT = {In this paper, we first explore an intrinsic problem that exists in the theories induced by learning algorithms. Regardless of the selected algorithm, search methodology and hypothesis representation by which the theory is induced, one would expect the theory to make better predictions in some regions of the description space than others. We term the fact that an induced theory will have some regions of relatively poor performance the problem of locally low predictive accuracy. Having characterised the problem of locally low predictive accuracy in Instance-Based and Naive Bayesian classifiers, we propose to counter this problem using a composite learner that incorporates both classifiers. The strategy is to select an estimated better performing classifier to do the final prediction during classification. Empirical results show that the strategy is capable of partially overcoming the problem and at the same time improving the overall performance of its constituent classifiers. We provide explanations of why the proposed composite learner performs better than the cross-validation method and the better of its constituent classifiers.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/KaiMing-Ting96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/KaiMing-Ting96.ps}
}
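
The selection strategy in the abstract above, picking the constituent classifier with the better estimated accuracy near the query, can be sketched as follows. The local-accuracy estimate from cross-validation records on the k nearest training instances is our reconstruction; the names and the distance measure are assumptions.

import math

def composite_predict(x, train_X, cv_correct, classifiers, k=10):
    # classifiers: dict name -> trained predict function
    # cv_correct: dict name -> per-training-instance 0/1 record of
    # whether that classifier was right under cross-validation
    near = sorted(range(len(train_X)),
                  key=lambda i: math.dist(x, train_X[i]))[:k]
    def local_acc(name):
        return sum(cv_correct[name][i] for i in near) / k
    best = max(classifiers, key=local_acc)   # estimated better performer
    return classifiers[best](x)

Where both constituents are locally strong the choice matters little; the gain comes in each classifier's regions of locally low predictive accuracy, where the other can take over.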

@TECHREPORT{Ting1996_3,
  AUTHOR = {Ting, K.M. and Low, B.T.},
  TITLE = {Theory combination: an alternative to data combination},
  INSTITUTION = {University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {1996},
  NUMBER = {96/19},
  ABSTRACT = {The approach of combining theories learned from multiple batches of data provides an alternative to the common practice of learning one theory from all the available data (i.e., the data combination approach). This paper empirically examines the base-line behaviour of the theory combination approach in classification tasks. We find that theory combination can lead to better performance even if the disjoint batches of data are drawn randomly from a larger sample, and relate the relative performance of the two approaches to the learning curve of the classifier used.\\
\\
The practical implication of our results is that one should consider using theory combination rather than data combination, especially when multiple batches of data for the same task are readily available.\\
\\
Another interesting result is that we empirically show that the near-asymptotic performance of a single theory, in some classification tasks, can be significantly improved by combining multiple theories (of the same algorithm) if the constituent theories are substantially different and there is some regularity in the theories to be exploited by the combination method used. Comparisons with known theoretical results are also provided.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/KaiMing-WP96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/KaiMing-WP96.ps}
}
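
The two approaches contrasted in the abstract can be stated in a few lines. A minimal sketch, assuming a learn function that maps a list of examples to a predict function; majority voting stands in for the combination methods the report compares.

import collections

def data_combination(batches, learn):
    # pool all batches, learn a single theory
    return learn([ex for batch in batches for ex in batch])

def theory_combination(batches, learn):
    # learn one theory per batch, combine by majority vote
    theories = [learn(batch) for batch in batches]
    def predict(x):
        votes = collections.Counter(t(x) for t in theories)
        return votes.most_common(1)[0][0]
    return predict

The report's finding is that the second form can match or beat the first even when the batches are random splits of one sample, depending on where the classifier sits on its learning curve.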

@INPROCEEDINGS{Witten1996,
  AUTHOR = {Witten, I.H. and Nevill-Manning, C.G. and Maulsby, D.L.},
  TITLE = {Interacting with learning agents: implications for ML from HCI},
  BOOKTITLE = {Workshop on Machine Learning meets Human-Computer Interaction, ML'96},
  PAGES = {51-58},
  ADDRESS = {Bari, Italy},
  YEAR = {1996},
  ABSTRACT = {Computers excel at repetitive tasks. But automating them usually involves programming, which is beyond the reach of most non-specialist users. One solution is for machines to learn procedures by observing users at work, and if this enhanced users' productivity and sense of achievement, they might even be persuaded to help the system by supplying some additional information. In principle, combining machine learning with instructional interaction should increase the speed with which tasks are acquired, and enhance reliability too.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Witten-NevMan-Maulsby96.pdf}
}

@INPROCEEDINGS{Yeates1996,
  AUTHOR = {Yeates, S. and Thomson, K.},
  TITLE = {Applications of Machine Learning on two agricultural datasets},
  BOOKTITLE = {Proc New Zealand Conference of Postgraduate Students in Engineering and Technology},
  PAGES = {495-496},
  ADDRESS = {Christchurch, New Zealand},
  YEAR = {1996},
  ABSTRACT = {The induction of decision trees from tabulated data is a field of machine learning which has been demonstrated successfully in several practical applications. This paper looks at the application of this technology to two datasets in the agricultural domain, and shows why it was not possible to achieve the success obtained in other domains.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Yeates-and-Thomson96.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/1996/Yeates-and-Thomson96.ps}
}

@MISC{Waikato1996,
  AUTHOR = {Waikato ML group},
  TITLE = {Tutorial: Weka, the Waikato environment for knowledge analysis},
  YEAR = {1996},
  MONTH = {November},
  HOWPUBLISHED = {Department of Computer Science, University of Waikato},
  NOTE = {101 pp.}
}