2004.bib

@COMMENT{{Automatically generated - DO NOT MODIFY!}}

@INCOLLECTION{hunt04:_unsup,
  AUTHOR = {Hunt, Lynette and Jorgensen, Murray},
  TITLE = {Unsupervised learning from incomplete data using a mixture model approach},
  BOOKTITLE = {Statistical Data Mining and Knowledge Discovery},
  PUBLISHER = {Chapman \& Hall/CRC},
  YEAR = 2004,
  PAGES = {173-188},
  ABSTRACT = {Many unsupervised learning tasks involve high dimensional data sets where some of the attributes are continuous and some are categorical. One possible approach to clustering such data is to assume that the data to be clustered arise from a finite mixture of populations. The mixture likelihood approach has been well developed and much used, especially for mixtures where the component distributions are multivariate normal. Hunt and Jorgensen [17] presented methodology that enabled the clustering of mixed categorical and continuous data using a mixture approach. However, many multivariate data sets encountered also contain unobserved or missing values. In this paper we demonstrate that the mixture approach to clustering can handle mixed categorical and continuous data where data are missing at random in the sense of Little and Rubin [30]. The methodology is illustrated by clustering two data sets.}
}

@ARTICLE{Blockeel2004,
  AUTHOR = {Blockeel, H. and Dzeroski, S. and Kompare, B. and Kramer, S. and Pfahringer, B. and Van Laer, W.},
  TITLE = {Experiments in predicting biodegradability},
  JOURNAL = {Applied Artificial Intelligence},
  YEAR = {2004},
  MONTH = {February},
  VOLUME = {18},
  NUMBER = {2},
  ABSTRACT = {This paper is concerned with the use of AI techniques in ecology. More specifically, we present a novel application of inductive logic programming (ILP) in the area of quantitative structure-activity relationships (QSARs). The activity we want to predict is the biodegradability of chemical compounds in water. In particular, the target variable is the half-life for aerobic aqueous biodegradation. Structural descriptions of chemicals in terms of atoms and bonds are derived from the chemicals' SMILES encodings. Definitions of substructures are used as background knowledge. Predicting biodegradability is essentially a regression problem, but we also consider a discretized version of the target variable. We thus employ a number of relational classification and regression methods on the relational representation and compare these to propositional methods applied to different propositionalizations of the problem. We also experiment with the prediction technique that consists of merging upper and lower bound predictions into one prediction. Some conclusions are drawn concerning the applicability of machine learning systems and the merging technique in this domain and concerning the evaluation of hypotheses.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/blockeel-etal.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/blockeel-etal.ps}
}

@TECHREPORT{Bouckaert2004,
  AUTHOR = {Bouckaert, R.},
  TITLE = {Bayesian network classifiers in Weka},
  INSTITUTION = {The University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {2004},
  NUMBER = {14/2004},
  ABSTRACT = {Various Bayesian network classifier learning algorithms are implemented in Weka. This note provides some user documentation and implementation details. Summary of the main capabilities:\\
    \\
    (a) Structure learning of Bayesian networks using various hill climbing (K2, B, etc) and general purpose (simulated annealing, tabu search) algorithms.\\
    (b) Local score metrics implemented: Bayes, BDe, MDL, entropy, AIC.\\
    (c) Global score metrics implemented: leave one out cv, k-fold cv and cumulative cv.\\
    (d) Conditional independence based causal recovery algorithm available.\\
    (e) Parameter estimation using direct estimates and Bayesian model averaging.\\
    (f) GUI for easy inspection of Bayesian networks.\\
    (g) Part of Weka allowing systematic experiments to compare Bayes net performance with general purpose classifiers like C4.5, nearest neighbor, support vector, etc.\\
    (h) Source code available under GPL allows for integration in other systems and makes it easy to extend.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/uow-cs-wp-2004-14.pdf}
}
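
A minimal sketch of capabilities (a) and (e) above, using the Weka classes this report documents: K2 structure search plus direct parameter estimates. This is an illustration only; class and option names follow later Weka releases and may differ from the 2004 version, and the ARFF file name is a placeholder.

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.search.local.K2;
import weka.classifiers.bayes.net.estimate.SimpleEstimator;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.FileReader;

public class BayesNetDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder data file; class attribute assumed to be last.
    Instances data = new Instances(new BufferedReader(new FileReader("data.arff")));
    data.setClassIndex(data.numAttributes() - 1);

    K2 search = new K2();             // hill-climbing structure learner
    search.setMaxNrOfParents(2);      // keep the network sparse

    BayesNet bn = new BayesNet();
    bn.setSearchAlgorithm(search);
    bn.setEstimator(new SimpleEstimator()); // direct parameter estimates
    bn.buildClassifier(data);
    System.out.println(bn);                 // inspect the learned network
  }
}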

@INPROCEEDINGS{Bouckaert2004_2,
  AUTHOR = {Bouckaert, R.},
  TITLE = {Estimating replicability of classifier learning},
  BOOKTITLE = {Proc 21st International Conference on Machine Learning},
  YEAR = {2004},
  ABSTRACT = {Replicability of machine learning experiments measures how likely it is that the outcome of one experiment is repeated when performed with a different randomization of the data. In this paper, we present an efficient estimator of the replicability of an experiment. More precisely, the estimator is unbiased and has the lowest variance in the class of estimators formed by a linear combination of outcomes of experiments on a given data set.\\
\\
We gathered empirical data for comparing experiments consisting of different sampling schemes and hypothesis tests. Both factors are shown to have an impact on replicability of experiments. The data suggests that sign tests should not be used due to low replicability. Ranked sum tests show better performance, but the combination of a sorted runs sampling scheme with a t-test gives the most desirable performance judged on Type I and II error and replicability.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/bouckaert-estimating.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/bouckaert-estimating.ps}
}

@INPROCEEDINGS{BouckaertF04,
  AUTHOR = {Remco R. Bouckaert and
               Eibe Frank},
  TITLE = {Evaluating the Replicability of Significance Tests for Comparing
               Learning Algorithms},
  BOOKTITLE = {Proc 8th Pacific-Asia Conference on Knowledge Discovery and Data Mining},
  ADDRESS = {Sydney, Australia},
  PUBLISHER = {Springer},
  YEAR = {2004},
  PAGES = {3-12},
  SERIES = {LNAI},
  VOLUME = {3056},
  ABSTRACT = {Empirical research in learning algorithms for classification tasks generally requires the use of significance tests. The quality of a test is typically judged on Type I error (how often the test indicates a difference when it should not) and Type II error (how often it indicates no difference when it should). In this paper we argue that the replicability of a test is also of importance. We say that a test has low replicability if its outcome strongly depends on the particular random partitioning of the data that is used to perform it. We present empirical measures of replicability and use them to compare the performance of several popular tests in a realistic setting involving standard learning algorithms and benchmark datasets. Based on our results we give recommendations on which test to use.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/bouckaert-frank.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/bouckaert-frank.ps}
}

@INPROCEEDINGS{Bouckaert2004_3,
  AUTHOR = {Bouckaert, R.},
  TITLE = {Naive Bayes classifiers that perform well with continuous variables},
  BOOKTITLE = {Proc 17th Australian Joint Conference on Artificial Intelligence (AI 2004)},
  EDITOR = {G. I. Webb and X. Yu},
  PAGES = {1089-1094},
  SERIES = {LNAI},
  VOLUME = {3339},
  PUBLISHER = {Springer},
  ADDRESS = {Cairns, Australia},
  YEAR = {2004},
  MONTH = {December},
  ABSTRACT = {There are three main methods for handling continuous variables in naive Bayes classifiers, namely, the normal method (parametric approach), the kernel method (non-parametric approach) and discretization. In this article, we perform a methodologically sound comparison of the three methods, which shows large mutual differences between the methods, with no single method being universally better. This suggests that a method for selecting one of the three approaches to continuous variables could improve overall performance of the naive Bayes classifier. We present three methods that can be implemented efficiently: v-fold cross validation for the normal, kernel and discretization method. Empirical evidence suggests that selection using 10 fold cross validation (especially when repeated 10 times) can largely and significantly improve overall performance of naive Bayes classifiers and consistently outperform any of the three popular methods for dealing with continuous variables on their own. This is remarkable, since selection among more classifiers does not consistently result in better accuracy.}
}
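
The selection scheme this abstract describes can be illustrated with Weka's NaiveBayes switches for the kernel and discretization variants. A hypothetical sketch, assuming a recent Weka release and a placeholder ARFF file; it is not the paper's own implementation.

import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.Evaluation;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Random;

public class SelectNaiveBayes {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new BufferedReader(new FileReader("data.arff")));
    data.setClassIndex(data.numAttributes() - 1);

    NaiveBayes normal = new NaiveBayes();        // Gaussian per attribute
    NaiveBayes kernel = new NaiveBayes();
    kernel.setUseKernelEstimator(true);          // non-parametric density
    NaiveBayes disc = new NaiveBayes();
    disc.setUseSupervisedDiscretization(true);   // discretize first

    NaiveBayes[] candidates = {normal, kernel, disc};
    NaiveBayes best = null;
    double bestAcc = -1;
    for (NaiveBayes nb : candidates) {
      Evaluation eval = new Evaluation(data);
      eval.crossValidateModel(nb, data, 10, new Random(1)); // 10-fold CV
      if (eval.pctCorrect() > bestAcc) {
        bestAcc = eval.pctCorrect();
        best = nb;
      }
    }
    best.buildClassifier(data); // retrain the winner on all data
    System.out.println("Selected variant with " + bestAcc + "% CV accuracy");
  }
}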

@ARTICLE{FrankHTHW04,
  AUTHOR = {Eibe Frank and
               Mark Hall and
               Leonard E. Trigg and
               Geoffrey Holmes and
               Ian H. Witten},
  TITLE = {Data mining in bioinformatics using {W}eka},
  JOURNAL = {Bioinformatics},
  VOLUME = {20},
  NUMBER = {15},
  YEAR = {2004},
  MONTH = {April},
  PAGES = {2479-2481},
  ABSTRACT = {The Weka machine learning workbench provides a general-purpose environment for automatic classification, regression, clustering, and feature selection---common data mining problems in bioinformatics research. It contains an extensive collection of machine learning algorithms and data pre-processing methods complemented by graphical user interfaces for data exploration and the experimental comparison of different machine learning techniques on the same problem. Weka can process data given in the form of a single relational table. Its main objectives are to (a) assist users in extracting useful information from data and (b) enable them to easily identify a suitable algorithm for generating an accurate predictive model from it.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/frank-etal-bioinformatics.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/frank-etal-bioinformatics.ps},
  HTTP = {https://academic.oup.com/bioinformatics/article/20/15/2479/233305}
}
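
A minimal example of the workflow the abstract describes: load a single relational table (an ARFF file), pick a learning scheme, and estimate its accuracy by cross-validation. The file name is a placeholder; the API calls are standard Weka.

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Random;

public class WekaQuickStart {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new BufferedReader(new FileReader("genes.arff")));
    data.setClassIndex(data.numAttributes() - 1); // last attribute is the class

    J48 tree = new J48();                         // C4.5-style decision tree
    Evaluation eval = new Evaluation(data);
    eval.crossValidateModel(tree, data, 10, new Random(1));
    System.out.println(eval.toSummaryString());
  }
}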

@ARTICLE{FrankP04,
  AUTHOR = {Eibe Frank and
               Gordon W. Paynter},
  TITLE = {Predicting Library of Congress classifications from Library
               of Congress subject headings},
  JOURNAL = {JASIST},
  VOLUME = {55},
  NUMBER = {3},
  YEAR = {2004},
  PAGES = {214-227},
  NOTE = {Also published as Working Paper 01/2003, Department of Computer Science, The University of Waikato, Hamilton.},
  ABSTRACT = {This paper addresses the problem of automatically assigning a Library of Congress Classification (LCC) to a work given its set of Library of Congress Subject Headings (LCSH). LCCs are organized in a tree: the root node of this hierarchy comprises all possible topics, and leaf nodes correspond to the most specialized topic areas defined. We describe a procedure that, given a resource identified by its LCSH, automatically places that resource in the LCC hierarchy. The procedure uses machine learning techniques and training data from a large library catalog to learn a classification model mapping from sets of LCSH to nodes in the LCC tree. We present empirical results for our technique showing its accuracy on an independent collection of 50,000 LCSH/LCC pairs.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/frank-paynter-predicting-library.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/frank-paynter-predicting-library.ps}
}

@INPROCEEDINGS{frank04:_ensem,
  AUTHOR = {Eibe Frank and Stefan Kramer},
  TITLE = {Ensembles of nested dichotomies for multi-class problems},
  BOOKTITLE = {Proc 21st International Conference on Machine Learning},
  EDITOR = {R. Greiner and D. Schuurmans},
  ADDRESS = {Banff, Canada},
  PAGES = {305-312},
  YEAR = 2004,
  PUBLISHER = {ACM Press},
  NOTE = {Also published as Working Paper 06/2004, Department of Computer Science, The University of Waikato, Hamilton.},
  ABSTRACT = {Nested dichotomies are a standard statistical technique for tackling certain polytomous classification problems with logistic regression. They can be represented as binary trees that recursively split a multi-class classification task into a system of dichotomies and provide a statistically sound way of applying two-class learning algorithms to multi-class problems (assuming these algorithms generate class probability estimates). However, there are usually many candidate trees for a given problem and in the standard approach the choice of a particular tree is based on domain knowledge that may not be available in practice. An alternative is to treat every system of nested dichotomies as equally likely and to form an ensemble classifier based on this assumption. We show that this approach produces more accurate classifications than applying C4.5 and logistic regression directly to multi-class problems. Our results also show that ensembles of nested dichotomies produce more accurate classifiers than pairwise classification if both techniques are used with C4.5, and comparable results for logistic regression. Compared to error-correcting output codes, they are preferable if logistic regression is used, and comparable in the case of C4.5. An additional benefit is that they generate class probability estimates. Consequently they appear to be a good general-purpose method for applying binary classifiers to multi-class problems.},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/frank_and_kramer_cr_updated_copyright_info.ps.gz},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/frank_and_kramer_cr_updated_copyright_info.pdf}
}
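
Weka ships an END meta-classifier implementing ensembles of nested dichotomies, with ND building one random dichotomy tree around a two-class base learner. A hypothetical usage sketch, assuming a recent Weka release and a placeholder multi-class ARFF file:

import weka.classifiers.meta.END;
import weka.classifiers.meta.nestedDichotomies.ND;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.FileReader;

public class NestedDichotomiesDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new BufferedReader(new FileReader("multiclass.arff")));
    data.setClassIndex(data.numAttributes() - 1);

    ND nd = new ND();             // one random system of nested dichotomies
    nd.setClassifier(new J48()); // two-class base learner at each split

    END end = new END();          // ensemble over many random ND trees
    end.setClassifier(nd);
    end.setNumIterations(20);     // ensemble size
    end.buildClassifier(data);
    System.out.println(end);
  }
}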

@INPROCEEDINGS{HolmesF04,
  AUTHOR = {Geoffrey Holmes and
               Dale Fletcher},
  TITLE = {An Instrument Control System Using Predictive Modelling},
  BOOKTITLE = {Proceedings of the First International Conference
               on Informatics in Control, Automation and Robotics},
  EDITOR = {H. Ara\'{u}jo and others},
  YEAR = {2004},
  PAGES = {292-295},
  PUBLISHER = {INSTICC Press},
  ADDRESS = {Set\'{u}bal, Portugal},
  ABSTRACT = {We describe a system for providing early warning of possible error to an operator in control of an instrument providing results in batches from samples, for example, chemical elements found in soil or water samples. The system has the potential to be used with any form of instrument that provides multiple results for a given sample. The idea is to train models for each measurement, using historical data. The set of trained models is then capable of making predictions on new data based on the values of the other measurements. This approach has the potential to uncover previously unknown relationships between the measurements. An example application has been constructed that highlights the difference between the actual value of a measurement and its predicted value. The operator is provided with sliders to attenuate the sensitivity of each measurement, perhaps based on its importance or its known sensitivity.}
}

@TECHREPORT{Holmes2004,
  AUTHOR = {Holmes, G. and Kirkby, R. and Pfahringer, B.},
  TITLE = {Mining data streams using option trees (revised edition, 2004)},
  INSTITUTION = {The University of Waikato},
  ADDRESS = {Department of Computer Science, Hamilton, New Zealand},
  YEAR = {2004},
  NUMBER = {03/2004},
  ABSTRACT = {The data stream model for data mining places harsh restrictions on a learning algorithm. A model must be induced following the briefest interrogation of the data, must use only available memory and must update itself over time within these constraints. Additionally, the model must be able to be used for data mining at any point in time. This paper describes a data stream classification algorithm using an ensemble of option trees. The ensemble of trees is induced by boosting and iteratively combined into a single interpretable model. The algorithm is evaluated using benchmark datasets for accuracy against state-of-the-art algorithms that make use of the entire dataset.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/uow-cs-wp-2004-03.pdf}
}

@INPROCEEDINGS{Holmes2004_2,
  AUTHOR = {Holmes, G. and Kirkby, R. and Pfahringer, B.},
  TITLE = {Mining data streams using option trees},
  BOOKTITLE = {Proc Workshop W10 on Knowledge Discovery in Data Streams, Fifteenth European Conference on Machine Learning, Eighth European Conference on Principles and Practice of Knowledge Discovery in Databases (ECML/PKDD)},
  EDITOR = {J. Aguilar-Ruiz and J. Gama},
  PAGES = {75-84},
  ADDRESS = {Pisa, Italy},
  YEAR = {2004},
  ABSTRACT = {The data stream model for data mining places harsh restrictions on a learning algorithm. A model must be induced following the briefest interrogation of the data, must use only available memory and must update itself over time within these constraints. Additionally, the model must be able to be used for data mining at any point in time. This paper describes a data stream classification algorithm using an ensemble of option trees. This ensemble is iteratively combined into a single interpretable model. The algorithm is evaluated using benchmark datasets for accuracy against state-of-the-art algorithms that make use of the entire dataset.}
}

@INPROCEEDINGS{KibriyaFPH04,
  AUTHOR = {Ashraf M. Kibriya and
               Eibe Frank and
               Bernhard Pfahringer and
               Geoffrey Holmes},
  TITLE = {Multinomial Naive {B}ayes for Text Categorization Revisited},
  BOOKTITLE = {Proc 17th Australian Joint Conference on Artificial Intelligence},
  EDITOR = {G. I. Webb and X. Yu},
  YEAR = {2004},
  PAGES = {488-499},
  SERIES = {LNAI},
  VOLUME = {3339},
  ADDRESS = {Cairns, Australia},
  PUBLISHER = {Springer},
  ABSTRACT = {This paper presents empirical results for several versions of the multinomial naive Bayes classifier on four text categorization problems, and a way of improving it using locally weighted learning. More specifically, it compares standard multinomial naive Bayes to the recently proposed transformed weight-normalized complement naive Bayes classifier (TWCNB) [1], and shows that some of the modifications included in TWCNB may not be necessary to achieve optimum performance on some datasets. However, it does show that TFIDF conversion and document length normalization are important. It also shows that support vector machines can, in fact, sometimes very significantly outperform both methods. Finally, it shows how the performance of multinomial naive Bayes can be improved using locally weighted learning. However, the overall conclusion of our paper is that support vector machines are still the method of choice if the aim is to maximize accuracy.},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/kibriya_et_al_cr.ps.gz},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/kibriya_et_al_cr.pdf}
}
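
A hedged sketch of the pipeline evaluated here: bag-of-words vectors with TF and IDF transforms feeding multinomial naive Bayes. Filter and classifier names are from recent Weka releases; the dataset name is a placeholder, and the exact transformations used in the paper may differ.

import weka.classifiers.bayes.NaiveBayesMultinomial;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.StringToWordVector;
import java.io.BufferedReader;
import java.io.FileReader;

public class TextCategorizationDemo {
  public static void main(String[] args) throws Exception {
    // Expects a dataset with one string attribute (the document) plus a class.
    Instances raw = new Instances(new BufferedReader(new FileReader("docs.arff")));
    raw.setClassIndex(raw.numAttributes() - 1);

    StringToWordVector filter = new StringToWordVector();
    filter.setOutputWordCounts(true); // raw term frequencies, not binary
    filter.setTFTransform(true);      // log-scaled term frequency
    filter.setIDFTransform(true);     // inverse document frequency weighting
    filter.setInputFormat(raw);
    Instances vectors = Filter.useFilter(raw, filter);

    NaiveBayesMultinomial mnb = new NaiveBayesMultinomial();
    mnb.buildClassifier(vectors);
    System.out.println(mnb);
  }
}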

@INPROCEEDINGS{LiHP04,
  AUTHOR = {Mi Li and
               Geoffrey Holmes and
               Bernhard Pfahringer},
  TITLE = {Clustering Large Datasets Using Cobweb and K-Means in Tandem},
  YEAR = {2004},
  PAGES = {368-379},
  BOOKTITLE = {Proc 17th Australian Joint Conference on Artificial Intelligence},
  EDITOR = {G. I. Webb and X. Yu},
  ADDRESS = {Cairns, Australia},
  PUBLISHER = {Springer},
  SERIES = {LNAI},
  VOLUME = {3339},
  ABSTRACT = {This paper presents a single scan algorithm for clustering large datasets based on a two phase process which combines two well known clustering methods. The Cobweb algorithm is modified to produce a balanced tree with subclusters at the leaves, and then K-means is applied to the resulting subclusters. The resulting method, Scalable Cobweb, is then compared to a single pass K-means algorithm and standard K-means. The evaluation looks at error as measured by the sum of squared error and vulnerability to the order in which data points are processed.}
}
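
An illustrative sketch of the tandem idea only, not the authors' modified Scalable Cobweb: let Cobweb form many leaf subclusters in one pass, average each subcluster into a centroid, then run K-means over the centroids. Assumes all-numeric attributes and recent Weka class names, and for brevity ignores subcluster weights.

import weka.clusterers.Cobweb;
import weka.clusterers.SimpleKMeans;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.FileReader;

public class TandemClustering {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new BufferedReader(new FileReader("big.arff")));

    Cobweb cobweb = new Cobweb();     // phase 1: incremental hierarchical pass
    cobweb.buildClusterer(data);

    // Average the members of each Cobweb leaf cluster into one centroid.
    int k = cobweb.numberOfClusters();
    double[][] sums = new double[k][data.numAttributes()];
    int[] counts = new int[k];
    for (int i = 0; i < data.numInstances(); i++) {
      Instance inst = data.instance(i);
      int c = cobweb.clusterInstance(inst);
      counts[c]++;
      for (int a = 0; a < data.numAttributes(); a++) sums[c][a] += inst.value(a);
    }
    Instances centroids = new Instances(data, k);
    for (int c = 0; c < k; c++) {
      if (counts[c] == 0) continue;
      double[] mean = new double[data.numAttributes()];
      for (int a = 0; a < data.numAttributes(); a++) mean[a] = sums[c][a] / counts[c];
      centroids.add(new DenseInstance(1.0, mean));
    }

    SimpleKMeans kmeans = new SimpleKMeans(); // phase 2: K-means on centroids
    kmeans.setNumClusters(5);
    kmeans.buildClusterer(centroids);
    System.out.println(kmeans);
  }
}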

@INPROCEEDINGS{McGregor2004,
  AUTHOR = {McGregor, A. and Hall, M. and Lorier, P. and Brunskill, J.},
  TITLE = {Flow clustering using machine learning techniques},
  BOOKTITLE = {Proc Fifth International Workshop on Passive and Active Network Measurement (PAM 2004)},
  EDITOR = {C. Barakat and I. Pratt},
  PAGES = {205-214},
  SERIES = {LNCS},
  VOLUME = {3015},
  PUBLISHER = {Springer},
  ADDRESS = {Antibes Juan-les-Pins, France},
  YEAR = {2004},
  ABSTRACT = {Packet header traces are widely used in network analysis. Header traces are the aggregate of traffic from many concurrent applications. We present a methodology, based on machine learning, that can break the trace down into clusters of traffic where each cluster has different traffic characteristics. Typical clusters include bulk transfer, single and multiple transactions and interactive traffic, amongst others. The paper includes a description of the methodology, a visualisation of the attribute statistics that aids in recognising cluster types and a discussion of the stability and effectiveness of the methodology.}
}

@INPROCEEDINGS{MutterHF04,
  AUTHOR = {Stefan Mutter and
               Mark Hall and
               Eibe Frank},
  TITLE = {Using Classification to Evaluate the Output of Confidence-Based
               Association Rule Mining},
  BOOKTITLE = {Proc 17th Australian Joint Conference on Artificial Intelligence},
  EDITOR = {G. I. Webb and X. Yu},
  YEAR = {2004},
  PAGES = {538-549},
  ADDRESS = {Cairns, Australia},
  PUBLISHER = {Springer},
  SERIES = {LNAI},
  VOLUME = {3339},
  ABSTRACT = {Association rule mining is a data mining technique that reveals interesting relationships in a database. Existing approaches employ different parameters to search for interesting rules. This fact and the large number of rules make it difficult to compare the output of confidence-based association rule miners. This paper explores the use of classification performance as a metric for evaluating their output. Previous work on forming classifiers from association rules has focussed on accurate classification, whereas we concentrate on using the properties of the resulting classifiers as a basis for comparing confidence-based association rule learners. Therefore, we present experimental results on 12 UCI datasets showing that the quality of small rule sets generated by Apriori can be improved by using the predictive Apriori algorithm. We also show that CBA, the standard method for classification using association rules, is generally inferior to standard rule learners concerning both running time and size of rule sets.},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/mutter_et_al.ps.gz},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/mutter_et_al.pdf}
}
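
For reference, mining a confidence-ranked rule set with Weka's Apriori, the baseline the paper compares against predictive Apriori. A hypothetical sketch; the option values are illustrative, the file name is a placeholder, and the data must be nominal.

import weka.associations.Apriori;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.FileReader;

public class RuleMiningDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new BufferedReader(new FileReader("market.arff")));

    Apriori apriori = new Apriori();
    apriori.setNumRules(50);        // size of the mined rule set
    apriori.setMinMetric(0.9);      // minimum confidence
    apriori.buildAssociations(data);
    System.out.println(apriori);    // rules ranked by confidence
  }
}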

@ARTICLE{PaynterW04,
  AUTHOR = {Gordon W. Paynter and
               Ian H. Witten},
  TITLE = {Applying machine learning to programming by demonstration},
  JOURNAL = {J. Exp. Theor. Artif. Intell.},
  VOLUME = {16},
  NUMBER = {3},
  YEAR = {2004},
  PAGES = {161-188},
  ABSTRACT = {'Familiar' is a tool that helps end-users automate iterative tasks in their applications by showing examples of what they want to do. It observes the user's actions, predicts what they will do next, and then offers to complete their task. Familiar learns in two ways. First, it creates a model, based on data gathered from training tasks, that selects the best prediction from among several candidates. Experiments show that decision trees outperform heuristic methods, and can be further improved by incrementally updating the classifier at task time. Second, it uses decision stumps inferred from analogous examples in the event trace to predict the parameters of conditional rules. Because data is sparse---for most users balk at giving more than a few training examples---permutation tests are used to calculate the statistical significance of each stump, successfully eliminating bias towards attributes with many different values.}
}

@INPROCEEDINGS{ReutemannPF04,
  AUTHOR = {Peter Reutemann and
               Bernhard Pfahringer and
               Eibe Frank},
  TITLE = {A Toolbox for Learning from Relational Data with Propositional
               and Multi-instance Learners},
  BOOKTITLE = {Proc 17th Australian Joint Conference on Artificial Intelligence},
  EDITOR = {G. I. Webb and X. Yu},
  YEAR = {2004},
  PAGES = {1017-1023},
  ADDRESS = {Cairns, Australia},
  PUBLISHER = {Springer},
  SERIES = {LNAI},
  VOLUME = {3339},
  ABSTRACT = {Most databases employ the relational model for data storage. To use this data in a propositional learner, a propositionalization step has to take place. Similarly, the data has to be transformed to be amenable to a multi-instance learner. The Proper Toolbox contains an extended version of RELAGGS, the Multi-Instance Learning Kit MILK, and can also combine the multi-instance data with aggregated data from RELAGGS. RELAGGS was extended to handle arbitrarily nested relations and to work with both primary keys and indices. For MILK the relational model is flattened into a single table and this data is fed into a multi-instance learner. REMILK finally combines the aggregated data produced by RELAGGS and the multi-instance data, flattened for MILK, into a single table that is once again the input for a multi-instance learner. Several well-known datasets are used for experiments which highlight the strengths and weaknesses of the different approaches.},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/reutemann_et_al.ps.gz},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/reutemann_et_al.pdf}
}

@INPROCEEDINGS{Smith2004,
  AUTHOR = {Smith, T. C. and Giganto, R. T.},
  TITLE = {Computer Aided Software Engineering (CASE), empowered by Natural Language Processing (NLP) for object oriented analysis and design using Unified Modeling Language (UML)},
  BOOKTITLE = {Proc Second Mindanao Conference on Information Technology Education (MCITE'04)},
  PAGES = {19-23},
  ADDRESS = {Davao City, Philippines},
  YEAR = {2004},
  ABSTRACT = {Computer Aided Software Engineering (CASE) tools are often regarded as the ultimate aide for software engineers in developing software. However, studies show that CASE tools are not enough to assist developers fully in their analysis tasks. Natural Language Processing (NLP) has been empowering these CASE tools to at least aid developers in the analysis phases. Still, NLP-based CASE tools have certain weaknesses when it comes to detecting classes or objects in the system. With the advent of object oriented analysis and design (OOAD), Unified Modeling Language (UML) has come to be the preferred medium for modeling systems. Existing NLP-based CASE tools produce object models in UML; however, unnecessary classes are often created in the process, while other object models are not produced at all. This paper introduces a research motivation aimed at designing a new NLP-based CASE system that minimizes generation of unnecessary classes and expresses object models in UML.}
}

@INPROCEEDINGS{Smith2004_2,
  AUTHOR = {Smith, T. C.},
  TITLE = {A text-classification approach to the prediction and characterization of signal peptides},
  BOOKTITLE = {Proc X Convencion Internacional y Feria, Libro Resumen, Informatica 2004},
  PAGES = {361-362},
  ADDRESS = {La Habana, Cuba},
  YEAR = {2004},
  ABSTRACT = {This paper describes an unconventional machine learning approach to predicting signal peptide cleavage points through a kind of pseudo-text classification procedure. It is based on the view that the first amino residue in the mature protein can be characterized with a textual summary of some of its specific properties. By creating a textual description of every amino residue---that is, an account of such things as the chemical properties of the residue, its context in the amino-acid chain, its relative proximity to the amino terminus, and so forth---a text classification algorithm can infer a characteristic model of those textual features that imply whether or not any one residue is or is not the start of the mature protein. More to the point, insofar as a biologist might give an account, in English, as to why some particular residue in the amino-acid chain actually marks the cleavage site, a ranked list of the textual attributes inferred by a text classification algorithm can do the same.}
}

@INPROCEEDINGS{Wilson2004,
  AUTHOR = {Wilson, A.J. and Morgenstern, M.P. and Pfahringer, B. and Leschi, C.},
  TITLE = {Data mining bread quality and process data in a plant bakery},
  BOOKTITLE = {Proc Twelfth ICC Cereal and Bread Congress},
  ADDRESS = {Harrogate, UK},
  YEAR = {2004},
  MONTH = {May},
  ABSTRACT = {In modern automated plant bakeries a large amount of data is collected on the operation of the plant. When this data is combined with product quality data such as loaf colour, appearance, consumer complaints, sales data, etc., it has the potential to be used to improve processing efficiency, final product quality, and product marketability. However, the huge volume of this data means it is often ignored as being too hard to analyse in any meaningful way. Data mining, which is a combination of techniques that produces information from large data sets, has the potential to be applied to this data to extract useful information.
This paper describes our experience of applying data mining techniques to a plant bakery in New Zealand. The process involved setting up the systems required to extract data from the bakery's SCADA system, setting up sensors to automatically measure and record quality parameters, cleaning the data to remove faulty or anomalous results, and then combining all the separate data blocks into one complete database for analysis.
Data were analysed at two levels. Firstly, selected data were analysed for simple trends on an individual loaf basis, which served to identify variability caused by divider pockets, tin positioning, etc. Secondly, data mining techniques such as various classifiers and principal component analysis were applied to the whole data set to find relationships between process data and product quality.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/wilson-etal.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/wilson-etal.ps}
}

@ARTICLE{WittenDDT04,
  AUTHOR = {Ian H. Witten and
               Katherine J. Don and
               Michael Dewsnip and
               Valentin Tablan},
  TITLE = {Text mining in a digital library},
  JOURNAL = {Int. J. on Digital Libraries},
  VOLUME = {4},
  NUMBER = {1},
  YEAR = {2004},
  PAGES = {56-59},
  ABSTRACT = {Digital librarians strive to add value to the collections they create and maintain. One way is through selectivity: a carefully chosen set of authoritative documents in a particular topic area is far more useful to those working in the area than a huge, unfocused collection (like the Web). Another is by augmenting the collection with high-quality metadata, which supports activities of searching and browsing in a uniform and useful way. A third way, and our topic here, is to enrich the documents by examining their content, extracting information, and using it to enhance the ways they can be located and presented.}
}

@ARTICLE{Witten04,
  AUTHOR = {Ian H. Witten},
  TITLE = {Adaptive text mining: inferring structure from sequences},
  JOURNAL = {J. Discrete Algorithms},
  VOLUME = {2},
  NUMBER = {2},
  YEAR = {2004},
  PAGES = {137-159},
  ABSTRACT = {Text mining is about inferring structure from sequences representing natural language text, and may be defined as the process of analyzing text to extract information that is useful for particular purposes. Although hand-crafted heuristics are a common practical approach for extracting information from text, a general, and generalizable, approach requires adaptive techniques. This paper studies the way in which the adaptive techniques used in text compression can be applied to text mining. It develops several examples: extraction of hierarchical phrase structures from text, identification of keyphrases in documents, locating proper names and quantities of interest in a piece of text, text categorization, word segmentation, acronym extraction, and structure recognition. We conclude that compression forms a sound unifying principle that allows many text mining problems to be tackled adaptively.}
}

@INPROCEEDINGS{XuF04,
  AUTHOR = {Xin Xu and
               Eibe Frank},
  TITLE = {Logistic Regression and Boosting for Labeled Bags of Instances},
  BOOKTITLE = {Proc 8th Pacific-Asia Conference on Knowledge Discovery and Data Mining},
  EDITOR = {H. Dai and R. Srikant and C. Zhang},
  ADDRESS = {Sydney, Australia},
  PUBLISHER = {Springer},
  YEAR = {2004},
  PAGES = {272-281},
  SERIES = {LNAI},
  VOLUME = {3056},
  ABSTRACT = {In this paper we upgrade linear logistic regression and boosting to multi-instance data, where each example consists of a labeled bag of instances. This is done by connecting predictions for individual instances to a bag-level probability estimate by simple averaging and maximizing the likelihood at the bag level---in other words, by assuming that all instances contribute equally and independently to a bag's label. We present empirical results for artificial data generated according to the underlying generative model that we assume, and also show that the two algorithms produce competitive results on the Musk benchmark datasets.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2004/xu-frank.pdf},
  PS = {http://www.cs.waikato.ac.nz/~ml/publications/2004/xu-frank.ps}
}
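
The bag-level model described in this abstract can be written out as follows (notation chosen here, not taken from the paper): a linear logistic model scores each instance, the bag-level probability is the plain average over the bag, and the parameters maximize the bag-level log-likelihood.

\[
\Pr(y = 1 \mid x) = \frac{1}{1 + \exp(-\beta^{T} x)},
\qquad
\Pr(y = 1 \mid B) = \frac{1}{|B|} \sum_{x \in B} \Pr(y = 1 \mid x),
\]
\[
LL(\beta) = \sum_{i} \Bigl[ y_i \log \Pr(y = 1 \mid B_i)
  + (1 - y_i) \log \bigl( 1 - \Pr(y = 1 \mid B_i) \bigr) \Bigr].
\]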