2010.bib

@COMMENT{{Automatically generated - DO NOT MODIFY!}}

@PHDTHESIS{read10:_effic,
  AUTHOR = {Jesse Read},
  TITLE = {Efficient multi-label classification for evolving data streams},
  SCHOOL = {Department of Computer Science, University of Waikato},
  YEAR = 2010,
  HTTP = {http://hdl.handle.net/10289/3931},
  ABSTRACT = {Multi-label classification is relevant to many domains, such as text,
image and other media, and bioinformatics. Researchers have already
noticed that in multi-label data, correlations exist between labels,
and a variety of approaches, drawing inspiration from many spheres of
machine learning, have been able to model these correlations. However,
data sources from the real world are growing ever larger and the
multi-label task is particularly sensitive to this due to the
complexity associated with multiple labels and the correlations
between them. Consequently, many methods do not scale up to large
problems.
This thesis deals with scalable multi-label classification: methods
which exhibit high predictive performance, but are also able to scale
up to larger problems. The first major contribution is the pruned sets
method, which is able to model label correlations directly for high
predictive performance, but reduces overfitting and complexity over
related methods by pruning and subsampling label sets, and can thus
scale up to larger datasets. The second major contribution is the
classifier chains method, which models correlations with a chain of
binary classifiers. The use of binary models allows for scalability to
even larger datasets. Pruned sets and classifier chains are robust with
respect to both the variety and scale of data that they can deal with,
and can be incorporated into other methods. In an ensemble scheme,
these methods are able to compete with state-of-the-art methods in
terms of predictive performance as well as scale up to large datasets
of hundreds of thousands of training examples.
This thesis also puts a special emphasis on multi-label evaluation;
introducing a new evaluation measure and studying threshold
calibration. With one of the largest and most varied collections of
multi-label datasets in the literature, extensive experimental
evaluation shows the advantage of these methods, both in terms of
predictive performance, and computational efficiency and scalability.}
}

@ARTICLE{bouckaert10:_weka,
  AUTHOR = {Remco R. Bouckaert and Eibe Frank and Mark A. Hall and Geoffrey Holmes and Bernhard Pfahringer and Peter Reutemann and Ian H. Witten},
  TITLE = {{WEKA}---Experiences with a Java Open-Source Project},
  JOURNAL = {Journal of Machine Learning Research},
  YEAR = 2010,
  VOLUME = 11,
  PAGES = {2533--2541},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/bouckaert10a.pdf},
  ABSTRACT = {WEKA is a popular machine learning workbench with a development life of nearly two decades. This article provides an overview of the factors that we believe to be important to its success. Rather than focussing on the software's functionality, we review aspects of project management and historical development decisions that likely had an impact on the uptake of the project.}
}

@INPROCEEDINGS{bifet10:_accur_ensem_for_data_stream,
  AUTHOR = {Albert Bifet and Eibe Frank and Geoffrey Holmes and Bernhard Pfahringer},
  TITLE = {Accurate Ensembles for Data Streams: Combining Restricted {H}oeffding Trees using Stacking},
  BOOKTITLE = {Proc 2nd Asian Conference on Machine Learning},
  YEAR = 2010,
  SERIES = {Tokyo},
  PUBLISHER = {JMLR},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/Bifet_et_al_ACML_2010.pdf},
  ABSTRACT = {The success of simple methods for classification shows that it is often not necessary to model complex attribute interactions to obtain good classification accuracy on practical problems. In this paper, we propose to exploit this phenomenon in the data stream context by building an ensemble of Hoeffding trees that are each limited to a small subset of attributes. In this way, each tree is restricted to model interactions between attributes in its corresponding subset. Because it is not known a priori which attribute subsets are relevant for prediction, we build exhaustive ensembles that consider all possible attribute subsets of a given size. As the resulting Hoeffding trees are not all equally important, we weigh them in a suitable manner to obtain accurate classifications. This is done by combining the log-odds of their probability estimates using sigmoid perceptrons, with one perceptron per class. We propose a mechanism for setting the perceptrons' learning rate using the ADWIN change detection method for data streams, and also use ADWIN to reset ensemble members (i.e. Hoeffding trees) when they no longer perform well. Our experiments show that the resulting ensemble classifier outperforms bagging for data streams in terms of accuracy when both are used in conjunction with adaptive naive Bayes Hoeffding trees, at the expense of runtime and memory consumption.}
}

@INPROCEEDINGS{zhang10:_enhan,
  AUTHOR = {Edmond Zhang and Michael Mayo},
  TITLE = {Enhanced spatial pyramid matching using log-polar-based image subdivision and representation},
  BOOKTITLE = {Proc International Conference on Digital Image Computing: Techniques and Applications},
  YEAR = 2010,
  SERIES = {Sydney, Australia},
  PUBLISHER = {IEEE Computer Society},
  HTTP = {http://hdl.handle.net/10289/4544},
  ABSTRACT = {This paper presents a new model for capturing spatial information for object categorization with bag-of-words (BOW). BOW models have recently become popular for the task of object recognition, owing to their good performance and simplicity. Much work has been proposed over the years to improve the BOW model, where the Spatial Pyramid Matching (SPM) technique is the most notable. We propose a new method to exploit spatial relationships between image features, based on binned log-polar grids. Our model works by partitioning the image into grids of different scales and orientations and computing histogram of local features within each grid. Experimental results show that our approach improves the results on three diverse datasets over the SPM technique.}
}

@INPROCEEDINGS{foulds10:_speed_up_and_boost_diver_densit_learn,
  AUTHOR = {James R. Foulds and Eibe Frank},
  TITLE = {Speeding Up and Boosting Diverse Density Learning},
  BOOKTITLE = {Proc 13th International Conference on Discovery Science},
  YEAR = 2010,
  PAGES = {102--116},
  SERIES = {Canberra, Australia},
  PUBLISHER = {Springer},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/dd_speedup.pdf},
  ABSTRACT = {In multi-instance learning, each example is described by a bag of instances instead of a single feature vector. In this paper, we revisit the idea of performing multi-instance classification based on a point-and-scaling concept by searching for the point in instance space with the highest diverse density. This is a computationally expensive process, and we describe several heuristics designed to improve runtime. Our results show that simple variants of existing algorithms can be used to find diverse density maxima more efficiently. We also show how significant increases in accuracy can be obtained by applying a boosting algorithm with a modified version of the diverse density algorithm as the weak learner.}
}

@INPROCEEDINGS{bifet10:_sentim_knowl_discov_in_twitt_stream_data,
  AUTHOR = {Albert Bifet and Eibe Frank},
  TITLE = {Sentiment Knowledge Discovery in {T}witter Streaming Data},
  BOOKTITLE = {Proc 13th International Conference on Discovery Science},
  YEAR = 2010,
  PAGES = {1--15},
  SERIES = {Canberra, Australia},
  PUBLISHER = {Springer},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/Twitter-crc.pdf},
  ABSTRACT = {Micro-blogs are a challenging new source of information for data mining techniques. Twitter is a micro-blogging service built to discover what is happening at any moment in time, anywhere in the world. Twitter messages are short, and generated constantly, and well suited for knowledge discovery using data stream mining. We briefly discuss the challenges that Twitter data streams pose, focusing on classification problems, and then consider these streams for opinion mining and sentiment analysis. To deal with streaming unbalanced classes, we propose a sliding window Kappa statistic for evaluation in time-changing data streams. Using this statistic we perform a study on Twitter data using learning algorithms for data streams.}
}

@ARTICLE{zimekss:_study_of_hierar_and_flat,
  AUTHOR = {Arthur Zimek and Fabian Buchwald and Eibe Frank and Stefan Kramer},
  TITLE = {A Study of Hierarchical and Flat Classification of Proteins},
  JOURNAL = {IEEE/ACM Transactions on Computational Biology and Bioinformatics},
  YEAR = {2010},
  VOLUME = {7},
  PAGES = {563--571},
  HTTP = {http://hdl.handle.net/10289/2677},
  ABSTRACT = {Automatic classification of proteins using machine learning is an important problem that has received significant attention in the literature. One feature of this problem is that expert-defined hierarchies of protein classes exist and can potentially be exploited to improve classification performance. In this article we investigate empirically whether this is the case for two such hierarchies. We compare multi-class classification techniques that exploit the information in those class hierarchies and those that do not, using logistic regression, decision trees, bagged decision trees, and support vector machines as the underlying base learners. In particular, we compare hierarchical and flat variants of ensembles of nested dichotomies. The latter have been shown to deliver strong classification performance in multi-class settings. We present experimental results for synthetic, fold recognition, enzyme classification, and remote homology detection data. Our results show that exploiting the class hierarchy improves performance on the synthetic data, but not in the case of the protein classification problems. Based on this we recommend that strong flat multi-class methods be used as a baseline to establish the benefit of exploiting class hierarchies in this area.}
}

@INPROCEEDINGS{mayo10:_evolv_petri,
  AUTHOR = {Michael Mayo and Lorenzo Beretta},
  TITLE = {Evolving concurrent Petri net models of epistasis},
  BOOKTITLE = {Proc 2nd Asian Conference on Intelligent Information and Database Systems},
  YEAR = 2010,
  PAGES = {166--175},
  SERIES = {Hue City, Vietnam},
  PUBLISHER = {Springer},
  HTTP = {http://hdl.handle.net/10289/3492},
  ABSTRACT = {A genetic algorithm is used to learn a non-deterministic Petri net-based model of non-linear gene interactions, or statistical epistasis. Petri nets are computational models of concurrent processes. However, often certain global assumptions (e.g. transition priorities) are required in order to convert a non-deterministic Petri net into a simpler deterministic model for easier analysis and evaluation. We show, by converting a Petri net into a set of state trees, that it is possible to both retain Petri net non-determinism (i.e. allowing local interactions only, thereby making the model more realistic), whilst also learning useful Petri nets with practical applications. Our Petri nets produce predictions of genetic disease risk assessments derived from clinical data that match with over 92% accuracy.}
}

@ARTICLE{beretta10:_italian,
  AUTHOR = {Lorenzo Beretta and Alessandro Santaniello and Michael Mayo and Francesca Cappiello and Maurizio Marchini and Raffaella Scorza},
  TITLE = {A 3-factor epistatic model predicts digital ulcers in Italian scleroderma patients},
  JOURNAL = {European Journal of Internal Medicine},
  YEAR = 2010,
  VOLUME = 21,
  NUMBER = 4,
  PAGES = {347--353},
  HTTP = {http://dx.doi.org/10.1016/j.ejim.2010.05.010},
  ABSTRACT = {Background The genetic background may predispose systemic sclerosis (SSc) patients to the development of digital ulcers (DUs). Methods Twenty-two functional cytokine single nucleotide polymorphisms (SNPs) and 3 HLA class I and II antigens were typed at the genomic level by polymerase chain reaction in 200 Italian SSc patients. Associations with DUs were sought by parametric models and with the Multifactor Dimensionality Reduction (MDR) algorithm to depict the presence of epistasis. Biological models consistent with MDR results were built by means of Petri nets to describe the metabolic significance of our findings. Results On the exploratory analysis, the diffuse cutaneous subset (dcSSc) was the only single factor statistically associated with DUs (p = 0.045, ns after Bonferroni correction). Gene-gene analysis showed that a 3-factor model comprising the IL-6 C-174G, the IL-2 G-330T SNPs and the HLA-B*3501 allele was predictive for the occurrence of DUs in our population (testing accuracy = 66.9%; p < 0.0001, permutation testing). Conclusion Biological interpretation via Petri net showed that IL-6 is a key factor in determining DUs occurrence and that this cytokines may synergise with HLA-B*3501 to determine DUs onset. Owing to the limited number of patients included in the study, future research are needed to replicate our statistical findings as well as to better determine their functional meaning.}
}

@INPROCEEDINGS{holmes10:_predic,
  AUTHOR = {Geoffrey Holmes and Dale Fletcher and Peter Reutemann},
  TITLE = {Predicting polycyclic aromatic hydrocarbon concentrations in soil and water samples},
  BOOKTITLE = {Proc International Congress on Environmental Modelling and Software},
  YEAR = {2010},
  HTTP = {http://hdl.handle.net/10289/4374},
  ABSTRACT = {Polycyclic Aromatic Hydrocarbons (PAHs) are compounds found in the environment that can be harmful to humans. They are typically formed due to incomplete combustion and as such remain after burning coal, oil, petrol, diesel, wood, household waste and so forth. Testing laboratories routinely screen soil and water samples taken from potentially contaminated sites for PAHs using Gas Chromatography Mass Spectrometry (GC-MS). A GC-MS device produces a chromatogram which is processed by an analyst to determine the concentrations of PAH compounds of interest. In this paper we investigate the application of data mining techniques to PAH chromatograms in order to provide reliable prediction of compound concentrations. A workflow engine with an easy-to-use graphical user interface is at the heart of processing the data. This engine allows a domain expert to set up workflows that can load the data, preprocess it in parallel in various ways and convert it into data suitable for data mining toolkits. The generated output can then be evaluated using different data mining techniques, to determine the impact of preprocessing steps on the performance of the generated models and for picking the best approach. Encouraging results for predicting PAH compound concentrations, in terms of correlation coefficients and root-mean-squared error are demonstrated.}
}

@INPROCEEDINGS{buchwald10:_fast_condit_densit_estim_for,
  AUTHOR = {Fabian Buchwald and Tobias Girschick and Eibe Frank and Stefan Kramer},
  TITLE = {Fast Conditional Density Estimation for Quantitative Structure-Activity Relationships},
  BOOKTITLE = {Proc 24th AAAI Conference on Artificial Intelligence},
  YEAR = 2010,
  PDF = {http://www.aaai.org/ocs/index.php/AAAI/AAAI10/paper/download/1819/2194},
  ABSTRACT = {Many methods for quantitative structure-activity relationships (QSARs) deliver point estimates only, without quantifying the uncertainty inherent in the prediction. One way to quantify the uncertainty of a QSAR prediction is to predict the conditional density of the activity given the structure instead of a point estimate. If a conditional density estimate is available, it is easy to derive prediction intervals of activities. In this paper, we experimentally evaluate and compare three methods for conditional density estimation for their suitability in QSAR modeling. In contrast to traditional methods for conditional density estimation, they are based on generic machine learning schemes, more specifically, class probability estimators. Our experiments show that a kernel estimator based on class probability estimates from a random forest classifier is highly competitive with Gaussian process regression, while taking only a fraction of the time for training. Therefore, generic machine-learning based methods for conditional density estimation may be a good and fast option for quantifying uncertainty in QSAR modeling.}
}

@INPROCEEDINGS{bifet10:_fast_percep_decis_tree_learn,
  AUTHOR = {Albert Bifet and Geoff Holmes and Bernhard Pfahringer and Eibe Frank},
  TITLE = {Fast Perceptron Decision Tree Learning from Evolving Data Streams},
  BOOKTITLE = {Proc 14th Pacific-Asia Conference on Knowledge Discovery and Data Mining},
  YEAR = {2010},
  PAGES = {299--310},
  SERIES = {Hyderabad, India},
  PUBLISHER = {Springer},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/bifet_PAKDD10.pdf},
  ABSTRACT = {Mining of data streams must balance three evaluation dimensions: accuracy, time and memory. Excellent accuracy on data streams has been obtained with Naive Bayes Hoeffding Trees--Hoeffding Trees with naive Bayes models at the leaf nodes--albeit with increased runtime compared to standard Hoeffding Trees. In this paper, we show that runtime can be reduced by replacing naive Bayes with perceptron classifiers, while maintaining highly competitive accuracy. We also show that accuracy can be increased even further by combining majority vote, naive Bayes, and perceptrons. We evaluate four perceptron-based learning strategies and compare them against appropriate baselines: simple perceptrons, Perceptron Hoeffding Trees, hybrid Naive Bayes Perceptron Trees, and bagged versions thereof. We implement a perceptron that uses the sigmoid activation function instead of the threshold activation function and optimizes the squared error, with one perceptron per class value. We test our methods by performing an evaluation study on synthetic and real-world datasets comprising up to ten million examples.}
}

@ARTICLE{bifet10:_moa,
  AUTHOR = {Albert Bifet and Geoff Holmes and Richard Kirkby and Bernhard Pfahringer},
  TITLE = {{MOA}: Massive Online Analysis},
  JOURNAL = {Journal of Machine Learning Research},
  YEAR = {2010},
  VOLUME = 11,
  PAGES = {1601--1604},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/MOA10.pdf},
  ABSTRACT = {Massive Online Analysis (MOA) is a software environment for implementing algorithms and running experiments for online learning from evolving data streams. MOA includes a collection of offline and online methods as well as tools for evaluation. In particular, it implements boosting, bagging, and Hoeffding Trees, all with and without Naive Bayes classifiers at the leaves. MOA supports bi-directional interaction with WEKA, the Waikato Environment for Knowledge Analysis, and is released under the GNU GPL license.}
}

@INPROCEEDINGS{carmona10:_gnusmail,
  AUTHOR = {Jos{\'e} M. Carmona-Cejudo and Manuel Baena-Garc{\'\i}a and Jos{\'e} del Campo-{\'A}vila and Rafael Morales Bueno and Albert Bifet},
  TITLE = {GNUsmail: Open Framework for On-line Email Classification},
  BOOKTITLE = {ECAI 2010 - 19th European Conference on Artificial Intelligence, Lisbon, Portugal, August 16-20, 2010, Proceedings},
  YEAR = {2010},
  PAGES = {1141--1142},
  HTTP = {http://dx.doi.org/10.3233/978-1-60750-606-5-1141},
  ABSTRACT = {Real-time classification of massive email data is a challenging task that presents its own particular difficulties. Since email data presents an important temporal component, several problems arise: emails arrive continuously, and the criteria used to classify those emails can change, so the learning algorithms have to be able to deal with concept drift. Our problem is more general than spam detection, which has received much more attention in the literature. In this paper we present GNUsmail, an open-source extensible framework for email classification, which structure supports incremental and on-line learning. This framework enables the incorporation of algorithms developed by other researchers, such as those included in WEKA and MOA. We evaluate this framework, characterized by two overlapping phases (pre-processing and learning), using the ENRON dataset, and we compare the results achieved by WEKA and MOA algorithms.}
}

@INPROCEEDINGS{bifet10:_leveraging,
  AUTHOR = {Albert Bifet and Geoffrey Holmes and Bernhard Pfahringer},
  TITLE = {Leveraging Bagging for Evolving Data Streams},
  BOOKTITLE = {Machine Learning and Knowledge Discovery in Databases, European Conference, ECML PKDD 2010, Barcelona, Spain, September 20-24, 2010, Proceedings, Part I},
  YEAR = {2010},
  PAGES = {135--150},
  HTTP = {http://dx.doi.org/10.1007/978-3-642-15880-3_15},
  ABSTRACT = {Bagging, boosting and Random Forests are classical ensemble methods used to improve the performance of single classifiers. They obtain superior performance by increasing the accuracy and diversity of the single classifiers. Attempts have been made to reproduce these methods in the more challenging context of evolving data streams. In this paper, we propose a new variant of bagging, called leveraging bagging. This method combines the simplicity of bagging with adding more randomization to the input, and output of the classifiers. We test our method by performing an evaluation study on synthetic and real-world datasets comprising up to ten million examples.}
}

@ARTICLE{bifet10:_moa_framework,
  AUTHOR = {Albert Bifet and Geoff Holmes and Bernhard Pfahringer and Philipp Kranen and Hardy Kremer and Timm Jansen and Thomas Seidl},
  TITLE = {MOA: Massive Online Analysis, a Framework for Stream Classification and Clustering},
  JOURNAL = {Journal of Machine Learning Research - Proceedings Track},
  YEAR = {2010},
  VOLUME = {11},
  PAGES = {44--50},
  HTTP = {http://www.jmlr.org/proceedings/papers/v11/bifet10a.html},
  ABSTRACT = {Massive Online Analysis (MOA) is a software environment for implementing algorithms and running experiments for online learning from evolving data streams. MOA is designed to deal with the challenging problem of scaling up the implementation of state of the art algorithms to real world dataset sizes. It contains a collection of offline and online algorithms for both classification and clustering as well as tools for evaluation. In particular, for classification it implements boosting, bagging, and Hoeffding Trees, all with and without Naive Bayes classifiers at the leaves. For clustering, it implements StreamKM++, CluStream, ClusTree, Den-Stream, D-Stream and CobWeb. Researchers benefit from MOA by getting insights into workings and problems of different approaches, practitioners can easily apply and compare several algorithms to real world data set and settings. MOA supports bi-directional interaction with WEKA, the Waikato Environment for Knowledge Analysis, and is released under the GNU GPL license.}
}

@INPROCEEDINGS{kranen10:_clustering,
  AUTHOR = {Philipp Kranen and Hardy Kremer and Timm Jansen and Thomas Seidl and Albert Bifet and Geoff Holmes and Bernhard Pfahringer},
  TITLE = {Clustering Performance on Evolving Data Streams: Assessing Algorithms and Evaluation Measures within MOA},
  BOOKTITLE = {ICDM Workshops},
  YEAR = {2010},
  PAGES = {1400--1403},
  HTTP = {http://dx.doi.org/10.1109/ICDMW.2010.17},
  ABSTRACT = {In today's applications, evolving data streams are ubiquitous. Stream clustering algorithms were introduced to gain useful knowledge from these streams in real-time. The quality of the obtained clusterings, i.e. how good they reflect the data, can be assessed by evaluation measures. A multitude of stream clustering algorithms and evaluation measures for clusterings were introduced in the literature, however, until now there is no general tool for a direct comparison of the different algorithms or the evaluation measures. In our demo, we present a novel experimental framework for both tasks. It offers the means for extensive evaluation and visualization and is an extension of the Massive Online Analysis (MOA) software environment released under the GNU GPL License.}
}

@BOOK{bifet10:_adaptive_data_mining,
  AUTHOR = {Albert Bifet},
  TITLE = {Adaptive Stream Mining: Pattern Learning and Mining from Evolving Data Streams},
  PUBLISHER = {IOS Press},
  YEAR = {2010},
  ADDRESS = {Amsterdam},
  HTTP = {http://adaptive-mining.sourceforge.net/}
}

@ARTICLE{foulds10:_review_of_multi_instan_learn_assum,
  AUTHOR = {James Foulds and Eibe Frank},
  TITLE = {A Review of Multi-Instance Learning Assumptions},
  JOURNAL = {Knowledge Engineering Review},
  VOLUME = 25,
  NUMBER = 1,
  PAGES = {1--25},
  YEAR = 2010,
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2010/FouldsAndFrankMIreview.pdf},
  ABSTRACT = {Multi-instance (MI) learning is a variant of inductive machine learning where each learning example contains a bag of instances instead of a single feature vector. The term commonly refers to the supervised setting, where each bag is associated with a label. This type of representation is a natural fit for a number of real-world learning scenarios, including drug activity prediction and image classification, hence many multi-instance learning algorithms have been proposed. Any MI learning method must relate instances to bag-level class labels, but many types of relationships between instances and class labels are possible. Although all early work in MI learning assumes a specific MI concept class known to be appropriate for a drug activity prediction domain, this "standard MI assumption" is not guaranteed to hold in other domains. Much of the recent work in MI learning has concentrated on a relaxed view of the MI problem, where the standard MI assumption is dropped, and alternative assumptions are considered instead. However, often it is not clearly stated what particular assumption is used and how it relates to other assumptions that have been proposed. In this paper, we aim to clarify the use of alternative MI assumptions by reviewing the work done in this area.}
}