2014.bib

@COMMENT{{Automatically generated - DO NOT MODIFY!}}

@INPROCEEDINGS{reutemann14:_scien_adams,
  AUTHOR = {Peter Reutemann and Geoffrey Holmes},
  TITLE = {Scientific workflow management with {ADAMS}: building and data mining a database of crop protection and related data},
  BOOKTITLE = {The plant protection data tool box},
  PAGES = {167--174},
  YEAR = 2014,
  EDITOR = {Beresford RM and Froud KJ and Kean JM and Worner SP},
  ABSTRACT = {Data mining is said to be a field that encourages data to speak for itself rather than “forcing” data to conform to a pre-specified model, but we have to acknowledge that what is spoken by the data may well be gibberish. To obtain meaning from data it is important to use techniques systematically, to follow sound experimental procedure and to examine results expertly. This paper presents a framework for scientific discovery from data with two examples from the biological sciences. The first case is a re-investigation of previously published work on aphid trap data to predict aphid phenology and the second is a commercial application for identifying and counting insects captured on sticky plates in greenhouses. Using support vector machines rather than neural networks or linear regression gives better results in case of the aphid trap data. For both cases, we use the open source machine learning workbench WEKA for predictive modelling and the open source ADAMS workflow system for automating data collection, preparation, feature generation, application of predictive models and output generation.},
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2014/Reutemann2014.pdf}
}

@TECHREPORT{frank14:_fully_super_train_of_gauss,
  AUTHOR = {Eibe Frank},
  TITLE = {Fully Supervised Training of {G}aussian Radial Basis Function Networks in {WEKA}},
  INSTITUTION = {Department of Computer Science, University of Waikato},
  NUMBER = {04/14},
  YEAR = 2014,
  PDF = {http://www.cs.waikato.ac.nz/~ml/publications/2014/rbf_networks_in_weka_description.pdf},
  ABSTRACT = {Radial basis function networks are a type of feedforward network with a long history in machine learning. In spite of this, there is relatively little literature on how to train them so that accurate predictions are obtained. A common strategy is to train the hidden layer of the network using k-means clustering and the output layer using supervised learning. However, Wettschereck and Dietterich [2] found that supervised training of hidden layer parameters can improve predictive performance. They investigated learning center locations, local variances of the basis functions, and attribute weights, in a supervised manner. This document discusses supervised training of Gaussian radial basis function networks in the WEKA machine learning software. More specifically, we discuss the RBFClassifier and RBFRegressor classes available as part of the RBFNetwork package for WEKA 3.7 and consider (a) learning of center locations and one global variance parameter, (b) learning of center locations and one local variance parameter per basis function, and (c) learning center locations with per-attribute local variance parameters. We also consider learning attribute weights jointly with other parameters.}
}

@ARTICLE{harris14:_dna_methy_assoc_colon_mucos,
  AUTHOR = {R Alan Harris and Dorottya Nagy-Szakal and Sabina AV Mir and Eibe Frank and Reka Szigeti and Jess L Kaplan and Jiri Bronsky and Antone Opekun and George D Ferry and Harland Winter and Richard Kellermayer},
  TITLE = {{DNA} methylation-associated colonic mucosal immune and defense responses in treatment-na{\"i}ve pediatric ulcerative colitis},
  JOURNAL = {Epigenetics},
  YEAR = 2014,
  PAGES = {1131--1137},
  PDF = {http://www.tandfonline.com/doi/pdf/10.4161/epi.29446},
  ABSTRACT = {Inflammatory bowel diseases (IBD) are emerging globally, indicating that environmental factors may be important in their pathogenesis. Colonic mucosal epigenetic changes, such as DNA methylation, can occur in response to the environment and have been implicated in IBD pathology. However, mucosal DNA methylation has not been examined in treatment-naive patients. We studied DNA methylation in untreated, left sided colonic biopsy specimens using the Infinium HumanMethylation450 BeadChip array. We analyzed 22 control (C) patients, 15 untreated Crohn’s disease (CD) patients, and 9 untreated ulcerative colitis (UC) patients from two cohorts. Samples obtained at the time of clinical remission from two of the treatment-naive UC patients were also included into the analysis. UC-specific gene expression was interrogated in a subset of adjacent samples (5 C and 5 UC) using the Affymetrix GeneChip PrimeView Human Gene Expression arrays. Only treatment-naive UC separated from control. One hundred and twenty genes with significant expression change in UC (> 2-fold, P < 0.05) were associated with differentially methylated regions (DMRs). Epigenetically associated gene expression changes (including gene expression changes in the IFITM1, ITGB2, S100A9, SLPI, SAA1, and STAT3 genes) were linked to colonic mucosal immune and defense responses. These findings underscore the relationship between epigenetic changes and inflammation in pediatric treatment-naive UC and may have potential etiologic, diagnostic, and therapeutic relevance for IBD.},
  VOLUME = {9},
  NUMBER = {8}
}

@INPROCEEDINGS{ienco14:_chang_detec_in_categ_evolv_data_stream,
  AUTHOR = {Ienco, Dino and Bifet, Albert and Pfahringer, Bernhard and Poncelet, Pascal},
  TITLE = {Change detection in categorical evolving data streams},
  BOOKTITLE = {Proc 29th Annual ACM Symposium on Applied Computing},
  YEAR = 2014,
  PUBLISHER = {ACM},
  PAGES = {792--797},
  ABSTRACT = {Detecting change in evolving data streams is a central issue for accurate adaptive learning. In real world applications, data streams have categorical features, and changes induced in the data distribution of these categorical features have not been considered extensively so far. Previous work on change detection focused on detecting changes in the accuracy of the learners, but without considering changes in the data distribution.
To cope with these issues, we propose a new unsupervised change detection method, called CDCStream (Change Detection in Categorical Data Streams), well suited for categorical data streams. The proposed method is able to detect changes in a batch incremental scenario. It is based on the two following characteristics: (i) a summarization strategy is proposed to compress the actual batch by extracting a descriptive summary and (ii) a new segmentation algorithm is proposed to highlight changes and issue warnings for a data stream. To evaluate our proposal we employ it in a learning task over real world data and we compare its results with state of the art methods. We also report qualitative evaluation in order to show the behavior of CDCStream.
 }
}

@INPROCEEDINGS{RNTI/papers/1001968,
  AUTHOR = {Dino Ienco and Albert Bifet and Bernhard Pfahringer and Pascal Poncelet},
  TITLE = {Détection de changements dans des flots de données qualitatives},
  BOOKTITLE = {Proc 14èmes Journées Francophones Extraction et Gestion des Connaissances},
  YEAR = {2014},
  PAGES = {517--520},
  PUBLISHER = {Hermann-Éditions},
  ABSTRACT = {Pour mieux analyser et extraire de la connaissance de flots de données,
des approches spécifiques ont été proposées ces dernières années. L’un des
challenges auquel elles doivent faire face est la détection de changement dans
les données. Alors que de plus en plus de données qualitatives sont générées,
peu de travaux de recherche se sont intéressés à la détection de changement dans
ce contexte et les travaux existants se sont principalement focalisés sur la qualité
d’un modèle appris plutôt qu’au réel changement dans les données. Dans
cet article nous proposons une nouvelle méthode de détection de changement
non supervisée, appelée CDCStream (Change Detection in Categorical Data
Streams), adaptée aux flux de données qualitatives.}
}

@INPROCEEDINGS{mayo14:_evolv_artif_datas_to_improv_inter_class,
  AUTHOR = {Mayo, Michael and Sun, Quan},
  TITLE = {Evolving artificial datasets to improve interpretable classifiers},
  BOOKTITLE = {Proc 2014 IEEE Congress on Evolutionary Computation},
  YEAR = 2014,
  PUBLISHER = {IEEE},
  PAGES = {2367--2374},
  ABSTRACT = {Differential Evolution can be used to construct effective and compact artificial training datasets for machine learning algorithms. In this paper, a series of comparative experiments are performed in which two simple interpretable supervised classifiers (specifically, Naive Bayes and linear Support Vector Machines) are trained (i) directly on “real” data, as would be the normal case, and (ii) indirectly, using special artificial datasets derived from real data via evolutionary optimization. The results across several challenging test problems show that supervised classifiers trained indirectly using our novel evolution-based approach produce models with superior predictive classification performance. Besides presenting the accuracy of the learned models, we also analyze the sensitivity of our artificial data optimization process to Differential Evolution's parameters, and then we examine the statistical characteristics of the artificial data that is evolved.}
}

@ARTICLE{DBLP:journals/corr/PuurulaRB14,
  AUTHOR = {Antti Puurula and
               Jesse Read and
               Albert Bifet},
  TITLE = {Kaggle {LSHTC4} Winning Solution},
  JOURNAL = {CoRR},
  VOLUME = {abs/1405.0546},
  YEAR = {2014},
  URL = {http://arxiv.org/abs/1405.0546},
  ABSTRACT = {Our winning submission to the 2014 Kaggle competition for Large Scale Hierarchical Text Classification (LSHTC) consists mostly of an ensemble of sparse generative models extending Multinomial Naive Bayes. The base-classifiers consist of hierarchically smoothed models combining document, label, and hierarchy level Multinomials, with feature pre-processing using variants of TF-IDF and BM25. Additional diversification is introduced by different types of folds and random search optimization for different measures. The ensemble algorithm optimizes macroFscore by predicting the documents for each label, instead of the usual prediction of labels per document. Scores for documents are predicted by weighted voting of base-classifier outputs with a variant of Feature-Weighted Linear Stacking. The number of documents per label is chosen using label priors and thresholding of vote scores. This document describes the models and software used to build our solution. Reproducing the results for our solution can be done by running the scripts included in the Kaggle package. A package omitting precomputed result files is also distributed. All code is open source, released under GNU GPL 2.0, and GPL 3.0 for Weka and Meka dependencies.}
}

@INPROCEEDINGS{DBLP:conf/icdm/ReadPB14,
  AUTHOR = {Jesse Read and
               Antti Puurula and
               Albert Bifet},
  TITLE = {Multi-label Classification with Meta-Labels},
  BOOKTITLE = {Proc 2014 {IEEE} International Conference on Data Mining},
  PAGES = {941--946},
  YEAR = {2014},
  PUBLISHER = {IEEE},
  ABSTRACT = {The area of multi-label classification has rapidly developed in recent years. It has become widely known that the baseline binary relevance approach can easily be outperformed by methods which learn labels together. A number of methods have grown around the label powerset approach, which models label combinations together as class values in a multi-class problem. We describe the label-powerset-based solutions under a general framework of meta-labels and provide some theoretical justification for this framework which has been lacking; explaining how meta-labels essentially allow a random projection into a space where non-linearities can easily be tackled with established linear learning algorithms. The proposed framework enables comparison and combination of related approaches to different multi-label problems. We present a novel model in the framework and evaluate it empirically against several high-performing methods, with respect to predictive performance and scalability, on a number of datasets and evaluation metrics. This deployment obtains competitive accuracy for a fraction of the computation required by the current meta-label methods for multi-label classification.}
}

@INPROCEEDINGS{DBLP:conf/fois/SarjantLSW14,
  AUTHOR = {Samuel Sarjant and
               Catherine Legg and
               Matt Stannett and
               Duncan Willcock},
  TITLE = {Crowd-Sourcing Ontology Content and Curation: The Massive Ontology
               Interface},
  BOOKTITLE = {Proc 8th International Conference on Formal Ontology in Information Systems},
  PAGES = {251--260},
  YEAR = {2014},
  URL = {http://dx.doi.org/10.3233/978-1-61499-438-1-251},
  ABSTRACT = {Crowd-sourcing is an increasingly popular approach to building large, complex public-interest projects. The ontology infrastructure that is required to scaffold the goals of the Semantic Web is such a project. We have been thinking hard about what ‘crowd-sourced ontology’ might look like, and are currently advancing on two fronts: user-supplied content and user-supplied curation. We achieve the former by mining 90\% of the concepts and relations in our ontology from Wikipedia. However other research groups are also pursuing this strategy (e.g. DBpedia, YAGO). Our claim to be on the cutting edge is in our latter goal. We are building a web portal: The Massive Ontology Interface, for users to interact with our ontology in a clean, syntax-light format. The interface is designed to enable users to identify errors and add new concepts and assertions, and to discuss the knowledge in the open-ended way that fosters real collaboration in Wikipedia. We here present our system, discuss the design decisions that have shaped it and the motivation we offer users to interact with it.},
  PUBLISHER = {IOS Press}
}

@PHDTHESIS{sun14:_meta,
  AUTHOR = {Quan Sun},
  TITLE = {Meta-learning and the full model selection problem},
  SCHOOL = {Department of Computer Science, University of Waikato},
  YEAR = 2014,
  URL = {http://researchcommons.waikato.ac.nz/handle/10289/8520},
  ABSTRACT = {When working as a data analyst, one of my daily tasks is to select appropriate tools from a set of existing data analysis techniques in my toolbox, including data preprocessing, outlier detection, feature selection, learning algorithm and evaluation techniques, for a given data project. This indeed was an enjoyable job at the beginning, because to me finding patterns and valuable information from data is always fun. Things become tricky when several projects needed to be done in a relatively short time.
Naturally, as a computer science graduate, I started to ask myself, "What can be automated here?"; because, intuitively, part of my work is more or less a loop that can be programmed. Literally, the loop is "choose, run, test and choose again... until some criterion/goals are met".
In other words, I use my experience or knowledge about machine learning and data mining to guide and speed up the process of selecting and applying techniques in order to build a relatively good predictive model for a given dataset for some purpose. So the following questions arise:
"Is it possible to design and implement a system that helps a data analyst to choose from a set of data mining tools? Or at least that provides a useful recommendation about tools that potentially save some time for a human analyst."
To answer these questions, I decided to undertake a long-term study on this topic, to think, define, research, and simulate this problem before coding my dream system. This thesis presents research results, including new methods, algorithms, and theoretical and empirical analysis from two directions, both of which try to propose systematic and efficient solutions to the questions above, using different resource requirements, namely, the meta-learning-based algorithm/parameter ranking approach and the meta-heuristic search-based full-model selection approach.
Some of the results have been published in research papers; thus, this thesis also serves as a coherent collection of results in a single volume.}
}

@INPROCEEDINGS{sun14:_hierar_meta_rules_for_scalab_meta_learn,
  AUTHOR = {Sun, Quan and Pfahringer, Bernhard},
  TITLE = {Hierarchical meta-rules for scalable meta-learning},
  EDITOR = {Pham, Duc-Nghia and Park, Seong-Bae},
  BOOKTITLE = {Proc 13th Pacific Rim International Conference on Artificial intelligence},
  YEAR = 2014,
  PUBLISHER = {Springer},
  PAGES = {383--395},
  ABSTRACT = {The Pairwise Meta-Rules (PMR) method proposed in [18] has been shown to improve the predictive performances of several meta-learning algorithms for the algorithm ranking problem. Given m target objects (e.g., algorithms), the training complexity of the PMR method with respect to m is quadratic: (m over 2)=m×(m−1)/2. This is usually not a problem when m is moderate, such as when ranking 20 different learning algorithms. However, for problems with a much larger m, such as the meta-learning-based parameter ranking problem, where m can be 100+, the PMR method is less efficient. In this paper, we propose a novel method named Hierarchical Meta-Rules (HMR), which is based on the theory of orthogonal contrasts. The proposed HMR method has a linear training complexity with respect to m, providing a way of dealing with a large number of objects that the PMR method cannot handle efficiently. Our experimental results demonstrate the benefit of the new method in the context of meta-learning.}
}

@INPROCEEDINGS{DBLP:conf/adcs/TrotmanPB14,
  AUTHOR = {Andrew Trotman and
               Antti Puurula and
               Blake Burgess},
  TITLE = {Improvements to {BM25} and Language Models Examined},
  BOOKTITLE = {Proc 2014 Australasian Document Computing Symposium},
  PAGES = {58},
  YEAR = {2014},
  PUBLISHER = {ACM},
  ABSTRACT = {Recent work on search engine ranking functions report improvements on BM25 and Language Models with Dirichlet Smoothing. In this investigation 9 recent ranking functions (BM25, BM25+, BM25T, BM25-adpt, BM25L, TFlID, LM-DS, LM-PYP, and LM-PYP-TFIDF) are compared by training on the INEX 2009 Wikipedia collection and testing on INEX 2010 and 9 TREC collections. We find that once trained (using particle swarm optimization) there is very little difference in performance between these functions, that relevance feedback is effective, that stemming is effective, and that it remains unclear which function is best overall.}
}

@INPROCEEDINGS{DBLP:conf/wise/TsoumakasPQVDPRSS14,
  AUTHOR = {Grigorios Tsoumakas and
               Apostolos Papadopoulos and
               Weining Qian and
               Stavros Vologiannidis and
               Alexander D'yakonov and
               Antti Puurula and
               Jesse Read and
               Jan Svec and
               Stanislav Semenov},
  TITLE = {{WISE} 2014 Challenge: Multi-label Classification of Print Media Articles
               to Topics},
  BOOKTITLE = {Proc 15th International Conference on Web Information Systems Engineering},
  PAGES = {541--548},
  YEAR = {2014},
  URL = {http://dx.doi.org/10.1007/978-3-319-11746-1_40},
  ABSTRACT = { The WISE 2014 challenge was concerned with the task of multi-label classification of articles coming from Greek print media. Raw data comes from the scanning of print media, article segmentation, and optical character segmentation, and therefore is quite noisy. Each article is examined by a human annotator and categorized to one or more of the topics being monitored. Topics range from specific persons, products, and companies that can be easily categorized based on keywords, to more general semantic concepts, such as environment or economy. Building multi-label classifiers for the automated annotation of articles into topics can support the work of human annotators by suggesting a list of all topics by order of relevance, or even automate the annotation process for media and/or categories that are easier to predict. This saves valuable time and allows a media monitoring company to expand the portfolio of media being monitored. This paper summarizes the approaches of the top 4 among the 121 teams that participated in the competition.}
}

@INPROCEEDINGS{rijn14:_algor_selec_data_stream,
  AUTHOR = {van Rijn, Jan N and Holmes, Geoffrey and Pfahringer, Bernhard and Vanschoren, Joaquin},
  TITLE = {Algorithm selection on data streams},
  BOOKTITLE = {Proc 17th International Conference on Discovery Science},
  YEAR = 2014,
  PUBLISHER = {Springer},
  PAGES = {325--336},
  ABSTRACT = {We explore the possibilities of meta-learning on data streams, in particular algorithm selection. In a first experiment we calculate the characteristics of a small sample of a data stream, and try to predict which classifier performs best on the entire stream. This yields promising results and interesting patterns. In a second experiment, we build a meta-classifier that predicts, based on measurable data characteristics in a window of the data stream, the best classifier for the next window. The results show that this meta-algorithm is very competitive with state of the art ensembles, such as OzaBag, OzaBoost and Leveraged Bagging. The results of all experiments are made publicly available in an online experiment database, for the purpose of verifiability, reproducibility and generalizability.}
}

@INPROCEEDINGS{rijn14:_towar_meta_learn_over_data_stream,
  AUTHOR = {van Rijn, Jan N and Holmes, Geoffrey and Pfahringer, Bernhard and Vanschoren, Joaquin},
  TITLE = {Towards meta-learning over data streams},
  BOOKTITLE = {Proc International Workshop on Meta-learning and Algorithm Selection},
  YEAR = 2014,
  VOLUME = {Vol-1201},
  PAGES = {37--38},
  PUBLISHER = {ceur-ws.org},
  PDF = {http://ceur-ws.org/Vol-1201/paper-08.pdf},
  ABSTRACT = {Modern society produces vast streams of data. Many stream mining algorithms have been developed to capture general trends in these streams, and make predictions for future observations, but relatively little is known about which algorithms perform particularly well on which kinds of data. Moreover, it is possible that the characteristics of the data change over time, and thus that a different algorithm should be recommended at various points in time. Figure 1 illustrates this. As such, we are dealing with the Algorithm Selection Problem [9] in a data stream setting. Based on measurable meta-features from a window of observations from a data stream, a meta-algorithm is built that predicts the best classifier for the next window. Our results show that this meta-algorithm is competitive with state-of-the-art data streaming ensembles, such as OzaBag [6], OzaBoost [6] andLeveraged Bagging[3].}
}

@ARTICLE{zliobaite14:_activ_learn_with_drift_stream_data,
  AUTHOR = {{\v{Z}}liobait{\.e}, Indr{\.e} and Bifet, Albert and Pfahringer, Bernhard and Holmes, Geoff},
  TITLE = {Active learning with drifting streaming data},
  JOURNAL = {IEEE Transactions on Neural Networks and Learning Systems},
  YEAR = 2014,
  VOLUME = 25,
  NUMBER = 1,
  PAGES = {27--39},
  DOI = {10.1109/TNNLS.2012.2236570},
  ABSTRACT = {In learning to classify streaming data, obtaining true labels may require major effort and may incur excessive cost. Active learning focuses on carefully selecting as few labeled instances as possible for learning an accurate predictive model. Streaming data poses additional challenges for active learning, since the data distribution may change over time (concept drift) and models need to adapt. Conventional active learning strategies concentrate on querying the most uncertain instances, which are typically concentrated around the decision boundary. Changes occurring further from the boundary may be missed, and models may fail to adapt. This paper presents a theoretically supported framework for active learning from drifting data streams and develops three active learning strategies for streaming data that explicitly handle concept drift. They are based on uncertainty, dynamic allocation of labeling efforts over time, and randomization of the search space. We empirically demonstrate that these strategies react well to changes that can occur anywhere in the instance space and unexpectedly.}
}

@ARTICLE{Bravo-MarquezMP14,
  AUTHOR = {Felipe Bravo{-}Marquez and
               Marcelo Mendoza and
               Barbara Poblete},
  TITLE = {Meta-level sentiment models for big social data analysis},
  JOURNAL = {Knowl.-Based Syst.},
  VOLUME = {69},
  PAGES = {86--99},
  YEAR = {2014},
  URL = {https://doi.org/10.1016/j.knosys.2014.05.016},
  DOI = {10.1016/j.knosys.2014.05.016},
  PDF = {https://www.cs.waikato.ac.nz/~fbravoma/publications/KBS2014.pdf},
  ABSTRACT = {People react to events, topics and entities by expressing their personal opinions and emotions. These reactions can correspond to a wide range of intensities, from very mild to strong. An adequate processing and understanding of these expressions has been the subject of research in several fields, such as business and politics. In this context, Twitter sentiment analysis, which is the task of automatically identifying and extracting subjective information from tweets, has received increasing attention from the Web mining community. Twitter provides an extremely valuable insight into human opinions, as well as new challenging Big Data problems. These problems include the processing of massive volumes of streaming data, as well as the automatic identification of human expressiveness within short text messages. In that area, several methods and lexical resources have been proposed in order to extract sentiment indicators from natural language texts at both syntactic and semantic levels. These approaches address different dimensions of opinions, such as subjectivity, polarity, intensity and emotion. This article is the first study of how these resources, which are focused on different sentiment scopes, complement each other. With this purpose we identify scenarios in which some of these resources are more useful than others. Furthermore, we propose a novel approach for sentiment classification based on meta-level features. This supervised approach boosts existing sentiment classification of subjectivity and polarity detection on Twitter. Our results show that the combination of meta-level features provides significant improvements in performance. However, we observe that there are important differences that rely on the type of lexical resource, the dataset used to build the model, and the learning strategy. Experimental results indicate that manually generated lexicons are focused on emotional words, being very useful for polarity prediction. 
On the other hand, lexicons generated with automatic methods include neutral words, introducing noise in the detection of subjectivity. Our findings indicate that polarity and subjectivity prediction are different dimensions of the same problem, but they need to be addressed using different subspace features. Lexicon-based approaches are recommendable for polarity, and stylistic part-of-speech based approaches are meaningful for subjectivity. With this research we offer a more global insight of the resource components for the complex task of classifying human emotion and opinion.}
}

@ARTICLE{Marrese-TaylorVB14,
  AUTHOR = {Edison Marrese{-}Taylor and
               Juan D. Vel{\'{a}}squez and
               Felipe Bravo{-}Marquez},
  TITLE = {A novel deterministic approach for aspect-based opinion mining in
               tourism products reviews},
  JOURNAL = {Expert Syst. Appl.},
  VOLUME = {41},
  NUMBER = {17},
  PAGES = {7764--7775},
  YEAR = {2014},
  URL = {https://doi.org/10.1016/j.eswa.2014.05.045},
  DOI = {10.1016/j.eswa.2014.05.045},
  PDF = {https://www.cs.waikato.ac.nz/~fbravoma/publications/ESWA2014.pdf},
  ABSTRACT = {This work proposes an extension of Bing Liu’s aspect-based opinion mining approach in order to apply it to the tourism domain. The extension concerns with the fact that users refer differently to different kinds of products when writing reviews on the Web. Since Liu’s approach is focused on physical product reviews, it could not be directly applied to the tourism domain, which presents features that are not considered by the model. Through a detailed study of on-line tourism product reviews, we found these features and then model them in our extension, proposing the use of new and more complex NLP-based rules for the tasks of subjective and sentiment classification at the aspect-level. We also entail the task of opinion visualization and summarization and propose new methods to help users digest the vast availability of opinions in an easy manner. Our work also included the development of a generic architecture for an aspect-based opinion mining tool, which we then used to create a prototype and analyze opinions from TripAdvisor in the context of the tourism industry in Los Lagos, a Chilean administrative region also known as the Lake District. Results prove that our extension is able to perform better than Liu’s model in the tourism domain, improving both Accuracy and Recall for the tasks of subjective and sentiment classification. Particularly, the approach is very effective in determining the sentiment orientation of opinions, achieving an F-measure of 92\% for the task. However, on average, the algorithms were only capable of extracting 35\% of the explicit aspect expressions, using a non-extended approach for this task. Finally, results also showed the effectiveness of our design when applied to solving the industry’s specific issues in the Lake District, since almost 80\% of the users that used our tool considered that our tool adds valuable information to their business.}
}