source: src/main/java/weka/classifiers/meta/nestedDichotomies/DataNearBalancedND.java @ 17

Last change on this file since 17 was 4, checked in by gnappo, 14 years ago

Import of weka.

File size: 17.8 KB
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    DataNearBalancedND.java
 *    Copyright (C) 2005 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.meta.nestedDichotomies;

import weka.classifiers.Classifier;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.classifiers.meta.FilteredClassifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.MakeIndicator;
import weka.filters.unsupervised.instance.RemoveWithValues;

import java.util.Hashtable;
import java.util.Random;


/**
 <!-- globalinfo-start -->
 * A meta classifier for handling multi-class datasets with 2-class classifiers by building a random data-balanced tree structure.<br/>
 * <br/>
 * For more info, check<br/>
 * <br/>
 * Lin Dong, Eibe Frank, Stefan Kramer: Ensembles of Balanced Nested Dichotomies for Multi-class Problems. In: PKDD, 84-95, 2005.<br/>
 * <br/>
 * Eibe Frank, Stefan Kramer: Ensembles of nested dichotomies for multi-class problems. In: Twenty-first International Conference on Machine Learning, 2004.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Dong2005,
 *    author = {Lin Dong and Eibe Frank and Stefan Kramer},
 *    booktitle = {PKDD},
 *    pages = {84-95},
 *    publisher = {Springer},
 *    title = {Ensembles of Balanced Nested Dichotomies for Multi-class Problems},
 *    year = {2005}
 * }
 *
 * &#64;inproceedings{Frank2004,
 *    author = {Eibe Frank and Stefan Kramer},
 *    booktitle = {Twenty-first International Conference on Machine Learning},
 *    publisher = {ACM},
 *    title = {Ensembles of nested dichotomies for multi-class problems},
 *    year = {2004}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.trees.J48)</pre>
 *
 * <pre>
 * Options specific to classifier weka.classifiers.trees.J48:
 * </pre>
 *
 * <pre> -U
 *  Use unpruned tree.</pre>
 *
 * <pre> -C &lt;pruning confidence&gt;
 *  Set confidence threshold for pruning.
 *  (default 0.25)</pre>
 *
 * <pre> -M &lt;minimum number of instances&gt;
 *  Set minimum number of instances per leaf.
 *  (default 2)</pre>
 *
 * <pre> -R
 *  Use reduced error pruning.</pre>
 *
 * <pre> -N &lt;number of folds&gt;
 *  Set number of folds for reduced error
 *  pruning. One fold is used as pruning set.
 *  (default 3)</pre>
 *
 * <pre> -B
 *  Use binary splits only.</pre>
 *
 * <pre> -S
 *  Don't perform subtree raising.</pre>
 *
 * <pre> -L
 *  Do not clean up after the tree has been built.</pre>
 *
 * <pre> -A
 *  Laplace smoothing for predicted probabilities.</pre>
 *
 * <pre> -Q &lt;seed&gt;
 *  Seed for random data shuffling (default 1).</pre>
 *
 <!-- options-end -->
 *
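 * A minimal usage sketch (illustrative, not part of the original file;
 * assumes an already loaded multi-class Instances object named "data"
 * whose class attribute has been set):
 * <pre>
 * DataNearBalancedND nd = new DataNearBalancedND();
 * nd.setClassifier(new weka.classifiers.trees.J48());
 * nd.buildClassifier(data);
 * double[] dist = nd.distributionForInstance(data.instance(0));
 * </pre>
 *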
 * @author Lin Dong
 * @author Eibe Frank
 */
public class DataNearBalancedND 
  extends RandomizableSingleClassifierEnhancer
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 5117477294209496368L;
 
  /** The filtered classifier in which the base classifier is wrapped. */
  protected FilteredClassifier m_FilteredClassifier;
   
  /** The hashtable for this node. */
  protected Hashtable m_classifiers = new Hashtable();

  /** The first successor */
  protected DataNearBalancedND m_FirstSuccessor = null;

  /** The second successor */
  protected DataNearBalancedND m_SecondSuccessor = null;
 
  /** The classes that are grouped together at the current node */
  protected Range m_Range = null;
   
  /** Is the Hashtable given from END? */
  protected boolean m_hashtablegiven = false;
   
  /**
   * Constructor.
   */
  public DataNearBalancedND() {
   
    m_Classifier = new weka.classifiers.trees.J48();
  }
 
  /**
   * String describing default classifier.
   *
   * @return the default classifier classname
   */
  protected String defaultClassifierString() {
   
    return "weka.classifiers.trees.J48";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation        result;
    TechnicalInformation        additional;
   
    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Lin Dong and Eibe Frank and Stefan Kramer");
    result.setValue(Field.TITLE, "Ensembles of Balanced Nested Dichotomies for Multi-class Problems");
    result.setValue(Field.BOOKTITLE, "PKDD");
    result.setValue(Field.YEAR, "2005");
    result.setValue(Field.PAGES, "84-95");
    result.setValue(Field.PUBLISHER, "Springer");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Eibe Frank and Stefan Kramer");
    additional.setValue(Field.TITLE, "Ensembles of nested dichotomies for multi-class problems");
    additional.setValue(Field.BOOKTITLE, "Twenty-first International Conference on Machine Learning");
    additional.setValue(Field.YEAR, "2004");
    additional.setValue(Field.PUBLISHER, "ACM");
   
    return result;
  }

  /**
   * Set hashtable from END.
   *
   * @param table the hashtable to use
   */
  public void setHashtable(Hashtable table) {

    m_hashtablegiven = true;
    m_classifiers = table;
  }
   
  /**
   * Generates a classifier for the current node and proceeds recursively.
   *
   * @param data contains the (multi-class) instances
   * @param classes contains the indices of the classes that are present
   * @param rand the random number generator to use
   * @param classifier the classifier to use
   * @param table the Hashtable to use
   * @param instsNumAllClasses the total weight of instances per class
   * @throws Exception if anything goes wrong
   */
  private void generateClassifierForNode(Instances data, Range classes,
                                         Random rand, Classifier classifier, Hashtable table,
                                         double[] instsNumAllClasses) 
    throws Exception {
       
    // Get the indices
    int[] indices = classes.getSelection();

    // Randomize the order of the indices (Fisher-Yates shuffle)
    for (int j = indices.length - 1; j > 0; j--) {
      int randPos = rand.nextInt(j + 1);
      int temp = indices[randPos];
      indices[randPos] = indices[j];
      indices[j] = temp;
    }

    // Pick the classes for the current split
    double total = 0;
    for (int j = 0; j < indices.length; j++) {
      total += instsNumAllClasses[indices[j]];
    }
    double halfOfTotal = total / 2;
       
    // Go through the list of classes until either the left or
    // right subset exceeds half the total weight
    double sumLeft = 0, sumRight = 0;
    int i = 0, j = indices.length - 1;
    do {
      if (i == j) {
        if (rand.nextBoolean()) {
          sumLeft += instsNumAllClasses[indices[i++]];
        } else {
          sumRight += instsNumAllClasses[indices[j--]];
        }
      } else {
        sumLeft += instsNumAllClasses[indices[i++]];
        sumRight += instsNumAllClasses[indices[j--]];
      }
    } while (Utils.sm(sumLeft, halfOfTotal) && Utils.sm(sumRight, halfOfTotal));
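
    // Worked example (illustrative): for shuffled per-class weights
    // {40, 30, 20, 10}, total = 100 and halfOfTotal = 50. The first pass adds
    // 40 to the left and 10 to the right; the second adds 30 and 20, leaving
    // sumLeft = 70 >= 50, so the loop stops and the left subset keeps the
    // first two classes.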

    int first = 0, second = 0;
    if (!Utils.sm(sumLeft, halfOfTotal)) {
      first = i;
    } else {
      first = j + 1;
    }
    second = indices.length - first;

    int[] firstInds = new int[first];
    int[] secondInds = new int[second];
    System.arraycopy(indices, 0, firstInds, 0, first);
    System.arraycopy(indices, first, secondInds, 0, second);
       
    // Sort the indices (important for hash key)!
    int[] sortedFirst = Utils.sort(firstInds);
    int[] sortedSecond = Utils.sort(secondInds);
    int[] firstCopy = new int[first];
    int[] secondCopy = new int[second];
    for (int k = 0; k < sortedFirst.length; k++) {
      firstCopy[k] = firstInds[sortedFirst[k]];
    }
    firstInds = firstCopy;
    for (int k = 0; k < sortedSecond.length; k++) {
      secondCopy[k] = secondInds[sortedSecond[k]];
    }
    secondInds = secondCopy;
               
    // Unify indices to improve hashing
    if (firstInds[0] > secondInds[0]) {
      int[] help = secondInds;
      secondInds = firstInds;
      firstInds = help;
      int help2 = second;
      second = first;
      first = help2;
    }
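
    // With the pair in canonical order, a dichotomy {A|B} and its mirror
    // {B|A} yield the same hash key below, so an identical two-class
    // subproblem is trained only once when the table is shared across an
    // ensemble (e.g. by END).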

    m_Range = new Range(Range.indicesToRangeList(firstInds));
    m_Range.setUpper(data.numClasses() - 1);

    Range secondRange = new Range(Range.indicesToRangeList(secondInds));
    secondRange.setUpper(data.numClasses() - 1);
       
    // Change the class labels and build the classifier
    MakeIndicator filter = new MakeIndicator();
    filter.setAttributeIndex("" + (data.classIndex() + 1));
    filter.setValueIndices(m_Range.getRanges());
    filter.setNumeric(false);
    filter.setInputFormat(data);
    m_FilteredClassifier = new FilteredClassifier();
    if (data.numInstances() > 0) {
      m_FilteredClassifier.setClassifier(AbstractClassifier.makeCopies(classifier, 1)[0]);
    } else {
      m_FilteredClassifier.setClassifier(new weka.classifiers.rules.ZeroR());
    }
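    // (ZeroR serves as a degenerate fallback when the node receives no
    // training instances; it simply predicts the class prior.)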
    m_FilteredClassifier.setFilter(filter);

    // Save reference to hash table at current node
    m_classifiers = table;
       
    if (!m_classifiers.containsKey(getString(firstInds) + "|" + getString(secondInds))) {
      m_FilteredClassifier.buildClassifier(data);
      m_classifiers.put(getString(firstInds) + "|" + getString(secondInds), m_FilteredClassifier);
    } else {
      m_FilteredClassifier = (FilteredClassifier)m_classifiers.get(getString(firstInds) + "|" + 
                                                                   getString(secondInds));
    }
                               
    // Create two successors if necessary
    m_FirstSuccessor = new DataNearBalancedND();
    if (first == 1) {
      m_FirstSuccessor.m_Range = m_Range;
    } else {
      RemoveWithValues rwv = new RemoveWithValues();
      rwv.setInvertSelection(true);
      rwv.setNominalIndices(m_Range.getRanges());
      rwv.setAttributeIndex("" + (data.classIndex() + 1));
      rwv.setInputFormat(data);
      Instances firstSubset = Filter.useFilter(data, rwv);
      m_FirstSuccessor.generateClassifierForNode(firstSubset, m_Range, 
                                                 rand, classifier, m_classifiers,
                                                 instsNumAllClasses);
    }
    m_SecondSuccessor = new DataNearBalancedND();
    if (second == 1) {
      m_SecondSuccessor.m_Range = secondRange;
    } else {
      RemoveWithValues rwv = new RemoveWithValues();
      rwv.setInvertSelection(true);
      rwv.setNominalIndices(secondRange.getRanges());
      rwv.setAttributeIndex("" + (data.classIndex() + 1));
      rwv.setInputFormat(data);
      Instances secondSubset = Filter.useFilter(data, rwv);
      m_SecondSuccessor.generateClassifierForNode(secondSubset, secondRange, 
                                                  rand, classifier, m_classifiers,
                                                  instsNumAllClasses);
    }
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return      the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();

    // class
    result.disableAllClasses();
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(1);
   
    return result;
  }
   
  /**
   * Builds the tree recursively.
   *
   * @param data contains the (multi-class) instances
   * @throws Exception if the building fails
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();
   
    Random random = data.getRandomNumberGenerator(m_Seed);
       
    if (!m_hashtablegiven) {
      m_classifiers = new Hashtable();
    }
       
    // Check which classes are present in the
    // data and construct initial list of classes
    boolean[] present = new boolean[data.numClasses()];
    for (int i = 0; i < data.numInstances(); i++) {
      present[(int)data.instance(i).classValue()] = true;
    }
    StringBuffer list = new StringBuffer();
    for (int i = 0; i < present.length; i++) {
      if (present[i]) {
        if (list.length() > 0) {
          list.append(",");
        }
        list.append(i + 1);
      }
    }

    // Determine the number of instances in each class
    double[] instsNum = new double[data.numClasses()];
    for (int i = 0; i < data.numInstances(); i++) {
      instsNum[(int)data.instance(i).classValue()] += data.instance(i).weight();
    }
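    // Class sizes are accumulated as instance weights rather than raw counts,
    // so the near-balanced splits balance total class weight.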
     
    Range newRange = new Range(list.toString());
    newRange.setUpper(data.numClasses() - 1);
       
    generateClassifierForNode(data, newRange, random, m_Classifier, m_classifiers, instsNum);
  }
   
  /**
   * Predicts the class distribution for a given instance.
   *
   * @param inst the (multi-class) instance to be classified
   * @return the class distribution
   * @throws Exception if computing fails
   */
  public double[] distributionForInstance(Instance inst) throws Exception {
       
    double[] newDist = new double[inst.numClasses()];
    if (m_FirstSuccessor == null) {
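      // Leaf node: m_Range covers exactly one class, which receives all of
      // the probability mass.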
      for (int i = 0; i < inst.numClasses(); i++) {
        if (m_Range.isInRange(i)) {
          newDist[i] = 1;
        }
      }
      return newDist;
    } else {
      double[] firstDist = m_FirstSuccessor.distributionForInstance(inst);
      double[] secondDist = m_SecondSuccessor.distributionForInstance(inst);
      double[] dist = m_FilteredClassifier.distributionForInstance(inst);
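      // dist[1] is the two-class model's probability that the instance falls
      // into the first group of classes (m_Range); multiplying by the
      // successors' distributions chains the conditional probabilities down
      // the nested dichotomy tree.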
      for (int i = 0; i < inst.numClasses(); i++) {
        if ((firstDist[i] > 0) && (secondDist[i] > 0)) {
          System.err.println("Panik!!");
        }
        if (m_Range.isInRange(i)) {
          newDist[i] = dist[1] * firstDist[i];
        } else {
          newDist[i] = dist[0] * secondDist[i];
        }
      }
      if (!Utils.eq(Utils.sum(newDist), 1)) {
        System.err.println(Utils.sum(newDist));
        for (int j = 0; j < dist.length; j++) {
          System.err.print(dist[j] + " ");
        }
        System.err.println();
        for (int j = 0; j < newDist.length; j++) {
          System.err.print(newDist[j] + " ");
        }
        System.err.println();
        System.err.println(inst);
        System.err.println(m_FilteredClassifier);
        //System.err.println(m_Data);
        System.err.println("bad");
      }
      return newDist;
    }
  }
   
  /**
   * Returns the list of indices as a string.
   *
   * @param indices the indices to return as string
   * @return the indices as string
   */
  public String getString(int [] indices) {

    StringBuffer string = new StringBuffer();
    for (int i = 0; i < indices.length; i++) {
      if (i > 0) {
        string.append(',');
      }
      string.append(indices[i]);
    }
    return string.toString();
  }
       
  /**
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
           
    return 
        "A meta classifier for handling multi-class datasets with 2-class "
      + "classifiers by building a random data-balanced tree structure.\n\n"
      + "For more info, check\n\n"
      + getTechnicalInformation().toString();
  }
       
  /**
   * Outputs the classifier as a string.
   *
   * @return a string representation of the classifier
   */
  public String toString() {
           
    if (m_classifiers == null) {
      return "DataNearBalancedND: No model built yet.";
    }
    StringBuffer text = new StringBuffer();
    text.append("DataNearBalancedND");
    treeToString(text, 0);
           
    return text.toString();
  }
       
  /**
   * Returns a string description of the tree.
   *
   * @param text the buffer to add the node to
   * @param nn the node number
   * @return the next node number
   */
  private int treeToString(StringBuffer text, int nn) {
           
    nn++;
    text.append("\n\nNode number: " + nn + "\n\n");
    if (m_FilteredClassifier != null) {
      text.append(m_FilteredClassifier);
    } else {
      text.append("null");
    }
    if (m_FirstSuccessor != null) {
      nn = m_FirstSuccessor.treeToString(text, nn);
      nn = m_SecondSuccessor.treeToString(text, nn);
    }
    return nn;
  }
 
  /**
   * Returns the revision string.
   *
   * @return            the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5928 $");
  }
       
  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String [] argv) {
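    // Example command line (illustrative file name):
    //   java weka.classifiers.meta.nestedDichotomies.DataNearBalancedND \
    //     -t some.arff -W weka.classifiers.trees.J48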
    runClassifier(new DataNearBalancedND(), argv);
  }
}