source: src/main/java/weka/experiment/CrossValidationResultProducer.java @ 28

Last change on this file since 28 was 4, checked in by gnappo, 14 years ago

Import di weka.

File size: 25.6 KB
Line 
1/*
2 *    This program is free software; you can redistribute it and/or modify
3 *    it under the terms of the GNU General Public License as published by
4 *    the Free Software Foundation; either version 2 of the License, or
5 *    (at your option) any later version.
6 *
7 *    This program is distributed in the hope that it will be useful,
8 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
9 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 *    GNU General Public License for more details.
11 *
12 *    You should have received a copy of the GNU General Public License
13 *    along with this program; if not, write to the Free Software
14 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17/*
18 *    CrossValidationResultProducer.java
19 *    Copyright (C) 1999 University of Waikato, Hamilton, New Zealand
20 *
21 */
22
23
24package weka.experiment;
25
26import weka.core.AdditionalMeasureProducer;
27import weka.core.Instances;
28import weka.core.Option;
29import weka.core.OptionHandler;
30import weka.core.RevisionHandler;
31import weka.core.RevisionUtils;
32import weka.core.Utils;
33
34import java.io.File;
35import java.util.Calendar;
36import java.util.Enumeration;
37import java.util.Random;
38import java.util.TimeZone;
39import java.util.Vector;
40
41
42/**
43 <!-- globalinfo-start -->
44 * Generates for each run, carries out an n-fold cross-validation, using the set SplitEvaluator to generate some results. If the class attribute is nominal, the dataset is stratified. Results for each fold are generated, so you may wish to use this in addition with an AveragingResultProducer to obtain averages for each run.
45 * <p/>
46 <!-- globalinfo-end -->
47 *
48 <!-- options-start -->
49 * Valid options are: <p/>
50 *
51 * <pre> -X &lt;number of folds&gt;
52 *  The number of folds to use for the cross-validation.
53 *  (default 10)</pre>
54 *
55 * <pre> -D
56 * Save raw split evaluator output.</pre>
57 *
58 * <pre> -O &lt;file/directory name/path&gt;
59 *  The filename where raw output will be stored.
60 *  If a directory name is specified then then individual
61 *  outputs will be gzipped, otherwise all output will be
62 *  zipped to the named file. Use in conjuction with -D. (default splitEvalutorOut.zip)</pre>
63 *
64 * <pre> -W &lt;class name&gt;
65 *  The full class name of a SplitEvaluator.
66 *  eg: weka.experiment.ClassifierSplitEvaluator</pre>
67 *
68 * <pre>
69 * Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
70 * </pre>
71 *
72 * <pre> -W &lt;class name&gt;
73 *  The full class name of the classifier.
74 *  eg: weka.classifiers.bayes.NaiveBayes</pre>
75 *
76 * <pre> -C &lt;index&gt;
77 *  The index of the class for which IR statistics
78 *  are to be output. (default 1)</pre>
79 *
80 * <pre> -I &lt;index&gt;
81 *  The index of an attribute to output in the
82 *  results. This attribute should identify an
83 *  instance in order to know which instances are
84 *  in the test set of a cross validation. if 0
85 *  no output (default 0).</pre>
86 *
87 * <pre> -P
88 *  Add target and prediction columns to the result
89 *  for each fold.</pre>
90 *
91 * <pre>
92 * Options specific to classifier weka.classifiers.rules.ZeroR:
93 * </pre>
94 *
95 * <pre> -D
96 *  If set, classifier is run in debug mode and
97 *  may output additional info to the console</pre>
98 *
99 <!-- options-end -->
100 *
101 * All options after -- will be passed to the split evaluator.
102 *
103 * @author Len Trigg (trigg@cs.waikato.ac.nz)
104 * @version $Revision: 1.17 $
105 */
public class CrossValidationResultProducer 
  implements ResultProducer, OptionHandler, AdditionalMeasureProducer, 
             RevisionHandler {
 
  /** for serialization */
  static final long serialVersionUID = -1580053925080091917L;
 
  /** The dataset of interest; set via setInstances() before doRun()/doRunKeys() */
  protected Instances m_Instances;

  /** The ResultListener to send results to */
  protected ResultListener m_ResultListener = new CSVResultListener();

  /** The number of folds in the cross-validation (default 10, option -X) */
  protected int m_NumFolds = 10;

  /** Save raw output of split evaluators --- for debugging purposes (option -D) */
  protected boolean m_debugOutput = false;

  /** The output zipper to use for saving raw splitEvaluator output;
      lazily created in doRun() when raw output is enabled */
  protected OutputZipper m_ZipDest = null;

  /** The destination output file/directory for raw output (option -O).
      NOTE(review): "splitEvalutorOut.zip" is the historical, misspelled
      default name; kept as-is for backward compatibility. */
  protected File m_OutputFile = new File(
                                new File(System.getProperty("user.dir")), 
                                "splitEvalutorOut.zip");

  /** The SplitEvaluator used to generate results for each train/test split */
  protected SplitEvaluator m_SplitEvaluator = new ClassifierSplitEvaluator();

  /** The names of any additional measures to look for in SplitEvaluators;
      null if none have been requested */
  protected String [] m_AdditionalMeasures = null;

  // NOTE(review): the four field-name constants below are public static but
  // not final; making them final would be safer, but could break any external
  // code that assigns to them, so they are left unchanged here.

  /** The name of the key field containing the dataset name */
  public static String DATASET_FIELD_NAME = "Dataset";

  /** The name of the key field containing the run number */
  public static String RUN_FIELD_NAME = "Run";

  /** The name of the key field containing the fold number */
  public static String FOLD_FIELD_NAME = "Fold";

  /** The name of the result field containing the timestamp */
  public static String TIMESTAMP_FIELD_NAME = "Date_time";
151  /**
152   * Returns a string describing this result producer
153   * @return a description of the result producer suitable for
154   * displaying in the explorer/experimenter gui
155   */
156  public String globalInfo() {
157    return 
158        "Generates for each run, carries out an n-fold cross-validation, "
159      + "using the set SplitEvaluator to generate some results. If the class "
160      + "attribute is nominal, the dataset is stratified. Results for each fold "
161      + "are generated, so you may wish to use this in addition with an "
162      + "AveragingResultProducer to obtain averages for each run.";
163  }
164
165  /**
166   * Sets the dataset that results will be obtained for.
167   *
168   * @param instances a value of type 'Instances'.
169   */
170  public void setInstances(Instances instances) {
171   
172    m_Instances = instances;
173  }
174
175  /**
176   * Sets the object to send results of each run to.
177   *
178   * @param listener a value of type 'ResultListener'
179   */
180  public void setResultListener(ResultListener listener) {
181
182    m_ResultListener = listener;
183  }
184
185  /**
186   * Set a list of method names for additional measures to look for
187   * in SplitEvaluators. This could contain many measures (of which only a
188   * subset may be produceable by the current SplitEvaluator) if an experiment
189   * is the type that iterates over a set of properties.
190   * @param additionalMeasures an array of measure names, null if none
191   */
192  public void setAdditionalMeasures(String [] additionalMeasures) {
193    m_AdditionalMeasures = additionalMeasures;
194
195    if (m_SplitEvaluator != null) {
196      System.err.println("CrossValidationResultProducer: setting additional "
197                         +"measures for "
198                         +"split evaluator");
199      m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
200    }
201  }
202
203  /**
204   * Returns an enumeration of any additional measure names that might be
205   * in the SplitEvaluator
206   * @return an enumeration of the measure names
207   */
208  public Enumeration enumerateMeasures() {
209    Vector newVector = new Vector();
210    if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
211      Enumeration en = ((AdditionalMeasureProducer)m_SplitEvaluator).
212        enumerateMeasures();
213      while (en.hasMoreElements()) {
214        String mname = (String)en.nextElement();
215        newVector.addElement(mname);
216      }
217    }
218    return newVector.elements();
219  }
220 
221  /**
222   * Returns the value of the named measure
223   * @param additionalMeasureName the name of the measure to query for its value
224   * @return the value of the named measure
225   * @throws IllegalArgumentException if the named measure is not supported
226   */
227  public double getMeasure(String additionalMeasureName) {
228    if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
229      return ((AdditionalMeasureProducer)m_SplitEvaluator).
230        getMeasure(additionalMeasureName);
231    } else {
232      throw new IllegalArgumentException("CrossValidationResultProducer: "
233                          +"Can't return value for : "+additionalMeasureName
234                          +". "+m_SplitEvaluator.getClass().getName()+" "
235                          +"is not an AdditionalMeasureProducer");
236    }
237  }
238 
239  /**
240   * Gets a Double representing the current date and time.
241   * eg: 1:46pm on 20/5/1999 -> 19990520.1346
242   *
243   * @return a value of type Double
244   */
245  public static Double getTimestamp() {
246
247    Calendar now = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
248    double timestamp = now.get(Calendar.YEAR) * 10000
249      + (now.get(Calendar.MONTH) + 1) * 100
250      + now.get(Calendar.DAY_OF_MONTH)
251      + now.get(Calendar.HOUR_OF_DAY) / 100.0
252      + now.get(Calendar.MINUTE) / 10000.0;
253    return new Double(timestamp);
254  }
255 
256  /**
257   * Prepare to generate results.
258   *
259   * @throws Exception if an error occurs during preprocessing.
260   */
261  public void preProcess() throws Exception {
262
263    if (m_SplitEvaluator == null) {
264      throw new Exception("No SplitEvalutor set");
265    }
266    if (m_ResultListener == null) {
267      throw new Exception("No ResultListener set");
268    }
269    m_ResultListener.preProcess(this);
270  }
271 
272  /**
273   * Perform any postprocessing. When this method is called, it indicates
274   * that no more requests to generate results for the current experiment
275   * will be sent.
276   *
277   * @throws Exception if an error occurs
278   */
279  public void postProcess() throws Exception {
280
281    m_ResultListener.postProcess(this);
282
283    if (m_debugOutput) {
284      if (m_ZipDest != null) {
285        m_ZipDest.finished();
286        m_ZipDest = null;
287      }
288    }
289  }
290 
291  /**
292   * Gets the keys for a specified run number. Different run
293   * numbers correspond to different randomizations of the data. Keys
294   * produced should be sent to the current ResultListener
295   *
296   * @param run the run number to get keys for.
297   * @throws Exception if a problem occurs while getting the keys
298   */
299  public void doRunKeys(int run) throws Exception {
300    if (m_Instances == null) {
301      throw new Exception("No Instances set");
302    }
303    /*    // Randomize on a copy of the original dataset
304    Instances runInstances = new Instances(m_Instances);
305    runInstances.randomize(new Random(run));
306    if (runInstances.classAttribute().isNominal()) {
307      runInstances.stratify(m_NumFolds);
308      } */
309    for (int fold = 0; fold < m_NumFolds; fold++) {
310      // Add in some fields to the key like run and fold number, dataset name
311      Object [] seKey = m_SplitEvaluator.getKey();
312      Object [] key = new Object [seKey.length + 3];
313      key[0] = Utils.backQuoteChars(m_Instances.relationName());
314      key[1] = "" + run;
315      key[2] = "" + (fold + 1);
316      System.arraycopy(seKey, 0, key, 3, seKey.length);
317      if (m_ResultListener.isResultRequired(this, key)) {
318        try {
319          m_ResultListener.acceptResult(this, key, null);
320        } catch (Exception ex) {
321          // Save the train and test datasets for debugging purposes?
322          throw ex;
323        }
324      }
325    }
326  }
327
328
329  /**
330   * Gets the results for a specified run number. Different run
331   * numbers correspond to different randomizations of the data. Results
332   * produced should be sent to the current ResultListener
333   *
334   * @param run the run number to get results for.
335   * @throws Exception if a problem occurs while getting the results
336   */
337  public void doRun(int run) throws Exception {
338
339    if (getRawOutput()) {
340      if (m_ZipDest == null) {
341        m_ZipDest = new OutputZipper(m_OutputFile);
342      }
343    }
344
345    if (m_Instances == null) {
346      throw new Exception("No Instances set");
347    }
348    // Randomize on a copy of the original dataset
349    Instances runInstances = new Instances(m_Instances);
350    Random random = new Random(run);
351    runInstances.randomize(random);
352    if (runInstances.classAttribute().isNominal()) {
353      runInstances.stratify(m_NumFolds);
354    }
355    for (int fold = 0; fold < m_NumFolds; fold++) {
356      // Add in some fields to the key like run and fold number, dataset name
357      Object [] seKey = m_SplitEvaluator.getKey();
358      Object [] key = new Object [seKey.length + 3];
359      key[0] =  Utils.backQuoteChars(m_Instances.relationName());
360      key[1] = "" + run;
361      key[2] = "" + (fold + 1);
362      System.arraycopy(seKey, 0, key, 3, seKey.length);
363      if (m_ResultListener.isResultRequired(this, key)) {
364        Instances train = runInstances.trainCV(m_NumFolds, fold, random);
365        Instances test = runInstances.testCV(m_NumFolds, fold);
366        try {
367          Object [] seResults = m_SplitEvaluator.getResult(train, test);
368          Object [] results = new Object [seResults.length + 1];
369          results[0] = getTimestamp();
370          System.arraycopy(seResults, 0, results, 1,
371                           seResults.length);
372          if (m_debugOutput) {
373            String resultName = (""+run+"."+(fold+1)+"."
374              + Utils.backQuoteChars(runInstances.relationName())
375              +"."
376              +m_SplitEvaluator.toString()).replace(' ','_');
377            resultName = Utils.removeSubstring(resultName, 
378                                               "weka.classifiers.");
379            resultName = Utils.removeSubstring(resultName, 
380                                               "weka.filters.");
381            resultName = Utils.removeSubstring(resultName, 
382                                               "weka.attributeSelection.");
383            m_ZipDest.zipit(m_SplitEvaluator.getRawResultOutput(), resultName);
384          }
385          m_ResultListener.acceptResult(this, key, results);
386        } catch (Exception ex) {
387          // Save the train and test datasets for debugging purposes?
388          throw ex;
389        }
390      }
391    }
392  }
393
394  /**
395   * Gets the names of each of the columns produced for a single run.
396   * This method should really be static.
397   *
398   * @return an array containing the name of each column
399   */
400  public String [] getKeyNames() {
401
402    String [] keyNames = m_SplitEvaluator.getKeyNames();
403    // Add in the names of our extra key fields
404    String [] newKeyNames = new String [keyNames.length + 3];
405    newKeyNames[0] = DATASET_FIELD_NAME;
406    newKeyNames[1] = RUN_FIELD_NAME;
407    newKeyNames[2] = FOLD_FIELD_NAME;
408    System.arraycopy(keyNames, 0, newKeyNames, 3, keyNames.length);
409    return newKeyNames;
410  }
411
412  /**
413   * Gets the data types of each of the columns produced for a single run.
414   * This method should really be static.
415   *
416   * @return an array containing objects of the type of each column. The
417   * objects should be Strings, or Doubles.
418   */
419  public Object [] getKeyTypes() {
420
421    Object [] keyTypes = m_SplitEvaluator.getKeyTypes();
422    // Add in the types of our extra fields
423    Object [] newKeyTypes = new String [keyTypes.length + 3];
424    newKeyTypes[0] = new String();
425    newKeyTypes[1] = new String();
426    newKeyTypes[2] = new String();
427    System.arraycopy(keyTypes, 0, newKeyTypes, 3, keyTypes.length);
428    return newKeyTypes;
429  }
430
431  /**
432   * Gets the names of each of the columns produced for a single run.
433   * This method should really be static.
434   *
435   * @return an array containing the name of each column
436   */
437  public String [] getResultNames() {
438
439    String [] resultNames = m_SplitEvaluator.getResultNames();
440    // Add in the names of our extra Result fields
441    String [] newResultNames = new String [resultNames.length + 1];
442    newResultNames[0] = TIMESTAMP_FIELD_NAME;
443    System.arraycopy(resultNames, 0, newResultNames, 1, resultNames.length);
444    return newResultNames;
445  }
446
447  /**
448   * Gets the data types of each of the columns produced for a single run.
449   * This method should really be static.
450   *
451   * @return an array containing objects of the type of each column. The
452   * objects should be Strings, or Doubles.
453   */
454  public Object [] getResultTypes() {
455
456    Object [] resultTypes = m_SplitEvaluator.getResultTypes();
457    // Add in the types of our extra Result fields
458    Object [] newResultTypes = new Object [resultTypes.length + 1];
459    newResultTypes[0] = new Double(0);
460    System.arraycopy(resultTypes, 0, newResultTypes, 1, resultTypes.length);
461    return newResultTypes;
462  }
463
464  /**
465   * Gets a description of the internal settings of the result
466   * producer, sufficient for distinguishing a ResultProducer
467   * instance from another with different settings (ignoring
468   * those settings set through this interface). For example,
469   * a cross-validation ResultProducer may have a setting for the
470   * number of folds. For a given state, the results produced should
471   * be compatible. Typically if a ResultProducer is an OptionHandler,
472   * this string will represent the command line arguments required
473   * to set the ResultProducer to that state.
474   *
475   * @return the description of the ResultProducer state, or null
476   * if no state is defined
477   */
478  public String getCompatibilityState() {
479
480    String result = "-X " + m_NumFolds + " " ;
481    if (m_SplitEvaluator == null) {
482      result += "<null SplitEvaluator>";
483    } else {
484      result += "-W " + m_SplitEvaluator.getClass().getName();
485    }
486    return result + " --";
487  }
488
489  /**
490   * Returns the tip text for this property
491   * @return tip text for this property suitable for
492   * displaying in the explorer/experimenter gui
493   */
494  public String outputFileTipText() {
495    return "Set the destination for saving raw output. If the rawOutput "
496      +"option is selected, then output from the splitEvaluator for "
497      +"individual folds is saved. If the destination is a directory, "
498      +"then each output is saved to an individual gzip file; if the "
499      +"destination is a file, then each output is saved as an entry "
500      +"in a zip file.";
501  }
502
503  /**
504   * Get the value of OutputFile.
505   *
506   * @return Value of OutputFile.
507   */
508  public File getOutputFile() {
509   
510    return m_OutputFile;
511  }
512 
513  /**
514   * Set the value of OutputFile.
515   *
516   * @param newOutputFile Value to assign to OutputFile.
517   */
518  public void setOutputFile(File newOutputFile) {
519   
520    m_OutputFile = newOutputFile;
521  } 
522
523  /**
524   * Returns the tip text for this property
525   * @return tip text for this property suitable for
526   * displaying in the explorer/experimenter gui
527   */
528  public String numFoldsTipText() {
529    return "Number of folds to use in cross validation.";
530  }
531
532  /**
533   * Get the value of NumFolds.
534   *
535   * @return Value of NumFolds.
536   */
537  public int getNumFolds() {
538   
539    return m_NumFolds;
540  }
541 
542  /**
543   * Set the value of NumFolds.
544   *
545   * @param newNumFolds Value to assign to NumFolds.
546   */
547  public void setNumFolds(int newNumFolds) {
548   
549    m_NumFolds = newNumFolds;
550  }
551
552  /**
553   * Returns the tip text for this property
554   * @return tip text for this property suitable for
555   * displaying in the explorer/experimenter gui
556   */
557  public String rawOutputTipText() {
558    return "Save raw output (useful for debugging). If set, then output is "
559      +"sent to the destination specified by outputFile";
560  }
561
562  /**
563   * Get if raw split evaluator output is to be saved
564   * @return true if raw split evalutor output is to be saved
565   */
566  public boolean getRawOutput() {
567    return m_debugOutput;
568  }
569 
570  /**
571   * Set to true if raw split evaluator output is to be saved
572   * @param d true if output is to be saved
573   */
574  public void setRawOutput(boolean d) {
575    m_debugOutput = d;
576  }
577
578  /**
579   * Returns the tip text for this property
580   * @return tip text for this property suitable for
581   * displaying in the explorer/experimenter gui
582   */
583  public String splitEvaluatorTipText() {
584    return "The evaluator to apply to the cross validation folds. "
585      +"This may be a classifier, regression scheme etc.";
586  }
587 
588  /**
589   * Get the SplitEvaluator.
590   *
591   * @return the SplitEvaluator.
592   */
593  public SplitEvaluator getSplitEvaluator() {
594   
595    return m_SplitEvaluator;
596  }
597 
598  /**
599   * Set the SplitEvaluator.
600   *
601   * @param newSplitEvaluator new SplitEvaluator to use.
602   */
603  public void setSplitEvaluator(SplitEvaluator newSplitEvaluator) {
604   
605    m_SplitEvaluator = newSplitEvaluator;
606    m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
607  }
608
609  /**
610   * Returns an enumeration describing the available options..
611   *
612   * @return an enumeration of all the available options.
613   */
614  public Enumeration listOptions() {
615
616    Vector newVector = new Vector(4);
617
618    newVector.addElement(new Option(
619             "\tThe number of folds to use for the cross-validation.\n"
620              +"\t(default 10)", 
621             "X", 1, 
622             "-X <number of folds>"));
623
624    newVector.addElement(new Option(
625             "Save raw split evaluator output.",
626             "D",0,"-D"));
627
628    newVector.addElement(new Option(
629             "\tThe filename where raw output will be stored.\n"
630             +"\tIf a directory name is specified then then individual\n"
631             +"\toutputs will be gzipped, otherwise all output will be\n"
632             +"\tzipped to the named file. Use in conjuction with -D."
633             +"\t(default splitEvalutorOut.zip)", 
634             "O", 1, 
635             "-O <file/directory name/path>"));
636
637    newVector.addElement(new Option(
638             "\tThe full class name of a SplitEvaluator.\n"
639              +"\teg: weka.experiment.ClassifierSplitEvaluator", 
640             "W", 1, 
641             "-W <class name>"));
642
643    if ((m_SplitEvaluator != null) &&
644        (m_SplitEvaluator instanceof OptionHandler)) {
645      newVector.addElement(new Option(
646             "",
647             "", 0, "\nOptions specific to split evaluator "
648             + m_SplitEvaluator.getClass().getName() + ":"));
649      Enumeration enu = ((OptionHandler)m_SplitEvaluator).listOptions();
650      while (enu.hasMoreElements()) {
651        newVector.addElement(enu.nextElement());
652      }
653    }
654    return newVector.elements();
655  }
656
657  /**
658   * Parses a given list of options. <p/>
659   *
660   <!-- options-start -->
661   * Valid options are: <p/>
662   *
663   * <pre> -X &lt;number of folds&gt;
664   *  The number of folds to use for the cross-validation.
665   *  (default 10)</pre>
666   *
667   * <pre> -D
668   * Save raw split evaluator output.</pre>
669   *
670   * <pre> -O &lt;file/directory name/path&gt;
671   *  The filename where raw output will be stored.
672   *  If a directory name is specified then then individual
673   *  outputs will be gzipped, otherwise all output will be
674   *  zipped to the named file. Use in conjuction with -D. (default splitEvalutorOut.zip)</pre>
675   *
676   * <pre> -W &lt;class name&gt;
677   *  The full class name of a SplitEvaluator.
678   *  eg: weka.experiment.ClassifierSplitEvaluator</pre>
679   *
680   * <pre>
681   * Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
682   * </pre>
683   *
684   * <pre> -W &lt;class name&gt;
685   *  The full class name of the classifier.
686   *  eg: weka.classifiers.bayes.NaiveBayes</pre>
687   *
688   * <pre> -C &lt;index&gt;
689   *  The index of the class for which IR statistics
690   *  are to be output. (default 1)</pre>
691   *
692   * <pre> -I &lt;index&gt;
693   *  The index of an attribute to output in the
694   *  results. This attribute should identify an
695   *  instance in order to know which instances are
696   *  in the test set of a cross validation. if 0
697   *  no output (default 0).</pre>
698   *
699   * <pre> -P
700   *  Add target and prediction columns to the result
701   *  for each fold.</pre>
702   *
703   * <pre>
704   * Options specific to classifier weka.classifiers.rules.ZeroR:
705   * </pre>
706   *
707   * <pre> -D
708   *  If set, classifier is run in debug mode and
709   *  may output additional info to the console</pre>
710   *
711   <!-- options-end -->
712   *
713   * All options after -- will be passed to the split evaluator.
714   *
715   * @param options the list of options as an array of strings
716   * @throws Exception if an option is not supported
717   */
718  public void setOptions(String[] options) throws Exception {
719   
720    setRawOutput(Utils.getFlag('D', options));
721
722    String fName = Utils.getOption('O', options);
723    if (fName.length() != 0) {
724      setOutputFile(new File(fName));
725    }
726
727    String numFolds = Utils.getOption('X', options);
728    if (numFolds.length() != 0) {
729      setNumFolds(Integer.parseInt(numFolds));
730    } else {
731      setNumFolds(10);
732    }
733
734    String seName = Utils.getOption('W', options);
735    if (seName.length() == 0) {
736      throw new Exception("A SplitEvaluator must be specified with"
737                          + " the -W option.");
738    }
739    // Do it first without options, so if an exception is thrown during
740    // the option setting, listOptions will contain options for the actual
741    // SE.
742    setSplitEvaluator((SplitEvaluator)Utils.forName(
743                      SplitEvaluator.class,
744                      seName,
745                      null));
746    if (getSplitEvaluator() instanceof OptionHandler) {
747      ((OptionHandler) getSplitEvaluator())
748        .setOptions(Utils.partitionOptions(options));
749    }
750  }
751
752  /**
753   * Gets the current settings of the result producer.
754   *
755   * @return an array of strings suitable for passing to setOptions
756   */
757  public String [] getOptions() {
758
759    String [] seOptions = new String [0];
760    if ((m_SplitEvaluator != null) && 
761        (m_SplitEvaluator instanceof OptionHandler)) {
762      seOptions = ((OptionHandler)m_SplitEvaluator).getOptions();
763    }
764   
765    String [] options = new String [seOptions.length + 8];
766    int current = 0;
767
768    options[current++] = "-X"; options[current++] = "" + getNumFolds();
769
770    if (getRawOutput()) {
771      options[current++] = "-D";
772    }
773
774    options[current++] = "-O"; 
775    options[current++] = getOutputFile().getName();
776   
777    if (getSplitEvaluator() != null) {
778      options[current++] = "-W";
779      options[current++] = getSplitEvaluator().getClass().getName();
780    }
781    options[current++] = "--";
782
783    System.arraycopy(seOptions, 0, options, current, 
784                     seOptions.length);
785    current += seOptions.length;
786    while (current < options.length) {
787      options[current++] = "";
788    }
789    return options;
790  }
791
792  /**
793   * Gets a text descrption of the result producer.
794   *
795   * @return a text description of the result producer.
796   */
797  public String toString() {
798
799    String result = "CrossValidationResultProducer: ";
800    result += getCompatibilityState();
801    if (m_Instances == null) {
802      result += ": <null Instances>";
803    } else {
804      result += ": " +  Utils.backQuoteChars(m_Instances.relationName());
805    }
806    return result;
807  }
808 
  /**
   * Returns the revision string extracted from the CVS keyword.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.17 $");
  }
817   
818  /**
819   * Quick test of timestamp
820   *
821   * @param args        the commandline options
822   */
823  public static void main(String [] args) {
824   
825    System.err.println(Utils.doubleToString(getTimestamp().doubleValue(), 4));
826  }
827} // CrossValidationResultProducer
Note: See TracBrowser for help on using the repository browser.