/**
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * GaussianProcesses.java
 * Copyright (C) 2005-2009 University of Waikato
 */
21 | |
---|
22 | package weka.classifiers.functions; |
---|
23 | |
---|
24 | |
---|
25 | import weka.classifiers.Classifier; |
---|
26 | import weka.classifiers.AbstractClassifier; |
---|
27 | import weka.classifiers.ConditionalDensityEstimator; |
---|
28 | import weka.classifiers.Evaluation; |
---|
29 | import weka.classifiers.IntervalEstimator; |
---|
30 | import weka.classifiers.functions.supportVector.CachedKernel; |
---|
31 | import weka.classifiers.functions.supportVector.Kernel; |
---|
32 | import weka.classifiers.functions.supportVector.PolyKernel; |
---|
33 | import weka.classifiers.functions.supportVector.RBFKernel; |
---|
34 | import weka.core.Capabilities; |
---|
35 | import weka.core.Instance; |
---|
36 | import weka.core.Instances; |
---|
37 | import weka.core.matrix.Matrix; |
---|
38 | import weka.core.Option; |
---|
39 | import weka.core.OptionHandler; |
---|
40 | import weka.core.SelectedTag; |
---|
41 | import weka.core.Statistics; |
---|
42 | import weka.core.Tag; |
---|
43 | import weka.core.TechnicalInformation; |
---|
44 | import weka.core.TechnicalInformationHandler; |
---|
45 | import weka.core.WeightedInstancesHandler; |
---|
46 | import weka.core.Utils; |
---|
47 | import weka.core.Capabilities.Capability; |
---|
48 | import weka.core.TechnicalInformation.Field; |
---|
49 | import weka.core.TechnicalInformation.Type; |
---|
50 | import weka.filters.Filter; |
---|
51 | import weka.filters.unsupervised.attribute.NominalToBinary; |
---|
52 | import weka.filters.unsupervised.attribute.Normalize; |
---|
53 | import weka.filters.unsupervised.attribute.ReplaceMissingValues; |
---|
54 | import weka.filters.unsupervised.attribute.Standardize; |
---|
55 | |
---|
56 | import java.io.FileReader; |
---|
57 | import java.io.Serializable; |
---|
58 | import java.util.Enumeration; |
---|
59 | import java.util.Vector; |
---|
60 | |
---|
61 | /** |
---|
62 | * <!-- globalinfo-start --> |
---|
63 | * Implements Gaussian processes for |
---|
64 | * regression without hyperparameter-tuning. To make choosing an |
---|
65 | * appropriate noise level easier, this implementation applies |
---|
66 | * normalization/standardization to the target attribute as well (if |
---|
67 | * normalization/standardizaton is turned on). Missing values |
---|
68 | * are replaced by the global mean/mode. Nominal attributes are |
---|
69 | * converted to binary ones. |
---|
70 | * <!-- globalinfo-end --> |
---|
71 | * |
---|
72 | * <!-- technical-bibtex-start --> BibTeX: |
---|
73 | * |
---|
74 | * <pre> |
---|
75 | * @misc{Mackay1998, |
---|
76 | * address = {Dept. of Physics, Cambridge University, UK}, |
---|
77 | * author = {David J.C. Mackay}, |
---|
78 | * title = {Introduction to Gaussian Processes}, |
---|
79 | * year = {1998}, |
---|
80 | * PS = {http://wol.ra.phy.cam.ac.uk/mackay/gpB.ps.gz} |
---|
81 | * } |
---|
82 | * </pre> |
---|
83 | * |
---|
84 | * <p/> <!-- technical-bibtex-end --> |
---|
85 | * |
---|
86 | * <!-- options-start --> Valid options are: <p/> |
---|
87 | * |
---|
88 | * <pre> |
---|
89 | * -D |
---|
90 | * If set, classifier is run in debug mode and |
---|
91 | * may output additional info to the console |
---|
92 | * </pre> |
---|
93 | * |
---|
94 | * <pre> |
---|
95 | * -L <double> |
---|
96 | * Level of Gaussian Noise. (default 0.1) |
---|
97 | * </pre> |
---|
98 | * |
---|
99 | * <pre> |
---|
100 | * -N |
---|
101 | * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) |
---|
102 | * </pre> |
---|
103 | * |
---|
104 | * <pre> |
---|
105 | * -K <classname and parameters> |
---|
106 | * The Kernel to use. |
---|
107 | * (default: weka.classifiers.functions.supportVector.PolyKernel) |
---|
108 | * </pre> |
---|
109 | * |
---|
110 | * <pre> |
---|
111 | * |
---|
112 | * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel: |
---|
113 | * </pre> |
---|
114 | * |
---|
115 | * <pre> |
---|
116 | * -D |
---|
117 | * Enables debugging output (if available) to be printed. |
---|
118 | * (default: off) |
---|
119 | * </pre> |
---|
120 | * |
---|
121 | * <pre> |
---|
122 | * -no-checks |
---|
123 | * Turns off all checks - use with caution! |
---|
124 | * (default: checks on) |
---|
125 | * </pre> |
---|
126 | * |
---|
127 | * <pre> |
---|
128 | * -C <num> |
---|
129 | * The size of the cache (a prime number). |
---|
130 | * (default: 250007) |
---|
131 | * </pre> |
---|
132 | * |
---|
133 | * <pre> |
---|
134 | * -G <num> |
---|
135 | * The Gamma parameter. |
---|
136 | * (default: 0.01) |
---|
137 | * </pre> |
---|
138 | * |
---|
139 | * <!-- options-end --> |
---|
140 | * |
---|
141 | * @author Kurt Driessens (kurtd@cs.waikato.ac.nz) |
---|
142 | * @author Remco Bouckaert (remco@cs.waikato.ac.nz) |
---|
143 | * @version $Revision: 5952 $ |
---|
144 | */ |
---|
public class GaussianProcesses extends AbstractClassifier implements OptionHandler, IntervalEstimator,
    ConditionalDensityEstimator,
    TechnicalInformationHandler, WeightedInstancesHandler {

  /** for serialization */
  static final long serialVersionUID = -8620066949967678545L;

  /** The filter used to make attributes numeric (null if not needed/used). */
  protected NominalToBinary m_NominalToBinary;

  /** filter type: normalizes the data */
  public static final int FILTER_NORMALIZE = 0;

  /** filter type: standardizes the data */
  public static final int FILTER_STANDARDIZE = 1;

  /** filter type: no filter */
  public static final int FILTER_NONE = 2;

  /** The available filter modes to apply to the training data */
  public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"),
      new Tag(FILTER_STANDARDIZE, "Standardize training data"),
      new Tag(FILTER_NONE, "No normalization/standardization"), };

  /** The filter used to standardize/normalize all values (null if FILTER_NONE). */
  protected Filter m_Filter = null;

  /** Whether to normalize/standardize/neither (one of the FILTER_* constants) */
  protected int m_filterType = FILTER_NORMALIZE;

  /** The filter used to get rid of missing values (null when checks are off). */
  protected ReplaceMissingValues m_Missing;

  /**
   * Turn off all checks and conversions? Turning them off assumes that data
   * is purely numeric, doesn't contain any missing values, and has a numeric
   * class.
   */
  protected boolean m_checksTurnedOff = false;

  /** Gaussian Noise Value (added to the diagonal of the covariance matrix). */
  protected double m_delta = 1;

  /**
   * The parameters of the linear transformation realized by the filter on the
   * class attribute: filtered = m_Alin * original + m_Blin
   * (identity, i.e. m_Alin = 1 and m_Blin = 0, when no filter is used).
   */
  protected double m_Alin;
  protected double m_Blin;

  /** Kernel to use */
  protected Kernel m_kernel = new PolyKernel();

  /** The number of training instances */
  protected int m_NumTrain = 0;

  /** The average target value of the training data (in filtered space). */
  protected double m_avg_target;

  /** (negative) covariance matrix in symmetric matrix representation (lower triangle only) **/
  public double[][] m_L;

  /** The vector of target values (premultiplied by the inverted covariance matrix). */
  protected Matrix m_t;
209 | |
---|
210 | /** |
---|
211 | * Returns a string describing classifier |
---|
212 | * |
---|
213 | * @return a description suitable for displaying in the |
---|
214 | * explorer/experimenter gui |
---|
215 | */ |
---|
216 | public String globalInfo() { |
---|
217 | |
---|
218 | return " Implements Gaussian processes for " |
---|
219 | + "regression without hyperparameter-tuning. To make choosing an " |
---|
220 | + "appropriate noise level easier, this implementation applies " |
---|
221 | + "normalization/standardization to the target attribute as well " |
---|
222 | + "as the other attributes (if " |
---|
223 | + " normalization/standardizaton is turned on). Missing values " |
---|
224 | + "are replaced by the global mean/mode. Nominal attributes are " |
---|
225 | + "converted to binary ones. Note that kernel caching is turned off " |
---|
226 | + "if the kernel used implements CachedKernel."; |
---|
227 | } |
---|
228 | |
---|
229 | /** |
---|
230 | * Returns an instance of a TechnicalInformation object, containing detailed |
---|
231 | * information about the technical background of this class, e.g., paper |
---|
232 | * reference or book this class is based on. |
---|
233 | * |
---|
234 | * @return the technical information about this class |
---|
235 | */ |
---|
236 | public TechnicalInformation getTechnicalInformation() { |
---|
237 | TechnicalInformation result; |
---|
238 | |
---|
239 | result = new TechnicalInformation(Type.MISC); |
---|
240 | result.setValue(Field.AUTHOR, "David J.C. Mackay"); |
---|
241 | result.setValue(Field.YEAR, "1998"); |
---|
242 | result.setValue(Field.TITLE, "Introduction to Gaussian Processes"); |
---|
243 | result.setValue(Field.ADDRESS, "Dept. of Physics, Cambridge University, UK"); |
---|
244 | result.setValue(Field.PS, "http://wol.ra.phy.cam.ac.uk/mackay/gpB.ps.gz"); |
---|
245 | |
---|
246 | return result; |
---|
247 | } |
---|
248 | |
---|
249 | /** |
---|
250 | * Returns default capabilities of the classifier. |
---|
251 | * |
---|
252 | * @return the capabilities of this classifier |
---|
253 | */ |
---|
254 | public Capabilities getCapabilities() { |
---|
255 | Capabilities result = getKernel().getCapabilities(); |
---|
256 | result.setOwner(this); |
---|
257 | |
---|
258 | // attribute |
---|
259 | result.enableAllAttributeDependencies(); |
---|
260 | // with NominalToBinary we can also handle nominal attributes, but only |
---|
261 | // if the kernel can handle numeric attributes |
---|
262 | if (result.handles(Capability.NUMERIC_ATTRIBUTES)) |
---|
263 | result.enable(Capability.NOMINAL_ATTRIBUTES); |
---|
264 | result.enable(Capability.MISSING_VALUES); |
---|
265 | |
---|
266 | // class |
---|
267 | result.disableAllClasses(); |
---|
268 | result.disableAllClassDependencies(); |
---|
269 | result.enable(Capability.NUMERIC_CLASS); |
---|
270 | result.enable(Capability.DATE_CLASS); |
---|
271 | result.enable(Capability.MISSING_CLASS_VALUES); |
---|
272 | |
---|
273 | return result; |
---|
274 | } |
---|
275 | |
---|
276 | /** |
---|
277 | * Method for building the classifier. |
---|
278 | * |
---|
279 | * @param insts |
---|
280 | * the set of training instances |
---|
281 | * @throws Exception |
---|
282 | * if the classifier can't be built successfully |
---|
283 | */ |
---|
284 | public void buildClassifier(Instances insts) throws Exception { |
---|
285 | |
---|
286 | /* check the set of training instances */ |
---|
287 | if (!m_checksTurnedOff) { |
---|
288 | // can classifier handle the data? |
---|
289 | getCapabilities().testWithFail(insts); |
---|
290 | |
---|
291 | // remove instances with missing class |
---|
292 | insts = new Instances(insts); |
---|
293 | insts.deleteWithMissingClass(); |
---|
294 | } |
---|
295 | |
---|
296 | if (!m_checksTurnedOff) { |
---|
297 | m_Missing = new ReplaceMissingValues(); |
---|
298 | m_Missing.setInputFormat(insts); |
---|
299 | insts = Filter.useFilter(insts, m_Missing); |
---|
300 | } else { |
---|
301 | m_Missing = null; |
---|
302 | } |
---|
303 | |
---|
304 | if (getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) { |
---|
305 | boolean onlyNumeric = true; |
---|
306 | if (!m_checksTurnedOff) { |
---|
307 | for (int i = 0; i < insts.numAttributes(); i++) { |
---|
308 | if (i != insts.classIndex()) { |
---|
309 | if (!insts.attribute(i).isNumeric()) { |
---|
310 | onlyNumeric = false; |
---|
311 | break; |
---|
312 | } |
---|
313 | } |
---|
314 | } |
---|
315 | } |
---|
316 | |
---|
317 | if (!onlyNumeric) { |
---|
318 | m_NominalToBinary = new NominalToBinary(); |
---|
319 | m_NominalToBinary.setInputFormat(insts); |
---|
320 | insts = Filter.useFilter(insts, m_NominalToBinary); |
---|
321 | } else { |
---|
322 | m_NominalToBinary = null; |
---|
323 | } |
---|
324 | } else { |
---|
325 | m_NominalToBinary = null; |
---|
326 | } |
---|
327 | |
---|
328 | if (m_filterType == FILTER_STANDARDIZE) { |
---|
329 | m_Filter = new Standardize(); |
---|
330 | ((Standardize)m_Filter).setIgnoreClass(true); |
---|
331 | m_Filter.setInputFormat(insts); |
---|
332 | insts = Filter.useFilter(insts, m_Filter); |
---|
333 | } else if (m_filterType == FILTER_NORMALIZE) { |
---|
334 | m_Filter = new Normalize(); |
---|
335 | ((Normalize)m_Filter).setIgnoreClass(true); |
---|
336 | m_Filter.setInputFormat(insts); |
---|
337 | insts = Filter.useFilter(insts, m_Filter); |
---|
338 | } else { |
---|
339 | m_Filter = null; |
---|
340 | } |
---|
341 | |
---|
342 | m_NumTrain = insts.numInstances(); |
---|
343 | |
---|
344 | // determine which linear transformation has been |
---|
345 | // applied to the class by the filter |
---|
346 | if (m_Filter != null) { |
---|
347 | Instance witness = (Instance) insts.instance(0).copy(); |
---|
348 | witness.setValue(insts.classIndex(), 0); |
---|
349 | m_Filter.input(witness); |
---|
350 | m_Filter.batchFinished(); |
---|
351 | Instance res = m_Filter.output(); |
---|
352 | m_Blin = res.value(insts.classIndex()); |
---|
353 | witness.setValue(insts.classIndex(), 1); |
---|
354 | m_Filter.input(witness); |
---|
355 | m_Filter.batchFinished(); |
---|
356 | res = m_Filter.output(); |
---|
357 | m_Alin = res.value(insts.classIndex()) - m_Blin; |
---|
358 | } else { |
---|
359 | m_Alin = 1.0; |
---|
360 | m_Blin = 0.0; |
---|
361 | } |
---|
362 | |
---|
363 | // Initialize kernel |
---|
364 | try { |
---|
365 | CachedKernel cachedKernel = (CachedKernel) m_kernel; |
---|
366 | cachedKernel.setCacheSize(0); |
---|
367 | } catch (Exception e) { |
---|
368 | // ignore |
---|
369 | } |
---|
370 | m_kernel.buildKernel(insts); |
---|
371 | |
---|
372 | // Compute average target value |
---|
373 | double sum = 0.0; |
---|
374 | for (int i = 0; i < insts.numInstances(); i++) { |
---|
375 | sum += insts.instance(i).classValue(); |
---|
376 | } |
---|
377 | m_avg_target = sum / insts.numInstances(); |
---|
378 | |
---|
379 | // initialize kernel matrix/covariance matrix |
---|
380 | int n = insts.numInstances(); |
---|
381 | m_L = new double[n][]; |
---|
382 | for (int i = 0; i < n; i++) { |
---|
383 | m_L[i] = new double[i+1]; |
---|
384 | } |
---|
385 | double kv = 0; |
---|
386 | for (int i = 0; i < n; i++) { |
---|
387 | for (int j = 0; j < i; j++) { |
---|
388 | kv = m_kernel.eval(i, j, insts.instance(i)); |
---|
389 | m_L[i][j] = kv; |
---|
390 | } |
---|
391 | kv = m_kernel.eval(i, i, insts.instance(i)); |
---|
392 | m_L[i][i] = kv + m_delta * m_delta; |
---|
393 | } |
---|
394 | |
---|
395 | // Calculate inverse matrix exploiting symmetry of covariance matrix |
---|
396 | // NB this replaces the kernel matrix with (the negative of) its inverse and does |
---|
397 | // not require any extra memory for a solution matrix |
---|
398 | double [] tmprow = new double [n]; |
---|
399 | double tmp2 = 0, tmp = 0; |
---|
400 | for (int i = 0; i < n; i++) { |
---|
401 | tmp = -m_L[i][i]; |
---|
402 | m_L[i][i] = 1.0 / tmp; |
---|
403 | for (int j = 0; j < n; j++) { |
---|
404 | if (j != i) { |
---|
405 | if (j < i) { |
---|
406 | tmprow[j] = m_L[i][j]; |
---|
407 | m_L[i][j] /= tmp; |
---|
408 | tmp2 = m_L[i][j]; |
---|
409 | m_L[j][j] += tmp2 * tmp2 * tmp; |
---|
410 | } else if (j > i) { |
---|
411 | tmprow[j] = m_L[j][i]; |
---|
412 | m_L[j][i] /= tmp; |
---|
413 | tmp2 = m_L[j][i]; |
---|
414 | m_L[j][j] += tmp2 * tmp2 * tmp; |
---|
415 | } |
---|
416 | } |
---|
417 | } |
---|
418 | |
---|
419 | for (int j = 0; j < n; j++) { |
---|
420 | if (j != i) { |
---|
421 | if (i < j) { |
---|
422 | for (int k = 0; k < i; k++) { |
---|
423 | m_L[j][k] += tmprow[j] * m_L[i][k]; |
---|
424 | } |
---|
425 | } else { |
---|
426 | for (int k = 0; k < j; k++) { |
---|
427 | m_L[j][k] += tmprow[j] * m_L[i][k]; |
---|
428 | } |
---|
429 | |
---|
430 | } |
---|
431 | for (int k = i + 1; k < j; k++) { |
---|
432 | m_L[j][k] += tmprow[j] * m_L[k][i]; |
---|
433 | } |
---|
434 | } |
---|
435 | } |
---|
436 | } |
---|
437 | |
---|
438 | m_t = new Matrix(insts.numInstances(), 1); |
---|
439 | double [] tt = new double[n]; |
---|
440 | for (int i = 0; i < n; i++) { |
---|
441 | tt[i] = insts.instance(i).classValue() - m_avg_target; |
---|
442 | } |
---|
443 | |
---|
444 | // calculate m_t = tt . m_L |
---|
445 | for (int i = 0; i < n; i++) { |
---|
446 | double s = 0; |
---|
447 | for (int k = 0; k < i; k++) { |
---|
448 | s -= m_L[i][k] * tt[k]; |
---|
449 | } |
---|
450 | for (int k = i; k < n; k++) { |
---|
451 | s -= m_L[k][i] * tt[k]; |
---|
452 | } |
---|
453 | m_t.set(i, 0, s); |
---|
454 | } |
---|
455 | |
---|
456 | } // buildClassifier |
---|
457 | |
---|
458 | /** |
---|
459 | * Classifies a given instance. |
---|
460 | * |
---|
461 | * @param inst |
---|
462 | * the instance to be classified |
---|
463 | * @return the classification |
---|
464 | * @throws Exception |
---|
465 | * if instance could not be classified successfully |
---|
466 | */ |
---|
467 | public double classifyInstance(Instance inst) throws Exception { |
---|
468 | |
---|
469 | // Filter instance |
---|
470 | inst = filterInstance(inst); |
---|
471 | |
---|
472 | // Build K vector |
---|
473 | Matrix k = new Matrix(m_NumTrain, 1); |
---|
474 | for (int i = 0; i < m_NumTrain; i++) { |
---|
475 | k.set(i, 0, m_kernel.eval(-1, i, inst)); |
---|
476 | } |
---|
477 | |
---|
478 | double result = k.transpose().times(m_t).get(0, 0) + m_avg_target; |
---|
479 | result = (result - m_Blin) / m_Alin; |
---|
480 | |
---|
481 | return result; |
---|
482 | |
---|
483 | } |
---|
484 | |
---|
485 | /** |
---|
486 | * Filters an instance. |
---|
487 | */ |
---|
488 | protected Instance filterInstance(Instance inst) throws Exception { |
---|
489 | |
---|
490 | if (!m_checksTurnedOff) { |
---|
491 | m_Missing.input(inst); |
---|
492 | m_Missing.batchFinished(); |
---|
493 | inst = m_Missing.output(); |
---|
494 | } |
---|
495 | |
---|
496 | if (m_NominalToBinary != null) { |
---|
497 | m_NominalToBinary.input(inst); |
---|
498 | m_NominalToBinary.batchFinished(); |
---|
499 | inst = m_NominalToBinary.output(); |
---|
500 | } |
---|
501 | |
---|
502 | if (m_Filter != null) { |
---|
503 | m_Filter.input(inst); |
---|
504 | m_Filter.batchFinished(); |
---|
505 | inst = m_Filter.output(); |
---|
506 | } |
---|
507 | return inst; |
---|
508 | } |
---|
509 | |
---|
510 | /** |
---|
511 | * Computes standard deviation for given instance, without |
---|
512 | * transforming target back into original space. |
---|
513 | */ |
---|
514 | protected double computeStdDev(Instance inst, Matrix k) throws Exception { |
---|
515 | |
---|
516 | double kappa = m_kernel.eval(-1, -1, inst) + m_delta * m_delta; |
---|
517 | |
---|
518 | double s = 0; |
---|
519 | int n = m_L.length; |
---|
520 | for (int i = 0; i < n; i++) { |
---|
521 | double t = 0; |
---|
522 | for (int j = 0; j < n; j++) { |
---|
523 | t -= k.get(j,0) * (i>j? m_L[i][j] : m_L[j][i]); |
---|
524 | } |
---|
525 | s += t * k.get(i,0); |
---|
526 | } |
---|
527 | |
---|
528 | double sigma = m_delta; |
---|
529 | if (kappa > s) { |
---|
530 | sigma = Math.sqrt(kappa - s); |
---|
531 | } |
---|
532 | |
---|
533 | return sigma; |
---|
534 | } |
---|
535 | |
---|
536 | /** |
---|
537 | * Computes a prediction interval for the given instance and confidence |
---|
538 | * level. |
---|
539 | * |
---|
540 | * @param inst |
---|
541 | * the instance to make the prediction for |
---|
542 | * @param confidenceLevel |
---|
543 | * the percentage of cases the interval should cover |
---|
544 | * @return a 1*2 array that contains the boundaries of the interval |
---|
545 | * @throws Exception |
---|
546 | * if interval could not be estimated successfully |
---|
547 | */ |
---|
548 | public double[][] predictIntervals(Instance inst, double confidenceLevel) throws Exception { |
---|
549 | |
---|
550 | inst = filterInstance(inst); |
---|
551 | |
---|
552 | // Build K vector (and Kappa) |
---|
553 | Matrix k = new Matrix(m_NumTrain, 1); |
---|
554 | for (int i = 0; i < m_NumTrain; i++) { |
---|
555 | k.set(i, 0, m_kernel.eval(-1, i, inst)); |
---|
556 | } |
---|
557 | |
---|
558 | double estimate = k.transpose().times(m_t).get(0, 0) + m_avg_target; |
---|
559 | |
---|
560 | double sigma = computeStdDev(inst, k); |
---|
561 | |
---|
562 | confidenceLevel = 1.0 - ((1.0 - confidenceLevel) / 2.0); |
---|
563 | |
---|
564 | double z = Statistics.normalInverse(confidenceLevel); |
---|
565 | |
---|
566 | double[][] interval = new double[1][2]; |
---|
567 | |
---|
568 | interval[0][0] = estimate - z * sigma; |
---|
569 | interval[0][1] = estimate + z * sigma; |
---|
570 | |
---|
571 | interval[0][0] = (interval[0][0] - m_Blin) / m_Alin; |
---|
572 | interval[0][1] = (interval[0][1] - m_Blin) / m_Alin; |
---|
573 | |
---|
574 | return interval; |
---|
575 | |
---|
576 | } |
---|
577 | |
---|
578 | /** |
---|
579 | * Gives standard deviation of the prediction at the given instance. |
---|
580 | * |
---|
581 | * @param inst |
---|
582 | * the instance to get the standard deviation for |
---|
583 | * @return the standard deviation |
---|
584 | * @throws Exception |
---|
585 | * if computation fails |
---|
586 | */ |
---|
587 | public double getStandardDeviation(Instance inst) throws Exception { |
---|
588 | |
---|
589 | inst = filterInstance(inst); |
---|
590 | |
---|
591 | // Build K vector (and Kappa) |
---|
592 | Matrix k = new Matrix(m_NumTrain, 1); |
---|
593 | for (int i = 0; i < m_NumTrain; i++) { |
---|
594 | k.set(i, 0, m_kernel.eval(-1, i, inst)); |
---|
595 | } |
---|
596 | |
---|
597 | return computeStdDev(inst, k) / m_Alin; |
---|
598 | } |
---|
599 | |
---|
600 | /** |
---|
601 | * Returns natural logarithm of density estimate for given value based on given instance. |
---|
602 | * |
---|
603 | * @param instance the instance to make the prediction for. |
---|
604 | * @param value the value to make the prediction for. |
---|
605 | * @return the natural logarithm of the density estimate |
---|
606 | * @exception Exception if the density cannot be computed |
---|
607 | */ |
---|
608 | public double logDensity(Instance inst, double value) throws Exception { |
---|
609 | |
---|
610 | inst = filterInstance(inst); |
---|
611 | |
---|
612 | // Build K vector (and Kappa) |
---|
613 | Matrix k = new Matrix(m_NumTrain, 1); |
---|
614 | for (int i = 0; i < m_NumTrain; i++) { |
---|
615 | k.set(i, 0, m_kernel.eval(-1, i, inst)); |
---|
616 | } |
---|
617 | |
---|
618 | double estimate = k.transpose().times(m_t).get(0, 0) + m_avg_target; |
---|
619 | |
---|
620 | double sigma = computeStdDev(inst, k); |
---|
621 | |
---|
622 | // transform to GP space |
---|
623 | value = value * m_Alin + m_Blin; |
---|
624 | // center around estimate |
---|
625 | value = value - estimate; |
---|
626 | double z = -Math.log(sigma * Math.sqrt(2 * Math.PI)) |
---|
627 | - value * value /(2.0*sigma*sigma); |
---|
628 | |
---|
629 | return z + Math.log(m_Alin); |
---|
630 | } |
---|
631 | |
---|
632 | /** |
---|
633 | * Returns an enumeration describing the available options. |
---|
634 | * |
---|
635 | * @return an enumeration of all the available options. |
---|
636 | */ |
---|
637 | public Enumeration listOptions() { |
---|
638 | |
---|
639 | Vector<Option> result = new Vector<Option>(); |
---|
640 | |
---|
641 | Enumeration enm = super.listOptions(); |
---|
642 | while (enm.hasMoreElements()) |
---|
643 | result.addElement((Option)enm.nextElement()); |
---|
644 | |
---|
645 | result.addElement(new Option("\tLevel of Gaussian Noise wrt transformed target." + " (default 1)", "L", 1, "-L <double>")); |
---|
646 | |
---|
647 | result.addElement(new Option("\tWhether to 0=normalize/1=standardize/2=neither. " + "(default 0=normalize)", |
---|
648 | "N", 1, "-N")); |
---|
649 | |
---|
650 | result.addElement(new Option("\tThe Kernel to use.\n" |
---|
651 | + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)", "K", 1, |
---|
652 | "-K <classname and parameters>")); |
---|
653 | |
---|
654 | result.addElement(new Option("", "", 0, "\nOptions specific to kernel " + getKernel().getClass().getName() |
---|
655 | + ":")); |
---|
656 | |
---|
657 | enm = ((OptionHandler) getKernel()).listOptions(); |
---|
658 | while (enm.hasMoreElements()) |
---|
659 | result.addElement((Option)enm.nextElement()); |
---|
660 | |
---|
661 | return result.elements(); |
---|
662 | } |
---|
663 | |
---|
664 | /** |
---|
665 | * Parses a given list of options. <p/> |
---|
666 | * |
---|
667 | * <!-- options-start --> Valid options are: <p/> |
---|
668 | * |
---|
669 | * <pre> |
---|
670 | * -D |
---|
671 | * If set, classifier is run in debug mode and |
---|
672 | * may output additional info to the console |
---|
673 | * </pre> |
---|
674 | * |
---|
675 | * <pre> |
---|
676 | * -L <double> |
---|
677 | * Level of Gaussian Noise. (default 0.1) |
---|
678 | * </pre> |
---|
679 | * |
---|
680 | * <pre> |
---|
681 | * -M <double> |
---|
682 | * Level of Gaussian Noise for the class. (default 0.1) |
---|
683 | * </pre> |
---|
684 | * |
---|
685 | * <pre> |
---|
686 | * -N |
---|
687 | * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) |
---|
688 | * </pre> |
---|
689 | * |
---|
690 | * <pre> |
---|
691 | * -K <classname and parameters> |
---|
692 | * The Kernel to use. |
---|
693 | * (default: weka.classifiers.functions.supportVector.PolyKernel) |
---|
694 | * </pre> |
---|
695 | * |
---|
696 | * <pre> |
---|
697 | * |
---|
698 | * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel: |
---|
699 | * </pre> |
---|
700 | * |
---|
701 | * <pre> |
---|
702 | * -D |
---|
703 | * Enables debugging output (if available) to be printed. |
---|
704 | * (default: off) |
---|
705 | * </pre> |
---|
706 | * |
---|
707 | * <pre> |
---|
708 | * -no-checks |
---|
709 | * Turns off all checks - use with caution! |
---|
710 | * (default: checks on) |
---|
711 | * </pre> |
---|
712 | * |
---|
713 | * <pre> |
---|
714 | * -C <num> |
---|
715 | * The size of the cache (a prime number). |
---|
716 | * (default: 250007) |
---|
717 | * </pre> |
---|
718 | * |
---|
719 | * <pre> |
---|
720 | * -G <num> |
---|
721 | * The Gamma parameter. |
---|
722 | * (default: 0.01) |
---|
723 | * </pre> |
---|
724 | * |
---|
725 | * <!-- options-end --> |
---|
726 | * |
---|
727 | * @param options |
---|
728 | * the list of options as an array of strings |
---|
729 | * @throws Exception |
---|
730 | * if an option is not supported |
---|
731 | */ |
---|
732 | public void setOptions(String[] options) throws Exception { |
---|
733 | String tmpStr; |
---|
734 | String[] tmpOptions; |
---|
735 | |
---|
736 | tmpStr = Utils.getOption('L', options); |
---|
737 | if (tmpStr.length() != 0) |
---|
738 | setNoise(Double.parseDouble(tmpStr)); |
---|
739 | else |
---|
740 | setNoise(1); |
---|
741 | |
---|
742 | tmpStr = Utils.getOption('N', options); |
---|
743 | if (tmpStr.length() != 0) |
---|
744 | setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER)); |
---|
745 | else |
---|
746 | setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); |
---|
747 | |
---|
748 | tmpStr = Utils.getOption('K', options); |
---|
749 | tmpOptions = Utils.splitOptions(tmpStr); |
---|
750 | if (tmpOptions.length != 0) { |
---|
751 | tmpStr = tmpOptions[0]; |
---|
752 | tmpOptions[0] = ""; |
---|
753 | setKernel(Kernel.forName(tmpStr, tmpOptions)); |
---|
754 | } |
---|
755 | |
---|
756 | super.setOptions(options); |
---|
757 | } |
---|
758 | |
---|
759 | /** |
---|
760 | * Gets the current settings of the classifier. |
---|
761 | * |
---|
762 | * @return an array of strings suitable for passing to setOptions |
---|
763 | */ |
---|
764 | public String[] getOptions() { |
---|
765 | int i; |
---|
766 | Vector<String> result; |
---|
767 | String[] options; |
---|
768 | |
---|
769 | result = new Vector<String>(); |
---|
770 | options = super.getOptions(); |
---|
771 | for (i = 0; i < options.length; i++) |
---|
772 | result.addElement(options[i]); |
---|
773 | |
---|
774 | result.addElement("-L"); |
---|
775 | result.addElement("" + getNoise()); |
---|
776 | |
---|
777 | result.addElement("-N"); |
---|
778 | result.addElement("" + m_filterType); |
---|
779 | |
---|
780 | result.addElement("-K"); |
---|
781 | result.addElement("" + m_kernel.getClass().getName() + " " + Utils.joinOptions(m_kernel.getOptions())); |
---|
782 | |
---|
783 | return (String[]) result.toArray(new String[result.size()]); |
---|
784 | } |
---|
785 | |
---|
  /**
   * Returns the tip text for the kernel property.
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String kernelTipText() {
    return "The kernel to use.";
  }
795 | |
---|
  /**
   * Gets the kernel to use.
   *
   * @return the kernel
   */
  public Kernel getKernel() {
    return m_kernel;
  }
804 | |
---|
  /**
   * Sets the kernel to use.
   *
   * @param value
   *            the new kernel
   */
  public void setKernel(Kernel value) {
    m_kernel = value;
  }
814 | |
---|
  /**
   * Returns the tip text for the filterType property.
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String filterTypeTipText() {
    return "Determines how/if the data will be transformed.";
  }
824 | |
---|
  /**
   * Gets how the training data will be transformed. Will be one of
   * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
   *
   * @return the filtering mode
   */
  public SelectedTag getFilterType() {

    return new SelectedTag(m_filterType, TAGS_FILTER);
  }
835 | |
---|
836 | /** |
---|
837 | * Sets how the training data will be transformed. Should be one of |
---|
838 | * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. |
---|
839 | * |
---|
840 | * @param newType |
---|
841 | * the new filtering mode |
---|
842 | */ |
---|
843 | public void setFilterType(SelectedTag newType) { |
---|
844 | |
---|
845 | if (newType.getTags() == TAGS_FILTER) { |
---|
846 | m_filterType = newType.getSelectedTag().getID(); |
---|
847 | } |
---|
848 | } |
---|
849 | |
---|
  /**
   * Returns the tip text for the noise property.
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String noiseTipText() {
    return "The level of Gaussian Noise (added to the diagonal of the Covariance Matrix), after the " +
        "target has been normalized/standardized/left unchanged).";
  }
860 | |
---|
  /**
   * Get the value of noise.
   *
   * @return Value of noise.
   */
  public double getNoise() {
    return m_delta;
  }
869 | |
---|
  /**
   * Set the level of Gaussian Noise.
   *
   * @param v
   *            Value to assign to noise.
   */
  public void setNoise(double v) {
    m_delta = v;
  }
879 | |
---|
880 | /** |
---|
881 | * Prints out the classifier. |
---|
882 | * |
---|
883 | * @return a description of the classifier as a string |
---|
884 | */ |
---|
885 | public String toString() { |
---|
886 | |
---|
887 | StringBuffer text = new StringBuffer(); |
---|
888 | |
---|
889 | if (m_t == null) |
---|
890 | return "Gaussian Processes: No model built yet."; |
---|
891 | |
---|
892 | try { |
---|
893 | |
---|
894 | text.append("Gaussian Processes\n\n"); |
---|
895 | text.append("Kernel used:\n " + m_kernel.toString() + "\n\n"); |
---|
896 | |
---|
897 | text.append("All values shown based on: " + |
---|
898 | TAGS_FILTER[m_filterType].getReadable() + "\n\n"); |
---|
899 | |
---|
900 | |
---|
901 | text.append("Average Target Value : " + m_avg_target + "\n"); |
---|
902 | |
---|
903 | text.append("Inverted Covariance Matrix:\n"); |
---|
904 | double min = -m_L[0][0]; |
---|
905 | double max = -m_L[0][0]; |
---|
906 | for (int i = 0; i < m_NumTrain; i++) |
---|
907 | for (int j = 0; j <= i; j++) { |
---|
908 | if (-m_L[i][j] < min) |
---|
909 | min = -m_L[i][j]; |
---|
910 | else if (-m_L[i][j] > max) |
---|
911 | max = -m_L[i][j]; |
---|
912 | } |
---|
913 | text.append(" Lowest Value = " + min + "\n"); |
---|
914 | text.append(" Highest Value = " + max + "\n"); |
---|
915 | text.append("Inverted Covariance Matrix * Target-value Vector:\n"); |
---|
916 | min = m_t.get(0, 0); |
---|
917 | max = m_t.get(0, 0); |
---|
918 | for (int i = 0; i < m_NumTrain; i++) { |
---|
919 | if (m_t.get(i, 0) < min) |
---|
920 | min = m_t.get(i, 0); |
---|
921 | else if (m_t.get(i, 0) > max) |
---|
922 | max = m_t.get(i, 0); |
---|
923 | } |
---|
924 | text.append(" Lowest Value = " + min + "\n"); |
---|
925 | text.append(" Highest Value = " + max + "\n \n"); |
---|
926 | |
---|
927 | } catch (Exception e) { |
---|
928 | return "Can't print the classifier."; |
---|
929 | } |
---|
930 | |
---|
931 | return text.toString(); |
---|
932 | } |
---|
933 | |
---|
  /**
   * Main method for testing this class.
   *
   * @param argv
   *            the commandline parameters
   */
  public static void main(String[] argv) {

    runClassifier(new GaussianProcesses(), argv);
  }
944 | } |
---|