/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * ComplementNaiveBayes.java
 * Copyright (C) 2003 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.bayes;

import weka.classifiers.Classifier;
import weka.classifiers.AbstractClassifier;
import weka.core.Capabilities;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

/**
 <!-- globalinfo-start -->
 * Class for building and using a Complement class Naive Bayes classifier.<br/>
 * <br/>
 * For more information see, <br/>
 * <br/>
 * Jason D. Rennie, Lawrence Shih, Jaime Teevan, David R. Karger: Tackling the Poor Assumptions of Naive Bayes Text Classifiers. In: ICML, 616-623, 2003.<br/>
 * <br/>
 * P.S.: TF, IDF and length normalization transforms, as described in the paper, can be performed through weka.filters.unsupervised.attribute.StringToWordVector.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * @inproceedings{Rennie2003,
 *    author = {Jason D. Rennie and Lawrence Shih and Jaime Teevan and David R. Karger},
 *    booktitle = {ICML},
 *    pages = {616-623},
 *    publisher = {AAAI Press},
 *    title = {Tackling the Poor Assumptions of Naive Bayes Text Classifiers},
 *    year = {2003}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -N
 *  Normalize the word weights for each class
 * </pre>
 *
 * <pre> -S
 *  Smoothing value to avoid zero WordGivenClass probabilities (default=1.0).
 * </pre>
 *
 <!-- options-end -->
 *
 * @author Ashraf M. Kibriya (amk14@cs.waikato.ac.nz)
 * @version $Revision: 5928 $
 */
public class ComplementNaiveBayes extends AbstractClassifier
    implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler {

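  /*
   * Illustrative usage sketch (not part of the original source): the class
   * description above notes that TF, IDF and length normalization can be
   * applied with StringToWordVector, so a typical setup wraps this classifier
   * in a FilteredClassifier. The file name "reuters.arff" and the filter
   * settings are assumptions made for the example only.
   *
   *   Instances data =
   *       weka.core.converters.ConverterUtils.DataSource.read("reuters.arff");
   *   data.setClassIndex(data.numAttributes() - 1);
   *
   *   weka.filters.unsupervised.attribute.StringToWordVector stwv =
   *       new weka.filters.unsupervised.attribute.StringToWordVector();
   *   stwv.setTFTransform(true);
   *   stwv.setIDFTransform(true);
   *
   *   weka.classifiers.meta.FilteredClassifier fc =
   *       new weka.classifiers.meta.FilteredClassifier();
   *   fc.setFilter(stwv);
   *   fc.setClassifier(new ComplementNaiveBayes());
   *   fc.buildClassifier(data);
   */
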
  /** for serialization */
  static final long serialVersionUID = 7246302925903086397L;

  /**
     Weights of words for each class. Each weight is the log of the
     probability of a word (w) given the complement of a class (c),
     i.e. log(Pr[w|not c]), as computed by the complement naive Bayes
     model. The format of the matrix is:
     wordWeights[class][wordAttribute]
  */
  private double[][] wordWeights;

  /** Holds the smoothing value to avoid word probabilities of zero.<br>
      P.S.: According to the paper this is the alpha_i parameter.
   */
  private double smoothingParameter = 1.0;

  /** True if the word weights are to be normalized */
  private boolean m_normalizeWordWeights = false;

  /** Holds the number of class values present in the set of specified
      instances */
  private int numClasses;

  /** The instances header that will be used in toString */
  private Instances header;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public java.util.Enumeration listOptions() {
    FastVector newVector = new FastVector(2);
    newVector.addElement(
        new Option("\tNormalize the word weights for each class\n",
                   "N", 0, "-N"));
    newVector.addElement(
        new Option("\tSmoothing value to avoid zero WordGivenClass" +
                   " probabilities (default=1.0).\n",
                   "S", 1, "-S"));

    return newVector.elements();
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] options = new String[4];
    int current = 0;

    if (getNormalizeWordWeights())
      options[current++] = "-N";

    options[current++] = "-S";
    options[current++] = Double.toString(smoothingParameter);

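    // Pad any unused slots so the returned array always has the same length.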
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -N
   *  Normalize the word weights for each class
   * </pre>
   *
   * <pre> -S
   *  Smoothing value to avoid zero WordGivenClass probabilities (default=1.0).
   * </pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    setNormalizeWordWeights(Utils.getFlag('N', options));

    String val = Utils.getOption('S', options);
    if (val.length() != 0)
      setSmoothingParameter(Double.parseDouble(val));
    else
      setSmoothingParameter(1.0);
  }
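
  // Example (illustrative only): the options above can also be set
  // programmatically, e.g.
  //   ComplementNaiveBayes cnb = new ComplementNaiveBayes();
  //   cnb.setOptions(new String[] {"-N", "-S", "2.0"});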

  /**
   * Returns true if the word weights for each class are to be normalized
   *
   * @return true if the word weights are normalized
   */
  public boolean getNormalizeWordWeights() {
    return m_normalizeWordWeights;
  }

  /**
   * Sets whether the word weights for each class should be normalized
   *
   * @param doNormalize whether the word weights are to be normalized
   */
  public void setNormalizeWordWeights(boolean doNormalize) {
    m_normalizeWordWeights = doNormalize;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String normalizeWordWeightsTipText() {
    return "Normalizes the word weights for each class.";
  }

  /**
   * Gets the smoothing value to be used to avoid zero WordGivenClass
   * probabilities.
   *
   * @return the smoothing value
   */
  public double getSmoothingParameter() {
    return smoothingParameter;
  }

  /**
   * Sets the smoothing value used to avoid zero WordGivenClass probabilities
   *
   * @param val the new smoothing value
   */
  public void setSmoothingParameter(double val) {
    smoothingParameter = val;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String smoothingParameterTipText() {
    return "Sets the smoothing parameter to avoid zero WordGivenClass " +
           "probabilities (default=1.0).";
  }

  /**
   * Returns a string describing this classifier
   * @return a description of the classifier suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {

    return "Class for building and using a Complement class Naive Bayes " +
           "classifier.\n\nFor more information see, \n\n" +
           getTechnicalInformation().toString() + "\n\n" +
           "P.S.: TF, IDF and length normalization transforms, as " +
           "described in the paper, can be performed through " +
           "weka.filters.unsupervised.attribute.StringToWordVector.";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Jason D. Rennie and Lawrence Shih and Jaime Teevan and David R. Karger");
    result.setValue(Field.TITLE, "Tackling the Poor Assumptions of Naive Bayes Text Classifiers");
    result.setValue(Field.BOOKTITLE, "ICML");
    result.setValue(Field.YEAR, "2003");
    result.setValue(Field.PAGES, "616-623");
    result.setValue(Field.PUBLISHER, "AAAI Press");

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Generates the classifier.
   *
   * @param instances set of instances serving as training data
   * @throws Exception if the classifier has not been built successfully
   */
  public void buildClassifier(Instances instances) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(instances);

    // remove instances with missing class
    instances = new Instances(instances);
    instances.deleteWithMissingClass();

    numClasses = instances.numClasses();
    int numAttributes = instances.numAttributes();

    header = new Instances(instances, 0);
    double[][] ocrnceOfWordInClass = new double[numClasses][numAttributes];
    wordWeights = new double[numClasses][numAttributes];
    //double[] docsPerClass = new double[numClasses];
    double[] wordsPerClass = new double[numClasses];
    double totalWordOccurrences = 0;
    double sumOfSmoothingParams = (numAttributes - 1) * smoothingParameter;
    int classIndex = instances.instance(0).classIndex();
    Instance instance;
    int docClass;
    double numOccurrences;

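    // First pass over the training data: accumulate the (instance-weighted)
    // occurrence count of each word per class, the total word count per class,
    // and the overall word count. Row 0 of wordWeights temporarily holds the
    // per-word totals over all classes (see the note inside the loop).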
    java.util.Enumeration enumInsts = instances.enumerateInstances();
    while (enumInsts.hasMoreElements()) {
      instance = (Instance) enumInsts.nextElement();
      docClass = (int) instance.value(classIndex);
      //docsPerClass[docClass] += instance.weight();

      for (int a = 0; a < instance.numValues(); a++)
        if (instance.index(a) != instance.classIndex()) {
          if (!instance.isMissing(a)) {
            numOccurrences = instance.valueSparse(a) * instance.weight();
            if (numOccurrences < 0)
              throw new Exception("Numeric attribute" +
                                  " values must all be greater" +
                                  " than or equal to zero.");
            totalWordOccurrences += numOccurrences;
            wordsPerClass[docClass] += numOccurrences;
            ocrnceOfWordInClass[docClass][instance.index(a)] += numOccurrences;
            //For the time being wordWeights[0][i] will hold the total
            //occurrence of word i over all classes
            wordWeights[0][instance.index(a)] += numOccurrences;
          }
        }
    }

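    // Complement-class weight for word w and class c:
    //   wordWeights[c][w] = log( (occurrences of w outside c + smoothingParameter)
    //                            / (total word occurrences outside c + sumOfSmoothingParams) )
    // i.e. the smoothed log-probability of the word given the complement of
    // the class. Class 0 is handled last because wordWeights[0] temporarily
    // holds the per-word totals accumulated above.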
    //Calculating the complement class probability for all classes except 0
    for (int c = 1; c < numClasses; c++) {
      //total occurrence of words in classes other than c
      double totalWordOcrnces = totalWordOccurrences - wordsPerClass[c];

      for (int w = 0; w < numAttributes; w++) {
        if (w != classIndex) {
          //occurrence of w in classes other than c
          double ocrncesOfWord =
              wordWeights[0][w] - ocrnceOfWordInClass[c][w];

          wordWeights[c][w] =
              Math.log((ocrncesOfWord + smoothingParameter) /
                       (totalWordOcrnces + sumOfSmoothingParams));
        }
      }
    }

    //Now calculating the complement class probability for class 0
    for (int w = 0; w < numAttributes; w++) {
      if (w != classIndex) {
        //occurrence of w in classes other than class 0
        double ocrncesOfWord = wordWeights[0][w] - ocrnceOfWordInClass[0][w];
        //total occurrence of words in classes other than class 0
        double totalWordOcrnces = totalWordOccurrences - wordsPerClass[0];

        wordWeights[0][w] =
            Math.log((ocrncesOfWord + smoothingParameter) /
                     (totalWordOcrnces + sumOfSmoothingParams));
      }
    }

    //Normalizing weights
    if (m_normalizeWordWeights)
      for (int c = 0; c < numClasses; c++) {
        double sum = 0;
        for (int w = 0; w < numAttributes; w++) {
          if (w != classIndex)
            sum += Math.abs(wordWeights[c][w]);
        }
        for (int w = 0; w < numAttributes; w++) {
          if (w != classIndex) {
            wordWeights[c][w] = wordWeights[c][w] / sum;
          }
        }
      }

  }



  /**
   * Classifies a given instance. <p>
   *
   * The classification rule is: <br>
   * MinC(forAllWords(ti*Wci)) <br>
   * where <br>
   * ti is the frequency of word i in the given instance <br>
   * Wci is the weight of word i in Class c. <p>
   *
   * For more information see section 4.4 of the paper mentioned above
   * in the classifier's description.
   *
   * @param instance the instance to classify
   * @return the index of the class the instance is most likely to belong to
   * @throws Exception if the classifier has not been built yet.
   */
  public double classifyInstance(Instance instance) throws Exception {

    if (wordWeights == null)
      throw new Exception("Error. The classifier has not been built " +
                          "properly.");

    double[] valueForClass = new double[numClasses];
    double sumOfClassValues = 0;

    for (int c = 0; c < numClasses; c++) {
      double sumOfWordValues = 0;
      for (int w = 0; w < instance.numValues(); w++) {
        if (instance.index(w) != instance.classIndex()) {
          double freqOfWordInDoc = instance.valueSparse(w);
          sumOfWordValues += freqOfWordInDoc *
                             wordWeights[c][instance.index(w)];
        }
      }
      //valueForClass[c] = Math.log(probOfClass[c]) - sumOfWordValues;
      valueForClass[c] = sumOfWordValues;
      sumOfClassValues += valueForClass[c];
    }

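    // Pick the class with the smallest complement score (argmin): the weights
    // are log-probabilities under the complement of each class, so the lowest
    // total means the document is least well explained by the other classes.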
    int minidx = 0;
    for (int i = 0; i < numClasses; i++)
      if (valueForClass[i] < valueForClass[minidx])
        minidx = i;

    return minidx;
  }


  /**
   * Prints out the internal model built by the classifier. In this case
   * it prints out the word weights calculated when building the classifier.
   */
  public String toString() {
    if (wordWeights == null) {
      return "The classifier hasn't been built yet.";
    }

    int numAttributes = header.numAttributes();
    StringBuffer result =
        new StringBuffer("The word weights for each class are: \n" +
                         "------------------------------------\n\t");

    for (int c = 0; c < numClasses; c++)
      result.append(header.classAttribute().value(c)).append("\t");

    result.append("\n");

    for (int w = 0; w < numAttributes; w++) {
      result.append(header.attribute(w).name()).append("\t");
      for (int c = 0; c < numClasses; c++)
        result.append(Double.toString(wordWeights[c][w])).append("\t");
      result.append("\n");
    }

    return result.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5928 $");
  }

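  /*
   * Typical command-line invocation (illustrative; relies on the standard
   * Weka evaluation options handled by runClassifier, with the file name
   * assumed for the example):
   *
   *   java weka.classifiers.bayes.ComplementNaiveBayes -t train.arff -N -S 1.0
   */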
  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String[] argv) {
    runClassifier(new ComplementNaiveBayes(), argv);
  }
}