Search This Blog

Wednesday, 22 March 2017

Implement Candidate Elimination Algorithm in JAVA

Program:
package k;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class kl {
                static int sg = 0;

Data Compression using 2D Wavelet Analysis

(Type this code in editor then run it)

% Setup for a 2-D wavelet compression demo: load a sample image, select a
% region of interest, and pick the decomposition level and wavelet.
% NOTE(review): the actual decomposition/compression step (e.g. wavedec2 /
% wdencmp) is not shown in this snippet — confirm against the full post.
load woman;              % Load original image.
image(X)
title('Original Image')
colormap(map)
x = X(100:200,100:200);  % Select ROI
n = 5;                   % Decomposition Level
w = 'sym8';              % Near symmetric wavelet

Saturday, 18 March 2017

Python program for visualisation of shapefile


# Visualise an ESRI shapefile: draw every shape as a filled polygon,
# coloured by its index in the file.
# Portability: range() replaces the Python-2-only xrange() so the script
# runs under both Python 2 and Python 3.
import shapefile
import matplotlib.pyplot as plt
from numpy import array
from numpy import arange
import matplotlib.cm as cm
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection

sf = shapefile.Reader('D:/vector/India/IND_adm3.shp')

recs = sf.records()
shapes = sf.shapes()
Nshp = len(shapes)

# Second attribute field of each record (collected but only Nshp is used
# for colouring below).
cns = []
for nshp in range(Nshp):
    cns.append(recs[nshp][1])
cns = array(cns)

# Bind the colormap under a new name so the matplotlib.cm module is not
# shadowed (the original rebound the name `cm`).
cmap = cm.get_cmap('Dark2')
cccol = cmap(1. * arange(Nshp) / Nshp)

# --plot--
fig = plt.figure()
ax = fig.add_subplot(111)
for nshp in range(Nshp):
    ptchs = []
    pts = array(shapes[nshp].points)
    prt = shapes[nshp].parts
    # `parts` holds the start index of each ring; append the point count so
    # par[k]:par[k+1] slices out ring k.
    par = list(prt) + [pts.shape[0]]
    for pij in range(len(prt)):
        ptchs.append(Polygon(pts[par[pij]:par[pij + 1]]))
    # Add the collection ONCE per shape. The original called add_collection
    # inside the inner loop, re-adding the growing patch list for every
    # part and stacking duplicate collections on the axes.
    ax.add_collection(PatchCollection(ptchs, facecolor=cccol[nshp, :],
                                      edgecolor='k', linewidths=.1))
ax.set_xlim(-180, +180)
ax.set_ylim(-90, 90)
##fig.savefig('test.png')
plt.show()
     

Friday, 17 March 2017

Python program to print countryName, countryCode, minLat, maxLat, minLong,maxLong

# Print country name, ISO3 code and bounding box (min/max lat/long) for
# every feature in the TM_WORLD_BORDERS shapefile, sorted by name.
# print(...) with a single argument is valid in both Python 2 and 3,
# replacing the Python-2-only print statements.
from osgeo import ogr

shapefile = ogr.Open('D:/vector/TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp')

layer = shapefile.GetLayer(0)
countries = []
for i in range(layer.GetFeatureCount()):
    feature = layer.GetFeature(i)
    countryCode = feature.GetField('ISO3')
    countryName = feature.GetField('NAME')
    print('{}:{}'.format(countryCode, countryName))
    geometry = feature.GetGeometryRef()
    # GetEnvelope() returns (minX, maxX, minY, maxY), i.e. longitudes first.
    minLong, maxLong, minLat, maxLat = geometry.GetEnvelope()
    countries.append((countryName, countryCode, minLat, maxLat, minLong, maxLong))


# Tuples sort lexicographically, so this orders by country name.
countries.sort()
for name, code, minLat, maxLat, minLong, maxLong in countries:
    print('{} ({}) lat = {:.4f}..{:.4f}..Long={:.4f}..{:.4f}'.format(name, code, minLat, maxLat, minLong, maxLong))

Python program to extract Spatial reference of shape file

# Print the spatial reference (projection) of the first layer of a
# shapefile.  print(...) with one argument works in Python 2 and 3 alike.
from osgeo import ogr

shapefile = ogr.Open('D:/vector/India/IND_adm2.shp')

layer = shapefile.GetLayer(0)
##Get Projected Spatial reference

sr = layer.GetSpatialRef()
print(sr)

# Geographic (lat/long) coordinate system underlying the projected one.
geogr_sr = sr.CloneGeogCS()

Python program to extract attributes of features of shape file

# Dump the attribute table and geometry type of feature 82 of the
# TM_WORLD_BORDERS shapefile.
# print(...) with a single argument is valid in both Python 2 and 3.
from osgeo import ogr

shapefile = ogr.Open('D:/vector/TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp')


if shapefile is not None:
    print('Data loaded successfully..')
    layers = shapefile.GetLayerCount()
    print('Shapefile has {} layers'.format(layers))
    layer = shapefile.GetLayer(0)
    features = layer.GetFeatureCount()
    print('Layer {} has {} features'.format(layers, features))
    feature = layer.GetFeature(82)
    # Feature.items() returns the fields as a plain dict.
    attributes = feature.items()
    print('Feature 82 has following attributes')
    for key, value in attributes.items():
        print('{} = {} '.format(key, value))

    geometry = feature.GetGeometryRef()
    geometryName = geometry.GetGeometryName()

    print('Feature\'s geometry data consist of a {}'.format(geometryName))
else:
    print('Failed to load data')  # fixed message typo: was "date"
   

Python program to extract features of shape file

# List the NAME field of every feature in every layer of the
# TM_WORLD_BORDERS shapefile.
# print(...) with a single argument is valid in both Python 2 and 3.
from osgeo import ogr

shapefile = ogr.Open('D:/vector/TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp')

if shapefile is not None:
    print('Data loaded successfully..')
    layers = shapefile.GetLayerCount()
    print('Shapefile has {} layers'.format(layers))
    for numLayers in range(layers):
        layer = shapefile.GetLayer(numLayers)
        features = layer.GetFeatureCount()
        print('Layer {} has {} features'.format(numLayers, features))
        for featureNum in range(features):
            feature = layer.GetFeature(featureNum)
            featureName = feature.GetField('NAME')
            print('Feature {} has name :{}'.format(featureNum, featureName))  # fixed: "had" -> "has"
else:
    print('Failed to load data')  # fixed message typo: was "date"
   

Thursday, 16 March 2017

Program in JAVA to implement Mutual Exclusion.

/**
 * Demonstrates mutual exclusion: four threads share a single
 * Counter_locks instance and increment its counter inside a synchronized
 * run() method, so the read/increment/print sequence of one thread is
 * never interleaved with another's.
 */
public class Counter_locks implements Runnable {

    // Shared counter; guarded by the monitor of this object via
    // the synchronized run() method.
    int count = 0;

    public Counter_locks() {
    }

    // synchronized: at most one thread at a time executes run() on this
    // instance, giving mutually exclusive access to count.
    public synchronized void run() {
        System.out.println(Thread.currentThread().getName() + ": " + "Current Value of count = " + count);
        System.out.println(Thread.currentThread().getName() + " increments count....");
        count++;
        try {
            // Thread.sleep is static; the original invoked it through
            // currentThread(), which misleadingly suggests it sleeps that
            // particular thread object rather than the calling thread.
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Fixed message: a space was missing before "interrupted".
            System.out.println("Thread " + Thread.currentThread().getName() + " interrupted.");
        }
        System.out.println("Value of count after incremented by thread "
                + Thread.currentThread().getName() + " = " + count);
    }

    public static void main(String args[]) {
        // All four threads share the same Runnable, hence the same lock.
        Counter_locks c = new Counter_locks();

        Thread t1 = new Thread(c);
        Thread t2 = new Thread(c);
        Thread t3 = new Thread(c);
        Thread t4 = new Thread(c);

        t1.setName("Thread 1");
        t2.setName("Thread 2");
        t3.setName("Thread 3");
        t4.setName("Thread 4");

        t1.start();
        t2.start();
        t3.start();
        t4.start();

        // Wait for all workers before announcing completion.
        try {
            t1.join();
            t2.join();
            t3.join();
            t4.join();
        } catch (InterruptedException e) {
            System.out.println("Main thread Interrupted .....");
        }

        System.out.println("Main thread exits.....");
    }
}

Output :

D:\>javac  Counter_locks.java
D:\>java Counter_locks
Thread 1: Current Value of count = 0
Thread 1 increments count....
Value of count after incremented by thread Thread 1 = 1
Thread 4: Current Value of count = 1
Thread 4 increments count....
Value of count after incremented by thread Thread 4 = 2
Thread 3: Current Value of count = 2
Thread 3 increments count....
Value of count after incremented by thread Thread 3 = 3
Thread 2: Current Value of count = 3
Thread 2 increments count....
Value of count after incremented by thread Thread 2 = 4

Main thread exits.....

Wednesday, 15 March 2017

DeadlockAvoidance (java)

import java.util.Scanner;

/**
 * Banker's algorithm for deadlock avoidance.  Reads the allocation, max
 * and available matrices from stdin, then runs the safety algorithm:
 * repeatedly pick a process whose remaining need fits in the available
 * vector, let it finish and release its allocation, until either all
 * processes complete (safe) or no further progress is possible (unsafe).
 */
public class DeadlockAvoidance {
    // need = max - allocate; avail is a 1 x nr row vector.
    private int need[][], allocate[][], max[][], avail[][], np, nr;

    /** Reads process/resource counts and the three matrices from stdin. */
    private void input() {
        Scanner sc = new Scanner(System.in);
        System.out.print("Enter no. of processes and resources : ");
        np = sc.nextInt();  // no. of process
        nr = sc.nextInt();  // no. of resources
        need = new int[np][nr];  // initializing arrays
        max = new int[np][nr];
        allocate = new int[np][nr];
        avail = new int[1][nr];
        System.out.println("Enter allocation matrix -->");
        for (int i = 0; i < np; i++)
            for (int j = 0; j < nr; j++)
                allocate[i][j] = sc.nextInt();  // allocation matrix
        System.out.println("Enter max matrix -->");
        for (int i = 0; i < np; i++)
            for (int j = 0; j < nr; j++)
                max[i][j] = sc.nextInt();  // max matrix
        System.out.println("Enter available matrix -->");
        for (int j = 0; j < nr; j++)
            avail[0][j] = sc.nextInt();  // available matrix
        sc.close();
    }

    /** Computes need = max - allocate for every process. */
    private int[][] calc_need() {
        for (int i = 0; i < np; i++)
            for (int j = 0; j < nr; j++)
                need[i][j] = max[i][j] - allocate[i][j];
        return need;
    }

    /** True when every resource the i-th process still needs is available. */
    private boolean check(int i) {
        for (int j = 0; j < nr; j++)
            if (avail[0][j] < need[i][j])
                return false;
        return true;
    }

    /** Runs the safety algorithm and prints whether the state is safe. */
    public void isSafe() {
        input();
        calc_need();
        boolean done[] = new boolean[np];
        int j = 0;
        while (j < np) {  // until all processes are allocated
            boolean allocated = false;
            for (int i = 0; i < np; i++)
                if (!done[i] && check(i)) {  // process i can run to completion
                    // It then releases everything it holds:
                    // avail += max - need, i.e. avail += allocate.
                    for (int k = 0; k < nr; k++)
                        avail[0][k] = avail[0][k] - need[i][k] + max[i][k];
                    System.out.println("Allocated process : " + i);
                    allocated = done[i] = true;
                    j++;
                }
            if (!allocated) break;  // no process could proceed this pass
        }
        if (j == np)  // all processes were allocated
            System.out.println("\nSafely allocated");
        else
            // Fixed message typo: was "All proceess cant be allocated safely"
            System.out.println("All processes cannot be allocated safely");
    }

    public static void main(String[] args) {
        new DeadlockAvoidance().isSafe();
    }
}

DWT CODE IN MATLAB (NO GUI)

% Read MATLAB's bundled 'rice.png' sample image and display it.
% NOTE(review): the variable name i shadows the imaginary unit.
i= imread('rice.png');
imshow(i);

MATLAB GUI code for discrete wavelet transform DWT of image

click here to download .m file
click here to download .fig file
CODE :

function varargout = dwtgui(varargin)
% DWTGUI M-file for dwtgui.fig
%      DWTGUI, by itself, creates a new DWTGUI or raises the existing
%      singleton*.

Tuesday, 7 March 2017

Machine Learning :To Implement GUI program on Delta , Epsilon and N

Aim : To Implement GUI program on Delta , Epsilon and N
DataSet 1:- Diabetes Data Set 

Data Set Information:
Diabetes patient records were obtained from two sources: an automatic electronic recording device and paper records. The automatic device had an internal clock to timestamp events, whereas the paper records only provided "logical time" slots (breakfast, lunch, dinner, bedtime). For paper records, fixed times were assigned to breakfast (08:00), lunch (12:00), dinner (18:00), and bedtime (22:00). Thus paper records have fictitious uniform recording times whereas electronic records have more realistic time stamps. 

Diabetes files consist of four fields per record. Each field is separated by a tab and each record is separated by a newline.

File Names and format:
(1) Date in MM-DD-YYYY format
(2) Time in XX:YY format
(3) Code
(4) Value

The Code field is deciphered as follows:

33 = Regular insulin dose
34 = NPH insulin dose
35 = UltraLente insulin dose
48 = Unspecified blood glucose measurement
57 = Unspecified blood glucose measurement
58 = Pre-breakfast blood glucose measurement
59 = Post-breakfast blood glucose measurement
60 = Pre-lunch blood glucose measurement
61 = Post-lunch blood glucose measurement
62 = Pre-supper blood glucose measurement
63 = Post-supper blood glucose measurement
64 = Pre-snack blood glucose measurement
65 = Hypoglycemic symptoms
66 = Typical meal ingestion
67 = More-than-usual meal ingestion
68 = Less-than-usual meal ingestion
69 = Typical exercise activity
70 = More-than-usual exercise activity
71 = Less-than-usual exercise activity
72 = Unspecified special event

Attribute Information:
Diabetes files consist of four fields per record. Each field is separated by a tab and each record is separated by a newline.

File Names and format:
(1) Date in MM-DD-YYYY format
(2) Time in XX:YY format
(3) Code
(4) Value

DataSet 2:- Car Data Set 

Data Set Information:
Car Evaluation Database was derived from a simple hierarchical decision model originally developed for the demonstration of DEX, M. Bohanec, V. Rajkovic: Expert system for decision making. Sistemica 1(1), pp. 145-157, 1990.). The model evaluates cars according to the following concept structure:

CAR car acceptability
. PRICE overall price
. . buying buying price
. . maint price of the maintenance
. TECH technical characteristics
. . COMFORT comfort
. . . doors number of doors
. . . persons capacity in terms of persons to carry
. . . lug_boot the size of luggage boot
. . safety estimated safety of the car

Input attributes are printed in lowercase. Besides the target concept (CAR), the model includes three intermediate concepts: PRICE, TECH, COMFORT. Every concept is in the original model related to its lower level descendants by a set of examples (for these examples sets see 
[Web Link]).

The Car Evaluation Database contains examples with the structural information removed, i.e., directly relates CAR to the six input attributes: buying, maint, doors, persons, lug_boot, safety.

Because of known underlying concept structure, this database may be particularly useful for testing constructive induction and structure discovery methods. 

Attribute Information:
Class Values:
unacc, acc, good, vgood

Attributes:
buying: vhigh, high, med, low.
maint: vhigh, high, med, low.
doors: 2, 3, 4, 5more.
persons: 2, 4, more.
lug_boot: small, med, big.
safety: low, med, high. 

DataSet 3:- Glass Data Set 

Data Set Information:
Vina conducted a comparison test of her rule-based system, BEAGLE, the nearest-neighbor algorithm, and discriminant analysis. BEAGLE is a product available through VRS Consulting, Inc.; 4676 Admiralty Way, Suite 206; Marina Del Ray, CA 90292 (213) 827-7890 and FAX: -3189. In determining whether the glass was a type of "float" glass or not, the following results were obtained (# incorrect answers):

Type of Sample -- Beagle -- NN -- DA
Windows that were float processed (87) -- 10 -- 12 -- 21
Windows that were not: (76) -- 19 -- 16 -- 22

The study of classification of types of glass was motivated by criminological investigation. At the scene of the crime, the glass left can be used as evidence...if it is correctly identified!

Attribute Information:
1. Id number: 1 to 214
2. RI: refractive index
3. Na: Sodium (unit measurement: weight percent in corresponding oxide, as are attributes 4-10)
4. Mg: Magnesium
5. Al: Aluminum
6. Si: Silicon
7. K: Potassium
8. Ca: Calcium
9. Ba: Barium
10. Fe: Iron
11. Type of glass: (class attribute)
-- 1 building_windows_float_processed
-- 2 building_windows_non_float_processed
-- 3 vehicle_windows_float_processed
-- 4 vehicle_windows_non_float_processed (none in this database)
-- 5 containers
-- 6 tableware
-- 7 headlamps

Bayes Classifier:
 1.     BayesNet:

weka.classifiers.bayes
Class BayesNet
java.lang.Object
weka.classifiers.bayes.BayesNet

All Implemented Interfaces:

Direct Known Subclasses:

public class BayesNet

Bayes Network learning using various search algorithms and quality measures.
Base class for a Bayes Network classifier. Provides datastructures (network structure, conditional probability distributions, etc.) and facilities common to Bayes Network learning algorithms like K2 and B.

2.     NaiveBayes:

Class NaiveBayes
java.lang.Object
weka.classifiers.bayes.NaiveBayes

All Implemented Interfaces:

Direct Known Subclasses:


public class NaiveBayes

Class for a Naive Bayes classifier using estimator classes. Numeric estimator precision values are chosen based on analysis of the training data. For this reason, the classifier is not an UpdateableClassifier (which in typical usage are initialized with zero training instances) -- if you need the UpdateableClassifier functionality, use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable classifier will use a default precision of 0.1 for numeric attributes when buildClassifier is called with zero training instances.

3.     NaiveBayesMultinomial:

Class NaiveBayesMultinomial
java.lang.Object
weka.classifiers.bayes.NaiveBayesMultinomial

All Implemented Interfaces:

Direct Known Subclasses:

public class NaiveBayesMultinomial
Class for building and using a multinomial Naive Bayes classifier. 

The core equation for this classifier:
P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)
where Ci is class i and D is a document.

4.     NaiveBayesMultinomialText:  

Class NaiveBayesMultinomialText
java.lang.Object
weka.classifiers.bayes.NaiveBayesMultinomialText

All Implemented Interfaces:

public class NaiveBayesMultinomialText

Multinomial naive bayes for text data. Operates directly (and only) on String attributes. Other types of input attributes are accepted but ignored during training and classification
Valid options are:
 -W
  Use word frequencies instead of binary bag of words.
 -P <# instances>
  How often to prune the dictionary of low frequency words (default = 0, i.e. don't prune)
 -M <double>
  Minimum word frequency. Words with a frequency lower than this are ignored.
  If periodic pruning is turned on then this is also used to determine which
  words to remove from the dictionary (default = 3).
 -normalize
  Normalize document length (use in conjunction with -norm and -lnorm)
 -norm <num>
  Specify the norm that each instance must have (default 1.0)
 -lnorm <num>
  Specify L-norm to use (default 2.0)
 -lowercase
  Convert all tokens to lowercase before adding to the dictionary.
 -stopwords-handler
  The stopwords handler to use (default Null).
 -tokenizer <spec>
  The tokenizing algorithm (classname plus parameters) to use.
  (default: weka.core.tokenizers.WordTokenizer)
 -stemmer <spec>
  The stemming algorithm (classname plus parameters) to use.
 -output-debug-info
  If set, classifier is run in debug mode and
  may output additional info to the console
 -do-not-check-capabilities
  If set, classifier capabilities are not checked before classifier is built
  (use with caution).


5.      Naïve Bayes Multinomial Updateable:

Class NaiveBayesMultinomialUpdateable
java.lang.Object
weka.classifiers.bayes.NaiveBayesMultinomialUpdateable

All Implemented Interfaces:

public class NaiveBayesMultinomialUpdateable
Class for building and using a multinomial Naive Bayes classifier.
The core equation for this classifier:
P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)
where Ci is class i and D is a document.
Incremental version of the algorithm.


6.      Naïve Bayes Updateable:

Class NaiveBayesUpdateable
java.lang.Object
weka.classifiers.bayes.NaiveBayesUpdateable

All Implemented Interfaces:

public class NaiveBayesUpdateable
extends NaiveBayes
Class for a Naive Bayes classifier using estimator classes. This is the updateable version of NaiveBayes.
This classifier will use a default precision of 0.1 for numeric attributes when buildClassifier is called with zero training instances.
Bayes Classifier:

1.      BayesNet:

Classifier

Precision
Recall
F-Measure
Class
DataSet 1
0.795
0.639
0.816   
0.608
0.806
0.623
tested_negative
tested_positive
DataSet 2
0.918
0.676
0.645
0.938
0.959
0.706
0.290
0.462
0.938
0.690
0.400
0.619
unacc
acc
good
vgood
DataSet 3
0.653
0.767
0.250
0.000
0.750
0.538
0.897
0.886
0.605
0.059
0.000
0.692
0.778
0.897
0.752
0.676
0.095
0.000
0.720
0.636
0.897
build wind float
build wind non-float
vehic wind float
vehic wind non-float
containers
tableware
headlamps

Weighted Avg. DataSet-1
0.741    
0.743   
0.742

DataSet-2
0.854     
0.857
0.849

DataSet-3
0.695
0.706
0.686


Confusion Matrix:

DataSet 1
DataSet 2
DataSet 3
a   b            <-- classified as
408  92 |   a=tested_negative
105 163 |  b=tested_positive
a    b    c    d   <-- classified as
1160   49    1    0 |   a = unacc
104  271    9    0  |   b = acc
    0   47   20    2   |   c = good
    0   34    1   30   |   d =vgood
a  b c d  e  f  g <--classified as
62  5 2 0 0 1 0 |  a=build wind float
 21 46  1  0  3  4  1 |  b = build wind non-float
 11  4  1  0  0  1  0 |  c = vehic wind float
  0  0  0  0  0  0  0 |  d = vehic wind non-float
  0  3  0  0  9  0  1 |  e = containers
  0  1  0  0  0  7  1 |  f = tableware
  1  1  0  1  0  0 26 |  g = headlamps

2.      NaiveBayes:

Classifier: Naive

Precision
Recall
F-Measure
Class
DataSet 1
0.802
0.678
0.844
0.612
0.823
0.643
tested_negative
tested_positive
DataSet 2
0.917
0.672
0.633
0.931
0.960
0.706
0.275
0.415
0.938
0.689
0.384
0.574
unacc
acc
good
vgood
DataSet 3
0.455
0.481
0.190
0.000
0.333
0.571
0.857
0.729
0.171
0.235
0.000
0.308
0.889
0.828
0.560
0.252
0.211
0.000
0.320
0.696
0.842
build wind float
build wind non-float
vehic wind float
vehic wind non-float
containers
tableware
headlamps

Weighted Avg. DataSet-1
0.759    
0.763
0.760

DataSet-2
0.852
0.855
0.847

DataSet-3
0.496
0.486
0.453


Confusion Matrix:

DataSet 1
DataSet 2
DataSet 3
a   b         <-- classified as
422  78  |  a=tested_negative
104 164 |  b=tested_positive
 a    b    c    d   <-- classified as
1161   48    1    0 |   a = unacc
104  271    9    0  |   b = acc
    1   47   19    2   |    c = good
    0   37    1   27   |  d = vgood
a  b  c  d  e  f  g   <-- classified as
 51  5 11  0  0  2  1 |  a = build wind float
 48 13  6  0  5  3  1 |  b = build wind non-float
 12  0  4  0  0  1  0 |  c = vehic wind float
  0  0  0  0  0  0  0 |  d = vehic wind non-float
  0  8  0  0  4  0  1 |  e = containers
  0  0  0  0  0  8  1 |  f = tableware
  1  1  0  0  3  0 24 |  g = headlamps

3.      NaiveBayesMultinomial:

Classifier: Naive

Precision
Recall
F-Measure
Class
DataSet 1
0.698
0.429
0.678
0.451
0.688
0.440
tested_negative
tested_positive
DataSet 2
-------
-------
--------
-------
DataSet 3
0.548
0.460
0.000
0.000
0.500
0.000
0.657
0.657
0.526
0.000
0.000
0.231
0.000
0.793
0.597
0.491
0.000
0.000
0.316
0.000
0.719
build wind float
build wind non-float
vehic wind float
vehic wind non-float
containers
tableware
headlamps

Weighted Avg. DataSet-1
0.604
0.599
0.601

DataSet-2
-----
-----
-------

DataSet-3
0.462
0.523
0.486


Confusion Matrix:

DataSet 1
DataSet 2
DataSet 3
a   b         <-- classified as
339 161 | a=tested_negative
147 121 | b=tested_positive


----------------------------------
a  b  c  d  e  f  g   <-- classified as
 46 24  0  0  0  0  0 |  a = build wind float
 28 40  0  0  2  2  4 |  b = build wind non-float
  9  8  0  0  0  0  0 |  c = vehic wind float
  0  0  0  0  0  0  0 |  d = vehic wind non-float
  0  5  0  0  3  0  5 |  e = containers
  0  6  0  0  0  0  3 |  f = tableware
  1  4  0  0  1  0 23 |  g = headlamps

4.      NaiveBayesMultinomialUpdateable:

Classifier: Naive

Precision
Recall
F-Measure
Class
DataSet 1
0.694
0.427
0.686
0.437
0.690
0.432
tested_negative
tested_positive
DataSet 2
--------
--------
--------
-------------
DataSet 3
1.000
0.387
0.000
0.000
1.000
0.000
0.938
0.029
0.987
0.000
0.000
0.154
0.000
0.517
0.056
0.556
0.000
0.000
0.267
0.000
0.667
build wind float
build wind non-float
vehic wind float
vehic wind non-float
containers
tableware
headlamps

Weighted Avg. DataSet-1
0.601
0.599
0.600

DataSet-2
------------
------------
---------

DataSet-3
0.652
0.439
0.322


Confusion Matrix:

DataSet 1
DataSet 2
DataSet 3
a   b         <-- classified as
343 157 | a=tested_negative
151 117 | b=tested_positive

a  b  c  d  e  f  g   <-- classified as
  2 68  0  0  0  0  0 |  a = build wind float
  0 75  0  0  0  0  1 |  b = build wind non-float
  0 17  0  0  0  0  0 |  c = vehic wind float
  0  0  0  0  0  0  0 |  d = vehic wind non-float
  0 11  0  0  2  0  0 |  e = containers
  0  9  0  0  0  0  0 |  f = tableware
  0 14  0  0  0  0 15 |  g = headlamps

5.      NaiveBayesUpdateable :

Classifier: Naive

Precision
Recall
F-Measure
Class
DataSet 1
0.802
0.678
0.844
0.612
0.823
0.643
tested_negative
tested_positive
DataSet 2
0.917
0.672
0.633
0.931
0.960
0.706
0.275
0.415
0.938
0.689
0.384
0.574
unacc
acc
good
vgood
DataSet 3
0.455
0.481
0.190
0.00
0.333
0.571
0.857
0.729
0.171
0.235
0.000
0.308
0.889
0.828
0.560
0.252
0.211
0.000
0.320
0.696
0.842
build wind float
build wind non-float
vehic wind float
vehic wind non-float
containers
tableware
headlamps

Weighted Avg. DataSet-1
0.759
0.763
0.760

DataSet-2
0.852
0.855
0.847

DataSet-3
0.496
0.486
0.453


Confusion Matrix:

DataSet 1
DataSet 2
DataSet 3
a   b         <-- classified as
422  78  | a=tested_negative
104 164 | b=tested_positive
a    b    c    d   <-- classified as
1161   48    1    0 |   a = unacc
104  271    9    0  |   b = acc
    1   47   19    2   |   c = good
    0   37    1   27   |   d= vgood
a  b  c  d  e  f  g   <-- classified as
 51  5 11  0  0  2  1 |  a = build wind float
 48 13  6  0  5  3  1 |  b = build wind non-float
 12  0  4  0  0  1  0 |  c = vehic wind float
  0  0  0  0  0  0  0 |  d = vehic wind non-float
  0  8  0  0  4  0  1 |  e = containers
  0  0  0  0  0  8  1 |  f = tableware
  1  1  0  0  3  0 24 |  g = headlamps

6.      NaiveBayesMultinomialText :

Classifier: Naive

Precision
Recall
F-Measure
Class
DataSet 1
0.651
0.000
1.000
0.000
0.789
0.000
tested_negative
tested_positive
DataSet 2
0.700
0.000
0.000
0.000
1.000
0.000
0.000
0.000
0.824
0.000
0.000
0.000
unacc
acc
good
vgood
DataSet 3
0.000
0.355
0.000
0.000
0.000
0.000
0.000
0.000
1.000
0.000
0.000
0.000
0.000
0.000
0.000
0.524
0.000
0.000
0.000
0.000
0.000
build wind float
build wind non-float
vehic wind float
vehic wind non-float
containers
tableware
headlamps

Weighted Avg. DataSet-1
0.424
0.651
0.513

DataSet-2
0.490
0.700
0.577

DataSet-3
0.126
0.355
0.186


Confusion Matrix:

DataSet 1
DataSet 2
DataSet 3
a   b      <-- classified as
500   0 |  a = tested_negative
268   0 |  b = tested_positive
a    b    c    d   <-- classified as
1210    0    0    0 |    a = unacc
  384     0    0    0 |    b = acc
     69    0    0    0 |    c = good
     65    0    0    0 |    d = vgood
a  b  c  d  e  f  g   <-- classified as
  0 70  0  0  0  0  0 |  a = build wind float
  0 76  0  0  0  0  0 |  b = build wind non-float
  0 17  0  0  0  0  0 |  c = vehic wind float
  0  0  0  0  0  0  0 |  d = vehic wind non-float
  0 13  0  0  0  0  0 |  e = containers
  0  9  0  0  0  0  0 |  f = tableware
  0 29  0  0  0  0  0 |  g = headlamps