# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 12:49:36 2019

@author: Márton
"""

from sklearn.datasets import load_iris
from sklearn import tree, neighbors, naive_bayes, metrics
import matplotlib.pyplot as plt
import numpy as np
import itertools

# Load in our dataset
iris_data = load_iris()
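
# Optional sanity check of what was loaded: 150 samples, 4 features, 3 classes
# (a minimal sketch; the values in the comments are what load_iris() returns).
print(iris_data.data.shape)         # expected: (150, 4)
print(iris_data.target_names)       # expected: ['setosa' 'versicolor' 'virginica']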

# Decision tree
# Initialize our decision tree object
crit = 'entropy'
depth = 3
classification_tree = tree.DecisionTreeClassifier(criterion=crit, max_depth=depth)

# Train our decision tree (tree induction; max_depth acts as pre-pruning)
classification_tree = classification_tree.fit(iris_data.data, iris_data.target)
pred_dtree = classification_tree.predict(iris_data.data)  # predictions on the training data

# Computing confusion matrix, accuracy, per-class report and macro F1 for the decision tree
cm_dtree = metrics.confusion_matrix(iris_data.target, pred_dtree)
acc_dtree = metrics.accuracy_score(iris_data.target, pred_dtree)
print(metrics.classification_report(iris_data.target, pred_dtree, target_names=iris_data.target_names))
f1_dtree = metrics.f1_score(iris_data.target, pred_dtree, average='macro')
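
# Optional sketch: the fitted tree can also be inspected by exporting it as
# Graphviz DOT text (no rendering assumed here; we simply print the rules).
dot_data = tree.export_graphviz(classification_tree, out_file=None,
                                feature_names=iris_data.feature_names,
                                class_names=iris_data.target_names,
                                filled=True)
print(dot_data)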

# Naive Bayes
NB = naive_bayes.GaussianNB()
NB = NB.fit(iris_data.data, iris_data.target)
pred_NB = NB.predict(iris_data.data)

# Computing confusion matrix and accuracy for naive Bayes
cm_NB = metrics.confusion_matrix(iris_data.target, pred_NB)
acc_NB = metrics.accuracy_score(iris_data.target, pred_NB)
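
# Sketch: the same per-class report and macro F1 computed for the decision tree
# can be reported for naive Bayes as well (again evaluated on the training data).
print(metrics.classification_report(iris_data.target, pred_NB, target_names=iris_data.target_names))
f1_NB = metrics.f1_score(iris_data.target, pred_NB, average='macro')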

# Nearest Neighbors (k-NN)
knn = 2
neigh = neighbors.KNeighborsClassifier(n_neighbors=knn)
neigh = neigh.fit(iris_data.data, iris_data.target)
pred_neigh = neigh.predict(iris_data.data)

# Computing confusion matrix and accuracy for nearest neighbors
cm_neigh = metrics.confusion_matrix(iris_data.target, pred_neigh)
acc_neigh = metrics.accuracy_score(iris_data.target, pred_neigh)
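
# Optional sketch: k=2 is an arbitrary choice; a simple way to pick k is
# cross-validation (assumes sklearn.model_selection is available).
from sklearn.model_selection import cross_val_score
for k in (1, 3, 5, 7):
    cv_scores = cross_val_score(neighbors.KNeighborsClassifier(n_neighbors=k),
                                iris_data.data, iris_data.target, cv=5)
    print(k, cv_scores.mean())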

# Visualisation of the confusion matrix

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Greens):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    
# Plot non-normalized confusion matrix for the decision tree
plt.figure(1)
plot_confusion_matrix(cm_dtree, classes=iris_data.target_names,
                      title='Confusion matrix for decision tree classifier')
plt.show()
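
# Optional sketch: the same helper can plot the remaining confusion matrices in
# separate figures (figure numbers are arbitrary).
plt.figure(2)
plot_confusion_matrix(cm_NB, classes=iris_data.target_names,
                      title='Confusion matrix for naive Bayes classifier')
plt.show()

plt.figure(3)
plot_confusion_matrix(cm_neigh, classes=iris_data.target_names,
                      title='Confusion matrix for k-NN classifier')
plt.show()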


