# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 22:10:34 2021

@author: Márton Ispány
"""

from urllib.request import urlopen   # URL handling
import numpy as np                   # numerical arrays
from sklearn.feature_extraction.text import CountVectorizer  # text vectorizer
from sklearn.model_selection import train_test_split         # train/test splitting
from sklearn.naive_bayes import MultinomialNB                 # multinomial naive Bayes
from sklearn.metrics import confusion_matrix                  # confusion matrix

# Reading the dataset

url = 'https://arato.inf.unideb.hu/ispany.marton/TextandWebMining/Datasets/SMSSpamCollection.txt'
raw_data = urlopen(url)
smsspam = np.loadtxt(raw_data, delimiter="\t", dtype=str)  # two columns: label, message
sms = smsspam[:, 1]      # SMS texts
target = smsspam[:, 0]   # class labels ('ham' or 'spam')
del smsspam
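
# Quick sanity check (illustrative sketch, not part of the original script):
# print the class distribution to confirm that the two labels loaded as expected.
labels, counts = np.unique(target, return_counts=True)
print('Class distribution:', dict(zip(labels, counts)))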

# Preprocessing: vectorizing the dataset

vectorizer = CountVectorizer(stop_words='english', max_df=0.8, min_df=0.01)
DT = vectorizer.fit_transform(sms)            # sparse document-term matrix
vocabulary_dict = vectorizer.vocabulary_      # term -> column index mapping
vocabulary_list = list(vectorizer.get_feature_names_out())  # get_feature_names() was removed in scikit-learn 1.2
vocabulary = np.asarray(vocabulary_list)      # vocabulary as a 1D array
stopwords = vectorizer.stop_words_            # terms dropped by the stop word and max_df/min_df filters
n_words = DT.shape[1]                         # vocabulary size
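
# Illustrative sketch (not in the original script): inspect the most frequent
# terms by summing the sparse document-term matrix column-wise.
term_freq = np.asarray(DT.sum(axis=0)).ravel()   # total count of each term over all messages
top_idx = np.argsort(term_freq)[::-1][:10]       # indices of the 10 most frequent terms
print('Top terms:', list(zip(vocabulary[top_idx], term_freq[top_idx])))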

# Document-term matrix in dense form (for inspection only; not needed for fitting)
DT_dense = DT.toarray()

# Partitioning into training and test sets
X_train, X_test, y_train, y_test = train_test_split(DT, target, test_size=0.3,
                                                     shuffle=True, random_state=2021)

# Fitting multinomial naive Bayes model

alpha = 1   # Laplace smoothing parameter
clf_MNB = MultinomialNB(alpha=alpha)
clf_MNB.fit(X_train, y_train)
acc_train = clf_MNB.score(X_train, y_train)   # training accuracy
acc_test = clf_MNB.score(X_test, y_test)      # test accuracy
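
# Brief report of the fit (sketch added for illustration): compare training
# and test accuracy to gauge overfitting.
print(f'Training accuracy: {acc_train:.4f}')
print(f'Test accuracy:     {acc_test:.4f}')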

# Confusion matrices

y_train_pred = clf_MNB.predict(X_train)
cm_train = confusion_matrix(y_train, y_train_pred)   # rows: true labels, columns: predicted labels
y_test_pred = clf_MNB.predict(X_test)
cm_test = confusion_matrix(y_test, y_test_pred)
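
# Optional visualization sketch: this assumes matplotlib is available, which
# the original script does not import. ConfusionMatrixDisplay is part of
# sklearn.metrics (scikit-learn >= 0.22).
from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plt

disp = ConfusionMatrixDisplay(confusion_matrix=cm_test, display_labels=clf_MNB.classes_)
disp.plot()
plt.title('Confusion matrix on the test set')
plt.show()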
