# -*- coding: utf-8 -*-
"""
Created on Mon Nov 14 09:35:27 2022

@author: Márton
"""

from urllib.request import urlopen                            # URL handling
import numpy as np                                            # numerical arrays
from sklearn.feature_extraction.text import TfidfVectorizer   # TF-IDF vectorizer
from sklearn.model_selection import train_test_split          # train/test splitting
from sklearn.cluster import KMeans                            # K-means clustering
from sklearn.mixture import GaussianMixture                   # Gaussian mixture model
from sklearn.metrics.cluster import contingency_matrix        # contingency matrix
from sklearn.metrics import davies_bouldin_score              # Davies-Bouldin score

# Reading the dataset

url = 'https://arato.inf.unideb.hu/ispany.marton/TextandWebMining/Datasets/SMSSpamCollection.txt'
raw_data = urlopen(url)
smsspam = np.loadtxt(raw_data, delimiter="\t", dtype=str)
sms = smsspam[:, 1]     # message texts
target = smsspam[:, 0]  # labels: 'ham' or 'spam'
del smsspam
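
# Quick sanity check on the labels (an illustrative step, not part of the
# pipeline): the SMSSpamCollection corpus is imbalanced, with far more ham
# than spam, which is worth keeping in mind when reading the clusterings.
labels, counts = np.unique(target, return_counts=True)
print(dict(zip(labels, counts)))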

# Preprocessing: vectorizing the dataset

vectorizer = TfidfVectorizer(stop_words='english', max_df=0.8, min_df=0.0001)
DT = vectorizer.fit_transform(sms)                    # sparse document-term matrix
vocabulary_dict = vectorizer.vocabulary_              # term -> column index mapping
vocabulary_list = vectorizer.get_feature_names_out()  # terms ordered by column index
vocabulary = np.asarray(vocabulary_list)              # vocabulary as a 1D array
stopwords = vectorizer.stop_words_                    # terms dropped by the stop list, max_df, or min_df
n_words = DT.shape[1]                                 # vocabulary size

# Document-term matrix in dense form
DT_dense = DT.toarray()
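
# A quick look at the vectorized corpus (illustrative only): the ten terms
# with the highest mean TF-IDF weight across all messages.
mean_tfidf = np.asarray(DT.mean(axis=0)).ravel()
top10 = np.argsort(mean_tfidf)[::-1][:10]
print(vocabulary[top10])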

# Partitioning into training and test sets
X_train, X_test, y_train, y_test = train_test_split(DT, target, test_size=0.3,
                                                    stratify=target, shuffle=True,
                                                    random_state=2021)
X_train_dense = X_train.toarray()
n_c = 20  # number of clusters

# Fitting K-means
KM = KMeans(n_clusters=n_c, random_state=2021)
KM.fit(X_train)
centers_KM = KM.cluster_centers_  # centroids in TF-IDF space
clabel_KM = KM.labels_            # cluster label of each training document
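
# Interpreting the clusters: print the ten highest-weight terms of each
# K-means centroid, a common way to label text clusters by eye.
order = np.argsort(centers_KM, axis=1)[:, ::-1]
for k in range(n_c):
    print(k, vocabulary[order[k, :10]])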

# Evaluating the clustering
cm = contingency_matrix(y_train, clabel_KM)  # rows: true labels, columns: clusters
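
# One simple summary of the contingency matrix is cluster purity: the
# fraction of documents that fall into their cluster's majority class.
purity = cm.max(axis=0).sum() / cm.sum()
print('Purity on the training set:', purity)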
db_train = davies_bouldin_score(X_train_dense, clabel_KM)            # lower is better
db_test = davies_bouldin_score(X_test.toarray(), KM.predict(X_test))
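
# The GaussianMixture import above is not used so far; below is a minimal
# sketch of fitting it on the dense training matrix for comparison. A
# diagonal covariance is assumed here to keep the model tractable in this
# high-dimensional TF-IDF space, where full covariances would be close to
# singular.
GM = GaussianMixture(n_components=n_c, covariance_type='diag',
                     random_state=2021)
clabel_GM = GM.fit_predict(X_train_dense)
db_train_GM = davies_bouldin_score(X_train_dense, clabel_GM)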