# -*- coding: utf-8 -*-
"""
Created on Fri Oct 29 13:18:00 2021

@author: Márton
"""

from urllib.request import urlopen  # URL handling
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer  # bag-of-words vectorizer
from sklearn.model_selection import train_test_split  # train/test splitting
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics import davies_bouldin_score  # Davies-Bouldin score

# Reading the dataset

url = 'https://arato.inf.unideb.hu/ispany.marton/TextandWebMining/Datasets/SMSSpamCollection.txt'
raw_data = urlopen(url)
# comments=None keeps messages containing '#' intact (np.loadtxt would
# otherwise treat '#' as the start of a comment and truncate the line)
smsspam = np.loadtxt(raw_data, delimiter="\t", dtype=str, comments=None,
                     encoding='utf-8')
sms = smsspam[:, 1]     # message texts
target = smsspam[:, 0]  # labels: 'ham' or 'spam'
del smsspam
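
# Quick sanity check (illustrative addition, not in the original script):
# count how many ham and spam messages were loaded.
labels, counts = np.unique(target, return_counts=True)
print(dict(zip(labels, counts)))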

# Preprocessing: vectorizing the dataset

vectorizer = CountVectorizer(stop_words='english', max_df=0.8, min_df=0.01)
DT = vectorizer.fit_transform(sms)  # sparse document-term matrix
vocabulary_dict = vectorizer.vocabulary_  # term -> column index mapping
# get_feature_names() was removed in scikit-learn 1.2; its replacement
# already returns the vocabulary as a 1D array
vocabulary = vectorizer.get_feature_names_out()
stopwords = vectorizer.stop_words_  # terms dropped by max_df/min_df
n_words = DT.shape[1]
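
# Minimal sketch (added for illustration): pass one message through the
# fitted vectorizer and list which retained vocabulary terms it contains.
example_row = vectorizer.transform([sms[0]])  # 1 x n_words sparse row
print(sms[0], '->', vocabulary[example_row.toarray().ravel() > 0])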

# Document-term matrix in dense form 
DT_dense = DT.toarray()
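
# The dense copy is affordable only because min_df/max_df kept the vocabulary
# small; a look at the sparsity shows how empty DT is (illustrative addition).
sparsity = 1.0 - DT.nnz / (DT.shape[0] * DT.shape[1])
print('sparsity of DT: %.3f' % sparsity)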

# Partitioning into training and test sets
X_train, X_test, y_train, y_test = train_test_split(DT, target, test_size=0.3,
                                                    shuffle=True, random_state=2021)
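
# Sanity check on the 70/30 split (illustrative addition): the two row
# counts should sum to the number of documents in DT.
print(X_train.shape, X_test.shape)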

# Fitting Kmeans

# n_init is set explicitly because its default changed to 'auto' in
# scikit-learn 1.4
kmeans = KMeans(n_clusters=2, n_init=10, random_state=2021)
kmeans.fit(X_train)
centers = kmeans.cluster_centers_  # centroids, one row per cluster
clabel = kmeans.labels_            # cluster assignment of each training document
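
# A common way to interpret the two clusters (added sketch, relying on the
# vocabulary array built above): list the ten highest-weight terms of each
# centroid.
for k in range(centers.shape[0]):
    top = np.argsort(centers[k])[::-1][:10]  # indices of the 10 largest weights
    print('cluster %d:' % k, vocabulary[top])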

cm = contingency_matrix(y_train, clabel)  # rows: true labels, columns: clusters
# davies_bouldin_score expects dense arrays, hence the toarray() calls
db_train = davies_bouldin_score(X_train.toarray(), clabel)
db_test = davies_bouldin_score(X_test.toarray(), kmeans.predict(X_test))
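
# One scalar summary of the contingency matrix (illustrative addition):
# cluster purity, the fraction of training documents falling into a cluster
# whose majority class matches their true label.
purity = cm.max(axis=0).sum() / cm.sum()
print('train purity: %.3f, DB train: %.3f, DB test: %.3f'
      % (purity, db_train, db_test))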