# -*- coding: utf-8 -*-
"""
Created on Mon Oct 10 09:00:26 2022

@author: Márton
"""

from sklearn.datasets import fetch_20newsgroups;
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer;
from sklearn.naive_bayes import MultinomialNB;
from sklearn.neural_network import MLPClassifier;
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix, classification_report;
from sklearn.decomposition import TruncatedSVD;
import matplotlib.pyplot as plt;
import numpy as np;

# The four "sci.*" newsgroup topics used for this experiment.
categories = [
    'sci.crypt',
    'sci.electronics',
    'sci.med',
    'sci.space',
]

# Download (or load from the local scikit-learn cache) the train and test
# splits, restricted to the chosen categories.
ds_train = fetch_20newsgroups(subset='train', categories=categories)
ds_test = fetch_20newsgroups(subset='test', categories=categories)

# Number of documents in each split.
n_train = len(ds_train.data)
n_test = len(ds_test.data)

# Build a TF-IDF document-term matrix over the training corpus.
#   stop_words='english' -> drop common English stop words
#   max_df=0.8           -> ignore terms appearing in >80% of documents
#   min_df=0.01          -> ignore terms appearing in <1% of documents
Tfidf_vector = TfidfVectorizer(stop_words='english', max_df=0.8, min_df=0.01)
DT_train_tfidf = Tfidf_vector.fit_transform(ds_train.data)

# FIX: get_feature_names() was deprecated in scikit-learn 1.0 and removed
# in 1.2; get_feature_names_out() is the supported replacement.
vocabulary_tfidf = Tfidf_vector.get_feature_names_out()
n_words = DT_train_tfidf.shape[1]

# Document-term matrix in dense form (rows = documents, cols = vocabulary terms).
DT_train_tfidf_dense = DT_train_tfidf.toarray()

# Checking normalization: TfidfVectorizer l2-normalizes rows by default,
# so every entry of diag(DT @ DT.T) — each row's squared norm — should be 1.
row_norm = np.diag(np.dot(DT_train_tfidf, DT_train_tfidf.T).toarray())

# SVD (latent semantic analysis) for the training data: project the sparse
# TF-IDF matrix onto its first two singular components.
svd = TruncatedSVD(n_components=2, n_iter=10, random_state=2022)
svd.fit(DT_train_tfidf)
transformed_docs = svd.transform(DT_train_tfidf)

# Visualize the documents in the 2-D principal-component space,
# colored by their newsgroup label.
fig = plt.figure(1)
plt.title('SVD of 20newsgroups')
plt.xlabel('SVD1')
plt.ylabel('SVD2')
scatter = plt.scatter(transformed_docs[:, 0], transformed_docs[:, 1],
                      s=50, c=ds_train.target)
# FIX: a bare plt.legend() on an unlabeled scatter warns ("No artists with
# labels found") and draws nothing. Build the legend from the scatter's
# color groups and map them to the human-readable category names.
plt.legend(scatter.legend_elements()[0], ds_train.target_names,
           title='category')
plt.show()
