# -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 08:27:54 2018

@author: Márton
"""

from sklearn.datasets import fetch_20newsgroups;  # importing the dataset
import sklearn.feature_extraction.text as txt;  # importing text preprocessing
import sklearn.metrics.pairwise as pw;  # importing pairwise similarity metrics
from sklearn import decomposition as decomp;
import matplotlib.pyplot as plt;  # importing pyplot
import matplotlib.colors as col;  # importing coloring
import pandas as pd; # importing pandas
import numpy as np;  # importing numpy for arrays

# Importing the training and testing datasets
# Fetch the 20 Newsgroups corpus, split into its training and testing parts.
ds_train = fetch_20newsgroups(subset='train', shuffle=True, random_state=2018)
ds_test = fetch_20newsgroups(subset='test', shuffle=True, random_state=2018)

n_train = len(ds_train.data)          # number of training documents
n_test = len(ds_test.data)            # number of testing documents
n_class = len(ds_train.target_names)  # number of newsgroup categories

# Build a bag-of-words (document-term) representation of the training corpus.
# A word is kept only if it occurs in at least min_pr (5%) of the documents;
# English stop words are removed as well.
min_pr = 0.05
vectorizer = txt.CountVectorizer(stop_words='english', min_df=min_pr)
DT_train = vectorizer.fit_transform(ds_train.data)  # sparse document-term matrix
vocabulary_dict = vectorizer.vocabulary_  # term -> column-index mapping
# FIX: get_feature_names() was deprecated in scikit-learn 1.0 and removed in
# 1.2; get_feature_names_out() is the replacement. Wrapped in list() so
# vocabulary_list stays a plain Python list, as before.
vocabulary_list = list(vectorizer.get_feature_names_out())
vocabulary = np.asarray(vocabulary_list)  # vocabulary as a 1-D numpy array
stopwords = vectorizer.stop_words_  # terms dropped by stop-word/min_df filtering
n_words = DT_train.shape[1]  # vocabulary size

# dropped terms as a plain list
stopwords_list = list(stopwords)

# transforming the test dataset            
DT_test = vectorizer.transform(ds_test.data);

# computing cosine similarity
cos_sim = pw.cosine_similarity(DT_train,DT_test);

# For one chosen test document, collect the first_k most similar training
# documents (by cosine similarity) together with their category labels.
index = 100   # which test document to inspect
first_k = 10  # how many nearest training documents to keep

# training-document indices ordered from most to least similar
sorted_index = np.flip(np.argsort(cos_sim[:, index]))
top = sorted_index[:first_k]

most_related_docs = [ds_train.data[i] for i in top]
most_related_docs_target = [ds_train.target_names[ds_train.target[i]]
                            for i in top]

print(ds_test.target_names[ds_test.target[index]])
print(ds_test.data[index])