# DecisionTree.py — decision-tree classifier on the Stack Overflow developer-survey dataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn import tree
from sklearn.metrics import roc_curve, auc
# pd.set_option('display.max_columns', 1220)  # uncomment to display all dataset columns
# Load the dataset. Pass the path directly so pandas opens and closes the
# file itself (the original opened a file handle and never closed it).
dataset = pd.read_csv('survey_results_public.csv', index_col=0)
print("Dimensioni iniziali dataset: ", dataset.shape)
# Dataset setup:
# 1) drop rows whose Salary is NaN — they carry no target information
dataset = dataset.dropna(subset=['Salary'])
print("Dimensioni dataset dopo rimozione NaN nella colonna Salario: ", dataset.shape)
# 2) extract the Salary column: it becomes the prediction target
salary = dataset.pop('Salary')
# 3) drop irrelevant columns
dataset = dataset.drop(['ExpectedSalary', 'NonDeveloperType'], axis=1)
print("Dimensione dataset dopo rimozione colonne irrilevanti e la colonna Salario: ", dataset.shape)
# 4) one-hot encode the string columns, also splitting multi-valued cells
#    ('; '-separated). NaN values are turned into the literal string "NaN"
#    so that missingness itself is available as a predictive feature.
columns = dataset.columns.values
is_num = np.asarray(dataset.dtypes != 'object')
col_num = columns[is_num]
col_obj = columns[~is_num]
for col in col_obj:
    # Assign back instead of inplace=True on a column selection: avoids the
    # chained-assignment pitfall and works on current pandas versions.
    dataset[col] = dataset[col].fillna(value="NaN")
    tdf = dataset[col].str.get_dummies(sep='; ')
    tdf.columns = [col + '_' + s for s in tdf.columns]
    dataset = dataset.join(tdf).drop(col, axis=1)
print("Dimensione dataset dopo lo split delle colonne di stringhe: ", dataset.shape)
# 5) replace NaN in the numeric columns with the column median
#    (Series.median already skips NaN, so no extra dropna is needed)
for col in col_num:
    dataset[col] = dataset[col].fillna(value=dataset[col].median())
# Median salary: the threshold that defines the binary classification target
mediana = salary.median()
print("\nMediana dei salari: ", mediana)
# Split the dataset into train set and test set
RANDOM_STATE = 42
TEST_SIZE = 0.2
target = salary >= mediana
x_train, x_test, y_train, y_test = train_test_split(dataset, target, test_size=TEST_SIZE, random_state=RANDOM_STATE)
# Sweep max_depth, plot train/test AUC, and select the depth that maximises
# the test AUC. DecisionTreeClassifier requires an integral max_depth, so use
# integers directly — the original np.linspace produced floats (1.0, 2.0, …),
# which recent scikit-learn versions reject.
max_depths = np.arange(1, 33)
train_results = []
test_results = []
for depth in max_depths:
    dt = tree.DecisionTreeClassifier(max_depth=int(depth))
    dt.fit(x_train, y_train)
    # AUC on the training set (shows overfitting as depth grows)
    train_pred = dt.predict(x_train)
    fpr, tpr, _ = roc_curve(y_train, train_pred)
    train_results.append(auc(fpr, tpr))
    # AUC on the held-out test set
    y_pred = dt.predict(x_test)
    fpr, tpr, _ = roc_curve(y_test, y_pred)
    test_results.append(auc(fpr, tpr))
# Best depth = index of the maximum test AUC plus one, because depths start
# at 1. (The original manual search initialised max_depth to 0, so when the
# best score sat at index 0 it selected max_depth=0, which would crash the
# final DecisionTreeClassifier below. argmax keeps the same first-maximum
# tie-breaking as the original strict '>' comparison.)
max_depth = int(np.argmax(test_results)) + 1
line1, = plt.plot(max_depths, train_results, 'b', label="Train AUC")
line2, = plt.plot(max_depths, test_results, 'r', label="Test AUC")
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel('AUC score')
plt.xlabel('Tree depth')
plt.show()
print("max depth: ", max_depth)
# Fit the final decision tree at the selected depth and report its accuracy
clf = tree.DecisionTreeClassifier(max_depth=max_depth)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print("Predizione del test set:\n", y_pred, "\nScore: ", clf.score(x_test, y_test))
# AUC metric on the test predictions
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
print("AUC score: ", roc_auc)
# 10-fold cross validation of the final tree on the training set
k_fold = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
scores = cross_val_score(clf, x_train, y_train, cv=k_fold)
print('10-fold cross validation score:\n {}'.format(scores))
print('Media: {}'.format(scores.mean()))
# Render the fitted tree as a PNG image.
# Disabled (kept as a no-op string literal): it needs the optional
# pydotplus + graphviz packages; remove the quotes to enable it.
'''
import pydotplus
import collections
import graphviz
dot_data = tree.export_graphviz(clf, feature_names=dataset.columns.values, out_file=None, filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
colors = ('turquoise', 'orange')
edges = collections.defaultdict(list)
for edge in graph.get_edge_list():
edges[edge.get_source()].append(int(edge.get_destination()))
for edge in edges:
edges[edge].sort()
for i in range(2):
dest = graph.get_node(str(edges[edge][i]))[0]
dest.set_fillcolor(colors[i])
graph.write_png('DecisionTree.png')
'''