Machine Learning Procedure

Importing the Data

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
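
These notes assume a feature matrix X and a label vector y already exist. As a minimal sketch, the data could be loaded from a CSV file (the file name and target column below are placeholders, not from the original notes):

# Hypothetical dataset: adjust the path and target column to your data
data = pd.read_csv("data.csv")
X = data.drop("target", axis=1)
y = data["target"]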

Previewing the Data

X = pd.DataFrame(X)
# First 10 rows
X.head(n=10)
# A random sample of 10 rows
X.sample(n=10)
# Summary statistics: count, mean, spread, and quartiles of each feature
X.describe()

Data Visualization

Box plots make it easy to spot outliers.

X.plot(kind='box')
plt.show()

Histograms show how each feature is distributed and whether it is roughly normal (Gaussian).

X.hist(figsize=(12,5),xlabelsize=1,ylabelsize=1)
plt.show()

Density plots show how the values of each feature are distributed.

X.plot(kind="density",subplot=True,layout=(4,4),figsize=(12,5))
plt.show()

A scatter matrix shows which features are clearly correlated with one another.

pd.plotting.scatter_matrix(X, figsize=(10, 10))
plt.show()

A heatmap makes the relationships between the features even clearer.

fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
cax = ax.matshow(X.corr(), vmin=-1, vmax=1, interpolation="none")
fig.colorbar(cax)
# One tick per feature (this example has 4 features)
ticks = np.arange(0, 4, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
# col_name is the list of feature names
ax.set_xticklabels(col_name)
ax.set_yticklabels(col_name)
plt.show()
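
If seaborn is available, the same correlation heatmap can be drawn in a couple of lines; this is a sketch, not part of the original notes:

import seaborn as sns

# Correlation heatmap with the coefficient annotated in each cell
plt.figure(figsize=(10, 10))
sns.heatmap(X.corr(), vmin=-1, vmax=1, annot=True, cmap="coolwarm")
plt.show()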

Finding the Best Model

from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold, cross_val_score,GridSearchCV
from sklearn.preprocessing import StandardScaler

models = []
models.append(("AB", AdaBoostClassifier()))
models.append(("GBM", GradientBoostingClassifier()))
models.append(("RF", RandomForestClassifier()))
models.append(("ET", ExtraTreesClassifier()))
models.append(("SVC", SVC()))
models.append(("KNN", KNeighborsClassifier()))
models.append(("LR", LogisticRegression()))
models.append(("GNB", GaussianNB()))
models.append(("LDA", LinearDiscriminantAnalysis()))

names = []
results = []

for name, model in models:
    kfold = KFold(n_splits=5, shuffle=True, random_state=42)
    result = cross_val_score(model, X, y, scoring='accuracy', cv=kfold)
    names.append(name)
    results.append(result)
    print("{} Mean:{:.4f}(Std:{:.4f})".format(name, result.mean(), result.std()))

Using a Pipeline

Wrapping StandardScaler and the model in a Pipeline ensures the scaler is fitted only on the training folds during cross-validation, which avoids leaking information from the validation folds.

from sklearn.pipeline import Pipeline

pipeline = []
pipeline.append(("ScalerET", Pipeline([("Scaler", StandardScaler()),
                                       ("ET", ExtraTreesClassifier())])))
pipeline.append(("ScalerGBM", Pipeline([("Scaler", StandardScaler()),
                                        ("GBM", GradientBoostingClassifier())])))
pipeline.append(("ScalerRF", Pipeline([("Scaler", StandardScaler()),
                                       ("RF", RandomForestClassifier())])))

names = []
results = []

for name, model in pipeline:
    kfold = KFold(n_splits=5, shuffle=True, random_state=42)
    result = cross_val_score(model, X, y, scoring='accuracy', cv=kfold)
    names.append(name)
    results.append(result)
    print("{} Mean:{:.4f}(Std:{:.4f})".format(name, result.mean(), result.std()))

Model Tuning

Once a promising model has been found, search over its hyperparameters, either exhaustively with GridSearchCV or by sampling with RandomizedSearchCV.

param_grid = {
    "C": [0.1, 0.3, 0.5, 0.7, 0.9, 1.0, 1.3, 1.5, 1.7, 2.0],
    "kernel": ["linear", "poly", "rbf", "sigmoid"]
}
model = SVC()
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring="accuracy", cv=kfold)
grid_result = grid.fit(X, y)
print("Best: {} using {}".format(grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_["mean_test_score"]
stds = grid_result.cv_results_["std_test_score"]
params = grid_result.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
    print("{} ({}) with {}".format(mean, stdev, param))

# Randomized hyperparameter search (random search, not stochastic gradient descent)
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import randint

param_distribs = {
    'n_estimators': randint(low=1, high=200),
    'max_features': randint(low=1, high=8),
}

# housing_prepared and housing_labels come from a separate housing regression
# example; substitute your own feature matrix and labels here.
forest_reg = RandomForestRegressor(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,
                                n_iter=10, cv=5, scoring='neg_mean_squared_error',
                                random_state=42)
rnd_search.fit(housing_prepared, housing_labels)
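
Because the scoring function is negative MSE, the per-candidate scores are easiest to read after converting them to RMSE; a small sketch, reusing the fitted rnd_search from above:

# Print each sampled parameter combination with its cross-validated RMSE
cvres = rnd_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)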
# SVM hyperparameter tuning
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVR
from scipy.stats import expon, reciprocal
# see https://docs.scipy.org/doc/scipy/reference/stats.html
# for `expon()` and `reciprocal()` documentation and more probability distribution functions.

# Note: gamma is ignored when kernel is "linear"
param_distribs = {
    'kernel': ['linear', 'rbf'],
    'C': reciprocal(20, 200000),
    'gamma': expon(scale=1.0),
}

svm_reg = SVR()
rnd_search = RandomizedSearchCV(svm_reg, param_distributions=param_distribs,
n_iter=50, cv=5, scoring='neg_mean_squared_error',
verbose=2, random_state=42)
rnd_search.fit(housing_prepared, housing_labels)
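
Once the search has finished, the winning configuration and its score can be read off the search object; a minimal sketch, assuming the fit above completed:

# Best hyperparameters and the corresponding cross-validated RMSE
print(rnd_search.best_params_)
print("Best CV RMSE: {:.4f}".format(np.sqrt(-rnd_search.best_score_)))
# The best model, refitted on the full training data (refit=True by default)
final_model = rnd_search.best_estimator_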