Dataset Splitting

Sampling strategies and splitting a dataset into training and test sets.

1. Random sampling: split the dataset into a training set and a test set

```python
# Method 1: use train_test_split from scikit-learn
from sklearn.model_selection import train_test_split

train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)

# Method 2: hash-based split, useful when the dataset keeps growing.
# Each instance's id is hashed, so an instance always lands in the same
# set across runs, and data that was once in the training set never
# leaks into the test set. id_column can also be built by combining
# several stable features.
import hashlib
import numpy as np

def test_set_check(identifier, test_ratio, hash=hashlib.md5):
    return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio

def split_train_test_by_id(data, test_ratio, id_column):
    ids = data[id_column]
    in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
    return data.loc[~in_test_set], data.loc[in_test_set]
```
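A quick usage sketch for `split_train_test_by_id`, assuming the `housing` DataFrame from above; `housing_with_id` and the `"id"` column are illustrative names. The row index works as an identifier only if new data is strictly appended and no row is ever deleted; otherwise an id built from stable features (such as the coordinates) is safer.

```python
# Use the row index as the identifier (assumes rows are only ever appended)
housing_with_id = housing.reset_index()  # adds an "index" column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")

# Or combine stable features into an id, e.g. longitude and latitude
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
```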

2. Stratified sampling

```python
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit

# Stratify the data by median_income: bucket it into 5 income categories
housing["income_cat"] = pd.cut(housing["median_income"],
                               bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
                               labels=[1, 2, 3, 4, 5])
# Count the number of instances in each stratum
housing["income_cat"].value_counts()
# Visualize the strata
housing["income_cat"].hist()

split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]

# The category proportions in the stratified test set should match
# those of the full dataset almost exactly
strat_test_set["income_cat"].value_counts() / len(strat_test_set)
housing["income_cat"].value_counts() / len(housing)

# Drop income_cat so the data is back to its original state
for set_ in (strat_train_set, strat_test_set):
    set_.drop("income_cat", axis=1, inplace=True)
```
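To see how much stratification helps, here is a small comparison sketch, assuming the variables defined above (run it before `income_cat` is dropped): it contrasts the `income_cat` proportions of the full dataset with those of the stratified test set and of a purely random test set. The stratified proportions should track the overall ones far more closely.

```python
import pandas as pd
from sklearn.model_selection import train_test_split

# NOTE: run this before income_cat is dropped above.
# A purely random test set, for contrast
_, random_test_set = train_test_split(housing, test_size=0.2, random_state=42)

comparison = pd.DataFrame({
    "overall": housing["income_cat"].value_counts() / len(housing),
    "stratified": strat_test_set["income_cat"].value_counts() / len(strat_test_set),
    "random": random_test_set["income_cat"].value_counts() / len(random_test_set),
}).sort_index()
print(comparison)
```

As a shortcut, `train_test_split` also accepts a `stratify` argument, e.g. `train_test_split(housing, test_size=0.2, stratify=housing["income_cat"], random_state=42)`, which performs a stratified split in a single call.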