sepal length
sepal width
petal length
petal width
From these 4 features we can predict which of the three species (iris-setosa, iris-versicolour, iris-virginica) a given iris belongs to.
0x2: Under-featuring
Let us first discuss under-featuring. Our dataset has 4 feature dimensions, and each of these 4 features is highly correlated with the target; in other words, all 4 are information-rich features:
```python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import load_iris
from sklearn.utils import shuffle

if __name__ == '__main__':
    # load data
    iris = load_iris()
    print("np.shape(iris.data):", np.shape(iris.data))
    # feature vectors; note that the iris samples are ordered by class,
    # so this plain 80/20 slice leaves only the last class in the test set
    X_train = iris.data[:int(len(iris.data) * 0.8)]
    X_test = iris.data[int(len(iris.data) * 0.8):]
    # labels
    Y_train = iris.target[:int(len(iris.data) * 0.8)]
    Y_test = iris.target[int(len(iris.data) * 0.8):]
    # shuffle within each split
    X_train, Y_train = shuffle(X_train, Y_train)
    X_test, Y_test = shuffle(X_test, Y_test)
    # the 4 original features
    X_train_vec = X_train[:, :4]
    X_test_vec = X_test[:, :4]
    # Pearson correlation of each feature against the target
    for i in range(4):
        print("Pearson Relevance X[{0}]:".format(i),
              np.corrcoef(X_train_vec[:, i], Y_train)[0, 1])
```
All 4 features show a Pearson correlation (in absolute value) above 0.5 with the target.
Now let us train naive Bayes models using only 1, 2, 3, and 4 of the features respectively, and compare their generalization and prediction performance:
```python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.utils import shuffle

def model_train_and_test(feature_cn):
    # use only the first feature_cn features
    X_train_vec = X_train[:, :feature_cn]
    X_test_vec = X_test[:, :feature_cn]
    # train model
    muNB.fit(X_train_vec, Y_train)
    # predict the test data
    y_predict = muNB.predict(X_test_vec)
    print("feature_cn:", feature_cn)
    print('accuracy is: {0}'.format(accuracy_score(Y_test, y_predict)))
    print('confusion matrix is:\n{0}'.format(confusion_matrix(Y_test, y_predict)))
    print(' ')

if __name__ == '__main__':
    # naive Bayes
    muNB = GaussianNB()
    # load data
    iris = load_iris()
    print("np.shape(iris.data):", np.shape(iris.data))
    # feature vectors
    X_train = iris.data[:int(len(iris.data) * 0.8)]
    X_test = iris.data[int(len(iris.data) * 0.8):]
    # labels
    Y_train = iris.target[:int(len(iris.data) * 0.8)]
    Y_test = iris.target[int(len(iris.data) * 0.8):]
    # shuffle within each split
    X_train, Y_train = shuffle(X_train, Y_train)
    X_test, Y_test = shuffle(X_test, Y_test)
    # test generalization and prediction with 1, 2, 3, and 4 features
    for n in (1, 2, 3, 4):
        model_train_and_test(n)
```
As the output shows, with only 1 feature the prediction accuracy on the test set is just 33.3%; as the number of features increases, the test accuracy climbs steadily.
Viewed from the Bayesian-network perspective, the naive Bayes model has the following structure: a single Class node with a directed edge to each feature node Xi, forming a star-shaped network.
The Xi nodes correspond to the features. Each Xi added to the network changes the probabilistic inference over the Class result, and the more Xi there are, the more accurate that inference becomes.
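As a reminder (this is the standard naive Bayes factorization, not something specific to the code above), the posterior decomposes into one likelihood factor per feature:

$$P(\text{Class} \mid X_1, \dots, X_n) \;\propto\; P(\text{Class}) \prod_{i=1}^{n} P(X_i \mid \text{Class})$$

so every additional feature node contributes one more factor to the inference over Class.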
This is also easy to understand from an information-theoretic angle: we can view P(Class | X1, ..., Xn) as a flow of information that reduces conditional entropy; the more evidence we provide, the lower, in principle, the remaining uncertainty about Class.
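The "in principle" hedge matters. What information theory guarantees (for the true distribution, not for an estimated model) is only that conditioning never increases entropy:

$$H(\text{Class} \mid X_1, \dots, X_n, X_{n+1}) \;\le\; H(\text{Class} \mid X_1, \dots, X_n)$$

with equality when $X_{n+1}$ carries no new information about Class. A model estimated from finite data can still get worse with extra features, which is exactly what the over-featuring experiments below demonstrate.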
From this we can draw the following conclusion:
Feature engineering must pay particular attention to the description integrity problem: when the feature dimensions are incomplete, no amount of additional data will substantially help the model. The probabilistic completeness of a sample set has to be guaranteed along two axes, "feature completeness" and "data completeness"; at bottom, both are facets of the same question of information completeness.
0x3: Over-featuring
Now, on top of the original 4 feature dimensions, we keep adding new useless features, i.e. features whose correlation with the target is very low.
```python
# -*- coding: utf-8 -*-
import random
import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.utils import shuffle

def feature_expand(feature_vec):
    # column_1 * column_2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] * feature_vec[:, 1]).reshape(-1, 1)))
    # 4 random (useless) features derived from column_1
    for _ in range(4):
        feature_vec = np.hstack((feature_vec, np.array([[random.uniform(0.0, i)] for i in feature_vec[:, 0]])))
    # 4 random (useless) features derived from column_2
    for _ in range(4):
        feature_vec = np.hstack((feature_vec, np.array([[random.uniform(0.0, i)] for i in feature_vec[:, 1]])))
    return feature_vec

def model_train_and_test(X_train, X_test, Y_train, Y_test, feature_cn):
    # use only the first feature_cn features
    X_train_vec = X_train[:, :feature_cn]
    X_test_vec = X_test[:, :feature_cn]
    # train model
    muNB.fit(X_train_vec, Y_train)
    # predict the test data
    y_predict = muNB.predict(X_test_vec)
    print("feature_cn:", feature_cn)
    print('accuracy is: {0}'.format(accuracy_score(Y_test, y_predict)))
    print('confusion matrix is:\n{0}'.format(confusion_matrix(Y_test, y_predict)))
    print(' ')

if __name__ == '__main__':
    # naive Bayes
    muNB = GaussianNB()
    # load data
    iris = load_iris()
    print("np.shape(iris.data):", np.shape(iris.data))
    # feature vectors
    X_train = iris.data[:int(len(iris.data) * 0.8)]
    X_test = iris.data[int(len(iris.data) * 0.8):]
    # labels
    Y_train = iris.target[:int(len(iris.data) * 0.8)]
    Y_test = iris.target[int(len(iris.data) * 0.8):]
    # shuffle within each split
    X_train, Y_train = shuffle(X_train, Y_train)
    X_test, Y_test = shuffle(X_test, Y_test)
    # expand features with junk columns
    X_train = feature_expand(X_train)
    X_test = feature_expand(X_test)
    # Pearson correlation of each feature against the target
    for i in range(X_train.shape[1]):
        print("Pearson Relevance X[{0}]:".format(i),
              np.corrcoef(X_train[:, i], Y_train)[0, 1])
    model_train_and_test(X_train, X_test, Y_train, Y_test, X_train.shape[1])
```
We used random() to simulate useless new features. As the output shows, the useless features not only fail to help the model, they actually lower its performance.
From this we can draw the following conclusion:
More features are not automatically better. A machine learning model is not a washing machine: you cannot simply dump in every feature you have, press your palms together, and hope the model will work magic and automatically sift out the useful ones. That said, techniques like dropout and regularization genuinely do improve model performance, and in essence they work the same way, by pruning or suppressing some features and thereby easing the damage junk features cause; the sketch below illustrates the idea.
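As a minimal illustration of that last point (our own sketch, not part of the original experiments, using L1-penalized logistic regression as the regularizer): when random junk columns are appended to the 4 real iris features, the L1 penalty typically drives the junk columns' coefficients to or near zero.

```python
# Sketch: L1 regularization suppressing junk features (illustrative, not from the original post).
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
iris = load_iris()
# append 4 pure-noise columns to the 4 real features
X = np.hstack((iris.data, rng.uniform(0.0, 1.0, size=(len(iris.data), 4))))
clf = LogisticRegression(penalty='l1', solver='liblinear', C=0.5)
clf.fit(X, iris.target)
# coefficients for the noise columns (indices 4..7) are mostly zeroed out
print(np.round(clf.coef_, 2))
```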
Of course, automatic feature engineering (autoFeature) may well mature as a technology in the future, but as data science practitioners we still need to understand for ourselves what feature engineering is for.
0x4: How feature processing affects model performance
"Feature processing" here means transforming the original features, whether by linear operations (stretching, rotation) or by combining them nonlinearly, to derive new features, for example:
- X_i * X_j
- X_i ^ 2
- X_i / X_j
- X_i + X_j
- X_i - X_j
In essence, the hidden layers of a deep neural network can be seen as exactly this kind of feature-processing operation. The one difference is that in a deep network the activation function contributes a nonlinear warp, but the underlying idea is the same.
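A tiny numpy sketch of that analogy (ours, with made-up weights rather than a trained network): each hidden unit is a learned recombination of the input features, passed through a nonlinearity.

```python
# Sketch: a hidden layer as feature processing (illustrative weights, not a trained model).
import numpy as np

x = np.array([5.1, 3.5, 1.4, 0.2])                # one iris sample: 4 original features
W = np.random.default_rng(0).normal(size=(8, 4))  # 8 hidden units, each a linear mix of the inputs
b = np.zeros(8)
h = np.maximum(0.0, W @ x + b)                    # ReLU supplies the nonlinear warp
print(h)  # 8 derived features, analogous to the X_i combinations listed above
```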
So the next question is: does feature processing affect model performance?
The precise answer: the impact of feature processing depends on how correlated the new features are with the target, and on what share of all features the bad ones occupy.
Let us unpack that statement with a few experiments. Below we simulate several typical scenarios and then give a summary conclusion:
1. The newly added feature has low correlation with the target, and this bad feature occupies a large share of all features
```python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.utils import shuffle

def feature_expand(feature_vec):
    # column_1 * column_2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] * feature_vec[:, 1]).reshape(-1, 1)))
    # column_1 / column_2
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] / feature_vec[:, 1]).reshape(-1, 1)))
    # column_3 * column_4
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 2] * feature_vec[:, 3]).reshape(-1, 1)))
    # column_4 * column_1
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 3] * feature_vec[:, 0]).reshape(-1, 1)))
    # column_1 ^ 2
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] ** 2).reshape(-1, 1)))
    # column_2 ^ 2
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 1] ** 2).reshape(-1, 1)))
    # column_3 ^ 2
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 2] ** 2).reshape(-1, 1)))
    # column_4 ^ 2
    # feature_vec = np.hstack((feature_vec, (feature_vec[:, 3] ** 2).reshape(-1, 1)))
    return feature_vec

def model_train_and_test(X_train, X_test, Y_train, Y_test, feature_cn):
    # use only the first feature_cn features
    X_train_vec = X_train[:, :feature_cn]
    X_test_vec = X_test[:, :feature_cn]
    # train model
    muNB.fit(X_train_vec, Y_train)
    # predict the test data
    y_predict = muNB.predict(X_test_vec)
    print("feature_cn:", feature_cn)
    print('accuracy is: {0}'.format(accuracy_score(Y_test, y_predict)))
    print('confusion matrix is:\n{0}'.format(confusion_matrix(Y_test, y_predict)))
    print(' ')

if __name__ == '__main__':
    # naive Bayes
    muNB = GaussianNB()
    # load data
    iris = load_iris()
    print("np.shape(iris.data):", np.shape(iris.data))
    # feature vectors
    X_train = iris.data[:int(len(iris.data) * 0.8)]
    X_test = iris.data[int(len(iris.data) * 0.8):]
    # labels
    Y_train = iris.target[:int(len(iris.data) * 0.8)]
    Y_test = iris.target[int(len(iris.data) * 0.8):]
    # shuffle within each split
    X_train, Y_train = shuffle(X_train, Y_train)
    X_test, Y_test = shuffle(X_test, Y_test)
    # expand features
    X_train = feature_expand(X_train)
    X_test = feature_expand(X_test)
    # Pearson correlation of each feature against the target
    for i in range(X_train.shape[1]):
        print("Pearson Relevance X[{0}]:".format(i),
              np.corrcoef(X_train[:, i], Y_train)[0, 1])
    # compare: without vs. with the new "column_1 * column_2" feature
    model_train_and_test(X_train, X_test, Y_train, Y_test, X_train.shape[1] - 1)
    model_train_and_test(X_train, X_test, Y_train, Y_test, X_train.shape[1])
```
In the code above we added one new feature dimension, "column_1 * column_2", and printed its Pearson correlation: only 0.15, a very poor feature. Moreover, this bad feature accounts for 1/5 of all features, which is not a small share.
In this situation the model's detection performance suffers and drops. The reason was explained earlier: through the multiplicative effect of the likelihood product, a bad feature's factor propagates into the final probability value. The sketch below makes the mechanism concrete.
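Here is a small standalone sketch (ours, with made-up Gaussian parameters, not the experiment above) of how one near-uninformative likelihood factor perturbs a naive Bayes posterior:

```python
# Sketch: one bad feature entering the naive Bayes likelihood product (illustrative numbers).
import numpy as np
from scipy.stats import norm

prior = np.array([0.5, 0.5])                 # two classes, equal priors
# good feature: class-conditional Gaussians are well separated
like_good = np.array([norm.pdf(5.8, loc=5.0, scale=0.4),
                      norm.pdf(5.8, loc=6.5, scale=0.4)])
# bad feature: class-conditional Gaussians almost identical, so the factor
# it contributes is driven by noise rather than by the true class
like_bad = np.array([norm.pdf(2.4, loc=3.0, scale=1.0),
                     norm.pdf(2.4, loc=3.2, scale=1.0)])

post = prior * like_good
print("good feature only :", post / post.sum())
post = prior * like_good * like_bad
print("bad feature added :", post / post.sum())
```

The shift the bad factor causes carries no information about the true class, which is why, averaged over samples, its effect on accuracy is negative.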
2. Among a batch of newly added features there are only a few bad ones, i.e. the share of bad features is low
```python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.utils import shuffle

def feature_expand(feature_vec):
    # column_1 * column_2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] * feature_vec[:, 1]).reshape(-1, 1)))
    # column_1 / column_2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] / feature_vec[:, 1]).reshape(-1, 1)))
    # column_3 * column_4
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 2] * feature_vec[:, 3]).reshape(-1, 1)))
    # column_4 * column_1
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 3] * feature_vec[:, 0]).reshape(-1, 1)))
    # column_1 ^ 2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 0] ** 2).reshape(-1, 1)))
    # column_2 ^ 2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 1] ** 2).reshape(-1, 1)))
    # column_3 ^ 2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 2] ** 2).reshape(-1, 1)))
    # column_4 ^ 2
    feature_vec = np.hstack((feature_vec, (feature_vec[:, 3] ** 2).reshape(-1, 1)))
    return feature_vec

def model_train_and_test(X_train, X_test, Y_train, Y_test, feature_cn):
    # use only the first feature_cn features
    X_train_vec = X_train[:, :feature_cn]
    X_test_vec = X_test[:, :feature_cn]
    # train model
    muNB.fit(X_train_vec, Y_train)
    # predict the test data
    y_predict = muNB.predict(X_test_vec)
    print("feature_cn:", feature_cn)
    print('accuracy is: {0}'.format(accuracy_score(Y_test, y_predict)))
    print('confusion matrix is:\n{0}'.format(confusion_matrix(Y_test, y_predict)))
    print(' ')

if __name__ == '__main__':
    # naive Bayes
    muNB = GaussianNB()
    # load data
    iris = load_iris()
    print("np.shape(iris.data):", np.shape(iris.data))
    # feature vectors
    X_train = iris.data[:int(len(iris.data) * 0.8)]
    X_test = iris.data[int(len(iris.data) * 0.8):]
    # labels
    Y_train = iris.target[:int(len(iris.data) * 0.8)]
    Y_test = iris.target[int(len(iris.data) * 0.8):]
    # shuffle within each split
    X_train, Y_train = shuffle(X_train, Y_train)
    X_test, Y_test = shuffle(X_test, Y_test)
    # expand features
    X_train = feature_expand(X_train)
    X_test = feature_expand(X_test)
    # Pearson correlation of each feature against the target
    for i in range(X_train.shape[1]):
        print("Pearson Relevance X[{0}]:".format(i),
              np.corrcoef(X_train[:, i], Y_train)[0, 1])
    model_train_and_test(X_train, X_test, Y_train, Y_test, X_train.shape[1])
```
In this scenario the bad feature "column_1 * column_2" is still present, but unlike in the previous scenario, all the other newly added features are good ones (each highly correlated with the target).
By the same product-of-factors reasoning, it is easy to see that the bad feature's influence on the final probability value gets "diluted", which reduces its impact on model performance.
From this we can draw the following conclusion:
The hidden layers of a deep neural network increase the number of features on a massive scale. In essence, a deep network obtains an enormous number of feature-dimension combinations through linear (matrix) transformations and nonlinear activation functions. Among them, we can expect, there will certainly be good features (high correlation) and just as certainly bad ones (low correlation).
But one thing we can be confident of: good features should appear far more often than bad ones, because every derived feature descends from the informative features at the input layer (an idea akin to genetic inheritance). So once enough new features have been added, the influence of the good features will, probabilistically, far outweigh that of the bad ones and cancel out their effect on model performance. This is one reason deep neural networks adapt so well.
To put it colloquially: if you own an ox cleaver, why not use it to kill chickens? The advantage of killing chickens with an ox cleaver is that whatever shows up, chicken or ox, you can adaptively make sure it gets killed.
3. The structural basis of over-featuring in different machine learning models
Redundant features and over-featuring are not uncommon in machine learning models, and they take different forms in different models, for example:
Source: https://www.cnblogs.com/LittleHann/p/11730458.html