Math Derivation + Pure Python Implementation of Machine Learning Algorithms: GBDT
Recommended by Datawhale
Author: louwill, Machine Learning Lab
After a gap of more than half a year, the machine learning algorithm derivation series finally has time to continue. In the previous 14 lectures, the author covered essentially all of the main single-model algorithms for supervised learning. Over the next 10 or so lectures, the author plans to finish the series with the ensemble learning models represented by GBDT, the probabilistic graphical models represented by the EM algorithm, CRF, and hidden Markov models, and the unsupervised learning algorithms represented by clustering and dimensionality reduction.
At each boosting iteration $t = 1, 2, \dots, T$, GBDT fits a new regression tree to the residuals of the current model $f_{t-1}(x)$:

1. For each sample $i = 1, 2, \dots, N$, compute the negative gradient, i.e., the residual:
$$r_{ti} = -\left[ \frac{\partial L\left(y_i, f(x_i)\right)}{\partial f(x_i)} \right]_{f(x) = f_{t-1}(x)}$$

2. Take the residuals obtained in the previous step as the samples' new target values, and use the data $\{(x_i, r_{ti})\}_{i=1,2,\dots,N}$ as the training data for the next tree, obtaining a new regression tree $h_t(x)$ whose corresponding leaf node regions are $R_{tj}$, $j = 1, 2, \dots, J$, where $J$ is the number of leaf nodes of regression tree $t$.

3. For each leaf region $j = 1, 2, \dots, J$, compute the best-fit value:
$$c_{tj} = \underset{c}{\arg\min} \sum_{x_i \in R_{tj}} L\left(y_i, f_{t-1}(x_i) + c\right)$$

4. Update the strong learner:
$$f_t(x) = f_{t-1}(x) + \sum_{j=1}^{J} c_{tj}\, I\left(x \in R_{tj}\right)$$
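With the squared-error loss used in the implementation below, the negative gradient reduces to the ordinary residual, which is why each tree simply fits the difference between the targets and the current predictions:
$$L\left(y_i, f(x_i)\right) = \frac{1}{2}\left(y_i - f(x_i)\right)^2 \;\Longrightarrow\; r_{ti} = -\frac{\partial L\left(y_i, f(x_i)\right)}{\partial f(x_i)}\bigg|_{f(x) = f_{t-1}(x)} = y_i - f_{t-1}(x_i)$$

The pure Python implementation below follows these steps: first a generic decision tree, then a regression tree, the loss functions, and finally the GBDT wrapper classes.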
import numpy as np

class TreeNode():
    def __init__(self, feature_i=None, threshold=None,
                 value=None, true_branch=None, false_branch=None):
        self.feature_i = feature_i          # index of the feature to split on
        self.threshold = threshold          # threshold value for the split
        self.value = value                  # prediction value if this node is a leaf
        self.true_branch = true_branch      # subtree where the split condition holds
        self.false_branch = false_branch    # subtree where it does not

class Tree(object):
    def __init__(self, min_samples_split=2, min_impurity=1e-7,
                 max_depth=float("inf"), loss=None):
        self.root = None                            # root node of the decision tree
        self.min_samples_split = min_samples_split  # minimum n of samples to justify a split
        self.min_impurity = min_impurity            # minimum impurity reduction to justify a split
        self.max_depth = max_depth                  # maximum depth to grow the tree to
        # impurity criterion (classification => info gain, regression => variance reduction)
        self._impurity_calculation = None
        # leaf value rule (classification tree: majority class; regression tree: mean of the values)
        self._leaf_value_calculation = None
        # whether y is one-dim or one-hot encoded (multi-dim)
        self.one_dim = None
        # loss function, set when the tree is used inside gradient boosting
        self.loss = loss

    def fit(self, X, y, loss=None):
        """ Build decision tree """
        self.one_dim = len(np.shape(y)) == 1
        self.root = self._build_tree(X, y)

    def _build_tree(self, X, y, current_depth=0):
        """ Recursive method which builds out the decision tree by splitting X and the
        respective y on the feature and threshold that best separate the data """
        largest_impurity = 0
        best_criteria, best_sets = None, None
        if len(np.shape(y)) == 1:
            y = np.expand_dims(y, axis=1)
        Xy = np.concatenate((X, y), axis=1)
        n_samples, n_features = np.shape(X)
        if n_samples >= self.min_samples_split and current_depth <= self.max_depth:
            for feature_i in range(n_features):               # try every feature...
                for threshold in np.unique(X[:, feature_i]):  # ...and every observed value
                    mask = Xy[:, feature_i] >= threshold
                    Xy1, Xy2 = Xy[mask], Xy[~mask]
                    if len(Xy1) > 0 and len(Xy2) > 0:
                        y1, y2 = Xy1[:, n_features:], Xy2[:, n_features:]
                        impurity = self._impurity_calculation(y, y1, y2)
                        if impurity > largest_impurity:
                            largest_impurity = impurity
                            best_criteria = {"feature_i": feature_i, "threshold": threshold}
                            best_sets = (Xy1[:, :n_features], y1, Xy2[:, :n_features], y2)
        if largest_impurity > self.min_impurity:
            # grow the two subtrees recursively
            true_branch = self._build_tree(best_sets[0], best_sets[1], current_depth + 1)
            false_branch = self._build_tree(best_sets[2], best_sets[3], current_depth + 1)
            return TreeNode(feature_i=best_criteria["feature_i"],
                            threshold=best_criteria["threshold"],
                            true_branch=true_branch, false_branch=false_branch)
        # no split improves impurity enough: create a leaf
        return TreeNode(value=self._leaf_value_calculation(y))

    def predict_value(self, x, tree=None):
        """ Do a recursive search down the tree and make a prediction of the data sample by the
        value of the leaf that we end up at """
        if tree is None:
            tree = self.root
        if tree.value is not None:  # reached a leaf
            return tree.value
        branch = tree.true_branch if x[tree.feature_i] >= tree.threshold else tree.false_branch
        return self.predict_value(x, branch)

    def predict(self, X):
        """ Predict samples one by one and return the set of predictions """
        return [self.predict_value(sample) for sample in X]

    def print_tree(self, tree=None, indent=" "):
        """ Utility to print out the tree structure; implementation omitted here """
        pass
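Note that the variance-reduction criterion in the subclass below relies on a calculate_variance helper that is not shown in this excerpt. A minimal NumPy stand-in, assumed here, is:

def calculate_variance(X):
    """ Per-column variance of dataset X (assumed helper, not in the original excerpt) """
    return np.var(X, axis=0)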
class RegressionTree(Tree):
    # split criterion: variance reduction
    def _calculate_variance_reduction(self, y, y1, y2):
        var_tot = calculate_variance(y)
        var_1 = calculate_variance(y1)
        var_2 = calculate_variance(y2)
        frac_1 = len(y1) / len(y)
        frac_2 = len(y2) / len(y)
        # Calculate the variance reduction
        variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2)
        return sum(variance_reduction)

    # leaf value: mean of the target values
    def _mean_of_y(self, y):
        value = np.mean(y, axis=0)
        return value if len(value) > 1 else value[0]

    # fit the regression tree
    def fit(self, X, y):
        self._impurity_calculation = self._calculate_variance_reduction
        self._leaf_value_calculation = self._mean_of_y
        super(RegressionTree, self).fit(X, y)
class Loss(object):
    def loss(self, y_true, y_pred):
        raise NotImplementedError()

    def gradient(self, y, y_pred):
        raise NotImplementedError()

    def acc(self, y, y_pred):
        return 0
class SquareLoss(Loss):
    def __init__(self): pass

    def loss(self, y, y_pred):
        return 0.5 * np.power((y - y_pred), 2)

    # gradient w.r.t. y_pred; its negative is the residual y - y_pred
    def gradient(self, y, y_pred):
        return -(y - y_pred)
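The GBDT class below also references a softmax loss for classification that this excerpt does not define. A minimal sketch, assumed here and consistent with SquareLoss's convention of taking the gradient with respect to the raw predicted scores, might be:

class SoftMaxLoss(Loss):
    # Assumed helper: gradient of cross-entropy w.r.t. the raw scores,
    # where y is one-hot and y_pred holds the accumulated raw scores.
    def gradient(self, y, y_pred):
        p = np.exp(y_pred) / np.sum(np.exp(y_pred), axis=1, keepdims=True)
        return p - y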
class GBDT(object):
    def __init__(self, n_estimators, learning_rate, min_samples_split,
                 min_impurity, max_depth, regression):
        # basic parameters
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.min_samples_split = min_samples_split
        self.min_impurity = min_impurity
        self.max_depth = max_depth
        self.regression = regression
        self.loss = SquareLoss()
        if not self.regression:
            self.loss = SoftMaxLoss()
        # classification also uses regression trees: the residuals are used to learn probabilities
        self.estimators = []
        for i in range(self.n_estimators):
            self.estimators.append(RegressionTree(min_samples_split=self.min_samples_split,
                                                  min_impurity=self.min_impurity,
                                                  max_depth=self.max_depth))
    # fitting
    def fit(self, X, y):
        # the first tree fits the original targets
        self.estimators[0].fit(X, y)
        y_pred = np.array(self.estimators[0].predict(X))
        # each later tree fits the gradient of the loss at the current prediction
        for i in range(1, self.n_estimators):
            gradient = self.loss.gradient(y, y_pred)
            self.estimators[i].fit(X, gradient)
            y_pred -= np.multiply(self.learning_rate, np.array(self.estimators[i].predict(X)))
    # prediction
    def predict(self, X):
        y_pred = np.array(self.estimators[0].predict(X))
        for i in range(1, self.n_estimators):
            y_pred -= np.multiply(self.learning_rate, np.array(self.estimators[i].predict(X)))
        if not self.regression:
            # Turn the raw scores into a probability distribution (softmax)
            y_pred = np.exp(y_pred) / np.expand_dims(np.sum(np.exp(y_pred), axis=1), axis=1)
            # Set label to the value that maximizes probability
            y_pred = np.argmax(y_pred, axis=1)
        return y_pred
# GBDT for regression
class GBDTRegressor(GBDT):
    def __init__(self, n_estimators=200, learning_rate=0.5, min_samples_split=2,
                 min_var_red=1e-7, max_depth=4, debug=False):
        super(GBDTRegressor, self).__init__(n_estimators=n_estimators,
                                            learning_rate=learning_rate,
                                            min_samples_split=min_samples_split,
                                            min_impurity=min_var_red,
                                            max_depth=max_depth,
                                            regression=True)
# GBDT for classification
class GBDTClassifier(GBDT):
    def __init__(self, n_estimators=200, learning_rate=.5, min_samples_split=2,
                 min_info_gain=1e-7, max_depth=2, debug=False):
        super(GBDTClassifier, self).__init__(n_estimators=n_estimators,
                                             learning_rate=learning_rate,
                                             min_samples_split=min_samples_split,
                                             min_impurity=min_info_gain,
                                             max_depth=max_depth,
                                             regression=False)

    def fit(self, X, y):
        # one-hot encode the labels before boosting
        y = to_categorical(y)
        super(GBDTClassifier, self).fit(X, y)
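The code above and the test script below also call to_categorical and shuffle_data, two utility functions not included in this excerpt. Minimal versions, written here as assumed stand-ins, could be:

def to_categorical(x, n_classes=None):
    """ One-hot encode an integer label vector (assumed helper) """
    x = x.astype('int')
    if not n_classes:
        n_classes = np.amax(x) + 1
    one_hot = np.zeros((x.shape[0], n_classes))
    one_hot[np.arange(x.shape[0]), x] = 1
    return one_hot

def shuffle_data(X, y, seed=None):
    """ Shuffle X and y with the same random permutation (assumed helper) """
    if seed:
        np.random.seed(seed)
    idx = np.arange(X.shape[0])
    np.random.shuffle(idx)
    return X[idx], y[idx]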
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt

# note: load_boston was removed in scikit-learn 1.2, so this requires an older version
boston = datasets.load_boston()
X, y = shuffle_data(boston.data, boston.target, seed=13)
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
model = GBDTRegressor()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

# Color map
cmap = plt.get_cmap('viridis')
mse = mean_squared_error(y_test, y_pred)
print("Mean Squared Error:", mse)

# Plot the results
m1 = plt.scatter(range(X_test.shape[0]), y_test, color=cmap(0.5), s=10)
m2 = plt.scatter(range(X_test.shape[0]), y_pred, color='black', s=10)
plt.suptitle("Regression Tree")
plt.title("MSE: %.2f" % mse, fontsize=10)
plt.xlabel('sample')
plt.ylabel('house price')
plt.legend((m1, m2), ("Test data", "Prediction"), loc='lower right')
plt.show()
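For completeness, the classifier can be exercised in the same way. A quick illustrative check on sklearn's iris dataset (not part of the original script; results will vary with the split) might look like:

iris = datasets.load_iris()
X, y = shuffle_data(iris.data, iris.target, seed=13)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

clf = GBDTClassifier(n_estimators=50)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
print("Accuracy:", np.mean(pred == y_test))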