The principle behind this implementation is simple: logistic regression fitted with gradient descent, plus a penalty (regularization) term. Test results are shown at the end.
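For reference, the update rule that the train_with_punish method below implements can be summarized as follows (a sketch written in terms of the code's own names; alpha is the learning rate and punish acts like an L2-style weight-decay coefficient):

    sigmoid(z) = 1 / (1 + e^(-z))
    theta <- theta - alpha * X^T * (sigmoid(X * theta) - y) - punish * theta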
First, let's look at the example implementation:
# coding=utf-8
from math import exp

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs  # the samples_generator path was removed in newer scikit-learn


def sigmoid(num):
    '''
    :param num: the scalar value to transform
    :return: the sigmoid of that value
    '''
    if isinstance(num, (int, float)):  # isinstance also accepts NumPy float scalars
        return 1.0 / (1 + exp(-1 * num))
    else:
        raise ValueError('only int or float data can compute sigmoid')


class logistic():
    def __init__(self, x, y):
        if type(x) == type(y) == list:
            self.x = np.array(x)
            self.y = np.array(y)
        elif type(x) == type(y) == np.ndarray:
            self.x = x
            self.y = y
        else:
            raise ValueError('input data error')

    def sigmoid(self, x):
        '''
        :param x: input vector
        :return: the vector obtained by applying sigmoid element-wise to x
        '''
        s = np.frompyfunc(lambda num: sigmoid(num), 1, 1)
        return s(x)

    def train_with_punish(self, alpha, errors, punish=0.0001):
        '''
        :param alpha: learning rate
        :param errors: error threshold below which iteration stops
        :param punish: penalty (regularization) coefficient
        :return:
        '''
        self.punish = punish
        dimension = self.x.shape[1]
        self.theta = np.random.random(dimension)
        compute_error = 100000000
        times = 0
        while compute_error > errors:
            res = np.dot(self.x, self.theta)
            delta = self.sigmoid(res) - self.y
            self.theta = self.theta - alpha * np.dot(self.x.T, delta) - punish * self.theta  # gradient descent step with the penalty term
            compute_error = np.sum(delta)  # sum of signed residuals; np.sum(np.abs(delta)) would be a stricter criterion
            times += 1

    def predict(self, x):
        '''
        :param x: a new, unlabeled feature vector
        :return: the class decided from the learned parameters
        '''
        x = np.array(x)
        if self.sigmoid(np.dot(x, self.theta)) > 0.5:
            return 1
        else:
            return 0


def test1():
    '''
    Runs a test and draws a plot to show the result.
    :return:
    '''
    x, y = make_blobs(n_samples=200, centers=2, n_features=2, random_state=0, center_box=(10, 20))
    x1 = []
    y1 = []
    x2 = []
    y2 = []
    for i in range(len(y)):
        if y[i] == 0:
            x1.append(x[i][0])
            y1.append(x[i][1])
        elif y[i] == 1:
            x2.append(x[i][0])
            y2.append(x[i][1])
    # The loop above only splits the generated samples into the two classes for plotting
    p = logistic(x, y)
    p.train_with_punish(alpha=0.00001, errors=0.005, punish=0.01)  # learning rate 0.00001, error threshold 0.005, penalty coefficient 0.01
    x_test = np.arange(10, 20, 0.01)
    y_test = (-1 * p.theta[0] / p.theta[1]) * x_test  # decision boundary: theta[0]*x + theta[1]*y = 0
    plt.plot(x_test, y_test, c='g', label='logistic_line')
    plt.scatter(x1, y1, c='r', label='positive')
    plt.scatter(x2, y2, c='b', label='negative')
    plt.legend(loc=2)
    plt.title('punish value = ' + str(p.punish))
    plt.show()


if __name__ == '__main__':
    test1()

The result of running the code is shown in the figure below.

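Note that test1 only draws the decision line and never calls predict. As a minimal usage sketch (the query point [15, 15] is an arbitrary illustrative value, not from the original article), a trained model could classify a new sample like this, assuming it runs in the same file as the class above:

    # Train on the same synthetic data, then classify one new point
    x, y = make_blobs(n_samples=200, centers=2, n_features=2, random_state=0, center_box=(10, 20))
    model = logistic(x, y)
    model.train_with_punish(alpha=0.00001, errors=0.005, punish=0.01)
    print(model.predict([15, 15]))  # prints 1 or 0, depending on which side of the learned boundary the point falls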
Summary
That is all for this article. I hope its content is of some help to your study or work. If you have any questions, feel free to leave a comment, and thank you for supporting 武林网.