机器学习算法代码

2023-11-07

BPNN

import math
import random

random.seed(0)


def rand(a, b):
    """Return a uniform random float in the half-open interval [a, b)."""
    width = b - a
    return a + width * random.random()


def make_matrix(m, n, fill=0.0):
    """Build an m-row by n-column matrix (list of lists) filled with `fill`."""
    return [[fill] * n for _ in range(m)]


def sigmoid(x):
    """Logistic function mapping any real x into (0, 1)."""
    exp_neg = math.exp(-x)
    return 1.0 / (1.0 + exp_neg)


def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its OUTPUT value x."""
    return (1 - x) * x


class BPNeuralNetwork:
    """Three-layer (input/hidden/output) feed-forward network trained by
    plain back-propagation with a momentum term.

    Fix: train() now returns the accumulated error of the final epoch —
    the original computed it every epoch and silently discarded it.
    """

    def __init__(self):
        # layer sizes (input_n gains one extra bias cell in setup())
        self.input_n = 0
        self.hidden_n = 0
        self.output_n = 0
        # per-layer activations
        self.input_cells = []
        self.hidden_cells = []
        self.output_cells = []
        # weight matrices: input->hidden and hidden->output
        self.input_weights = []
        self.output_weights = []
        # previous weight deltas, reused as momentum terms
        self.input_correction = []
        self.output_correction = []

    def setup(self, ni, nh, no):
        """Size the network: ni inputs (+1 bias cell), nh hidden units, no outputs."""
        self.input_n = ni + 1  # +1 for the always-1.0 bias cell
        self.hidden_n = nh
        self.output_n = no
        # init cells
        self.input_cells = [1.0] * self.input_n
        self.hidden_cells = [1.0] * self.hidden_n
        self.output_cells = [1.0] * self.output_n
        # init weights
        self.input_weights = make_matrix(self.input_n, self.hidden_n)
        self.output_weights = make_matrix(self.hidden_n, self.output_n)
        # random init: small range into the hidden layer, wider for outputs
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                self.input_weights[i][h] = rand(-0.2, 0.2)
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                self.output_weights[h][o] = rand(-2.0, 2.0)
        # init correction (momentum) matrices
        self.input_correction = make_matrix(self.input_n, self.hidden_n)
        self.output_correction = make_matrix(self.hidden_n, self.output_n)

    def predict(self, inputs):
        """Forward pass; returns a copy of the output-layer activations."""
        # activate input layer (the last cell stays 1.0 as bias)
        for i in range(self.input_n - 1):
            self.input_cells[i] = inputs[i]
        # activate hidden layer
        for j in range(self.hidden_n):
            total = 0.0
            for i in range(self.input_n):
                total += self.input_cells[i] * self.input_weights[i][j]
            self.hidden_cells[j] = sigmoid(total)
        # activate output layer
        for k in range(self.output_n):
            total = 0.0
            for j in range(self.hidden_n):
                total += self.hidden_cells[j] * self.output_weights[j][k]
            self.output_cells[k] = sigmoid(total)
        return self.output_cells[:]

    def back_propagate(self, case, label, learn, correct):
        """One training step on (case, label); returns the half-squared error.

        learn: learning rate; correct: momentum factor applied to the
        previous step's weight change.
        """
        # feed forward
        self.predict(case)
        # output layer deltas
        output_deltas = [0.0] * self.output_n
        for o in range(self.output_n):
            error = label[o] - self.output_cells[o]
            output_deltas[o] = sigmoid_derivative(self.output_cells[o]) * error
        # hidden layer deltas (error pushed back through output weights)
        hidden_deltas = [0.0] * self.hidden_n
        for h in range(self.hidden_n):
            error = 0.0
            for o in range(self.output_n):
                error += output_deltas[o] * self.output_weights[h][o]
            hidden_deltas[h] = sigmoid_derivative(self.hidden_cells[h]) * error
        # update output weights: gradient step + momentum
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                change = output_deltas[o] * self.hidden_cells[h]
                self.output_weights[h][o] += learn * change + correct * self.output_correction[h][o]
                self.output_correction[h][o] = change
        # update input weights
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                change = hidden_deltas[h] * self.input_cells[i]
                self.input_weights[i][h] += learn * change + correct * self.input_correction[i][h]
                self.input_correction[i][h] = change
        # global error: half the sum of squared residuals
        error = 0.0
        for o in range(len(label)):
            error += 0.5 * (label[o] - self.output_cells[o]) ** 2
        return error

    def train(self, cases, labels, limit=10000, learn=0.05, correct=0.1):
        """Run `limit` epochs over the whole data set.

        Returns the accumulated error of the last epoch (fix: the
        original computed this value but returned nothing).
        """
        error = 0.0
        for j in range(limit):
            error = 0.0
            for i in range(len(cases)):
                label = labels[i]
                case = cases[i]
                error += self.back_propagate(case, label, learn, correct)
        return error

    def test(self):
        """Demo: learn XOR and print the four predictions."""
        cases = [
            [0, 0],
            [0, 1],
            [1, 0],
            [1, 1],
        ]
        labels = [[0], [1], [1], [0]]
        self.setup(2, 5, 1)
        self.train(cases, labels, 10000, 0.05, 0.1)
        for case in cases:
            print(self.predict(case))


if __name__ == '__main__':
    # Demo: train the network on XOR and print its four predictions.
    nn = BPNeuralNetwork()
    nn.test()

CNN

# 2-D convolution (valid, stride-1) demo data: 5x5 feature map, 3x3 kernel.
import numpy as np
# NOTE(review): the name `input` shadows the Python builtin of the same name.
input = np.array([[1,1,1,0,0],[0,1,1,1,0],[0,0,1,1,1],[0,0,1,1,0],[0,1,1,0,0]])
kernel = np.array([[1,0,1],[0,1,0],[1,0,1]])
# print(input.shape,kernel.shape)

def my_conv(input, kernel):
    """Valid (no padding, stride 1) 2-D cross-correlation of `input` with `kernel`.

    Generalized: works for any square kernel no larger than the square
    input (the original delegated to a helper hard-coded to 3x3 kernels).
    Returns a float32 array of shape (len(input)-len(kernel)+1,) * 2.
    """
    input = np.asarray(input)
    kernel = np.asarray(kernel)
    k = len(kernel)
    output_size = len(input) - k + 1
    res = np.zeros([output_size, output_size], np.float32)
    for i in range(output_size):
        for j in range(output_size):
            # elementwise product of the window with the kernel, then sum
            res[i][j] = np.sum(input[i:i + k, j:j + k] * kernel)
    return res

def compute_conv(input, kernel, i, j):
    """Sum of the elementwise product between `kernel` and the window of
    `input` whose top-left corner is (i, j) — one output pixel of a
    valid convolution.

    Fix: iterates over the kernel's actual dimensions instead of a
    hard-coded 3x3 (`range(3)`), so non-3x3 kernels work too.
    """
    res = 0
    for di in range(len(kernel)):
        for dj in range(len(kernel[0])):
            res += input[i + di][j + dj] * kernel[di][dj]
    return res
print(my_conv(input, kernel))  # show the 3x3 valid-convolution result

RNN

import copy
import numpy as np

def rand(lower, upper):
    """Draw one uniform random float from [lower, upper)."""
    span = upper - lower
    return lower + span * np.random.random()


def sigmoid(x):
    """Vectorized logistic sigmoid (accepts scalars or numpy arrays)."""
    denom = 1 + np.exp(-x)
    return 1 / denom


def sigmoid_derivative(y):
    """Sigmoid derivative given the sigmoid OUTPUT y: y * (1 - y)."""
    return (1 - y) * y


def make_mat(m, n, fill=0.0):
    """Return an (m, n) numpy array filled with `fill`.

    (The original comment claimed "n * m"; the shape is actually (m, n).)
    """
    rows = [[fill for _ in range(n)] for _ in range(m)]
    return np.array(rows)


def make_rand_mat(m, n, lower=-1, upper=1):
    """Return an (m, n) array of independent uniform samples in [lower, upper).

    Bug fix: the original built each row as `[rand(lower, upper)] * n`,
    which repeats ONE sample n times — every entry in a row was identical,
    so the "random" weight matrices had rank-deficient structure.
    """
    return np.random.uniform(lower, upper, size=(m, n))


def int_to_bin(x, dim=0):
    """Binary string of a non-negative int x, left-padded with zeros to `dim`.

    If the plain binary form is already `dim` chars or longer, it is
    returned unpadded.
    """
    return bin(x)[2:].zfill(dim)


class RNN:
    """Minimal recurrent network (one hidden layer unrolled over `dim` steps),
    used below to learn 8-bit binary addition.

    Sequences are consumed from index dim-1 down to 0, i.e. the
    least-significant bit first for the binary-addition demo.
    """

    def __init__(self):
        # layer sizes, populated by setup()
        self.input_n = 0
        self.hidden_n = 0
        self.output_n = 0
        self.input_weights = []  # (input, hidden)
        self.output_weights = []  # (hidden, output)
        self.hidden_weights = []  # (hidden, hidden)

    def setup(self, ni, nh, no):
        """Allocate randomly initialized weight matrices for the given layer sizes."""
        self.input_n = ni
        self.hidden_n = nh
        self.output_n = no
        self.input_weights = make_rand_mat(self.input_n, self.hidden_n)
        self.output_weights = make_rand_mat(self.hidden_n, self.output_n)
        self.hidden_weights = make_rand_mat(self.hidden_n, self.hidden_n)

    def predict(self, case, dim=0):
        """Forward pass over `dim` time steps.

        case: one bit-array per input line; returns an array of rounded
        0/1 outputs, one per position.
        """
        guess = np.zeros(dim)
        hidden_layer_history = [np.zeros(self.hidden_n)]

        for i in range(dim):
            # this step's inputs, taken right-to-left (LSB first)
            x = np.array([[c[dim - i - 1] for c in case]])

            hidden_layer = sigmoid(np.dot(x, self.input_weights) + np.dot(hidden_layer_history[-1], self.hidden_weights))
            output_layer = sigmoid(np.dot(hidden_layer, self.output_weights))
            guess[dim - i - 1] = np.round(output_layer[0][0])  # if you don't like int, change it

            hidden_layer_history.append(copy.deepcopy(hidden_layer))

        return guess

    def do_train(self, case, label, dim=0, learn=0.1):
        """One sample of back-propagation-through-time training.

        Returns (guess, error): the rounded per-step outputs and the summed
        absolute output error over the sequence (a 1-element array).
        """
        input_updates = np.zeros_like(self.input_weights)
        output_updates = np.zeros_like(self.output_weights)
        hidden_updates = np.zeros_like(self.hidden_weights)

        guess = np.zeros_like(label)
        error = 0

        output_deltas = []
        hidden_layer_history = [np.zeros(self.hidden_n)]

        # forward pass: record hidden states and output deltas per step
        for i in range(dim):
            x = np.array([[c[dim - i - 1] for c in case]])
            y = np.array([[label[dim - i - 1]]]).T

            hidden_layer = sigmoid(np.dot(x, self.input_weights) + np.dot(hidden_layer_history[-1], self.hidden_weights))
            output_layer = sigmoid(np.dot(hidden_layer, self.output_weights))

            output_error = y - output_layer
            output_deltas.append(output_error * sigmoid_derivative(output_layer))
            error += np.abs(output_error[0])

            guess[dim - i - 1] = np.round(output_layer[0][0])

            hidden_layer_history.append(copy.deepcopy(hidden_layer))

        # backward pass: propagate deltas from the last step to the first
        future_hidden_layer_delta = np.zeros(self.hidden_n)
        for i in range(dim):
            x = np.array([[c[i] for c in case]])
            hidden_layer = hidden_layer_history[-i - 1]
            prev_hidden_layer = hidden_layer_history[-i - 2]

            output_delta = output_deltas[-i - 1]
            # hidden delta = next step's recurrent error + this step's output error
            hidden_delta = (future_hidden_layer_delta.dot(self.hidden_weights.T) +
                             output_delta.dot(self.output_weights.T)) * sigmoid_derivative(hidden_layer)

            output_updates += np.atleast_2d(hidden_layer).T.dot(output_delta)
            hidden_updates += np.atleast_2d(prev_hidden_layer).T.dot(hidden_delta)
            input_updates += x.T.dot(hidden_delta)

            future_hidden_layer_delta = hidden_delta

        # apply the accumulated (not averaged) gradient updates
        self.input_weights += input_updates * learn
        self.output_weights += output_updates * learn
        self.hidden_weights += hidden_updates * learn

        return guess, error

    def train(self, cases, labels, dim=0, learn=0.1, limit=1000):
        """Run do_train over the whole data set `limit` times."""
        for i in range(limit):
            for j in range(len(cases)):
                case = cases[j]
                label = labels[j]
                self.do_train(case, label, dim=dim, learn=learn)

    def test(self):
        """Demo: learn 8-bit binary addition, printing progress every 1000 samples."""
        self.setup(2, 16, 1)
        for i in range(20000):
            a_int = int(rand(0, 127))
            a = int_to_bin(a_int, dim=8)
            a = np.array([int(t) for t in a])

            b_int = int(rand(0, 127))
            b = int_to_bin(b_int, dim=8)
            b = np.array([int(t) for t in b])

            c_int = a_int + b_int
            c = int_to_bin(c_int, dim=8)
            c = np.array([int(t) for t in c])

            guess, error = self.do_train([a, b], c, dim=8)

            if i % 1000 == 0:
                print("Error:" + str(error))
                print("Predict:" + str(guess))
                print("True:" + str(c))

                # convert the predicted bit vector back to an integer
                out = 0
                for index, x in enumerate(reversed(guess)):
                    out += x * pow(2, index)
                print(str(a_int) + " + " + str(b_int) + " = " + str(out))

                result = str(self.predict([a, b], dim=8))
                print(result)

                print("===============")

if __name__ == '__main__':
    # Demo: train the RNN on 8-bit binary addition and print progress.
    nn = RNN()
    nn.test()

LR

import numpy as np

# Data loading
def loadData(fileName):
    """Load comma-separated samples from `fileName`.

    Each line is expected to look like "x0,x1,label".  Returns
    (dataList, labelList); labels are taken from column index 2.

    Fix: the file is now closed via a context manager — the original
    opened it and never closed it.
    NOTE(review): only column 0 is kept as a feature (curLine[0:1]) even
    though the label sits at index 2 — confirm whether [0:2] was intended.
    """
    dataList = []
    labelList = []
    with open(fileName, 'r') as fr:
        for line in fr.readlines():
            curLine = line.strip().split(',')
            labelList.append(float(curLine[2]))
            dataList.append([float(num) for num in curLine[0:1]])
    return dataList, labelList

# LR prediction
def predict(w, x):
    """Classify sample x with LR weights w: 1 if P(y=1|x) >= 0.5 else 0.

    Fix: uses a numerically stable sigmoid.  The original computed
    exp(wx) / (1 + exp(wx)), which overflows to inf/inf = nan for large
    positive wx, making the comparison fail and return 0 for the most
    confidently positive samples.
    """
    wx = np.dot(w, x)
    if wx >= 0:
        P1 = 1.0 / (1.0 + np.exp(-wx))
    else:
        z = np.exp(wx)
        P1 = z / (1.0 + z)
    if P1 >= 0.5:
        return 1
    return 0

# Gradient-descent training
def GD(trainDataList, trainLabelList, iter=30):
    """Train logistic-regression weights by per-sample gradient steps.

    Returns w of length n_features + 1 (bias folded in as a trailing 1).

    Fixes: the original appended the bias term to the caller's rows in
    place (mutating the argument); we now build a local augmented copy.
    The sigmoid is also computed stably to avoid exp overflow.
    NOTE: parameter name `iter` shadows the builtin but is kept for
    interface compatibility.
    """
    data = np.array([list(row) + [1] for row in trainDataList], dtype=float)
    w = np.zeros(data.shape[1])
    alpha = 0.001  # learning rate
    for _ in range(iter):
        for j in range(data.shape[0]):
            xi = data[j]
            wx = np.dot(w, xi)
            # numerically stable sigmoid
            if wx >= 0:
                p1 = 1.0 / (1.0 + np.exp(-wx))
            else:
                e = np.exp(wx)
                p1 = e / (1.0 + e)
            w += alpha * (trainLabelList[j] - p1) * xi
    return w

# Evaluation
def test(testDataList, testLabelList, w):
    """Return classification accuracy of weights w on the test set.

    Fix: the original appended the bias term to the caller's rows in
    place; the augmented sample is now built locally per row, leaving
    the caller's data untouched.
    """
    errorCnt = 0
    for i in range(len(testDataList)):
        xi = list(testDataList[i]) + [1]  # bias term appended locally
        if testLabelList[i] != predict(w, xi):
            errorCnt += 1
    return 1 - errorCnt / len(testDataList)

# Entry point: train on train.txt, then report accuracy on test.txt.
if __name__ == '__main__':
    trainData, trainLabel = loadData('../data/train.txt')
    testData, testLabel = loadData('../data/test.txt')
    w = GD(trainData, trainLabel)
    accuracy = test(testData, testLabel, w)
    print('the accuracy is:', accuracy)
  • LR实现2
import numpy as np

# Logistic sigmoid used by the LR model below.
def sigmoid(x):
    """Map x (scalar or ndarray) through 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))

# Logistic-regression classifier (full-batch gradient descent).
class LogisticRegression:
    """Binary logistic regression trained with batch gradient descent."""

    def __init__(self, lr=0.01, num_iters=1000):
        self.lr = lr                # learning rate
        self.num_iters = num_iters  # number of gradient steps
        self.w = None               # weight vector, set by fit()
        self.b = None               # bias term, set by fit()

    def fit(self, X, y):
        """Learn w and b from X of shape (n_samples, n_features) and 0/1 labels y."""
        n_samples, n_features = X.shape

        # start from all-zero parameters
        self.w = np.zeros(n_features)
        self.b = 0

        for _ in range(self.num_iters):
            # forward: probabilities for the whole batch
            h = 1 / (1 + np.exp(-(np.dot(X, self.w) + self.b)))
            residual = h - y
            # gradients averaged over the batch
            dw = np.dot(X.T, residual) * (1 / n_samples)
            db = np.sum(residual) * (1 / n_samples)
            self.w -= self.lr * dw
            self.b -= self.lr * db

    def predict(self, X):
        """Return hard 0/1 predictions (probabilities rounded)."""
        h = 1 / (1 + np.exp(-(np.dot(X, self.w) + self.b)))
        return np.round(h)

线性回归

import numpy as np
import matplotlib.pyplot as plt

# Training data: column 0 is the feature, column 1 is the regression target.
x = np.array([[1, 5.56], [2, 5.70], [3, 5.91], [4, 6.40],[5, 6.80],
              [6, 7.05], [7, 8.90], [8, 8.70],[9, 9.00], [10, 9.05]])
m, n = np.shape(x)
# Design matrix: feature column plus a trailing column of ones (bias term).
x_data = np.ones((m, n))
x_data[:, :-1] = x[:, :-1]
y_data = x[:, -1]  # targets
m, n = np.shape(x_data)
theta = np.ones(n)  # initial parameters [slope, intercept]

def gradientDescent(iter, x, y, w, alpha):
    """Batch gradient descent for linear regression.

    Args:
        iter: number of iterations.
        x: design matrix (n_samples, n_params), bias column included.
        y: target vector.
        w: initial parameter vector.
        alpha: learning rate.
    Returns the learned parameter vector.

    Fixes: the sample count is now taken from the data instead of the
    module-level global `m` (which broke the function when imported),
    and the reported cost is SSE / (2m) — the original computed
    (1/2) * m * SSE, scaling the printed loss by m^2.
    """
    n_samples = len(y)
    x_train = x.transpose()
    for i in range(0, iter):
        pre = np.dot(x, w)
        loss = pre - y
        gradient = np.dot(x_train, loss) / n_samples
        w = w - alpha * gradient
        # mean-squared-error / 2 after the update step
        cost = np.sum(np.square(np.dot(x, w) - y)) / (2 * n_samples)
        print("第{}次梯度下降损失为: {}".format(i, round(cost, 2)))
    return w


# Fit the model, then visualize the fitted line against the raw points.
result = gradientDescent(1000, x_data, y_data, theta, 0.01)
y_pre = np.dot(x_data, result)
print("线性回归模型 w: ", result)

plt.rc('font', family='Arial Unicode MS', size=14)  # a font with CJK glyphs for the labels
plt.scatter(x[:, 0], x[:, 1], color='b', label='训练数据')
plt.plot(x[:, 0], y_pre, color='r', label='预测数据')
plt.xlabel('x')
plt.ylabel('y')
plt.title('线性回归预测(梯度下降)')
plt.legend()
plt.show()

K-Means

[Figures from the original post were lost in text extraction.]

import numpy as np
from matplotlib import pyplot
# %matplotlib inline  (Jupyter-only magic; it is a syntax error in a plain .py file)

class K_Means(object):
    """Naive k-means clustering.

    k: number of clusters; tolerance: per-center relative-movement
    threshold (in percent) for early stopping; max_iter: iteration cap.
    """

    def __init__(self, k=2, tolerance=0.0001, max_iter=300):
        self.k_ = k
        self.tolerance_ = tolerance
        self.max_iter_ = max_iter

    def fit(self, data):
        """Cluster `data` (array of points).

        Results land in self.centers_ (cluster index -> center) and
        self.clf_ (cluster index -> list of member samples).
        """
        self.centers_ = {}
        for i in range(self.k_):
            # deterministic init: the first k samples become the centers
            self.centers_[i] = data[i]

        for _ in range(self.max_iter_):
            self.clf_ = {i: [] for i in range(self.k_)}

            # assignment step: each sample joins its nearest center
            for feature in data:
                distances = [np.linalg.norm(feature - self.centers_[c])
                             for c in self.centers_]
                classification = distances.index(min(distances))
                self.clf_[classification].append(feature)

            prev_centers = dict(self.centers_)
            # update step: each center moves to the mean of its members
            # NOTE(review): an empty cluster yields a nan center (same as original)
            for c in self.clf_:
                self.centers_[c] = np.average(self.clf_[c], axis=0)

            # Converged when every center's relative movement is within
            # tolerance.  Fix: the original summed SIGNED percentage
            # changes, which can cancel out (or divide by zero) and
            # report convergence prematurely.
            optimized = True
            for c in self.centers_:
                org = np.asarray(prev_centers[c], dtype=float)
                cur = np.asarray(self.centers_[c], dtype=float)
                denom = np.where(org == 0, 1.0, org)  # guard zero coordinates
                movement = np.sum(np.abs((cur - org) / denom * 100.0))
                if movement > self.tolerance_:
                    optimized = False
            if optimized:
                break

    def predict(self, p_data):
        """Return the index of the learned center nearest to p_data."""
        distances = [np.linalg.norm(p_data - self.centers_[c]) for c in self.centers_]
        return distances.index(min(distances))


if __name__ == '__main__':
    x = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
    k_means = K_Means(k=2)
    k_means.fit(x)
    print(k_means.centers_)
    # draw the learned cluster centers
    for center in k_means.centers_:
        pyplot.scatter(k_means.centers_[center][0], k_means.centers_[center][1], marker='*', s=350)

    # draw the training points, colored by assigned cluster
    for cat in k_means.clf_:
        for point in k_means.clf_[cat]:
            pyplot.scatter(point[0], point[1], c=('r' if cat == 0 else 'b'))

    # classify and draw two new points.
    # Fix: the original called k_means.predict(predict) — passing the
    # whole list instead of the individual point `feature`, so both
    # points were classified by the list's aggregate distance.
    predict = [[2, 1], [6, 9]]
    for feature in predict:
        cat = k_means.predict(feature)
        pyplot.scatter(feature[0], feature[1], c=('r' if cat == 0 else 'b'), marker='x')

    pyplot.show()

AUC

  • AUC 正负样本对比实现(n^2)
    [Figure from the original post omitted in text extraction.]
def AUC(label, pre):
    """O(n^2) AUC: fraction of (positive, negative) pairs ranked correctly.

    A pair counts 1 when the positive's score is higher, 0.5 on a tie.
    label: 0/1 class labels; pre: predicted scores (parallel lists).
    """
    pos = [i for i, lab in enumerate(label) if lab == 1]
    neg = [i for i, lab in enumerate(label) if lab == 0]

    auc = 0
    for i in pos:
        for j in neg:
            if pre[i] > pre[j]:
                auc += 1
            elif pre[i] == pre[j]:
                auc += 0.5

    return auc / (len(pos) * len(neg))

if __name__ == '__main__':
    label = [1,0,0,0,1,0,1,1]
    pre = [0.9, 0.8, 0.3, 0.1, 0.4, 0.9, 0.66, 0.7]
    print(AUC(label, pre))

    # cross-check against sklearn's trapezoidal ROC AUC
    from sklearn.metrics import roc_curve, auc
    fpr, tpr, th = roc_curve(label, pre , pos_label=1)
    print('sklearn', auc(fpr, tpr))
  • 根据rank公式计算(nlogn)
    [Figure from the original post omitted in text extraction.]
# AUC rank公式实现
import numpy as np
from sklearn.metrics import roc_auc_score

def calc_auc(y_labels, y_scores):
    """AUC via the rank-sum formula in O(n log n).

    auc = (sum of positive ranks - n_pos*(n_pos+1)/2) / (n_pos * n_neg),
    with 1-based ranks taken in ascending score order.  Expects numpy
    arrays (uses elementwise `y_labels == 1`).

    Fix: the result is now returned — the original only printed it and
    returned None, so callers could not use the value.
    NOTE(review): tied scores get arbitrary rank order here, so the
    result can differ slightly from tie-averaged AUC when duplicates exist.
    """
    f = list(zip(y_scores, y_labels))
    rank = [lab for _, lab in sorted(f, key=lambda pair: pair[0])]
    rankList = [i + 1 for i in range(len(rank)) if rank[i] == 1]
    pos_cnt = np.sum(y_labels == 1)
    neg_cnt = np.sum(y_labels == 0)
    auc = (np.sum(rankList) - pos_cnt * (pos_cnt + 1) / 2) / (pos_cnt * neg_cnt)
    print(auc)  # kept for compatibility with the original's console output
    return auc
 
 
def get_score():
    """Generate 100 random (label, score) pairs: labels in {0, 1},
    scores uniform in [0, 1).  Returns (y_labels, y_scores) arrays."""
    size = 100
    y_labels = np.zeros(size)
    y_scores = np.zeros(size)
    for idx in range(size):
        y_labels[idx] = np.random.choice([0, 1])
        y_scores[idx] = np.random.random()
    return y_labels, y_scores
 
 
if __name__ == '__main__':
    y_labels, y_scores = get_score()
    # Compare sklearn's AUC with the rank-formula implementation above
    print('sklearn AUC:', roc_auc_score(y_labels, y_scores))
    calc_auc(y_labels, y_scores)
  • AUC 直方图近似实现(n)
import numpy as np
from sklearn.metrics import roc_curve
from sklearn.metrics import auc

# --- histogram-approximation AUC, O(n + n_bins) ---
def auc_calculate(labels, preds, n_bins=100):
    """Approximate AUC by bucketing scores into n_bins histogram bins.

    For each bin, positives win against all negatives in strictly lower
    bins and get half credit against negatives in the same bin.
    Assumes scores lie in [0, 1].

    Fix: a prediction of exactly 1.0 used to produce bin index n_bins,
    one past the end of the histograms (IndexError); the bin index is
    now clamped into range.
    """
    postive_len = sum(labels)
    negative_len = len(labels) - postive_len
    total_case = postive_len * negative_len
    pos_histogram = [0] * n_bins
    neg_histogram = [0] * n_bins
    bin_width = 1.0 / n_bins
    for i in range(len(labels)):
        # clamp so that preds[i] == 1.0 falls into the last bin
        nth_bin = min(int(preds[i] / bin_width), n_bins - 1)
        if labels[i] == 1:
            pos_histogram[nth_bin] += 1
        else:
            neg_histogram[nth_bin] += 1
    accumulated_neg = 0
    satisfied_pair = 0
    for i in range(n_bins):
        satisfied_pair += (pos_histogram[i] * accumulated_neg
                           + pos_histogram[i] * neg_histogram[i] * 0.5)
        accumulated_neg += neg_histogram[i]

    return satisfied_pair / float(total_case)

if __name__ == '__main__':

    y = np.array([1,0,0,0,1,0,1,0,])
    pred = np.array([0.9, 0.8, 0.3, 0.1,0.4,0.9,0.66,0.7])

    # sklearn's exact ROC AUC vs. the histogram approximation above
    fpr, tpr, thresholds = roc_curve(y, pred, pos_label=1)
    print("-----sklearn:",auc(fpr, tpr))
    print("-----py脚本:",auc_calculate(y,pred))

CrossEntropy / Softmax

import numpy as np

def cross_entropy(y, y_hat):
    """Total binary cross-entropy between targets y and predictions y_hat.

    nan/inf terms arising from log(0) at saturated predictions are
    zeroed via np.nan_to_num; the scalar result is rounded to 3 decimals.
    """
    assert y.shape == y_hat.shape
    terms = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)
    total = -np.sum(np.nan_to_num(terms))
    return round(total, 3)

def softmax(y):
    """Row-wise softmax of a 2-D array, using the max-subtraction trick
    so large logits do not overflow np.exp."""
    shifted = y - y.max(axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)

if __name__ == "__main__":
    y = np.array([1, 0, 0, 1]).reshape(-1, 1)
    y_hat = np.array([1, 0.4, 0.5, 0.1]).reshape(-1, 1)
    print(cross_entropy(y, y_hat))
    # y = np.array([[1,2,3,4],[1,3,4,5],[3,4,5,6]])
    # print(softmax(y))
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)

机器学习算法代码 的相关文章

随机推荐

  • 用TensorFlow编写训练模型的程序——快速导读

    1 训练模型是怎么回事 训练模型是指 通过程序的反复迭代来修正神经网络中各个节点的值 从而实现具有一定拟合效果的算法 在训练神经网络的过程中 数据流有两个方向 正向和反向 正向负责预测生成结果 沿着网络节点的运算方向一层层计算下去 反向负责
  • 如何高速安装jetson-inference,一步到位,避免踩坑!

    踩了很长时间的坑 终于弄明白怎么高速下载jetson inference 来源 安装jetson inference 自动下载模型 满速下载起飞 解决下载模型被墙问题 奈流云何的博客 CSDN博客 需要将Github的仓库复制到Gitee上
  • 使用stm32的ADC得到准确的电压

    一 引脚多一点的stm32单片机存在VREF 和VREF 引脚 由上面的供电图知道 如果存在VREF 和VREF 引脚 那么ADC是由这两个引脚供电的 ADC的采集电压范围为 VREF
  • 理解Vue插槽

    引言 在Vue开发中 我们多采用组件化开发 组件化开发最大特点就是对页面结构化划分 组件的复用 而在实际中 页面对组件的需求或许也稍有不同 那么就需要一种需求 为页面定制组件 解决的方式便是通过插槽 实例
  • Java动态执行计算表达式利器 -- ScriptEngine

    在通过配置文件对系统进行参数配置时 有时需要更好的理解参数值的具体意义 往往采用计算表达式的方式设置 例如1天换成秒数为86400 如果写成24 60 60就很清晰的表达是一天的秒数 但是这个表达式通过properties的方式获取为字符串
  • Celery ValueError: not enough values to unpack (expected 3, got 0)

    目录 1 Celery ValueError not enough values to unpack expected 3 got 0 2 AttributeError async 1 Celery ValueError not enoug
  • 使用CUDA实现零拷贝技术

    使用CUDA实现零拷贝技术 零拷贝技术是指在内存和设备之间传输数据时 不需要显式地将数据从内存复制到设备的过程 从而提高数据传输效率 本文将介绍如何使用CUDA实现零拷贝技术 并提供示例代码 在使用CUDA进行图像处理时 通常需要将数据从主
  • 【计算机视觉

    文章目录 一 SqueezeNet 二 Inception v3 三 Visual Geometry Group 19 Layer CNN 四 MobileNetV1 五 Data efficient Image Transformer 六
  • 【CentOS7】开机自启动三种方法

    有个需求 比如说我想要执行开机杀毒程序 就需要去做开机自启动相关操作 准备工作 在 usr local目录下建立killviruses sh 前提 安装病毒库 vi killviruses sh 键入以下内容 前提 已安装ClamAV cl
  • js纯ajax,纯js 的Ajax 请求

    纯js 的Ajax 请求 2018 02 24 126 var XMLHttpReq function createXMLHttpRequest try XMLHttpReq new ActiveXObject Msxml2 XMLHTTP
  • 深度学习优化算法大全系列3:NAG(Nesterov Acceleration Gradient)

    1 NAG与SGD M的区别 NAG全称为Nesterov Accelerated Gradient 是在SGD Momentum基础进一步优化所得 前面的文章我们提到过 SGD M主要是利用历史累积动量来代替当前梯度从而达到减小震荡 加速
  • python自定义assert抛出的异常

    方法一 常用于assert失败后的提示用语 assert 1 gt 4 what 异常为 AssertionError what 方法二 常用于assert失败后推断导致的报错 try assert 1 gt 4 except Except
  • 前端实现导出Excel

    一 准备文件 1 创建excel文件夹 excel Blob js Export2Excel js 2 Blob js文件夹内容 eslint disable Blob js global self unescape jslint bitw
  • python pygame 游戏实战:Maze 迷宫生成,显示和游戏(附全部代码)

    生成迷宫 maze 的算法有很多种 论坛上有很多这方面的资料可以参考 这里使用回溯法 backtracking 主要参考Build a 2 player maze game with Python Part 4 Coding TidBits
  • 深入理解nandflash之基本特性

    nandflash作为嵌入式中的 磁盘 被广泛的应用 以 K9F2G08U0B 为例 其他型号都差不多 nandflash的结构 nandflash的结构有页 page block 块 的概念 其中页是真实概念 而块儿是虚拟概念 目的是为了
  • Graphviz 可视化图形软件(python)

    目录 1 简介 2 Graphviz 工具安装 3 检查是否安装成功 4 Graphviz 库安装 5 验证安装的graphviz是否可用 6 绘制红酒数据集得到的决策树 7 问题 pycharm正常画决策树 但jupyter显示 Modu
  • 在线java编译器_五个免费在线Java编译器,轻松编译代码

    原标题 五个免费在线Java编译器 轻松编译代码 Java编译器网络版成为有用的在许多情况下 例如 假设你正在编写一个Java代码 但不在自己的计算机上 减少时间的浪费 可以无需下载和安装任何软件 使用免费的在线工具运行代码 也就很有帮助
  • 爬虫日常-cookies登录12306

    文章目录 前言 页面分析 代码设计 前言 hello兄弟们 今天没事干也不晓得更什么内容 就把上次和大家说的可以采用cookies登录12306的方法告诉大家 这个功能熟练了的话还是比较简单的 毕竟可以直接通过pickle 建议大家可以自行
  • 简单年龄换算

    function calculatePetAge birthday var userBirthday new Date birthday var now new Date var petAgeNew now getTime userBirt
  • 机器学习算法代码

    BPNN import math import random random seed 0 def rand a b return b a random random a def make matrix m n fill 0 0 mat fo