ML-exp-2/code/bp_neural_network-iris.py
fly6516 e00976c7dc feat(code): add BP neural network models for the Iris and Wine Quality datasets
- Add a BPNeuralNetwork class implementing a basic backpropagation neural network
- Add data preprocessing, including feature scaling and one-hot encoding of the labels
- Implement ten-fold cross-validation to evaluate model performance
- Train and test the model separately on the Iris and Wine Quality datasets
- Report the average accuracy as the measure of model performance
2025-03-17 11:13:08 +08:00


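"""BP neural network on the Iris dataset.

A small backpropagation network implemented from scratch with NumPy.
Features are standardized, labels are one-hot encoded, and the model is
evaluated with ten-fold cross-validation, reporting the average accuracy.
"""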
import numpy as np
from ucimlrepo import fetch_ucirepo
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
# Define the BP (backpropagation) neural network class
class BPNeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size, learning_rate=0.1):
        # Sizes of the input, hidden and output layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = learning_rate
        # Initialize weights and biases
        self.weights_input_hidden = np.random.randn(self.input_size, self.hidden_size)  # input-to-hidden weights
        self.bias_hidden = np.random.randn(self.hidden_size)  # hidden-layer bias
        self.weights_hidden_output = np.random.randn(self.hidden_size, self.output_size)  # hidden-to-output weights
        self.bias_output = np.random.randn(self.output_size)  # output-layer bias

    def sigmoid(self, x):
        # Sigmoid activation function
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # Derivative of the sigmoid, where x is already a sigmoid output
        return x * (1 - x)

    def forward(self, X):
        # Forward pass
        self.hidden_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden  # hidden-layer pre-activation
        self.hidden_output = self.sigmoid(self.hidden_input)  # hidden-layer activation
        self.final_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output  # output-layer pre-activation
        self.final_output = self.sigmoid(self.final_input)  # output-layer activation
        return self.final_output

    def backward(self, X, y, output):
        # Backward pass (gradient descent on the squared error)
        output_error = y - output  # output-layer error
        output_delta = output_error * self.sigmoid_derivative(output)  # output-layer error signal
        hidden_error = output_delta.dot(self.weights_hidden_output.T)  # error propagated back to the hidden layer
        hidden_delta = hidden_error * self.sigmoid_derivative(self.hidden_output)  # hidden-layer error signal
        # Update weights and biases
        self.weights_hidden_output += self.hidden_output.T.dot(output_delta) * self.learning_rate  # hidden-to-output weights
        self.bias_output += np.sum(output_delta, axis=0) * self.learning_rate  # output-layer bias
        self.weights_input_hidden += X.T.dot(hidden_delta) * self.learning_rate  # input-to-hidden weights
        self.bias_hidden += np.sum(hidden_delta, axis=0) * self.learning_rate  # hidden-layer bias

    def train(self, X, y, epochs):
        # Train the network with full-batch gradient descent
        for epoch in range(epochs):
            output = self.forward(X)  # forward pass
            self.backward(X, y, output)  # backward pass and weight update
            if epoch % 1000 == 0:
                loss = np.mean(np.square(y - output))  # mean squared error
                print(f'Epoch {epoch}, Loss: {loss}')

    def predict(self, X):
        # Round the network outputs to 0/1; argmax is applied downstream
        return np.round(self.forward(X))


# Convert labels to one-hot encoding
def one_hot_encode(y):
    # Flatten and make sure every label is a string (targets may arrive as an (n, 1) array)
    y = np.array([str(label) for label in np.ravel(y)])
    # Map each distinct label to an integer index
    label_to_int = {label: i for i, label in enumerate(np.unique(y))}
    # Convert labels to integer indices
    y_int = np.array([label_to_int[label] for label in y])
    n_values = np.max(y_int) + 1
    return np.eye(n_values)[y_int]  # return the one-hot matrix
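# A quick sanity check (not part of the original script):
#   one_hot_encode(['a', 'b', 'a']) -> [[1., 0.], [0., 1.], [1., 0.]]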


# Load the Iris dataset
iris = fetch_ucirepo(id=53)
X = iris.data.features.values  # feature matrix
y = iris.data.targets.values  # class labels

# Feature scaling
scaler = StandardScaler()
X = scaler.fit_transform(X)  # standardize the features

# One-hot encode the labels
y_encoded = one_hot_encode(y)

# Ten-fold cross-validation
kf = KFold(n_splits=10, shuffle=True, random_state=42)
accuracies = []
for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]  # training / test features
    y_train, y_test = y_encoded[train_index], y_encoded[test_index]  # training / test labels
    # Build and train the BP neural network
    nn = BPNeuralNetwork(input_size=X_train.shape[1], hidden_size=5, output_size=y_train.shape[1], learning_rate=0.01)
    nn.train(X_train, y_train, epochs=10000)
    # Predict and compute the accuracy of this fold
    predictions = nn.predict(X_test)
    accuracy = accuracy_score(np.argmax(y_test, axis=1), np.argmax(predictions, axis=1))
    accuracies.append(accuracy)  # store the accuracy of each fold

print(f'Average Accuracy: {np.mean(accuracies)}')  # mean accuracy over the ten folds
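
# ---------------------------------------------------------------------------
# The commit message also mentions the Wine Quality dataset, which is handled
# in a separate script. The block below is only a minimal sketch of how this
# same pipeline could be reused for it; the UCI dataset id (186) and the
# larger hidden layer are assumptions, not part of the original file, so it
# is kept behind an `if False:` guard.
# ---------------------------------------------------------------------------
if False:
    wine = fetch_ucirepo(id=186)  # assumed UCI id for the Wine Quality dataset
    X_wine = StandardScaler().fit_transform(wine.data.features.values)
    y_wine = one_hot_encode(wine.data.targets.values)  # quality scores treated as classes
    wine_accuracies = []
    for train_idx, test_idx in KFold(n_splits=10, shuffle=True, random_state=42).split(X_wine):
        nn_wine = BPNeuralNetwork(input_size=X_wine.shape[1], hidden_size=10,
                                  output_size=y_wine.shape[1], learning_rate=0.01)
        nn_wine.train(X_wine[train_idx], y_wine[train_idx], epochs=10000)
        wine_predictions = nn_wine.predict(X_wine[test_idx])
        wine_accuracies.append(accuracy_score(np.argmax(y_wine[test_idx], axis=1),
                                              np.argmax(wine_predictions, axis=1)))
    print(f'Wine Quality Average Accuracy: {np.mean(wine_accuracies)}')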