{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:43.448551Z",
     "start_time": "2025-06-24T05:07:43.431592Z"
    }
   },
   "source": [
    "# First, import all required packages:\n",
    "import os\n",
    "import random\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import deepquantum as dq\n",
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torchvision.transforms as transforms\n",
    "from tqdm import tqdm\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision.datasets import MNIST, FashionMNIST\n",
    "\n",
    "def seed_torch(seed=1024):\n",
    "    \"\"\"\n",
    "    Set random seeds for reproducibility.\n",
    "\n",
    "    Args:\n",
    "        seed (int): Random seed number to use. Default is 1024.\n",
    "    \"\"\"\n",
    "\n",
    "    random.seed(seed)\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "\n",
    "    # Seed all GPUs with the same seed if using multi-GPU\n",
    "    torch.cuda.manual_seed_all(seed)\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "\n",
    "seed_torch(1024)"
   ],
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:43.472777Z",
     "start_time": "2025-06-24T05:07:43.463779Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def calculate_score(y_true, y_preds):\n",
    "    # Convert model outputs to a probability distribution\n",
    "    preds_prob = torch.softmax(y_preds, dim=1)\n",
    "    # Take the predicted class (the one with the highest probability)\n",
    "    preds_class = torch.argmax(preds_prob, dim=1)\n",
    "    # Compute accuracy\n",
    "    correct = (preds_class == y_true).float()\n",
    "    accuracy = correct.sum() / len(correct)\n",
    "    return accuracy.cpu().numpy()\n",
    "\n",
    "\n",
    "def train_model(model, criterion, optimizer, train_loader, valid_loader, num_epochs, device):\n",
    "    \"\"\"\n",
    "    Train and validate the model.\n",
    "\n",
    "    Args:\n",
    "        model (torch.nn.Module): Model to train.\n",
    "        criterion (torch.nn.Module): Loss function.\n",
    "        optimizer (torch.optim.Optimizer): Optimizer.\n",
    "        train_loader (torch.utils.data.DataLoader): Training data loader.\n",
    "        valid_loader (torch.utils.data.DataLoader): Validation data loader.\n",
    "        num_epochs (int): Number of training epochs.\n",
    "\n",
    "    Returns:\n",
    "        model (torch.nn.Module): Trained model.\n",
    "        metrics (dict): Per-epoch training and validation metrics.\n",
    "    \"\"\"\n",
    "\n",
    "    train_loss_list = []\n",
    "    valid_loss_list = []\n",
    "    train_acc_list = []\n",
    "    valid_acc_list = []\n",
    "\n",
    "    with tqdm(total=num_epochs) as pbar:\n",
    "        for epoch in range(num_epochs):\n",
    "            # Training phase\n",
    "            model.train()  # re-enable train mode each epoch (validation switches to eval mode below)\n",
    "            train_loss = 0.0\n",
    "            train_acc = 0.0\n",
    "            for images, labels in train_loader:\n",
    "                images = images.to(device)\n",
    "                labels = labels.to(device)\n",
    "                optimizer.zero_grad()\n",
    "                outputs = model(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                train_loss += loss.item()\n",
    "                train_acc += calculate_score(labels, outputs)\n",
    "\n",
    "            train_loss /= len(train_loader)\n",
    "            train_acc /= len(train_loader)\n",
    "\n",
    "            # Validation phase\n",
    "            model.eval()\n",
    "            valid_loss = 0.0\n",
    "            valid_acc = 0.0\n",
    "            with torch.no_grad():\n",
    "                for images, labels in valid_loader:\n",
    "                    images = images.to(device)\n",
    "                    labels = labels.to(device)\n",
    "                    outputs = model(images)\n",
    "                    loss = criterion(outputs, labels)\n",
    "                    valid_loss += loss.item()\n",
    "                    valid_acc += calculate_score(labels, outputs)\n",
    "\n",
    "            valid_loss /= len(valid_loader)\n",
    "            valid_acc /= len(valid_loader)\n",
    "\n",
    "            pbar.set_description(f\"Train loss: {train_loss:.3f} Valid Acc: {valid_acc:.3f}\")\n",
    "            pbar.update()\n",
    "\n",
    "            train_loss_list.append(train_loss)\n",
    "            valid_loss_list.append(valid_loss)\n",
    "            train_acc_list.append(train_acc)\n",
    "            valid_acc_list.append(valid_acc)\n",
    "\n",
    "    metrics = {'epoch': list(range(1, num_epochs + 1)),\n",
    "               'train_acc': train_acc_list,\n",
    "               'valid_acc': valid_acc_list,\n",
    "               'train_loss': train_loss_list,\n",
    "               'valid_loss': valid_loss_list}\n",
    "\n",
    "    return model, metrics\n",
    "\n",
    "def test_model(model, test_loader, device):\n",
    "    model.eval()\n",
    "    test_acc = 0.0\n",
    "    with torch.no_grad():\n",
    "        for images, labels in test_loader:\n",
    "            images = images.to(device)\n",
    "            labels = labels.to(device)\n",
    "            outputs = model(images)\n",
    "            test_acc += calculate_score(labels, outputs)\n",
    "\n",
    "    test_acc /= len(test_loader)\n",
    "    print(f'Test Acc: {test_acc:.3f}')\n",
    "    return test_acc"
   ],
   "id": "cc4c2323375a0d64",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:44.895616Z",
     "start_time": "2025-06-24T05:07:43.501957Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Define the image transforms\n",
    "trans1 = transforms.Compose([\n",
    "    transforms.Resize((18, 18)),  # resize to 18x18\n",
    "    transforms.ToTensor()  # convert to tensor\n",
    "])\n",
    "\n",
    "trans2 = transforms.Compose([\n",
    "    transforms.Resize((16, 16)),  # resize to 16x16\n",
    "    transforms.ToTensor()  # convert to tensor\n",
    "])\n",
    "# Note: both datasets load the FashionMNIST test split (train=False), which keeps the demo small\n",
    "train_dataset = FashionMNIST(root='./data/notebook1', train=False, transform=trans1, download=True)\n",
    "test_dataset = FashionMNIST(root='./data/notebook1', train=False, transform=trans1, download=True)\n",
    "\n",
    "# Define the train/validation split ratio\n",
    "train_ratio = 0.8  # 80% for training, 20% for validation\n",
    "valid_ratio = 0.2\n",
    "total_samples = len(train_dataset)\n",
    "train_size = int(train_ratio * total_samples)\n",
    "valid_size = int(valid_ratio * total_samples)\n",
    "\n",
    "# Split into training and validation sets\n",
    "train_dataset, valid_dataset = torch.utils.data.random_split(train_dataset, [train_size, valid_size])\n",
    "\n",
    "# Build the data loaders\n",
    "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, drop_last=True)\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=64, shuffle=False, drop_last=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False, drop_last=True)"
   ],
   "id": "4b641527c641afc1",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 5.15k/5.15k [00:00<?, ?B/s]\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:44.960786Z",
     "start_time": "2025-06-24T05:07:44.954222Z"
    }
   },
   "cell_type": "code",
   "source": [
    "singlegate_list = ['rx', 'ry', 'rz', 's', 't', 'p', 'u3']\n",
    "doublegate_list = ['rxx', 'ryy', 'rzz', 'swap', 'cnot', 'cp', 'ch', 'cu', 'ct', 'cz']"
   ],
   "id": "1c3e55f43e47a4f1",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:44.989143Z",
     "start_time": "2025-06-24T05:07:44.981160Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Random quantum convolutional layer\n",
    "class RandomQuantumConvolutionalLayer(nn.Module):\n",
    "    def __init__(self, nqubit, num_circuits, seed: int = 1024):\n",
    "        super(RandomQuantumConvolutionalLayer, self).__init__()\n",
    "        random.seed(seed)\n",
    "        self.nqubit = nqubit\n",
    "        self.cirs = nn.ModuleList([self.circuit(nqubit) for _ in range(num_circuits)])\n",
    "\n",
    "    def circuit(self, nqubit):\n",
    "        cir = dq.QubitCircuit(nqubit)\n",
    "        cir.rxlayer(encode=True)  # does not change the circuit structure of the original paper; it only encodes the data\n",
    "        cir.barrier()\n",
    "        for iter in range(3):\n",
    "            for i in range(nqubit):\n",
    "                singlegate = random.choice(singlegate_list)\n",
    "                getattr(cir, singlegate)(i)\n",
    "            control_bit, target_bit = random.sample(range(0, nqubit - 1), 2)\n",
    "            doublegate = random.choice(doublegate_list)\n",
    "            if doublegate[0] in ['r', 's']:\n",
    "                getattr(cir, doublegate)([control_bit, target_bit])\n",
    "            else:\n",
    "                getattr(cir, doublegate)(control_bit, target_bit)\n",
    "            cir.barrier()\n",
    "\n",
    "        cir.observable(0)\n",
    "        return cir\n",
    "\n",
    "    def forward(self, x):\n",
    "        kernel_size, stride = 2, 2\n",
    "        # [64, 1, 18, 18] -> [64, 1, 9, 18, 2] -> [64, 1, 9, 9, 2, 2]\n",
    "        x_unfold = x.unfold(2, kernel_size, stride).unfold(3, kernel_size, stride)\n",
    "        w = int((x.shape[-1] - kernel_size) / stride + 1)\n",
    "        x_reshape = x_unfold.reshape(-1, self.nqubit)\n",
    "\n",
    "        exps = []\n",
    "        for cir in self.cirs:  # out_channels\n",
    "            cir(x_reshape)\n",
    "            exp = cir.expectation()\n",
    "            exps.append(exp)\n",
    "\n",
    "        exps = torch.stack(exps, dim=1)\n",
    "        exps = exps.reshape(x.shape[0], 3, w, w)\n",
    "        return exps"
   ],
   "id": "f03fcd820876a62",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:45.475988Z",
     "start_time": "2025-06-24T05:07:45.019221Z"
    }
   },
   "cell_type": "code",
   "source": [
    "net = RandomQuantumConvolutionalLayer(nqubit=4, num_circuits=3, seed=1024)\n",
    "net.cirs[0].draw()"
   ],
   "id": "fcea5aa513a0bd68",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<Figure size 1207.22x367.889 with 1 Axes>"
      ]
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA7UAAAEvCAYAAACaO+Y5AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAAX8hJREFUeJzt3QlYVFX/B/DvsK+CLCqKCCKIa5qKuedWLmm5tJrma/urab2mZsvfbDFTe01ts7LMFrPUci3NNMMd9w0R2QQBUUAWZWf+zzm8TCKogDJ37r3fz/PMc5k7M3CGued35nfPcg1Go9EIIiIiIiIiIhWyUroARERERERERDXFpJaIiIiIiIhUi0ktERERERERqRaTWiIiIiIiIlItJrVERERERESkWkxqiYiIiIiISLWY1BIREREREZFqMaklIiIiIiIi1WJSS0RERERERKrFpJaIiIiIiIhUi0ktERERERERqRaTWiIiIiIiIlItJrVERERERESkWkxqiYiIiIiISLWY1BIREREREZFqMaklIiIiIiIi1WJSS0RERERERKrFpJaIiIiIiIhUi0ktERERERERqRaTWiIiIiIiIlItJrVERERERESkWkxqiYiIiIiISLWY1BIREREREZFqMaklIiIiIiIi1WJSS0RERERERKrFpJaIiIiIiIhUi0ktERERERERqRaTWiIiIiIiIlItG6ULQHQrwsPDq/X8ixcvYvXq1Rg+fDi8vLyq9JpOnTrVsHREZGlxoCYxQGAcIEvGtpCIwnXeFrKnlnRFVOIvv/xSbolIfxgDiFgPiPTuogZjAJNaIiIiIiIiUi0mtURERERERKRaTGqJiIiIiIhItZjUkq64urpiwIABcktE+sMYQMR6QKR3rhqMAQaj0WhUuhBE5lrxsSYseaU3ImIcIGIdIKJwnccB9tSSruTn5yMhIUFuiUh/GAOIWA+I9C5fgzGASS3pSmxsLEaMGCG3RKQ/jAFErAdEeherwRhgo3QBqHJiVHhRrnrOntg42sNgMChdDFVS22dN1cO6QXqKDTzeSa/HPt16LNDq5864aB5Mai2UqNTfBz4OtRgV/R1snRyULoYqqe2zpuph3SA9xQYe76TXY59uPRZo9XNnXDQPDj8mIiIiIiIi1WJSS0RERERERKrF4cekKyEhIdi3b5/SxSAihTAGELEeEOldiAZjAHtqiYiIiIiISLWY1JKuxMfHY9y4cXJLRPrDGEDEekCkd/EajAFMaklXcnNzcfz4cbklIv1hDCBiPSDSu1wNxgAmtURERERERKRaXChKQxp0aYUBq2eW21d4ORdZMcmIXvk3IpZshLG4RLHykf40e+hudF8wATsmfYQzP/1V4XEXX2+MDP8UZ1Zsw44XP1akjERawnaAyPKwLSSqfUxqNShmdRgStx4EDAY4eruj2YO9EDpzLNyCGmH3lMVKF4+IiGoZ2wEiItITJrUalHYsFjGrwkz3I5duwrCwBQh+rC8Ozl6O/LQs6JWPjw9mzpwpt0SkP3qJAWwH6Eb0Ug+ISD8xgHNqdaAoNx8XDkbBYGWFOk3qQ8/c3NwwcOBAuSUi/dFrDGA7QFfTaz0gIu3GACa1OuHqX/olJv9SDvQsIyMDP//8s9wSkf7oOQawHaAyeq4HRARNxgAmtRpk42gHew9X2HvWgXuIHzrPegqebZrKs/RisRA9O3/+PObOnSu3RKQ/eokBbAfoRvRSD4hIPzFAF3NqL168iDlz5mD16tVITEyEt7c3hg8fjlmzZmHixIn46quvsGjRIkyYMAFa0H7qI/J2tbgNe7B3+peKlYmIiMyH7QAREemJ5pPaw4cPyzHjKSkpcHZ2RsuWLZGUlISFCxciOjoa6enp8nnt2rWDVkR+uxlx63bDytYGdUP80Hr8A3D28URxfoHpOVZ2NhiyeS5ifwnD0QWrTfu7fzgeDt7u2DLqXYVKT3pkNBqVLgKRprAdIFIftoVVYDCg5dOD0Xx0f3kppLy0LMSu24XDc1bItQNIv6y03kM7ZMgQmdBOnjwZycnJOHjwoLz//vvvY8OGDQgPD4fBYEDbtm2hFVkxKUgOO4ZzWw/h+Cdr8OcTs+HVLhBd3n/W9JySgiLsmLgIbSYOR92WTeQ+vwGd4Nu/I3b+5xMFS09aUpRX+gXa2tG+0sdtnEr3F//veUR0e7AdILIcbAtvn9C3xsrLk106nYg9r3+FuPW70fLJQei77BWZ8JJ+aTqpFUOLxXBjMax43rx5cHV1NT02depU3HHHHSgqKoK/vz/q1KkDrbqwPxLRK/9GwAPd4N2xuWl/2tEYnPh0LXosfAFOPh7oMvc57H31S+Se186k8Ws5OTmhc+fOcku1L+dsqty6BzWq9HG3IF+5zf7f84hqm15jANsBuppe64FS2BbeHu7BvmgxbqCcSrHtybmI+n4Lwt/8Bvve/AY+3dvI+Eb6jQGaTWojIiKwYsUKeHl54b333qv0OR06dJBbkdxeLTY2FkOHDpVJcN26dTFmzBikpaVBzY7MX4mSomK0n/Jw+f0frkJJcTGG/jEXKTuPI3bNTmiZn5+fnD8ttlT70o7FIOfcBdnQONavW+4xMSxSNE7GkhIkbN6vWBlJX/QcA9gOUBk91wMlsC28PQKGdZeXJTv5xYZy+0VyW3glD4EjeipWNrXx02AM0GxSu3z5cpSUlGDUqFFwcXGp9DmOjo4Vktrs7Gz07t1b9vCK3/H5558jLCwM9913n/x9apUdlyK/qDTs2Rb1Orcw7TcWFeNCeCQcPN1wZsU2aF1xcTFycnLklmqfsbgEe6Z9AVtXJ9y/9QN0eO1xBD/eD21fGokhm+egQddWOLroF2RFJyldVNIJPccAtgNURs/1QAlsC28Pr3bN5Am4i4eiyu0vzi9E+vE4OcWC9BsDNJvUbt26VW5Fgno9InG9NqkVSey5c+fw66+/ykT2wQcfxA8//IA9e/Zg7dq1ULOjC0rPxl99ll58sWn2cG9ELNmI0Lf+BWsHO2hZVFQU+vTpI7dkHol/HsTGoa8jZecJNHuoF+6a9RRaPztELu7w1zMf4NDs5UoXkXRE7zGA7QAJeq8HSmBbeOuc6tdFfnq2XA/gWldS0uWJOdHzTfqMAZr95OPj4+W2SZPSxS+uJebS7ty5s0JSu379enTv3r1cd3yXLl3QtGlTrFu3Dg888EC1y9KxY0e5OFV12BqtMAOh1XpNyu4TWOoz8rqPZ0adwzLff77I2Dg5yFUuD7z7PU59swkDf3kLd05/DOEzlqK6goOCUWgwf0/2yJHXf7+VSU0tna/y22+/4cCBA1V6zbBhw1CbavJZq03akWjZaOuRUnVDT6oTB2oSA8wRB7TQDgg83pWhhbZQD+2intrCmsSCm33uYqGt4oLCSh8TvbVl1+guKKyY9CrJXHFxpAbawgYNGmD//poNw9dsUnv58mW5zc3NrfRxMd9WrI4s5s0GBASY9p88eVL2zl6rVatW8rGaEAmt6P2tDjuDNVAftarTm2Pk4gWnlv4u7++Y9BGGbpmHs
7/txfk9EdX6XUnJSSgwFiv2OVdV2fEgtlV9bXU/u+oyx2dNylGqbuhJdeJATWKAOeKAFtoBgce7MrTQFl6N7aL61SQW3OxzL87Nh62zW6WPWdvbym1RruWtIG2uuHhZo20h9J7Uikw/IyNDXsJH9LReTVzaZ8qUKfJncSkfcUmfMuI17u7uFX6fh4cHIiMja1yW6hJnq1CLJ3Ua9WmPgKHdsKbvZNO+7Pjz8mx9t/njsbbP5Gpd76uhT0NFzs6Law9XR1nFFfOpq/raRo0qX63wdqntz5qUpVTd0JPqxIGaxABzxAEttAMCj3dlaKEtvBrbRfWrSSy42ed+5XwG3IJ95TW2rx2C7NTAA3lpmSixsF5ac8ZFZw20hTXJmTSf1Pbr10+ugCyuR9u/f38EBwfL/eK6tKNHj5a9tEK7du1qvSw16UYXq7h9H/g4aou4duEPIU9U2C/O1pedsa+O01GnYevkAHMTn2d1nDp1Si4ANnDgQISEhFTpNR9++CFqU21/1qQspeqGnlQnDtQkBpg
|
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-24T05:07:45.523034Z",
     "start_time": "2025-06-24T05:07:45.502484Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Hybrid model based on the random quantum convolutional layer\n",
    "class RandomQCCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(RandomQCCNN, self).__init__()\n",
    "        self.conv = nn.Sequential(\n",
    "            RandomQuantumConvolutionalLayer(nqubit=4, num_circuits=3, seed=1024),  # num_circuits=3 means the quanv1 layer uses only 3 quantum convolution kernels\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=1),\n",
    "            nn.Conv2d(3, 6, kernel_size=2, stride=1),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=1)\n",
    "        )\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(6 * 6 * 6, 1024),\n",
    "            nn.Dropout(0.4),\n",
    "            nn.Linear(1024, 10)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv(x)\n",
    "        x = x.reshape(x.size(0), -1)\n",
    "        x = self.fc(x)\n",
    "        return x"
   ],
   "id": "64082ff8ea82fe8",
   "outputs": [],
   "execution_count": 10
  },
  {
   "metadata": {
    "jupyter": {
     "is_executing": true
    },
    "ExecuteTime": {
     "start_time": "2025-06-24T05:07:45.545363Z"
    }
   },
   "cell_type": "code",
   "source": [
    "num_epochs = 300\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "seed_torch(1024)  # reset the random seed\n",
    "model = RandomQCCNN()\n",
    "model.to(device)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=0.001)  # add weight-decay regularization\n",
    "optim_model, metrics = train_model(model, criterion, optimizer, train_loader, valid_loader, num_epochs, device)\n",
    "torch.save(optim_model.state_dict(), './data/notebook1/random_qccnn_weights.pt')  # save the trained model parameters for later inference or testing\n",
    "pd.DataFrame(metrics).to_csv('./data/notebook1/random_qccnn_metrics.csv', index=False)  # save the training history for later plotting"
   ],
   "id": "19b3021c114a9129",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Train loss: 1.012 Valid Acc: 0.621:   5%|▌         | 16/300 [08:18<2:36:59, 33.17s/it]"
     ]
    }
   ],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "state_dict = torch.load('./data/notebook1/random_qccnn_weights.pt', map_location=device)\n",
    "random_qccnn_model = RandomQCCNN()\n",
    "random_qccnn_model.load_state_dict(state_dict)\n",
    "random_qccnn_model.to(device)\n",
    "\n",
    "test_acc = test_model(random_qccnn_model, test_loader, device)"
   ],
   "id": "49ceb326295cd4a9"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "data = pd.read_csv('./data/notebook1/random_qccnn_metrics.csv')\n",
    "epoch = data['epoch']\n",
    "train_loss = data['train_loss']\n",
    "valid_loss = data['valid_loss']\n",
    "train_acc = data['train_acc']\n",
    "valid_acc = data['valid_acc']\n",
    "\n",
    "# Create the figure and Axes objects\n",
    "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))\n",
    "\n",
    "# Plot the training loss curves\n",
    "ax1.plot(epoch, train_loss, label='Train Loss')\n",
    "ax1.plot(epoch, valid_loss, label='Valid Loss')\n",
    "ax1.set_title('Training Loss Curve')\n",
    "ax1.set_xlabel('Epoch')\n",
    "ax1.set_ylabel('Loss')\n",
    "ax1.legend()\n",
    "\n",
    "# Plot the training accuracy curves\n",
    "ax2.plot(epoch, train_acc, label='Train Accuracy')\n",
    "ax2.plot(epoch, valid_acc, label='Valid Accuracy')\n",
    "ax2.set_title('Training Accuracy Curve')\n",
    "ax2.set_xlabel('Epoch')\n",
    "ax2.set_ylabel('Accuracy')\n",
    "ax2.legend()\n",
    "\n",
    "plt.show()"
   ],
   "id": "45287356d5a9a0ad"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "class ParameterizedQuantumConvolutionalLayer(nn.Module):\n",
    "    def __init__(self, nqubit, num_circuits):\n",
    "        super().__init__()\n",
    "        self.nqubit = nqubit\n",
    "        self.cirs = nn.ModuleList([self.circuit(nqubit) for _ in range(num_circuits)])\n",
    "\n",
    "    def circuit(self, nqubit):\n",
    "        cir = dq.QubitCircuit(nqubit)\n",
    "        cir.rxlayer(encode=True)  # does not change the circuit structure of the original paper; it only encodes the data\n",
    "        cir.barrier()\n",
    "        for iter in range(4):  # depth 4 per quantum convolution circuit as in the original paper, 16 trainable parameters in total\n",
    "            cir.rylayer()\n",
    "            cir.cnot_ring()\n",
    "            cir.barrier()\n",
    "\n",
    "        cir.observable(0)\n",
    "        return cir\n",
    "\n",
    "    def forward(self, x):\n",
    "        kernel_size, stride = 2, 2\n",
    "        # [64, 1, 18, 18] -> [64, 1, 9, 18, 2] -> [64, 1, 9, 9, 2, 2]\n",
    "        x_unfold = x.unfold(2, kernel_size, stride).unfold(3, kernel_size, stride)\n",
    "        w = int((x.shape[-1] - kernel_size) / stride + 1)\n",
    "        x_reshape = x_unfold.reshape(-1, self.nqubit)\n",
    "\n",
    "        exps = []\n",
    "        for cir in self.cirs:  # out_channels\n",
    "            cir(x_reshape)\n",
    "            exp = cir.expectation()\n",
    "            exps.append(exp)\n",
    "\n",
    "        exps = torch.stack(exps, dim=1)\n",
    "        exps = exps.reshape(x.shape[0], 3, w, w)\n",
    "        return exps"
   ],
   "id": "736fe987b84d5891"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Here we visualize the circuit structure of one of the quantum convolution kernels:\n",
    "net = ParameterizedQuantumConvolutionalLayer(nqubit=4, num_circuits=3)\n",
    "net.cirs[0].draw()"
   ],
   "id": "e8058c7fde0a012b"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Overall QCCNN network architecture:\n",
    "class QCCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(QCCNN, self).__init__()\n",
    "        self.conv = nn.Sequential(\n",
    "            ParameterizedQuantumConvolutionalLayer(nqubit=4, num_circuits=3),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=1)\n",
    "        )\n",
    "\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(8 * 8 * 3, 128),\n",
    "            nn.Dropout(0.4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(128, 10)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv(x)\n",
    "        x = x.reshape(x.size(0), -1)\n",
    "        x = self.fc(x)\n",
    "        return x"
   ],
   "id": "e3c6160fff06bed2"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "num_epochs = 300\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "model = QCCNN()\n",
    "model.to(device)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=1e-5)\n",
    "optim_model, metrics = train_model(model, criterion, optimizer, train_loader, valid_loader, num_epochs, device)\n",
    "torch.save(optim_model.state_dict(), './data/notebook1/qccnn_weights.pt')  # save the trained model parameters for later inference or testing\n",
    "pd.DataFrame(metrics).to_csv('./data/notebook1/qccnn_metrics.csv', index=False)  # save the training history for later plotting"
   ],
   "id": "34202fca380ee084"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "state_dict = torch.load('./data/notebook1/qccnn_weights.pt', map_location=device)\n",
    "qccnn_model = QCCNN()\n",
    "qccnn_model.load_state_dict(state_dict)\n",
    "qccnn_model.to(device)\n",
    "\n",
    "test_acc = test_model(qccnn_model, test_loader, device)"
   ],
   "id": "f613b1c9a9ea0cd6"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "def vgg_block(in_channel, out_channel, num_convs):\n",
    "    layers = nn.ModuleList()\n",
    "    assert num_convs >= 1\n",
    "    layers.append(nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=1))\n",
    "    layers.append(nn.ReLU())\n",
    "    for _ in range(num_convs - 1):\n",
    "        layers.append(nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=1))\n",
    "        layers.append(nn.ReLU())\n",
    "    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n",
    "    return nn.Sequential(*layers)\n",
    "\n",
    "VGG = nn.Sequential(\n",
    "    vgg_block(1, 10, 3),   # 18x18 -> 9x9\n",
    "    vgg_block(10, 16, 3),  # 9x9 -> 4x4\n",
    "    nn.Flatten(),\n",
    "    nn.Linear(16 * 4 * 4, 120),\n",
    "    nn.Sigmoid(),\n",
    "    nn.Linear(120, 84),\n",
    "    nn.Sigmoid(),\n",
    "    nn.Linear(84, 10),\n",
    "    nn.Softmax(dim=-1)\n",
    ")"
   ],
   "id": "37cc9edc6c4b035d"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "num_epochs = 300\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "vgg_model = VGG\n",
    "vgg_model.to(device)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(vgg_model.parameters(), lr=1e-5)\n",
    "vgg_model, metrics = train_model(vgg_model, criterion, optimizer, train_loader, valid_loader, num_epochs, device)\n",
    "torch.save(vgg_model.state_dict(), './data/notebook1/vgg_weights.pt')  # save the trained model parameters for later inference or testing\n",
    "pd.DataFrame(metrics).to_csv('./data/notebook1/vgg_metrics.csv', index=False)  # save the training history for later plotting"
   ],
   "id": "643da0fb0433f438"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "state_dict = torch.load('./data/notebook1/vgg_weights.pt', map_location=device)\n",
    "vgg_model = VGG\n",
    "vgg_model.load_state_dict(state_dict)\n",
    "vgg_model.to(device)\n",
    "\n",
    "vgg_test_acc = test_model(vgg_model, test_loader, device)"
   ],
   "id": "cc56710965ab7c82"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "vgg_data = pd.read_csv('./data/notebook1/vgg_metrics.csv')\n",
    "qccnn_data = pd.read_csv('./data/notebook1/qccnn_metrics.csv')\n",
    "vgg_epoch = vgg_data['epoch']\n",
    "vgg_train_loss = vgg_data['train_loss']\n",
    "vgg_valid_loss = vgg_data['valid_loss']\n",
    "vgg_train_acc = vgg_data['train_acc']\n",
    "vgg_valid_acc = vgg_data['valid_acc']\n",
    "\n",
    "qccnn_epoch = qccnn_data['epoch']\n",
    "qccnn_train_loss = qccnn_data['train_loss']\n",
    "qccnn_valid_loss = qccnn_data['valid_loss']\n",
    "qccnn_train_acc = qccnn_data['train_acc']\n",
    "qccnn_valid_acc = qccnn_data['valid_acc']\n",
    "\n",
    "# Create the figure and Axes objects\n",
    "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))\n",
    "\n",
    "# Plot the training loss curves\n",
    "ax1.plot(vgg_epoch, vgg_train_loss, label='VGG Train Loss')\n",
    "ax1.plot(vgg_epoch, vgg_valid_loss, label='VGG Valid Loss')\n",
    "ax1.plot(qccnn_epoch, qccnn_train_loss, label='QCCNN Train Loss')\n",
    "ax1.plot(qccnn_epoch, qccnn_valid_loss, label='QCCNN Valid Loss')\n",
    "ax1.set_title('Training Loss Curve')\n",
    "ax1.set_xlabel('Epoch')\n",
    "ax1.set_ylabel('Loss')\n",
    "ax1.legend()\n",
    "\n",
    "# Plot the training accuracy curves\n",
    "ax2.plot(vgg_epoch, vgg_train_acc, label='VGG Train Accuracy')\n",
    "ax2.plot(vgg_epoch, vgg_valid_acc, label='VGG Valid Accuracy')\n",
    "ax2.plot(qccnn_epoch, qccnn_train_acc, label='QCCNN Train Accuracy')\n",
    "ax2.plot(qccnn_epoch, qccnn_valid_acc, label='QCCNN Valid Accuracy')\n",
    "ax2.set_title('Training Accuracy Curve')\n",
    "ax2.set_xlabel('Epoch')\n",
    "ax2.set_ylabel('Accuracy')\n",
    "ax2.legend()\n",
    "\n",
    "plt.show()"
   ],
   "id": "8e450f8cfb2812d2"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Here we compare the number of trainable parameters across the models\n",
    "\n",
    "def count_parameters(model):\n",
    "    \"\"\"\n",
    "    Count the number of trainable parameters in a model.\n",
    "    \"\"\"\n",
    "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "\n",
    "number_params_VGG = count_parameters(VGG)\n",
    "number_params_QCCNN = count_parameters(QCCNN())\n",
    "print(f'VGG trainable parameters: {number_params_VGG}\\t QCCNN trainable parameters: {number_params_QCCNN}')"
   ],
   "id": "9675ba847f4a998d"
  }
 ],
"metadata": {
|
|||
|
"kernelspec": {
|
|||
|
"display_name": "Python 3",
|
|||
|
"language": "python",
|
|||
|
"name": "python3"
|
|||
|
},
|
|||
|
"language_info": {
|
|||
|
"codemirror_mode": {
|
|||
|
"name": "ipython",
|
|||
|
"version": 2
|
|||
|
},
|
|||
|
"file_extension": ".py",
|
|||
|
"mimetype": "text/x-python",
|
|||
|
"name": "python",
|
|||
|
"nbconvert_exporter": "python",
|
|||
|
"pygments_lexer": "ipython2",
|
|||
|
"version": "2.7.6"
|
|||
|
}
|
|||
|
},
|
|||
|
"nbformat": 4,
|
|||
|
"nbformat_minor": 5
|
|||
|
}
|