Knowledge review:
- Anatomy of the ResNet architecture
- Where to place CBAM modules, and why
- Training strategies for pretrained models
- Differentiated learning rates (a minimal sketch follows this list)
- Three-stage fine-tuning
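To illustrate the differentiated-learning-rate idea, the sketch below assigns a small learning rate to a pretrained backbone and a larger one to a freshly initialized head via optimizer parameter groups. This is a minimal sketch, assuming torchvision >= 0.13 and a hypothetical 10-class task; the learning-rate values are illustrative and are not the ones used in the staged-unfreezing script below.

import torch.nn as nn
import torch.optim as optim
from torchvision import models

# Hypothetical example: pretrained ResNet-18 backbone + new 10-class head
model = models.resnet18(weights="IMAGENET1K_V1")
model.fc = nn.Linear(model.fc.in_features, 10)

# Split parameters into backbone vs. head by name
backbone_params = [p for n, p in model.named_parameters() if not n.startswith("fc.")]
head_params = list(model.fc.parameters())

# Differentiated learning rates via parameter groups
optimizer = optim.Adam([
    {"params": backbone_params, "lr": 1e-5},  # small LR: gently adjust pretrained weights
    {"params": head_params,     "lr": 1e-3},  # large LR: train the new classifier faster
])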
Homework:
- Work through the resnet18 model structure carefully (a quick inspection snippet follows this list)
- Try applying the fine-tuning strategy to VGG16+CBAM
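For the first homework item, one quick way to study resnet18's structure is to print its top-level children and their parameter counts. This is a small standalone sketch using torchvision's resnet18 (random weights are enough for inspecting the structure); it is independent of the training script below.

from torchvision import models

model = models.resnet18()  # random init is fine for structural inspection

# Top-level blocks: conv1, bn1, relu, maxpool, layer1..layer4, avgpool, fc
for name, module in model.named_children():
    n_params = sum(p.numel() for p in module.parameters())
    print(f"{name:10s} {module.__class__.__name__:18s} params={n_params:,}")

total = sum(p.numel() for p in model.parameters())
print(f"total params: {total:,}")  # roughly 11.7M for resnet18

The complete VGG16+CBAM fine-tuning script follows.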
import time
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter  # TensorBoard logging

# VGG16+CBAM model implementation
class VGGBlock(nn.Module):
    """One VGG stage: num_convs x (Conv-BN-ReLU), optional CBAM, then 2x2 max pooling."""
    def __init__(self, in_channels, out_channels, num_convs, use_cbam=True):
        super(VGGBlock, self).__init__()
        layers = []
        for _ in range(num_convs):
            layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
            layers.append(nn.BatchNorm2d(out_channels))
            layers.append(nn.ReLU(inplace=True))
            in_channels = out_channels
        if use_cbam:
            layers.append(CBAM(out_channels))  # CBAM is defined below; the name is resolved at instantiation time
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)

# CBAM module implementation (same as before)
class ChannelAttention(nn.Module):
    """Channel attention: shared MLP over global average- and max-pooled descriptors."""
    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(in_channels, in_channels // reduction_ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(in_channels // reduction_ratio, in_channels, 1, bias=False)
        )

    def forward(self, x):
        avg_out = self.fc(self.avg_pool(x))
        max_out = self.fc(self.max_pool(x))
        out = avg_out + max_out
        return torch.sigmoid(out)

class SpatialAttention(nn.Module):
    """Spatial attention: 7x7 conv over the channel-wise mean and max maps."""
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        out = torch.cat([avg_out, max_out], dim=1)
        out = self.conv(out)
        return torch.sigmoid(out)

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by spatial attention."""
    def __init__(self, in_channels, reduction_ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_att = ChannelAttention(in_channels, reduction_ratio)
        self.spatial_att = SpatialAttention(kernel_size)

    def forward(self, x):
        x = x * self.channel_att(x)
        x = x * self.spatial_att(x)
        return x

class VGG16_CBAM(nn.Module):
    def __init__(self, num_classes=10, init_weights=True):
        super(VGG16_CBAM, self).__init__()
        # VGG16 configuration: each stage is [num_convs, out_channels, use_cbam]
        cfg = [
            [2, 64, True],    # stage 1: 2 conv layers, 64 channels, with CBAM
            [2, 128, True],   # stage 2: 2 conv layers, 128 channels, with CBAM
            [3, 256, True],   # stage 3: 3 conv layers, 256 channels, with CBAM
            [3, 512, True],   # stage 4: 3 conv layers, 512 channels, with CBAM
            [3, 512, False]   # stage 5: 3 conv layers, 512 channels, no CBAM (for speed)
        ]
        self.features = self._make_layers(cfg)
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for num_convs, out_channels, use_cbam in cfg:
            layers.append(VGGBlock(in_channels, out_channels, num_convs, use_cbam))
            in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

# ======================================================================
# 4. Training function combining the staged strategy with detailed logging
# ======================================================================
def set_trainable_layers(model, trainable_parts):
    """Freeze all parameters, then unfreeze those whose names contain any of trainable_parts."""
    print(f"\n---> Unfreezing the following parts and making them trainable: {trainable_parts}")
    for name, param in model.named_parameters():
        param.requires_grad = False
        for part in trainable_parts:
            if part in name:
                param.requires_grad = True
                break

def train_staged_finetuning(model, criterion, train_loader, test_loader, device, epochs, writer):
    optimizer = None
    # History buffers
    all_iter_losses, iter_indices = [], []
    train_acc_history, test_acc_history = [], []
    train_loss_history, test_loss_history = [], []

    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()

        # --- Dynamically adjust the learning rate and the frozen layers ---
        if epoch == 1:
            print("\n" + "=" * 50 + "\nStage 1: train the CBAM modules and the classifier\n" + "=" * 50)
            # CBAM parameters show up under 'channel_att'/'spatial_att' in parameter names,
            # so we match on those substrings rather than on 'cbam'.
            set_trainable_layers(model, ["channel_att", "spatial_att", "classifier"])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)
            writer.add_scalar('learning_rate', 1e-3, epoch)
        elif epoch == 6:
            print("\n" + "=" * 50 + "\nStage 2: also unfreeze the last two conv stages (features.3, features.4)\n" + "=" * 50)
            set_trainable_layers(model, ["channel_att", "spatial_att", "classifier", "features.3", "features.4"])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4)
            writer.add_scalar('learning_rate', 1e-4, epoch)
        elif epoch == 21:
            print("\n" + "=" * 50 + "\nStage 3: unfreeze all layers for global fine-tuning\n" + "=" * 50)
            for param in model.parameters():
                param.requires_grad = True
            optimizer = optim.Adam(model.parameters(), lr=1e-5)
            writer.add_scalar('learning_rate', 1e-5, epoch)

        # --- Training loop ---
        model.train()
        running_loss, correct, total = 0.0, 0, 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            # Record the loss of every iteration
            iter_loss = loss.item()
            all_iter_losses.append(iter_loss)
            iter_indices.append((epoch - 1) * len(train_loader) + batch_idx + 1)

            # Log the iteration loss to TensorBoard
            global_step = (epoch - 1) * len(train_loader) + batch_idx + 1
            writer.add_scalar('train/iter_loss', iter_loss, global_step)

            running_loss += iter_loss
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

            # Print every 100 batches
            if (batch_idx + 1) % 100 == 0:
                print(f'Epoch: {epoch}/{epochs} | Batch: {batch_idx+1}/{len(train_loader)} '
                      f'| batch loss: {iter_loss:.4f} | running avg loss: {running_loss/(batch_idx+1):.4f}')

        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100. * correct / total
        train_loss_history.append(epoch_train_loss)
        train_acc_history.append(epoch_train_acc)

        # Log epoch-level training metrics to TensorBoard
        writer.add_scalar('train/loss', epoch_train_loss, epoch)
        writer.add_scalar('train/accuracy', epoch_train_acc, epoch)

        # --- Evaluation loop ---
        model.eval()
        test_loss, correct_test, total_test = 0, 0, 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                total_test += target.size(0)
                correct_test += predicted.eq(target).sum().item()

        epoch_test_loss = test_loss / len(test_loader)
        epoch_test_acc = 100. * correct_test / total_test
        test_loss_history.append(epoch_test_loss)
        test_acc_history.append(epoch_test_acc)

        # Log epoch-level test metrics to TensorBoard
        writer.add_scalar('test/loss', epoch_test_loss, epoch)
        writer.add_scalar('test/accuracy', epoch_test_acc, epoch)

        # Print the summary of this epoch
        print(f'Epoch {epoch}/{epochs} done | time: {time.time() - epoch_start_time:.2f}s '
              f'| train acc: {epoch_train_acc:.2f}% | test acc: {epoch_test_acc:.2f}%')

    # Plot the results after training finishes
    print("\nTraining finished! Plotting the results...")
    plot_iter_losses(all_iter_losses, iter_indices)
    plot_epoch_metrics(train_acc_history, test_acc_history, train_loss_history, test_loss_history)

    # Return the final test accuracy
    return epoch_test_acc

# ======================================================================
# 5. Plotting functions
# ======================================================================
def plot_iter_losses(losses, indices):
    plt.figure(figsize=(10, 4))
    plt.plot(indices, losses, 'b-', alpha=0.7, label='Iteration Loss')
    plt.xlabel('Iteration (batch index)')
    plt.ylabel('Loss')
    plt.title('Training loss per iteration')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

def plot_epoch_metrics(train_acc, test_acc, train_loss, test_loss):
    epochs = range(1, len(train_acc) + 1)
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(epochs, train_acc, 'b-', label='Train accuracy')
    plt.plot(epochs, test_acc, 'r-', label='Test accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Train and test accuracy')
    plt.legend(); plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(epochs, train_loss, 'b-', label='Train loss')
    plt.plot(epochs, test_loss, 'r-', label='Test loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Train and test loss')
    plt.legend(); plt.grid(True)

    plt.tight_layout()
    plt.show()

# ======================================================================
# 6. Data loading and preprocessing
# ======================================================================
def load_data():
    # Preprocessing: ImageNet-style augmentation and normalization
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Load the dataset (CIFAR-10 as an example)
    train_dataset = datasets.CIFAR10(root='./data', train=True,
                                     download=True, transform=train_transform)
    test_dataset = datasets.CIFAR10(root='./data', train=False,
                                    download=True, transform=test_transform)

    # Create the data loaders
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)
    return train_loader, test_loader

# ======================================================================
# 7. Run the training
# ======================================================================
if __name__ == "__main__":
    # Select the device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Initialize the TensorBoard writer (a timestamped log directory is created automatically)
    writer = SummaryWriter()

    # Load the data
    train_loader, test_loader = load_data()

    # Create the model
    model = VGG16_CBAM().to(device)

    # Log the model graph (requires a sample input)
    input_sample = torch.randn(1, 3, 224, 224).to(device)
    writer.add_graph(model, input_sample)

    criterion = nn.CrossEntropyLoss()
    epochs = 50

    print("Starting training of VGG16+CBAM with the staged fine-tuning strategy...")
    final_accuracy = train_staged_finetuning(model, criterion, train_loader, test_loader, device, epochs, writer)
    print(f"Training finished! Final test accuracy: {final_accuracy:.2f}%")

    # Close the TensorBoard writer
    writer.close()

    # torch.save(model.state_dict(), 'vgg16_cbam_finetuned.pth')
    # print("Model saved as: vgg16_cbam_finetuned.pth")
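Usage note: SummaryWriter() with no arguments writes to a timestamped subdirectory under ./runs, so after (or during) training the logged loss/accuracy curves and the model graph can be inspected by running `tensorboard --logdir runs` and opening the printed local URL in a browser.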
@浙大疏錦行