Neural Network Hyperparameter Tuning Guide
Knowledge points review:
- random seeds (a minimal seed-fixing sketch follows this list)
- initialization of the internal (learnable) parameters
- the neural network tuning guide itself
- how parameters are categorized
- the order in which to tune them
- tips for tuning each group of parameters
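As a quick refresher on the first bullet, here is a minimal seed-fixing sketch; the helper name `set_seed` is my own, not from the original notes:

```python
import random
import numpy as np
import torch

def set_seed(seed: int = 42):
    """Pin all the usual sources of randomness so runs are repeatable."""
    random.seed(seed)                 # Python's built-in RNG
    np.random.seed(seed)              # NumPy RNG
    torch.manual_seed(seed)           # PyTorch CPU RNG
    torch.cuda.manual_seed_all(seed)  # PyTorch RNGs on every GPU
    # Trade speed for reproducibility in cuDNN's algorithm choices
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)
```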
Parameter visualization
```python
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np

# Set the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Define a minimal CNN (just 1 conv layer + 1 fully connected layer)
class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        # Conv layer: 3 input channels, 16 output channels, 3x3 kernel
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        # Pooling layer: 2x2 window, halves the spatial size
        self.pool = nn.MaxPool2d(kernel_size=2)
        # Fully connected layer: flattened features to 10 outputs (10 classes)
        # Input size: 16 channels × 16×16 feature map = 16×16×16 = 4096
        self.fc = nn.Linear(16 * 16 * 16, 10)

    def forward(self, x):
        # Conv + pooling
        x = self.pool(self.conv1(x))  # output: [batch, 16, 16, 16]
        # Flatten
        x = x.view(-1, 16 * 16 * 16)  # flattened to: [batch, 4096]
        # Fully connected
        x = self.fc(x)                # output: [batch, 10]
        return x

# Initialize the model
model = SimpleCNN()
model = model.to(device)

# Inspect the model structure
print(model)

# Print statistics of the initial weights
def print_weight_stats(model):
    # Conv layer
    conv_weights = model.conv1.weight.data
    print("\nConv layer weight stats:")
    print(f"  mean: {conv_weights.mean().item():.6f}")
    print(f"  std:  {conv_weights.std().item():.6f}")
    # Kaiming fan_in for a conv layer counts the kernel area too:
    # fan_in = 3 input channels × 3 × 3 kernel = 27
    print(f"  theoretical std (Kaiming): {np.sqrt(2 / 27):.6f}")

    # Fully connected layer
    fc_weights = model.fc.weight.data
    print("\nFC layer weight stats:")
    print(f"  mean: {fc_weights.mean().item():.6f}")
    print(f"  std:  {fc_weights.std().item():.6f}")
    print(f"  theoretical std (Kaiming): {np.sqrt(2 / (16 * 16 * 16)):.6f}")

# Improved visualization of the weight distributions
def visualize_weights(model, layer_name, weights, save_path=None):
    plt.figure(figsize=(12, 5))

    # Weight histogram
    plt.subplot(1, 2, 1)
    plt.hist(weights.cpu().numpy().flatten(), bins=50)
    plt.title(f'{layer_name} weight distribution')
    plt.xlabel('weight value')
    plt.ylabel('frequency')

    # Weight heatmap
    plt.subplot(1, 2, 2)
    if len(weights.shape) == 4:  # conv weights: [out_channels, in_channels, kH, kW]
        # Show only the first 10 filters of the first input channel
        w = weights[:10, 0].cpu().numpy()
        plt.imshow(w.reshape(-1, weights.shape[2]), cmap='viridis')
    else:  # FC weights: [out_features, in_features]
        # Show only the first 10 neurons' weights, reshaped into a near-square image
        w = weights[:10].cpu().numpy()
        n_features = w.shape[1]
        side_length = int(np.sqrt(n_features))
        # If the width doesn't divide evenly, zero-pad so the reshape works
        if n_features % side_length != 0:
            new_size = side_length * int(np.ceil(n_features / side_length))
            w_padded = np.zeros((w.shape[0], new_size))
            w_padded[:, :n_features] = w
            w = w_padded
        # Reshape and display
        plt.imshow(w.reshape(w.shape[0] * side_length, -1), cmap='viridis')
    plt.colorbar()
    plt.title(f'{layer_name} weight heatmap')
    plt.tight_layout()
    if save_path:
        plt.savefig(f'{save_path}_{layer_name}.png')
    plt.show()

# Print the weight statistics
print_weight_stats(model)

# Visualize each layer's weights
visualize_weights(model, "Conv1", model.conv1.weight.data, "initial_weights")
visualize_weights(model, "FC", model.fc.weight.data, "initial_weights")

# Visualize the biases
plt.figure(figsize=(12, 5))

# Conv layer biases
conv_bias = model.conv1.bias.data
plt.subplot(1, 2, 1)
plt.bar(range(len(conv_bias)), conv_bias.cpu().numpy())
plt.title('Conv layer biases')

# FC layer biases
fc_bias = model.fc.bias.data
plt.subplot(1, 2, 2)
plt.bar(range(len(fc_bias)), fc_bias.cpu().numpy())
plt.title('FC layer biases')

plt.tight_layout()
plt.savefig('biases_initial.png')
plt.show()

print("\nBias statistics:")
print(f"Conv bias mean: {conv_bias.mean().item():.6f}")
print(f"Conv bias std:  {conv_bias.std().item():.6f}")
print(f"FC bias mean:   {fc_bias.mean().item():.6f}")
print(f"FC bias std:    {fc_bias.std().item():.6f}")
```
Guide
1. Parameter initialization: with pretrained parameters you take off immediately (see the first sketch after this list).
2. Batch size: probe for the largest value your hardware allows (second sketch below).
3. Epochs: little to say here; by default train until convergence, optionally with an early-stopping strategy (third sketch below).
4. Learning rate and scheduler: the highest payoff of all, because saddle points are everywhere, and the more complex the model, the more saddle points (fourth sketch below).
5. Model structure: ablation studies or controlled experiments.
6. Loss function: few choices, so trying them until one works is fine; experts can build their own.
7. Activation function: likewise few choices.
8. Regularization parameters: mainly dropout; bring it in once the model overfits. Every step above exists to get the model to overfit in the first place (last sketch below).
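For point 1, a minimal sketch of the two usual options. The torchvision call is the standard way to load pretrained weights (torchvision ≥ 0.13); applying Kaiming initialization to the SimpleCNN above is my own illustration, not something the original code does:

```python
import torch.nn as nn
from torchvision import models

# Option A: start from pretrained weights when a suitable backbone exists
resnet = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)

# Option B: no pretrained weights -> initialize explicitly (here: Kaiming)
def init_weights(m):
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)

model = SimpleCNN()
model.apply(init_weights)  # applies init_weights to every submodule recursively
```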
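For point 2, one way to "test the highest allowed value" is to keep doubling the batch size until the GPU runs out of memory. This is only a rough probe under the assumption that the forward pass dominates memory use; the helper name is hypothetical:

```python
def probe_max_batch_size(model, input_shape=(3, 32, 32), start=16):
    """Double the batch size until a CUDA out-of-memory error, then back off."""
    bs = start
    while True:
        try:
            x = torch.randn(bs, *input_shape, device=device)
            with torch.no_grad():
                model(x)
            bs *= 2
        except RuntimeError:          # typically "CUDA out of memory"
            torch.cuda.empty_cache()  # release the failed allocation
            return bs // 2

print(probe_max_batch_size(model))
```

A real training step (gradients plus optimizer state) needs noticeably more memory than this forward-only probe, so treat the result as an upper bound.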
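For point 3, a minimal early-stopping sketch: stop once validation loss has not improved for `patience` consecutive epochs. `train_one_epoch`, `evaluate`, and the loaders are hypothetical placeholders:

```python
best_val_loss = float('inf')
patience, bad_epochs = 5, 0   # stop after 5 epochs with no improvement

for epoch in range(100):
    train_one_epoch(model, train_loader)    # hypothetical helper
    val_loss = evaluate(model, val_loader)  # hypothetical helper

    if val_loss < best_val_loss:
        best_val_loss = val_loss
        bad_epochs = 0
        torch.save(model.state_dict(), 'best_model.pt')  # keep the best checkpoint
    else:
        bad_epochs += 1
        if bad_epochs >= patience:
            print(f'Early stopping at epoch {epoch}')
            break
```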
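For point 4, a sketch using one of PyTorch's stock schedulers. `ReduceLROnPlateau` shrinks the learning rate when the validation loss stalls, which is exactly the situation near flat or saddle regions; `train_one_epoch` and `evaluate` are the same hypothetical helpers as above:

```python
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Halve the LR whenever val loss hasn't improved for 3 consecutive epochs
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3)

for epoch in range(100):
    train_one_epoch(model, train_loader)    # hypothetical helper, as above
    val_loss = evaluate(model, val_loader)  # hypothetical helper, as above
    scheduler.step(val_loss)                # this scheduler steps on the metric
```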
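For point 8, a sketch of where dropout would slot into the SimpleCNN above once it starts to overfit; the variant class is my own illustration:

```python
class SimpleCNNWithDropout(nn.Module):
    def __init__(self, p=0.5):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2)
        self.dropout = nn.Dropout(p)  # active under model.train(), off under model.eval()
        self.fc = nn.Linear(16 * 16 * 16, 10)

    def forward(self, x):
        x = self.pool(self.conv1(x))
        x = x.view(-1, 16 * 16 * 16)
        x = self.dropout(x)           # drop features right before the classifier
        return self.fc(x)
```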