PyTorch 實(shí)現(xiàn)膠囊網(wǎng)絡(luò)(Capsule Network)并在 MNIST 上訓(xùn)練
文章目錄
- 前言
- 一、完整代碼
- 二、修改成自己的數(shù)據(jù)集
- 總結(jié)
前言
膠囊網(wǎng)絡(luò)的概念可以先行搜索。
一、完整代碼
import torch
import torch.nn.functional as F
from torch import nn
from torchvision import transforms, datasets
from torch.optim import Adam
from torch.utils.data import DataLoader# 定義膠囊網(wǎng)絡(luò)中的膠囊層
class CapsuleLayer(nn.Module):
    """Capsule layer (CapsNet, Sabour et al. 2017), usable in two modes.

    * ``num_route_nodes == -1`` — "primary capsule" mode: a bank of Conv2d
      filters whose outputs are concatenated into per-location capsule vectors.
    * ``num_route_nodes != -1`` — "routing" mode: dynamic routing-by-agreement
      from ``num_route_nodes`` input capsules to ``num_capsules`` output capsules.
    """

    def __init__(self, num_capsules, num_route_nodes, in_channels, out_channels,
                 kernel_size=None, stride=None, num_iterations=3):
        super(CapsuleLayer, self).__init__()
        self.num_route_nodes = num_route_nodes
        self.num_iterations = num_iterations
        self.num_capsules = num_capsules
        if num_route_nodes != -1:
            # One (in_channels x out_channels) transform per (capsule, route node) pair.
            self.route_weights = nn.Parameter(
                torch.randn(num_capsules, num_route_nodes, in_channels, out_channels))
        else:
            # Primary capsules: one conv per capsule-vector coordinate.
            self.capsules = nn.ModuleList([
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                          stride=stride, padding=0)
                for _ in range(num_capsules)])

    def squash(self, tensor, dim=-1, eps=1e-8):
        """Squash non-linearity: shrinks vector length into [0, 1), keeps direction.

        ``eps`` guards the division for all-zero capsule vectors; without it the
        original code produced NaN (0 / sqrt(0)).
        """
        squared_norm = (tensor ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * tensor / torch.sqrt(squared_norm + eps)

    def forward(self, x):
        if self.num_route_nodes != -1:
            # Routing mode. priors[i, b, j] = u_j @ W_ij : prediction of output
            # capsule i from input capsule j; shape (num_caps, batch, routes, 1, out).
            priors = x[None, :, :, None, :] @ self.route_weights[:, None, :, :, :]
            # zeros_like inherits device and dtype — avoids the extra .to() copy.
            logits = torch.zeros_like(priors)
            for i in range(self.num_iterations):
                probs = F.softmax(logits, dim=2)
                outputs = self.squash((probs * priors).sum(dim=2, keepdim=True))
                if i != self.num_iterations - 1:
                    # Agreement (dot product) between prediction and consensus
                    # output raises that route's logit.
                    delta_logits = (priors * outputs).sum(dim=-1, keepdim=True)
                    logits = logits + delta_logits
        else:
            # Primary-capsule mode: each conv supplies one coordinate of the
            # capsule vector, so concatenate on the LAST dim to get
            # (batch, num_route_nodes, num_capsules). The original used dim=-2,
            # which yields (batch, N * caps, 1) and breaks the broadcast shape
            # the routing layer expects.
            outputs = [capsule(x).view(x.size(0), -1, 1) for capsule in self.capsules]
            outputs = torch.cat(outputs, dim=-1)
            outputs = self.squash(outputs)
        return outputs
class CapsuleNet(nn.Module):
    """Capsule network for 10-class, 1-channel 28x28 images (MNIST).

    Pipeline: conv stem -> primary capsules -> digit capsules. ``forward``
    returns the L2 norm of each digit capsule, shape (batch, 10); a longer
    vector means higher confidence for that class.
    """

    def __init__(self):
        super(CapsuleNet, self).__init__()
        # 28x28 -> 20x20 feature maps (kernel 9, stride 1, no padding).
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=9, stride=1)
        # 20x20 -> 6x6 (kernel 9, stride 2): 32 * 6 * 6 = 1152 primary
        # capsules, each an 8-dim vector.
        self.primary_capsules = CapsuleLayer(num_capsules=8, num_route_nodes=-1,
                                             in_channels=256, out_channels=32,
                                             kernel_size=9, stride=2)
        self.digit_capsules = CapsuleLayer(num_capsules=10, num_route_nodes=32 * 6 * 6,
                                           in_channels=8, out_channels=16)

    def forward(self, x):
        x = F.relu(self.conv1(x), inplace=True)
        x = self.primary_capsules(x)
        # digit_capsules returns (10, batch, 1, 1, 16). Squeeze ONLY the two
        # singleton routing dims: a bare .squeeze() also removes the batch dim
        # when batch_size == 1, making the transpose below reorder the wrong axes.
        x = self.digit_capsules(x).squeeze(3).squeeze(2).transpose(0, 1)
        # Class score = capsule vector length.
        x = (x ** 2).sum(dim=-1) ** 0.5
        return x
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch, logging loss every 10 batches."""
    model.train()
    n_samples = len(train_loader.dataset)
    n_batches = len(train_loader)
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            seen = batch_idx * len(data)
            pct = 100. * batch_idx / n_batches
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, n_samples, pct, loss.item()))


def test(model, test_loader):
    """Evaluate on the test set; print mean loss and accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            scores = model(data)
            # Sum (not mean) per batch so the final division by the dataset
            # size gives the true per-sample average.
            test_loss += F.cross_entropy(scores, target, reduction='sum').item()
            pred = scores.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    n_test = len(test_loader.dataset)
    test_loss /= n_test
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, n_test,
        100. * correct / n_test))
# Data loading and pre-processing: MNIST, normalized with its standard
# per-channel mean/std (0.1307, 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)

# Select the device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialize the model and optimizer (Adam with default hyperparameters).
model = CapsuleNet().to(device)
optimizer = Adam(model.parameters())

# Train and evaluate the model for num_epochs epochs.
num_epochs = 10
for epoch in range(num_epochs):
    train(model, train_loader, optimizer, epoch)
    test(model, test_loader)
二、修改成自己的數(shù)據(jù)集
以下幾個(gè)位置是需要修改的。
# 數(shù)據(jù)加載和預(yù)處理
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))
])train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform)train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
這些位置要根據(jù)數(shù)據(jù)集實(shí)際情況修改。主要是如果分辨率修改了,那么下面的也要跟著修改。
self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=9, stride=1)
self.primary_capsules = CapsuleLayer(num_capsules=8, num_route_nodes=-1, in_channels=256, out_channels=32, kernel_size=9, stride=2)
self.digit_capsules = CapsuleLayer(num_capsules=10, num_route_nodes=32 * 6 * 6, in_channels=8,out_channels=16)
修改這 3 行代碼時(shí)很容易報(bào)錯(cuò):`num_route_nodes=32 * 6 * 6` 中的 `6 * 6` 是經(jīng)過兩次卷積(kernel_size=9,stride 分別為 1 和 2)后特征圖的尺寸,輸入分辨率或通道數(shù)變化后必須重新推算各層輸出形狀,再同步修改這幾個(gè)參數(shù)。
總結(jié)
多試試。